diff --git "a/2365.jsonl" "b/2365.jsonl"
new file mode 100644
--- /dev/null
+++ "b/2365.jsonl"
@@ -0,0 +1,1308 @@
+{"seq_id":"14842593595","text":"from collections import defaultdict\nimport numpy as np\nfrom pprint import pprint\n\nclass RoadLoader:\n\n def __init__(self, pathToBundledVertices = './DataFiles/output_verticesWiki.txt',\n pathToBundledEdges = './DataFiles/output_edgesWiki.txt',\n pathToOriginalVertices = './DataFiles/Original Vertices.txt',\n pathToOriginalEdges = './DataFiles/OriginalEdges.txt',\n pathToSemanticTree = './DataFiles/output_semanticTreeWiki.txt'):\n\n self.bundledVertices= {}\n with open(pathToBundledVertices) as ptbv:\n for line in ptbv:\n lst = line.split()\n self.bundledVertices[lst[0]] = lst[1:]\n\n self.bundledEdges = defaultdict(list)\n with open(pathToBundledEdges) as pte:\n for line in pte:\n lst = line.split()\n self.bundledEdges[lst[0]].append(lst[1:])\n\n self.originalVertices = {}\n with open(pathToOriginalVertices) as ptov:\n for line in ptov:\n lst = line.split()\n if len(lst) == 1: continue\n self.originalVertices[lst[0]] = lst[1:]\n\n self.inBoundEdges = defaultdict(list)\n self.outBoundEdges = defaultdict(list)\n with open(pathToOriginalEdges) as ptoe:\n for line in ptoe:\n lst = line.split()\n if len(lst) == 1: continue\n self.inBoundEdges[lst[0]].append(lst[1])\n self.outBoundEdges[lst[1]].append(lst[0])\n self.edgesSemanticTree = {}\n\n if pathToSemanticTree:\n with open(pathToSemanticTree) as ptst:\n for line in ptst:\n lst = line.split()\n self.edgesSemanticTree[lst[0]] = lst[1:]\n\n\n\n\n\n def get_vertices_and_edges_at_viewport(self, view_port = [0, 0, 0, 0]):\n vertices = self.originalVertices.keys()\n verticesInViewPort = {}\n other = {}\n for vertex in vertices:\n vertexInfo = self.originalVertices[vertex]\n\n if(self.__verticeIsInViewPort([float(vertexInfo[0]), float(vertexInfo[1])], view_port)):\n verticesInViewPort[vertex] = vertexInfo\n\n return verticesInViewPort, other\n\n\n\n def __verticeIsInViewPort(self, vertexCoor, view_port = [0, 0, 0, 0]):\n xmin = view_port[0]\n xmax = view_port[2]\n ymin = view_port[1]\n ymax = view_port[3]\n\n if xmax > float(vertexCoor[0]) > xmin and ymin < float(vertexCoor[1]) < ymax:\n return True\n else:\n return False\n\n\n\n\n\n\n#cTests = RoadLoader(pathToBundledVertices='./DataFiles/TestFiles/philippines/outputvertices.txt',\n# pathToBundledEdges='./DataFiles/TestFiles/philippines/outputEdges.txt',\n# pathToOriginalVertices='./DataFiles/TestFiles/philippines/philippines_list_vertices.txt',\n# pathToOriginalEdges='./DataFiles/TestFiles/philippines/philippines_list_edges.txt',\n# pathToSemanticTree='./DataFiles/TestFiles/philippines/semantic_edges.txt')\ncTests = RoadLoader()\nov = cTests.originalVertices.keys()\nbv = cTests.bundledVertices.keys()\n\n#oe = cTests.originalEdges.keys()\nbe = cTests.bundledEdges.keys()\n\nst = cTests.edgesSemanticTree.keys()\n\n#print(cTests.originalVertices[ov[11]], cTests.bundledVertices[bv[11]], oe[1], cTests.bundledEdges[be[11]], cTests.edgesSemanticTree[st[11]])\n\nveInViewPort = cTests.get_vertices_and_edges_at_viewport([-20, -20, 20, 20])\n","repo_name":"talha-ahsan/CartoGraphRoadAPI","sub_path":"src/RoadsAPI.py","file_name":"RoadsAPI.py","file_ext":"py","file_size_in_byte":3518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"13809386044","text":"# https://adventofcode.com/2021/day/4\n\n\ndef read_input(filename) -> (\n list[int],\n list[{'rows': list[set[int]], 'cols': list[set[int]], 'items': set[int], 'ended': bool}]):\n nums = []\n boards = []\n with open(filename) as file:\n lines = file.readlines()\n for index, line in enumerate(lines):\n if index == 0:\n nums = [int(num) for num in line.rstrip().split(',')]\n elif line == '\\n':\n boards.append({'rows': [], 'cols': [set(), set(), set(), set(), set()], 'items': set(),\n 'ended': False})\n\n else:\n cur_board = boards[-1]\n row = ' '.join(line.split()).rstrip(\"\\n\").split(' ')\n row = [int(num) for num in row]\n cur_board['items'].update(row)\n cur_board['rows'].append(set(row))\n for i, item in enumerate(row):\n cur_board['cols'][i].add(item)\n\n return nums, boards\n\n\ndef solution_part1(filename) -> int:\n nums, boards = read_input(filename)\n\n if len(boards) == 0 or len(nums) == 0:\n return 0\n\n for num in nums:\n for board in boards:\n if num in board['items']:\n for row in board['rows']:\n if num in row:\n row.remove(num)\n board['items'].discard(num)\n if len(row) == 0:\n return num * sum(board['items'])\n for col in board['cols']:\n if num in col:\n col.remove(num)\n board['items'].discard(num)\n if len(col) == 0:\n return num * sum(board['items'])\n return 0\n\n\ndef solution_part2(filename) -> int:\n nums, boards = read_input(filename)\n\n if len(boards) == 0 or len(nums) == 0:\n return 0\n\n win_count = 0\n\n for num in nums:\n for board in boards:\n win_flag = False\n if num in board['items'] and not board['ended']:\n for row in board['rows']:\n if num in row and len(row):\n row.discard(num)\n board['items'].discard(num)\n if len(row) == 0:\n board['ended'] = True\n win_count += 1\n win_flag = True\n if len(boards) == win_count:\n total_sum = sum(board['items'])\n return num * total_sum\n if not win_flag:\n for col in board['cols']:\n if num in col and len(col):\n col.discard(num)\n board['items'].discard(num)\n if len(col) == 0:\n board['ended'] = True\n win_count += 1\n if len(boards) == win_count:\n total_sum = sum(board['items'])\n return num * total_sum\n return 0\n\n\nassert (solution_part1('input/day4.test.txt') == 4512)\nprint('Result Part 1: ', solution_part1('input/day4.txt'))\n\nassert (solution_part2('input/day4.test.txt') == 1924)\nprint('Result Part 2: ', solution_part2('input/day4.txt'))\n","repo_name":"basvasilich/AdventOfCode2021","sub_path":"day4.py","file_name":"day4.py","file_ext":"py","file_size_in_byte":3444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"41690176248","text":"# According to the Wikipedia's article: \"The Game of Life, also known simply as Life, is a cellular automaton devised by the British mathematician John Horton Conway in 1970.\"\n\n# Given a board with m by n cells, each cell has an initial state live (1) or dead (0). Each cell interacts with its eight neighbors (horizontal, vertical, diagonal) using the following four rules (taken from the above Wikipedia article):\n\n# Any live cell with fewer than two live neighbors dies, as if caused by under-population.\n# Any live cell with two or three live neighbors lives on to the next generation.\n# Any live cell with more than three live neighbors dies, as if by over-population..\n# Any dead cell with exactly three live neighbors becomes a live cell, as if by reproduction.\n# Write a function to compute the next state (after one update) of the board given its current state. The next state is created by applying the above rules simultaneously to every cell in the current state, where births and deaths occur simultaneously.\n\n# Example:\n\n# Input: \n# [\n# [0,1,0],\n# [0,0,1],\n# [1,1,1],\n# [0,0,0]\n# ]\n# Output: \n# [\n# [0,0,0],\n# [1,0,1],\n# [0,1,1],\n# [0,1,0]\n# ]\n# Follow up:\n\n# Could you solve it in-place? Remember that the board needs to be updated at the same time: You cannot update some cells first and then use their updated values to update other cells.\n# In this question, we represent the board using a 2D array. In principle, the board is infinite, which would cause problems when the active area encroaches the border of the array. How would you address these problems?\n\n# 思路\n# 最简单的方法是再建一个矩阵保存,不过当inplace解时,如果我们直接根据每个点周围的存活数量来修改当前值,由于矩阵是顺序遍历的,这样会影响到下一个点的计算。如何在修改值的同时又保证下一个点的计算不会被影响呢?实际上我们只要将值稍作编码就行了,因为题目给出的是一个int矩阵,大有空间可以利用。这里我们假设对于某个点,值的含义为\n\n# 0 : 上一轮是0,这一轮过后还是0\n# 1 : 上一轮是1,这一轮过后还是1\n# 2 : 上一轮是1,这一轮过后变为0\n# 3 : 上一轮是0,这一轮过后变为1\n# 这样,对于一个节点来说,如果它周边的点是1或者2,就说明那个点上一轮是活的。最后,在遍历一遍数组,把我们编码再解回去,即0和2都变回0��1和3都变回1,就行了。\n\n# 注意\n# 注意编码方式,1和3都是这一轮过后为1,这样就可以用一个模2操作来直接解码了\n# 参考 https://segmentfault.com/a/1190000003819277 \n\nclass Solution(object):\n def gameOfLife(self, board):\n \"\"\"\n :type board: List[List[int]]\n :rtype: void Do not return anything, modify board in-place instead.\n \"\"\"\n row, col = len(board), len(board[0])\n for i in range(row):\n for j in range(col):\n cnt = 0 \n\n if i-1 >= 0 and j-1 >= 0 and board[i-1][j-1] in [1,2]:\n cnt += 1\n if i-1 >= 0 and board[i-1][j] in [1,2]:\n cnt += 1\n if i-1 >= 0 and j+1 < col and board[i-1][j+1] in [1,2]:\n cnt += 1\n if j-1 >= 0 and board[i][j-1] in [1,2]:\n cnt += 1\n if j+1 < col and board[i][j+1] in [1,2]:\n cnt += 1\n if i+1 < row and j-1 >= 0 and board[i+1][j-1] in [1,2]:\n cnt += 1\n if i+1 < row and board[i+1][j] in [1,2]:\n cnt += 1\n if i+1 < row and j+1 < col and board[i+1][j+1] in [1,2]:\n cnt += 1\n if board[i][j] == 0 and cnt == 3:\n board[i][j] = 3\n elif board[i][j] == 1:\n if cnt<2 or cnt>3:\n board[i][j] = 2\n for i in range(len(board)):\n for j in range(len(board[0])):\n board[i][j] %= 2\n","repo_name":"cherryzoe/Leetcode","sub_path":"289. Game of Life.py","file_name":"289. Game of Life.py","file_ext":"py","file_size_in_byte":4049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"25722578224","text":"import pandas as pd\r\nimport urllib, json #data input from url and reading json\r\nimport matplotlib.pyplot as plt # graph plotting\r\nimport geopandas as gpd\r\nfrom math import pi\r\nfrom Rainfall_Analysis_matplotlib import rain_analysis_mat\r\n\r\ndef bokeh_plot():\r\n #1: Read shape file as geoDataFrame\r\n #Pune admin level data - https://github.com/datameet\r\n fp = 'E:\\Coursera Material\\Python For Everyone\\Birds\\Owl analysis\\Datameet\\Pune_wards-master\\GeoData\\pune-admin-wards.geojson'\r\n #reading the file stored in variable fp\r\n map_df = gpd.read_file(fp)\r\n map_df.plot(color='skyblue', linewidth=0.1, edgecolor='black')\r\n \r\n #2: Read ward to sector data\r\n rain_data = 'E:\\Coursera Material\\Python For Everyone\\Pune Rainfall\\Rainfall Volunteers_2020 daily.xlsx'\r\n rain_data_spreadsheet = pd.read_excel(rain_data, sheet_name=None)\r\n \r\n admin_to_sector = rain_data_spreadsheet['Sector_admin']\r\n \r\n map_df_admin = map_df.merge(admin_to_sector, on='name')\r\n \r\n map_df_sector = map_df_admin[['geometry','Sector']]\r\n map_df_sector = map_df_sector.dissolve(by='Sector')\r\n map_df_sector.reset_index(inplace=True)\r\n \r\n #Reference link - https://www.earthdatascience.org/workshops/gis-open-source-python/dissolve-polygons-in-python-geopandas-shapely/\r\n \r\n #saving file as geojson format\r\n map_df_sector.to_file(\"pune_sectors.geojson\", driver='GeoJSON')\r\n \r\n \r\n #3.1: Calculate centroid of a multipolygon\r\n map_df_sector[\"centroid\"] = map_df_sector[\"geometry\"].centroid\r\n \r\n for index, row in map_df_sector.iterrows():\r\n centroid_coords = row.geometry.centroid.coords.xy\r\n map_df_sector.loc[index, 'cen_x'] = centroid_coords[0][0]\r\n map_df_sector.loc[index, 'cen_y'] = centroid_coords[1][0]\r\n \r\n map_df_sector.drop(['centroid'], axis=1, inplace=True)\r\n \r\n \r\n #Read data to json.\r\n merged_json = json.loads(map_df_sector.to_json())\r\n #Convert to String like object.\r\n json_data = json.dumps(merged_json)\r\n \r\n #json check - merged_json['features'][17]['properties']\r\n #Plot Bokeh graph\r\n from bokeh.io import output_notebook, show, output_file, curdoc\r\n from bokeh.plotting import figure\r\n from bokeh.models import GeoJSONDataSource, ColumnDataSource, LinearColorMapper, ColorBar, Div, Panel\r\n from bokeh.models import (Slider,HoverTool,Select, DatetimeTickFormatter, LabelSet)\r\n from bokeh.palettes import brewer\r\n from bokeh.layouts import widgetbox, row, column\r\n from bokeh.transform import factor_cmap\r\n from bokeh.tile_providers import get_provider, Vendors\r\n from bokeh.palettes import Spectral6\r\n \r\n #Input GeoJSON source that contains features for plotting.\r\n geosource = GeoJSONDataSource(geojson = json_data)\r\n \r\n #Create Pune City map object.\r\n p = figure(title = 'Rainfall collection points in Pune City', plot_height = 600 , plot_width = 600, \r\n toolbar_location = None, match_aspect=True)\r\n p.xgrid.grid_line_color = None\r\n p.ygrid.grid_line_color = None\r\n tile_provider = get_provider(Vendors.OSM)\r\n p.add_tile(tile_provider) #to investigate why tile is not getting rendered\r\n #Add patch renderer to figure. 
\r\n p.patches('xs','ys', source = geosource, line_color = 'black', line_width = 0.5, fill_alpha = 0)\r\n \r\n #p.add_tools(HoverTool(renderers=[r1], tooltips=[ ('admin name','@name')]))\r\n \r\n labels = LabelSet(x='cen_x', y='cen_y', source = geosource, text='Sector', level='glyph',\r\n x_offset=0, y_offset=0, render_mode='css')\r\n p.add_layout(labels)\r\n \r\n #Plot rain data collection pointson Pune City map object\r\n \r\n rain_data = rain_data_spreadsheet['June_per_day']\r\n rain_data['Date'] = rain_data['Date'].dt.date\r\n rain_data_scope = rain_data[(rain_data['Region']=='PMC') &\r\n (rain_data['Obs Status']=='complete')]\r\n \r\n #Allocate rain_data points to 'Sector'\r\n \r\n rain_data_GeoDataFrame = gpd.GeoDataFrame(rain_data_scope, geometry=gpd.points_from_xy(rain_data_scope.Long,\r\n rain_data_scope.Lat))\r\n rain_data_GeoDataFrame.crs = {'init': 'epsg:4326'}\r\n rain_data_alloc_to_Sector = gpd.sjoin(rain_data_GeoDataFrame, map_df_sector)\r\n \r\n rain_data_total = rain_data_alloc_to_Sector.groupby(['Region','Location','Lat','Long','Obs Status','Sector']).sum()[['Rainfall']].reset_index()\r\n \r\n rain_data_total = rain_data_total.sort_values(by=['Sector','Location'])\r\n rain_data_points = ColumnDataSource(data = {'x': rain_data_total['Long'], \r\n 'y': rain_data_total['Lat'],\r\n 'Obs Status': rain_data_total['Obs Status'],\r\n 'Location':rain_data_total['Location'],\r\n 'Rainfall':rain_data_total['Rainfall']})\r\n STATUS = ['complete','na']\r\n STATUS_colour = ['green','orange']\r\n r2= p.circle('x', 'y', source = rain_data_points, alpha = 0.4, legend_field='Obs Status', \r\n color = factor_cmap('Obs Status', STATUS_colour, STATUS),#'navy',\r\n size = 10)\r\n p.add_tools(HoverTool(renderers=[r2], tooltips=[ ('Location','@Location')]))\r\n \r\n \r\n #Create Bar chart:\r\n '''\r\n Method 1:\r\n b = figure(x_range=rain_data_total['Location'], y_range=(0,200),plot_height=700, title=\"Sector Wise Rainfall\",\r\n toolbar_location=None, tools=\"\")\r\n b.vbar(x=rain_data_total['Location'], top=rain_data_total['Rainfall'], width=0.5)\r\n '''\r\n \r\n #Method 2:\r\n bar_chart_data = ColumnDataSource(data = {'x': rain_data_total['Location'], \r\n 'y': rain_data_total['Rainfall'],\r\n 'Sector': rain_data_total['Sector']})\r\n \r\n sector = rain_data_total['Sector'].unique()\r\n b = figure(x_range=rain_data_total['Location'], y_range=(0,100),plot_height=400, title=\"Sector Wise Rainfall\",\r\n toolbar_location=None)\r\n b.vbar(x='x', top='y', width=0.5, source=bar_chart_data,legend_field='Sector',\r\n color=factor_cmap('Sector',Spectral6,sector))\r\n \r\n b.xgrid.grid_line_color = None\r\n b.xaxis.major_label_orientation = pi/2.5\r\n b.xaxis.major_label_text_align = 'left'\r\n b.y_range.start = 0\r\n \r\n m=row(p)\r\n #show(m)\r\n \r\n #Plot matplotlib line graph\r\n rain_analysis_mat(rain_data_alloc_to_Sector)\r\n \r\n \r\n import folium\r\n from folium import Choropleth, Circle, Marker\r\n from folium.plugins import HeatMap, MarkerCluster\r\n \r\n m_2 = folium.Map(location=[18.5, 73.9], tiles='StamenTerrain', zoom_start=12) #StamenTerrain, StamenToner, StamenWatercolor\r\n \r\n #map_gdf_admin = map_df[['name','geometry']].set_index('name')\r\n map_gdf_sector = map_df_sector[['Sector','geometry']].set_index('Sector')\r\n \r\n #admin_count = map_df.name.value_counts()\r\n sector_count = map_df_sector.Sector.value_counts() \r\n \r\n Choropleth(geo_data = map_gdf_sector.__geo_interface__,\r\n data = sector_count,\r\n key_on=\"feature.id\",\r\n fill_color = 'YlGn',\r\n 
fill_opacity=0,\r\n line_opacity=0.2,\r\n #legend_name = 'Jo count per state',\r\n smooth_factor=0).add_to(m_2)\r\n \r\n #https://python-visualization.github.io/folium/quickstart.html#Choropleth-maps\r\n folium.GeoJson(map_gdf_sector.__geo_interface__,\r\n name='geojson'\r\n ).add_to(m_2)\r\n \r\n folium_color = {'complete':'green','na':'orange'}\r\n rain_data_total.apply(lambda row:folium.CircleMarker(location=[row[\"Lat\"], row[\"Long\"]], \r\n radius=5,\r\n color=folium_color[row['Obs Status']],\r\n popup=row['Obs Status']).add_to(m_2), axis=1)\r\n # ref link -https://stackoverflow.com/questions/42756934/how-to-plot-lat-and-long-from-pandas-dataframe-on-folium-map-group-by-some-label\r\n \r\n m_2.save('static/pune_Chropleth.html')\r\n folium_html = m_2._repr_html_()\r\n \r\n \r\n \r\n #Plot output path\r\n #C:/Users/dell/AppData/Local/Temp/tmpzm9borp4.html\r\n return(m,folium_html)","repo_name":"mandarsj11/PuneRainfall","sub_path":"Rainfall_Analysis_bokeh_sector_admin.py","file_name":"Rainfall_Analysis_bokeh_sector_admin.py","file_ext":"py","file_size_in_byte":8505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"39910289332","text":"from django.contrib import admin\nfrom .models import *\nfrom import_export import resources\nfrom import_export.admin import ImportExportModelAdmin\n# Register your models here.\nclass resourceBarberia (resources.ModelResource):\n class Meta:\n model = barberia\n\nclass adminBarberia(ImportExportModelAdmin, admin.ModelAdmin):\n search_fields = ['nombre']\n list_display = ['nombre', 'direccion', 'telefono']\n resource_class = resourceBarberia\n\nadmin.site.register(barberia, adminBarberia)\n\nclass resourceServicios(resources.ModelResource):\n class Meta:\n model = servicios\n\nclass adminServicios(ImportExportModelAdmin, admin.ModelAdmin):\n search_fields = ['pk_servicios']\n list_display = ['tipo','precio','descuento']\n resource_class = resourceServicios\n\nadmin.site.register(servicios, adminServicios)\n\nclass resourceEmpleados(resources.ModelResource):\n class Meta:\n model = empleados\n\nclass adminEmpleados(ImportExportModelAdmin, admin.ModelAdmin):\n search_fields = ['pk_empleados']\n list_display = ['nombre', 'apellidos', 'telefono','fk_barberia']\n resource_class = resourceEmpleados\n\nadmin.site.register(empleados, adminEmpleados)\n\nclass resourceCliente(resources.ModelResource):\n class Meta:\n model = cliente\n\nclass adminCliente(ImportExportModelAdmin, admin.ModelAdmin):\n search_fields = ['nombre']\n list_display = ['nombre', 'apellidos', 'c_cita','fk_barberia', 'fk_servicios']\n resource_class = resourceCliente\n\nadmin.site.register(cliente, adminCliente)\n\nclass resourceCita(resources.ModelResource):\n class Meta:\n model = cita\n\nclass adminCita(ImportExportModelAdmin, admin.ModelAdmin):\n search_fields = ['fecha']\n list_display = ['fecha','horario','fk_cliente']\n resource_class = resourceCita\n\nadmin.site.register(cita, adminCita)\n\n","repo_name":"MadayDomtt/djangoBarber","sub_path":"apps/catalogo/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"12031409031","text":"\"\"\"\nUnit test for Table class\n\"\"\"\nfrom __future__ import annotations\n\nimport pytest\n\nfrom featurebyte.api.dimension_table import DimensionTable\nfrom featurebyte.api.event_table import EventTable\nfrom featurebyte.api.item_table import ItemTable\nfrom featurebyte.api.scd_table import SCDTable\nfrom featurebyte.api.table import Table\nfrom featurebyte.exception import RecordRetrievalException\n\n\ndef test_get_event_table(saved_event_table, snowflake_event_table):\n \"\"\"\n Test Table.get function to retrieve EventTable\n \"\"\"\n # load the event table from the persistent\n loaded_event_table = Table.get(snowflake_event_table.name)\n assert loaded_event_table.saved is True\n assert loaded_event_table == snowflake_event_table\n assert EventTable.get_by_id(id=snowflake_event_table.id) == snowflake_event_table\n\n # load the event table use get_by_id\n loaded_table = Table.get_by_id(snowflake_event_table.id)\n assert loaded_table == loaded_event_table\n\n with pytest.raises(RecordRetrievalException) as exc:\n Table.get(\"unknown_event_table\")\n expected_msg = (\n 'Table (name: \"unknown_event_table\") not found. ' \"Please save the Table object first.\"\n )\n assert expected_msg in str(exc.value)\n\n\ndef test_get_item_table(snowflake_item_table, saved_item_table):\n \"\"\"\n Test Table.get function to retrieve ItemTable\n \"\"\"\n # load the item table from the persistent\n loaded_table = Table.get(saved_item_table.name)\n assert loaded_table.saved is True\n assert loaded_table == snowflake_item_table\n assert ItemTable.get_by_id(id=loaded_table.id) == snowflake_item_table\n\n with pytest.raises(RecordRetrievalException) as exc:\n Table.get(\"unknown_item_table\")\n expected_msg = (\n 'Table (name: \"unknown_item_table\") not found. ' \"Please save the Table object first.\"\n )\n assert expected_msg in str(exc.value)\n\n\ndef test_get_scd_table(saved_scd_table, snowflake_scd_table):\n \"\"\"\n Test Table.get function to retrieve SCDTable\n \"\"\"\n # load the scd table from the persistent\n loaded_scd_table = Table.get(snowflake_scd_table.name)\n assert loaded_scd_table.saved is True\n assert loaded_scd_table == snowflake_scd_table\n assert SCDTable.get_by_id(id=snowflake_scd_table.id) == snowflake_scd_table\n\n with pytest.raises(RecordRetrievalException) as exc:\n Table.get(\"unknown_scd_table\")\n expected_msg = (\n 'Table (name: \"unknown_scd_table\") not found. ' \"Please save the Table object first.\"\n )\n assert expected_msg in str(exc.value)\n\n\ndef test_get_dimension_table(saved_dimension_table, snowflake_dimension_table):\n \"\"\"\n Test Table.get function to retrieve DimensionTable\n \"\"\"\n # load the dimension table from the persistent\n loaded_scd_table = Table.get(snowflake_dimension_table.name)\n assert loaded_scd_table.saved is True\n assert loaded_scd_table == snowflake_dimension_table\n assert DimensionTable.get_by_id(id=snowflake_dimension_table.id) == snowflake_dimension_table\n\n with pytest.raises(RecordRetrievalException) as exc:\n Table.get(\"unknown_dimension_table\")\n expected_msg = (\n 'Table (name: \"unknown_dimension_table\") not found. ' \"Please save the Table object first.\"\n )\n assert expected_msg in str(exc.value)\n","repo_name":"featurebyte/featurebyte","sub_path":"tests/unit/api/test_table.py","file_name":"test_table.py","file_ext":"py","file_size_in_byte":3293,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"78"}
+{"seq_id":"26955754992","text":"# 347. Top K Frequent Elements\n# Given an integer array nums and an integer k, return the k most frequent elements. You may return the answer in any order.\n\n#Using Heap - It is not complete \n\nimport heapq\nfrom collections import Counter\n\ndef top_k_element(nums,k):\n counts=Counter(nums)\n min_heap = []\n for element, frequency in counts.items():\n heappush(min_heap, (frequency, element))\n if len(min_heap) > k:\n heappop(min_heap)\n return [num for (count ,num) in min_heap]\n\nif __name__ == \"__main__\":\n nums=[1,2,3,4,1,1,1,12,1,1,1,2,4,4,4]\n k=2\n print (\"Current numbers {} and {}\".format(nums,k))\n print (\"Most repeated numbers are {}\".format(top_k_element(nums,k)))","repo_name":"smohapatra1/scripting","sub_path":"python/practice/start_again/2023/07212022/topkelements_using_heap.py","file_name":"topkelements_using_heap.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"28516646250","text":"from PyQt5 import uic\r\nfrom PyQt5.QtWidgets import QMainWindow\r\nfrom PyQt5.QtCore import QTimer\r\nfrom main_menu import MainMenu\r\nimport bcrypt\r\n\r\n\r\nclass Menu_Pin_Code(QMainWindow):\r\n def __init__(self):\r\n super().__init__()\r\n self.timer = 0\r\n self.times = 0\r\n self.pin_code_hash = ''\r\n uic.loadUi('Project_pin_code.ui', self)\r\n self.setFixedSize(290, 550)\r\n self.pin_code_to_print = ''\r\n self.attempt_input_pin_code = 0\r\n self.buttons_of_keyboard = [self.pin_code_button_1, self.pin_code_button_2, self.pin_code_button_3,\r\n self.pin_code_button_4, self.pin_code_button_5, self.pin_code_button_6,\r\n self.pin_code_button_7, self.pin_code_button_8, self.pin_code_button_9,\r\n self.pin_code_button_0, self.pin_code_button_clear_last,\r\n self.pin_code_button_clear_all]\r\n self.buttons_of_key_pin_code = [self.pin_code_0, self.pin_code_1, self.pin_code_2, self.pin_code_3]\r\n # self.styleSheet_button = [\"background-color: #FFFFFF; border: 1px solid black; border-radius: 22px;\",\r\n # \"background-color: #000000; border: 1px solid black; border-radius: 22px;\"]\r\n self.pin_code_button_1.clicked.connect(self.add_one)\r\n self.pin_code_button_2.clicked.connect(self.add_two)\r\n self.pin_code_button_3.clicked.connect(self.add_three)\r\n self.pin_code_button_4.clicked.connect(self.add_four)\r\n self.pin_code_button_5.clicked.connect(self.add_five)\r\n self.pin_code_button_6.clicked.connect(self.add_six)\r\n self.pin_code_button_7.clicked.connect(self.add_seven)\r\n self.pin_code_button_8.clicked.connect(self.add_eight)\r\n self.pin_code_button_9.clicked.connect(self.add_nine)\r\n self.pin_code_button_0.clicked.connect(self.add_zero)\r\n self.pin_code_button_clear_last.clicked.connect(self.clear_last)\r\n self.pin_code_button_clear_all.clicked.connect(self.clear_all)\r\n self.color_pin_code_buttons()\r\n for button in self.buttons_of_key_pin_code:\r\n button.setEnabled(False)\r\n\r\n def add_one(self):\r\n self.pin_code_to_print += '1'\r\n self.color_pin_code_buttons()\r\n\r\n def add_two(self):\r\n self.pin_code_to_print += '2'\r\n self.color_pin_code_buttons()\r\n\r\n def add_three(self):\r\n self.pin_code_to_print += '3'\r\n self.color_pin_code_buttons()\r\n\r\n def add_four(self):\r\n self.pin_code_to_print += '4'\r\n self.color_pin_code_buttons()\r\n\r\n def add_five(self):\r\n self.pin_code_to_print += '5'\r\n self.color_pin_code_buttons()\r\n\r\n def add_six(self):\r\n self.pin_code_to_print += '6'\r\n self.color_pin_code_buttons()\r\n\r\n def add_seven(self):\r\n self.pin_code_to_print += '7'\r\n self.color_pin_code_buttons()\r\n\r\n def add_eight(self):\r\n self.pin_code_to_print += '8'\r\n self.color_pin_code_buttons()\r\n\r\n def add_nine(self):\r\n self.pin_code_to_print += '9'\r\n self.color_pin_code_buttons()\r\n\r\n def add_zero(self):\r\n self.pin_code_to_print += '0'\r\n self.color_pin_code_buttons()\r\n\r\n def clear_last(self):\r\n self.pin_code_to_print = self.pin_code_to_print[:-1]\r\n self.color_pin_code_buttons()\r\n\r\n def clear_all(self):\r\n self.pin_code_to_print = ''\r\n self.color_pin_code_buttons()\r\n\r\n def color_pin_code_buttons(self):\r\n if len(self.pin_code_to_print) == 0:\r\n self.pin_code_0.setStyleSheet(f\"background-color: #FFFFFF; border: 1px solid black; \"\r\n f\"border-radius: 22px;\")\r\n self.pin_code_1.setStyleSheet(\r\n f\"background-color: #FFFFFF; border: 1px solid black; border-radius: 22px;\")\r\n self.pin_code_2.setStyleSheet(\r\n f\"background-color: #FFFFFF; border: 1px solid 
black; border-radius: 22px;\")\r\n self.pin_code_3.setStyleSheet(\r\n f\"background-color: #FFFFFF; border: 1px solid black; border-radius: 22px;\")\r\n elif len(self.pin_code_to_print) == 1:\r\n self.input_pin_code.resize(230, 40)\r\n self.input_pin_code.setText('ВВЕДИТЕ ПИН-КОД')\r\n self.input_pin_code.setStyleSheet(f\"font-size: 14pt; color: #000000;\")\r\n self.pin_code_0.setStyleSheet(\r\n f\"background-color: #000000; border: 1px solid black; border-radius: 22px;\")\r\n self.pin_code_1.setStyleSheet(\r\n f\"background-color: #FFFFFF; border: 1px solid black; border-radius: 22px;\")\r\n self.pin_code_2.setStyleSheet(\r\n f\"background-color: #FFFFFF; border: 1px solid black; border-radius: 22px;\")\r\n self.pin_code_3.setStyleSheet(\r\n f\"background-color: #FFFFFF; border: 1px solid black; border-radius: 22px;\")\r\n elif len(self.pin_code_to_print) == 2:\r\n self.pin_code_0.setStyleSheet(\r\n f\"background-color: #000000; border: 1px solid black; border-radius: 22px;\")\r\n self.pin_code_1.setStyleSheet(\r\n f\"background-color: #000000; border: 1px solid black; border-radius: 22px;\")\r\n self.pin_code_2.setStyleSheet(\r\n f\"background-color: #FFFFFF; border: 1px solid black; border-radius: 22px;\")\r\n self.pin_code_3.setStyleSheet(\r\n f\"background-color: #FFFFFF; border: 1px solid black; border-radius: 22px;\")\r\n elif len(self.pin_code_to_print) == 3:\r\n self.pin_code_0.setStyleSheet(\r\n f\"background-color: #000000; border: 1px solid black; border-radius: 22px;\")\r\n self.pin_code_1.setStyleSheet(\r\n f\"background-color: #000000; border: 1px solid black; border-radius: 22px;\")\r\n self.pin_code_2.setStyleSheet(\r\n f\"background-color: #000000; border: 1px solid black; border-radius: 22px;\")\r\n self.pin_code_3.setStyleSheet(\r\n f\"background-color: #FFFFFF; border: 1px solid black; border-radius: 22px;\")\r\n elif len(self.pin_code_to_print) == 4:\r\n self.pin_code_0.setStyleSheet(\r\n f\"background-color: #000000; border: 1px solid black; border-radius: 22px;\")\r\n self.pin_code_1.setStyleSheet(\r\n f\"background-color: #000000; border: 1px solid black; border-radius: 22px;\")\r\n self.pin_code_2.setStyleSheet(\r\n f\"background-color: #000000; border: 1px solid black; border-radius: 22px;\")\r\n self.pin_code_3.setStyleSheet(\r\n f\"background-color: #000000; border: 1px solid black; border-radius: 22px;\")\r\n self.check_the_pin_code()\r\n\r\n def return_to_default(self):\r\n self.pin_code_to_print = ''\r\n for button in self.buttons_of_key_pin_code:\r\n button.setStyleSheet(\"background-color: #FFFFFF; border: 1px solid black; border-radius: 22px;\")\r\n\r\n def many_attempts_pin_code(self):\r\n self.timer = QTimer()\r\n self.timer.timeout.connect(self.update_wrong_pin_message)\r\n self.timer.start(1000)\r\n self.times = 30\r\n\r\n def hide_pin_code(self):\r\n if bcrypt.checkpw(self.pin_code_to_print.encode('utf-8'), self.pin_code_hash):\r\n for button in self.buttons_of_key_pin_code:\r\n button.hide()\r\n else:\r\n self.pin_code_to_print = ''\r\n self.return_to_default()\r\n\r\n def hide_keyword(self):\r\n for button in self.buttons_of_keyboard:\r\n button.hide()\r\n\r\n def update_wrong_pin_message(self):\r\n self.wrong_pin_code.setText(\r\n f'ВЫ ВВЕЛИ НЕВЕРНЫЙ ПАРОЛЬ 3 РАЗА ПОДРЯД!\\nПОПРОБУЙТЕ ЧЕРЕЗ {self.times} СЕКУНД')\r\n self.times -= 1\r\n if self.times < 0:\r\n self.wrong_pin_code.setText('')\r\n for buttons in self.buttons_of_keyboard:\r\n buttons.setEnabled(True)\r\n self.input_pin_code.setText('ПОПРОБУЙТЕ ЕЩЕ РАЗ')\r\n self.input_pin_code.move(40, 
60)\r\n self.input_pin_code.resize(230, 40)\r\n self.input_pin_code.setStyleSheet(\"font-size: 14pt; color: #000000\")\r\n self.timer.stop()\r\n\r\n def check_the_pin_code(self):\r\n with open('password.txt', 'rb') as pin_code_file:\r\n self.pin_code_hash = pin_code_file.read()\r\n if bcrypt.checkpw(self.pin_code_to_print.encode('utf-8'), self.pin_code_hash):\r\n self.input_pin_code.setText('ДОСТУП РАЗРЕШЕН!')\r\n self.input_pin_code.move(40, 220)\r\n self.input_pin_code.resize(230, 40)\r\n self.input_pin_code.setStyleSheet(\"font-size: 14pt; color: #90ee90;\")\r\n self.hide_pin_code()\r\n self.hide_keyword()\r\n self.close()\r\n Menu_Pin_Code().hide()\r\n MainMenu().show()\r\n else:\r\n self.return_to_default()\r\n self.attempt_input_pin_code += 1\r\n print(self.attempt_input_pin_code)\r\n self.input_pin_code.setText('НЕВЕРНЫЙ ПАРОЛЬ!')\r\n self.input_pin_code.move(40, 60)\r\n self.input_pin_code.resize(230, 40)\r\n self.input_pin_code.setStyleSheet(\"font-size: 14pt; color: #ff0000\")\r\n if self.attempt_input_pin_code == 3:\r\n self.many_attempts_pin_code()\r\n for buttons in self.buttons_of_keyboard:\r\n buttons.setEnabled(False)\r\n self.attempt_input_pin_code = 0","repo_name":"HedRed1/PyQT5","sub_path":"pin_code_system.py","file_name":"pin_code_system.py","file_ext":"py","file_size_in_byte":9634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"40341539370","text":"# -*- coding: utf-8 -*-\n\"\"\"\nMoler's device has 2 main responsibilities:\n- be the factory that returns commands of that device\n- be the state machine that controls which commands may run in given state\n\"\"\"\n\n__author__ = 'Grzegorz Latuszek'\n__copyright__ = 'Copyright (C) 2020, Nokia'\n__email__ = 'grzegorz.latuszek@nokia.com'\n\nfrom moler.device.textualdevice import TextualDevice\n# from moler.device.proxy_pc import ProxyPc # TODO: allow jumping towards AT_REMOTE via proxy-pc\nfrom moler.device.unixlocal import UnixLocal\nfrom moler.device.unixremote import UnixRemote\nfrom moler.helpers import call_base_class_method_with_same_name, mark_to_call_base_class_method_with_same_name\nfrom moler.cmd.at.genericat import GenericAtCommand\n\n\n@call_base_class_method_with_same_name\nclass AtRemote(UnixRemote):\n r\"\"\"\n AtRemote device class.\n\n Example of device in yaml configuration file:\n -without PROXY_PC:\n AT_1:\n DEVICE_CLASS: moler.device.atremote.AtRemote\n CONNECTION_HOPS:\n UNIX_LOCAL:\n UNIX_REMOTE:\n execute_command: ssh # default value\n command_params:\n expected_prompt: unix_remote_prompt\n host: host_ip\n login: login\n password: password\n UNIX_REMOTE:\n AT_REMOTE:\n execute_command: plink_serial # default value\n command_params:\n serial_devname: 'COM5'\n AT_REMOTE:\n UNIX_REMOTE:\n execute_command: ctrl_c # default value\n \"\"\"\n\n at_remote = \"AT_REMOTE\"\n\n def __init__(self, sm_params, name=None, io_connection=None, io_type=None, variant=None, io_constructor_kwargs=None,\n initial_state=None, lazy_cmds_events=False):\n \"\"\"\n Create AT device communicating over io_connection\n :param sm_params: dict with parameters of state machine for device\n :param name: name of device\n :param io_connection: External-IO connection having embedded moler-connection\n :param io_type: type of connection - tcp, udp, ssh, telnet, ...\n :param variant: connection implementation variant, ex. 'threaded', 'twisted', 'asyncio', ...\n (if not given then default one is taken)\n :param io_constructor_kwargs: additional parameter into constructor of selected connection type\n (if not given then default one is taken)\n :param initial_state: name of initial state. State machine tries to enter this state just after creation.\n :param lazy_cmds_events: set False to load all commands and events when device is initialized, set True to load\n commands and events when they are required for the first time.\n \"\"\"\n initial_state = initial_state if initial_state is not None else AtRemote.at_remote\n super(AtRemote, self).__init__(name=name, io_connection=io_connection,\n io_type=io_type, variant=variant,\n io_constructor_kwargs=io_constructor_kwargs,\n sm_params=sm_params, initial_state=initial_state,\n lazy_cmds_events=lazy_cmds_events)\n\n @mark_to_call_base_class_method_with_same_name\n def _get_default_sm_configuration_without_proxy_pc(self):\n \"\"\"\n Return State Machine default configuration without proxy_pc state.\n :return: default sm configuration without proxy_pc state.\n \"\"\"\n config = { # TODO: shell we use direct-string names of config dicts? 
change simplicity vs readability\n TextualDevice.connection_hops: {\n UnixRemote.unix_remote: { # from\n AtRemote.at_remote: { # to\n \"execute_command\": \"plink_serial\",\n \"command_params\": { # with parameters\n \"target_newline\": \"\\r\\n\"\n },\n \"required_command_params\": [\n \"serial_devname\"\n ]\n },\n },\n AtRemote.at_remote: { # from\n UnixRemote.unix_remote: { # to\n \"execute_command\": \"ctrl_c\", # using command\n \"command_params\": { # with parameters\n \"expected_prompt\": 'remote_prompt', # overwritten in _configure_state_machine\n },\n \"required_command_params\": [\n ]\n },\n },\n }\n }\n return config\n\n @mark_to_call_base_class_method_with_same_name\n def _prepare_transitions_without_proxy_pc(self):\n \"\"\"\n Prepare transitions to change states without proxy_pc state.\n :return: transitions without proxy_pc state.\n \"\"\"\n transitions = {\n UnixRemote.unix_remote: {\n AtRemote.at_remote: {\n \"action\": [\n \"_execute_command_to_change_state\"\n ],\n }\n },\n AtRemote.at_remote: {\n UnixRemote.unix_remote: {\n \"action\": [\n \"_execute_command_to_change_state\"\n ],\n }\n },\n }\n return transitions\n\n @mark_to_call_base_class_method_with_same_name\n def _prepare_state_prompts_without_proxy_pc(self):\n \"\"\"\n Prepare textual prompt for each state for State Machine without proxy_pc state.\n :return: textual prompt for each state without proxy_pc state.\n \"\"\"\n hops_config = self._configurations[TextualDevice.connection_hops]\n serial_devname = hops_config[UnixRemote.unix_remote][AtRemote.at_remote][\"command_params\"][\"serial_devname\"]\n proxy_prompt = \"{}> port READY\".format(serial_devname)\n at_cmds_prompt = GenericAtCommand._re_default_at_prompt.pattern\n state_prompts = {\n AtRemote.at_remote: \"{}|{}\".format(proxy_prompt, at_cmds_prompt)\n }\n return state_prompts\n\n @mark_to_call_base_class_method_with_same_name\n def _prepare_newline_chars_without_proxy_pc(self):\n \"\"\"\n Prepare newline char for each state for State Machine without proxy_pc state.\n :return: newline char for each state without proxy_pc state.\n \"\"\"\n hops_config = self._configurations[TextualDevice.connection_hops]\n hops_2_at_remote_config = hops_config[UnixRemote.unix_remote][AtRemote.at_remote]\n newline_chars = {\n AtRemote.at_remote: hops_2_at_remote_config[\"command_params\"][\"target_newline\"],\n }\n return newline_chars\n\n @mark_to_call_base_class_method_with_same_name\n def _prepare_state_hops_without_proxy_pc(self):\n \"\"\"\n Prepare non direct transitions for each state for State Machine without proxy_pc state.\n :return: non direct transitions for each state without proxy_pc state.\n \"\"\"\n state_hops = {\n TextualDevice.not_connected: {\n UnixLocal.unix_local_root: UnixLocal.unix_local,\n UnixRemote.unix_remote: UnixLocal.unix_local,\n UnixRemote.unix_remote_root: UnixLocal.unix_local,\n AtRemote.at_remote: UnixLocal.unix_local,\n },\n UnixLocal.unix_local: {\n UnixRemote.unix_remote_root: UnixRemote.unix_remote,\n AtRemote.at_remote: UnixRemote.unix_remote,\n },\n UnixLocal.unix_local_root: {\n TextualDevice.not_connected: UnixLocal.unix_local,\n UnixRemote.unix_remote: UnixLocal.unix_local,\n UnixRemote.unix_remote_root: UnixLocal.unix_local,\n AtRemote.at_remote: UnixLocal.unix_local,\n },\n UnixRemote.unix_remote: {\n TextualDevice.not_connected: UnixLocal.unix_local,\n UnixLocal.unix_local_root: UnixLocal.unix_local,\n },\n UnixRemote.unix_remote_root: {\n TextualDevice.not_connected: UnixRemote.unix_remote,\n UnixLocal.unix_local: 
UnixRemote.unix_remote,\n UnixLocal.unix_local_root: UnixRemote.unix_remote,\n AtRemote.at_remote: UnixRemote.unix_remote,\n },\n AtRemote.at_remote: {\n TextualDevice.not_connected: UnixRemote.unix_remote,\n UnixLocal.unix_local: UnixRemote.unix_remote,\n UnixLocal.unix_local_root: UnixRemote.unix_remote,\n UnixRemote.unix_remote_root: UnixRemote.unix_remote,\n },\n }\n return state_hops\n\n def _configure_state_machine(self, sm_params):\n \"\"\"\n Configure device State Machine.\n :param sm_params: dict with parameters of state machine for device.\n :return: None.\n \"\"\"\n super(AtRemote, self)._configure_state_machine(sm_params)\n\n # copy prompt for AT_REMOTE/ctrl_c from UNIX_REMOTE_ROOT/exit\n hops_config = self._configurations[TextualDevice.connection_hops]\n remote_ux_root_exit_params = hops_config[UnixRemote.unix_remote_root][UnixRemote.unix_remote][\"command_params\"]\n remote_ux_prompt = remote_ux_root_exit_params[\"expected_prompt\"]\n hops_config[AtRemote.at_remote][UnixRemote.unix_remote][\"command_params\"][\"expected_prompt\"] = remote_ux_prompt\n\n def _get_packages_for_state(self, state, observer):\n \"\"\"\n Get available packages containing cmds and events for each state.\n :param state: device state.\n :param observer: observer type, available: cmd, events\n :return: available cmds or events for specific device state.\n \"\"\"\n available = super(AtRemote, self)._get_packages_for_state(state, observer)\n\n if not available:\n if state == AtRemote.at_remote:\n available = {TextualDevice.cmds: ['moler.cmd.at', 'moler.cmd.unix.ctrl_c'],\n TextualDevice.events: ['moler.events.shared']}\n if available:\n return available[observer]\n elif state == UnixRemote.unix_remote: # this is unix extended with plink_serial command\n if observer == TextualDevice.cmds:\n available.append('moler.cmd.at.plink_serial')\n available.append('moler.cmd.at.cu')\n\n return available\n","repo_name":"nokia/moler","sub_path":"moler/device/atremote.py","file_name":"atremote.py","file_ext":"py","file_size_in_byte":10499,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"78"}
+{"seq_id":"20905980026","text":"from typing import List\n\nfrom txmatching.database.services.patient_upload_service import \\\n replace_or_add_patients_from_one_country\nfrom txmatching.database.services.txm_event_service import \\\n get_txm_event_complete\nfrom txmatching.utils.export.export_txm_event import \\\n get_patients_upload_json_from_txm_event_for_country\n\n\ndef copy_patients_between_events(txm_event_id_from: int, txm_event_id_to: int, donor_ids: List[int]) -> List[int]:\n txm_event_from = get_txm_event_complete(txm_event_id_from, load_antibodies_raw=True)\n txm_event_to = get_txm_event_complete(txm_event_id_to, load_antibodies_raw=True)\n donors_to_copy = [donor for donor in txm_event_from.active_and_valid_donors_dict.values()\n if donor.db_id in donor_ids]\n\n donor_related_recipients = [\n txm_event_from.active_and_valid_recipients_dict[donor.related_recipient_db_id] for donor in donors_to_copy if\n donor.related_recipient_db_id is not None]\n\n # raise error if the recipient is already in the event\n txm_event_to_recipient_medical_ids = [recipient.medical_id for recipient in\n txm_event_to.active_and_valid_recipients_dict.values()]\n\n for recipient in donor_related_recipients:\n if recipient.medical_id in txm_event_to_recipient_medical_ids:\n raise ValueError(\n f'Recipient with medical id {recipient.medical_id} already exists in event {txm_event_to.name}. '\n f'Unfortunately, we do not support copying donors with the related recipient that is'\n f' already in TxmEventTo yet.')\n\n # actual copying\n donor_countries = set(donor.parameters.country_code for donor in donors_to_copy)\n donor_ids_to_copy_by_country = {country: [donor.medical_id for donor in donors_to_copy\n if donor.parameters.country_code == country] for country in\n donor_countries}\n\n patient_upload_dtos_for_country = [\n get_patients_upload_json_from_txm_event_for_country(txm_event_id_from,\n country,\n txm_event_to.name,\n donor_ids)\n for country, donor_ids in donor_ids_to_copy_by_country.items()]\n\n new_donor_ids = []\n for patient_upload_dto in patient_upload_dtos_for_country:\n patients = replace_or_add_patients_from_one_country(patient_upload_dto=patient_upload_dto)\n new_donor_ids = new_donor_ids + [donor.id for donor in patients[0]]\n\n return new_donor_ids\n","repo_name":"mild-blue/txmatching","sub_path":"txmatching/utils/copy/copy_patients_from_event_to_event.py","file_name":"copy_patients_from_event_to_event.py","file_ext":"py","file_size_in_byte":2681,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"}
+{"seq_id":"12851199974","text":"import pytest\n\nfrom tests.support.case import ModuleCase\n\n\n@pytest.mark.flaky(max_runs=4)\n@pytest.mark.skip_unless_on_windows\n@pytest.mark.windows_whitelisted\n@pytest.mark.slow_test\nclass NTPTest(ModuleCase):\n \"\"\"\n Validate windows ntp module\n \"\"\"\n\n @pytest.mark.destructive_test\n @pytest.mark.slow_test\n def test_ntp_set_servers(self):\n \"\"\"\n test ntp get and set servers\n \"\"\"\n ntp_srv = \"pool.ntp.org\"\n set_srv = self.run_function(\"ntp.set_servers\", [ntp_srv])\n self.assertTrue(set_srv)\n\n get_srv = self.run_function(\"ntp.get_servers\")\n self.assertEqual(ntp_srv, get_srv[0])\n","repo_name":"saltstack/salt","sub_path":"tests/integration/modules/test_win_ntp.py","file_name":"test_win_ntp.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":13606,"dataset":"github-code","pt":"78"}
+{"seq_id":"5085869376","text":"###############################\n###### Made by GbaCretin ######\n###############################\n\n# LUT from C2 to B7 (for SSG OPNBs)\n\nnumber_of_octaves = 6\nbase_pitches = [\n# C2 C#2 D2 D#2 E2 F2\n\t65.41, 69.30, 73.42, 77.78, 82.41, 87.31,\n# F#2 G2 G#2 A2 A#2 B2\n 92.50, 98.00, 103.83, 110.0, 116.54, 123.47]\n \nLUT = bytearray()\n\nfor octave in range(1, number_of_octaves+1):\n\tfor base_pitch in base_pitches:\n\t\tpitch = base_pitch * pow(2,octave)\n\t\tSSG_pitch = round(250000 / pitch)\n\n\t\tSSG_pitch_L = SSG_pitch & 0x00FF\n\t\tSSG_pitch_H = (SSG_pitch & 0xFF00) >> 8\n\n\t\t# Little endian\n\t\tLUT.append(SSG_pitch_L)\n\t\tLUT.append(SSG_pitch_H)\n\n\t\t# Big endian\n\t\t# LUT.append(SSG_pitch_H)\n\t\t# LUT.append(SSG_pitch_L)\n\nfile = open(\"ssg_pitch_lut.bin\", \"wb\")\nfile.write(LUT)\nfile.close()\n","repo_name":"stereomimi/Mezz-Estate-NeoGeo-Audio-Driver","sub_path":"scripts/ssg_pitch_lut.py","file_name":"ssg_pitch_lut.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"78"}
+{"seq_id":"6484193180","text":"from __future__ import annotations\n\nimport numbers\nfrom abc import ABC\nfrom typing import Union, List\n\nfrom .base import Prior\nfrom .binding import DREXInstructionsFile, DREXResultsFile\nfrom .util import transform_to_unified_drex_input_sequence_representation\nimport numpy as np\n\nfrom .worker import MatlabWorker\nfrom ..lib.model import ModelBuilder, Model\n\n\nclass DREXInstructionBuilder(ModelBuilder, ABC):\n def __init__(self):\n \"\"\"\n D-REX builder. Uses the default values, i.e.:\n\n * hazard = 0.01\n * memory = inf\n * maxhyp = inf\n * obsnz = 0\n * max_ncomp = 10 (relevant for GMM priors)\n * beta = 0.001 (relevant for GMM priors)\n * predscale = 0.01\n * change decision threshold = 0.01\n\n Further required values:\n\n * prior\n * input sequence\n\n \"\"\"\n # Use original default values\n super().__init__()\n self._input_sequence = None\n self._hazard = 0.01\n self._memory = np.inf\n self._maxhyp = np.inf\n # self._D = 1 # implicitly specified by prior\n self._change_decision_threshold = 0.01\n self._obsnz = 0\n self._predscale = 0.001\n self._max_ncomp = 10\n self._beta = 0.001\n\n self._prior = None\n\n def prior(self, prior: Prior) -> DREXInstructionBuilder:\n \"\"\"\n Set the (un)processed prior.\n\n Parameters\n ----------\n prior\n Prior\n Returns\n -------\n DREXInstructionBuilder\n self\n \"\"\"\n self._prior = prior\n return self\n\n def input_sequence(self, input_sequence: Union[list, np.ndarray]) -> DREXInstructionBuilder:\n \"\"\"\n Set the input sequence\n\n Parameters\n ----------\n input_sequence\n np.ndarray of shape (time, feature)\n\n Returns\n -------\n DREXInstructionBuilder\n self\n \"\"\"\n iseq = transform_to_unified_drex_input_sequence_representation(input_sequence)\n [_, input_sequence_features] = iseq[0].shape\n\n # Check correspondence to prior (if present)\n if self._prior is not None:\n if self._prior.feature_count() < input_sequence_features:\n raise ValueError(\"input_sequence invalid! Its number of features must be equal the prior's.\")\n\n # Automatically adjust obsnz if obsnz is scalar\n if isinstance(self._obsnz, numbers.Number):\n self._obsnz = [self._obsnz] * input_sequence_features\n\n self._input_sequence = iseq\n return self\n\n def hazard(self, hazard: Union[float, List[float]]) -> DREXInstructionBuilder:\n \"\"\"\n Set the hazard rate(s)\n\n Parameters\n ----------\n hazard\n Value between [0,1], or list of such values (as much as the input sequence has elements)\n\n Returns\n -------\n DREXInstructionBuilder\n self\n \"\"\"\n if isinstance(hazard, list):\n hazard = np.array(hazard)\n\n if type(hazard) is np.ndarray:\n # Check shape\n if len(hazard.shape) != 1:\n raise ValueError(\"Shape of hazard is invalid! Expected one dimension: time.\")\n [hazard_times] = hazard.shape\n\n # Check correspondence to input sequence (if present)\n if len(hazard) > 1 and len(self._input_sequence) > 0:\n if len(self._input_sequence) != len(hazard_times):\n raise ValueError(\"hazard invalid! There must be either one or as much as \"\n \"len(input_sequence) elements.\")\n\n # Check value(s)\n if not ((hazard >= 0).all() and (hazard <= 1).all()):\n raise ValueError(\"hazard invalid! 
Value(s) must be within range of [0,1].\")\n\n self._hazard = hazard\n return self\n\n def obsnz(self, obsnz: Union[float, List[float]]) -> DREXInstructionBuilder: # TODO per feature separate value\n \"\"\"\n Set the observation noise\n\n Parameters\n ----------\n obsnz\n Single values used across features, or list of values (as much as the input sequence has features)\n\n Returns\n -------\n DREXInstructionBuilder\n self\n \"\"\"\n self._obsnz = obsnz\n return self\n\n def memory(self, memory: int) -> DREXInstructionBuilder:\n \"\"\"\n Set the memory parameter which limits the number of previous hypotheses to process at each time step.\n Parameters\n ----------\n memory\n positive int (greather than or equal 2) or np.inf\n Returns\n -------\n DREXInstructionBuilder\n self\n \"\"\"\n if not memory >= 2:\n raise ValueError(\"memory invalid! Value must be greater than or equal 2.\")\n\n self._memory = memory\n return self\n\n def maxhyp(self, maxhyp: int) -> DREXInstructionBuilder:\n \"\"\"\n Set the maxhyp parameter which limits the number of previous hypotheses to process at each time step.\n\n Parameters\n ----------\n maxhyp\n positive int or np.inf\n Returns\n -------\n DREXInstructionBuilder\n self\n \"\"\"\n self._maxhyp = maxhyp\n return self\n\n def change_decision_threshold(self, change_decision_threshold: float) -> DREXInstructionBuilder:\n \"\"\"\n Set the change decision threshold.\n\n Parameters\n ----------\n change_decision_threshold\n float in range [0,1]\n\n Returns\n -------\n DREXInstructionBuilder\n self\n \"\"\"\n if not change_decision_threshold >= 0 and change_decision_threshold <= 1:\n raise ValueError(\"change_decision_threshold invalid! Value must be in range of [0,1].\")\n self._change_decision_threshold = change_decision_threshold\n return self\n\n def predscale(self, predscale: float) -> DREXInstructionBuilder:\n \"\"\"\n Set the predscale value.\n\n Parameters\n ----------\n predscale\n float in range [0,1]\n\n Returns\n -------\n DREXInstructionBuilder\n self\n \"\"\"\n if not predscale > 0 and predscale <= 1:\n raise ValueError(\"predscale invalid! Value must be in range (0,1].\")\n self._predscale = float(predscale)\n return self\n\n def max_ncomp(self, max_ncomp: int) -> DREXInstructionBuilder:\n \"\"\"\n Set the max_ncomp value D-REX's handling of GMM priors.\n\n Parameters\n ----------\n max_ncomp\n Maximum number of components\n\n Returns\n -------\n DREXInstructionBuilder\n self\n \"\"\"\n if not (max_ncomp > 0 and isinstance(max_ncomp, int)):\n raise ValueError(\"max_ncomp invalid! Value must be positive and integer.\")\n\n self._max_ncomp = max_ncomp\n\n return self\n\n def beta(self, beta: float) -> DREXInstructionBuilder:\n \"\"\"\n Set the beta value for D-REX's handling of GMM priors.\n\n Parameters\n ----------\n beta\n Threshold for new GMM components.\n\n Returns\n -------\n DREXInstructionBuilder\n self\n \"\"\"\n if not (0 <= beta <= 1):\n raise ValueError(\"beta invalid! 
Value must be between 0 and 1.\")\n\n self._beta = beta\n\n return self\n\n def to_instructions_file(self) -> DREXInstructionsFile:\n return DREXInstructionsFile(self._input_sequence, self._prior,\n self._hazard, self._memory,\n self._maxhyp, self._obsnz,\n self._max_ncomp, self._beta,\n self._predscale, self._change_decision_threshold)\n\n\nclass DREXModel(Model):\n \"\"\"\n High-level interface for using D-REX.\n Using +instance+, one can hyper-parameterize D-REX.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n\n def run(self, instructions_file_path) -> DREXResultsFile:\n results_file_path = MatlabWorker.run_model(instructions_file_path)\n results_file = DREXResultsFile.load(results_file_path)\n return results_file\n\n @staticmethod\n def run_instructions_file_at_path(file_path: str) -> DREXResultsFile:\n return DREXModel().run(file_path)\n","repo_name":"lexngu/cmme","sub_path":"src/cmme/drex/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8431,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
+{"seq_id":"272079339","text":"import datetime\nfrom django.shortcuts import render, redirect\nfrom .models import ToDo\nfrom .forms import AddForm\n\n# Create your views here.\ndef home(request):\n to_dos = ToDo.objects.all()\n\n today = datetime.date.today()\n expired = to_dos.filter(due_date__lt=today).order_by('due_date')\n prompt = to_dos.filter(due_date__gte=today).\\\n filter(due_date__lte=today+datetime.timedelta(days=7)).order_by('due_date')\n normal = to_dos.filter(due_date__gt=today+datetime.timedelta(days=7)).order_by('due_date')\n\n context = {\n 'expired': expired,\n 'prompt': prompt,\n 'normal': normal,\n }\n return render(request, 'myApp/home.html', context)\n\ndef add(request):\n if request.method == 'POST':\n form = AddForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('home')\n else:\n form = AddForm()\n return render(request, 'myApp/add.html', {'form': form})\n\ndef delete(request, pk):\n target = ToDo.objects.get(id=pk)\n target.delete()\n return redirect('home')\n","repo_name":"Mapal-developer/TODO","sub_path":"myApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"70017667452","text":"import os\nimport numpy as np\n\n\ndef read_pcd(filepath):\n # read .pcd format data\n lidar = []\n with open(filepath,'r',) as f:\n line = f.readline().strip()\n\n while line:\n linestr = line.split(\" \")\n\n if len(linestr) == 9 and linestr[0] != '#':\n linestr_convert = list(map(float, linestr))\n\n lidar.append(linestr_convert[0:4])\n line = f.readline().strip()\n return np.array(lidar)\n\n\ndef convert(pcdfolder, binfolder):\n # convert .pcd format data to .bin format point cloud data\n current_path = os.getcwd()\n ori_path = os.path.join(current_path, pcdfolder)\n file_list = os.listdir(ori_path)\n des_path = os.path.join(current_path, binfolder)\n if os.path.exists(des_path):\n pass\n else:\n os.makedirs(des_path)\n for file in file_list: \n (filename,extension) = os.path.splitext(file)\n velodyne_file = os.path.join(ori_path, filename) + '.pcd'\n pl = read_pcd(velodyne_file)\n pl = pl.reshape(-1, 4).astype(np.float32)\n\n # remove '.'\n strls = filename.split(\".\")\n filename = strls[0] + strls[1]\n\n velodyne_file_new = os.path.join(des_path, filename) + '.bin'\n pl.tofile(velodyne_file_new)\n print(pl)\n \npcdfolder = '2022-05-04-11-20-11_converted'\nbinfolder = '2022-05-04-11-20-11_bin'\n\nconvert(pcdfolder, binfolder)\n\n\n","repo_name":"Saleh-Ibtasham/4dpls_stream","sub_path":"pcd2bin.py","file_name":"pcd2bin.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"39836903381","text":"\"\"\"\nThe performance monitoring data at some instant\n\"\"\"\n\n\nclass InstantaneousPMData:\n def __init__(self, power: float, ber: float, snr: float, dgd: float, qfactor: float, chromatic_dispersion: float, carrier_offset: float) -> None:\n \"\"\"\n power: the instantaneous power levels\n ber: the instantaneous bit error rate\n snr: the instantaneous signal to noise ratio\n dgd: the instantaneous differential group delay\n qfactor: the instantaneous quality-factor\n chromatic_dispersion: the instantaneous chromatic dispersion\n carrier_offset: the instantaneous carrier offeset\n \"\"\"\n self.power = power\n self.ber = ber\n self.snr = snr\n self.dgd = dgd\n self.qfactor = qfactor\n self.chromatic_dispersion = chromatic_dispersion\n self.carrier_offset = carrier_offset\n","repo_name":"yunusmohammed/adva-performance-monitoring","sub_path":"Models/InstantaneousPMData.py","file_name":"InstantaneousPMData.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"43124860725","text":"import os\nimport chess.pgn\nfrom PIL import Image, ImageDraw\nfrom time import perf_counter\n\npiece_colors = {True: \"w\", False: \"b\"}\n\npiece_names = {1: \"p\", 2: \"n\", 3: \"b\", 4: \"r\", 5: \"q\", 6: \"k\"}\ndef generate_gifs(pgn_file, output_folder, frame_duration=1000, last_frame_duration=5000):\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n\n board_image = Image.open(\"assets/board.png\").convert(\"RGBA\")\n\n piece_images = {}\n for color in [\"w\", \"b\"]:\n for piece_name in [\"k\", \"q\", \"r\", \"b\", \"n\", \"p\"]:\n piece_image = Image.open(f\"assets/{color}{piece_name}.png\").convert(\"RGBA\")\n piece_images[f\"{color}{piece_name}\"] = piece_image\n\n pgn = open(pgn_file)\n game_number = 1\n\n while True:\n start = perf_counter()\n game = chess.pgn.read_game(pgn)\n if game is None:\n break\n\n board = game.board()\n board_positions = [board.copy()]\n moves = list(game.mainline_moves())\n for move in moves:\n board.push(move)\n board_positions.append(board.copy())\n\n gif_frames = []\n for i, position in enumerate(board_positions):\n frame = board_image.copy()\n draw = ImageDraw.Draw(frame)\n for square, piece in position.piece_map().items():\n if piece.piece_type != chess.PieceType(0):\n piece_image = piece_images[\n f\"{piece_colors[piece.color]}{piece_names[piece.piece_type]}\"\n ]\n x = (square % 8) * 177\n y = (7 - square // 8) * 177\n frame.paste(piece_image, (x, y), piece_image)\n\n if i < len(moves):\n move = moves[i]\n source_square = move.from_square\n destination_square = move.to_square\n source_x = (source_square % 8) * 177\n source_y = (7 - source_square // 8) * 177\n dest_x = (destination_square % 8) * 177\n dest_y = (7 - destination_square // 8) * 177\n\n draw.rectangle(\n [(source_x, source_y), (source_x + 176, source_y + 176)],\n outline=\"red\",\n width=8,\n )\n draw.rectangle(\n [(dest_x, dest_y), (dest_x + 176, dest_y + 176)],\n outline=\"green\",\n width=8,\n )\n\n gif_frames.append(frame)\n\n if i == len(board_positions) - 1:\n gif_frames[-1].info[\"duration\"] = last_frame_duration\n else:\n gif_frames[-1].info[\"duration\"] = frame_duration\n\n output_file = os.path.join(output_folder, f\"game{game_number}.gif\")\n gif_frames[0].save(\n output_file,\n format=\"GIF\",\n append_images=gif_frames[1:],\n save_all=True,\n loop=0,\n )\n\n print(f\"GIF file {output_file} generated in {((perf_counter() - start) * 1000):.0f}ms.\")\n game_number += 1\n\n\n# Example\nif __name__ == \"__main__\":\n pgn_file = \"game.pgn\"\n output_folder = \"output\"\n generate_gifs(pgn_file, output_folder)\n","repo_name":"SuperOrca/chess-gif","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"29610930246","text":"from pathlib import Path\nfrom typing import Final\n\n\nMODE: Final[str] = \"dev\"\nPORT: Final[int] = 5000\nNUM_THREADS: Final[int] = 50\nDB_ENGINE: Final[str] = \"mysql\"\nDB_CHARSET: Final[str] = \"utf8mb4\"\nMAX_LANGUAGES_COUNT: Final[int] = 8\nMAX_LOGIN_SIZE: Final[int] = 32\nMAX_FULLNAME_SIZE: Final[int] = 256\nMAX_NAME_SIZE: Final[int] = 64\nMAX_PROJECT_NAME_SIZE: Final[int] = 512\nMAX_TAG_SIZE: Final[int] = 256\nMAX_USER_COMMENT_SIZE: Final[int] = 512\nMAX_QUESTION_COMMENT_SIZE: Final[int] = 512\nMAX_BLOCK_NAME_SIZE: Final[int] = 64\nMAX_QUESTION_TEXT_SIZE: Final[int] = 256\nMAX_ANSWER_BLOCK_NAME: Final[int] = 128\nMAX_ANSWER_OPTION_SIZE: Final[int] = 128\nMAX_SHORT_ANSWER_OPTION_SIZE: Final[int] = 32\nMAX_SHEET_TITLE_SIZE: Final[int] = 16\nMAX_ANSWER_SIZE: Final[int] = 2048\nMAX_TOPONYM_SIZE: Final[int] = 128\nMAX_SHORT_QUESTION_SIZE: Final[int] = 32\nMAX_TEXT_SIZE: Final[int] = 4096\nINT_MIN: Final[int] = -0xffffffffffffffff\nINT_MAX: Final[int] = -INT_MIN\nSOURCE_QUESTION_ID: Final[int] = -1\nANSWER_ROW_QUESTION_ID: Final[int] = -2\nDATE_FORMAT: Final[str] = \"%Y-%m-%d\"\nDATETIME_FORMAT: Final[str] = DATE_FORMAT + \" %H:%M:%S\"\nREQUEST_CONTEXT_USE_DELETED_ITEMS: Final[str] = \"use_deleted_items\"\nJWT_ACCESS_TOKEN_EXPIRES: Final[int] = 1\nJWT_REFRESH_EXPIRING_TIME: Final[int] = 30\nNAME_COLUMN_NAME = {\n 'ru': 'Имя/Название',\n 'en': 'Name'\n}\nTOPONYMS_TABLE_URL: Final[str] = \"https://simplemaps.com/static/data/country-cities/id/id.json\"\nTOPONYMS_REQUEST_TIMEOUT: Final[int] = 179\nDEFAULT_LANGUAGE: Final[str] = 'en'\nALL_LANGUAGES_TAG: Final[str] = 'all'\n\nMAX_LEADERS_PAGE_SIZE: int = 200\nMAX_PROJECTS_PAGE_SIZE: int = 200\nMAX_COMMENT_SIZE_IN_LEADERS_LIST: int = 10\nLOGS_DIRECTORY = Path(__file__).resolve().parent.parent / 'logs'\nUPLOADS_DIRECTORY = Path(__file__).resolve().parent.parent / 'uploads'\n","repo_name":"KennelTeam/sol-db-in","sub_path":"backend/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"}
+{"seq_id":"42593340926","text":"\r\n#Receive a sentence from the user\r\nsentence = input ((\"enter a sentenc:\"))\r\n\r\n#Print the sentence for each line separately:\r\ni = 0\r\nfor i in range(len(sentence)):\r\n if sentence[i]==\" \":\r\n print()\r\n else:\r\n print(sentence[i], end=\"\")\r\n","repo_name":"SarayMordechai/HW-Python-","sub_path":"HW1/words.py","file_name":"words.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"36216373259","text":"import asyncio\nimport logging\nfrom typing import Callable, Optional, Awaitable\n\nfrom aiohttp import web\nfrom aiohttp.abc import Request\n\nfrom fbmessenger.api import API\nfrom fbmessenger.models import Message\n\n\nclass Messenger(API):\n access_token: str\n verify_token: str\n callback: Callable[[Message], Awaitable[None]]\n log: logging.Logger = logging.getLogger(__name__)\n\n def __init__(self, access_token: str, verify_token: str, message_callback: Callable[[Message], Awaitable[None]],\n attachment_location: Optional[str] = None, public_attachment_url: Optional[str] = None):\n super().__init__(access_token, attachment_location, public_attachment_url)\n self.access_token = access_token\n self.verify_token = verify_token\n self.callback = message_callback\n\n def start_receiving(self, port=8080):\n app = web.Application()\n app.add_routes([web.post('/{tail:.*}', self._message_handler),\n web.get('/{tail:.*}', self._verification_handler)])\n web.run_app(app, port=port)\n\n async def _message_handler(self, request: Request):\n self.log.debug(f'Received request:\\n{await request.text()}')\n raw_message = await request.json()\n for event in raw_message['entry']:\n\n # Handle messaging events\n if 'messaging' in event:\n for m in event['messaging']:\n message = None\n\n if 'message' in m:\n if 'text' in m['message']:\n message = Message(m['sender']['id'], m['recipient']['id'], text=m['message']['text'])\n\n if 'quick_reply' in m['message']:\n message.payload = m['message']['quick_reply']['payload']\n\n if 'postback' in m:\n message = Message(m['sender']['id'], m['recipient']['id'],\n payload=m['postback']['payload'])\n\n if not message:\n self.log.debug(\"No content, skip message\")\n continue\n\n asyncio.ensure_future(self.callback(message))\n\n # Handle postback events\n\n return web.Response(text=\"\")\n\n async def _verification_handler(self, request: Request):\n self.log.debug(f'Received verification request with query {request.query_string}')\n\n if request.query.get('hub.verify_token') != self.verify_token:\n raise web.HTTPForbidden(reason=\"Verify token is invalid\")\n\n challenge = request.query.get('hub.challenge')\n if not challenge:\n raise web.HTTPBadRequest(reason='hub.challenge not set')\n\n return web.Response(text=challenge)\n","repo_name":"eknoes/simple-fbmessenger","sub_path":"fbmessenger/messenger.py","file_name":"messenger.py","file_ext":"py","file_size_in_byte":2757,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
+{"seq_id":"41153943522","text":"import tensorflow as tf\nfrom tensorflow import keras\nimport keras_cv\nimport numpy as np\nimport os\nimport sys\nimport copy\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Rectangle\n\nNAME_BACKBONE = \"yolo_v8_xs_backbone\"\nCONFIDENCE = 0.7\n\nimages_test = np.load(\"matrices_test.npy\")\n\nprint(\"Nb images : \" + str(len(images_test)))\n\nlabels_test = {\n \"boxes\": np.load(\"labels_test.npy\"),\n \"classes\": np.load(\"classes_test.npy\")\n}\n\nmodel = keras_cv.models.YOLOV8Detector(\n num_classes=2,\n bounding_box_format=\"center_xywh\",\n backbone=keras_cv.models.YOLOV8Backbone.from_preset(\n NAME_BACKBONE\n ),\n fpn_depth=2\n)\n\nif not os.path.isfile(NAME_BACKBONE+\".h5\"):\n sys.exit(1)\n\nmodel.load_weights(NAME_BACKBONE+\".h5\", skip_mismatch=False, by_name=False, options=None)\n\n# Get predictions using the model\nresults = model.predict(images_test)\n\nfor i in range(len(results[\"boxes\"].to_tensor())):\n plt.imshow(images_test[i])\n\n for j in range(len(results[\"boxes\"][i])):\n if results[\"classes\"][i][j] == 1 and results[\"confidence\"][i][j] >= CONFIDENCE:\n x = results[\"boxes\"][i][j].numpy()[0]-int(round((results[\"boxes\"][i][j].numpy()[2])/2))\n y = results[\"boxes\"][i][j].numpy()[1]-int(round((results[\"boxes\"][i][j].numpy()[3])/2))\n w = results[\"boxes\"][i][j].numpy()[2]\n h = results[\"boxes\"][i][j].numpy()[3]\n plt.gca().add_patch(Rectangle((x, y), w, h, edgecolor=\"#7736e3\", facecolor=\"none\", lw=3))\n\n for k in range(len(labels_test[\"boxes\"][i])):\n if labels_test[\"boxes\"][i][k][0] < 50:\n x = labels_test[\"boxes\"][i][k][0]-int(round(labels_test[\"boxes\"][i][k][2]/2))\n y = labels_test[\"boxes\"][i][k][1]-int(round(labels_test[\"boxes\"][i][k][3]/2))\n w = labels_test[\"boxes\"][i][k][2]\n h = labels_test[\"boxes\"][i][k][3]\n plt.gca().add_patch(Rectangle((x, y), w, h, edgecolor=\"red\", facecolor=\"none\", lw=3))\n\n plt.show()\n\n# print(\"Boxes : \", labels_test[\"boxes\"][:1])\n# print(\"Classes : \", labels_test[\"classes\"][:1])","repo_name":"MagiPrince/keras-cv-yolov8-quantized","sub_path":"display_gt_and_pred.py","file_name":"display_gt_and_pred.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"}
+{"seq_id":"73873597690","text":"# Deep Deterministic Policy Gradient\n# following paper: Continuous control with deep reinforcement learning\n# (https://arxiv.org/pdf/1509.02971.pdf)\n#\n# ---\n# @author Yiren Lu\n# @email luyiren [at] seas [dot] upenn [dot] edu\n#\n# MIT License\n\nimport tensorflow as tf\nimport tf_utils\n\n\n\nclass ActorNetwork(object):\n\n\n def __init__(self, state_size, action_size, lr, n_h1=400, n_h2=300, tau=0.001):\n self.state_size = state_size\n self.action_size = action_size\n self.optimizer = tf.train.AdamOptimizer(lr)\n self.tau = tau\n\n self.n_h1 = n_h1\n self.n_h2 = n_h2\n\n self.input_s, self.actor_variables, self.action_values = self._build_network(\"actor\")\n self.input_s_target, self.actor_variables_target, self.action_values_target = self._build_network(\"actor_target\")\n\n self.action_gradients = tf.placeholder(tf.float32, [None, self.action_size])\n self.actor_gradients = tf.gradients(self.action_values, self.actor_variables, -self.action_gradients)\n self.update_target_op = [self.actor_variables_target[i].assign(tf.multiply(self.actor_variables[i], self.tau) + tf.multiply(self.actor_variables_target[i], 1 - self.tau))\n for i in range(len(self.actor_variables))]\n self.optimize = self.optimizer.apply_gradients(zip(self.actor_gradients, self.actor_variables))\n\n\n def _build_network(self, name):\n input_s = tf.placeholder(tf.float32, [None, self.state_size])\n with tf.variable_scope(name):\n layer_1 = tf_utils.fc(input_s, self.n_h1, scope=\"fc1\", activation_fn=tf.nn.relu,\n initializer=tf.contrib.layers.variance_scaling_initializer(mode=\"FAN_IN\"))\n layer_2 = tf_utils.fc(layer_1, self.n_h2, scope=\"fc2\", activation_fn=tf.nn.relu,\n initializer=tf.contrib.layers.variance_scaling_initializer(mode=\"FAN_IN\"))\n action_values = tf_utils.fc(layer_2, self.action_size, scope=\"out\", activation_fn=tf.nn.sigmoid,\n initializer=tf.random_uniform_initializer(-3e-3, 3e-3))\n actor_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=name)\n return input_s, actor_variables, action_values\n\n\n def get_action(self, state, sess):\n return sess.run(self.action_values, feed_dict={self.input_s: state})\n\n\n def get_action_target(self, state, sess):\n return sess.run(self.action_values_target, feed_dict={self.input_s_target: state})\n\n\n def train(self, state, action_gradients, sess):\n sess.run(self.optimize, feed_dict={\n self.input_s: state,\n self.action_gradients: action_gradients\n })\n\n\n def update_target(self, sess):\n sess.run(self.update_target_op)\n","repo_name":"yjkim721/interference-pattern-ddpg","sub_path":"actor.py","file_name":"actor.py","file_ext":"py","file_size_in_byte":2614,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"}
+{"seq_id":"43639051601","text":"from base import constant, common_tools, db_tools\n\nfrom zhkt import zhkt_tools\nfrom zhkt.appview import kt_student_view as kt_student_view\n\n# 全局业务表表名\ntable_name = 'zhkt_stu_act_result'\n\n\ndef find_list(param_strs={}, sel_head=\" select t.* \", camelize=False, my_default_sidx=None):\n sql_from_where = init_from_where_sql(param_strs)\n sql = sel_head + sql_from_where\n if my_default_sidx:\n sql += ' order by ' + my_default_sidx\n return db_tools.find_dict_list_by_sql(sql, camelize)\n\n\n# 找到指定题目的互动数据\ndef one_ques_rs_list(mainId, actQuesId):\n param_strs = {'mainId': mainId, 'actQuesId': actQuesId}\n sel_head = \" select t.*, s.student_name, s.student_gender \"\n return find_list(param_strs, sel_head, False, ' t.create_time_stamp ')\n\n\n# 查询某个学生的互动结果\ndef one_stu_ques_rs(mainId, actQuesId, studentId):\n param_strs = {'mainId': mainId, 'actQuesId': actQuesId, 'studentId': studentId}\n base_list = find_list(param_strs)\n return base_list[0] if base_list and len(base_list) else None\n\n\n# 整合查询用户的原生sql\ndef init_from_where_sql(params):\n mainId = params['mainId'] if 'mainId' in params else ''\n actQuesId = params['actQuesId'] if 'actQuesId' in params else ''\n isHandUp = params['isHandUp'] if 'isHandUp' in params else ''\n isReplyAnswer = params['isReplyAnswer'] if 'isReplyAnswer' in params else ''\n studentId = params['studentId'] if 'studentId' in params else ''\n sql = \"\"\" from {} t\n left join zhkt_student s on s.student_id = t.student_id and s.main_id = t.main_id\n where 1 = 1 \"\"\".format(table_name)\n if isHandUp:\n sql += \" and t.is_hand_up = '\" + isHandUp + \"' \"\n if isReplyAnswer:\n sql += \" and t.is_reply_answer = '\" + isReplyAnswer + \"' \"\n if mainId:\n sql += \" and t.main_id = '\" + mainId + \"' \"\n if actQuesId:\n sql += \" and t.act_ques_id = '\" + actQuesId + \"' \"\n if studentId:\n sql += \" and t.student_id = '\" + studentId + \"' \"\n return sql\n\n\n# 互动题目统计 - 投票/举手回答 (单选/判断题有效)\ndef init_act_ques_anls(mainId, actQuesId):\n optAnlsMap = {} # 各选项关键字-整体对象的对应map\n act_ques = zhkt_tools.act_ques_by_id(mainId, actQuesId) # 判断题型\n quesAnswer = act_ques['answer'] # 互动题目答案\n if constant.QUES_TYPE_JUDGE == act_ques['ques_type']: # 判断题\n # 放入正确对应的选项\n optAnlsMap['Y'] = {'optionKey': 'Y', 'optionNo': '对', 'isAnswer': 'Y' if 'Y' == quesAnswer else 'N', 'chooseNum': 0}\n # 放入错误对应的选项\n optAnlsMap['N'] = {'optionKey': 'N', 'optionNo': '错', 'isAnswer': 'Y' if 'N' == quesAnswer else 'N', 'chooseNum': 0}\n if quesAnswer:\n quesAnswer = '对' if 'Y' == quesAnswer else '错'\n else:\n quesAnswer = ''\n elif constant.QUES_TYPE_RADIO == act_ques['ques_type']: # 单选题\n baseOptList = act_ques['opt_list']\n for row in baseOptList:\n optAnlsMap[row['option_no']] = {'optionKey': row['option_no'], 'optionNo': row['option_no'], 'isAnswer': row['is_answer'], 'chooseNum': 0}\n elif constant.QUES_TYPE_MULTIPLE == act_ques['ques_type']: # 多选题\n quesAnswer = quesAnswer.replace(',', '') if quesAnswer else '' # 多选题, 答案移除逗号\n\n act_rs_list = one_ques_rs_list(mainId, actQuesId)\n allStuNum = zhkt_tools.init_main_by_id(mainId).stu_num # 参与上课的学生数量\n allStuNum = allStuNum if allStuNum else 0\n isReplyNum = 0 # 已回答人数\n handUpNum = 0 # 举手人数\n rightNum = 0 # 回答正确人数\n actNum = 0 # 参与人数\n replyStuList = [] # 真正回答问题的学生\n if act_rs_list and len(act_rs_list):\n for temp in act_rs_list:\n actNum += 1 # 参与人数累计\n if 'Y' == temp['is_hand_up']:\n handUpNum += 1 # 举手人数\n if 'Y' == temp['is_reply_answer']:\n isReplyNum += 1 # 真正回答有效的人数\n replyStuList.append({'studentId': 
temp['student_id'], 'studentName': temp['student_name'], 'studentGender': temp['student_gender'],\n 'rightResult': temp['right_result'], 'result': temp['result']})\n if 'Y' == temp['right_result']:\n rightNum += 1 # 正确人数累计\n answer = temp['answer']\n if answer:\n answer = answer.replace(\",\", \"\")\n optAnls = optAnlsMap[answer]\n if not optAnls:\n optAnls = {'optionKey': answer, 'optionNo': answer, 'isAnswer': 'Y' if answer == quesAnswer else 'N', 'chooseNum': 0}\n optAnls['chooseNum'] = optAnls['chooseNum'] + 1 # 累计选择人数\n # 统计正确率(整体)\n rightPercent = round(rightNum * 100.0 / isReplyNum, 2) if isReplyNum > 0 else 0.0\n # 统计参与率\n actPercent = round(actNum * 100.0 / allStuNum, 2) if allStuNum > 0 else 0.0\n # 整合选项统计\n optAnlsList = []\n for (k, v) in optAnlsMap.items():\n v['choosePercent'] = 0.0 if isReplyNum <= 0 else round(v['chooseNum'] * 100.0 / isReplyNum, 2) # 计算各个项选择比例\n optAnlsList.append(v)\n # 最终的返回结果\n return {\n 'allStuNum': allStuNum,\n 'isReplyNum': isReplyNum,\n 'handUpNum': handUpNum,\n 'rightNum': rightNum,\n 'rightPercent': rightPercent,\n 'actNum': actNum,\n 'actPercent': actPercent,\n 'quesAnswer': quesAnswer,\n 'optAnlsList': optAnlsList,\n 'replyStuList': replyStuList,\n }\n\n\n# 保存随机提问时, 所选的随机学生 - 同时通知画屏\ndef save_act_random_stus(mainId, actQuesId, studentIds):\n # 构造随机回答学生结果\n actStuList = []\n for studentId in studentIds.split(','):\n actStuList.append({\n 'main_id': mainId,\n 'act_ques_id': actQuesId,\n 'student_id': studentId,\n 'bonus': 1, # 随机提问到的学生+1积分\n 'is_hand_up': 'N', # 随机提问不为举手\n 'create_time': common_tools.now(), # 当前回答时间\n })\n db_tools.ins_batch_to_db(table_name, actStuList, True)\n # 更新课堂学生表的积分/得分情况\n addBonus, addActNum = 1, 1\n kt_student_view.stus_add_bonus_num(mainId=mainId, studentIds=studentIds, bonus=addBonus, actNum=addActNum)\n # todo 给画屏推送参与随机提问的人\n\n\n# 保护课堂互动时 - 学生答题结果 (教师选择对错)\ndef save_act_stu_result(mainId, actQuesId, studentId, rightResult, result):\n createTime = common_tools.now() # 当前时间\n addBonus = 0 # 本次回答后, 学生增加的积分\n if \"Y\" == rightResult:\n addBonus = 1 # 回答正确加1分\n elif result and int(result) >= 3:\n addBonus = 1 # 评价超过3星, 加1分\n addActNum = 0 # 增加的互动次数\n actStu = one_stu_ques_rs(mainId=mainId, actQuesId=actQuesId, studentId=studentId)\n if actStu:\n if 'create_time' not in actStu or not actStu['create_time']:\n actStu['create_time'] = createTime\n bonus = actStu['bonus']\n actStu['bonus'] = (bonus if bonus else 0) + addBonus # 最终本次积分\n actStu['right_result'] = rightResult\n actStu['result'] = result\n actStu['is_reply_answer'] = \"Y\" # 回答问题标记\n db_tools.upd_dict_to_db(table_name, actStu)\n else:\n addActNum = 1\n actStu = {\n 'main_id': mainId,\n 'act_ques_id': actQuesId,\n 'student_id': studentId,\n 'create_time': createTime,\n 'is_reply_answer': 'Y', # 回答问题标记\n 'result': result,\n 'right_result': rightResult,\n 'bonus': addBonus\n }\n db_tools.ins_dict_to_db(table_name, actStu)\n # 更新学生表积分\n if addBonus != 0 or addActNum != 0:\n kt_student_view.stus_add_bonus_num(mainId=mainId, studentIds=studentId, bonus=addBonus, actNum=addActNum)\n","repo_name":"lovederh/magicbox","sub_path":"zhkt/appview/stu_act_result_view.py","file_name":"stu_act_result_view.py","file_ext":"py","file_size_in_byte":8113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
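A minimal, framework-free illustration of the option tally that `init_act_ques_anls` above performs over the interaction results: count each submitted answer, then turn counts into rounded percentages. The submissions are made up:

```python
# Per-option tally + percentage, mirroring the optAnlsMap accumulation above.
answers = ["A", "B", "A", "A", "C"]  # synthetic student submissions

counts = {}
for a in answers:
    counts[a] = counts.get(a, 0) + 1

total = len(answers)
percents = {k: round(v * 100.0 / total, 2) for k, v in counts.items()}
print(percents)  # {'A': 60.0, 'B': 20.0, 'C': 20.0}
```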
+{"seq_id":"25131031077","text":"#!/usr/bin/env python\n\n\"\"\"\nTests for `VSPEC.psg_api` module\n\"\"\"\nfrom pathlib import Path\nimport pytest\nfrom astropy import units as u\nimport matplotlib.pyplot as plt\n\nfrom VSPEC.psg_api import call_api,call_api_from_file, PSGrad, get_reflected, parse_full_output\nfrom VSPEC.helpers import is_port_in_use,set_psg_state\n\nAPI_KEY_PATH = Path.home() / 'psg_key.txt'\nPSG_CONFIG_PATH = Path(__file__).parent / 'data' / 'test_cfg.txt'\nVSPEC_CONFIG_PATH = Path(__file__).parent / 'default.yaml'\nPSG_PORT = 3000\n\nRAD_PATH = Path(__file__).parent / 'data' / 'transit_reflected'\n\n\n\n\n@pytest.mark.skipif(not API_KEY_PATH.exists(),reason='This test requires an API key')\ndef test_call_api_nonlocal():\n \"\"\"\n Run tests for `VSPEC.psg_api.call_api()`\n This test expects that you have a file at\n `~/` called `psg_key.txt` that contains your own\n API key. Otherwise it is skipped.\n \"\"\"\n psg_url = 'https://psg.gsfc.nasa.gov'\n with open(API_KEY_PATH,'r',encoding='UTF-8') as file:\n api_key = file.read()\n data = '
(re.compile(r'</p>'),' '), # remove closing paragraph tag\n    (re.compile(r'<br/>'),'\\n\\n'), # convert breaks to newlines.\n    (re.compile(r'<br>'),'\\n\\n'), # convert breaks to newlines.\n    (re.compile(r'<[^>]*>'),' ')] # remove any remaining markup\n    \n    kdescriptionrewrites = khtmrewrites + kescaperemovals\n\n    #\n    # Called from outside the thread\n    #\n    def __init__(self, url, logger) :\n        feedmanager.Feed.__init__(self, \"NEWS\", logger)\n        self.setfeedurl(url) # set feed URL\n        self.expirationsecs = 60*60*24*2 # expire after not seen for 2 days\n        self.maxage = 60*60*24*NEWSMAXAGEDAYS # don't show items older than this\n        ####print(feedparser._HTMLSanitizer.acceptable_elements) # ***TEMP***\n        feedparser._HTMLSanitizer.acceptable_elements = self.acceptable_elements\n        ####print(feedparser._HTMLSanitizer.acceptable_elements) # ***TEMP***\n        ####self.expirationsecs = 60 # ***TEMP DEBUG***\n\n    def setfeedurl(self, url) : # set new feed URL\n        self.url = url # save URL\n        self.hdrtitle = None # no header title yet\n        ####self.hdrdate = None # no header date yet\n        self.etag = None # no feed sequence id yet\n        self.modified = None # no last-modified timestamp yet\n        self.itemqueued = {} # item has been queued for printing\n        self.markingallasread = True # marking all stories as read.\n\n    def markallasread(self) : # mark all stories as read\n        try: \n            while True : # drain\n                self.inqueue.get_nowait() # get input, if any\n        except queue.Empty: # when empty\n            pass # done\n        self.logger.info(\"News feed queue emptied.\")\n        self.markingallasread = True # mark all as read for one cycle \n\n    def unmarkallasread(self) : # clear items already read\n        try: \n            while True : # drain\n                self.inqueue.get_nowait() # get input, if any\n        except queue.Empty: # when empty\n            pass # done\n        self.logger.info(\"News feed queue restarted.\") # restarting from beginning\n        self.markingallasread = False # do not mark all as read\n        self.itemqueued = {} # no item has been queued for printing\n        self.modified = None # no last-modified date\n        self.etag = None # no previous RSS read\n        self.forcepoll() # force an immediate poll\n\n    def gettitle(self) : # get feed title \n        if self.hdrtitle :\n            return(self.hdrtitle)\n        else:\n            return(self.url) # use URL if unable to read\n\n    def getpollinterval(self) : # how often to poll\n        return(KPOLLINTERVAL)\n\n    def itemdone(self, item) : # done with this item - item printed\n        pass # we don't keep persistent state of news printed\n\n    def formattext(self, msgitem) : # format a msg item, long form\n        emsg = msgitem.errmsg\n        date_string = \"%s, %s\" % (msgitem.msgdate, msgitem.msgtime) # formatted time\n        # Format for printing as display message\n        if emsg : # short format for errors\n            s = \"%s: %s\\n\" % (date_string, emsg)\n            return(s) # return with error msg\n        # Long form display\n        s = msgitem.subject + '\\n(' + date_string + ')\\n' + msgitem.body + '\\n\\n' # Add CR at end\n        return(s) # no error\n\n    def summarytext(self, msgitem) :\n        emsg = msgitem.errmsg\n        # Format for printing as short message\n        if emsg : # short format for errors\n            s = \"%s: %s\\n\" % (msgitem.msgtime, emsg)\n            return(s) # return with error msg\n        date_string = \"%s, %s\" % (msgitem.msgdate, msgitem.msgtime) # formatted time\n        fmt = \"FROM %s TIME %s: %s\"\n        s = fmt % (msgitem.msgfrom, date_string, msgitem.body[:40])\n        return(s) # no error\n        \n    #\n    # Called from within the thread\n    # \n    def fetchitems(self) : \n        \"\"\"\n        Fetch more items from feed source.\n        \"\"\"\n        try : # try fetching\n            now = time.time() # timestamp\n            d = 
feedparser.parse(self.url,etag=self.etag,modified=self.modified) # fetch from URL\n if d is None or not hasattr(d,\"status\") : # if network failure\n raise IOError(\"of network or news source failure\")\n if d.status == 304 : # if no new items\n self.logger.debug(\"Feed polled, no changes.\")\n return # nothing to do\n self.logger.debug(\"Read feed: %d entries, status %s\" % (len(d.entries), d.status))\n if d.status != 200 : # if bad status\n raise IOError(\"of connection error No. %d\" % (d.status,))\n # Get fields from feed. \n if not \"title\" in d.feed : # if no title\n msg = self.handleunrecognizedfeed(self.url) # Is this some non-RSS thing?\n raise IOError(msg) # handle error\n self.hdrtitle = d.feed.title # feed title\n hdrdescription = d.feed.description # feed description\n oldetag = self.etag # save old etag for diagnosis\n oldmodified = self.modified # save old timestamp for diagnosis\n if hasattr(d,\"etag\") : # if feed has etag indicating sequence \n self.etag = d.etag # save position in feed for next time\n else : # no etag, must re-read whole feed every time\n etag = None\n self.modified = getattr(d,\"modified\",None) # save last update timestamp, if any, for next time\n hdrdate = \"\" #### d.feed.date # date as string\n # Process all entries in feed just read.\n # Ignore items that were previously seen\n for entry in d.entries : # get items from feed\n msgitem = self.doentry(entry, now) # do this entry\n if msgitem : # if new item to print\n self.inqueue.put(msgitem) # save this item\n self.markingallasread = False # if marking all as read, stop doing that.\n # Purge stories not seen in a while.\n self.purgeolditems(now-self.expirationsecs, self.itemqueued) # purge old previousy read stories when expired\n\n except (IOError, AttributeError) as message : # if trouble\n self.logger.exception(message) # debug\n errmsg = 'No \"%s\" news because %s.' % (self.gettitle(), str(message))\n self.logerror(errmsg) # log\n\n def purgeolditems(self,expirationtime,dict) : # purge old items already seen and printed\n # We have to do this the hard way, because stories can appear in the feed, be preempted\n # by higher priority stories, and reappear later.\n expired = [] # expired items\n for elt in dict : # for item in dictionary\n if dict[elt] < expirationtime : # if expired\n expired.append(elt) # note expired\n for elt in expired : # for all expired items\n del(dict[elt]) # delete from dict\n self.logger.debug(\"Expired: %s\" % (elt,)) # debug\n\n def doentry(self,entry, now) : # do one feed entry\n title = self.cleandescription(entry.title) # title of entry\n id = getattr(entry,\"id\", None) # ID of entry\n description = entry.description # description of entry\n # Clean up news item. Should do this via feedparser utilities.\n description = self.cleandescription(entry.description)\n # Check for title just being the beginning of the description\n if textsubset(title, description) : # if title is just beginning of description\n title = \"\" # drop title\n try : # feedparser >= 5.1.1\n date = entry.published # publication date of entry\n dateparsed = entry.published_parsed # date parsed\n except AttributeError: # older feedparser\n date = entry.date # feedparser < 5.1.1\n dateparsed = entry.date_parsed\n # convert to local time. 
Feedparser times are UT\n timestamp = calendar.timegm(dateparsed) # get timestamp value\n ageinsecs = time.time() - timestamp # age of item in seconds\n if ageinsecs > self.maxage : # if too old\n self.logger.debug(\"Very old feed item date: %s - dropped\" % (repr(date)))\n return(None)\n dateparsed = datetime.datetime.fromtimestamp(timestamp)\n assert(isinstance(dateparsed, datetime.datetime))\n msgitem = feedmanager.FeedItem(self, self.gettitle(), \n msgutils.editdate(dateparsed), \n msgutils.edittime(dateparsed), \n title, description)\n # Have we read this item already? Check for duplicates.\n # If either the ID or the text is duplicated, it's a duplicate.\n # Sometimes IDs change when the text does not, because of server-side problems.\n seen = msgitem.digest in self.itemqueued # true if already seen\n if self.markingallasread : # if marking all as read\n seen = True # pretend we've seen this story\n self.itemqueued[msgitem.digest] = now # keep keys of stories read\n logtext = \"NO TITLE\" # text for logging only\n if title : # use title\n logtext = title[:40].encode('ascii','replace')\n elif description : # or description\n logtext = description[:40].encode('ascii','replace')\n if seen : # if already seen\n self.logger.debug(\"Old feed item: (%s) %s\" % (id, logtext)) # Note news item\n ####self.logger.debug(\"Old feed item date: %s %s\" % (repr(date), repr(dateparsed))) # ***TEMP***\n return(None)\n # New news item, prepare for display\n self.logger.debug(\"New feed item: (%s) %s\" % (id,logtext)) # Note news item\n ####self.logger.debug(\"New feed item date: %s %s\" % (repr(date), repr(dateparsed))) # ***TEMP***\n return(msgitem) # build and return new item\n\n def cleandescription(self, s) : # clean up description (item body) for printing\n if s is None :\n return(s) # handle no description case\n # Clean up news item. Should do this via feedparser utilities.\n ####print(\"Before clean:\\n\" + s.encode('ascii','replace')) # ***TEMP***\n for (pattern, rep) in self.kdescriptionrewrites : # apply all rewrite rules\n s = pattern.sub(rep, s) # in sequence\n ####print(\"After clean:\\n\" + s.encode('ascii','replace')) # ***TEMP***\n return(s.strip()) # remove any lead/trail white space \n\n def calcdigest(self, item) : \n \"\"\"\n Calculate message digest for uniqueness check\n Version for news feeds only. Only looks at source, title and body.\n Some news sources (esp. Reuters) will resend the same message with a new timestamp. \n \"\"\"\n m = hashlib.md5() # begin a hash of the fields present\n m.update(repr(item.msgfrom).encode(\"utf8\")) # source\n m.update(repr(item.subject).encode(\"utf8\")) # subject\n m.update(repr(item.body).encode(\"utf8\")) # body of msg\n item.digest = m.hexdigest() # get message digest as hex string, to check if seen before\n \n \n \n\n\n \n","repo_name":"John-Nagle/baudotrss","sub_path":"messager/newsfeed.py","file_name":"newsfeed.py","file_ext":"py","file_size_in_byte":18154,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"78"}
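The duplicate detection in `calcdigest` above hashes only the identity-defining fields (source, subject, body) and deliberately ignores timestamps, because some sources resend identical stories with fresh dates. A standalone sketch of that idea:

```python
# Digest-based story dedup, mirroring calcdigest's field choice above.
import hashlib


def story_digest(source, subject, body):
    m = hashlib.md5()
    for field in (source, subject, body):
        m.update(repr(field).encode("utf8"))  # timestamps are left out on purpose
    return m.hexdigest()


seen = set()
for story in [("Reuters", "Headline", "Body"), ("Reuters", "Headline", "Body")]:
    d = story_digest(*story)
    print("duplicate" if d in seen else "new")
    seen.add(d)
```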
+{"seq_id":"10567043029","text":"\"\"\"\n Evaluation Functions in Collaborative Filter \n\"\"\"\nimport random as rd\nimport numpy as np\nimport pandas as pd\nimport time\nfrom scipy import sparse\nfrom functools import wraps\n\n\n# record time\ndef fn_timer(function):\n\t@wraps(function)\n\tdef function_timer(*args, **kwargs):\n\t\tstart = time.clock()\n\t\tresult = function(*args, **kwargs)\n\t\tend = time.clock()\n\t\tprint('%s use time: %s'%(function.__name__, str(end-start)))\n\t\treturn result\n\treturn function_timer\n\n\n@fn_timer\ndef file_read(para_name, para_splitprecent=0.9):\n \"\"\"\n Read rating matrix, and split the file into trian set and test set. \n Return rating matrix and test data.\n :param para_name: file name\n :param para_splitprecent: the percent of train set and test set, default 0.9\n :return: rating matrix, test data, dict based on user and dict based on item\n user_dict = {\n 0: [(0, 5.0), (1, 2.0), ... ],\n 1: [(0, 1.0), (1, 0.0), ... ],\n ...\n }\n ===> user_id: [(item_id, rating)...]\n item_dict = { \n 0: [(0, 5.0), (1, 2.0), ... ],\n 1: [(0, 1.0), (1, 0.0), ... ],\n ...\n }\n ===> item_id: [(user_id, rating)...]\n \"\"\"\n test_data = []\n rate = pd.read_csv(para_name)\n del rate[\"timestamp\"]\n user_num = max(rate['userId'])\n item_num = max(rate['movieId'])\n rate = rate.values\n rate_m = sparse.dok_matrix((user_num, item_num))\n user_dict = {}\n item_dict = {}\n for vec in rate:\n if rd.random() > para_splitprecent: # test data\n test_data.append([int(vec[0]-1), int(vec[1]-1), vec[2]]) \n else: # train data\n rate_m[int(vec[0]-1), int(vec[1]-1)] = vec[2] # array start from 0 \n try:\n user_dict[int(vec[0]-1)].append((int(vec[1]-1), vec[2]))\n except:\n user_dict[int(vec[0]-1)] = []\n user_dict[int(vec[0]-1)].append((int(vec[1]-1), vec[2]))\n try:\n item_dict[int(vec[1]-1)].append((int(vec[0]-1), vec[2]))\n except:\n item_dict[int(vec[1]-1)] = []\n item_dict[int(vec[1]-1)].append((int(vec[0]-1), vec[2]))\n return rate_m, test_data, user_dict, item_dict\n \n\n@fn_timer\ndef file_read_2(para_name, para_splitprecent=0.9, para_max_user=100000, para_max_item=200000):\n \"\"\"\n When file is to large to load in memory, use this function.\n Read rating matrix, and split the file into trian set and test set. \n Return rating matrix and test data.\n :param para_name: file name\n :param para_splitprecent: the percent of train set and test set, default 0.9\n :return: rating matrix and test data\n user_dict = {\n 0: [(0, 5.0), (1, 2.0), ... ],\n 1: [(0, 1.0), (1, 0.0), ... ],\n ...\n }\n ===> user_id: [(item_id, rating)...]\n item_dict = { \n 0: [(0, 5.0), (1, 2.0), ... ],\n 1: [(0, 1.0), (1, 0.0), ... 
],\n        ...\n    }\n    ===> item_id: [(user_id, rating)...]\n    \"\"\"\n    test_data = []\n    user_dict = {}\n    item_dict = {}\n    rate_m = sparse.dok_matrix((para_max_user, para_max_item))\n    with open(para_name) as f:\n        f.readline()\n        while True:\n            tmp = f.readline().split(',')\n            if len(tmp) < 2:\n                break\n            vec = [int(tmp[0]), int(tmp[1]), float(tmp[2])] \n            if rd.random() > para_splitprecent:  # test data\n                test_data.append([int(vec[0]-1), int(vec[1]-1), vec[2]]) \n            else:  # train data\n                rate_m[int(vec[0]-1), int(vec[1]-1)] = vec[2]  # array start from 0\n                if int(vec[0]-1) not in user_dict:\n                    user_dict[int(vec[0]-1)] = []\n                if int(vec[1]-1) not in item_dict:\n                    item_dict[int(vec[1]-1)] = []\n                user_dict[int(vec[0]-1)].append((int(vec[1]-1), vec[2]))\n                item_dict[int(vec[1]-1)].append((int(vec[0]-1), vec[2]))\n    return rate_m, test_data, user_dict, item_dict\n\n\n@fn_timer\ndef get_dict_user(para_m):\n    \"\"\"\n    Return a dict based on user.\n    Record rating information of one user.\n    user = {\n        0: [(0, 5.0), (1, 2.0), ... ],\n        1: [(0, 1.0), (1, 0.0), ... ],\n        ...\n    }\n    user_id: [(item_id, rating)...]\n    :param para_m: rating matrix\n    :return: a dict \n    \"\"\"\n    num_user, num_item = para_m.shape\n    user = {}\n    for i in range(num_user):\n        user[i] = []\n        for j in range(num_item):\n            if para_m[i,j] > 0:\n                user[i].append((j, para_m[i,j]))\n    return user\n\n\n@fn_timer\ndef get_dict_item(para_m):\n    \"\"\"\n    Return a dict based on item.\n    Record rating information of one item.\n    item = { \n        0: [(0, 5.0), (1, 2.0), ... ],\n        1: [(0, 1.0), (1, 0.0), ... ],\n        ...\n    }\n    item_id: [(user_id, rating)...]\n    :param para_m: rating matrix\n    :return: a dict \n    \"\"\"\n    num_user, num_item = para_m.shape\n    item = {}\n    for j in range(num_item):\n        item[j] = []\n        for i in range(num_user):\n            if para_m[i,j] > 0:\n                item[j].append((i, para_m[i,j]))\n    return item\n\n\n@fn_timer\ndef get_dict_i_u(para_m):\n    \"\"\"\n    Return a dict based on item and dict.\n    Record rating information of one item.\n    item = { \n        0: [(0, 5.0), (1, 2.0), ... ],\n        1: [(0, 1.0), (1, 0.0), ... ],\n        ...\n    }\n    item_id: [(user_id, rating)...]\n    user = {\n        0: [(0, 5.0), (1, 2.0), ... ],\n        1: [(0, 1.0), (1, 0.0), ... ],\n        ...\n    }\n    user_id: [(item_id, rating)...]    \n    :param para_m: rating matrix\n    :return: dict of item and user \n    \"\"\" \n    num_user, num_item = para_m.shape\n    item = {}\n    user = {}\n    for j in range(num_item):\n        for i in range(num_user):\n            if para_m[i,j] > 0:\n                if j not in item.keys():\n                    item[j] = []\n                if i not in user.keys():\n                    user[i] = []\n                item[j].append((i, para_m[i,j]))\n                user[i].append((j, para_m[i,j]))    \n    return item, user\n\n\n# @fn_timer\ndef loss_rmse(para_hat, para_true, skip=0):\n    \"\"\"\n    The MSE loss (mean of squared errors; no square root is taken).\n    The format of input vector:\n        user_id, item_id, rate\n    :param para_hat: estimated value\n    :param para_true: true value\n    :return: the mse loss\n    \"\"\"\n    loss = 0\n    n = len(para_hat)\n    for ii in range(n):\n        loss += pow(para_hat[ii][2] - para_true[ii][2], 2)\n    return loss/(n-skip)\n\n\n# @fn_timer\ndef loss_rmae(para_hat, para_true, skip=0):\n    \"\"\"\n    The MAE loss (mean of absolute errors).\n    The format of input vector:\n        user_id, item_id, rate\n    :param para_hat: estimated value\n    :param para_true: true value\n    :return: the mae loss\n    \"\"\"\n    loss = 0\n    n = len(para_hat)\n    for ii in range(n):\n        loss += abs(para_hat[ii][2] - para_true[ii][2])\n    return loss/(n-skip)\n","repo_name":"YuyangZhangFTD/MyLab","sub_path":"RecLab/RecTool.py","file_name":"RecTool.py","file_ext":"py","file_size_in_byte":7329,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
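A tiny usage sketch for the loss helpers above; the triples follow the documented `[user_id, item_id, rating]` layout and the numbers are synthetic:

```python
# Manual check of the mean-squared-error computed by loss_rmse (skip=0).
y_hat = [[0, 0, 4.5], [0, 1, 3.0]]
y_true = [[0, 0, 5.0], [0, 1, 2.0]]

mse = sum((h[2] - t[2]) ** 2 for h, t in zip(y_hat, y_true)) / len(y_hat)
print(mse)  # 0.625, the same value loss_rmse(y_hat, y_true) returns
```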
+{"seq_id":"8170648394","text":"from django.shortcuts import render, redirect\nfrom django.core.files.uploadhandler import InMemoryUploadedFile\nfrom .models import History\nfrom io import BytesIO\nfrom pathlib import Path\nfrom PIL import Image\nimport os, sys\nimport torch\nimport AdaIN\nimport random\nfrom django.conf import settings\n\ndef setseq(request):\n history_list = History.objects.all().order_by('id')\n seq = 1\n\n for history in history_list:\n\n if history.id != seq:\n History.objects.filter(id=history.id).update(id=seq)\n \n seq += 1\n\n return redirect('history')\n\ndef initseq(Model):\n num = Model.objects.count()\n\n if num == 0:\n seq = 1\n else:\n model = Model.objects.last()\n seq = model.id + 1\n \n return seq\n\n# Create your views here.\ndef main_view(request):\n return render(request, 'index.html')\n\n\ndef transfer_view(request):\n return render(request, 'transfer.html')\n\n\ndef result_view(request):\n \n try:\n preserve_color = request.POST['color_checkbox']\n except:\n preserve_color = '0'\n try:\n is_nature = request.POST['image_type_checkbox']\n except:\n is_nature = '0'\n\n alpha = float(request.POST['weight'])\n\n PATH = 'HM/model_made/'\n vgg_path = PATH + 'vgg_normalised.pth'\n decoder_model_nature_path = PATH + 'nature_7_pattern_30.tar'\n decoder_model_pattern_path = PATH + 'pattern_7_pattern_30.tar'\n\n \n # try except로 input 중 파일을 우선 받고 exception 발생하면 post로 전달받은 이미지 url 이용\n try : \n content_image = request.FILES['content_image']\n except : \n content_image = request.POST['content_selected']\n \n try : \n style_image = request.FILES['style_image']\n except : \n style_image = request.POST['style_selected']\n \n \n\n if int(is_nature):\n generated_result = AdaIN.main(vgg_path, decoder_model_nature_path, content_image, style_image, alpha=alpha, interpolation_weights=None, preserve_color = int(preserve_color))\n\n else:\n generated_result = AdaIN.main(vgg_path, decoder_model_pattern_path, content_image, style_image, alpha=alpha, interpolation_weights=None, preserve_color = int(preserve_color))\n\n output = generated_result['output_image']\n content_image = generated_result['content_image']\n style_image = generated_result['style_image']\n\n\n output_io = BytesIO()\n output.save(output_io, format='JPEG')\n\n content_io = BytesIO()\n content_image.save(content_io, format='JPEG')\n\n style_io = BytesIO()\n style_image.save(style_io, format='JPEG')\n\n\n final_output = InMemoryUploadedFile(file=output_io,\n field_name=\"ImageField\",\n name='stylized.jpg',\n content_type='image/jpeg',\n size=sys.getsizeof(output_io),\n charset=None)\n\n final_content_image = InMemoryUploadedFile(file=content_io,\n field_name=\"ImageField\",\n name='content.jpg',\n content_type='image/jpeg',\n size=sys.getsizeof(content_io),\n charset=None)\n\n final_style_image = InMemoryUploadedFile(file=style_io,\n field_name=\"ImageField\",\n name='style.jpg',\n content_type='image/jpeg',\n size=sys.getsizeof(style_io),\n charset=None)\n \n \n history = History()\n history.id = initseq(History)\n history.content_image = final_content_image\n history.style_image = final_style_image\n history.output_image = final_output\n history.preserve_color = True if int(preserve_color) == 1 else False\n history.nature_pattern = True if int(is_nature) == 1 else False\n history.alpha = alpha\n history.save()\n \n result = History.objects.order_by('-pk')[0]\n \n \n return render(request, 'result.html', {'args':result})\n\ndef history_view(request):\n history = History.objects\n return render(request, 
'history.html', {'history':history})\n\n\ndef delete_history(request):\n    \n    check_list = request.GET.getlist('chk')\n\n    history = History.objects.filter(id__in=check_list)\n    \n    content_image = [i.content_image.path for i in history]\n    style_image = [i.style_image.path for i in history]\n    output_image = [i.output_image.path for i in history]\n\n    history.delete()\n\n\n    try:\n        for i, j, k in zip(content_image, style_image, output_image):\n            os.remove(os.path.join(settings.MEDIA_ROOT, i))\n            os.remove(os.path.join(settings.MEDIA_ROOT, j))\n            os.remove(os.path.join(settings.MEDIA_ROOT, k))\n        \n    except:\n        pass\n    \n    \n    return redirect('history')\n\n\n\n###### Static Images Section ######\ndef get_images(request):\n\n    pattern_category = ['black_white_patterns','figure_patterns','fractal_patterns',\n    'geometric_patterns','hexagon_patterns','line_patterns','patterns']\n\n    nature_category = ['animal_images','animal_skin_images','bee_images','bird_images','butterfly_images',\n    'crystal_images','dragonfly_images','eyes_images','flower_images','nature_images','reptile_images',\n    'spider_images','tree_images','wave_images']\n\n    is_pattern = 0\n\n    try:\n        pattern_cate_idx = int(request.POST['pattern_image'])\n        is_pattern = 1\n    except:\n        nature_cate_idx = int(request.POST['nature_image'])\n\n\n    if is_pattern:\n        path_mid = pattern_category[pattern_cate_idx-1]\n        path = './DesignAssistant/static/img/pattern_images_by_keywords/'+path_mid+'/'\n        path_last = 'pattern_images_by_keywords/'+path_mid+'/'\n    else:\n        path_mid = nature_category[nature_cate_idx-1]\n        path = './DesignAssistant/static/img/nature_images_by_keywords/'+path_mid+'/'\n        path_last = 'nature_images_by_keywords/'+path_mid+'/'\n\n    images = getImages(path)\n    file_names = [i.split('/')[-1] for i in images]\n    real_images = ['../static/img/'+path_last+i for i in file_names]\n\n\n    info: dict = {\n        'real_images' : real_images\n    }\n    \n\n    return render(request, 'transfer.html', {'info' : info})\n\n# Load image paths from the given directory\ndef getImages(path: str) : \n    image_list: list = os.listdir(path) # list every file name under path, e.g. ['0.jpg', '1.jpg', ...]\n    \n\n    if len(image_list) >= 30:\n        random_images: list = random.sample(image_list, 30) # randomly keep just 30 of them\n    \n    else:\n        random_images = image_list\n\n    image_path_list: list = getFullPath(path, random_images) # only bare file names, so build full paths with getFullPath\n    return image_path_list\n\n# Build full image paths\ndef getFullPath(path: str, image_list: list) : \n    fullPath: list = []\n\n    for image in image_list : \n        temp_path = '%s%s' % (path, image) # append the file name to path, e.g. './DesignAssistant/static/img/pattern_images/0.jpg'\n        fullPath.append(temp_path)\n\n    return fullPath\n\n###### Static Images Section End ######\n\n\n","repo_name":"Kyungpyo-Kang/HM","sub_path":"DesignAssistant/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
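The three `BytesIO` -> `InMemoryUploadedFile` blocks in `result_view` above repeat the same pattern; a hedged helper that would collapse them. The function name and placement are suggestions, not part of the original code (the import path mirrors the record's own):

```python
# Wrap a PIL image as a Django in-memory upload (sketch).
import sys
from io import BytesIO

from django.core.files.uploadhandler import InMemoryUploadedFile


def pil_to_uploaded_file(image, name):
    buf = BytesIO()
    image.save(buf, format="JPEG")
    return InMemoryUploadedFile(
        file=buf,
        field_name="ImageField",
        name=name,
        content_type="image/jpeg",
        size=sys.getsizeof(buf),
        charset=None,
    )
```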
+{"seq_id":"70904040572","text":"import cherrypy\n\nfrom feedbuffer import core, database, log, settings\nfrom feedbuffer.settings import DEFAULT_UPDATE_INTERVAL\n\n_logger = log.get_logger(__name__)\n\n\nclass Server:\n @cherrypy.expose\n def index(self, url, update_interval=DEFAULT_UPDATE_INTERVAL):\n if not database.feed_exists(url):\n _logger.info('Adding feed: %s', url)\n try:\n core.update_feed(url)\n except Exception:\n _logger.exception('Exception occurred during initial feed update: %s', url)\n return None\n\n core.schedule_feed_update(url)\n elif url not in core.scheduled:\n _logger.info('Updating feed: %s', url)\n core.executor.submit(core.update_feed, url)\n core.schedule_feed_update(url)\n\n feed = database.get_feed(url)\n update_interval = int(update_interval)\n if feed.update_interval != update_interval:\n _logger.info('Changing update interval from %d to %d seconds for feed: %s',\n feed.update_interval, update_interval, url)\n database.update_model_data(feed, update_interval=update_interval)\n core.schedule_feed_update(url)\n\n _logger.info('Generating feed: %s with %d entries...', url, len(feed.entries))\n response = core.generate_feed(feed.data, [entry.data for entry in feed.entries])\n database.flush_feed(feed)\n cherrypy.response.headers['Content-Type'] = ''\n return response\n","repo_name":"cryzed/Feedbuffer","sub_path":"feedbuffer/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"78"}
+{"seq_id":"1104362716","text":"\"\"\"! @brief СКБ201 Тур ТВ Методы Программирования ЛР3\"\"\"\n\n##\n# @mainpage Лабораторная работа номер 3 по курсу \"Методы Программирования\"\n#\n# @brief СКБ201 Тур Т.В. Методы Программирования ЛР3\n#\n# @section intro Введение\n# Лабораторная работа номер 3 по курсу \"Методы программирования\". Выполнена студентом Туром Тимофем Владимировичем группы СКБ201.\n#\n# @section description Описание\n# В данной лабораторной работе тредуется реализовать 2 алгоритма хэширования для ключевого поля данных из лабораторной работы 2, построить хэш-таблицы, написать функцию поиска в ней, после чего проверить их работу и сравнить эффективность по времени с предыдущей лабораторной.\n# Мой вариант - 24. Хэширование происходит по полю ФИО.\n#\n# @section link Ссылка на репозиторий\n# Данный проект хранится в репозитории github по ссылке https://github.com/TimothyTur/MP_L3 .\n# В силу явной ненужности многих данных doxygen, они будут отсутствовать там (кроме нужных, например как этот отчет).\n# \n# @section hashTable Реализация хэш-таблицы\n# Эта тема предшествует функицям хэширования, потому что в ней определятся основополагающие параметры.\n# За размерность хэш-таблицы будет взята 2^18, что равно 262144. Этого уже достаточно для 100000 элементов, а предыдущая степень (131072) рискует иметь множество коллизий.\n# Сами коллизии решены методом цепочек.\n# \n# @section badHash \"Простая\" функция хэширования\n# Простая хэш-функия реализована через полином по буквам в ключе. За коэффициент полинома взято 31, так как большие буквы будут восприниматься как малые, пробелы будут игнорироваться.\n# \n# @section goodHash \"Сложная\" функция хэширования\n# Сложная хэш-функция реализована по подобию rot13. В силу ограничения в 2^18 побитовый сдвиг будет взят на 11 и на 7.\n# В теории этот алгоритм быстрее, так как не требует умножения, лишь побитовый сдвиг, также он даст меньше коллизий, так как основан на rot13, в котором в принципе число коллизий минимально.\n# \n# @section diagramSearch Сравнительный график работы поиска\n# Данный график демонстрирует время, затраченное на поиск элемента. Данные о поисков линейного, multimap, бинарного с сортировкой и просто бинарного взяты напрямую из предыдущей лабораторной.\n# @image latex mpLab3Graph1.png \"График работы поиска\"\n# На данном графике видно, что бинарный поиск, multimap и оба хэша складываются в одну прямую линию в нуле. Бинарному поиску в случае 100 000 элементов требуется не более 17 сравнений. multimap в случае python реализован на словарях, а они реализованы на хэш-таблицах, а значит его время должно примерно совпадать с новыми измерениями в этой лабораторной. Поиск по хэш-таблице в общем случае требует константное время. Число операций и там и там мало, но видимо достаточно минимально чтобы везде работать почти моментально. Потому имеем прямые и самый быстрый реализованный поиск.\n# \n# @section diagramCollision Сравнительный график числа коллизий\n# Данный график демонстрирует общее число коллизий в массивах в зависимости от числа элементов в выборке.\n# @image latex mpLab3Graph2.png \"График числа коллизий\"\n# На графике особой разницы в числе коллизий не видно. Что странно, так как алгоритмы принципиально разные, и вроде как сложный должен сработать лучше. Но разница видимо настолько никакая, что в виду допустимых погрешностей ее и не видно. Остается только предположить что требуется в разы больший объем выборки. 
Около миллиона, а то и целого миллиарда.\n# Также велик шанс того что число коллизий обусловленно тем, что в выборке изначально есть элементы с одинаковыми ключами (что продемонстрированно в предыдущей лабораторной).\n# \n# @section collision Предположение причины числа коллизий\n# В попытках понять почему получается даже если близкое, но такое огромное число коллизий, я решил ввести счетчики уникальных ключей, используемых в таблице. Тогда каждая заполненная ячейка вносит в эту переменную вклад в единицу. В то же время функция подсчета коллизий вычисляет количество одинаковых элементов по ключу. На практике это удобно реализовать через возможности связного списка. Посчитав его длину и вычтя 1 получаем число совпадений данного конкретного хэша, а значит сумма по всем заполненным хэшам даст общее число коллизий. Это значит что каждая заполненная ячейка вносит в число коллизий вклад в длину этой ячейки минус 1.\n# Тогда сумма уникальных и сумма коллизий должна совпасть с длинной выборки, так как будем иметь сумму 1 за каждую зуполненную ячейку плюс длина ячейки минус 1. Единицы сокращаются, остается длина ячейки. Сумма длинн по всем ячейкам даст число в принципе распределенных по таблице ячеек, что есть длина исходного распределения.\n# И эта сумма показательна для состоятельной проверки работы программы, так как число уникальных элементов изменяется во время операций над таблицей, а вычисление коллизий - функция, вычисляемая в момент. Эта разница во времени и дает состоятельность, при совпадении чисел.\n# Это было пояснение к тесту, который я сделал, чтобы проверить что все работает правильно. И так и оказалось. Программа работает корректно, но я не совсем понимаю почему тогда столько коллизий.\n# Я перечитал свою же документацию выше, и подумал, а что если совпадение ключей имеет куда большее значение. В первой лабораторной, где генерируется моя выборка, за генератор ФИО я взял учебный список нашей группы, разбил на подэлементы, и генератору буквально сказал выбирать соответственно случайные элементы из полчившегося массива. И в этом и была проблема. В группе нас около 27, что дает 27 имен, 27 фамилий и 27 отчеств (не считая совпадений по группе). Тогда это 27*27*27=19683 различных значений. А значит и не удивительно что на выборке длины 100000 имеется целых 80000 коллизий.\n# На чем, получается, можно сделать вывод, что сама выборка ключей изначально не подходит для исследования коллизий в хэштаблице, в виду очень возможных совпадений.\n# \n\n##\n# @file full_code.py\n#\n# @brief Основной исполняевый файл лабораторной работы\n# \n# @sectioin description Описание\n# Лабораторная работа в изначальном свое виде выполнялась в оболочке \"jupyter notebook\" в силу его удобства для таких целей. Этот файл является прямым последовательным копированием ячеек из итогового документа (также прикрепленного в github), по причине того что doxygen на файлы \".ipynb\" не работает.\n# \n# @section differs Функциональное отличие\n# В предыдущих лабораторных работах вычисления производились над всеми выборками сразу. В этой же лабораторной хэш-таблица требует слишком много данных, потому вычисления будут происходить последовательно, совершая нужные измерения, после чего удаляя таблицу, приступая к следующей.\n# Также в этой лабораторной работе требуются данные из предыдущей. Данные, используемые здесь, были напрямую скопированные из выводов, сохраненных как часть документации. 
Также поиск элементов будет осуществляться на первых найденных элементах из предыдущей работы, также сохраненных как часть документации.\n# \n# @section results Результаты тестирования\n# Cледующая секция представляет из себя набор вывода программы по тестам.\n# Вывод для всех призводится по формату:\n# <размер выборки> <время поиска в простой таблице> <время поиска в сложной> <ключ искомого элемента>\n# <хэш найденного элемента простого алгоритма> <его сдвиг по цепочке> <найденный элемент>\n# <хэш найденного элемента сложного алгоритма> <его сдвиг по цепочке> <найденный элемент>\n# --------------------------------------------------\n# <повтор для всех размерностей>\n# \n# 100 0.0 0.0 Недомолкин Елизавета Эдикович\n# 237488 0 Недомолкин Елизавета Эдикович 92 2011/04/06 2012/02/27 91881\n# 28767 0 Недомолкин Елизавета Эдикович 92 2011/04/06 2012/02/27 91881\n# --------------------------------------------------\n# 500 0.0 0.0 Ташлыков Григорий Николаевич\n# 83313 0 Ташлыков Григорий Николаевич 92 2004/05/30 2013/03/23 32489\n# 95710 0 Ташлыков Григорий Николаевич 92 2004/05/30 2013/03/23 32489\n# --------------------------------------------------\n# 1000 0.0 0.0 Гришаев Андрей Сергеевич\n# 74419 0 Гришаев Андрей Сергеевич 78 2004/03/07 2008/10/15 95617\n# 168114 0 Гришаев Андрей Сергеевич 78 2004/03/07 2008/10/15 95617\n# --------------------------------------------------\n# 2000 0.0 0.0 Абдуллабеков Илья Николаевна\n# 239180 0 Абдуллабеков Илья Николаевна 96 2004/05/12 2013/11/10 71681\n# 233913 0 Абдуллабеков Илья Николаевна 96 2004/05/12 2013/11/10 71681\n# --------------------------------------------------\n# 5000 0.0 0.0 Самунин Тимофей Эдуардович\n# 37393 0 Самунин Тимофей Эдуардович 54 2009/04/23 2009/09/30 95564\n# 112789 0 Самунин Тимофей Эдуардович 54 2009/04/23 2009/09/30 95564\n# --------------------------------------------------\n# 10000 0.0 0.0 Ташлыков Артём Эдуардович\n# 66484 0 Ташлыков Артём Эдуардович 72 2005/08/13 2010/07/07 68301\n# 45054 0 Ташлыков Артём Эдуардович 72 2005/08/13 2010/07/07 68301\n# --------------------------------------------------\n# 20000 0.0 0.0 Красов Илья Александровна\n# 231440 2 Красов Илья Александровна 20 2006/01/24 2014/04/03 37206\n# 184951 2 Красов Илья Александровна 20 2006/01/24 2014/04/03 37206\n# --------------------------------------------------\n# 50000 0.0 0.0 Осипова Радомир Ашотович\n# 139305 0 Осипова Радомир Ашотович 99 2007/12/03 2008/08/06 21649\n# 39101 0 Осипова Радомир Ашотович 99 2007/12/03 2008/08/06 21649\n# --------------------------------------------------\n# 100000 0.0 0.0 Грицун Илья Сергеевич\n# 154063 27 Грицун Илья Сергеевич 42 2009/06/14 2011/04/01 60423\n# 247281 27 Грицун Илья Сергеевич 42 2009/06/14 2011/04/01 60423\n# --------------------------------------------------\n# \n\n# Imports\nimport random as rnd\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass HashTable:\n \"\"\"! Класс объектов, требуемых по заданию третьей лабораторной работы.\n \n Содержит в себе обе таблицы и весь требуемый функционал для них. В том числе и статические функции вычисления хэша.\n В таблицах во избежание коллизии сиспользуется метод цепочек. В силу реализации как таковых массивов в python, связный список будет реализован через простой лист.\n \"\"\"\n def __init__(self):\n \"\"\"! 
Конструктор хэш-таблицы по заданию лабораторной работы.\n \n Конструктор выделяет массивы для обеих хэш-таблиц.\n \"\"\"\n self.bad = [None]*(1<<18)\n self.good = [None]*(1<<18)\n self.uniBad = 0\n self.uniGood = 0\n \n @staticmethod\n def badHash(obj):\n \"\"\"! Вариант простой хэш-функции.\n Реализован через полином по буквам в ключе. За коэффициент полинома взято 31.\n\n @param obj Объект вычисления хэша. Должен обладать строковым свойством key в русском алфавите.\n\n @return Вычисленный хэш.\n \"\"\"\n nums = [ord(i.lower())-ord('а') if i!=' ' else None for i in obj.key]\n p = 1\n res = 0\n for i in nums:\n if i != None:\n res = (res+i*p)&0o777777 #модуль не сработает корректно\n p*=31 #весьмеричное число => 18/3=6 семерок\n return res\n @staticmethod\n def goodHash(obj):\n \"\"\"! Вариант сложной хэш-функции.\n Реализова по подобию rot13. В силу ограничения в 2^18 побитовый сдвиг будет взят на 11 и на 7.\n\n @param obj Объект вычисления хэша. Должен обладать строковым свойством key в русском алфавите\n\n @return Вычисленный хэш.\n \"\"\"\n nums = [ord(i.lower())-ord('а') if i!=' ' else None for i in obj.key]\n res = 0\n for i in nums:\n if i != None:\n res = (res+i)&0o777777\n res = (((res<<7)&0o777777)|(res>>11))&0o777777\n return res \n\n def addBad(self, elem):\n \"\"\"! Добавление элемента в хэш-таблицу простого хэша.\n \n @param elem Элемент для добавления в таблицу\n \"\"\"\n addr = HashTable.badHash(elem)\n if self.bad[addr] == None:\n self.bad[addr] = [elem]\n self.uniBad += 1\n else:\n self.bad[addr].append(elem)\n def addGood(self, elem):\n \"\"\"! Добавление элемента в хэш-таблицу сложного хэша.\n \n @param elem Элемент для добавления в таблицу\n \"\"\"\n addr = HashTable.goodHash(elem)\n if self.good[addr] == None:\n self.good[addr] = [elem]\n self.uniGood += 1\n else:\n self.good[addr].append(elem)\n\n def getBad(self, addr, step):\n \"\"\"! Возвращает элемент таблицы простого хэша\n \n @param addr Хэш искомого элемента.\n @param step Сдвиг в цепи элементов\n \n @return Искомый элемент или None, если такого элемента нет\n \"\"\"\n if addr>=(1<<18) or addr<0: return None\n if self.bad[addr] == None: return None\n if len(self.bad[addr])<=step: return None\n return self.bad[addr][step]\n def getGood(self, addr, step):\n \"\"\"! Возвращает элемент таблицы сложного хэша\n \n @param addr Хэш искомого элемента.\n @param step Сдвиг в цепи элементов\n \n @return Искомый элемент или None, если такого элемента нет\n \"\"\"\n if addr>=(1<<18) or addr<0: return None\n if self.good[addr] == None: return None\n if len(self.good[addr])<=step: return None\n if step<0: return None\n return self.good[addr][step]\n \n def popBad(self, addr, step):\n \"\"\"! Удаляет элемент таблицы простого хэша.\n \n @param addr Хэш искомого элемента.\n @param step Сдвиг в цепи элементов.\n \n @return Возвращает удаленный элемент, None при ошибке.\n \"\"\"\n if addr>=(1<<18) or addr<0: return None\n if self.bad[addr] == None: return None\n if len(self.bad[addr])<=step: return None\n if step<0: return None\n res = self.bad[addr].pop(step)\n if self.bad[addr] == []:\n self.bad[addr] = None\n self.uniBad -= 1\n return res\n def popGood(self, addr, step):\n \"\"\"! 
Remove an element from the complex-hash table.\n        \n        @param addr Hash of the sought element.\n        @param step Offset along the element chain.\n        \n        @return The removed element, or None on error.\n        \"\"\"\n        if addr>=(1<<18) or addr<0: return None\n        if self.good[addr] == None: return None\n        if len(self.good[addr])<=step: return None\n        if step<0: return None\n        res = self.good[addr].pop(step)\n        if self.good[addr] == []:\n            self.good[addr] = None\n            self.uniGood -= 1\n        return res\n    \n    def searchBad(self, elem):\n        \"\"\"! Search for an element in the simple-hash table.\n        \n        @param elem The sought element.\n        \n        @return On success, returns the pair (hash, offset); otherwise -1.\n        \"\"\"\n        addr = HashTable.badHash(elem)\n        if self.bad[addr] == None:\n            return -1\n        for i in range(len(self.bad[addr])):\n            if self.bad[addr][i] == elem: # comparison goes through MyObject.__eq__\n                return (addr, i)\n        return -1\n    def searchGood(self, elem):\n        \"\"\"! Search for an element in the complex-hash table.\n        \n        @param elem The sought element.\n        \n        @return On success, returns the pair (hash, offset); otherwise -1.\n        \"\"\"\n        addr = HashTable.goodHash(elem)\n        if self.good[addr] == None:\n            return -1\n        for i in range(len(self.good[addr])):\n            if self.good[addr][i] == elem:\n                return (addr, i)\n        return -1\n    \n    def collisionsBad(self):\n        \"\"\"! Computes the current number of collisions in the simple-hash table.\n        \n        @return Number of collisions.\n        \"\"\"\n        res = 0\n        for i in self.bad:\n            if i == None: continue\n            res += len(i)-1\n        return res\n    def collisionsGood(self):\n        \"\"\"! Computes the current number of collisions in the complex-hash table.\n        \n        @return Number of collisions.\n        \"\"\"\n        res = 0\n        for i in self.good:\n            if i == None: continue\n            res += len(i)-1\n        return res\n    \n# class\nclass MyObject:\n    \"\"\"! Class of objects required by the first lab assignment.\n    \n    The class generator as such was removed in the previous lab, since the object is read entirely from a file, so there is no need to put hash computation into the constructor. It is, however, relevant for the reading task.\n    \"\"\"\n    def __init__(self):\n        \"\"\"! Constructor of the MyObject class.\n        The constructor declares the variables the class contains. Takes no parameters.\n        \"\"\"\n        self.fio, self.num, self.din, self.dou, self.pay, \\\n        self.goodHash, self.badHash = \\\n        None, None, None, None, None, None, None\n    \n    #key\n    @property\n    def key(self):\n        \"\"\"! The class's dedicated property: the key.\n        The property is made a dedicated one to greatly simplify access to it and its substitution in tests, while requiring no additional resources.\n        \"\"\"\n        return self.fio\n    \n    #==\n    def __eq__(self, other):\n        \"\"\"! Equality check.\n        \n        @param other MyObject instance to compare with.\n        \n        @return bool.\n        \"\"\"\n        return self.key==other.key\n    #>=\n    def __ge__(self, other):\n        \"\"\"! Greater-than-or-equal check.\n        \n        @param other MyObject instance to compare with.\n        \n        @return bool.\n        \"\"\"\n        return self.key>=other.key\n    #>\n    def __gt__(self, other):\n        \"\"\"! Greater-than check.\n        \n        @param other MyObject instance to compare with.\n        \n        @return bool.\n        \"\"\"\n        return self.key>other.key\n    #<=\n    def __le__(self, other):\n        \"\"\"! Less-than-or-equal check.\n        \n        @param other MyObject instance to compare with.\n        \n        @return bool.\n        \"\"\"\n        return self.key<=other.key\n    #<\n    def __lt__(self, other):\n        \"\"\"! 
Less-than check.\n        \n        @param other MyObject instance to compare with.\n        \n        @return bool.\n        \"\"\"\n        return self.key<other.key\n        # (mini_batch X O)\n        W1 = np.random.randn(I, H)\n        b1 = np.random.randn(H)\n        W2 = np.random.randn(H, O)\n        b2 = np.random.randn(O)\n\n        # three layers: two affine layers with one activation sandwiched in between\n        self.layers = [\n            Affine(W1, b1),\n            Sigmoid(),\n            Affine(W2, b2)\n        ]\n\n        # collect the weights\n        self.params = []\n        for layer in self.layers:\n            self.params += layer.params\n\n    # forward propagation\n    def predict(self, x):\n        for layer in self.layers:\n            x = layer.forward(x)\n        return x","repo_name":"qitianyuu/DeepLearnWithNumpy","sub_path":"c01/forward_net.py","file_name":"forward_net.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
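A standalone sanity check of the 18-bit rotation used by goodHash in the hash-table record above; the helper name rot18 is made up here, but the mask and shift amounts match the record's code:

def rot18(res):
    # left-rotate an 18-bit value by 7: shift up by 7 and wrap the top 7 bits down
    MASK = 0o777777  # six octal sevens == 2**18 - 1
    return (((res << 7) & MASK) | (res >> 11)) & MASK

assert rot18(1) == 1 << 7           # a low bit moves up by 7 positions
assert rot18(1 << 17) == 1 << 6     # the top bit wraps around to position 6
assert all(0 <= rot18(v) <= 0o777777 for v in range(0, 1 << 18, 997))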
+{"seq_id":"35902694990","text":"from django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import include, path\nfrom django.views import defaults as default_views\nfrom django.views.generic import TemplateView, RedirectView\nfrom rest_framework.authtoken.views import obtain_auth_token\nfrom django.contrib.flatpages.views import flatpage\nfrom backend.users.admin import constellation_admin as cl8_admin\nfrom backend.users.views import sample_csv_template\n\nurlpatterns = [\n # serve the vue template instead of the default home\n path(\"\", TemplateView.as_view(template_name=\"pages/vue.html\"), name=\"home\"),\n # Django Admin, use {% url 'admin:index' %}\n path(\"admin/\", cl8_admin.urls),\n path(\"advanced-admin/\", admin.site.urls),\n path(\n \"admin/import-csv/sample.csv\", sample_csv_template, name=\"sample-csv-template\"\n ),\n # User management\n path(\"users/\", include(\"backend.users.urls\", namespace=\"users\")),\n path(\"accounts/\", include(\"allauth.urls\")),\n path(\"about/\", flatpage, {\"url\": \"/about/\"}, name=\"about\"),\n path(\"privacy/\", flatpage, {\"url\": \"/privacy/\"}, name=\"privacy\"),\n # Your stuff: custom urls includes go here\n path(\n \"favicon.ico\", RedirectView.as_view(url=\"/static/images/favicons/favicon.ico\")\n ),\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\n# API URLS\nurlpatterns += [\n # API base url\n path(\"api/\", include(\"config.api_router\")),\n # DRF auth token\n path(\"auth-token/\", obtain_auth_token),\n path(\"\", include(\"backend.users.api.passwordless_urls\")),\n]\n# + [\n# path('', TemplateView.as_view(template_name=\"pages/vue.html\")),\n# path('', TemplateView.as_view(template_name=\"pages/vue.html\"))\n# ]\n\n\nif settings.DEBUG:\n # This allows the error pages to be debugged during development, just visit\n # these url in browser to see how these error pages look like.\n urlpatterns += [\n path(\n \"400/\",\n default_views.bad_request,\n kwargs={\"exception\": Exception(\"Bad Request!\")},\n ),\n path(\n \"403/\",\n default_views.permission_denied,\n kwargs={\"exception\": Exception(\"Permission Denied\")},\n ),\n path(\n \"404/\",\n default_views.page_not_found,\n kwargs={\"exception\": Exception(\"Page not Found\")},\n ),\n path(\"500/\", default_views.server_error),\n ]\n if \"debug_toolbar\" in settings.INSTALLED_APPS:\n import debug_toolbar\n\n urlpatterns = [path(\"__debug__/\", include(debug_toolbar.urls))] + urlpatterns\n","repo_name":"Synchro/constellate","sub_path":"backend/config/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"}
+{"seq_id":"36868485753","text":"# coding: utf8\n\n\ndef index(): return dict(message=\"hello from seo.py\")\n\n\ndef split():\n presentation = db(db.plugin_presentation.name == request.args[0]).select().first()\n splitted = presentation.markmin.split(\"\")\n db(db.plugin_slide.plugin_presentation_id == presentation.id).delete()\n \n for n, markmin in enumerate(splitted):\n db.plugin_slide.insert(\n presentation_id=presentation.id,\n markmin=markmin,\n )\n return dict(n=n)\n \n\ndef show():\n presentation = db(db.plugin_presentation.name == request.args[0]).select().first()\n if presentation:\n slides = db(db.plugin_slide.presentation_id == presentation.id).select()\n \n response.title = presentation.title\n response.description = presentation.description\n response.keywords = presentation.keywords\n response.author = presentation.author\n else:\n response.title = 'Presentation not loaded'\n response.description = 'Presentation not loaded'\n response.keywords = 'Presentation not loaded'\n response.author = 'Presentation not loaded'\n slides = None\n\n return dict(slides=slides)\n","repo_name":"DonaldMcC/gdms","sub_path":"controllers/plugin_ndspresent_slides.py","file_name":"plugin_ndspresent_slides.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"78"}
+{"seq_id":"73649543611","text":"\"\"\"empty message\n\nRevision ID: ae71f340e800\nRevises: a69a3ac2098e\nCreate Date: 2017-11-21 20:29:33.087493\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'ae71f340e800'\ndown_revision = 'a69a3ac2098e'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('chats',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('created_by', sa.Integer(), nullable=False),\n sa.Column('created_at', sa.DateTime(), nullable=False),\n sa.ForeignKeyConstraint(['created_by'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('messages',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('text', sa.Text(), nullable=False),\n sa.Column('chat_id', sa.Integer(), nullable=False),\n sa.Column('created_by', sa.Integer(), nullable=False),\n sa.Column('created_at', sa.DateTime(), nullable=False),\n sa.ForeignKeyConstraint(['chat_id'], ['chats.id'], ),\n sa.ForeignKeyConstraint(['created_by'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('messages')\n op.drop_table('chats')\n # ### end Alembic commands ###\n","repo_name":"farismosman/chat-app","sub_path":"migrations/versions/ae71f340e800_.py","file_name":"ae71f340e800_.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"24876226210","text":"import functools\nimport inspect\nimport os\nimport subprocess\nimport sys\nimport timeit\nimport argparse\nimport copy\nimport re\n\nimport libconf\nimport yaml\n\nfrom common import *\n\n# Output file names.\nout_prefix = \"timeloop-mapper.\"\nlog_file_name = out_prefix + \"log\"\nstats_file_name = out_prefix + \"stats.txt\"\nxml_file_name = out_prefix + \"map+stats.xml\"\nmap_txt_file_name = out_prefix + \"map.txt\"\nmap_cfg_file_name = out_prefix + \"map.cfg\"\nmap_cpp_file_name = out_prefix + \"map.cpp\"\noutput_file_names = [log_file_name,\n stats_file_name,\n xml_file_name,\n map_txt_file_name,\n map_cfg_file_name,\n map_cpp_file_name]\n\n# dimension conversion that maps a WU problem to FW problem\nwu2fw = {'P': 'R',\n 'Q': 'S',\n 'R': 'P',\n 'S': 'Q',\n 'C': 'K',\n 'K': 'N',\n 'N': 'C'}\n\n\ndef prod(l):\n return functools.reduce(lambda x, y: x * y, l)\n\n\ndef rewrite_workload_bounds(src, dst, workload_bounds, model, layer, batchsize, dataflow, phase, terminate, threads, synthetic, sparsity, save, replication, array_width, glb_scaling, dense): # backward_padding\n w, h, c, n, k, s, r, wpad, hpad, wstride, hstride = workload_bounds\n n = batchsize\n q = int((w - s + 2 * wpad) / wstride) + 1\n p = int((h - r + 2 * hpad) / hstride) + 1\n\n wu_equiv = k != 'D' and phase == 'wu'\n env_list = {}\n\n if not wu_equiv:\n print('Workload Dimensions:')\n print(' W =', w)\n print(' H =', h)\n print(' C =', c)\n print(' K =', k)\n print(' S =', s)\n print(' R =', r)\n print(' P =', p)\n print(' Q =', q)\n print(' N =', n)\n print(' W-pad =', wpad)\n print(' H-pad =', hpad)\n print(' W-stride =', wstride)\n print(' H-stride =', hstride)\n print()\n else:\n print('Equivalence Test: can we convert WU problem to FW and use cnn-layer.cfg? 
(at least in the dense case?)')\n        print('Workload Dimensions:')\n        print(' W =', w)\n        print(' H =', h)\n        print(f' C <- N {n}')\n        print(f' K <- C {c}')\n        print(f' S <- Q {q}')\n        print(f' R <- P {p}')\n        print(f' P <- R {r}')\n        print(f' Q <- S {s}')\n        print(f' N <- K {k}')\n        print(' W-pad =', wpad)\n        print(' H-pad =', hpad)\n        print(' W-stride =', wstride)\n        print(' H-stride =', hstride)\n        print()\n        env_list['TIMELOOP_EQUIVLENT_WU'] = 'True'\n\n    with open(src, \"r\") as f:\n        if \"cfg\" in src:\n            config = libconf.load(f)\n        elif \"yaml\" in src:\n            config = yaml.load(f, Loader=yaml.SafeLoader)\n\n    config['problem']['shape'] = shapes[phase]\n    if wu_equiv:\n        config['problem']['shape'] = shapes['fw']\n\n    if k == 'D':\n        depthwise = True\n        adapt_depthwise_config(config)\n    else:\n        depthwise = False\n        config['problem']['shape'] += '.yaml'\n\n    if wu_equiv:\n        dataflow = convert_dataflow(dataflow)\n\n    if phase == 'wu':\n        remove_block_constraint(config)\n\n    if depthwise:\n        if dataflow == 'CK':\n            dataflow = 'CN'\n        dataflow = dataflow.replace('K', 'C')\n\n    rewrite_dataflow(config, dataflow, replication, array_width)\n\n    rewrite_mesh(config, array_width)\n\n    if glb_scaling:\n        rewrite_glb_size(config, array_width)\n\n    if not wu_equiv:\n        config['problem']['R'] = r\n        config['problem']['S'] = s\n        config['problem']['P'] = p\n        config['problem']['Q'] = q\n        config['problem']['C'] = c\n        if not depthwise:\n            config['problem']['K'] = k\n        config['problem']['N'] = n\n    else:\n        config['problem']['R'] = p\n        config['problem']['S'] = q\n        config['problem']['P'] = r\n        config['problem']['Q'] = s\n        config['problem']['C'] = n\n        config['problem']['K'] = c\n        config['problem']['N'] = k\n    config['problem']['Wstride'] = wstride\n    config['problem']['Hstride'] = hstride\n    config['problem']['Wdilation'] = 1\n    config['problem']['Hdilation'] = 1\n    config['mapper']['model-name'] = model\n    config['mapper']['layer-name'] = layer\n\n    if terminate is not None:\n        config['mapper']['victory-condition'] = terminate\n    if threads is not None:\n        config['mapper']['num-threads'] = threads\n\n    # rewrite synthetic mask configuration\n    if not synthetic:\n        try:\n            config['mapper'].pop('mask-synthetic')\n        except KeyError:\n            pass\n    else:\n        config['mapper']['mask-synthetic'] = {}\n        if sparsity is not None:\n            config['mapper']['mask-synthetic']['target-sparsity'] = sparsity\n        if save is not None:\n            config['mapper']['mask-synthetic']['synthetic-mask-path'] = save\n\n    if dense:\n        opt_metrics = []\n        for opt in config['mapper']['optimization-metrics']:\n            opt_metrics.append(opt.split('-')[-1])\n        config['mapper']['optimization-metrics'] = opt_metrics\n\n    with open(dst, \"w\") as f:\n        if \"cfg\" in src:\n            f.write(libconf.dumps(config))\n        elif \"yaml\" in src:\n            f.write(yaml.dump(config))\n\n    return env_list\n\n\ndef convert_dataflow(dataflow):\n    pre_convert_dataflow = copy.copy(dataflow)\n    converted_dataflow = []\n    converted_dataflow.append(wu2fw[pre_convert_dataflow[0]])\n    converted_dataflow.append(wu2fw[pre_convert_dataflow[1]])\n    converted = ''\n    converted = converted.join(converted_dataflow)\n    print(f'convert from {dataflow} to {converted}')\n    return converted\n\n\ndef remove_block_constraint(config): # or possibly remove\n    for constraint in config['mapspace']['constraints']:\n        if constraint['type'] == 'temporal' and constraint['target'] == 'RegFile':\n            try:\n                constraint.pop('factors')\n            except KeyError:\n                pass\n\n\ndef rewrite_dataflow(config, dataflow, replication, array_width):\n    # loop through constraints, and make sure there is only 1 spatial-type constraint\n    # 
dingqing FIXME: not general enough for architecture configs with more spatial levels\n    num_spatial = 0\n    for constraint in config['mapspace']['constraints']:\n        if num_spatial > 1:\n            raise Exception(\"More than one spatial level! Check the config and the scripts.\")\n        if constraint['type'] == 'spatial':\n            num_spatial += 1\n\n    # determine if it is possible to replicate\n    possible2replicate = replication and (not config['problem'][dataflow[0]] > array_width / 2 or not config['problem'][dataflow[1]] > array_width / 2)\n    print('possible2replicate?', possible2replicate)\n    # NB: relies on the spatial constraint being the last one visited by the loop above\n    factors = constraint['factors'].split(' ')\n    new_factor = []\n    for factor in factors:\n        if factor[0] in dataflow:\n            # look at problem size\n            new_factor.append(factor[0] + f'{array_width}')\n        elif not possible2replicate:\n            new_factor.append(factor[0] + '1')\n    constraint['factors'] = ' '.join(new_factor)\n\n    # rewrite permutation\n    # admittedly ugly\n    non_spatial_dims = constraint['permutation'].replace(dataflow[0], '').replace(dataflow[1], '')\n    constraint['permutation'] = dataflow[0] + non_spatial_dims + dataflow[1]\n\n\ndef rewrite_mesh(config, array_width):\n    # honestly, the structure is kinda unnatural...\n    pe_subtree = config['architecture']['subtree'][0]['subtree'][0] # FIXME: this is not generic enough\n    pe_name = pe_subtree['name']\n    num_pe_prev = re.findall(r'\\d+', pe_name)[-1]\n    num_pe_new = array_width * array_width - 1\n    pe_subtree['name'] = pe_name.replace(num_pe_prev, f'{num_pe_new}')\n\n    # iterate over RF and PE\n    for component in pe_subtree['local']:\n        component['attributes']['meshX'] = array_width\n\n\ndef rewrite_glb_size(config, array_width):\n    scaling_factor = array_width / 16\n    # honestly, the structure is kinda unnatural...\n    sys_subtree = config['architecture']['subtree'][0] # FIXME: this is not generic enough\n    for comp in sys_subtree['local']:\n        if comp['name'] == 'GlobalBuffer':\n            comp['attributes']['depth'] = int(comp['attributes']['depth'] * scaling_factor)\n            comp['attributes']['n_banks'] = int(comp['attributes']['n_banks'] * scaling_factor)\n\n\ndef adapt_depthwise_config(config):\n    config['problem']['shape'] += '-depthwise.yaml'\n    try:\n        config['problem'].pop('K')\n    except KeyError:\n        pass\n    for constraint in config['mapspace']['constraints']:\n        if 'factors' in constraint:\n            factors = constraint['factors'].split(' ')\n            new_factor = [x for x in factors if x[0] != 'K']\n            constraint['factors'] = ' '.join(new_factor)\n        if 'permutation' in constraint:\n            constraint['permutation'] = ''.join([x for x in constraint['permutation'] if x != 'K'])\n\n\ndef run_timeloop(dirname, configfile, logfile='timeloop.log', env_list={}, dense=False, dense_dirname='dense-timeloop'):\n    configfile_path = os.path.join(dirname, os.path.basename(configfile))\n    logfile_path = os.path.join(dirname, logfile)\n\n    print('Running timeloop to get mapping')\n\n    def stmt():\n        with open(logfile_path, \"w\") as outfile:\n            this_file_path = os.path.abspath(inspect.getfile(inspect.currentframe()))\n            if not dense:\n                timeloop_executable_location = os.path.join(\n                    os.path.dirname(this_file_path), '..', 'build', 'timeloop-mapper')\n            else:\n                timeloop_executable_location = os.path.join(\n                    os.path.dirname(this_file_path), '..', '..', dense_dirname, 'build', 'timeloop-mapper')\n            status = subprocess.call([timeloop_executable_location, configfile_path], stdout=outfile, stderr=outfile, env=dict(os.environ, **env_list))\n            # status = subprocess.call([timeloop_executable_location, configfile_path, 'ERT.yaml'], stdout=outfile, stderr=outfile)\n            if status != 0:\n
subprocess.check_call(['cat', logfile_path])\n print('Did you remember to build timeloop and set up your environment properly?')\n sys.exit(1)\n t = timeit.Timer(stmt)\n time = t.timeit(1)\n print('Time to run timeloop = ', time)\n\n # Move timeloop output files to the right directory\n for f in output_file_names:\n if os.path.exists(f):\n os.rename(f, dirname + '/' + f)\n","repo_name":"compstruct/procrustes-timeloop-model","sub_path":"scripts/timeloop.py","file_name":"timeloop.py","file_ext":"py","file_size_in_byte":10616,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"}
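The WU-to-FW conversion in the record above is just a per-letter lookup in the wu2fw table; a compact standalone check (wu2fw copied from the record, the function mirroring convert_dataflow's logic):

wu2fw = {'P': 'R', 'Q': 'S', 'R': 'P', 'S': 'Q', 'C': 'K', 'K': 'N', 'N': 'C'}

def convert_dataflow(dataflow):
    # map the two dataflow dimensions of a WU problem onto their FW equivalents
    return ''.join(wu2fw[d] for d in dataflow[:2])

assert convert_dataflow('CK') == 'KN'
assert convert_dataflow('PQ') == 'RS'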
+{"seq_id":"15264573759","text":"from pyffi.formats.nif import NifFormat\nfrom os import path\nfrom sys import argv, exit, stdout\n\n# UIX_PATH = \"../../../Private/UIX/UIX FILES/Data Files/\"\n# UIX_PATH = \"../../../Private/UIX/UIX FILES/Data Files/Meshes/TOE/RedMoonGa01.NIF\"\n\n\ndef find_external_assets(data, assets):\n \"\"\" recursively find any external assets linked into a nif\"\"\"\n\n if isinstance(data, list):\n for node in data:\n if node.__class__.__name__ == 'NiNode':\n find_external_assets(node.children, assets)\n elif node.__class__.__name__ == 'NiTriShape':\n find_external_assets(node.properties, assets)\n else:\n find_external_assets(node, assets)\n\n if data.__class__.__name__ == 'NiSourceTexture':\n assets.append(data.file_name)\n elif data.__class__.__name__ == 'NiTexturingProperty':\n if data.has_base_texture:\n find_external_assets(data.base_texture.source, assets)\n if data.has_bump_map_texture:\n find_external_assets(data.bump_map_texture.source, assets)\n if data.has_dark_texture:\n find_external_assets(data.dark_texture.source, assets)\n if data.has_gloss_texture:\n find_external_assets(data.gloss_texture.source, assets)\n if data.has_glow_texture:\n find_external_assets(data.glow_texture.source, assets)\n if data.has_normal_texture:\n find_external_assets(data.normal_texture.source, assets)\n if data.has_detail_texture:\n find_external_assets(data.detail_texture.source, assets)\n if data.has_unknown_2_texture:\n find_external_assets(data.unknown_2_texture.source, assets)\n if data.has_decal_0_texture:\n find_external_assets(data.decal_0_texture.source, assets)\n if data.has_decal_1_texture:\n find_external_assets(data.decal_1_texture.source, assets)\n if data.has_decal_2_texture:\n find_external_assets(data.decal_2_texture.source, assets)\n if data.has_decal_3_texture:\n find_external_assets(data.decal_3_texture.source, assets)\n\n\ndef walk_nif(nif_path, use_stdout=True):\n if not path.exists(nif_path):\n exit(\"Path `{0}` not found.\".format(nif_path))\n\n all_assets = []\n for stream, data in NifFormat.walkData(nif_path):\n try:\n if use_stdout:\n print(stream.name, sep='', end=', ', file=stdout, flush=True)\n data.read(stream)\n assets = []\n find_external_assets(data.roots, assets)\n assets = set(assets) # remove duplicates\n assets_string = \"{0}\".format(b', '.join(assets).decode(encoding=\"ISO-8859-1\"))\n all_assets.append(assets_string)\n if use_stdout:\n print(assets_string, sep=', ', end='\\n', file=stdout, flush=True)\n except ValueError as ex:\n print(\"\\n Error with {0}: {1}\".format(stream.name, str(ex.args)), sep='', end='\\n', file=stdout, flush=True)\n except Exception as ex:\n print(ex)\n raise\n return all_assets\n\n\nif __name__ == \"__main__\":\n if len(argv) == 2:\n walk_nif(argv[1])\n else:\n exit(\"No path given.\")\n","repo_name":"OpenMW/UIX-R","sub_path":"nif_walker.py","file_name":"nif_walker.py","file_ext":"py","file_size_in_byte":3218,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"78"}
+{"seq_id":"45986727642","text":"from datetime import datetime\nfrom database import db\n\nclass Payment(db.Document):\n \"\"\"\n Payment class representing a payment made by a user to a vendor for a specific event.\n \n :param amount: The amount paid in the payment\n :type amount: int\n :param date: The date and time the payment was made\n :type date: datetime\n :param user: The user that made the payment\n :type user: ObjectIdField(User)\n :param vendor: The vendor that received the payment\n :type vendor: ObjectIdField(Vendor)\n :param event: The event the payment is for\n :type event: ObjectIdField(Event)\n \"\"\"\n user_id = db.ObjectIdField(required=True)\n vendor_id = db.ObjectIdField(required=True)\n event = db.ObjectIdField(required=True)\n amount = db.FloatField(min_value=0.0, max_value=999999.99,required=True)\n date = db.DateTimeField(default=datetime.utcnow, required=True)\n","repo_name":"mcmxciillan/planner-pp","sub_path":"planner-flask/models/payment.py","file_name":"payment.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"45660710402","text":"import os\nimport sys\n\ndef plus_minus(arr):\n\tcount = len(arr)\n\tpositve, negative, zero = 0, 0, 0\n\tfor x in arr:\n\t\tpositve += (1 if x > 0 else 0)\n\t\tnegative += (1 if x < 0 else 0)\n\t\tzero += (1 if x == 0 else 0)\n\treturn (positve/count, negative/count, zero/count)\n\n\nif __name__ == '__main__':\n\tarr = [-4, 3, -9, 0, 4, 1]\n\tresult = plus_minus(arr)\n\tprint(result)","repo_name":"hokagequan/hackrank","sub_path":"src/plus_minus.py","file_name":"plus_minus.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"41498981682","text":"import random\r\nfrom random import randint\r\n\r\nwith open('./racing_maps.txt') as f:\r\n racing_maps = f.read().splitlines()\r\n\r\nwith open('./arena_maps.txt') as f:\r\n arena_maps = f.read().splitlines()\r\n\r\nwith open('./figure_8_maps.txt') as f:\r\n figure_8_maps = f.read().splitlines()\r\n\r\n\r\nclass_restrictions = ['a','b','c','a','b','c','special']\r\nfigure_8_class_restrictions = ['a','b','c','special']\r\narena_restrictions = ['school bus','lawn mower','bumper car','honey pot']\r\nfigure_8_restrictions = ['school bus','school bus','school bus','school bus','motor home','sofa car','big rig']\r\nracing_restrictions = ['school bus','school bus','school bus','school bus','school bus','school bus','school bus','school bus','motor home','sofa car','bugzilla','big rig']\r\nlaps = ['5', '6', '7']\r\n\r\nx = 0\r\nwhile x <= 100:\r\n y = randint(0,100)\r\n\r\n if y > 90:\r\n z_map = random.choice(arena_maps)\r\n\r\n z_class_restriction = random.choice(class_restrictions)\r\n z_car_restriction = ''\r\n if z_class_restriction == 'special':\r\n z_class_restriction = ''\r\n z_car_restriction = random.choice(arena_restrictions)\r\n print('el_add=',z_map)\r\n print('el_gamemode=derby deathmatch')\r\n print('el_bots=',randint(10,20))\r\n print('el_car_class_restriction=',z_class_restriction)\r\n print('el_car_restriction=',z_car_restriction)\r\n print('')\r\n \r\n elif y < 20:\r\n z_map = random.choice(figure_8_maps)\r\n\r\n z_class_restriction = random.choice(figure_8_class_restrictions)\r\n z_car_restriction = ''\r\n if z_class_restriction == 'special':\r\n z_class_restriction = ''\r\n z_car_restriction = random.choice(figure_8_restrictions)\r\n \r\n print(\"el_add=\",z_map)\r\n print('el_gamemode=racing')\r\n print('el_laps=12')\r\n print('el_bots=24')\r\n print('el_car_class_restriction=',z_class_restriction)\r\n print('el_car_restriction=',z_car_restriction)\r\n print('')\r\n \r\n else:\r\n z_map = random.choice(racing_maps)\r\n\r\n z_class_restriction = random.choice(class_restrictions)\r\n z_car_restriction = ''\r\n if z_class_restriction == 'special':\r\n z_class_restriction = ''\r\n z_car_restriction = random.choice(racing_restrictions)\r\n \r\n print(\"el_add=\",z_map)\r\n print('el_gamemode=racing')\r\n print('el_laps=5')\r\n print('el_bots=',randint(6,20))\r\n print('el_car_class_restriction=',z_class_restriction)\r\n print('el_car_restriction=',z_car_restriction)\r\n print('')\r\n\r\n # racing_map = random.choice(racing_maps)\r\n # print(\"el_add=\",racing_map)\r\n # print(y)\r\n x += 1\r\n\r\n\r\n\r\n\r\n# Event Loop (el) settings.\r\n#-------------------------------------------------------------------------------\r\n# If enabled, server will automatically rotate pre-configured events.\r\n# Using \"el_add=trackname\" you can add as many events to the rotation as you wish.\r\n# Note that \"el_*\" parameters override corresponding global settings for the event.\r\n# Remove the first # from setup parameters to enable.\r\n# Use the console command /eventloop to enable/disable rotation.\r\n\r\n## Add first event to Loop\r\n#el_add=gravel1_main_loop\r\n#el_gamemode=racing\r\n#el_laps=3\r\n#el_bots=3\r\n#el_car_reset_disabled=0\r\n#el_wrong_way_limiter_disabled=0\r\n#el_car_class_restriction=a\r\n#el_car_restriction=\r\n#el_weather=\r\n\r\n## Add second event to Loop\r\n#el_add=tarmac1_main_circuit\r\n#el_gamemode=team 
race\r\n#el_num_teams=2\r\n#el_laps=3\r\n#el_bots=3\r\n#el_car_reset_disabled=0\r\n#el_wrong_way_limiter_disabled=0\r\n#el_car_class_restriction=a\r\n#el_car_restriction=\r\n#el_weather=\r\n\r\n## Add third event to Loop\r\n#el_add=speedway2_demolition_arena\r\n#el_gamemode=derby deathmatch\r\n#el_bots=3\r\n#el_car_reset_disabled=0\r\n#el_car_class_restriction=a\r\n#el_car_restriction=\r\n#el_weather=","repo_name":"TheLysdexicOne/wreckfest-server","sub_path":"eventloops/eventloop_creator.py","file_name":"eventloop_creator.py","file_ext":"py","file_size_in_byte":3892,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"78"}
+{"seq_id":"23396110202","text":"## class representing a NODE\nclass Node(object):\n def __init__(self, value):\n self.value = value\n self.next = None\n\n## Class representing Linked list\nclass LinkedList:\n def __init__(self, head=None):\n self.head = head\n pass\n \n def append(self, node):\n \"\"\"\n append a node to list\n \"\"\"\n current = self.head\n if self.head:\n while current.next:\n current = current.next\n current.next = node\n else:\n self.head = node\n \n def get_position(self, position):\n \"\"\"\n get element at specific index\n \"\"\"\n counter = 0\n current = self.head\n if not self.head or position < 0 or position > self.size():\n return None\n \n while current:\n if counter == position:\n return current\n current = current.next\n counter +=1\n return None\n \n def size(self):\n \"\"\"\n get the size of linked list\n \"\"\"\n i = 0\n if not self.head:\n return 0\n \n current = self.head\n while current:\n i += 1\n current = current.next\n \n return i\n \n def delete(self, value):\n \"\"\"\n delete the first occuring value\n \"\"\"\n if not self.head:\n return None\n current = self.head\n previous = None\n \n while current.value != value and current.next:\n previous = current\n current = current.next\n \n if current.value == value:\n if previous:\n previous.next = current.next\n else:\n self.head = current.next\n \n def insert(self, node, position):\n \"\"\"\n inserting element at particular location\n \"\"\"\n counter = 0\n current = self.head\n if position > 0:\n while current and counter < position:\n if counter == position - 1:\n node.next = current.next\n current.next = node\n current = current.next\n counter += 1\n elif position == 0:\n node.next = self.head\n self.head = node\n \n def __str__(self):\n if self.head:\n data = []\n current = self.head\n while current:\n data.append(str(current.value))\n current = current.next\n return \" -> \".join(data)\n else:\n return \"No element\"\n pass\n\n\nclass Stack:\n def __init__(self):\n self.stack = LinkedList()\n \n def echo(self):\n print(self.stack)\n pass\n \n def size(self):\n \"\"\"\n get the size of stack\n \"\"\"\n return self.stack.size()\n \n def empty(self):\n \"\"\"\n check if stack is empty\n \"\"\"\n return self.size() == 0\n \n def peek(self):\n \"\"\"\n get the top most element\n \"\"\"\n return self.stack.get_position(0).value\n \n def pop(self):\n \"\"\"\n remove element from top\n \"\"\"\n v = self.peek()\n self.stack.delete(v)\n return v\n \n def push(self, node):\n \"\"\"\n add element to top\n \"\"\"\n self.stack.insert(node, 0)\n pass\n \n \nmyStack = Stack()\n\n# creating values\nn1 = Node(10)\nn2 = Node(20)\nn3 = Node(30)\n\n# Operating\nprint(\"[1] Stack is empty\" if myStack.empty() else \"[1] Stack is not empty\")\n\nprint(\"[2] Pushing {} to stack\".format(n1.value))\nmyStack.push(n1)\nprint(\"[3] Pushing {} to stack\".format(n2.value))\nmyStack.push(n2)\n\nprint(\"[4] Stack is empty\" if myStack.empty() else \"[4] Stack is not empty\")\n\nprint(\"[5] Current Size of Stack is\", myStack.size())\n\nprint(\"[6] Peeking value\", myStack.peek())\n\nprint(\"[7] Poping value {}\".format(myStack.pop()))\n\nprint(\"[8] Current Size of Stack is\", myStack.size())\n\nprint(\"[9] Pushing {} to stack\".format(n2.value))\nmyStack.push(n3)\n\nprint(\"[!] 
The Stack is\")\nmyStack.echo()\n","repo_name":"sukhdeepg/Hacktoberfest","sub_path":"Python/Stack.py","file_name":"Stack.py","file_ext":"py","file_size_in_byte":4091,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"78"}
+{"seq_id":"446660346","text":"from datetime import timedelta\nimport os\nimport whisper\nimport argparse\n\nparser = argparse.ArgumentParser(description='Transcribe audio and create subtitles')\nparser.add_argument('path', type=str, help='path to the audio file')\nargs = parser.parse_args()\n\ndef create_model():\n return whisper.load_model(\"medium\", \"cpu\")\n\ndef transcribe_audio(path):\n model = create_model()\n return model.transcribe(path)\n\ndef create_transcript_file(transcription, path):\n base_path = os.path.splitext(path)[0]\n transcript_suffix = '_transcript.txt'\n transcript_path = base_path + transcript_suffix\n with open(transcript_path, \"w\", encoding='utf-8') as file:\n file.write(transcription['text'])\n\ndef create_subtitles(transcription, path):\n segments = transcription['segments']\n base_path = os.path.splitext(path)[0]\n subtitle_suffix = '_subtitles.srt'\n subtitle_path = base_path + subtitle_suffix\n with open(subtitle_path, 'w', encoding='utf-8') as srtFile:\n for segment in segments:\n start_time = timedelta(seconds=int(segment['start']))\n end_time = timedelta(seconds=int(segment['end']))\n text = segment['text'].lstrip()\n segment_id = segment['id'] + 1\n segment_text = f\"{segment_id}\\n{start_time} --> {end_time}\\n{text}\\n\\n\"\n srtFile.write(segment_text)\n\ntranscription = transcribe_audio(args.path)\ncreate_transcript_file(transcription, args.path)\ncreate_subtitles(transcription, args.path)","repo_name":"bartlomiejborzucki/transcript_whisper","sub_path":"transcript.py","file_name":"transcript.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"32902446461","text":"import fiona\nimport fiona.crs\nimport shapely\nimport rtree\nimport pyproj\nimport shapely.geometry as geom\nimport sys\nimport pandas as pd\nimport geopandas as gpd\nfrom pyspark import SparkContext\n\n\n\ndef getdict():\n import csv\n wordcount={}\n keyword=[]\n with open('drug_illegal.txt') as file:\n reader = csv.reader(file)\n for row in reader:\n keyword.append(row[0])\n with open('drug_sched2.txt') as file:\n reader = csv.reader(file)\n for row in reader:\n keyword.append(row[0])\n with open('tweets-100m.csv') as file:\n reader = csv.reader(file,delimiter='|') \n for row in reader:\n if len(row) < 7:\n continue\n if len(row[6])<3:\n continue\n flag=0\n for i in keyword:\n if i in row[6]:\n flag=1\n break\n if flag==1:\n row[6]=row[6].replace(',','')\n for i in row[6].split(' '):\n wordcount[i]=wordcount.get(i,0)+1\n return wordcount\n \n\n\n \ndef processwords(pid,records):\n import csv\n \n \n worddict=getdict()\n counts={}\n keyword=[]\n\n with open('drug_illegal.txt') as file:\n reader = csv.reader(file)\n for row in reader:\n keyword.append(row[0])\n with open('drug_sched2.txt') as file:\n reader = csv.reader(file)\n for row in reader:\n keyword.append(row[0])\n \n reader = csv.reader(records,delimiter='|') \n for row in reader:\n if len(row) < 7:\n continue\n if len(row[6].split(' '))<3:\n continue\n flag = 0\n fre=[]\n for i in keyword:\n if i in row[6]:\n flag=1\n break\n if(flag==1):\n row[6]=row[6].replace(',','')\n for i in row[6].split(' '):\n fre.append((i,worddict[i]))\n fre1=set(fre)\n a=[i[0] for i in sorted(fre1, key=lambda x : x[1])[0:3]]\n \n for i in a:\n counts[i]=counts.get(i,0)+1\n else:\n continue\n return counts.items()\n \nif __name__ == \"__main__\":\n output=sys.argv[2]\n tweetdata=sys.argv[1]\n \n sc = SparkContext()\n tweet = sc.textFile(tweetdata).cache() \n freq = tweet.mapPartitionsWithIndex(processwords).top(100, key=lambda x: x[1])\n sc.parallelize(freq).saveAsTextFile(output)\n","repo_name":"jianangong/bdm_final","sub_path":"extra2.py","file_name":"extra2.py","file_ext":"py","file_size_in_byte":2481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"858943191","text":"import random as ran\nimport numpy as np\nfrom math import sqrt\nimport cProfile\nimport time\nfrom functools import cache\nimport os.path\nimport sys\nimport gc\n\n\nn_m = int(sys.argv[1])\n\nr_mm = float(sys.argv[2])\nr_m = r_mm/1e3\n# while True: #choose monomer number \n# try:\n# n_m = int(input('Enter the number of monomers: '))\n# except ValueError:\n# # Not a valid number\n# print('You must enter a vaild number')\n# n_m = int(input('Enter the number of monomers: '))\n# else:\n# # No error; stop the loop\n# break\n\n# while True: #choose monomer size\n# try:\n# r_m = float(input('Enter size of monomers in mm e.g 0.01: '))\n# except ValueError:\n# # Not a valid number\n# print('You must enter a vaild number')\n# r_m = float(input('Enter the number of monomers: '))\n# else:\n# # No error; stop the loop\n# break\n\n#setup arrays and initialise values \n\nx_a, y_a, z_a = [],[],[]\n\nWavelength = 0.870\nRe = 3.408100\nIm = 5.6983002E-02\n\n#n_m = number of monomeres\nr_c = 1 \n#r_m = 0.01\n\nfac = (r_c + r_m) * (1+1e-6)\n\nsave_path = '/Users/raomorusupalli/Documents/UNI/Honours/project/GMM/'\n\nname_of_file ='aggregate_'+str(n_m)\n\ncompleteName = os.path.join(save_path, name_of_file+\".k\")\n\nstart_time = time.time()\nnp.seterr(invalid='ignore')\n\n\n@cache\ndef main():\n count = 0\n tried =0\n actp = []\n t1 = time.time()\n with open(completeName,'w') as f:\n f.write(str(Wavelength)+'\\n')\n f.write(str(n_m+1)+'\\n')\n f.write(\" \".join([str(0.), str(0.), str(0.), str(r_c), str(Re), str(Im), '\\n'])) #initial position of centre monomer\n while count < n_m: # i and j are indicies for 2 points to compare, j starts at i + 1 because all values of j=0, ..., i will be compared are a different iteration\n \n\n pos = ran.uniform(0.,1.)\n z = 2.0 * pos - 1.0\n r = np.sqrt(1-z*z)\n\n pos = ran.uniform(0.,1.)\n ARGMT = np.pi*(2.0*pos- 1.0 )\n x = r * np.cos(ARGMT)\n y = r * np.sin(ARGMT)\n\n x *= fac\n y *= fac\n z *= fac\n posvec = np.array([x,y,z])\n overlap = False\n\n \n if not overlap:\n for p in actp:\n # remove the doubling of the posvec[.]-p[.] in the below statement\n # find a a way to make the staements absolute ie a == b instead of a < b.\n gc.disable() \n xt = np.abs(posvec[0]-p[0]) - 2*r_m\n yt = np.abs(posvec[1]-p[1]) - 2*r_m\n zt = np.abs(posvec[2]-p[2]) - 2*r_m\n rt = (posvec[0]-p[0])**2+(posvec[1]-p[1])**2+(posvec[2]-p[2])**2 - 4*r_m**2\n stat = [abs(xt) != xt, abs(yt) != yt,abs(zt) != zt,abs(rt) != rt]\n if all(stat):\n overlap = True\n break\n \n # d = np.sqrt(np.dot(posvec,actp[-1]))\n # if fac/d > 1:\n # overlap = True\n # break \n \n \n if not overlap:\n gc.disable() \n actp.append(posvec)\n\n #print (posvec)\n count+=1\n # t1 = time.time()\n print(count) \n else:\n tried+=1\n \n \n for val in actp:\n f.write('{} {} {}'.format(val[0], val[1], val[2]) +' '+str(r_m)+' '+str(Re)+' '+str(Im)+'\\n') \n f.close()\n\nif __name__ == '__main__':\n\n cProfile.run('main()') \n print(\"--- %s seconds ---\" % (time.time() - start_time))","repo_name":"raomoomoo/GMM-Aggregates","sub_path":"agg_generator.py","file_name":"agg_generator.py","file_ext":"py","file_size_in_byte":3727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"72000573053","text":"populations_2020 = [\n {\"Name\": \"Honduras\", \"Population\": 9_904_607},\n {\"Name\": \"United Arab Emirates\", \"Population\": 9_890_402},\n {\"Name\": \"Vietnam\", \"Population\": 97_338_579},\n {\"Name\": \"Hungary\", \"Population\": 9_660_351},\n {\"Name\": \"Austria\", \"Population\": 9_006_398},\n {\"Name\": \"Fiji\", \"Population\": 896_445},\n {\"Name\": \"Papua New Guinea\", \"Population\": 8_947_024},\n {\"Name\": \"Switzerland\", \"Population\": 8_654_622},\n {\"Name\": \"Turkey\", \"Population\": 84_339_067},\n {\"Name\": \"Iran\", \"Population\": 83_992_949},\n {\"Name\": \"Germany\", \"Population\": 83_783_942},\n {\"Name\": \"Hong Kong\", \"Population\": 7_496_981},\n {\"Name\": \"Paraguay\", \"Population\": 7_132_538},\n {\"Name\": \"United Kingdom\", \"Population\": 67_886_011},\n {\"Name\": \"Lebanon\", \"Population\": 6_825_445},\n {\"Name\": \"Cayman Islands\", \"Population\": 65_722},\n {\"Name\": \"Italy\", \"Population\": 60_461_826},\n {\"Name\": \"Congo\", \"Population\": 5_518_087},\n {\"Name\": \"Kenya\", \"Population\": 53_771_296},\n]\n","repo_name":"FelipeD97/100DaysofCode","sub_path":"Day 14 - higher_lower_game/game_data.py","file_name":"game_data.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"73978174651","text":"\"CRB of orbital method using iSCAT\"\nimport matplotlib\nfrom static_crb.CRB import *\nimport rsmf\n\ncolor_list = ['#1d6996', '#73af48', '#edad08', '#e17c05', '#cc503e', '#94346e', '#6f4070']\nplt.rcParams['axes.prop_cycle'] = plt.cycler(color=color_list)\n\ncol_width = 246 # For journal draft (JCP) - single column\n# col_width = 418 # I don't know anymore\n\nformatter = rsmf.CustomFormatter(columnwidth=col_width * 0.01389, fontsizes=10,\n pgf_preamble=r'\\usepackage{lmodern} \\usepackage[utf8x]{inputenc}')\n\n# matplotlib.rcParams.update({'font.size': formatter.fontsizes.footnotesize})\nmatplotlib.rcParams.update({'font.size': 8})\nmatplotlib.rcParams.update({'font.family': 'serif'})\n\ndill.settings['recurse'] = True\nfile_orbital = 'pickles/crb_lambda_orbital_iscat'\nfile_orbital_20 = 'pickles/crb_lambda_orbital_iscat_20'\nfile_orbital_40 = 'pickles/crb_lambda_orbital_iscat_40'\nfile_orbital_80 = 'pickles/crb_lambda_orbital_iscat_80'\n\ncompute_crb = False\n\nif compute_crb:\n # orbital = Orbital(file_orbital, iscat=True)\n orbital = Orbital(file_orbital_20, iscat=True, numpoints=20)\n orbital = Orbital(file_orbital_40, iscat=True, numpoints=40)\n orbital = Orbital(file_orbital_80, iscat=True, numpoints=80)\n\ny = np.linspace(-300, 300, num=100)\n\ncontrast = 0.01\nn = 100\nnscat = 1 * n\nnsigma = np.sqrt(2 * nscat / contrast)\n\nfileobject_orbital = open(file_orbital, 'rb')\ncrb_lambda_orbital = dill.load(fileobject_orbital)\nfileobject_orbital_20 = open(file_orbital_20, 'rb')\ncrb_lambda_orbital_20 = dill.load(fileobject_orbital_20)\nfileobject_orbital_40 = open(file_orbital_40, 'rb')\ncrb_lambda_orbital_40 = dill.load(fileobject_orbital_40)\nfileobject_orbital_80 = open(file_orbital_80, 'rb')\ncrb_lambda_orbital_80 = dill.load(fileobject_orbital_80)\n\n# x, y, L, N, w, amp\ncrb20 = crb_lambda_orbital_20(0, y, 500, nscat, 353, 1, nsigma)\ncrb40 = crb_lambda_orbital_40(0, y, 500, nscat, 353, 1, nsigma)\ncrb60 = crb_lambda_orbital(0, y, 500, nscat, 353, 1, nsigma)\ncrb80 = crb_lambda_orbital_80(0, y, 500, nscat, 353, 1, nsigma)\n\nfig = formatter.figure(width_ratio=1.0, aspect_ratio=0.6)\nax1 = fig.add_subplot()\n\nax1.plot(y, crb20)\nax1.plot(y, crb40)\nax1.plot(y, crb60)\nax1.plot(y, crb80)\n\nax1.text(190, 85, '20 points', fontsize=10, color='C0')\nax1.text(190, 61, '40 points', fontsize=10, color='C1')\nax1.text(190, 50, '60 points', fontsize=10, color='C2')\nax1.text(190, 43, '80 points', fontsize=10, color='C3')\n\nax1.set_xlabel('Position (nm)')\nax1.set_ylabel('CRB (nm)')\n\nfor line in ax1.lines:\n line.set_lw(1.3)\n\nplt.tight_layout()\nplt.savefig('../out/orbital_crb_iscat_numpoints.pdf')\n","repo_name":"bvanheerden/spt_sim","sub_path":"static_crb/compare_numpoints.py","file_name":"compare_numpoints.py","file_ext":"py","file_size_in_byte":2623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"1442692025","text":"# -*- coding: utf-8 -*-\n#\n# (c) 2016 Boundless, http://boundlessgeo.com\n# This code is licensed under the GPL 2.0 license.\n#\n'''\nThis module provides methods to export layers so they can be used as valid data\nfor uploading to GeoServer.\n'''\n\nfrom builtins import str\nfrom qgis.core import *\nfrom geoserverexplorer.qgis import utils\nimport os\nfrom qgis.PyQt import QtCore\nfrom qgis.utils import iface\nfrom qgis.gui import QgsMessageBar\nfrom qgiscommons2.files import tempFilenameInTempFolder\n\ndef exportVectorLayer(layer):\n '''accepts a QgsVectorLayer or a string with a filepath'''\n settings = QtCore.QSettings()\n systemEncoding = settings.value( \"/UI/encoding\", \"System\" )\n if isinstance(layer, QgsMapLayer):\n filename = str(layer.source())\n destFilename = str(layer.name())\n else:\n filename = str(layer)\n destFilename = str(os.path.splitext(os.path.basename(filename))[0])\n if (not filename.lower().endswith(\"shp\")):\n if not isinstance(layer, QgsMapLayer):\n layer = QgsVectorLayer(filename, \"layer\", \"ogr\")\n if not layer.isValid() or layer.type() != QgsMapLayer.VectorLayer:\n raise Exception (\"Error reading file {} or it is not a valid vector layer file\".format(filename))\n output = tempFilenameInTempFolder(destFilename + \".shp\")\n QgsVectorFileWriter.writeAsVectorFormat(layer, output, systemEncoding, layer.crs(), \"ESRI Shapefile\")\n QgsMessageLog.logMessage(\"Layer '%s' had to be exported to shapefile for importing. Data might be lost.\" % layer.name(),\n level = Qgis.Warning)\n return output\n else:\n return filename\n\n\n\ndef exportRasterLayer(layer):\n if (not str(layer.source()).lower().endswith(\"tif\") ):\n filename = str(layer.name())\n output = tempFilenameInTempFolder(filename + \".tif\")\n writer = QgsRasterFileWriter(output)\n writer.setOutputFormat(\"GTiff\");\n writer.writeRaster(layer.pipe(), layer.width(), layer.height(), layer.extent(), layer.crs())\n del writer\n return output\n else:\n return str(layer.source())\n\n\n\n\n\n\n","repo_name":"planetfederal/qgis-geoserver-plugin","sub_path":"geoserverexplorer/qgis/exporter.py","file_name":"exporter.py","file_ext":"py","file_size_in_byte":2163,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"78"}
+{"seq_id":"42139126488","text":"# Solution by PauloBA\n\ndef find_difference(a, b):\n vA = 1\n vB = 1\n for i in a:\n vA = vA * i\n for i in b:\n vB = vB * i\n ans = abs(vA -vB)\n return ans","repo_name":"PauloBernal/Codewars","sub_path":"kata/cuboids.py","file_name":"cuboids.py","file_ext":"py","file_size_in_byte":180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"22962232032","text":"def decimal_to_binary(n: int):\n \"\"\"\n Transforms a possitive decimal integer into a binary number\n n -- possitive integer number\n \"\"\"\n if n == 0:\n return '0'\n b = []\n while n > 0:\n remainder = n % 2\n n = n // 2\n b.append(str(remainder))\n return ''.join(b[::-1])\n\n\ndef max_number_of_consecutive_ones(binary: str):\n \"\"\"\n Returns the maximum number of consecutive ones in a binary number\n \"\"\"\n return max([len(i) for i in binary.split('0')])\n\n\nif __name__ == \"__main__\":\n print('-' * 60)\n print('Maximum number of consecutive ones in a binary number')\n print('-' * 60)\n numbers = list(range(10)) + list(range(20,200,20))\n for n in numbers:\n b = decimal_to_binary(n)\n res = f'{\" \" * (6-len(str(n)))}({n})_10 = ({b})_2{\" \" * (10-len(b))}'\n res += f' --> max consecutive ones = {max_number_of_consecutive_ones(b)}'\n print(res)\n\n print()\n print('-' * 60)\n print('Convert decimal numbers to binary')\n print('-' * 60)\n while True:\n # decimal number\n number = int(input(\"Enter any decimal number: \"))\n\n print(' -> Binary equivalent: ', decimal_to_binary(number))","repo_name":"daalgi/algorithms","sub_path":"maths/decimal_to_binary.py","file_name":"decimal_to_binary.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
+{"seq_id":"10065199326","text":"import streamlit as st\r\nimport pickle\r\nimport pandas as pd\r\nimport requests\r\n\r\ndef fetch_poster(movie_id):\r\n api_key=\"...........\" # use your api key \r\n response=requests.get(\"https://api.themoviedb.org/3/movie/{}?api_key={}\".format(movie_id,api_key))\r\n data=response.json()\r\n print(data['poster_path'])\r\n return \"http://image.tmdb.org/t/p/w500\"+data['poster_path']\r\n\r\n# for list of movies for select\r\nmovies_dict=pickle.load(open('movie_dict.pkl','rb'))\r\nsimilarity=pickle.load(open('similarity.pkl','rb'))\r\nmovies_name=pd.DataFrame(movies_dict)\r\n\r\ndef recommendation(movie):\r\n index=movies_name[movies_name['title']==movie].index[0]\r\n distance=similarity[index]\r\n sorted_list=sorted(enumerate(distance),reverse=True,key=lambda x:x[1])[1:6]\r\n\r\n recommended_movie=[]\r\n recommended_poster=[]\r\n for i in sorted_list:\r\n movie_id=movies_name.iloc[i[0]].movie_id\r\n # for fetching movie poster we use movie_id in TMDB API\r\n recommended_poster.append(fetch_poster(movie_id))\r\n recommended_movie.append(movies_name.iloc[i[0]].title)\r\n \r\n return recommended_movie,recommended_poster\r\n\r\n\r\nst.title(\"Movie Recommender System\")\r\noption = st.selectbox(\r\n 'Select or Type Movie Name!',\r\n (movies_name['title'].values))\r\n\r\nif st.button('Recommend'):\r\n a,posters=recommendation(option)\r\n col1, col2, col3,col4,col5= st.columns(5)\r\n with col1:\r\n st.text(a[0])\r\n st.image(posters[0])\r\n with col2:\r\n st.text(a[1])\r\n st.image(posters[1])\r\n with col3:\r\n st.text(a[2])\r\n st.image(posters[2])\r\n with col4:\r\n st.text(a[3])\r\n st.image(posters[3])\r\n with col5:\r\n st.text(a[4])\r\n st.image(posters[4])\r\n\r\n\r\n","repo_name":"9771-raj/Movie_Recommendation_System","sub_path":"movie_web.py","file_name":"movie_web.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
+{"seq_id":"33066348296","text":"from turtle import Turtle\nALIGNMENT = \"center\"\nFONT = (\"Arial\", 24, \"normal\")\n\n\nclass Scoreboard(Turtle):\n\n def __init__(self):\n super().__init__()\n self.penup()\n self.hideturtle()\n self.goto(0, 275)\n self.score = 0\n self.color(\"white\")\n self.scoreboard_refresh()\n\n def scoreboard_refresh(self):\n self.write(f\"Score = {self.score}\", False, align=ALIGNMENT, font=FONT)\n\n def update_score(self):\n self.score += 1\n self.clear()\n self.scoreboard_refresh()\n\n def game_over(self):\n self.goto(0,0)\n self.write(\"GAME OVER\", False, align=ALIGNMENT, font=FONT)\n","repo_name":"YTCYC/Python_OOP_Projects","sub_path":"snake-game/scoreboard.py","file_name":"scoreboard.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"30386212462","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def reverseKGroup(self, head: Optional[ListNode], k: int) -> Optional[ListNode]:\n if head is None or head.next is None or k < 1:\n return head\n ### If the code didn't return in the above statement then \n ### The Linkedlist has atleast 2 nodes & k >= 2 \n \n totalLength = 0\n cur = head\n while cur:\n totalLength += 1\n cur = cur.next\n \n totalIterTimes = totalLength // k\n \n prev = None\n cur = head\n StartInprevGroup = None\n StartIncurGroup = head\n \n isFirstTime = True\n while cur and totalIterTimes:\n StartIncurGroup = cur\n prev = None\n for i in range(k):\n ### Save pointer to next node\n nxt = cur.next\n \n ### Move connection from cur -> next to cur -> prev\n cur.next = prev\n \n ### Move all the pointers to the right by 1 place\n prev = cur\n cur = nxt\n \n if isFirstTime:\n head = prev\n isFirstTime = False\n else:\n StartInprevGroup.next = prev\n StartInprevGroup = StartIncurGroup\n totalIterTimes -= 1\n \n StartInprevGroup.next = cur\n \n return head\n","repo_name":"krishnasaiv/My-LeetCode-Journey","sub_path":"25-reverse-nodes-in-k-group/25-reverse-nodes-in-k-group.py","file_name":"25-reverse-nodes-in-k-group.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"27424035502","text":"# !/usr/bin/python\n# coding=UTF-8\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\nimport pandas as pd\npd.set_option('display.max_columns',30)\n\n\n# 学生基本数据信息(本科生) ok\nbks_xsjbsjxx = pd.read_csv('./process_data/droped_bks_xsjbsjxx_out.csv')\n# # 学籍基本数据信息(本科生)\n# bks_xjjbsjxx = pd.read_csv('./bks_xjjbsjxx_out.csv')\n# 本科生成绩信息\n# bks_cjxx = pd.read_csv('./droped_bks_cjxx_out.csv')\n\n# # 排课数据信息(本科生) ok\n# bks_pksjxx = pd.read_csv('./bks_pksjxx_out.csv')\n\n# # 课程数据信息(本科生) ok\n# bks_kcsjxx = pd.read_csv('./bks_kcsjxx_out.csv')\n\n# 一卡通消费日志:YKT_JYRZ ok\nykt_jyrz = pd.read_csv('./新增数据/merge_ykt_jyrz.csv', names=['xh','jylx','jyje','jyrq','jysj','jydd','shdm','shmc','zdjh','ljykcs','jyye'])\n\n# label标签\ntrain_label = pd.read_csv('./process_data/add_data_2_train_label.csv')\n\ny = train_label['is_poor']\n\n\ndef get_feature(ykt_jyrz, label):\n for feature in ykt_jyrz.columns[1:]:\n if ykt_jyrz[feature].dtype == 'object':\n label = label.merge(ykt_jyrz.groupby(by='xh')[feature].count().reset_index().rename(columns = {feature:'count_'+ feature}), how='left', on='xh')\n label = label.merge(ykt_jyrz.groupby(by='xh')[feature].nunique().reset_index().rename(columns = {feature:'nunique_'+ feature}), how='left', on='xh')\n else:\n label =label.merge(ykt_jyrz.groupby(['xh'])[feature].count().reset_index().rename(columns = {feature:'count_'+ feature}),on='xh',how='left')\n label =label.merge(ykt_jyrz.groupby(['xh'])[feature].nunique().reset_index().rename(columns = {feature:'nunique_'+ feature}),on='xh',how='left')\n label =label.merge(ykt_jyrz.groupby(['xh'])[feature].mean().reset_index().rename(columns = {feature:'mean_'+ feature}),on='xh',how='left')\n label =label.merge(ykt_jyrz.groupby(['xh'])[feature].std().reset_index().rename(columns = {feature:'std_'+ feature}),on='xh',how='left')\n label =label.merge(ykt_jyrz.groupby(['xh'])[feature].max().reset_index().rename(columns = {feature:'max_'+ feature}),on='xh',how='left')\n label =label.merge(ykt_jyrz.groupby(['xh'])[feature].min().reset_index().rename(columns = {feature:'min_'+ feature}),on='xh',how='left')\n label =label.merge(ykt_jyrz.groupby(['xh'])[feature].sum().reset_index().rename(columns = {feature:'sum_'+ feature}),on='xh',how='left')\n label =label.merge(ykt_jyrz.groupby(['xh'])[feature].skew().reset_index().rename(columns = {feature:'skew_'+ feature}),on='xh',how='left')\n return label\n\n# 提取特征\ntrain_valid_data = get_feature(ykt_jyrz, train_label)\nprint(train_valid_data.shape) # (24690, 52)\n\n\ntrain_valid_data.to_csv('./process_data/第一轮特征.csv', index=None)","repo_name":"Andrewsunning/XinHuaSanCup_top1","sub_path":"code/BiXuanTi_code/第一轮特征.py","file_name":"第一轮特征.py","file_ext":"py","file_size_in_byte":2807,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"}
+{"seq_id":"6493468434","text":"from nltk.probability import FreqDist\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom wordcloud import WordCloud, STOPWORDS\npd.set_option('display.max_columns', None)\npd.set_option('display.max_rows', None)\n\ntext = pd.read_csv('/Users/haven/galvanize/capstone_2/tweets_only.csv')\n\ntext['totalwords'] = text['text'].str.split().str.len()\n\ntext['hashtags'] = text['text'].str.count('#')\ntext['attention'] = text['text'].str.count('@')\n\ndef clean_data(dataframe,col):\n '''\n Removes punctuation and import errors from dataframe\n\n INPUT: DataFrame (df)\n Column name to clean (string)\n\n OUTPUT: Cleaned DataFrame Column (series)\n\n '''\n punctuation = '!\"$%&()*+,-./:;<=>?[\\]^_`{|}~'\n import_errors = ['_„Ž','_„ñ','_ã_','_„','ñ','ñ','ð']\n df2 = dataframe.copy()\n for e in import_errors:\n df2[col] = df2[col].str.replace(e,'')\n for p in punctuation:\n df2[col] = df2[col].str.replace(p,'')\n return df2[col]\n\ntext['text'] = clean_data(text, 'text')\ntext.text = text.text.astype(str).str.lower()\n\ntext.to_csv('text_info.csv')\n\ncorpus = text['text']\n#\n# words = []\n# tokenized_words = []\n#\n# for doc in corpus:\n# words += doc.split()\n# tokenized_words += word_tokenize(str(words))\n\ndef make_wordcloud(data):\n stopwords = set(STOPWORDS)\n # iterate through the csv file\n for val in text.text:\n\n # typecaste each val to string\n val = str(val)\n\n # split the value\n tokens = val.split()\n\n # Converts each token into lowercase\n for i in range(len(tokens)):\n tokens[i] = tokens[i].lower()\n\n comment_words = ' '\n for i in range(5000):\n comment_words+=(np.random.choice(text.text.values, replace=False))\n\n\n wordcloud = WordCloud(width = 800, height = 800,\n background_color ='white',\n stopwords = stopwords,\n min_font_size = 10).generate(comment_words)\n\n # plot the WordCloud image\n plt.figure(figsize = (8, 8), facecolor = None)\n plt.imshow(wordcloud)\n plt.axis(\"off\")\n plt.tight_layout(pad = 0)\n\n plt.show()\n","repo_name":"HM618/Capstone-2","sub_path":"capstone_2/src/text_level.py","file_name":"text_level.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"18137671297","text":"import boto3\n\n# CIDR for VPC and Subnets\ncidr_vpc = '10.0.0.0/16'\ncidr_sn_public = '10.0.1.0/24'\ncidr_sn_private = '10.0.2.0/24'\n\n# crete an ec2 ressource object\nec2 = boto3.resource('ec2')\n\n# create a VPC\nvpc = ec2.create_vpc(CidrBlock=cidr_vpc)\n\n# assign a name to the VPC\nvpc.create_tags(Tags=[{\"Key\": \"Name\", \"Value\": \"my_vpc\"}])\n\nvpc.wait_until_available()\n\n# create an IGW and attach it to the VPC\nigw = ec2.create_internet_gateway()\nigw.create_tags(Tags=[{\"Key\": \"Name\", \"Value\": \"my_igw\"}])\nvpc.attach_internet_gateway(InternetGatewayId=igw.id)\n\n# identify the main route table\nmain_vpc_rt = []\nfor route_table in vpc.route_tables.all():\n for association in route_table.associations:\n if association.main:\n main_vpc_rt.append(route_table)\n# main_vpc_rt = vpc.route_tables.filter(Filters=[{'Name': 'association.main', 'Values': [\"true\"]}])\nrt_main = main_vpc_rt[0]\nrt_main.create_tags(Tags=[{\"Key\": \"Name\", \"Value\": \"my_rt_private\"}])\n\n# create a route table for the public subnet \nrt_public = vpc.create_route_table()\nrt_public.create_tags(Tags=[{\"Key\": \"Name\", \"Value\": \"my_rt_public\"}])\n\n# add a public route to the public route table\nroute_public = rt_public.create_route(DestinationCidrBlock='0.0.0.0/0', GatewayId=igw.id)\n\n# create a public subnet\nsn_public = ec2.create_subnet(CidrBlock=cidr_sn_public, VpcId=vpc.id)\nsn_public.create_tags(Tags=[{\"Key\": \"Name\", \"Value\": \"my_sn_public\"}])\n\n# create a private subnet\nsn_private = ec2.create_subnet(CidrBlock=cidr_sn_private, VpcId=vpc.id)\nsn_private.create_tags(Tags=[{\"Key\": \"Name\", \"Value\": \"my_sn_private\"}])\n\n# attach the route tables to the subnets\nrt_main.associate_with_subnet(SubnetId=sn_private.id)\nrt_public.associate_with_subnet(SubnetId=sn_public.id)\n","repo_name":"antoine-hochart/aws","sub_path":"sdk_for_python/files/aws_sdk_vpc.py","file_name":"aws_sdk_vpc.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"22281285431","text":"import cv2\nimport numpy as np\nimport matplotlib\nfrom PIL import Image\nfrom matplotlib import pyplot as plt\nfrom skimage.util import random_noise\nfrom skimage import img_as_ubyte\nfrom skimage.filters import roberts, sobel, prewitt, laplace\nfrom closedcv.filters import *\n\nmatplotlib.rcParams[\"backend\"] = \"TkAgg\"\nnp.set_printoptions(threshold=np.inf)\n\n\nif __name__==\"__main__\":\n img = cv2.imread('./input/img.jpeg', 0)\n \n # Apply noise to input image\n gauss = random_noise(img, mode='gaussian', seed=None, clip=True)\n gauss = img_as_ubyte(gauss)\n dir_ = './res/'\n cv2.imwrite(dir_ + 'gauss.jpeg', gauss)\n\n Q = [-1, 0, 1]\n for q in Q:\n cntrharmonic = contraharmonic_filter(gauss, q,(3,3))\n cv2.imwrite(dir_ + 'contraharmonic_Q_' + str(q) + '.jpg', cntrharmonic)\n \n for q in Q:\n gauss_blur = cv2.GaussianBlur(gauss, (3,3), q)\n cv2.imwrite(dir_ + 'gaussian_Q_' + str(q) + '.jpg', gauss_blur)\n\n # Median, weighted median, rang and Winner filtering\n cv2.imwrite(dir_ + 'median.jpg', cv2.medianBlur(gauss, 1))\n \n cv2.imwrite(dir_ + 'weighted_median.jpg', cv2.medianBlur(gauss, 3))\n \n cv2.imwrite(dir_ + 'wiener.jpg', wiener_filter(gauss, (3,3)))\n \n cv2.imwrite(dir_ + 'rank.jpg', rank_filter(gauss, -1, (3,3)))\n \n #cv2.imwrite(dir_ + 'adaptive_median.jpg', adaptive_median_filter(gauss, (3,3), 5, 5))\n \n # Edge detectors \n #roberts_ = img_as_ubyte(roberts(img))\n cv2.imwrite(dir_ + 'roberts.jpg', img_as_ubyte(roberts(img)))\n cv2.imwrite(dir_ + 'sobel.jpg', img_as_ubyte(sobel(img)))\n cv2.imwrite(dir_ + 'prewitt.jpg', img_as_ubyte(prewitt(img)))\n cv2.imwrite(dir_ + 'laplace.jpg', img_as_ubyte(laplace(img)))\n cv2.imwrite(dir_ + 'canny.jpg', cv2.Canny(img, 100, 200))\n","repo_name":"AlexKaravaev/ifmo","sub_path":"bachelor/CV/lab3/lab.py","file_name":"lab.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
+{"seq_id":"3602727181","text":"import os\r\nfrom numpy import std\r\nimport wget\r\nimport zipfile\r\nimport subprocess\r\nimport shutil\r\n\r\ntry:\r\n os.mkdir(\"bin\", mode=777)\r\n os.mkdir(\"YT-CORPUS\", mode=777)\r\n os.mkdir(\"temp_yt\", mode=777)\r\n\r\nexcept FileExistsError:\r\n pass\r\n\r\nif os.name == 'posix':\r\n subprocess.call(['apt', 'install', 'ffmpeg', 'firefox', 'firefox-geckodriver', '-y'], \r\n stdout=subprocess.DEVNULL,\r\n stderr=subprocess.STDOUT)\r\n\r\n subprocess.call([\r\n 'cp', '/usr/lib/geckodriver', '/usr/bin'\r\n ],\r\n stdout=subprocess.DEVNULL,\r\n stderr=subprocess.STDOUT)\r\n\r\n subprocess.call(['cp', '/usr/lib/geckodriver', '/usr/bin'],\r\n stdout=subprocess.DEVNULL,\r\n stderr=subprocess.STDOUT)\r\n\r\nelif os.name == 'nt':\r\n filename = wget.download(\r\n \"https://github.com/BtbN/FFmpeg-Builds/releases/download/latest/ffmpeg-master-latest-win64-gpl.zip\"\r\n )\r\n\r\n try:\r\n os.remove(\"ffmpeg.zip\")\r\n except FileNotFoundError:\r\n pass\r\n \r\n os.rename(\r\n filename, \r\n \"ffmpeg.zip\"\r\n )\r\n \r\n\r\n archive = zipfile.ZipFile('ffmpeg.zip')\r\n for file in archive.namelist():\r\n if file.startswith('ffmpeg-master-latest-win64-gpl/bin/'):\r\n filename = os.path.basename(file)\r\n source = archive.open(file)\r\n \r\n if not filename:\r\n continue\r\n\r\n target = open(os.path.join(f'bin/{filename}'), \"wb\")\r\n\r\n with source, target:\r\n shutil.copyfileobj(source, target)\r\n\r\n target.close()\r\n source.close()\r\n\r\n archive.close()\r\n\r\n if os.path.exists(\"bin/geckodriver.exe\") == False:\r\n filename = wget.download(\r\n \"https://github.com/mozilla/geckodriver/releases/download/v0.31.0/geckodriver-v0.31.0-win64.zip\"\r\n )\r\n\r\n try:\r\n os.remove(\"gecko.zip\")\r\n except FileNotFoundError:\r\n pass\r\n\r\n os.rename(filename, \"gecko.zip\")\r\n archive = zipfile.ZipFile('gecko.zip')\r\n archive.extractall('bin/')\r\n archive.close()\r\n\r\n \r\n os.remove(\"gecko.zip\")\r\n os.remove(\"ffmpeg.zip\")\r\n\r\n","repo_name":"dellano54/youtube-speech-corpus","sub_path":"initilizer.py","file_name":"initilizer.py","file_ext":"py","file_size_in_byte":2131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"71921078011","text":"import pandas as pd\nimport numpy as np\nfrom algorithms.Algorithm import Algorithm\nfrom datetime import datetime, timedelta\n\n\nclass Gini(Algorithm):\n\n def __init__(self, risk_score, model_name):\n super().__init__( risk_score, model_name )\n\n def get_optimal_portfolio(self):\n selected_prices_value = self.prices_df[self.selected_assets].dropna()\n num_portfolios = 1000\n years = len(selected_prices_value) / 253\n starting_value = selected_prices_value.iloc[0, :]\n ending_value = selected_prices_value.iloc[len(selected_prices_value) - 1, :]\n total_period_return = ending_value / starting_value\n annual_returns = (total_period_return ** (1 / years)) - 1\n port_returns = []\n port_gini_coefficient = []\n sharpe_ratio = []\n stock_weights = []\n num_assets = len(self.selected_assets)\n np.random.seed(101)\n\n for single_portfolio in range(num_portfolios):\n weights = np.random.random(num_assets)\n weights /= np.sum(weights)\n returns = np.dot(weights, annual_returns)\n gini_coefficient = (selected_prices_value.mad()).mad()\n sharpe = returns / gini_coefficient\n sharpe_ratio.append(sharpe)\n port_returns.append(returns * 100)\n port_gini_coefficient.append(gini_coefficient * 100)\n stock_weights.append(weights)\n portfolio = {'Returns': port_returns,\n 'Volatility': port_gini_coefficient,\n 'Sharpe Ratio': sharpe_ratio}\n for i, symbol in enumerate(self.selected_assets):\n portfolio[symbol] = [Weight[i] for Weight in stock_weights]\n df = pd.DataFrame(portfolio)\n columns = ['Returns', 'Volatility', 'Sharpe Ratio'] + [stock for stock in self.selected_assets]\n df = df[columns]\n best_sharpe_portfolio = df.loc[df['Sharpe Ratio'] == df['Sharpe Ratio'].max()]\n sharpe_portfolio = pd.DataFrame(columns=['Ticker', 'Weight'])\n for i in range(len(self.selected_assets)):\n ticker = self.selected_assets[i]\n weight = best_sharpe_portfolio.loc[:, ticker].iloc[0]\n sharpe_portfolio = sharpe_portfolio.append({'Ticker': ticker, 'Weight': weight}, ignore_index=True)\n sharpe_portfolio = sharpe_portfolio.set_index('Ticker')\n return sharpe_portfolio\n","repo_name":"avielfedida/RoboAdvisor","sub_path":"server/algorithms/mean_gini.py","file_name":"mean_gini.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"35571071784","text":"import pandas as pd\n\nDATA_DIR = \"../data/prepared/twitter\"\nFN_LEMMAS = \"2022_01_01_to_07_lemmas_02.csv\"\nOUT_FN = \"2022_01_01_to_07_userids_02.csv\"\n\ndf = pd.read_csv(\"{}/{}\".format(DATA_DIR, FN_LEMMAS), header=0, index_col=0)\n\nuser_ids = df['userid'].unique()\nprint(user_ids)\n\nwith open(\"{}/{}\".format(DATA_DIR, OUT_FN), \"w\") as f:\n for u in user_ids:\n f.write(\"{}\\n\".format(u))\n","repo_name":"wpower12/MLNEmbeddings","sub_path":"scratch/00_userid_list.py","file_name":"00_userid_list.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"33577725362","text":"from django.shortcuts import render\nfrom django.http import JsonResponse\nfrom bookmark.models import Bookmark\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.views.generic import View\n# Create your views here.\n\n\nclass AddRemoveBookmark(View):\n def get(self, request, *args, **kwargs):\n response = {\n 'status' : False\n }\n slug = self.kwargs['stave_slug']\n try:\n Bookmark.objects.get(user=self.request.user, stave__slug=slug).delete()\n except Bookmark.DoesNotExist:\n Bookmark.objects.create(user=self.request.user, stave__slug=slug)\n response['following'] = True\n return JsonResponse(response, safe=False)\n\n","repo_name":"dauntless001/Store-Recipie","sub_path":"bookmark/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"30029749733","text":"def binsearchIter(value, values):\n # takes care of the base case\n if values ==[]: return False\n\n left, right = 0, len(values) -1\n while left <= right:\n mid = (left + right) //2\n # high probability event first in if statements\n if values[mid] < value:\n left = mid + 1\n elif values[mid] > value:\n right = mid - 1\n else:\n return True\n return False\n\nprint(binsearchIter(3, [1,5,7,10]))\n# I want to test if my branch works\n# I don't want to deal with merge conflict\n","repo_name":"HilaryHe1012/LecInfo","sub_path":"BinarySearch.py","file_name":"BinarySearch.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"18095564735","text":"import turtle\n\nt = turtle.Turtle()\ns = turtle.Screen()\ns.bgcolor('black')\nt.speed('fastest')\nfor x in range(200):\n t.pencolor('blue')\n t.width(x / 100 + 1)\n t.forward(x)\n t.left(79)\n","repo_name":"QAAAK/Animation_in_Turtle","sub_path":"the square is transformed into a circle.py","file_name":"the square is transformed into a circle.py","file_ext":"py","file_size_in_byte":194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"31774432689","text":"class Base1:\n def __init__(self,a):\n self.x=a\n\n\nclass Base2:\n def getdata(self,b):\n self.y=b\n\n\nclass derive(Base1,Base2):\n def putdata(self):\n self.add=self.x+self.y\n print(\"addition of two numbers :\",self.add)\n\nx=int(input(\"Enter the value of x : \"))\ny=int(input(\"Enter the value of y : \"))\nd=derive(x)\nd.getdata(y)\nd.putdata()\n","repo_name":"ssiedu/Python-Programming-050922-7PM-","sub_path":"multipleinheri.py","file_name":"multipleinheri.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"20319742877","text":"from django.shortcuts import render\nfrom django.http import JsonResponse\nfrom .models import Tree, Post\nfrom django.db.models import Count\n\n\ndef index(request):\n # latest_question_list = Question.objects.order_by('-pub_date')[:5]\n # context = {'latest_question_list': latest_question_list}s\n\n trees = Tree.objects.all().annotate(n_posts=Count(\"posts\"))\n posts = Post.objects.all()\n context = {\n \"trees\": trees,\n \"stats\": {\n \"Árvores cadastradas\": trees.count(),\n \"Espécies\": trees.aggregate(\n n_species=Count(\"nome_cientifico\", distinct=True)\n )[\"n_species\"],\n \"Ton. de CO2 retido\": round(sum(t.stored_co2 for t in trees), 1),\n \"Comentários\": posts.count(),\n },\n }\n\n return render(request, \"index.html\", context)\n","repo_name":"nickolasbmm/habitas","sub_path":"habitas/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"9759757758","text":"from collections import deque\ndef slidingWindowMax(nums,k):\n\tdeq,output = deque(),[]\n\tn = len(nums)\n\tdef clean_deq(deq,i):\n\t\twhile deq and deq[0] == i-k:\n\t\t\tdeq.popleft()\n\t\twhile deq and nums[deq[-1]] < nums[i]:\n\t\t\tdeq.pop()\n\n\tfor i in range(k):\n\t\tclean_deq(deq,i)\n\t\tdeq.append(i)\n\toutput.append(nums[deq[0]])\n\n\tfor i in range(k,n):\n\t\tclean_deq(deq,i)\n\t\tdeq.append(i)\n\t\toutput.append(nums[deq[0]])\n\n\treturn output\n\t\nprint(slidingWindowMax([1,3,-1,-3,5,3,6,7],3))\n","repo_name":"deepitapai/Coding-Interview-Prep","sub_path":"Leetcode solutions/sliding-window-maximum.py","file_name":"sliding-window-maximum.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"35804267888","text":"import numpy as np\nimport cv2\nimport keras\nfrom keras.models import load_model\nfrom keras.optimizers import SGD\nfrom keras.utils.vis_utils import plot_model\npath=\"./mnist.npz\"\nf=np.load(path)\nx_train, y_train = f['x_train'], f['y_train']\nx_test, y_test = f['x_test'], f['y_test']\ncv2.namedWindow(\"img\")\ncv2.imshow(\"img\",x_test[0])\ncv2.waitKey(0)\nx_train=x_train.reshape(x_train.shape[0],28*28).astype('float32')\nx_test=x_test.reshape(x_test.shape[0],28*28).astype('float32')\n#x_test[0]=x_test[0].reshape(1,28*28)\ny_test=keras.utils.to_categorical(y_test,10)\nf.close()\nmodel=load_model('./now_model.h5')\nsgd=SGD(lr=0.01,momentum=0.9,decay=1e-9,nesterov=True)#优化函数,参数有学习率,学习衰退率 ,指数1e-9这样写\nmodel.compile(optimizer=sgd,loss='categorical_crossentropy') # 使用交叉熵作为loss函数\n#plot_model(model, to_file='model1.png', show_shapes=True)\nprint(\"*********\")\nx=np.array([x_test[0]])#!!!!!!!important!!!!!!!\nans=model.predict(x)\nprint(ans)\nprint(y_test[0])\nansmax=np.argmax(ans)\nyuanmax=np.argmax(y_test[0])\nprint(ansmax)\nprint(yuanmax)\n","repo_name":"liuxinfengabc/cultivate","sub_path":"5.机器学习/src/keras_mnist/flatten/mnist_predict.py","file_name":"mnist_predict.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"78"}
+{"seq_id":"26088940149","text":"import asyncio\nimport logging\nfrom typing import List\nfrom aiogram import Bot\n\nfrom .utils import log_message, create_user_link\n\nfrom ..utils import config\nfrom ..models import db, Calculation, Cluster, CalculationStatus, SubmitType\nfrom ..models import User as UserModel\nfrom ..models import TelegramUser as TelegramUserModel\n\n\nCALCULATION_FAILED_TO_UPLOAD = (\n 'Ошибка при загрузке расчёта {name}. '\n 'Повторите попытку позже или обратитесь к администратору'\n)\nCALCULATION_FAILED_TO_UPLOAD_LOG = (\n 'Ошибка при загрузке расчёта {name} у пользователя {user}'\n)\nCALCULATION_FINISHED = (\n 'Расчёт {name} завершился. '\n 'Результаты расчёта доступны по ссылке'\n)\nCALCULATION_FINISHED_LOG = (\n 'У пользователя {user} завершился расчёт {name}. '\n 'Результаты расчёта доступны по ссылке'\n)\n\n\nasync def notify_on_finished(bot: Bot):\n calculations: List[Calculation] = (\n Calculation.select(\n Calculation, Cluster, UserModel, TelegramUserModel\n )\n .join(Cluster).switch(Calculation)\n .join(UserModel)\n .join(TelegramUserModel)\n .where((\n (Calculation.status == CalculationStatus.CLOUDED.value) |\n (Calculation.status == CalculationStatus.FAILED_TO_UPLOAD.value)\n ) & (Calculation.submit_type == SubmitType.TELEGRAM.value))\n )\n users: List[TelegramUserModel] = [calc.user.tg_user[0]\n for calc in calculations]\n\n updated = []\n for calc, user in zip(calculations, users):\n if calc.get_status() == CalculationStatus.FAILED_TO_UPLOAD:\n text = CALCULATION_FAILED_TO_UPLOAD.format(\n name=calc.name\n )\n log_text = CALCULATION_FAILED_TO_UPLOAD_LOG.format(\n user=create_user_link(\n model=user\n ),\n name=calc.name\n )\n calc.set_status(CalculationStatus.SENDED)\n else:\n link = config.storage.get_shared(calc.get_folder_name())\n text = CALCULATION_FINISHED.format(\n name=calc.name,\n link=link\n )\n log_text = CALCULATION_FINISHED_LOG.format(\n user=create_user_link(\n model=user\n ),\n name=calc.name,\n link=link\n )\n calc.set_status(CalculationStatus.SENDED)\n\n updated.append(calc)\n\n try:\n message = await bot.send_message(\n chat_id=user.tg_id,\n text=text\n )\n await log_message(\n bot=bot,\n text=log_text\n )\n except Exception as e:\n logging.error(\n 'Failed to send message to user #{id}'.format(id=user.id),\n exc_info=e\n )\n\n if len(updated) > 0:\n with db.atomic():\n Calculation.bulk_update(\n updated,\n fields=['status']\n )\n","repo_name":"UnWaDo/HPC_bot","sub_path":"HPC_bot/telegram/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":3288,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"41289530210","text":"import sys\nimport socket\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nport = 10000\nif len(sys.argv) >= 2:\n port = int(sys.argv[1])\nserver_address = ('localhost', port)\nbuffer_size = 128\ntry:\n\n sock.sendto('Hello, Server!'.encode(), server_address)\n raw, server = sock.recvfrom(buffer_size)\n data = raw.decode()\n print('recv: ' + str(data))\n\nfinally:\n print('closing')\n sock.close()","repo_name":"atadi96/felev5","sub_path":"szamhalok/gyak03/udp_client.py","file_name":"udp_client.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"38162375684","text":"class QuickSort:\n def __init__(self, array):\n self.array = array\n\n\n def swap(self, pointer1, pointer2):\n self.array[pointer1], self.array[pointer2] = self.array[pointer2], self.array[pointer1]\n\n def set_partition(self, left_pointer, right_pointer):\n self.pivot_position = right_pointer\n self.pivot = self.array[self.pivot_position]\n\n right_pointer = right_pointer - 1\n\n while(True):\n \n while(self.array[left_pointer] < self.pivot):\n left_pointer = left_pointer + 1\n\n while(self.array[right_pointer] > self.pivot):\n right_pointer = right_pointer - 1\n\n if left_pointer >= right_pointer:\n break\n else:\n self.swap(left_pointer, right_pointer)\n\n self.swap(left_pointer, self.pivot_position)\n return left_pointer\n\n def quick_sort(self, left_index, right_index):\n if right_index - left_index <= 0:\n return\n \n pivot_position = self.set_partition(left_index, right_index)\n self.quick_sort(left_index, pivot_position -1)\n self.quick_sort(pivot_position + 1, right_index)\n","repo_name":"jpvargasdev/Algorithms","sub_path":"AlgorithmsPY/quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
+{"seq_id":"12012568271","text":"from datetime import datetime, timedelta\nimport extractor as extr\nimport pytz\nimport os\n\n# obtain full path where the file is located\n# then obtain the path to the dir from that full path\nsrc_dir = os.path.dirname(os.path.realpath(__file__))\n\n\ndef epoch_delta(date):\n return date - pytz.timezone(\"UTC\").localize(datetime(1970, 1, 1))\n\n\ndef New_York_time(year, mon, day, h=0, m=0, s=0):\n return epoch_delta(pytz.timezone(\"America/New_York\").\n localize(datetime(year, mon, day, h, m, s)))\n\n\nif __name__ == \"__main__\":\n graph = extr.system.comp_graph()\n\n op = graph.features\n\n trade_file = src_dir + \"/data/sip_trades_20171018.mp\"\n\n trades_in = op.mp_play(\n trade_file,\n ((\"receive\", extr.Time64, \"\"),\n (\"ticker\", extr.Array(extr.Char, 16), \"\"),\n (\"market\", extr.Array(extr.Char, 32), \"\"),\n (\"price\", extr.Rprice, \"\"),\n (\"qty\", extr.Int32, \"\"),\n (\"side\", extr.Int32, \"\")))\n\n out_stream = op.perf_timer_start(trades_in, \"ident_batch\")\n\n for i in range(0, 1000):\n out_stream = op.identity(out_stream)\n\n out_stream = op.perf_timer_stop(out_stream, \"ident_batch\")\n\n graph.stream_ctx().run_to(New_York_time(2017, 10, 18, 16))\n\n print(\"Time spent on identity operators\", extr.system.sample_value(\"ident_batch\"))\n","repo_name":"featuremine/extractor","sub_path":"test/perf_ident.py","file_name":"perf_ident.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
+{"seq_id":"1987909354","text":"from framework import Behaviour\nfrom motobs.stop import Stop\n\nclass StopMoving(Behaviour):\n\n def __init__(self, start_moving, am_i_alive):\n super().__init__(Stop)\n\n self.checker = start_moving\n self.moving = 0\n self.sensobs.append(am_i_alive)\n\n def update(self):\n one = self.sense_and_act()\n if self.moving:\n self.weight = one\n\n\n def sense_and_act(self):\n value = self.sensobs[0].get_value()\n return value\n\n def set_moving(self, number):\n self.moving = number\n\n def get_weight(self):\n if self.weight:\n self.moving = 0\n self.weight = 0\n try:\n self.checker.set_moving(0)\n except:\n pass\n return 1\n return 0","repo_name":"solbjorg/tdt4113-robot","sub_path":"basic_robot/behaviours/stopmoving.py","file_name":"stopmoving.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"6799991177","text":"from dateutil.rrule import *\nfrom datetime import date\n\nTODAY = date(year=2018, month=11, day=29)\n\n\ndef get_hundred_weekdays(start_date=TODAY):\n \"\"\"Return a list of hundred date objects starting from\n start_date up till 100 weekdays later, so +100 days\n skipping Saturdays and Sundays\"\"\"\n return [\n dt.date()\n for dt in rrule(DAILY, count=100, dtstart=start_date, byweekday=(MO, TU, WE, TH, FR))\n ]\n","repo_name":"etoFlash/bitesofpy","sub_path":"147/hundred_days.py","file_name":"hundred_days.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"}
+{"seq_id":"33712599143","text":"import gym\nimport tensorflow as tf\nimport numpy as np\nimport pandas as pd\nimport random\nfrom collections import deque\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.optimizers import Adam\n\nfrom scores.score_logger import ScoreLogger\n\nENV_NAME = \"Pong-ram-v0\"\n\nLEARNING_RATE = 0.001\n\nMEMORY_SIZE = 1000000\n\nTOTAL_EPS = 500\nEXPLORATION_MAX = 1.0\nEXPLORATION_MIN = 0.01\nEXPLORATION_DECAY = 0.995\n\nrender = True\n\n\nclass DQNSolver:\n\n def __init__(self, observation_space, action_space):\n self.exploration_rate = EXPLORATION_MAX\n\n self.action_space = action_space\n self.memory = deque(maxlen=MEMORY_SIZE)\n\n self.model = Sequential()\n self.model.add(Dense(24, input_shape=(\n observation_space,), activation=\"relu\"))\n self.model.add(Dense(24, activation=\"relu\"))\n self.model.add(Dense(self.action_space, activation=\"linear\"))\n self.model.compile(loss=\"mse\", optimizer=Adam(lr=LEARNING_RATE))\n\n def remember(self, state, action, reward, next_state, done):\n self.memory.append((state, action, reward, next_state, done))\n\n def act(self, state):\n if np.random.rand() < self.exploration_rate:\n return random.randrange(self.action_space)\n q_values = self.model.predict(state)\n return np.argmax(q_values[0])\n\n def experience_replay(self):\n if len(self.memory) < BATCH_SIZE:\n return\n batch = random.sample(self.memory, BATCH_SIZE)\n for state, action, reward, state_next, terminal in batch:\n q_update = reward\n if not terminal:\n q_update = (reward + GAMMA *\n np.amax(self.model.predict(state_next)[0]))\n q_values = self.model.predict(state)\n q_values[0][action] = q_update\n self.model.fit(state, q_values, verbose=0)\n self.exploration_rate *= EXPLORATION_DECAY\n self.exploration_rate = max(EXPLORATION_MIN, self.exploration_rate)\n\n# n_states = env.observation_space.shape\n# tot_states = 0\n# for d in n_states:\n# tot_states += d\n# n_actions = env.action_space.n\n\n\n# n_hidden_1 = 200\n# n_input = tot_states\nBATCH_SIZE = 500\nGAMMA = .99\n\nself.model = Sequential()\nself.model.add(Dense(24, input_shape=(observation_space,), activation=\"relu\"))\nself.model.add(Dense(24, activation=\"relu\"))\nself.model.add(Dense(self.action_space, activation=\"linear\"))\nself.model.compile(loss=\"mse\", optimizer=Adam(lr=0.001))\n\n# weights = {\n# 'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),\n# 'out': tf.Variable(tf.random_normal([n_hidden_1, n_actions]))\n# }\n# biases = {\n# 'b1': tf.Variable(tf.random_normal([n_hidden_1])),\n# 'out': tf.Variable(tf.random_normal([n_actions]))\n# }\n\n# keep_prob = tf.placeholder(\"float\")\n\n\ndef experience_replay(self):\n if len(self.memory) < BATCH_SIZE:\n return\n batch = random.sample(self.memory, BATCH_SIZE)\n for s, a, r, s_next, f in batch:\n q_update = r\n if not f:\n q_update = (r + GAMMA *\n np.amax(self.model.predict(s_next)[0]))\n q_values = self.model.predict(s)\n q_values[0][a] = q_update\n self.model.fit(s, q_values, verbose=0)\n\n\ndef build_nn(x, weights, biases):\n layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])\n layer_1 = tf.nn.relu(layer_1)\n # layer_1 = tf.nn.dropout(layer_1, keep_prob)\n out_layer = tf.matmul(layer_1, weights['out']) + biases['out']\n return out_layer\n\n\ndef pong():\n env = gym.make(ENV_NAME)\n observation_space = env.observation_space.shape[0]\n action_space = env.action_space.n\n dqn_solver = DQNSolver(observation_space, action_space)\n ep = 0\n while ep < TOTAL_EPS:\n obs = env.reset()\n obs = 
np.reshape(obs, [1, observation_space])\n while True:\n if render:\n env.render()\n action = dqn_solver.act(obs)\n obs_next, r, f, info = env.step(action)\n r = r if not f else -r # TODO not sure\n obs_next = np.reshape(obs_next, [1, observation_space])\n dqn_solver.remember(obs, action, r, obs_next, f)\n dqn_solver.experience_replay()\n obs = obs_next\n if f:\n break\n ep += 1\n env.close()\n","repo_name":"Zman94/Learning-RL","sub_path":"PongRam/dingdong.py","file_name":"dingdong.py","file_ext":"py","file_size_in_byte":4291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"40908206575","text":"# gaussian jobs used to identify the type of calculation and insert it to the db\nJOB_TYPES = {\n \"sp\",\n \"opt\",\n \"freq\",\n \"irc\",\n \"ircmax\",\n \"scan\",\n \"polar\",\n \"admp\",\n \"bomd\",\n \"eet\",\n \"force\",\n \"stable\",\n \"volume\",\n \"density\",\n \"guess\",\n \"pop\",\n \"scrf\",\n \"cphf\",\n \"prop\",\n \"nmr\",\n \"cis\",\n \"zindo\",\n \"td\",\n \"eom\",\n \"sac-ci\",\n}\n\n# gaussian SCRF models used to identify calculation model and insert to the db\nSCRF_MODELS = {\"pcm\", \"iefpcm\", \"cpcm\", \"dipole\", \"ipcm\", \"isodensity\", \"scipcm\", \"smd\"}\n\n# default gaussian inputs used in the workflows if not specified by user\nSTANDARD_OPT_GUASSIAN_INPUT = {\n \"functional\": \"B3LYP\",\n \"basis_set\": \"6-31G(d)\",\n \"route_parameters\": {\"Opt\": None},\n \"link0_parameters\": {\n \"%chk\": \"checkpoint.chk\",\n \"%mem\": \"45GB\",\n \"%NProcShared\": \"24\",\n },\n}\n\n# maximum number of errors to correct\nCUSTODIAN_MAX_ERRORS = 5\n","repo_name":"molmd/mispr","sub_path":"mispr/gaussian/defaults.py","file_name":"defaults.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"78"}
+{"seq_id":"47093603327","text":"#training\nimport wandb\nfrom torchaudio.transforms import MelSpectrogram\nfrom torch import optim\nimport os \nimport umap\nimport torch\n#\n\nreducer = umap.UMAP(random_state=42, n_neighbors=7, min_dist=0.1)\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nmanifold_approx = []\nval_loss = []\nnum_utt = 27629\nnum_epoch = 30\nmodel = Nnet()\noptimizer = optim.Adam(model.parameters())\ncriterion = GE2ELoss(init_w=10.0, init_b=-5.0, loss_method='softmax')\nmelspec = MelSpectrogram(n_mels=128, n_fft=400).to(device)\nfor i in range(num_epoch):\n run = wandb.init(project=\"ge2e\", reinit=True)\n running_loss =0.0\n for k in range(num_utt // 80):\n model.to(device)\n model.train()\n X = get_batch('/content/LibriSpeech/train-clean-100', 8, 10).sampler()\n X = padding_batch(X).to(device)\n\n X = melspec(X)\n\n optimizer.zero_grad()\n outputs = model(X).view(8, 10, -1)\n loss = criterion(outputs)\n loss.backward()\n\n optimizer.step()\n \n running_loss += loss.item()\n if (k + 1) % 20 == 0:\n print(f'Train epoch:{i}, mean loss = {running_loss / 20}')\n\n wandb.log({\"Loss\": running_loss / 20})\n\n running_loss = 0.0\n #there should be 3 different piles of points as val_data has 3 different speakers\n if (k + 1) % 40 == 0:\n model.eval()\n model.to('cpu')\n with torch.no_grad():\n test = model(val_audio)\n reducer.fit(test)\n manifold_approx.append(reducer.fit_transform(test))\n print('Val loss is: ', criterion(model(val_audio).view(3,9,-1)))\n val_loss.append(criterion(model(val_audio).view(3,9,-1)))\n run.finish()\n \ntorch.save(model.state_dict(), os.path.join(wandb.run.dir, 'model.pt'))\n","repo_name":"sakharok13/KWS-with-speaker-verification-pytorch","sub_path":"embedder_train.py","file_name":"embedder_train.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"}
+{"seq_id":"42873840679","text":"import random\r\n\r\nprint('Rock...')\r\nprint('Paper...')\r\nprint('Scissors...')\r\na = \"y\"\r\nwhile a == \"y\":\r\n p1_wins = 0\r\n p2_wins = 0\r\n com_wins = 0\r\n win_score = int(input(\"define a wining score :\"))\r\n gameMode = input(\"please choose a game mode (pvp or pvc)\")\r\n if gameMode == \"pvc\":\r\n print(gameMode)\r\n while p1_wins < win_score and com_wins < win_score:\r\n print(f\"Player_1 score : {p1_wins} com score : {com_wins}\")\r\n Player_1 = input('Player_1 , Make your move :').lower()\r\n com = random.choice([\"rock\", \"scissor\", \"paper\"])\r\n if Player_1 == \"surrender\" or Player_1 == \"quit\":\r\n print(f\"Player_1 score : {p1_wins} com : {com_wins}\")\r\n print(\"Player_1 surrendered , com wins\")\r\n break\r\n else:\r\n print(f\"com chose {com}\")\r\n if Player_1 == com:\r\n print(\"Tie\")\r\n elif Player_1 == \"rock\":\r\n if com == \"scissor\":\r\n print(\"Player_1 Wins\")\r\n p1_wins += 1\r\n elif com == \"paper\":\r\n print(\"com Wins\")\r\n com_wins += 1\r\n else:\r\n print(\"Something went wrong ...\")\r\n elif Player_1 == \"scissor\":\r\n if com == \"paper\":\r\n print(\"Player_1 Wins\")\r\n p1_wins += 1\r\n elif com == \"rock\":\r\n print(\"com Wins\")\r\n com_wins += 1\r\n else:\r\n print(\"Something went wrong ...\")\r\n elif Player_1 == \"paper\":\r\n if com == \"rock\":\r\n print(\"Player_1 Wins\")\r\n p1_wins += 1\r\n elif com == \"scissor\":\r\n print(\"com Wins\")\r\n com_wins += 1\r\n else:\r\n print(\"Something went wrong ...\")\r\n else:\r\n print(\"Something went wrong ...\")\r\n if p1_wins == win_score or com_wins == win_score:\r\n if p1_wins > com_wins:\r\n print(\"Player_1 Wins the match !!!\")\r\n elif com_wins > p1_wins:\r\n print(\"com Wins the match !!!\")\r\n elif com_wins == p1_wins:\r\n print(\"the match ended with a Tie !!!\")\r\n print(f\"Player_1 score : {p1_wins} com score : {com_wins}\")\r\n elif gameMode == \"pvp\":\r\n print(gameMode)\r\n while p1_wins < win_score and p2_wins < win_score:\r\n print(f\"Player_1 score : {p1_wins} Player_2 score : {p2_wins}\")\r\n Player_1 = input('Player_1 , Make your move :').lower()\r\n Player_2 = input('Player_2 , Make your move :').lower()\r\n if Player_1 == \"surrender\" or Player_2 == \"surrender\" or Player_1 == \"quit\" or Player_2 == \"quit\":\r\n print(f\"Player_1 score : {p1_wins} Player_2 score : {p2_wins}\")\r\n if Player_1 == \"surrender\" or Player_1 == \"quit\":\r\n print(\"Player_1 surrendered , Player_2 wins\")\r\n break\r\n elif Player_2 == \"surrender\" or Player_2 == \"quit\":\r\n print(\"Player_2 surrendered , Player_1 wins\")\r\n break\r\n else:\r\n print(f\"Player_2 chose {Player_2}\")\r\n if Player_1 == Player_2:\r\n print(\"Tie\")\r\n elif Player_1 == \"rock\":\r\n if Player_2 == \"scissor\":\r\n print(\"Player_1 Wins\")\r\n p1_wins += 1\r\n elif Player_2 == \"paper\":\r\n print(\"Player_2 Wins\")\r\n p2_wins += 1\r\n else:\r\n print(\"Something went wrong ...\")\r\n elif Player_1 == \"scissor\":\r\n if Player_2 == \"paper\":\r\n print(\"Player_1 Wins\")\r\n p1_wins += 1\r\n elif Player_2 == \"rock\":\r\n print(\"Player_2 Wins\")\r\n p2_wins += 1\r\n else:\r\n print(\"Something went wrong ...\")\r\n elif Player_1 == \"paper\":\r\n if Player_2 == \"rock\":\r\n print(\"Player_1 Wins\")\r\n p1_wins += 1\r\n elif Player_2 == \"scissor\":\r\n print(\"Player_2 Wins\")\r\n p2_wins += 1\r\n else:\r\n print(\"Something went wrong ...\")\r\n else:\r\n print(\"Something went wrong ...\")\r\n if p1_wins == win_score or p2_wins == 
win_score:\r\n if p1_wins > p2_wins:\r\n print(\"Player_1 Wins the match !!!\")\r\n elif p2_wins > p1_wins:\r\n print(\"Player_2 Wins the match !!!\")\r\n elif p2_wins == p1_wins:\r\n print(\"the match ended with a Tie !!!\")\r\n print(f\"Player_1 score : {p1_wins} Player_2 score : {p2_wins}\")\r\n else:\r\n print(\"something went wrong\")\r\n\r\n a = input(\"Again ? (Y/N)\").lower()\r\n","repo_name":"Samuel-X124/rock-paper-scissor","sub_path":"rockScissorPaperEnhanced.py","file_name":"rockScissorPaperEnhanced.py","file_ext":"py","file_size_in_byte":5440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"40011183101","text":"import cv2\n\nimgs = [cv2.imread(\"\")]\nfor _ in range(3):\n r = cv2.pyrDown(imgs[-1])\n imgs.append(r)\nfor (i, img) in enumerate(imgs):\n cv2.imshow(\"view_%d\" % i, img)\n\ncv2.waitKey()\n","repo_name":"nhomble/hiccup","sub_path":"bin/demo/pyr.py","file_name":"pyr.py","file_ext":"py","file_size_in_byte":204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"34395931775","text":"# Import Modules\nimport numpy as np\nimport copy\nimport pandas as pd\nfrom itertools import count\nimport time\nimport lxml\nimport html\nimport json\nimport requests\nimport requests_html as reqHTML\nimport urllib.parse as urllib\nfrom bs4 import BeautifulSoup\nfrom pprint import pprint\n\n \n\n\ndef construct_sephora_req(search_term:str='moisturizer', page_num:int=1) -> dict:\n '''Given the category, page number and meta data, constructs page link for use with requests.get\n \n Use in conjunction with async scraping functions\n Default set to moisturizers\n '''\n res = {}\n # Construct link\n base_link = 'https://www.sephora.com/api/catalog/search?'\n payload = {\n 'type' : 'keyword',\n 'q' : search_term,\n 'currentPage' : page_num\n }\n \n # Add results to res to use as kwarg\n res['url'] = base_link\n res['params'] = payload \n\n return res\n\n\ndef get_api_json(req_params:dict, return_json:bool=True) -> dict:\n '''Given the \n '''\n res = None\n # Check link\n if req_params:\n try:\n response = requests.get(**req_params)\n res = response.json() if return_json else response\n except requests.exceptions.RequestException as e:\n print(e)\n \n return res\n\n\nasync def get_page_html(page_link:str='', html_element:bool=True) -> lxml.html.Element:\n '''Given the URL of a JS rendered webpage, function will return the raw html from page in bytes format\n \n Must use 'await' command with function, setting html_element to True will return html.Element object, otherwise will return html page in bytes\n '''\n res = None\n # Check link\n if page_link:\n try:\n # Start Session\n asession = reqHTML.AsyncHTMLSession()\n # Request Page\n r = await asession.get(page_link, headers={'User-Agent': 'Mozilla/5.0'})\n await r.html.arender()\n res = lxml.html.fromstring(r.html.raw_html) if html_element else r\n except requests.exceptions.RequestException as e:\n print(e)\n \n return res\n\ndef get_pagnation_num(html_page:lxml.html.Element, pagnation_xpath:str=None) -> int:\n '''Given the html.Element object returned from get_page_html, returns max pagnation for product category'''\n res = None\n if not pagnation_xpath:\n # Set default pagnation xpath for sephora.com \n pagnation_xpath = '/html/body/div[1]/div[2]/div/div/div/div[2]/div[1]/main/div[3]/div/div[3]/div[2]/nav/ul/li[6]/button'\n \n try:\n element_list = html_page.xpath(pagnation_xpath)\n if element_list:\n res = int(element_list[0].text)\n except:\n pass\n return res\n \n ","repo_name":"boogiedev/smooth-sailor-v2","sub_path":"src/scraping_funcs.py","file_name":"scraping_funcs.py","file_ext":"py","file_size_in_byte":2709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"139131469","text":"import networkx as nx\nimport matplotlib.pyplot as plt\nfrom classes.bfs import BfsTraverser\nG = nx.Graph()\nnodes=[\"SportsComplex\",\"Siwaka\",\"PH.1A\",\"PH.1B\",\"STC\",\"Phase2\",\"ParkingLot\",\"Phase3\",\"J1\",\"Mada\"]\nG.add_nodes_from(nodes)\nG.nodes()#confirm nodes\n#Add Edges and their weights\nG.add_edge(\"SportsComplex\",\"Siwaka\",weight=\"450\")\nG.add_edge(\"Siwaka\",\"PH.1A\",weight=\"10\")\nG.add_edge(\"PH.1A\",\"PH.1B\",weight=\"100\")\nG.add_edge(\"Siwaka\",\"PH.1B\",weight=\"230\")\nG.add_edge(\"PH.1B\",\"STC\",weight=\"50\")\nG.add_edge(\"PH.1B\",\"Phase2\",weight=\"112\")\nG.add_edge(\"STC\",\"Phase2\",weight=\"50\")\nG.add_edge(\"Phase2\",\"J1\",weight=\"600\")\nG.add_edge(\"Phase2\",\"Phase3\",weight=\"500\")\nG.add_edge(\"Phase3\",\"ParkingLot\",weight=\"350\")\nG.add_edge(\"J1\",\"Mada\",weight=\"200\")\nG.add_edge(\"ParkingLot\",\"Mada\",weight=\"700\")\nG.add_edge(\"PH.1A\",\"Mada\",weight=\"850\")\n\n\n#nodes=[\"SportsComplex\",\"Siwaka\",\"PH.1A\",\"PH.1B\",\"STC\",\"Phase2\",\"ParkingLot\",\"Phase3\",\"J1\",\"Mada\"]\n\n\n#position the nodes to resemble Madaraka's map\nG.nodes[\"SportsComplex\"]['pos']=(0,600)\nG.nodes[\"Siwaka\"]['pos']=(100,600)\nG.nodes[\"PH.1A\"]['pos']=(200,600)\nG.nodes[\"PH.1B\"]['pos']=(200,400)\nG.nodes[\"STC\"]['pos']=(200,200)\nG.nodes[\"Phase2\"]['pos']=(300,400)\nG.nodes[\"ParkingLot\"]['pos']=(400,0)\nG.nodes[\"Phase3\"]['pos']=(400,200)\nG.nodes[\"J1\"]['pos']=(400,400)\nG.nodes[\"Mada\"]['pos']=(500,400)\n#store all positions in a variable\nnode_pos = nx.get_node_attributes(G,'pos')\narc_weight=nx.get_edge_attributes(G,'weight')\npos = nx.spring_layout(G, k=0.5, iterations=20)\nnx.draw_networkx(G, node_pos,with_labels = True, node_color= 'red', node_size=1500,font_size=6)\nnx.draw_networkx_edge_labels(G, node_pos, edge_labels=arc_weight)\nplt.axis('off')\nplt.show()\n\n","repo_name":"mandem296/AI-ML-Group-Task","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"74668752572","text":"import numpy\r\nimport string\r\nimport random\r\n\r\nalphabet_string = string.ascii_lowercase\r\ndef alphabet_dict():\r\n return [{char:0 for char in alphabet_string}]*5\r\n\r\ndef best_word(word_list, possible_words, guessed_words, yellow_memory, green_memory):\r\n if len(possible_words) == 1:\r\n return possible_words[0]\r\n\r\n adict = alphabet_dict()\r\n for word in possible_words:\r\n already = False\r\n for i in range(0, len(word)):\r\n l = word[i]\r\n for letter in adict[i].keys():\r\n if letter == l:\r\n if already:\r\n adict[i][letter] += 0.25\r\n else:\r\n adict[i][letter] += 1\r\n already = True\r\n\r\n max_score = 0\r\n best_word = possible_words[0]\r\n for word in word_list:\r\n score = 0\r\n\r\n for i in range(0, len(word)):\r\n letter = word[i]\r\n if not letter in yellow_memory[i]:\r\n if letter in word[:i]:\r\n score += get_letter_value(adict, letter, i, len(possible_words)) * 0.1\r\n else:\r\n score += get_letter_value(adict, letter, i, len(possible_words))\r\n if letter in green_memory[i]:\r\n if len(possible_words) <= 3:\r\n score += 5 * get_letter_value(adict, letter, i, len(possible_words)) / len(possible_words)\r\n else:\r\n score += get_letter_value(adict, letter, i, len(possible_words))\r\n if score > max_score and word not in guessed_words:\r\n max_score = score\r\n best_word = word\r\n\r\n return best_word\r\n\r\ndef get_letter_value(adict, letter, position, words_left):\r\n return 1.25 * adict[position][letter] + sum((adict[i][letter] for i in range(0, len(adict))))\r\n #return adict[position][letter]//float(words_left) + -1*((sum((adict[i][letter] for i in range(0, len(adict))))/float(words_left))-.5)**2 + .25\r\n\r\ndef clean(guess, info, possible_words, yellow_memory, green_memory):\r\n not_in_word = []\r\n correct_slot = {}\r\n incorrect_slot = {}\r\n valid = False\r\n possible_words = numpy.delete(possible_words, numpy.where(possible_words == guess))\r\n while not valid:\r\n if info == None:\r\n info = input(\"Please input guess results (ex. 
bbgby): \")\r\n if len(info) != 5:\r\n print(\"Please enter a string 5 characters long!\")\r\n continue\r\n valid = True\r\n for i in range(0, len(guess)):\r\n result = info[i]\r\n if result == \"b\":\r\n not_in_word.append(guess[i])\r\n elif result == \"g\":\r\n correct_slot[i] = guess[i]\r\n green_memory[i] += guess[i]\r\n elif result == \"y\":\r\n incorrect_slot[i] = guess[i]\r\n yellow_memory[i] += guess[i]\r\n else:\r\n info = None\r\n valid = False\r\n\r\n i = 0\r\n cap = len(possible_words)\r\n while i < cap:\r\n delete = False\r\n word = possible_words[i]\r\n for j in range(0, len(word)):\r\n letter = word[j]\r\n if j in correct_slot.keys() and letter != correct_slot[j]:\r\n delete = True\r\n elif j in incorrect_slot.keys() and letter == incorrect_slot[j]:\r\n delete = True\r\n if letter in not_in_word:\r\n delete = True\r\n if delete:\r\n possible_words = numpy.delete(possible_words, i)\r\n cap -= 1\r\n break\r\n if not delete:\r\n for letter in incorrect_slot.values():\r\n if not letter in word:\r\n delete = True\r\n possible_words = numpy.delete(possible_words, i)\r\n cap -= 1\r\n break\r\n if not delete:\r\n i += 1\r\n return possible_words\r\n\r\ndef get_feedback(guess, answer):\r\n feedback = \"\"\r\n for i in range(0, len(guess)):\r\n if guess[i] == answer[i]:\r\n feedback += \"g\"\r\n elif guess[i] in answer:\r\n feedback += \"y\"\r\n else:\r\n feedback += \"b\"\r\n return feedback\r\n\r\ndef load_words():\r\n fans = open(\"answers.txt\", \"r\")\r\n fall = open(\"allowed.txt\", \"r\")\r\n word_list = numpy.concatenate((fans.read().splitlines(), fall.read().splitlines()))\r\n fans.close()\r\n fall.close()\r\n return word_list\r\n\r\ndef solve_auto(answer):\r\n word_list = load_words()\r\n possible_words = numpy.copy(word_list)\r\n\r\n print(\"Answer in advance:\", answer)\r\n guessed_words = []\r\n yellow_memory = {i:\"\" for i in range(0, len(answer))}\r\n green_memory = {i:\"\" for i in range(0, len(answer))}\r\n while len(guessed_words) == 0 or guessed_words[-1] != answer:\r\n guess = best_word(word_list, possible_words, guessed_words, yellow_memory, green_memory)\r\n guessed_words.append(guess)\r\n print(\"Guessing\", guess.upper() + \"...\")\r\n feedback = get_feedback(guess, answer)\r\n print(\"Feedback:\", feedback, \"(\" + str(len(possible_words)) + \" possibilities)\")\r\n possible_words = clean(guess, feedback, possible_words, yellow_memory, green_memory)\r\n print(\"Successfully narrowed search to\", len(possible_words), \"words.\")\r\n print(\"The word is\", answer, \"(\" + str(len(guessed_words)) + \" guesses)\", \"\\n--------------------------\")\r\n return len(guessed_words)\r\n\r\ndef solve_manual():\r\n word_list = load_words()\r\n possible_words = numpy.copy(word_list)\r\n length = 5\r\n\r\n guessed_words = []\r\n yellow_memory = {i:\"\" for i in range(0, length)}\r\n green_memory = {i:\"\" for i in range(0, length)}\r\n while len(possible_words) > 1:\r\n guess = best_word(word_list, possible_words, guessed_words, yellow_memory, green_memory)\r\n guessed_words.append(guess)\r\n print(\"Guessing\", guess.upper() + \"...\")\r\n possible_words = clean(guess, None, possible_words, yellow_memory, green_memory)\r\n if (len(possible_words)) == 1:\r\n print(possible_words)[0]\r\ndef gen_test_set():\r\n fans = open(\"answers.txt\", \"r\")\r\n randos = random.sample(fans.read().splitlines(), 100)\r\n fans.close()\r\n return randos\r\n\r\ndef run_test_set():\r\n ftest = open(\"testset.txt\", \"r\")\r\n test_set = ftest.read().splitlines()\r\n total = 0\r\n for word in 
test_set:\r\n total += solve_auto(word)\r\n avg = total / float(len(test_set))\r\n print(avg)\r\n\r\n\r\nrun_test_set()\r\nsolve_manual()\r\n# solve_auto(\"crane\")","repo_name":"Emmet-exe/wordletree","sub_path":"wordling.py","file_name":"wordling.py","file_ext":"py","file_size_in_byte":6505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"32046537010","text":"\"\"\"\nModule to model the user learning and logic around it.\n\n:author: Haemanth Santhi Ponnusamy \n\"\"\"\n\nimport pickle\nimport random\nimport numpy as np\nimport networkx as nx\n\nfrom vocabby.bookshelf import Book\n\n\nclass Learner(object):\n def __init__(self, name, level=3):\n self.name = name\n self.difficulty_level = level\n self.tutors = {}\n\n def add_book(self, book):\n self.tutors.update({book.code: Tutor(self, book)})\n\n @staticmethod\n def load(learner_id):\n try:\n with open('data/learners/' + learner_id + '.p', 'rb') as learner_file:\n learner = pickle.load(learner_file)\n except FileNotFoundError:\n learner = Learner(learner_id)\n return learner\n\n def save(self):\n with open('data/learners/' + self.name + '.p', 'wb') as learner_file:\n pickle.dump(self, learner_file)\n\n def get_tutor(self, book_code):\n \"\"\"Return the tutors corresponding to book or hire one.\"\"\"\n\n if book_code not in self.tutors:\n book = Book.load(book_code)\n self.add_book(book)\n\n return self.tutors[book_code]\n\n\nclass Tutor(object):\n \"\"\"\"\"\"\n def __init__(self, learner, book):\n self.learner = learner\n self.book = book\n self.sessions = []\n # self.mastery = {w: 0.5 for w in self.book.words}\n self.network = self.book.network\n\n @property\n def progress(self):\n mastered_count = sum([1 for node in self.network if self.network.nodes[node][\"mastery\"] > 0.8])\n return (mastered_count *100) / len(self.network)\n\n def new_session(self):\n critical_nodes = self.get_critical_nodes()\n self.sessions.append(Session(self, critical_nodes))\n\n def update(self, token, response):\n sign = 1 if response else -1\n step = 0.3\n factor = 1 + (sign * step)\n self.network.nodes[token.root]['mastery'] = min(\n 0.99, self.network.nodes[token.root]['mastery'] * factor)\n\n for neigh in self.network.neighbors(token.root):\n weight = self.network[token.root][neigh]['weight']\n factor = 1 + (sign * step * 0.5 * weight)\n self.network.nodes[neigh]['mastery'] = min(\n 0.99, self.network.nodes[neigh]['mastery'] * factor)\n\n def get_critical_nodes(self):\n # TODO: Update with proper implementation\n print(\"\\n\\n\\nNew mode of gettig critical nodes \\n\\n\\n\")\n candidates = []\n # centrality_scores = list(nx.betweenness_centrality(self.network, k= int(len(self.network.nodes)/10), weight=\"weight\").items())\n # centrality_scores = list(nx.betweenness_centrality(self.network, weight=\"weight\").items())\n\n for node in self.network:\n extrensic_score = sum([v['weight'] for k, v in self.network[node].items()])\n candidates.append((node, extrensic_score))\n\n # Use internsic measure to decide a critical node\n # intrensic_score = self.book.families[node].complexity\n # candidates.append((node, extrensic_score * intrensic_score))\n\n # n_choice = np.random.choice(len(families), 20)\n # n_choice = sorted(candidates, key=lambda x: -x[1])[20:35]\n n_choice = sorted(candidates, key=lambda x: -x[1])\n # n_choice = sorted(centrality_scores, key=lambda x: -x[1])\n return [self.book.families[node] for node, score in n_choice if self.network.nodes[node]['mastery'] < 0.8][:20]\n\n def get_session(self):\n \"\"\"Returns a active/incomplete session or a new session.\"\"\"\n if not(self.sessions and len(self.sessions[-1].queue)):\n self.new_session()\n return self.sessions[-1]\n\n def get_graph_for_viz(self):\n \"\"\"Loads the processed book from shelf.\"\"\"\n\n # Building the entire graph for visualization\n node_list = {name: idx for idx, name in 
enumerate(self.network.nodes)}\n sorted_node_list = sorted(node_list.items(), key=lambda x: x[1])\n active_session = self.get_session()\n print(sorted_node_list[:5])\n nodes = [{'id': node_list[token],\n 'name': token,\n 'score':self.network.nodes[token]['mastery'],\n 'child': False,\n 'critical': token in list(active_session.tokens.keys())}\n for token, _ in sorted_node_list]\n print('Node list:', nodes[:5])\n edges = []\n for source, target, attrb in self.network.edges.data():\n if attrb['weight'] < 0.4: ######!!!!! ORIGINAL 0.6 ############\n continue\n edges.append({\"source\": node_list[source],\n \"target\": node_list[target],\n \"weight\": attrb['weight']})\n\n children = []\n child_edges = []\n families = {}\n for parent in nodes:\n families[parent['id']] = set()\n family = self.book.families.get(parent['name'], {})\n if not family:\n print(\"No family for \", parent['name'])\n print(parent)\n break\n for child in family.members:\n child_pos = len(nodes) + len(children)\n children.append({'id': child_pos,\n 'name': child.text + \"_\" + child.pos,\n 'score': parent['score'],\n 'child': True})\n child_edges.append({\"source\": parent['id'],\n \"target\": child_pos,\n \"weight\": 1})\n families[parent['id']].add(child_pos)\n\n neighbourhood = {\"nodes\": nodes + children, \"links\": edges + child_edges, \"families\": families}\n neighbourhoodwc = {\"nodes\": nodes, \"links\": edges, \"families\": families}\n\n # Raffael: Save neighborhood dict for analysis\n with open(\"C:/Users/raffa/Desktop/Masterarbeit_Vocabby/Vocabby/backend/neighbourhood_biologie.pickle\", 'wb') as fi:\n pickle.dump(neighbourhood, fi)\n\n with open(\"C:/Users/raffa/Desktop/Masterarbeit_Vocabby/Vocabby/backend/neighbourhood_biologie_wc.pickle\", 'wb') as fi:\n pickle.dump(neighbourhoodwc, fi)\n\n # Generate Graph\n\n G = nx.Graph() \n\n # iterate through nodes and corresponding edges\n for link in neighbourhood['links']:\n \n node = neighbourhood['nodes'][link['source']]['name'] # gives us name of node from id\n edge = neighbourhood['nodes'][link['target']]['name']\n G.add_edge(node, edge, weight=link['weight']) # This is the final graph\n\n with open(\"C:/Users/raffa/Desktop/Masterarbeit_Vocabby/Vocabby/backend/graph_spacyff.pickle\", 'wb') as fi:\n pickle.dump(G, fi)\n\n\n return neighbourhood, neighbourhoodwc\n\n\nclass Session(object):\n def __init__(self, tutor, tokens):\n self.tutor = tutor\n self.book = self.tutor.book\n self.network = self.book.network\n self.tokens = {t.root: t for t in tokens}\n self.queue = list(self.tokens.keys())\n self.answers = {}\n # Prevent regeneration of new activity\n self.activity_cache = {}\n\n def _create_activity(self, family, activity_type):\n \"\"\"\"\"\"\n if activity_type == 0:\n word = np.random.choice(family.members)\n\n # For German the words have to be matched in upper case;\n # TODO: Look at families and upper/lower case distinction\n all_sentences = list(\n s.replace(word.text, '______') for s in word.get_sentences())\n\n for s in word.get_sentences():\n print(\"Sentence: \", s)\n print(word.text)\n # random.shuffle(all_sentences)\n sentences = all_sentences[:3]\n \n distractor_objs = self._get_distractors(family, word.pos) + [word]\n np.random.shuffle(distractor_objs)\n distractors = [d.text for d in distractor_objs]\n activity_id = random.randint(1000, 100000)\n self.answers.update(\n {activity_id:\n {\"answer\": distractors.index(word.text),\n \"family\": family,\n \"distractors\": distractor_objs,\n \"activityType\": activity_type}})\n return {\"sentences\": 
sentences,\n \"options\": distractors,\n \"activityType\": activity_type,\n \"activityId\": activity_id}\n\n elif activity_type == 1:\n word = np.random.choice(family.members)\n sentences = [random.choice(list(\n set(s.replace(word.text, '______')\n for s in word.get_sentences()[:3])))]\n character_mix = list(word.text)\n random.shuffle(character_mix)\n activity_id = random.randint(1000, 100000)\n self.answers.update(\n {activity_id:\n {\"answer\": word.text,\n \"family\": family,\n \"activityType\": activity_type}})\n return {\"sentences\": sentences,\n \"options\": character_mix,\n \"activityType\": activity_type,\n \"activityId\": activity_id}\n\n def _get_distractors(self, family, pos):\n \"\"\"Select good set of distractors\"\"\"\n\n candidates = {}\n for n1, attrb1 in self.network[family.root].items():\n wt1 = attrb1['weight']\n candidates.update({n1: wt1})\n for n2, attrb2 in self.network[n1].items():\n if n2 == family.root:\n continue\n\n wt2 = attrb2['weight'] * wt1\n wt2 = max(candidates.get(n2, 0), wt2)\n candidates.update({n2: wt2})\n\n # Sort the neighbor based on context similarity\n neighbors = sorted(candidates.items(), key=lambda x: -x[1])\n return [self._select_family_member(self.book.families[w], pos)\n for w, wt in neighbors if wt < 0.65][:3]\n #for w, wt in neighbors if wt < 0.8][:3] original\n\n def _select_family_member(self, family, pos):\n \"\"\"Select a word of given POS tag from family if any.\"\"\"\n for word in family.members:\n if word.pos == pos:\n return word\n return np.random.choice(family.members)\n\n def candidate_neighbours(self):\n # TODO: Get 2 level neighbours\n neighbourhood = []\n for token in self.tokens:\n edges = []\n nodes = [{'id': 0, 'name': token,\n \"score\": self.network.nodes[token]['mastery']}]\n\n count = 0\n for neighbour in self.network[token]:\n if self.network[token][neighbour]['weight'] < 0.7:\n continue\n count += 1\n nodes.append({\"id\": count, \"name\": neighbour,\n \"score\": self.network.nodes[token]['mastery']})\n edges.append({\"source\": 0, \"target\": count})\n neighbourhood.append({\"nodes\": nodes, \"links\": edges})\n print(neighbourhood[0])\n\n return neighbourhood\n\n def _activity_selector(self, word):\n # TODO: Improve activity selection based on student progress\n if len(word) >= 5:\n return 0\n else:\n return random.choice([0, 1])\n\n def next_acitivity(self):\n if self.activity_cache:\n return self.activity_cache\n\n if self.queue:\n word = self.queue[0]\n activity_type = self._activity_selector(word)\n self.activity_cache = self._create_activity(\n self.tokens[word], activity_type)\n return self.activity_cache\n else:\n return {\"activityType\": '-1'}\n\n def update(self, family, response):\n if family.root == self.queue[0]:\n word = self.queue.pop(0)\n if not response:\n self.queue.append(word)\n self.tutor.update(family, response)\n\n def evaluate(self, activity_id, selection):\n self.activity_cache = {}\n activity_type = self.answers[activity_id][\"activityType\"]\n\n is_correct = self.answers[activity_id]['answer'] == selection\n self.update(self.answers[activity_id]['family'], is_correct)\n result = {'isCorrect': is_correct,\n 'remaining': len(self.queue)}\n\n if not is_correct:\n feedback_sentence = ''\n if activity_type == 0:\n wrong_word = self.answers[\n activity_id]['distractors'][selection]\n # feedback_sentence = random.choice(wrong_word.get_sentences())\n feedback_sentence = wrong_word.get_sentences()[0]\n result.update({'feedback': feedback_sentence})\n return 
result\n","repo_name":"raffa-dev/vocabby_da","sub_path":"Vocabby/backend/vocabby/learner.py","file_name":"learner.py","file_ext":"py","file_size_in_byte":12924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"12508343267","text":"\"\"\"\nkey analytics for Black Scholes Merton pricer and implied volatilities\n\"\"\"\n\nimport numpy as np\nfrom numba import njit\nfrom typing import Union, Tuple\nfrom numba.typed import List\nfrom scipy.stats import norm\n\nfrom stochvolmodels.utils.funcs import ncdf, npdf\n\n\n@njit(cache=False, fastmath=True)\ndef compute_normal_price(forward: float,\n strike: float,\n ttm: float,\n vol: float,\n discfactor: float = 1.0,\n optiontype: str = 'C'\n ) -> float:\n \"\"\"\n bsm pricer for forward\n \"\"\"\n sdev = forward*vol*np.sqrt(ttm)\n d = (forward - strike) / sdev\n if optiontype == 'C' or optiontype == 'IC':\n price = discfactor * ((forward-strike) * ncdf(d) + sdev * npdf(d))\n elif optiontype == 'P' or optiontype == 'IP':\n price = discfactor * ((forward - strike) * (ncdf(d)-1.0) + sdev * npdf(d))\n else:\n raise NotImplementedError(f\"optiontype\")\n\n return price\n\n\n@njit(cache=False, fastmath=True)\ndef compute_normal_slice_prices(ttm: float,\n forward: float,\n strikes: np.ndarray,\n vols: np.ndarray,\n optiontypes: np.ndarray,\n discfactor: float = 1.0\n ) -> np.ndarray:\n \"\"\"\n vectorised bsm deltas for array of aligned strikes, vols, and optiontypes\n \"\"\"\n def f(strike: float, vol: float, optiontype: str) -> float:\n return compute_normal_price(forward=forward,\n ttm=ttm,\n vol=vol,\n strike=strike,\n optiontype=optiontype,\n discfactor=discfactor)\n normal_prices = np.zeros_like(strikes)\n for idx, (strike, vol, optiontype) in enumerate(zip(strikes, vols, optiontypes)):\n normal_prices[idx] = f(strike, vol, optiontype)\n return normal_prices\n\n\ndef compute_normal_delta_to_strike(ttm: float,\n forward: float,\n delta: float,\n vol: float\n ) -> Union[float, np.ndarray]:\n \"\"\"\n bsm deltas for strikes and vols\n \"\"\"\n inv_delta = norm.ppf(delta) if delta > 0.0 else norm.ppf(1.0+delta)\n sdev = forward * vol * np.sqrt(ttm)\n strike = forward - sdev*inv_delta\n return strike\n\n\n@njit(cache=False, fastmath=True)\ndef compute_normal_delta_from_lognormal_vol(ttm: float,\n forward: float,\n strike: float,\n given_price: float,\n optiontype: str,\n discfactor: float = 1.0\n ) -> float:\n if np.abs(ttm) < 1e-12:\n if optiontype == 'C' and forward > strike:\n delta = 1.0\n elif optiontype == 'P' and forward < strike:\n delta = -1.0\n else:\n delta = 0.0\n else:\n normal_vol = infer_normal_implied_vol(forward=forward, ttm=ttm, strike=strike,\n given_price=given_price, optiontype=optiontype, discfactor=discfactor)\n delta = compute_normal_delta(ttm=ttm, forward=forward, strike=strike, vol=normal_vol,\n optiontype=optiontype, discfactor=discfactor)\n return delta\n\n\n@njit(cache=False, fastmath=True)\ndef compute_normal_delta(ttm: float,\n forward: float,\n strike: float,\n vol: float,\n optiontype: str,\n discfactor: float = 1.0\n ) -> float:\n \"\"\"\n bsm deltas for strikes and vols\n \"\"\"\n sdev = forward * vol * np.sqrt(ttm)\n d = (forward - strike) / sdev\n if optiontype == 'C':\n normal_delta = discfactor * ncdf(d)\n elif optiontype == 'P':\n normal_delta = - discfactor * ncdf(-d)\n else:\n normal_delta = np.nan\n return normal_delta\n\n\n@njit(cache=False, fastmath=True)\ndef compute_normal_slice_deltas(ttm: Union[float, np.ndarray],\n forward: Union[float, np.ndarray],\n strikes: Union[float, np.ndarray],\n vols: Union[float, np.ndarray],\n optiontypes: Union[np.ndarray],\n discfactor: float = 1.0\n ) -> Union[float, np.ndarray]:\n \"\"\"\n bsm deltas for strikes and vols\n \"\"\"\n sdev = forward * vols * np.sqrt(ttm)\n 
d = (forward - strikes) / sdev\n d1_sign = np.where(np.array([op == 'C' for op in optiontypes]), 1.0, -1.0)\n normal_deltas = discfactor * d1_sign * ncdf(d1_sign * d)\n return normal_deltas\n\n\n@njit(cache=False, fastmath=True)\ndef compute_normal_deltas_ttms(ttms: np.ndarray,\n forwards: np.ndarray,\n strikes_ttms: Tuple[np.ndarray, ...],\n vols_ttms: Tuple[np.ndarray,...],\n optiontypes_ttms: Tuple[np.ndarray, ...],\n ) -> List[np.ndarray]:\n \"\"\"\n vectorised bsm deltas for array of aligned strikes, vols, and optiontypes\n \"\"\"\n deltas_ttms = List()\n for ttm, forward, vols_ttm, strikes_ttm, optiontypes_ttm in zip(ttms, forwards, vols_ttms, strikes_ttms, optiontypes_ttms):\n deltas_ttms.append(compute_normal_slice_deltas(ttm=ttm, forward=forward, strikes=strikes_ttm, vols=vols_ttm, optiontypes=optiontypes_ttm))\n return deltas_ttms\n\n\n@njit(cache=False, fastmath=True)\ndef compute_normal_slice_vegas(ttm: float,\n forward: float,\n strikes: np.ndarray,\n vols: np.ndarray,\n optiontypes: np.ndarray = None\n ) -> np.ndarray:\n \"\"\"\n vectorised bsm vegas for array of aligned strikes, vols, and optiontypes\n \"\"\"\n sdev = forward*vols * np.sqrt(ttm)\n d = (forward - strikes) / sdev\n vegas = forward * npdf(d) * np.sqrt(ttm)\n return vegas\n\n\n@njit(cache=False, fastmath=True)\ndef compute_normal_vegas_ttms(ttms: np.ndarray,\n forwards: np.ndarray,\n strikes_ttms: Tuple[np.ndarray, ...],\n vols_ttms: Tuple[np.ndarray,...],\n optiontypes_ttms: Tuple[np.ndarray, ...],\n ) -> List[np.ndarray]:\n \"\"\"\n vectorised bsm vegas for array of aligned strikes, vols, and optiontypes\n \"\"\"\n vegas_ttms = List()\n for ttm, forward, vols_ttm, strikes_ttm, optiontypes_ttm in zip(ttms, forwards, vols_ttms, strikes_ttms, optiontypes_ttms):\n vegas_ttms.append(compute_normal_slice_vegas(ttm=ttm, forward=forward, strikes=strikes_ttm, vols=vols_ttm, optiontypes=optiontypes_ttm))\n return vegas_ttms\n\n\n@njit(cache=False, fastmath=True)\ndef infer_normal_implied_vol(forward: float,\n ttm: float,\n strike: float,\n given_price: float,\n discfactor: float = 1.0,\n optiontype: str = 'C',\n tol: float = 1e-12,\n is_bounds_to_nan: bool = False\n ) -> float:\n \"\"\"\n compute normal implied vol\n \"\"\"\n x1, x2 = 0.01, 10.0 # starting values\n f = compute_normal_price(forward=forward, strike=strike, ttm=ttm, vol=x1, discfactor=discfactor, optiontype=optiontype) - given_price\n fmid = compute_normal_price(forward=forward, strike=strike, ttm=ttm, vol=x2, discfactor=discfactor, optiontype=optiontype) - given_price\n\n if f*fmid < 0.0:\n if f < 0.0:\n rtb = x1\n dx = x2-x1\n else:\n rtb = x2\n dx = x1-x2\n xmid = rtb\n for j in range(0, 100):\n dx = dx*0.5\n xmid = rtb+dx\n fmid = compute_normal_price(forward=forward, strike=strike, ttm=ttm, vol=xmid, discfactor=discfactor, optiontype=optiontype) - given_price\n if fmid <= 0.0:\n rtb = xmid\n if np.abs(fmid) < tol:\n break\n v1 = xmid\n\n else:\n if f < 0:\n v1 = x1\n else:\n v1 = x2\n\n if is_bounds_to_nan: # in case vol was inferred it will return nan\n if np.abs(v1-x1) < tol or np.abs(v1-x2) < tol:\n v1 = np.nan\n return v1\n\n\n@njit(cache=False, fastmath=True)\ndef infer_normal_ivols_from_model_slice_prices(ttm: float,\n forward: float,\n strikes: np.ndarray,\n optiontypes: np.ndarray,\n model_prices: np.ndarray,\n discfactor: float\n ) -> np.ndarray:\n model_vol_ttm = np.zeros_like(strikes)\n for idx, (strike, model_price, optiontype) in enumerate(zip(strikes, model_prices, optiontypes)):\n model_vol_ttm[idx] = 
infer_normal_implied_vol(forward=forward, ttm=ttm, discfactor=discfactor,\n                                                      given_price=model_price,\n                                                      strike=strike,\n                                                      optiontype=optiontype)\n    return model_vol_ttm\n\n\n@njit(cache=False, fastmath=True)\ndef infer_normal_ivols_from_slice_prices(ttm: float,\n                                         forward: float,\n                                         discfactor: float,\n                                         strikes: np.ndarray,\n                                         optiontypes: np.ndarray,\n                                         model_prices: np.ndarray\n                                         ) -> np.ndarray:\n    \"\"\"\n    vectorised slice ivols\n    \"\"\"\n    model_vol_ttm = np.zeros_like(strikes)\n    for idx, (strike, model_price, optiontype) in enumerate(zip(strikes, model_prices, optiontypes)):\n        model_vol_ttm[idx] = infer_normal_implied_vol(forward=forward, ttm=ttm, discfactor=discfactor,\n                                                      given_price=model_price,\n                                                      strike=strike,\n                                                      optiontype=optiontype)\n    return model_vol_ttm\n\n\n@njit(cache=False, fastmath=True)\ndef infer_normal_ivols_from_chain_prices(ttms: np.ndarray,\n                                         forwards: np.ndarray,\n                                         discfactors: np.ndarray,\n                                         strikes_ttms: Tuple[np.ndarray, ...],\n                                         optiontypes_ttms: Tuple[np.ndarray, ...],\n                                         model_prices_ttms: Tuple[np.ndarray, ...],\n                                         ) -> List[np.ndarray]:\n    \"\"\"\n    vectorised chain ivols\n    \"\"\"\n    model_vol_ttms = List()\n    for ttm, forward, discfactor, strikes_ttm, optiontypes_ttm, model_prices_ttm in zip(ttms, forwards, discfactors, strikes_ttms, optiontypes_ttms, model_prices_ttms):\n        model_vol_ttm = np.zeros_like(strikes_ttm)\n        for idx, (strike, model_price, optiontype) in enumerate(zip(strikes_ttm, model_prices_ttm, optiontypes_ttm)):\n            model_vol_ttm[idx] = infer_normal_implied_vol(forward=forward, ttm=ttm, discfactor=discfactor,\n                                                          given_price=model_price,\n                                                          strike=strike,\n                                                          optiontype=optiontype)\n        model_vol_ttms.append(model_vol_ttm)\n    return model_vol_ttms\n","repo_name":"ArturSepp/StochVolModels","sub_path":"stochvolmodels/pricers/core/normal_pricer.py","file_name":"normal_pricer.py","file_ext":"py","file_size_in_byte":12249,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"78"}
+{"seq_id":"29747197510","text":"import logging\n\nfrom python_cli_generator import output_printer\n\n\nclass OutputProcessor:\n\n def __init__(self, logger, format=\"json\", file=None, **kwargs):\n self.logger = logger\n self.format = format\n self.file = file\n self.filter_list_search = None\n self.filter_list_attributes = None\n\n def _get_json_values(self, json_obj):\n arr = []\n\n def extract(obj, arr):\n if isinstance(obj, dict):\n for k, v in obj.items():\n if isinstance(v, (dict, list)):\n extract(v, arr)\n else:\n arr.append(v)\n elif isinstance(obj, list):\n for item in obj:\n extract(item, arr)\n return arr\n\n values = extract(json_obj, arr)\n return values\n\n def _filter_list_search(self, json_list):\n json_list_result = []\n if self.filter_list_search is not None:\n for result in json_list:\n for value in self._get_json_values(result):\n\n if value is not None and self.filter_list_search in str(value):\n json_list_result.append(result)\n break\n else:\n json_list_result = json_list\n\n return json_list_result\n\n def _filter_list_attributes(self, json_list):\n def deep_access(x, keylist):\n val = x\n for key in keylist:\n if key in val:\n val = val[key]\n else:\n val = None\n return val\n\n result = []\n if self.filter_list_attributes is not None:\n for json in json_list:\n element = {}\n found_attribute = False\n for attribute in self.filter_list_attributes:\n arrayAttributes = attribute.split(\".\")\n deep_access_result = deep_access(json, arrayAttributes)\n if deep_access_result is not None:\n element[arrayAttributes[-1]] = deep_access_result\n found_attribute = True\n if found_attribute:\n result.append(element)\n else:\n result = json_list\n return result\n\n def _print_result(self, result):\n output_printer.print_json_value(\n result, output_format=str(self.format), file=self.file)\n\n def _process_result(self, result):\n if self.logger is not False:\n self.logger.debug(\"Result: {}\\n\".format(result))\n if not isinstance(result, dict) and not isinstance(result, list) and str(self.format) != \"raw\":\n return {\"result\": result}\n return result\n\n def _process_result_list(self, item_list, titles=None):\n result = []\n for item in item_list:\n result_item = {}\n if not isinstance(item, dict):\n if item.__class__.__module__ != \"builtins\":\n item = vars(item)\n else:\n item = item\n result.append(item)\n continue\n for attr in item:\n if titles is None or attr in titles:\n result_item[attr] = item[attr]\n result.append(result_item)\n\n result = self._filter_list_attributes(result)\n result = self._filter_list_search(result)\n return result\n\n def process_args(self, args):\n from python_cli_generator.cli_builtin import BuiltinArguments\n\n if BuiltinArguments.verbose.value in args and args[BuiltinArguments.verbose.value]:\n self.logger.setLevel(logging.DEBUG)\n self.format = args.get(BuiltinArguments.format.value, self.format)\n self.file = args.get(BuiltinArguments.file.value, self.file)\n self.filter_list_search = args.get(\n BuiltinArguments.search.value, self.filter_list_search)\n self.filter_list_attributes = args.get(\n BuiltinArguments.attribute_filter.value, self.filter_list_attributes)\n\n def process_result(self, result):\n if type(result) is list:\n result = self._process_result_list(result)\n else:\n result = self._process_result(result)\n self._print_result(result)\n return 
result\n","repo_name":"AlexSua/python-cli-generator","sub_path":"python_cli_generator/output_processor.py","file_name":"output_processor.py","file_ext":"py","file_size_in_byte":4341,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"}
+{"seq_id":"29138980507","text":"import copy\nfrom typing import Any, Callable, Dict, Iterable, Iterator, Optional, Tuple, Union\n\nimport torch\nfrom torch import Tensor\nfrom torch.distributions import Distribution\nfrom torch.nn import Module, ModuleList, Parameter\n\nfrom bayestorch.nn.prior_module import PriorModule\nfrom bayestorch.nn.utils import nested_apply\n\n\n__all__ = [\n \"ParticlePosteriorModule\",\n]\n\n\nclass ParticlePosteriorModule(PriorModule):\n \"\"\"Bayesian module that defines a prior and a particle-based\n posterior over its parameters.\n\n References\n ----------\n .. [1] Q. Liu and D. Wang.\n \"Stein Variational Gradient Descent: A General Purpose Bayesian Inference Algorithm\".\n In: Advances in Neural Information Processing Systems. 2016, pp. 2378-2386.\n URL: https://arxiv.org/abs/1608.04471\n\n Examples\n --------\n >>> import torch\n >>> from torch import nn\n >>>\n >>> from bayestorch.distributions import LogScaleNormal\n >>> from bayestorch.nn import ParticlePosteriorModule\n >>>\n >>>\n >>> num_particles = 5\n >>> batch_size = 10\n >>> in_features = 4\n >>> out_features = 2\n >>> model = nn.Linear(in_features, out_features)\n >>> num_parameters = sum(parameter.numel() for parameter in model.parameters())\n >>> model = ParticlePosteriorModule(\n ... model,\n ... prior_builder=LogScaleNormal,\n ... prior_kwargs={\n ... \"loc\": torch.zeros(num_parameters),\n ... \"log_scale\": torch.full((num_parameters,), -1.0),\n ... },\n ... num_particles=num_particles,\n ... )\n >>> input = torch.rand(batch_size, in_features)\n >>> output = model(input)\n >>> outputs, log_priors = model(\n ... input,\n ... return_log_prior=True,\n ... reduction=\"none\",\n ... )\n\n \"\"\"\n\n replicas: \"ModuleList\"\n \"\"\"The module replicas (one for each particle).\"\"\"\n\n # override\n def __init__(\n self,\n module: \"Module\",\n prior_builder: \"Callable[..., Distribution]\",\n prior_kwargs: \"Dict[str, Any]\",\n num_particles: \"int\" = 10,\n module_parameters: \"Optional[Iterable[Tensor]]\" = None,\n ) -> \"None\":\n \"\"\"Initialize the object.\n\n Parameters\n ----------\n module:\n The module.\n prior_builder:\n The prior builder, i.e. 
a callable that receives keyword\n arguments and returns a prior with size (batch + event)\n equal to the length of the 1D tensor obtained by flattening\n and concatenating each tensor in `module_parameters`.\n prior_kwargs:\n The keyword arguments to pass to the prior builder.\n Tensor arguments are internally registered as parameters\n if their `requires_grad` attribute is True, as persistent\n buffers otherwise.\n num_particles:\n The number of particles.\n module_parameters:\n The module parameters over which the prior is defined.\n Useful to selectively define a prior over a restricted\n subset of submodules/parameters.\n Default to ``module.parameters()``.\n\n Raises\n ------\n ValueError\n If an invalid argument value is given.\n\n Warnings\n --------\n High memory usage is to be expected as `num_particles - 1`\n replicas of the module must be maintained internally.\n\n \"\"\"\n if num_particles < 1 or not float(num_particles).is_integer():\n raise ValueError(\n f\"`num_particles` ({num_particles}) must be in the integer interval [1, inf)\"\n )\n\n super().__init__(module, prior_builder, prior_kwargs, module_parameters)\n self.num_particles = int(num_particles)\n\n # Replicate module (one replica for each particle)\n self.replicas = ModuleList(\n [module] + [copy.deepcopy(module) for _ in range(num_particles - 1)]\n )\n\n # Retrieve indices of the selected parameters\n self._module_parameter_idxes = []\n replica_parameters = list(module.parameters())\n for parameter in self.module_parameters:\n for i, x in enumerate(replica_parameters):\n if parameter is x:\n self._module_parameter_idxes.append(i)\n break\n\n for replica in self.replicas:\n # Sample new particle\n new_particle = self.prior.sample()\n\n # Inject sampled particle\n start_idx = 0\n replica_parameters = list(replica.parameters())\n module_parameters = [\n replica_parameters[idx] for idx in self._module_parameter_idxes\n ]\n for parameter in module_parameters:\n end_idx = start_idx + parameter.numel()\n new_parameter = new_particle[start_idx:end_idx].reshape_as(parameter)\n parameter.detach_().requires_grad_(False).copy_(\n new_parameter\n ).requires_grad_()\n start_idx = end_idx\n\n # override\n def named_parameters(\n self,\n *args: \"Any\",\n include_all: \"bool\" = True,\n **kwargs: \"Any\",\n ) -> \"Iterator[Tuple[str, Parameter]]\":\n \"\"\"Return the named parameters.\n\n Parameters\n ----------\n include_all:\n True to include all the named parameters,\n False to include only those over which the\n prior is defined.\n\n Returns\n -------\n The named parameters.\n\n \"\"\"\n if include_all:\n return super(PriorModule, self).named_parameters(*args, **kwargs)\n named_parameters = dict(\n super(PriorModule, self).named_parameters(*args, **kwargs)\n )\n result = []\n for replica in self.replicas:\n replica_parameters = list(replica.parameters())\n for idx in self._module_parameter_idxes:\n for k, v in named_parameters.items():\n if v is replica_parameters[idx]:\n result.append((k, v))\n break\n return result\n\n @property\n def particles(self) -> \"Tensor\":\n \"\"\"Return the particles.\n\n In the following, let `N` denote the number of particles,\n and `D` the number of parameters over which the prior is\n defined.\n\n Returns\n -------\n The particles, shape: ``[N, D]``.\n\n \"\"\"\n result = []\n for replica in self.replicas:\n replica_parameters = list(replica.parameters())\n module_parameters = [\n replica_parameters[idx] for idx in self._module_parameter_idxes\n ]\n for parameter in module_parameters:\n 
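# flatten each selected parameter tensor; torch.cat below joins the pieces so\n                # that each replica contributes one row of the [num_particles, D] matrix\n                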
result.append(parameter.flatten())\n return torch.cat(result).reshape(self.num_particles, -1)\n\n # override\n def forward(\n self,\n *args: \"Any\",\n return_log_prior: \"bool\" = False,\n reduction: \"str\" = \"mean\",\n **kwargs: \"Any\",\n ) -> \"Union[Any, Tuple[Any, Tensor]]\":\n \"\"\"Forward pass.\n\n In the following, let `N` denote the number of particles,\n `B = {B_1, ..., B_k}` the batch shape, and `O = {O_1, ..., O_m}`\n the shape of a leaf value of the underlying module output (can be\n a nested tensor).\n\n Parameters\n ----------\n args:\n The positional arguments to pass to the underlying module.\n return_log_prior:\n True to additionally return the log prior (usually\n required during training), False otherwise.\n reduction:\n The reduction to apply to the leaf values of the underlying\n module output and to the log prior (if `return_log_prior` is\n True) across particles. Must be one of the following:\n - \"none\": no reduction is applied;\n - \"mean\": the leaf values and the log prior are averaged\n across particles.\n kwargs:\n The keyword arguments to pass to the underlying module.\n\n Returns\n -------\n - The output, shape of a leaf value: ``[N, *B, *O]``\n if `reduction` is \"none\" , ``[*B, *O]`` otherwise;\n - if `return_log_prior` is True, the log prior, shape:\n ``[N]`` if `reduction` is \"none\" , ``[]`` otherwise.\n\n Raises\n ------\n ValueError\n If an invalid argument value is given.\n\n \"\"\"\n if reduction not in [\"none\", \"mean\"]:\n raise ValueError(\n f\"`reduction` ({reduction}) must be one of {['none', 'mean']}\"\n )\n\n # Forward pass\n outputs = [replica(*args, **kwargs) for replica in self.replicas]\n if reduction == \"none\":\n outputs = nested_apply(torch.stack, outputs)\n elif reduction == \"mean\":\n outputs = nested_apply(\n lambda inputs, dim: torch.mean(torch.stack(inputs, dim), dim), outputs\n )\n\n if not return_log_prior:\n return outputs\n\n # Extract particles\n particles = self.particles\n\n # Compute log prior\n log_priors = self.prior.log_prob(particles)\n if reduction == \"mean\":\n log_priors = log_priors.mean()\n\n return outputs, log_priors\n\n # override\n def __repr__(self) -> \"str\":\n return (\n f\"{type(self).__name__}\"\n f\"(module: {self.module}, \"\n f\"prior: {self.prior}, \"\n f\"num_particles: {self.num_particles}, \"\n f\"module_parameters: {sum(parameter.numel() for parameter in self.module_parameters)})\"\n )\n","repo_name":"lucadellalib/bayestorch","sub_path":"bayestorch/nn/particle_posterior_module.py","file_name":"particle_posterior_module.py","file_ext":"py","file_size_in_byte":9712,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"78"}
+{"seq_id":"3698391654","text":"from tkinter import *\nfrom fpdf import FPDF\nfrom pdf2image import convert_from_path\nfrom PIL import ImageTk, Image\n\nmax_col_width = 95\n\n\npreview_width = 210*2\npreview_height = 297*2\n\nclass PDF(FPDF):\n def header(self):\n self.image('logo.jpg', 10, 8, h=25)\n self.set_font('times', '', 12)\n self.cell(max_col_width)\n self.multi_cell(max_col_width, 6, ' \\n Numero Certificato: 3220167A2020105041616447\\n Data Certificato: 03/06/2021\\n ', border=1)\n self.ln(10)\n\n def footer(self):\n self.set_y(-15)\n self.set_font('times', 'B', 10)\n self.cell(0, 10, f'Page {self.page_no()}/{{nb}}', align='C')\n \n \ndef generate_report():\n name = name_entry.get()\n address = address_entry.get()\n city = city_entry.get()\n state = state_entry.get()\n zip = zip_entry.get()\n country = country_entry.get()\n email = \"Email: \" + email_entry.get()\n phone = \"Tel: \" + phone_entry.get()\n vat = \"VAT: \" + vat_entry.get()\n\n pdf = PDF()\n pdf.set_auto_page_break(auto=True, margin=15)\n pdf.alias_nb_pages()\n\n pdf.add_page()\n\n pdf.set_text_color(16)\n pdf.set_draw_color(128)\n\n pdf.set_font('arial', '', 12)\n\n cell_width = max_col_width \n\n pdf.set_font('arial', 'B', 14)\n pdf.cell(cell_width, 7, 'Fornitore Servizio')\n pdf.cell(cell_width, 7, 'Cliente', ln=True)\n pdf.ln(3)\n \n pdf.set_font('arial', '', 12)\n pdf.cell(cell_width, 7, name)\n pdf.cell(cell_width, 7, name)\n pdf.ln()\n \n pdf.set_font('arial', '', 12)\n pdf.cell(cell_width, 7, address)\n pdf.cell(cell_width, 7, address)\n pdf.ln()\n \n pdf.set_font('arial', '', 12)\n pdf.cell(cell_width, 7, city + \" (\" + state + \"), \" + zip)\n pdf.cell(cell_width, 7, city + \" (\" + state + \"), \" + zip)\n pdf.ln()\n \n pdf.set_font('arial', '', 12)\n pdf.cell(cell_width, 7, country)\n pdf.cell(cell_width, 7, country)\n pdf.ln()\n \n pdf.set_font('arial', '', 12)\n pdf.cell(cell_width, 7, email)\n pdf.cell(cell_width, 7, email)\n pdf.ln()\n \n pdf.set_font('arial', '', 12)\n pdf.cell(cell_width, 7, phone)\n pdf.cell(cell_width, 7, phone)\n pdf.ln()\n \n pdf.set_font('arial', '', 12)\n pdf.cell(cell_width, 7, vat)\n pdf.cell(cell_width, 7, vat)\n pdf.ln()\n \n pdf.output('report.pdf')\n \ndef preview_report():\n global preview_label\n global image\n global my_image\n generate_report()\n pages = convert_from_path('report.pdf')\n for page in pages:\n page.save(\"report.png\")\n \n image = Image.open(\"report.png\")\n image = image.resize((preview_width, preview_height), Image.ANTIALIAS)\n my_image = ImageTk.PhotoImage(image)\n\n preview_label.config(image=my_image)\n #preview_label.image = my_image\n\n\n# --------------------------------------------------------------------------\n# tkinter\n# --------------------------------------------------------------------------\nwindow = Tk()\nwindow.geometry(\"1920x1080\")\nwindow.iconbitmap(\"logo.ico\")\nwindow.title(\"Certificato Sanificazione\")\nwindow.state('zoomed')\n\nmenubar = Menu(window)\nwindow.config(menu=menubar)\n\nopenImage = PhotoImage(file=\"logo.png\")\nfileMenu = Menu(menubar, tearoff=0)\nmenubar.add_cascade(label=\"File\", menu=fileMenu)\nfileMenu.add_command(label=\"Exit\", command=quit)\n\n\nframe_top = Frame(\twindow, \n\t\t#bg=\"green\", \n\t\trelief=\"raised\", \n\t\tborderwidth=1, \n\t\t)\nframe_top.pack(fill=BOTH, expand=1, side=TOP)\nframe_top.pack_propagate(0)\n\nstatus_label = Label(window, text=\"Stato\")\nstatus_label.pack()\n\nframe1 = Frame(\tframe_top, \n\t\t#bg=\"green\", \n\t\twidth=300, \n\t\trelief=\"raised\", 
\n\t\tborderwidth=1, \n\t\t)\nframe1.pack(fill=Y, side=LEFT)\nframe1.pack_propagate(0)\n\nframe2 = Frame(\tframe_top, \n\t\t#bg=\"red\",\n\t\trelief=\"raised\", \n\t\tborderwidth=1, \n\t\t)\nframe2.pack(expand=1, fill=BOTH)\nframe2.pack_propagate(0)\n\n\npadx = 10\npady = 2\n\nname_label = Label(frame1, text=\"Nome: \")\nname_label.pack(anchor=W, padx=padx, pady=pady)\nname_entry = Entry(frame1, width=40)\nname_entry.pack(anchor=W, padx=padx, pady=pady)\n\naddress_label = Label(frame1, text=\"Indirizzo: \")\naddress_label.pack(anchor=W, padx=padx, pady=pady)\naddress_entry = Entry(frame1, width=40)\naddress_entry.pack(anchor=W, padx=padx, pady=pady)\n\ncity_label = Label(frame1, text=\"Città: \")\ncity_label.pack(anchor=W, padx=padx, pady=pady)\ncity_entry = Entry(frame1, width=40)\ncity_entry.pack(anchor=W, padx=padx, pady=pady)\n\nstate_label = Label(frame1, text=\"Provincia: \")\nstate_label.pack(anchor=W, padx=padx, pady=pady)\nstate_entry = Entry(frame1, width=40)\nstate_entry.pack(anchor=W, padx=padx, pady=pady)\n\nzip_label = Label(frame1, text=\"Cap: \")\nzip_label.pack(anchor=W, padx=padx, pady=pady)\nzip_entry = Entry(frame1, width=40)\nzip_entry.pack(anchor=W, padx=padx, pady=pady)\n\ncountry_label = Label(frame1, text=\"Stato: \")\ncountry_label.pack(anchor=W, padx=padx, pady=pady)\ncountry_entry = Entry(frame1, width=40)\ncountry_entry.pack(anchor=W, padx=padx, pady=pady)\n\nemail_label = Label(frame1, text=\"Email: \")\nemail_label.pack(anchor=W, padx=padx, pady=pady)\nemail_entry = Entry(frame1, width=40)\nemail_entry.pack(anchor=W, padx=padx, pady=pady)\n\nphone_label = Label(frame1, text=\"Tel: \")\nphone_label.pack(anchor=W, padx=padx, pady=pady)\nphone_entry = Entry(frame1, width=40)\nphone_entry.pack(anchor=W, padx=padx, pady=pady)\n\nvat_label = Label(frame1, text=\"IVA: \")\nvat_label.pack(anchor=W, padx=padx, pady=pady)\nvat_entry = Entry(frame1, width=40)\nvat_entry.pack(anchor=W, padx=padx, pady=pady)\n\nsubmit = Button(frame1,\n\t\ttext=\"Genera Certificato\",\n\t\tcommand=lambda:generate_report(),\n)\nsubmit.pack(side=LEFT)\n\npreview = Button(frame1,\n\t\ttext=\"Anteprima Certificato\",\n\t\tcommand=lambda:preview_report(),\n)\npreview.pack(side=LEFT)\n\nimage = Image.open(\"report.png\")\nimage = image.resize((preview_width, preview_height), Image.ANTIALIAS)\nmy_image = ImageTk.PhotoImage(image)\n\npreview_label = Label( frame2,\n text=\"label1\",\n image=my_image,\n )\npreview_label.pack()\n\n\n'''\nframe1 = Frame(window, bg=\"green\")\nframe1.grid(row=0, column=0)\n\nlabel1 = Label(frame1, text=\"label1\", width=30)\nlabel1.grid(row=0, column=0)\n\nframe2 = Frame(\twindow, bg=\"red\")\nframe2.grid(row=0, column=1, sticky=W+E)\n\nlabel2 = Label(frame2, text=\"label2\")\nlabel2.grid(row=0, column=0)\n'''\n\nwindow.mainloop()\n","repo_name":"MartinPellizzer/og-repgen","sub_path":"repgen.py","file_name":"repgen.py","file_ext":"py","file_size_in_byte":6325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"8235076586","text":"import os as os\nimport subprocess\nimport tkinter as tk\nimport tkinter.ttk as ttk\nimport tkinter.messagebox\nimport re as re\nimport datetime as dt\nfrom DB_con import DB_con\nimport pdb\n\nfrom work_with_encryption import work_with_crypto\n\n\nclass init:\n\n def __init__(self, parent_window, content_frame):\n self.parent_window = parent_window\n self.content_frame = content_frame\n self.pos_in_msg = 0\n self.msg_check_for_dir = tk.Label(self.content_frame, text='Überprüfe database Ordner...')\n self.msg_check_for_dir.grid(column=0, row=self.pos_in_msg)\n # check if database dir exists:\n output = subprocess.getstatusoutput(\"ls -la | grep database\")\n\n if output[1][0] == 'd':\n self.msg_found_dir = tk.Label(self.content_frame, text='database Ordner gefunden...')\n self.pos_in_msg += 1\n self.msg_found_dir.grid(column=0, row=self.pos_in_msg)\n self.check_for_db_files()\n else:\n self.msg_no_dir = tk.Label(self.content_frame, text='Kein database Ordner gefunden, erstelle ihn jetzt.')\n self.pos_in_msg += 1\n self.msg_no_dir.grid(column=0, row=self.pos_in_msg)\n os.system('mkdir database')\n\n def check_for_db_files(self):\n os.chdir('database/')\n\n # check if we are in the database dir:\n pwd = subprocess.getstatusoutput('pwd')\n\n database_reg_exp = re.compile(r'\\/database$')\n match_obj = re.match(database_reg_exp, pwd[1])\n\n if match_obj == 0:\n self.msg_cant_cd_into_dir = tk.Label(self.content_frame, text='Database Ordner konnte nicht betreten werden...')\n self.pos_in_msg += 1\n self.msg_cant_cd_into_dir.grid(column=0, row=self.pos_in_msg)\n return\n\n self.msg_check_for_db_in_dir = tk.Label(self.content_frame, text='Überprüfe, ob Datenbanken vorhanden sind...')\n self.pos_in_msg += 1\n self.msg_check_for_db_in_dir.grid(column=0, row=self.pos_in_msg)\n\n databases = self.check_dbs_in_pwd()\n\n if len(databases) == 1:\n self.msg_found_one_db = tk.Label(self.content_frame, text='Es wurde genau eine Datenbank gefunden. Es wird geprüft, ob sie verschlüsselt ist')\n self.pos_in_msg += 1\n self.msg_found_one_db.grid(column=0, row=self.pos_in_msg)\n\n self.crypto_obj = work_with_crypto(databases[0], self.content_frame, self.pos_in_msg)\n self.pos_in_msg = self.crypto_obj.get_row_counter()\n\n elif len(databases) > 1:\n self.msg_found_more_dbs = tk.Label(self.content_frame, text='Es wurden verschiedene Datenbanken gefunden, bitte auswählen...')\n self.pos_in_msg += 1\n self.msg_found_more_dbs.grid(column=0, row=self.pos_in_msg)\n self.db_choice_window = tk.Toplevel(self.parent_window)\n self.db_choice_window.lift()\n self.heading_choice_window_frame = tk.Frame(self.db_choice_window)\n self.content_choice_window_frame = tk.Frame(self.db_choice_window)\n self.heading_choice_window_frame.grid(column=0, row=0)\n self.content_choice_window_frame.grid(column=0, row=1)\n self.heading_label_choice_window = ttk.Label(self.heading_choice_window_frame, text='Datenbank auswählen', style=\"My.TLabel\")\n self.heading_label_choice_window.grid(column=0, row=0)\n self.desc_label_choice_window = tk.Label(self.content_choice_window_frame, text='Es wurden mehr als eine Datenbank gefunden. 
Bitte wähle die datenbank aus, die du laden möchtest')\n self.desc_label_choice_window.grid(column=0, row=0, columnspan=3)\n self.option_menu_label_choice_db = tk.Label(self.content_choice_window_frame, text='Datenbank wählen:')\n self.option_menu_label_choice_db.grid(column=0, row=1)\n self.option_menu_choice_db_var = tk.StringVar()\n self.option_menu_choice_db = tk.OptionMenu(self.content_choice_window_frame, self.option_menu_choice_db_var, *databases)\n self.option_menu_choice_db.grid(column=1, row=1)\n\n self.submit_db_choice = tk.Button(self.content_choice_window_frame, text='Abschicken', command=self.submit_choice)\n self.submit_db_choice.grid(column=1, row=2)\n\n\n else:\n self.msg_no_db_found = tk.Label(self.content_frame, text='Es konnte keine Datenbank gefunden werden...')\n self.pos_in_msg += 1\n self.msg_no_db_found.grid(column=0, row=self.pos_in_msg)\n\n self.msg_start_setup_assistent = tk.Label(self.content_frame, text='Setup-Assistent für die Datenbankerstellung wird gestartet...')\n self.pos_in_msg += 1\n self.msg_start_setup_assistent.grid(column=0, row=self.pos_in_msg)\n self.create_new_database()\n #self.db_connection = db_connection\n # build list of database file names:\n\n\n def get_row_counter(self):\n return self.pos_in_msg\n\n def submit_choice(self):\n self.crypto_obj = work_with_crypto(self.option_menu_choice_db_var.get(), self.content_frame, self.pos_in_msg)\n self.pos_in_msg = self.crypto_obj.get_row_counter()\n self.db_choice_window.destroy()\n\n def return_db_connection(self):\n return self.crypto_obj.get_db_connection()\n\n def create_new_database(self):\n # setup assistent for new database\n self.create_database_window = tk.Toplevel()\n style = ttk.Style(self.create_database_window)\n style.configure(\"My.TLabel_setup_window\", font=('Arial', 25))\n self.setup_heading_frame = tk.Frame(self.create_database_window)\n self.setup_heading_frame.grid(column=0, row=0)\n self.heading_label = ttk.Label(self.setup_heading_frame, text='Neue Datenbank erstellen', style=\"My.TLabel_setup_window\")\n self.heading_label.grid(column=0, row=0)\n self.setup_content_frame = tk.Frame(self.create_database_window)\n self.setup_content_frame.grid(column=0, row=1)\n\n self.description_label_setup = tk.Label(self.setup_content_frame, text='Da in dieser Datenbank sensibele Daten gespeichert werden können, kann die Datenbank verschlüsselt werden.')\n self.description_label_setup.grid(column=0, row=0)\n\n self.desc_label_2_setup = tk.Label(self.setup_content_frame, text='Für die Entschlüsselung wird dann ein Passwort benötigt. 
Bitte beachte, dass bei Verlust des Passworts nicht mehr auf die Datenbank zugegriffen werden kann.')\n        self.desc_label_2_setup.grid(column=0, row=1)\n\n        self.desc_label_3_setup = tk.Label(self.setup_content_frame, text='Überlege dir also gut, ob du die Datenbank verschlüsseln möchtest!')\n        self.desc_label_3_setup.grid(column=0, row=2)\n\n        self.database_name_label = tk.Label(self.setup_content_frame, text='Datenbank Name')\n        self.database_name_label.grid(column=0, row=3)\n\n        self.database_name_entry_var = tk.StringVar()\n        # set a default database name containing the current date and time to make the name unique\n        self.current_datetime = dt.datetime.now()\n        self.current_datetime_str = self.current_datetime.strftime(\"%d%m%Y_%H%M\")\n        self.database_name_str = f\"Datenbank_{self.current_datetime_str}.db\"\n        self.database_name_entry_var.set(self.database_name_str)\n\n        self.database_name_entry_widget = tk.Entry(self.setup_content_frame, textvariable=self.database_name_entry_var)\n        self.database_name_entry_widget.grid(column=1, row=3)\n\n        self.encrypt_checkbutton_var = tk.IntVar()\n        self.encrypt_checkbutton_var.set(0)\n        self.encrypt_checkbutton = tk.Checkbutton(self.setup_content_frame, text=\"Verschlüsseln?\", variable=self.encrypt_checkbutton_var, onvalue=1, offvalue=0, command=self.password_widget)\n        self.encrypt_checkbutton.grid(column=1, row=4)\n\n        self.password_label = tk.Label(self.setup_content_frame, text='Verschlüsselungs-Passwort')\n        self.password_label.grid(column=0, row=5)\n\n        self.password_entry_var = tk.StringVar()\n        self.password_entry_widget = tk.Entry(self.setup_content_frame, textvariable=self.password_entry_var, show=\"*\")\n        self.password_entry_widget.grid(column=1, row=5)\n\n        self.submit_button = tk.Button(self.setup_content_frame, text='Abschicken', command=self.submit_new_db)\n        self.submit_button.grid(column=1, row=6)\n\n    def password_widget(self):\n        if self.encrypt_checkbutton_var.get() == 1:\n            self.database_name_entry_var.set(self.database_name_entry_var.get().replace(\".db\", \".enc\"))\n        else:\n            self.database_name_entry_var.set(self.database_name_entry_var.get().replace(\".enc\", \".db\"))\n\n\n    def check_dbs_in_pwd(self):\n        databases_str = subprocess.getstatusoutput('find *.db')\n        # reg_exp_new_line = re.compile(r'[\\S]+(.db)|[\\S]+(.enc)')\n        # pdb.set_trace()\n        # return re.findall(reg_exp_new_line, databases_str[1])\n        return databases_str[1].splitlines()\n\n    def submit_new_db(self):\n        # check if database name already exists:\n        db_results = self.check_dbs_in_pwd()\n        new_db_name = self.database_name_entry_var.get()\n        for db_name in db_results:\n            if new_db_name == db_name:\n                on_click = tkinter.messagebox.showerror(title='Name schon vorhanden!', message='Datenbank Name schon vorhanden. 
Bitte anderen Namen wählen!')\n                if on_click:\n                    self.database_name_entry_widget.configure(bg=\"red\")\n                    self.create_database_window.lift()\n                    return\n\n        # check if a password was given:\n        if self.encrypt_checkbutton_var.get() == 1:\n            if len(self.password_entry_var.get()) < 10:\n                on_click = tkinter.messagebox.showerror(title='Passwort zu kurz', message=\"Das Passwort muss mindestens 10 zeichen lang sein!\")\n                if on_click:\n                    self.password_entry_widget.configure(bg='red')\n                    self.create_database_window.lift()\n                    return\n            else:\n                self.crypto_obj = work_with_crypto(new_db_name, self.content_frame, self.pos_in_msg)\n                self.pos_in_msg = self.crypto_obj.get_row_counter()\n        else:\n            self.crypto_obj = work_with_crypto(new_db_name, self.content_frame, self.pos_in_msg)\n            self.pos_in_msg = self.crypto_obj.get_row_counter()\n","repo_name":"AeroEngDev/TeachManger","sub_path":"src/init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":10281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"35484394606","text":"import logging\n\nfrom logtest.logging.submodule.sub_logging import sublog\n_logger = logging.getLogger(__name__)\n\n\ndef log():\n _logger.error('logging root error')\n _logger.warning('logging root warning')\n _logger.info('logging root info')\n _logger.debug('logging root debug')\n\n sublog()\n\nif __name__ == \"__main__\":\n logger = logging.getLogger('logtest.logging')\n sh = logging.StreamHandler()\n logger.addHandler(sh)\n logger.setLevel(logging.INFO)\n\n log()","repo_name":"norahyk/logtest","sub_path":"provider_package/logtest/logging/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"22565137051","text":"from django.conf.urls import url\nfrom . import views\n\n\nurlpatterns = [\n url(r'^cart/$', views.cart, name=\"cart\"),\n url(r'^add(\\d+)_(\\d+)/$', views.add, name=\"add\"),\n url(r'^edit(\\d+)_(\\d+)/$', views.edit, name=\"edit\"),\n url(r'^delete(\\d+)/$', views.delete, name=\"delete\"),\n]","repo_name":"nanshannan1/-","sub_path":"cart/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"}
+{"seq_id":"212711173","text":"from gi.repository import Gtk\nfrom gi.repository.GdkPixbuf import Pixbuf\n\nclass ImageComboBox(Gtk.ComboBox):\n def __init__(self):\n Gtk.ComboBox.__init__(self)\n renderer_pixbuf = Gtk.CellRendererPixbuf()\n self.pack_start(renderer_pixbuf, True)\n self.add_attribute(renderer_pixbuf, \"pixbuf\", 0)\n renderer_text = Gtk.CellRendererText()\n self.pack_start(renderer_text, True)\n self.add_attribute(renderer_text, \"text\", 1)\n self.values_dict = dict()\n\n def build_and_set_model(self, values):\n self.values_dict.clear()\n store = Gtk.ListStore(Pixbuf, str)\n if values:\n for value in values:\n if isinstance(value, list):\n store.append([value[0], value[1]])\n self.values_dict[value[1]] = value[1]\n else:\n store.append([value.get_pixbuf(), value.get_name()])\n self.values_dict[value.get_id()] = value\n self.set_model(store)\n self.set_id_column(1)\n\n def set_value(self, value):\n if value in self.values_dict:\n self.set_active_id(value)\n\n def get_value(self, default=None):\n active_id = self.get_active_id()\n if active_id is not None:\n value_ob = self.values_dict[active_id]\n if hasattr(value_ob, \"get_id\"):\n return value_ob.get_id()\n return value_ob\n return default\n\n","repo_name":"sujoykroy/motion-picture","sub_path":"editor/MotionPicture/gui_utils/image_combo_box.py","file_name":"image_combo_box.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"14859155860","text":"import threading\nimport time\nfrom time import sleep\n\nimport PyQt5.QtGui\nfrom PyQt5.QtCore import QModelIndex\nfrom PyQt5.QtWidgets import QMainWindow, QApplication\n\nfrom Client.Actions.ActionElements.TransferType import TransferType\nfrom Client.DesktopApp.DataHandling.ActionHandler import ActionHandler\nfrom Client.DesktopApp.ServerCommunication.ConnectToServerDesktop import ConnectToServerDesktop\nfrom Client.DesktopApp.UI.AuthorizationWindow import AuthorizationWindow\nfrom Client.DesktopApp.UI.Ui_MainWindow import Ui_MainWindow\nfrom Client.Elements.UserInfoContainer import UserInfoContainer\n\n\nclass MainWindow(QMainWindow):\n def __init__(self, parent=None):\n super().__init__(parent)\n # Привязка формы\n self.__ui = Ui_MainWindow()\n self.__ui.setupUi(self)\n # Кнопки\n self.__ui.pushButton_SignOut.pressed.connect(self.__sign_out_command)\n self.__ui.pushButton_SendMessage.pressed.connect(self.__send_message_command)\n self.__ui.radioButton_AllUsers.pressed.connect(self.__get_all_users_command)\n self.__ui.radioButton_UsersOnline.pressed.connect(self.__get_online_users_command)\n # Общие ресурсы\n self.__user_info = UserInfoContainer()\n self.__my_client = ConnectToServerDesktop(self.__user_info)\n self.__authorization_window = AuthorizationWindow(self.__my_client, self.__user_info)\n self.__list_view_users_model = PyQt5.QtGui.QStandardItemModel()\n self.__ui.listView_Users.setModel(self.__list_view_users_model)\n self.__list_view_messages_model = PyQt5.QtGui.QStandardItemModel()\n self.__ui.listView_MessageHistory.setModel(self.__list_view_messages_model)\n self.__remember_index = self.__ui.listView_Users.currentIndex()\n # Авторизация пользователя\n self.__sign_out_command()\n # Поток получения сообщений\n self.__time_sleep = 2\n self.__is_continue_getting_messages = True\n threading.Thread(target=self.__get_messages_command).start()\n # При закрытии окна\n self.__my_close = False\n\n def __sign_out_command(self):\n self.__clear_all()\n while not self.__user_info.is_authorized():\n self.__authorization_window.exec()\n else:\n self.__is_continue_getting_messages = True\n threading.Thread(target=self.__get_messages_command).start()\n\n def __send_message_command(self):\n selected_user = self.__ui.listView_Users.currentIndex().data()\n if selected_user is None:\n selected_user = self.__remember_user\n selected_user_login = selected_user.split(\"\\n\")[0]\n message = self.__ui.lineEdit_SendMessageField.text()\n if message.__len__() > 0 and selected_user_login.__len__() > 0:\n self.__user_info.last_transfer_type = TransferType.MessageSending\n self.__user_info.last_data_to_send.addressee = selected_user_login\n self.__user_info.last_data_to_send.message_to_send = message\n self.__my_client.connect_to_server()\n self.__ui.lineEdit_SendMessageField.clear()\n\n def __get_all_users_command(self):\n self.__time_sleep = 2\n self.__user_info.last_transfer_type = TransferType.GetAllUsers\n self.__my_client.connect_to_server()\n self.__fill_users_model(self.__user_info.last_data_to_send.get_all_users())\n self.__is_continue_getting_users = False\n\n def __get_online_users_command(self):\n # Поток получения пользователей\n self.__time_sleep = 0.3\n self.__is_continue_getting_users = True\n threading.Thread(target=self.__online_users_thread_command).start()\n\n def __get_messages_command(self):\n while threading.main_thread().is_alive() and self.__is_continue_getting_messages:\n if self.__user_info.is_authorized():\n self.__user_info.last_transfer_type = 
TransferType.MessageRequest\n                self.__my_client.connect_to_server()\n                self.__fill_message_history()\n                time.sleep(self.__time_sleep)\n            else:\n                continue\n\n    def __online_users_thread_command(self):\n        while threading.main_thread().is_alive() and self.__is_continue_getting_users:\n            if self.__user_info.is_authorized():\n                selected_user = self.__ui.listView_Users.currentIndex().data()\n                if selected_user is not None:\n                    self.__remember_user = selected_user\n                self.__user_info.last_transfer_type = TransferType.GetOnlineUsers\n                self.__my_client.connect_to_server()\n                self.__fill_users_model(self.__user_info.last_data_to_send.get_online_users())\n                self.__ui.listView_MessageHistory.scrollToBottom()\n                time.sleep(1)\n            else:\n                continue\n\n    def __fill_users_model(self, to_fill: list):\n        self.__list_view_users_model.clear()\n        for el in to_fill:\n            item = PyQt5.QtGui.QStandardItem(el)\n            self.__list_view_users_model.appendRow(item)\n\n    def __fill_message_history(self):\n        self.__list_view_messages_model.clear()\n        self.__user_info.last_data_to_send.message_history.reverse()\n        for el in self.__user_info.last_data_to_send.message_history:\n            item = PyQt5.QtGui.QStandardItem(el)\n            self.__list_view_messages_model.appendRow(item)\n\n    def __clear_all(self):\n        self.__is_continue_getting_messages = False\n        # Sign out of the account\n        if self.__user_info.is_authorized():\n            self.__user_info.last_transfer_type = TransferType.SignOut\n            self.__my_client.connect_to_server()\n        # Clear all fields/entities\n        self.__user_info.clear_user()\n        self.__list_view_messages_model.clear()\n        self.__list_view_users_model.clear()\n        self.__ui.lineEdit_SendMessageField.clear()\n\n    def __close_event(self, event):\n        if self.__my_close:\n            self.__user_info.last_transfer_type = TransferType.SignOut\n            self.__my_client.connect_to_server()\n        else:\n            event.ignore()\n","repo_name":"JustLornet/PythonDesktopMessenger","sub_path":"src/Client/DesktopApp/UI/MainWindow.py","file_name":"MainWindow.py","file_ext":"py","file_size_in_byte":6277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"33661682817","text":"from abc import ABCMeta, abstractmethod\nimport inspect\n\nfrom .mem_mode import MemMode, mem_mode_to_string\nfrom ...resources.workload import AbstractWorkload\n\nfrom m5.objects import (\n AddrRange,\n System,\n Port,\n IOXBar,\n ClockDomain,\n SrcClockDomain,\n VoltageDomain,\n)\n\nfrom typing import List, Optional, Sequence, Tuple\n\n\nclass AbstractBoard:\n \"\"\"The abstract board interface.\n\n Boards are used as the object which can connect together all other\n components. This abstract class defines the external interface that other\n boards must provide. Boards can be specialized for different ISAs or system\n designs (e.g., core counts, cache types, memory channels, I/O devices, etc)\n\n In addition to providing the place that system components are connected,\n the board also exposes an interface for the caches, processor, and memory\n to interact.\n\n The board also exposes an interface to set up I/O devices which needs to be\n specialized for each ISA and/or platform.\n\n Board inherits from System and can therefore be used as a System simobject\n when required.\n \"\"\"\n\n __metaclass__ = ABCMeta\n\n def __init__(\n self,\n clk_freq: str,\n processor: \"AbstractProcessor\",\n memory: \"AbstractMemorySystem\",\n cache_hierarchy: Optional[\"AbstractCacheHierarchy\"],\n ) -> None:\n \"\"\"\n :param clk_freq: The clock frequency for this board.\n :param processor: The processor for this board.\n :param memory: The memory for this board.\n :param cache_hierarchy: The Cache Hierarchy for this board.\n In some boards caches can be optional. If so,\n that board must override `_connect_things`.\n \"\"\"\n\n if not isinstance(self, System):\n raise Exception(\"A gem5 stdlib board must inherit from System.\")\n\n # Set up the clock domain and the voltage domain.\n self.clk_domain = SrcClockDomain()\n self.clk_domain.clock = clk_freq\n self.clk_domain.voltage_domain = VoltageDomain()\n\n # Set the processor, memory, and cache hierarchy.\n self.processor = processor\n self.memory = memory\n self._cache_hierarchy = cache_hierarchy\n if cache_hierarchy is not None:\n self.cache_hierarchy = cache_hierarchy\n\n # This variable determines whether the board is to be executed in\n # full-system or syscall-emulation mode. This is set when the workload\n # is defined. 
Whether or not the board is to be run in FS mode is\n # determined by which kind of workload is set.\n self._is_fs = None\n\n # This variable is used to record the checkpoint directory which is\n # set when declaring the board's workload and then used by the\n # Simulator module.\n self._checkpoint = None\n\n # Setup the board and memory system's memory ranges.\n self._setup_memory_ranges()\n\n # Setup board properties unique to the board being constructed.\n self._setup_board()\n\n # A private variable to record whether `_connect_things` has been\n # been called.\n self._connect_things_called = False\n\n def get_processor(self) -> \"AbstractProcessor\":\n \"\"\"Get the processor connected to the board.\n\n :returns: The processor.\n \"\"\"\n return self.processor\n\n def get_memory(self) -> \"AbstractMemory\":\n \"\"\"Get the memory (RAM) connected to the board.\n\n :returns: The memory system.\n \"\"\"\n return self.memory\n\n def get_mem_ports(self) -> Sequence[Tuple[AddrRange, Port]]:\n \"\"\"Get the memory ports exposed on this board\n\n Note: The ports should be returned such that the address ranges are\n in ascending order.\n \"\"\"\n return self.get_memory().get_mem_ports()\n\n def get_cache_hierarchy(self) -> Optional[\"AbstractCacheHierarchy\"]:\n \"\"\"Get the cache hierarchy connected to the board.\n\n :returns: The cache hierarchy.\n \"\"\"\n return self._cache_hierarchy\n\n def get_cache_line_size(self) -> int:\n \"\"\"Get the size of the cache line.\n\n :returns: The size of the cache line size.\n \"\"\"\n return self.cache_line_size\n\n def connect_system_port(self, port: Port) -> None:\n self.system_port = port\n\n def set_mem_mode(self, mem_mode: MemMode) -> None:\n \"\"\"\n Set the memory mode of the board.\n\n :param mem_mode: The memory mode the board is to be set to.\n \"\"\"\n self.mem_mode = mem_mode_to_string(mem_mode=mem_mode)\n\n def get_clock_domain(self) -> ClockDomain:\n \"\"\"Get the clock domain.\n :returns: The clock domain.\n \"\"\"\n return self.clk_domain\n\n def _set_fullsystem(self, is_fs: bool) -> None:\n \"\"\"\n Sets whether this board is to be run in FS or SE mode. This is set\n via the workload (the workload specified determines whether this will\n be run in FS mode or not). This is not intended to be set in a\n configuration script ergo, it's private.\n\n :param is_fs: Set whether the board is to be run in FS mode or SE mode.\n \"\"\"\n self._is_fs = is_fs\n\n def is_fullsystem(self) -> bool:\n \"\"\"\n Returns True if the board is to be run in FS mode. Otherwise the board\n is to be run in Se mode. An exception will be thrown if this has not\n been set.\n\n This function is used by the Simulator module to setup the simulation\n correctly.\n \"\"\"\n if self._is_fs == None:\n raise Exception(\n \"The workload for this board not yet to be set. \"\n \"Whether the board is to be executed in FS or SE \"\n \"mode is determined by which 'set workload' \"\n \"function is run.\"\n )\n return self._is_fs\n\n def set_workload(self, workload: AbstractWorkload) -> None:\n \"\"\"\n Set the workload for this board to run.\n\n This function will take the workload specified and run the correct\n workload function (e.g., `set_kernel_disk_workload`) with the correct\n parameters\n\n :params workload: The workload to be set to this board.\n \"\"\"\n\n try:\n func = getattr(self, workload.get_function_str())\n except AttributeError:\n raise Exception(\n \"This board does not support this workload type. 
\"\n f\"This board does not contain the necessary \"\n f\"`{workload.get_function_str()}` function\"\n )\n\n func_signature = inspect.signature(func)\n for param_name in workload.get_parameters().keys():\n if param_name not in func_signature.parameters.keys():\n raise Exception(\n \"Workload specifies non-existent parameter \"\n f\"`{param_name}` for function \"\n f\"`{workload.get_function_str()}` \"\n )\n\n func(**workload.get_parameters())\n\n @abstractmethod\n def _setup_board(self) -> None:\n \"\"\"\n This function is called in the AbstractBoard constructor, before the\n memory, processor, and cache hierarchy components are incorporated via\n `_connect_thing()`, but after the `_setup_memory_ranges()` function.\n This function should be overridden by boards to specify components,\n connections unique to that board.\n \"\"\"\n raise NotImplementedError\n\n # Technically `get_dma_ports` returns a list. This list could be empty to\n # indicate the presense of dma ports. Though I quite like having this\n # boolean to quickly check a board.\n @abstractmethod\n def has_dma_ports(self) -> bool:\n \"\"\"Determine whether the board has DMA ports or not.\n\n :returns: True if the board has DMA ports, otherwise False.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def get_dma_ports(self) -> List[Port]:\n \"\"\"Get the board's Direct Memory Access ports.\n This abstract method must be implemented within the subclasses if they\n support DMA and/or full system simulation.\n\n :returns: A List of the Direct Memory Access ports.\n\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def has_io_bus(self) -> bool:\n \"\"\"Determine whether the board has an IO bus or not.\n\n :returns: True if the board has an IO bus, otherwise False.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def get_io_bus(self) -> IOXBar:\n \"\"\"Get the board's IO Bus.\n This abstract method must be implemented within the subclasses if they\n support DMA and/or full system simulation.\n\n The I/O bus is a non-coherent bus (in the classic caches). On the CPU\n side, it accepts requests meant for I/O devices. On the memory side, it\n forwards these requests to the devices (e.g., the interrupt\n controllers on each core).\n\n :returns: The I/O Bus.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def has_coherent_io(self) -> bool:\n \"\"\"Determine whether the board needs coherent I/O\n\n :returns: True if the board needs coherent I/O, false otherwise\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def get_mem_side_coherent_io_port(self):\n \"\"\"Get the memory-side coherent I/O port.\n This abstract method must be implemented if has_coherent_io is true.\n\n This returns a *port* (not a bus) that should be connected to a\n CPU-side port for which coherent I/O (DMA) is issued.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def _setup_memory_ranges(self) -> None:\n \"\"\"\n Set the memory ranges for this board and memory system.\n\n This is called in the constructor, prior to `_setup_board` and\n `_connect_things`. It should query the board's memory to determine the\n size and the set the memory ranges on the memory system and on the\n board.\n\n The simplest implementation sets the board's memory range to the size\n of memory and memory system's range to be the same as the board. 
Full\n system implementations will likely need something more complicated.\n\n Notes\n -----\n * This *must* be called prior to the incorporation of the cache\n hierarchy (via `_connect_things`) as cache hierarchies depend upon\n knowing the memory system's ranges.\n \"\"\"\n raise NotImplementedError\n\n def _connect_things(self) -> None:\n \"\"\"Connects all the components to the board.\n\n The order of this board is always:\n\n 1. Connect the memory.\n 2. Connect the cache hierarchy.\n 3. Connect the processor.\n\n Developers may build upon this assumption when creating components.\n\n Notes\n -----\n\n * The processor is incorporated after the cache hierarchy due to a bug\n noted here: https://gem5.atlassian.net/browse/GEM5-1113. Until this\n bug is fixed, this ordering must be maintained.\n * Once this function is called `_connect_things_called` *must* be set\n to `True`.\n \"\"\"\n\n if self._connect_things_called:\n raise Exception(\n \"The `_connect_things` function has already been called.\"\n )\n\n # Incorporate the memory into the motherboard.\n self.get_memory().incorporate_memory(self)\n\n # Incorporate the cache hierarchy for the motherboard.\n if self.get_cache_hierarchy():\n self.get_cache_hierarchy().incorporate_cache(self)\n\n # Incorporate the processor into the motherboard.\n self.get_processor().incorporate_processor(self)\n\n self._connect_things_called = True\n\n def _post_instantiate(self):\n \"\"\"Called to set up anything needed after m5.instantiate\"\"\"\n self.get_processor()._post_instantiate()\n if self.get_cache_hierarchy():\n self.get_cache_hierarchy()._post_instantiate()\n self.get_memory()._post_instantiate()\n\n def _pre_instantiate(self):\n \"\"\"To be called immediately before m5.instantiate. This is where\n `_connect_things` is executed by default.\"\"\"\n\n # Connect the memory, processor, and cache hierarchy.\n self._connect_things()\n\n def _connect_things_check(self):\n \"\"\"\n Here we check that connect things has been called and throw an\n Exception if it has not.\n\n Since v22.1 `_connect_things` function has\n been moved from the AbstractBoard constructor to the\n `_pre_instantation` function. Users who have used the gem5 stdlib\n components (i.e., boards which inherit from AbstractBoard) and the\n Simulator module should notice no change. Those who do not use the\n Simulator module and instead called `m5.instantiate` directly must\n call `AbstractBoard._pre_instantation` prior so `_connect_things` is\n called. In order to avoid confusion, this check has been incorporated\n and the Exception thrown explains the fix needed to convert old scripts\n to function with v22.1.\n\n This function is called in `AbstractSystemBoard.createCCObject` and\n ArmBoard.createCCObject`. Both these functions override\n `SimObject.createCCObject`. We can not do that here as AbstractBoard\n does not inherit form System.\n \"\"\"\n if not self._connect_things_called:\n raise Exception(\n \"\"\"\nAbstractBoard's `_connect_things` function has not been called. This is likely\ndue to not running a board outside of the gem5 Standard Library Simulator\nmodule. If this is the case, this can be resolved by calling\n`._pre_instantiate()` prior to `m5.instantiate()`.\n\"\"\"\n )\n","repo_name":"gem5/gem5","sub_path":"src/python/gem5/components/boards/abstract_board.py","file_name":"abstract_board.py","file_ext":"py","file_size_in_byte":13888,"program_lang":"python","lang":"en","doc_type":"code","stars":1196,"dataset":"github-code","pt":"78"}
+{"seq_id":"39502826960","text":"from keyphrase_extractor import *\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport os\nimport shutil\n\n\noutput_path = 'dataset_eda_updated/'\n\nif os.path.exists(output_path):\n shutil.rmtree(output_path)\n\nos.mkdir(output_path)\n\nkeyphrase_extractor_chem_verbose = KeyphraseExtractor('oli-gen-chem', verbose=True)\nfor candidate_method in ['ngrams', 'parsing']:\n keyphrase_extractor_chem_verbose.get_candidate_keyphrases(candidate_method, verbose=True)\nprint('--------\\n\\n')\ndataset_chem_verbose = keyphrase_extractor_chem_verbose.data_df\ndataset_chem_verbose['Dataset'] = pd.Series(['oli-gen-chem' for x in range(len(dataset_chem_verbose.index))])\n\nkeyphrase_extractor_bio = KeyphraseExtractor('oli-intro-bio', verbose=True)\nfor candidate_method in ['ngrams', 'parsing']:\n keyphrase_extractor_bio.get_candidate_keyphrases(candidate_method, verbose=True)\nprint('--------\\n\\n')\ndataset_bio = keyphrase_extractor_bio.data_df\ndataset_bio['Dataset'] = pd.Series(['oli-intro-bio' for x in range(len(dataset_bio.index))])\n\nkeyphrase_extractor_inspec = KeyphraseExtractor('inspec', oli_labels='short', verbose=True)\nfor candidate_method in ['ngrams', 'parsing']:\n keyphrase_extractor_inspec.get_candidate_keyphrases(candidate_method, verbose=True)\nprint('--------\\n\\n')\ndataset_inspec = keyphrase_extractor_inspec.data_df\ndataset_inspec['Dataset'] = pd.Series(['inspec' for x in range(len(dataset_inspec.index))])\ndataset_inspec = dataset_inspec.drop(['id', 'document', 'other_metadata', 'doc_bio_tags', 'extractive_keyphrases', 'abstractive_keyphrases', 'processed_document_whole', 'processed_document_tokenized'], axis=1)\n\nkeyphrase_extractor_kdd = KeyphraseExtractor('kdd', oli_labels='short', verbose=True)\nfor candidate_method in ['ngrams', 'parsing']:\n keyphrase_extractor_kdd.get_candidate_keyphrases(candidate_method, verbose=True)\nprint('--------\\n\\n')\ndataset_kdd = keyphrase_extractor_kdd.data_df\ndataset_kdd['Dataset'] = pd.Series(['kdd' for x in range(len(dataset_kdd.index))])\ndataset_kdd = dataset_kdd.drop(['id', 'document', 'other_metadata', 'doc_bio_tags', 'extractive_keyphrases', 'abstractive_keyphrases', 'processed_document_whole', 'processed_document_tokenized'], axis=1)\n\n# print(dataset_chem_verbose.head())\n# print(dataset_bio.head())\n# print(dataset_inspec.head())\n# print(dataset_kdd.head())\n\n# gold kp per document across all datasets\nfull_dataset = pd.concat([dataset_chem_verbose, dataset_bio, dataset_inspec, dataset_kdd])\nfull_dataset['Gold Keyphrase Count'] = full_dataset['keyphrases'].str.len().fillna(0).astype(int)\n\nsns.boxplot(data=full_dataset, x='Dataset', y='Gold Keyphrase Count', showfliers=False)\nplt.title('Number of Gold Keyphrases Per Document Across AKE Datasets')\nfpath = 'dataset_eda_updated/target_keyphrases_per_document.pdf'\nplt.savefig(fpath)\nplt.close()\n\nkp_list_chem = pd.Series([item[0] for item in dataset_chem_verbose['keyphrases'].values]).unique()\nkp_list_bio = pd.Series([item[0] for item in dataset_bio['keyphrases'].values]).unique()\n\nkp_list_lengths_chem = [len(word_tokenize(kp)) for kp in kp_list_chem]\nkp_list_lengths_bio = [len(word_tokenize(kp)) for kp in kp_list_bio]\nkp_list_lengths_inspec = [len(word_tokenize(kp)) for kp_list in dataset_inspec.keyphrases for kp in kp_list]\nkp_list_lengths_kdd = [len(word_tokenize(kp)) for kp_list in dataset_kdd.keyphrases for kp in kp_list]\n\ndoc_lengths_chem = [len(word_tokenize(doc)) for doc in 
dataset_chem_verbose.whole_document]\ndoc_lengths_bio = [len(word_tokenize(doc)) for doc in dataset_bio.whole_document]\ndoc_lengths_inspec = [len(word_tokenize(doc)) for doc in dataset_inspec.whole_document]\ndoc_lengths_kdd = [len(word_tokenize(doc)) for doc in dataset_kdd.whole_document]\n\nkp_chem = pd.DataFrame()\nkp_chem['KP Lengths'] = kp_list_lengths_chem\nkp_chem['Dataset'] = pd.Series(['oli-gen-chem' for x in range(len(kp_chem.index))])\n\ndoc_chem = pd.DataFrame()\ndoc_chem['Document Lengths'] = doc_lengths_chem\ndoc_chem['Dataset'] = pd.Series(['oli-gen-chem' for x in range(len(doc_chem.index))])\n\nkp_bio = pd.DataFrame()\nkp_bio['KP Lengths'] = kp_list_lengths_bio\nkp_bio['Dataset'] = pd.Series(['oli-intro-bio' for x in range(len(kp_bio.index))])\n\ndoc_bio = pd.DataFrame()\ndoc_bio['Document Lengths'] = doc_lengths_bio\ndoc_bio['Dataset'] = pd.Series(['oli-intro-bio' for x in range(len(doc_bio.index))])\n\nkp_inspec = pd.DataFrame()\nkp_inspec['KP Lengths'] = kp_list_lengths_inspec\nkp_inspec['Dataset'] = pd.Series(['inspec' for x in range(len(kp_inspec.index))])\n\ndoc_inspec = pd.DataFrame()\ndoc_inspec['Document Lengths'] = doc_lengths_inspec\ndoc_inspec['Dataset'] = pd.Series(['inspec' for x in range(len(doc_inspec.index))])\n\nkp_kdd = pd.DataFrame()\nkp_kdd['KP Lengths'] = kp_list_lengths_kdd\nkp_kdd['Dataset'] = pd.Series(['kdd' for x in range(len(kp_kdd.index))])\n\ndoc_kdd = pd.DataFrame()\ndoc_kdd['Document Lengths'] = doc_lengths_kdd\ndoc_kdd['Dataset'] = pd.Series(['kdd' for x in range(len(doc_kdd.index))])\n\nfull_kp = pd.concat([kp_chem, kp_bio, kp_inspec, kp_kdd])\nfull_doc = pd.concat([doc_chem, doc_bio, doc_inspec, doc_kdd])\n\n# length (number of tokens) per gold kp\nsns.boxplot(data=full_kp, x='Dataset', y='KP Lengths', showfliers=False)\nplt.title('Length of Gold Keyphrases (# Tokens) Across AKE Datasets')\nfpath = 'dataset_eda_updated/target_keyphrase_length.pdf'\nplt.savefig(fpath)\nplt.close()\n\n# length (number of tokens) per document\nsns.boxplot(data=full_doc, x='Dataset', y='Document Lengths', showfliers=False)\nplt.title('Length of Documents (# Tokens) Across AKE Datasets')\nfpath = 'dataset_eda_updated/document_length.pdf'\nplt.savefig(fpath)\nplt.close()\n","repo_name":"IEClab-NCSU/SMART","sub_path":"SMART_CORE/skill_labeling/embedding_ake_study/dataset_eda.py","file_name":"dataset_eda.py","file_ext":"py","file_size_in_byte":5608,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"}
+{"seq_id":"1897264426","text":"def linha():\n    print('-='*20)\n\n\ndef fatorial(n = 1):  # Optional parameter (if no value is passed for n, it defaults to 1)\n    f = 1\n    for c in range(n, 0, -1):\n        f = f * c\n    return f\n\n\ndef par(n=0):\n    if n % 2 == 0:\n        return True\n    else:\n        return False\n\n\n# Main program\nnum = int(input('Enter a number: '))\nprint(f'The factorial of {num} is {fatorial(num)}')\nlinha()\nf1 = fatorial(4)\nf2 = fatorial(5)\nf3 = fatorial()\nprint(f'The factorials of 4, 5 and the default are {f1}, {f2} and {f3}')\nlinha()\nrsp = int(input('Enter a number so we can check whether it is even: '))\nprint(f'{par(rsp)}')\nif par(rsp):\n    print('It is even!')\nelse:\n    print('It is not even!')\n","repo_name":"millenagena/Python-Scripts","sub_path":"###COMANDOS AULA21.py","file_name":"###COMANDOS AULA21.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"4976346094","text":"# Example of a simply supported beam with a point load.\n# Units used for the model in this example are inches and kips\n\n# Import `FEModel3D` from `PyNite`\nfrom PyNite import FEModel3D\n\n# Import 'Visualization' for rendering the model\nfrom PyNite import Visualization\n\n# Create a new finite element model\nSimpleBeam = FEModel3D()\n\n# Add nodes (14 ft apart)\nSimpleBeam.add_node('N1', 0, 0, 0)\nSimpleBeam.add_node('N2', 14*12, 0, 0)\n\n# Add a beam with the following properties:\n# E = 29000 ksi, G = 11400 ksi, Iy = 100 in^4, Iz = 150 in^4, J = 250 in^4, A = 20 in^2\nSimpleBeam.add_member('M1', 'N1', 'N2', 29000, 11400, 100, 150, 250, 20)\n\n# Provide simple supports\nSimpleBeam.def_support('N1', True, True, True, True, False, False) # Constrained for torsion at 'N1'\nSimpleBeam.def_support('N2', True, True, True, False, False, False) # Not constrained for torsion at 'N2'\n\n# Add a downward point load of 5 kips at the midspan of the beam\nSimpleBeam.add_member_pt_load('M1', 'Fy', -5, 7*12, 'D') # 5 kips Dead load\nSimpleBeam.add_member_pt_load('M1', 'Fy', -8, 7*12, 'L') # 8 kips Live load\n\n# Add load combinations\nSimpleBeam.add_load_combo('1.4D', {'D':1.4})\nSimpleBeam.add_load_combo('1.2D+1.6L', {'D':1.2, 'L':1.6})\n\n# Analyze the beam and perform a statics check\nSimpleBeam.analyze(check_statics=True)\n\nVisualization.render_model(SimpleBeam, annotation_size=10, deformed_shape=True, deformed_scale=30, render_loads=True, combo_name='1.2D+1.6L')\n\n# Plot the shear, moment, and deflection diagrams\nSimpleBeam.Members['M1'].plot_shear('Fy', '1.2D+1.6L')\nSimpleBeam.Members['M1'].plot_moment('Mz', '1.2D+1.6L')\nSimpleBeam.Members['M1'].plot_deflection('dy', '1.2D+1.6L')\n\n# Print reactions at each end of the beam\nprint('Left Support Reaction:', SimpleBeam.Nodes['N1'].RxnFY['1.2D+1.6L'], 'kip')\nprint('Right Support Reaction:', SimpleBeam.Nodes['N2'].RxnFY['1.2D+1.6L'], 'kip')\n\n# Print the max/min shears and moments in the beam\nprint('Maximum Shear:', SimpleBeam.Members['M1'].max_shear('Fy', '1.2D+1.6L'), 'kip')\nprint('Minimum Shear:', SimpleBeam.Members['M1'].min_shear('Fy', '1.2D+1.6L'), 'kip')\nprint('Maximum Moment:', SimpleBeam.Members['M1'].max_moment('Mz', '1.2D+1.6L')/12, 'kip-ft')\nprint('Minimum Moment:', SimpleBeam.Members['M1'].min_moment('Mz', '1.2D+1.6L')/12, 'kip-ft')\n\n# Print the max/min deflections in the beam\nprint('Maximum Deflection:', SimpleBeam.Members['M1'].max_deflection('dy', '1.2D+1.6L'), 'in')\nprint('Minimum Deflection:', SimpleBeam.Members['M1'].min_deflection('dy', '1.2D+1.6L'), 'in')\n\n# The following lines can be uncommented to create a PDF report. Follow the instructions on the\n# wiki under \"Generating PDF Reports\" to prevent errors. The report will be output to the PyNite\n# folder unless the 'output_filepath' variable below is modified.\n\n# from PyNite import Reporting\n# Reporting.CreateReport(SimpleBeam, output_filepath='.//PyNite Report.pdf', plates=False, plate_corner_forces=False, \\\n#                        plate_center_forces=False, plate_corner_membrane=False, plate_center_membrane=False)","repo_name":"tamalone1/PyNite","sub_path":"Examples/Simple Beam - Point Load.py","file_name":"Simple Beam - Point Load.py","file_ext":"py","file_size_in_byte":3042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"}
+{"seq_id":"40368331707","text":"# 4_12 negative_power\na = float(input())\nn = int(input())\n\n\ndef power(a, n):\n if n == 0:\n return 1\n\n if a == 0:\n return 0\n\n res = 1\n i = n\n while i != 0:\n if n > 0:\n res = res * a\n i = i - 1\n else:\n res = 1 / a * res\n i = i + 1\n return res\n\nprint(power(a, n))\n","repo_name":"rshekhovtsov/py-intro","sub_path":"week4/4_12 negative_power.py","file_name":"4_12 negative_power.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"5218986721","text":"# -*- coding:utf-8 -*-\n\nimport tensorflow as tf\nimport numpy as np\nimport math\nfrom tqdm import tqdm\nfrom path_config import embedding_matrix_path, train_x_path, train_y_path, test_x_path\n\n# load embedding\ndef load_embedding_matrix():\n    return np.load(embedding_matrix_path + '.npy')\n\ndef load_train_Dataset(max_enc_len, max_dec_len):\n    train_X = np.load(train_x_path + '.npy')\n    train_Y = np.load(train_y_path + '.npy')\n\n    train_X = train_X[:, :max_enc_len]\n    train_Y = train_Y[:, :max_dec_len]\n    return train_X, train_Y\n\ndef load_test_dataset(max_enc_len):\n    test_X = np.load(test_x_path)[:, :max_enc_len]\n    return test_X\n\ndef train_batch_generator(batch_size, max_enc_len=200, max_dec_len=150):\n    train_X, train_Y = load_train_Dataset(max_enc_len, max_dec_len)\n    dataset = tf.data.Dataset.from_tensor_slices((train_X, train_Y)).shuffle(len(train_X))\n    dataset = dataset.batch(batch_size, drop_remainder=True)\n    steps_per_epoch = len(train_X) // batch_size\n\n    return dataset, steps_per_epoch\n\ndef greedy_decode(model, test_X, params, vocab):\n    batch_size = params['batch_size']\n    # list that collects the decoded results\n    results = []\n    # number of input samples\n    sample_size = len(test_X)\n    # number of batch rounds; math.ceil rounds any fraction up\n    steps_epoch = math.ceil(sample_size / batch_size)\n    for i in tqdm(range(steps_epoch)):\n        batch_data = test_X[i * batch_size:(i+1) * batch_size]\n        results += batch_greedy_decode(model, batch_data, vocab, params)\n    return results\n\ndef batch_greedy_decode(model, batch_data, vocab, params):\n    batch_size = len(batch_data)\n    predicts = [''] * batch_size\n\n    inps = tf.convert_to_tensor(batch_data)\n    # initialize the hidden-state input\n    hidden = [tf.zeros((batch_size, params['enc_units']))]\n    # create the encoder\n    enc_output, enc_hidden = model.encoder(inps, hidden)\n    dec_hidden = enc_hidden\n    # *BATCH_SIZE\n    dec_input = tf.expand_dims([vocab.word_to_id(vocab.STOP_DECODING)] * batch_size, 1)\n\n    context_vector, _ = model.attention(dec_hidden, enc_output)\n\n    for t in range(params['max_dec_len']):\n        # compute the context vector\n        context_vector, attention_weights = model.attention(dec_hidden, enc_output)\n        predictions, dec_hidden = model.decoder(dec_input,\n                                                dec_hidden,\n                                                enc_output,\n                                                context_vector)\n        # greedy search: convert the predictions to ids via argmax\n        predicted_ids = tf.argmax(predictions, axis=1).numpy()\n\n        for index_, predicted_id in enumerate(predicted_ids):\n            predicts[index_] += vocab.id_to_word(predicted_id) + ' '\n\n        # expand predicted_ids by one dim and feed it, with the updated dec_hidden, as the next-step input\n        dec_input = tf.expand_dims(predicted_ids, 1)\n\n    results = []\n    for predict in predicts:\n        predict = predict.strip()\n        if vocab.STOP_DECODING in predict:\n            # keep only the content before the stop marker\n            predict = predict[:predict.index(vocab.STOP_DECODING)]\n        results.append(predict)\n    return results\n\n\nclass Vocab:\n    # special vocabulary tokens (spellings assumed; any sentinel strings work as long as they match the vocab file)\n    PAD_TOKEN = '<PAD>'\n    UNKNOWN_TOKEN = '<UNK>'\n    START_DECODING = '<START>'\n    STOP_DECODING = '<STOP>'\n\n    def __init__(self, vocab_file, vocab_max_size=None):\n        \"\"\"\n        Vocab object; wraps the basic vocabulary operations\n        :param vocab_file: path to the stored vocab file\n        :param vocab_max_size: maximum vocabulary size\n        \"\"\"\n        self.word2id, self.id2word = self.load_vocab(vocab_file, vocab_max_size)\n        self.count = len(self.word2id)\n\n    @staticmethod\n    def load_vocab(file_path, vocab_max_size=None):\n        vocab = {}\n        reverse_vocab = {}\n        with open(file_path, 'r', encoding='utf-8') as f:\n            for line in f.readlines():\n                word, index = line.strip().split(\"\\t\")\n                index = int(index)\n                # if the vocab exceeds the specified size, stop and truncate\n                if vocab_max_size and index > vocab_max_size:\n                    break\n\n                vocab[word] = index\n                reverse_vocab[index] = word\n        return vocab, reverse_vocab\n\n\n    def word_to_id(self, word):\n        if word not in self.word2id:\n            return self.word2id[self.UNKNOWN_TOKEN]\n        return self.word2id[word]\n\n    def id_to_word(self, word_id):\n        if word_id not in self.id2word:\n            raise ValueError('Id not found in vocab: %d' % word_id)\n        return self.id2word[word_id]\n","repo_name":"fangwei136/seq_seq-","sub_path":"func/seq_helper.py","file_name":"seq_helper.py","file_ext":"py","file_size_in_byte":4575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"3975948541","text":"import pandas as pd\nimport numpy as np\nimport random\nimport sys\nimport os\nimport scipy as sc\nimport warnings\nfrom scipy.stats import sem\nfrom scipy import stats\n\nnp.random.seed(0)\nrandom.seed(0)\n\npd.set_option('display.max_columns', 100)\npd.set_option('display.max_rows', 100)\nwarnings.filterwarnings('ignore')\n\nmydir = '/Users/kenlocey/GitHub/HACRP-HAIs/'\n\ndef obs_pred_rsquare(obs, pred):\n obs = np.sqrt(obs)\n pred = np.sqrt(pred)\n # Determines the prop of variability in a data set accounted for by a model\n # In other words, this determines the proportion of variation explained by\n # the 1:1 line in an observed-predicted plot.\n return 1 - sum((obs - pred) ** 2) / sum((obs - np.mean(obs)) ** 2)\n\n\ndef optimize(volume, pred_cases, obs_cases, hai, z_ran, pi_ran):\n\n main_df = pd.read_pickle(mydir + \"data/Compiled_HCRIS-HACRP-HAI-RAND/Compiled_HCRIS-HACRP-HAI-RAND.pkl\")\n \n main_df['file date'] = main_df['file_year'] + '_' + main_df['file_month']\n fdates = sorted(list(set(main_df['file date'].tolist())))\n \n main_df = main_df[~main_df[obs_cases].isin([np.nan, 'Not Available'])]\n main_df = main_df[~main_df[pred_cases].isin([np.nan, 'Not Available'])]\n main_df = main_df[~main_df[volume].isin([np.nan, 'Not Available'])]\n\n main_df[obs_cases] = main_df[obs_cases].astype(float)\n main_df[pred_cases] = main_df[pred_cases].astype(float)\n main_df = main_df[main_df[pred_cases] >= 1]\n\n main_df[volume] = main_df[volume].astype('int64')\n main_df['O/E'] = main_df[obs_cases] / main_df[pred_cases]\n main_df['simulated O'] = [np.nan] * main_df.shape[0]\n main_df['simulated O/E'] = [np.nan] * main_df.shape[0]\n\n main_df['expected O'] = [np.nan] * main_df.shape[0]\n main_df['expected O/E'] = [np.nan] * main_df.shape[0]\n main_df['pi_opt'] = [np.nan] * main_df.shape[0]\n main_df['z_opt'] = [np.nan] * main_df.shape[0]\n\n\n for fdate in fdates:\n print(fdate)\n \n pi_opt = 0\n pi = 0\n\n z_opt = 0\n z = 0\n\n pi_opt_ls = []\n z_opt_ls = []\n avg_pval_ls = []\n se_pval_ls = []\n std_pval_ls = []\n avg_r2_ls = []\n ct_ls = []\n\n simulated_cases_opt = []\n expected_cases_opt = []\n\n pval_opt = 0\n std_pval_opt = 0\n se_pval_opt = 0\n r2_opt = 0\n ct = 0\n \n df = main_df[main_df['file date'] == fdate]\n print('rows:', df.shape[0])\n if df.shape[0] < 100:\n continue\n \n vol = np.array(df[volume].tolist())\n predicted_cases = np.array(df[pred_cases].tolist())\n observed_cases = np.array(df[obs_cases].tolist())\n\n observed_SIR = observed_cases/predicted_cases\n observed_SIR = observed_SIR.tolist()\n \n while ct < 5*10**3:\n \n ct += 1\n if ct < 2500:\n # choose pi and z based on uniform random sampling\n pi = np.random.uniform(min(pi_ran), max(pi_ran))\n z = np.random.uniform(min(z_ran), max(z_ran))\n\n else:\n max_avg_pval = max(avg_pval_ls)\n i = avg_pval_ls.index(max_avg_pval)\n \n pi = np.abs(np.random.normal(pi_opt_ls[i], 0.001))\n z = np.abs(np.random.normal(z_opt_ls[i], 10))\n \n pD = vol/(z + vol)\n p = pi * pD\n \n pval_ls1 = []\n r2_ls1 = []\n \n iter = 100\n for i in range(iter):\n \n simulated_cases = np.array(np.random.binomial(vol, p=p, size=len(vol)))\n r2 = obs_pred_rsquare(np.array(observed_cases), np.array(simulated_cases))\n stat, c_vals, p_val = stats.anderson_ksamp(np.array([simulated_cases, observed_cases]))\n pval_ls1.append(p_val)\n r2_ls1.append(r2)\n \n sim_pval = np.nanmean(pval_ls1)\n std_pval = np.std(pval_ls1)\n se_pval = sem(pval_ls1)\n \n expected_cases = p * vol\n exp_r2 = 
obs_pred_rsquare(np.array(observed_cases), np.array(expected_cases))\n stat, c_vals, exp_pval = stats.anderson_ksamp(np.array([expected_cases, observed_cases]))\n \n if ct == 1 or (sim_pval > pval_opt) or (sim_pval >= pval_opt and exp_r2 > r2_opt):\n \n pi_opt = float(pi)\n z_opt = float(z)\n pval_opt = float(sim_pval)\n std_pval_opt = float(std_pval)\n se_pval_opt = float(se_pval)\n r2_opt = float(exp_r2)\n \n vol = np.array(df[volume].tolist())\n pD = vol/(z_opt + vol)\n p = pi_opt * pD\n simulated_cases_opt = np.array(np.random.binomial(vol, p=p, size=len(vol)))\n expected_cases_opt = p * vol\n \n if ct == 1 or ct%500 == 0:\n print(ct)\n print('pi_opt:', pi_opt, ' | z_opt:', z_opt)\n print('avg. p-val: ', np.round(pval_opt, 5), ' | r2 (obs vs exp): ', np.round(r2_opt, 5), '\\n')\n \n pi_opt_ls.append(pi_opt)\n z_opt_ls.append(z_opt)\n avg_pval_ls.append(pval_opt)\n std_pval_ls.append(std_pval_opt)\n se_pval_ls.append(se_pval_opt)\n avg_r2_ls.append(r2_opt)\n ct_ls.append(ct)\n \n df['simulated O'] = simulated_cases_opt\n df['simulated O/E'] = df['simulated O'] / df[pred_cases]\n \n df['expected O'] = expected_cases_opt\n df['expected O/E'] = df['expected O'] / df[pred_cases]\n \n df['pi_opt'] = [pi_opt]*len(simulated_cases_opt)\n df['z_opt'] = [z_opt]*len(simulated_cases_opt)\n \n opt_df = pd.DataFrame(columns=['iteration'])\n opt_df['iteration'] = ct_ls\n opt_df['avg_pval'] = avg_pval_ls\n opt_df['std_pval'] = std_pval_ls\n opt_df['se_pval'] = se_pval_ls\n opt_df['r2 (obs vs exp)'] = avg_r2_ls\n opt_df['pi_opt'] = pi_opt_ls\n opt_df['z_opt'] = z_opt_ls\n \n df.to_pickle(mydir + \"data/optimized_by_HAI_file_date/\" + hai + \"/\" + hai + \"_Data_opt_for_SIRs_\" + fdate + \".pkl\")\n opt_df.to_csv(mydir + \"data/optimized_by_HAI_file_date/\" + hai + \"/\" + hai + \"_opt_iterations_\" + fdate + \".csv\")\n \n print('Finished:', fdate)\n\n","repo_name":"Rush-Quality-Analytics/HACRP-HAIs","sub_path":"5_Optimize_random_sampling_models/HAI_optimize.py","file_name":"HAI_optimize.py","file_ext":"py","file_size_in_byte":6659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"72488034813","text":"# Contents of pyrec.py\nimport os\nimport time as t\nimport wave\n\nimport numpy as np\nimport pyaudio\nfrom aip import AipSpeech\nfrom ltp import LTP\nfrom scipy import fftpack\nfrom PyQt5 import QtCore\nfrom PyQt5.QtWidgets import QMessageBox\nimport cv2\n\n\nclass ThreadClass(QtCore.QThread):\n    any_signal = QtCore.pyqtSignal(int)\n\n    def __init__(self, parent=None, index=0):\n        super(ThreadClass, self).__init__(parent)\n        self.index = index\n        self.is_running = True\n\n    def run(self):\n        print('Starting thread...', self.index)\n        cnt = 0\n        while True:\n            cnt += 1\n            if cnt == 99: cnt = 0\n            t.sleep(0.01)\n            self.any_signal.emit(cnt)\n\n    def stop(self):\n        self.is_running = False\n        print('stopping thread...', self.index)\n        self.terminate()\n\n\ndef wav_to_pcm(wav_file, class_name):\n    # Suppose wav_file = \"audio.wav\":\n    # wav_file.split(\".\") gives [\"audio\", \"wav\"]; take the first part and append \".pcm\" to get \"audio.pcm\"\n    pcm_file = \"%s.pcm\" % ((class_name + wav_file).split(\".\")[0])\n\n    # The same command we would type in a cmd window; here Python runs it for us\n    os.system(\"ffmpeg -loglevel quiet -y -i %s -acodec pcm_s16le -f s16le -ac 1 -ar 16000 %s\" % (class_name + wav_file, pcm_file))\n\n    return pcm_file\n\n\ndef play_mp3(file_name):\n    class_name = 'sounds/'\n    os.system('ffplay '+'-nodisp -autoexit ' + class_name + file_name)\n\n\ndef recognize(file, class_name, self):\n    APP_ID = '25559029'\n    API_KEY = 'PIIyG8nXV0DeVWgo8O0fNGuy'\n    SECRET_KEY = 'FuiB46Ek3iHd0elRx49KTm9Xbh1QrHO1'\n    client = AipSpeech(APP_ID, API_KEY, SECRET_KEY)\n    pcm_file = wav_to_pcm(file,class_name)\n    with open(pcm_file, 'rb') as fp:\n        file_context = fp.read()\n    res = client.asr(file_context, 'pcm', 16000, {'dev_pid': 1536, })\n    try:\n        res_str = res['result'][0]\n\n        self.plainTextEdit.appendPlainText(str(res_str))\n\n        return res_str\n    except:\n        return '的'\n\n\ndef nlp(res_str, self):\n    ltp = LTP()\n    segment, hidden = ltp.seg([res_str])\n    pos = ltp.pos(hidden)\n\n    self.plainTextEdit.appendPlainText(str(segment))\n    self.plainTextEdit.appendPlainText(str(pos))\n\n    dict={}\n    voice=[]\n    i =0\n    for word in segment[0]:\n        dict[word]=pos[0][i]\n        i+=1\n    voice.append(dict)\n    return voice\n\ndef synth_sound(res_str, self,synth_name):\n    APP_ID = '25559029'\n    API_KEY = 'PIIyG8nXV0DeVWgo8O0fNGuy'\n    SECRET_KEY = 'FuiB46Ek3iHd0elRx49KTm9Xbh1QrHO1'\n    class_name='sounds/'\n    client = AipSpeech(APP_ID, API_KEY, SECRET_KEY)\n    synth_context = client.synthesis(res_str, 'zh', 1, {'vol': 5})\n    if not isinstance(synth_context, dict):\n        with open(class_name + synth_name, 'wb') as f:\n            f.write(synth_context)\n    else:\n\n        self.plainTextEdit.appendPlainText(str(synth_context))\n\n\ndef recording(filename, class_name, self, time=0, threshold=2000):\n    \"\"\"\n    :param filename: output file name\n    :param time: recording duration; if a time is given, record for that long, otherwise detect the end of the recording automatically\n    :param threshold: threshold used to decide that the recording has ended\n    :return:\n    \"\"\"\n    CHUNK = 1024  # chunk size\n    FORMAT = pyaudio.paInt16  # bits collected per sample\n    CHANNELS = 1  # number of channels\n    RATE = 16000  # sample rate: samples collected per second\n    RECORD_SECONDS = time  # recording duration\n    WAVE_OUTPUT_FILENAME = class_name + filename  # output file path\n    p = pyaudio.PyAudio()\n    stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK)\n    frames = []\n\n    choice = QMessageBox.question(self, 'Change Text?', 'Would you like to change the button text?',\n                                  QMessageBox.Yes | QMessageBox.No)  # 1\n\n    if choice == QMessageBox.Yes:  # 2\n        if time > 0:\n            #for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):\n            while True:\n                data = stream.read(CHUNK)\n                frames.append(data)\n\n                if choice == QMessageBox.No:  # 4\n                    break\n    elif choice == QMessageBox.No:  # 4\n        pass\n\n    stream.stop_stream()\n    stream.close()\n    p.terminate()\n    with wave.open(WAVE_OUTPUT_FILENAME, 'wb') as wf:\n        wf.setnchannels(CHANNELS)\n        wf.setsampwidth(p.get_sample_size(FORMAT))\n        wf.setframerate(RATE)\n        wf.writeframes(b''.join(frames))\n\ndef full(self):\n    class_name = 'sounds/'\n    record_name = 'record.wav'\n    recording(record_name, class_name, self, time=5)\n    res_str = recognize(record_name,class_name, self)\n    return nlp(res_str, self)\n\ndef name_full(self):\n    class_name = 'sounds/'\n    record_name = 'record.wav'\n    recording(record_name, class_name, self, time=5)\n    res_str = recognize(record_name,class_name, self)\n    return res_str\n\nif __name__ == '__main__':\n    CHUNK = 1024\n    FORMAT = pyaudio.paInt16\n    CHANNELS = 2\n    RATE = 16000\n    RECORD_SECONDS = 5\n    class_name = 'sounds/'\n    record_name = 'record.wav'\n    synth_name = 'synth.mp3'\n\n    from MainLogic import ui\n\n    recording(record_name, class_name, ui, time=10)\n    # rec(record_name)\n    res_str = recognize(record_name,class_name, ui)\n    ui.plainTextEdit.appendPlainText(str(nlp(res_str, ui)))\n\n    synth_sound(res_str, ui,synth_name)\n    play_mp3(synth_name)\n","repo_name":"Falcom4000/SRTP-2","sub_path":"record.py","file_name":"record.py","file_ext":"py","file_size_in_byte":5384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"23519656584","text":"import sys\nimport pandas as pd\nimport python.Config as Config\nimport python.Timer as Timer\nimport numpy as np\nfrom time import sleep\nfrom math import sqrt\nimport scipy.sparse as sp\nfrom sklearn.externals.joblib import Parallel, delayed\nfrom sklearn.utils import safe_indexing\nfrom sklearn.utils.validation import (_is_arraylike, _num_samples, column_or_1d)\nfrom sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error, r2_score\n\n\ndef read_chunks(file):\n time = Timer.Timer()\n print('Reading chunks from', file, '...')\n\n iter_hdf = pd.read_hdf(Config.H5_PATH + '/' + file, chunksize=Config.CHUNK_SIZE)\n rows = iter_hdf.nrows\n chunk_amount = int(rows / Config.CHUNK_SIZE)\n chunks = []\n percentage = 0\n\n # Read by chunks and join them\n for i, chunk in enumerate(iter_hdf):\n percentage += np.asscalar(100 * Config.CHUNK_SIZE / rows)\n chunks.append(chunk)\n if i % int(chunk_amount / 5) == 0:\n sys.stdout.write('%d%% ' % percentage)\n sys.stdout.flush()\n\n print('All chunks read. Joining results...')\n chunks = pd.concat(chunks)\n time.print()\n del time\n return chunks\n\n\ndef chunk_reader(file):\n iter_hdf = pd.read_hdf(Config.H5_PATH + '/' + file, chunksize=Config.CHUNK_SIZE)\n return iter_hdf\n\n\ndef read_hdf(file):\n time = Timer.Timer()\n print('Reading', file, 'file...')\n df = pd.read_hdf(Config.H5_PATH + '/' + file)\n time.print()\n del time\n return df\n\n\ndef calc_scores(target_test, y_prediction):\n print_scores(r2_score(target_test, y_prediction), mean_squared_error(target_test, y_prediction),\n mean_absolute_error(target_test, y_prediction))\n\n\ndef print_scores(r2, mse, mae):\n print('R^2 Score:', r2)\n print('Mean Squared Error:', mse)\n print('Root Mean Squared Error:', sqrt(mse))\n print('Mean Absolute Error:', mae)\n\n\ndef cross_val_execute(alg, x, y, cv, fit_params=None, n_jobs=1):\n parallel = Parallel(n_jobs=n_jobs, verbose=0, pre_dispatch='2*n_jobs')\n results = parallel(delayed(fit_predict)(alg, x, y, train, test, fit_params)\n for train, test in list(cv.split(x, y)))\n\n scores = [key[0] for (key, val) in results]\n mse = [key[1] for (key, val) in results]\n mae = [key[2] for (key, val) in results]\n predictions = [val for (key, val) in results]\n\n return scores, mse, mae, np.concatenate(predictions)\n\n\ndef fit_predict(alg, x, y, train, test, fit_params):\n fit_params = fit_params if fit_params is not None else {}\n fit_params = dict([(k, _index_param_value(x, v, train))\n for k, v in fit_params.items()])\n x_train, x_test, y_train, y_test = x.iloc[train], x.iloc[test], y.iloc[train], y.iloc[test]\n\n alg.fit(x_train, y_train)\n y_predict = alg.predict(X=x_test)\n\n return [r2_score(y_test, y_predict), mean_squared_error(y_test, y_predict),\n mean_absolute_error(y_test, y_predict)], y_predict\n\n\ndef _index_param_value(X, v, indices):\n \"\"\"Private helper function for parameter value indexing.\"\"\"\n if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):\n # pass through: skip indexing\n return v\n if sp.issparse(v):\n v = v.tocsr()\n return safe_indexing(v, indices)","repo_name":"ngmatos/SonaePredict","sub_path":"python/Data.py","file_name":"Data.py","file_ext":"py","file_size_in_byte":3250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"15886020247","text":"\nclass Node:\n def __init__(self,item):\n self.data=item\n self.left=None\n self.right=None\n\ndef treeInput():\n data=int(input())\n if data==-1:\n return\n root=Node(data)\n leftTree=treeInput()\n rightTree=treeInput()\n root.left=leftTree\n root.right=rightTree\n return root\n\nroot=treeInput()\n","repo_name":"krxxnna/CP-and-DSA","sub_path":"DSA_python/08. Binary_tree/TreeInput.py","file_name":"TreeInput.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"74421347770","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\nAuthor: Xihao Liang\nCreated: 2016.03.08\n'''\n\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf8')\n\nimport cPickle\nimport matplotlib.pyplot as plt\n\n\nfrom utils import progbar\n\ndef analyse_result(ys, pred_probs, ofname = 'output/new_precision.png'):\n\tn_test = len(ys)\t\n\ty_dim = len(pred_probs[0])\n\thit = [0 for i in range(y_dim)]\n\n\tfor y, probs in zip(ys, pred_probs):\n\t\teid_prob = sorted(enumerate(probs), key = lambda k:-k[1])\n\n\t\tfor i, item in enumerate(eid_prob):\n\t\t\teid, progs = item\n\t\t\tif y == eid:\n\t\t\t\thit[i] += 1\n\n\tfor i in range(1, y_dim):\n\t\thit[i] += hit[i - 1]\n\t\n\tacc = [float(hi) / n_test for hi in hit]\n\n\tplt.figure()\n\tplt.axis([1, y_dim, 0., 1.])\n\tplt.xlabel('Top N')\n\tplt.ylabel('Precision')\n\tplt.plot(range(1, y_dim + 1), acc)\n\n\trand_x = range(1, y_dim + 1)\n\trand_y = [float(xi) / y_dim for xi in rand_x]\n\tplt.plot(rand_x, rand_y, '--r') \n\n\tplt.savefig(ofname)\n\ndef test(ifname = 'output/lstm_result.pkl', ofname = 'output/precision.png'):\n\timport cPickle\n\ttest_y, pred_probs = cPickle.load(open(ifname, 'r'))\n\t\n\tanalyse_result(test_y, pred_probs, ofname)\n\nif __name__ == '__main__':\n\ttest()\n","repo_name":"liangxh/weibo","sub_path":"lstmreporter.py","file_name":"lstmreporter.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
+{"seq_id":"4780223570","text":"from kafka import KafkaConsumer\nimport platform, socket, re, uuid, json, psutil, logging\n\n\ndef getSystemInfo():\n try:\n info={}\n info['platform']=platform.system()\n info['platform-release']=platform.release()\n info['platform-version']=platform.version()\n info['architecture']=platform.machine()\n info['hostname']=socket.gethostname()\n info['ip-address']=socket.gethostbyname(socket.gethostname())\n info['mac-address']=':'.join(re.findall('..', '%012x' % uuid.getnode()))\n info['processor']=platform.processor()\n info['ram']=str(round(psutil.virtual_memory().total / (1024.0 ** 3)))+\" GB\"\n return json.dumps(info)\n except Exception as e:\n logging.exception(e)\n\n\nprint(\"Starting Consumer 1;\")\nconsumer = KafkaConsumer('number')\nprint(json.loads(getSystemInfo()))\n\nprint(\"All numbers found so far: \")\nnumbers = []\nfor msg in consumer:\n numbers.append(msg.value)\nprint(numbers)\n\n","repo_name":"Stixxl/iot-report","sub_path":"src/kafka/consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"20346134472","text":"import pygame\nfrom settings import *\nimport random\n\n\nclass SpriteSheet:\n\n    def __init__(self, filename):\n        \"\"\"Load the sheet.\"\"\"\n        try:\n            self.sheet = pygame.image.load(filename).convert()\n        except pygame.error as e:\n            print(f\"Unable to load spritesheet image: {filename}\")\n            raise SystemExit(e)\n\n    def image_at(self, rectangle, colorkey = None):\n        \"\"\"Load a specific image from a specific rectangle.\n        rectangle is a tuple with (x, y, x+offset, y+offset).\"\"\"\n        rect = pygame.Rect(rectangle)\n        image = pygame.Surface(rect.size).convert()\n        image.blit(self.sheet, (0, 0), rect)\n        if colorkey is not None:\n            if colorkey == -1:\n                colorkey = image.get_at((0,0))\n            image.set_colorkey(colorkey, pygame.RLEACCEL)\n        return image\n\n    def images_at(self, rects, colorkey = None):\n        \"\"\"Load a whole bunch of images and return them as a list.\"\"\"\n        return [self.image_at(rect, colorkey) for rect in rects]\n\n    def load_grid_images(self, num_rows, num_cols, x_margin=0, x_padding=0,\n                         y_margin=0, y_padding=0, width=None, height=None, colorkey = None):\n        \"\"\"Load a grid of images.\n        x_margin is the space between the top of the sheet and top of the first\n        row. x_padding is space between rows. Assumes symmetrical padding on\n        left and right. Same reasoning for y. Calls self.images_at() to get a\n        list of images.\n        \"\"\"\n\n        sheet_rect = self.sheet.get_rect()\n        sheet_width, sheet_height = sheet_rect.size\n\n        if width and height:\n            x_sprite_size = width\n            y_sprite_size = height\n        else:\n            x_sprite_size = (sheet_width - 2 * x_margin\n                             - (num_cols - 1) * x_padding) / num_cols\n            y_sprite_size = (sheet_height - 2 * y_margin\n                             - (num_rows - 1) * y_padding) / num_rows\n\n        sprite_rects = []\n        for row_num in range(num_rows):\n            for col_num in range(num_cols):\n                # Position of sprite rect is margin + one sprite size\n                # and one padding size for each row. 
Same for y.\n x = x_margin + col_num * (x_sprite_size + x_padding)\n y = y_margin + row_num * (y_sprite_size + y_padding)\n sprite_rect = (x, y, x_sprite_size, y_sprite_size)\n sprite_rects.append(sprite_rect)\n\n return self.images_at(sprite_rects, colorkey)\n\n\nclass Explosion(pygame.sprite.Sprite):\n def __init__(self, sheet, center):\n pygame.sprite.Sprite.__init__(self)\n self.sheet = sheet\n self.EXPLOSION_LIST = [self.sheet.image_at((0, 0, 31, 31), -1), self.sheet.image_at((32, 0, 31, 31), -1),\n self.sheet.image_at((65, 0, 31, 31), -1), self.sheet.image_at((96, 0, 31, 31), -1),\n self.sheet.image_at((128, 0, 31, 31), -1), self.sheet.image_at((160, 0, 31, 31), -1)]\n self.EXPLOSION_LIST = [pygame.transform.scale2x(explosion) for explosion in self.EXPLOSION_LIST]\n self.image = self.EXPLOSION_LIST[0]\n self.rect = self.image.get_rect()\n self.rect.center = center\n self.frame = 0\n self.frame_rate = 50\n self.kill_center = center\n self.previous_update = pygame.time.get_ticks()\n\n def update(self):\n current = pygame.time.get_ticks()\n if current - self.previous_update > self.frame_rate:\n self.previous_update = current\n self.frame += 1\n elif self.frame == len(self.EXPLOSION_LIST):\n self.kill()\n else:\n self.image = self.EXPLOSION_LIST[self.frame]\n self.rect = self.image.get_rect()\n self.rect.center = self.kill_center\n\n\nclass Player(pygame.sprite.Sprite):\n def __init__(self, x, y, sheet, running, display, sheet_2):\n pygame.sprite.Sprite.__init__(self)\n self.surface = sheet.image_at((0, 95, 47, 47), -1)\n self.surface = pygame.transform.scale(self.surface, (150, 150))\n self.image = self.surface\n self.rect = self.surface.get_rect()\n self.rect.x = x\n self.rect.y = y\n self.run = [sheet.image_at((10, 110, 32, 30), -1), sheet.image_at((57, 110, 32, 30), -1)]\n self.run = [pygame.transform.scale(player, (150, 150)) for player in self.run]\n self.fly = [sheet_2.image_at((4, 149, 38, 38), -1), sheet_2.image_at((52, 149, 38, 38), -1),\n sheet_2.image_at((100, 149, 38, 38), -1), sheet_2.image_at((52, 149, 38, 38), -1)]\n self.fly = [pygame.transform.scale(player, (150, 150)) for player in self.fly]\n self.frame = 0\n self.frame_rate = 50\n self.previous_update = pygame.time.get_ticks()\n self.image_delay = 100\n self.running = running\n self.dodging_up = False\n self.dodging_down = False\n self.display = display\n\n def update(self, level):\n #print(level)\n now = pygame.time.get_ticks()\n if now - self.previous_update >= self.image_delay:\n self.previous_update = now\n if level == 1:\n if self.frame >= len(self.run):\n self.frame = 0\n self.image = self.run[self.frame]\n else:\n if self.frame >= len(self.fly):\n self.frame = 0\n self.image = self.fly[self.frame]\n self.frame = self.frame + 1\n self.display.blit(self.image, (self.rect.x, self.rect.y))\n\n def get_keys(self, time, level):\n self.time = time\n keys = pygame.key.get_pressed()\n self.current_move = pygame.time.get_ticks()\n if self.current_move - self.time > move_delay:\n self.time = self.current_move\n if level == 1:\n if keys[pygame.K_s] and self.rect.y < 636:\n self.rect.y += 45\n if keys[pygame.K_w] and self.rect.y > 180:\n self.rect.y -= 45\n else:\n if keys[pygame.K_s] and self.rect.y < 636:\n self.rect.y += 45\n if keys[pygame.K_w] and self.rect.y > 180:\n self.rect.y -= 45\n if keys[pygame.K_d] and self.rect.x < 1700:\n self.rect.x += 25\n if keys[pygame.K_a] and self.rect.x > 0:\n self.rect.x -= 25\n\n\nclass Car(pygame.sprite.Sprite):\n def __init__(self, x, y, display, color, speed):\n 
pygame.sprite.Sprite.__init__(self)\n self.red_car = pygame.image.load(\"assets/red car.png\")\n self.teal_car = pygame.image.load(\"assets/teal car.png\")\n self.white_car = pygame.image.load(\"assets/truckFlat.png\")\n self.violet_car = pygame.image.load(\"assets/van.png\")\n self.yellow_car = pygame.image.load(\"assets/taxi.png\")\n self.cop_car = pygame.image.load(\"assets/police.png\")\n self.fast_car = pygame.image.load(\"assets/raceFuture.png\")\n self.health_car = pygame.image.load(\"assets/ambulance.png\")\n self.rect = self.red_car.get_rect()\n self.speed = speed\n if color == \"R\":\n self.image = self.red_car\n self.speed = 22\n if color == \"T\":\n self.image = self.teal_car\n self.speed = 18\n if color == \"W\":\n self.image = self.white_car\n self.speed = 15\n if color == \"V\":\n self.image = self.violet_car\n self.speed = 12\n if color == \"Y\":\n self.image = self.yellow_car\n self.speed = 15\n if color == \"C\":\n self.image = self.cop_car\n self.speed = 28\n if color == \"F\":\n self.image = self.fast_car\n self.speed = 35\n if color == \"H\":\n self.image = self.health_car\n self.speed = 8\n self.rect.x = x\n self.rect.y = y\n self.display = display\n\n def update(self):\n self.rect.x -= self.speed\n #self.image.fill(RED)\n self.display.blit(self.image, (self.rect.x, self.rect.y))\n\n\nclass Seed(pygame.sprite.Sprite):\n def __init__(self, x, y, display):\n pygame.sprite.Sprite.__init__(self)\n self.seed = pygame.image.load(\"assets/Seeds_Cereals.png\")\n self.seed = pygame.transform.scale(self.seed, (75, 75))\n self.rect = self.seed.get_rect()\n self.display = display\n self.rect.x = x\n self.rect.y = y\n\n def update(self):\n self.rect.x -= 10\n self.display.blit(self.seed, (self.rect.x, self.rect.y))\n\n\nclass Nut(pygame.sprite.Sprite):\n def __init__(self, x, y, display):\n pygame.sprite.Sprite.__init__(self)\n self.seed = pygame.image.load(\"assets/Seeds_Cereals.png\")\n self.seed = pygame.transform.scale(self.seed, (75, 75))\n self.rect = self.seed.get_rect()\n self.display = display\n self.rect.x = x\n self.rect.y = y - random.randint(500, 2000)\n\n def update(self):\n self.rect.y += 10\n self.display.blit(self.seed, (self.rect.x, self.rect.y))\n if self.rect.y > 1000:\n self.rect.y = random.randint(500, 2000)*-1\n\n\nclass Tree(pygame.sprite.Sprite):\n def __init__(self, x, y, display):\n pygame.sprite.Sprite.__init__(self)\n self.tree = pygame.image.load(\"assets/baum.png\")\n self.tree = pygame.transform.scale(self.tree, (475, 475))\n self.rect = self.tree.get_rect()\n self.display = display\n self.rect.x = x\n self.rect.y = y - 200\n\n def update(self):\n self.rect.x -= 7\n self.display.blit(self.tree, (self.rect.x, self.rect.y))\n\n\nclass Score:\n def __init__(self, font, display, score, x, y):\n self.font = font\n self.display = display\n self.score = score\n self.x = x\n self.y = y\n\n def draw_score(self):\n text = self.font.render(f\"Score = {self.score}\", True, WHITE)\n self.display.blit(text, (self.x, self.y))\n\n def get_score(self):\n return self.score\n\n\nclass Layout:\n def __init__(self, layout, sheet, display, x_multi, y_multi, sheet_2):\n pygame.sprite.Sprite.__init__(self)\n self.layout = layout\n self.display = display\n self.player_grp = pygame.sprite.GroupSingle()\n self.starting_car_grp = pygame.sprite.Group()\n self.car_grp = pygame.sprite.Group()\n self.all_sprites = pygame.sprite.Group()\n self.seed_grp = pygame.sprite.Group()\n self.tree_grp = pygame.sprite.Group()\n self.nut_grp = pygame.sprite.Group()\n self.SCORE = 0\n 
self.level = 1\n        self.home = False\n        self.letters = ['R', 'T', 'W', 'V', 'Y', 'C', \"F\", 'H']\n\n        for i, row in enumerate(self.layout):\n            for j, col in enumerate(row):\n                x_val = j * x_multi\n                y_val = i * y_multi\n\n                if col == \"P\":\n                    player = Player(x_val, y_val, sheet, True, self.display, sheet_2)\n                    self.player_grp.add(player)\n                if col == \"R\":\n                    car = Car(x_val, y_val, self.display, col, 22)\n                    self.car_grp.add(car)\n                if col == \"T\":\n                    car = Car(x_val, y_val, self.display, col, 18)\n                    self.car_grp.add(car)\n                if col == \"W\":\n                    car = Car(x_val, y_val, self.display, col, 15)\n                    self.car_grp.add(car)\n                if col == \"V\":\n                    car = Car(x_val, y_val, self.display, col, 12)\n                    self.car_grp.add(car)\n                if col == \"Y\":\n                    car = Car(x_val, y_val, self.display, col, 15)\n                    self.car_grp.add(car)\n                if col == \"C\":\n                    car = Car(x_val, y_val, self.display, col, 28)\n                    self.car_grp.add(car)\n                if col == \"F\":\n                    car = Car(x_val, y_val, self.display, col, 35)\n                    self.car_grp.add(car)\n                if col == \"H\":\n                    car = Car(x_val, y_val, self.display, col, 8)\n                    self.car_grp.add(car)\n                if col == \"N\":\n                    car = Car(x_val, y_val, self.display, self.letters[random.randint(0, 7)], 15)\n                    self.starting_car_grp.add(car)\n                if col == \"G\":\n                    # spawn a random car with probability 2/3\n                    if random.randint(1, 3) in (1, 3):\n                        car = Car(x_val, y_val, self.display, self.letters[random.randint(0, 7)], 15)\n                        self.car_grp.add(car)\n                if col == \"1\":\n                    if random.randint(1, 25) == 1:\n                        seed = Seed(x_val, y_val, self.display)\n                        self.seed_grp.add(seed)\n                if col == \"2\":\n                    tree = Tree(x_val, y_val, self.display)\n                    self.tree_grp.add(tree)\n                if col == \"3\":\n                    nut = Nut(x_val, y_val, self.display)\n                    self.nut_grp.add(nut)\n\n    def collied(self):\n        touched = False\n        player = self.player_grp.sprite\n\n        collide_list = pygame.sprite.spritecollide(player, self.car_grp, False)\n        eat_list = pygame.sprite.spritecollide(player, self.seed_grp, True)\n        tree_list = pygame.sprite.spritecollide(player, self.tree_grp, False)\n        for car in self.car_grp:\n            # a sprite always collides with itself, so only slow cars that touch another car\n            hits = pygame.sprite.spritecollide(car, self.car_grp, False)\n            if any(other is not car for other in hits):\n                car.speed = 15\n        if collide_list:\n            touched = True\n            player.rect.y += 2000\n        if eat_list:\n            self.SCORE += 15\n        if tree_list:\n            self.home = True\n            self.level = 2\n            print(self.home)\n        #print(self.level, \"hi\")\n        return touched, player.rect.center, self.SCORE, self.home\n\n    def update(self, display, time, level, text):\n        for sprite in self.all_sprites.sprites():\n            display.blit(sprite.surface, sprite.rect)\n        for player in self.player_grp.sprites():\n            player.update(level)\n            player.get_keys(time, level)\n        for car in self.car_grp.sprites():\n            car.update()\n        for car in self.starting_car_grp.sprites():\n            car.update()\n        for seed in self.seed_grp:\n            seed.update()\n        for tree in self.tree_grp:\n            tree.update()\n        if text == False:\n            for nut in self.nut_grp:\n                nut.update()\n","repo_name":"SeaMoose6/BirdRun","sub_path":"sprites.py","file_name":"sprites.py","file_ext":"py","file_size_in_byte":14504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"31320831442","text":"import os, re\nimport sys\n\nNB_DOSS = 40\n\nlst = ['{:02d}'.format(k) for k in range(1, NB_DOSS+1)]\n\ndef create_doss():\n    for nom in lst:\n        os.makedirs(nom + '_1', exist_ok = True)\n        os.makedirs(nom + '_2', exist_ok = True)\n\ndef fichier(nom_doss):\n    nom_file = nom_doss + '/' + 'enonce.md'\n    with open(nom_file, 'w') as f:\n        f.write(\"\")\n\n    nom_file = nom_doss + '/' + 'correction.md'\n    with open(nom_file, 'w') as f:\n        f.write(\"\")\n\ndef fichiers_1_2():\n    for nom_doss in lst:\n        n1 = nom_doss + '_1'\n        n2 = nom_doss + '_2'\n        fichier(n1)\n        fichier(n2)\n\n\n#create_doss()\n#fichiers_1_2()\n\n\n\n\n# contenu = \"\"\"\n# ### Exercice {0}.1 □\n# !!! example \"Exercice {0}.1\"\n# === \"Énoncé\" \n# --8<-- \"docs/T6_6_Epreuve_pratique/files/{0}_1/enonce.md\"\n# \n# === \"Correction\"\n# --8<-- \"docs/T6_6_Epreuve_pratique/files/{0}_1/correction.md\"\n# \n# === \"Source Markdown\"\n# --8<-- \"docs/T6_6_Epreuve_pratique/files/{0}_1/enonce.md\"\n# \n# \n# ### Exercice {0}.2 □\n# !!! example \"Exercice {0}.2\"\n# === \"Énoncé\" \n# --8<-- \"docs/T6_6_Epreuve_pratique/files/{0}_2/enonce.md\"\n# \n# === \"Correction\"\n# --8<-- \"docs/T6_6_Epreuve_pratique/files/{0}_2/correction.md\"\n# \n# === \"Sources Markdown\"\n# ```md\n# --8<-- \"docs/T6_6_Epreuve_pratique/files/{0}_2/enonce.md\"\n# ``` \n# \"\"\"\n\ncontenu = \"\"\"\n--8<-- \"./docs/exercices/{0}/{1}/sujet_formate.md\"\n\n\"\"\"\nide = \"\"\"\n```python\n    --8<-- \"./docs/exercices/{0}/{1}/exo.py\"\n```\n\"\"\"\nexclude = \"\"\"\nsearch:\n  - exclude: True\"\"\"\nfor root, dirs, lst_files in os.walk('.') :\n    for file in lst_files :\n        if file == 'sujet.md' :\n            print('processing {0}/{1}'.format(root, file))\n            with open(os.path.join(root,file), 'r') as f:\n                data = ''.join(f.readlines())\n\n            titre = re.search('title: (?P<title>.*)', data).group('title')\n            regex = re.compile(r'exclude: True', re.MULTILINE)\n            if regex.search(data) is None :\n                regex = re.compile(r'title:.*', re.MULTILINE)\n                titre = regex.search(data).group()\n                data2 = regex.sub(titre + exclude, data)\n                with open(os.path.join(root,file), 'w') as f:\n                    f.write(data2)\n            titre = re.search('title: (?P<title>.*)', data).group('title')\n            regex = re.compile(r'---(\\n|.)*---$', re.MULTILINE)\n            data = regex.sub('', data)\n            regex = re.compile(r\"{{ py_sujet.*}}\", re.MULTILINE)\n            data = regex.sub('', data)\n            regex = re.compile(r\"{{ IDE.*}}\", re.MULTILINE)\n            rep = root.split('/')\n            data = regex.sub(ide.format(rep[-2], rep[-1]), data)\n            regex = re.compile(r']\\(images/', re.MULTILINE)\n            data = regex.sub(']({0}/images/'.format(rep[-1]), data)\n            with open(os.path.join(root, 'sujet_formate.md'), 'w') as f :\n                f.write(\"\\n\\n### {0} \\n\\n\".format(titre))\n                f.write(data)\n\n\n\nfor rep in os.listdir() :\n    if os.path.isdir(rep) :\n        with open('./{0}/exos.md'.format(rep), 'w') as f :\n            for ss_rep in os.listdir('./'+rep) :\n                f.write(contenu.format(rep, ss_rep))\n\n\n\n\n# nom_dossier = 'listes_logins'\n# nom_sources = \"sources\"\n# os.makedirs(nom_dossier, exist_ok = True)\n# os.makedirs(nom_sources, exist_ok = True)\n# for file in os.scandir(nom_dossier):\n#     os.remove(file.path)\n#\n#\n#\n#\n# for classe in classes:\n#     print(classe)\n#     nomhtml = nom_sources + '/' + classe + '.html'\n#     with open(nomhtml, 'w') as f:\n#         f.write(html_start)\n#         f.write('<h1>' + classe + ' - logins Scribe</h1>')\n#         f.write(tabdf[classe].to_html(index=False))\n#         f.write(html_end)\n#     nom_fichier = nom_dossier + '/' + classe + '.pdf'\n#     pdf.from_file(nomhtml, nom_fichier)\n#     os.remove(nomhtml)\n# os.rmdir(nom_sources)\n# print(\"Done\")\n","repo_name":"RVDROU/Ressources-NSI","sub_path":"docs/exercices/gen2.py","file_name":"gen2.py","file_ext":"py","file_size_in_byte":4049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"19768596909","text":"#!/usr/bin/env python\nimport unittest\n\nfrom palo_alto_firewall_analyzer.core import get_policy_validators\nfrom palo_alto_firewall_analyzer.core import ProfilePackage, ConfigurationSettings\nfrom palo_alto_firewall_analyzer.pan_config import PanConfig\n\n\nclass TestShadowingObjects(unittest.TestCase):\n @staticmethod\n def create_profilepackage(shared_services, dg_services, shared_service_groups, dg_service_groups):\n device_groups = [\"test_dg\"]\n device_group_hierarchy_parent = {\"test_dg\": \"shared\"}\n devicegroup_objects = {\"shared\": {}, \"test_dg\": {}}\n devicegroup_objects[\"shared\"]['Services'] = shared_services\n devicegroup_objects[\"test_dg\"]['Services'] = dg_services\n devicegroup_objects[\"shared\"]['ServiceGroups'] = shared_service_groups\n devicegroup_objects[\"test_dg\"]['ServiceGroups'] = dg_service_groups\n\n profilepackage = ProfilePackage(\n api_key='',\n pan_config=PanConfig('<_/>'),\n settings=ConfigurationSettings().get_config(),\n device_group_hierarchy_children={},\n device_group_hierarchy_parent=device_group_hierarchy_parent,\n device_groups_and_firewalls={},\n device_groups=device_groups,\n devicegroup_objects=devicegroup_objects,\n devicegroup_exclusive_objects={},\n rule_limit_enabled=False\n )\n return profilepackage\n\n def test_shadowing_services(self):\n test_xml = \"\"\"\\\n \n \n \n 1\n 2\n \n \n \n \n 2\n \n \n \n \"\"\"\n pan_config = PanConfig(test_xml)\n shared_services = pan_config.get_devicegroup_object('Services', 'shared')\n dg_services = pan_config.get_devicegroup_object('Services', 'test_dg')\n profilepackage = self.create_profilepackage(shared_services, dg_services, [], [])\n\n _, _, validator_function = get_policy_validators()['ShadowingServices']\n results = validator_function(profilepackage)\n\n self.assertEqual(len(results), 1)\n self.assertEqual(len(results[0].data), 2)\n self.assertEqual(results[0].data[0][0], 'shared')\n self.assertEqual(results[0].data[0][1].get('name'), 'tcp-dup')\n self.assertEqual(results[0].data[1][0], 'test_dg')\n self.assertEqual(results[0].data[1][1].get('name'), 'tcp-dup')\n\n def test_shadowing_servicegroups(self):\n test_xml = \"\"\"\\\n \n \n \n mem1mem2\n mem1mem2\n \n \n \n \n mem1mem2\n mem1mem2\n \n \n \n \"\"\"\n pan_config = PanConfig(test_xml)\n shared_service_groups = pan_config.get_devicegroup_object('ServiceGroups', 'shared')\n dg_service_groups = pan_config.get_devicegroup_object('ServiceGroups', 'test_dg')\n\n profilepackage = self.create_profilepackage([], [], shared_service_groups, dg_service_groups)\n\n _, _, validator_function = get_policy_validators()['ShadowingServiceGroups']\n results = validator_function(profilepackage)\n self.assertEqual(len(results), 1)\n self.assertEqual(len(results[0].data), 2)\n self.assertEqual(results[0].data[0][0], 'shared')\n self.assertEqual(results[0].data[0][1].get('name'), 'dupgroup1')\n self.assertEqual(results[0].data[1][0], 'test_dg')\n self.assertEqual(results[0].data[1][1].get('name'), 'dupgroup1')\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"moshekaplan/palo_alto_firewall_analyzer","sub_path":"tests/test_ShadowingObjects.py","file_name":"test_ShadowingObjects.py","file_ext":"py","file_size_in_byte":4703,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"78"}
+{"seq_id":"11597791851","text":"# -*- coding:utf-8 -*-\n\"\"\"\n:Description: Apply multiple Spark SQL UDFs to a single column\n:Owner: jiajing_qu\n:Create time: 2020/08/17 10:17\n\"\"\"\nfrom pyspark.sql import SparkSession\n\nfrom pyspark.sql.types import IntegerType\nfrom pyspark.sql.functions import udf, struct\nfrom pyspark.sql.functions import lit\nfrom pyspark.sql.functions import *\n\n\nclass udfs:\n    def sum2(self, x):\n        return x + 4\n\n    def multi(self, x):\n        return x * 2\n\n    def div(self, x):\n        return x / 3\n\n\nfun_list = [\"sum2\", \"multi\", \"div\"]\nudfs = udfs()\n\n\ndef my_udf(func_list):\n    def all_udf(v):\n        r = None\n        for f in func_list:\n            if r is None:\n                r = getattr(udfs, f)(v)\n            else:\n                r = getattr(udfs, f)(r)\n        return r\n    return udf(all_udf, IntegerType())\n\n\ndef main():\n    spark = SparkSession.builder.enableHiveSupport()\\\n        .config(\"hive.exec.dynamic.partition\", True)\\\n        .config(\"hive.exec.dynamic.partition.mode\", \"nonstrict\")\\\n        .appName(\"Test udf\").getOrCreate()\n\n    df = spark.createDataFrame([(101, 1, 16)], ['ID', 'A', 'B'])\n    df.show()\n\n    df.withColumn('Result2', my_udf(fun_list)(\"A\")).show()\n\n\nif __name__ == \"__main__\":\n    main()","repo_name":"Shmilyqjj/Shmily-py","sub_path":"BigData/learn_and_tests/Spark/udf/multi_udf_one_col.py","file_name":"multi_udf_one_col.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"}
+{"seq_id":"40795425696","text":"from django.shortcuts import render, redirect\r\nfrom .models import *\r\n\r\ndef addexpense(request):\r\n if request.method==\"POST\":\r\n obj=expenseform(request.POST)\r\n obj.save()\r\n return redirect(\"/\")\r\n else:\r\n d={\"form\":expenseform}\r\n return render(request,\"form.html\",d)\r\n\r\ndef details(request):\r\n obj=Expense.objects.all()\r\n d={'data1':obj}\r\n return render (request,\"details1.html\",d)\r\n\r\n\r\n\r\ndef delete(request,incid):\r\n obj=Expense.objects.get(id=incid)\r\n obj.delete()\r\n return redirect(\"/Inc-details\")\r\n\r\ndef edit(request,incid):\r\n data=Expense.objects.get(id=incid)\r\n if request.method==\"POST\":\r\n obj=expenseform(request.POST,instance=data)\r\n obj.save()\r\n return redirect(\"/\")\r\n else:\r\n d={\"form\":expenseform(instance=data) }\r\n return render(request,\"form.html\",d) \r\n","repo_name":"awk26/income","sub_path":"Expense/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"15413871586","text":"import pandas as pd\nimport numpy as np\nimport os \nimport pickle \n#=================================================================================================\n# Physiological data read according to the client\n#=================================================================================================\n\n##===================================================\n# EEG data read from files\n##===================================================\n# def eeg_data(p):\n# file_eeg = '/home/gp/Desktop/MER_arin/DEAP_data/eeg_data/'+str(p)+'_data_DEAP'+'.csv'\n# print(file_eeg)\n# eeg_sig = pd.read_csv(file_eeg,sep=',', header = None, engine='python')\n# return eeg_sig\n\n##===================================================\n# \n##===================================================\ndef get_emg_eog_gsr_labels_data(p):\n f='data_preprocessed_python'\n physio_data_all = []\n label_data_all = []\n# file = os.path.join(r, i) //check later\n if p<=8:\n file = '/home/csis/Documents/data_preprocessed_python/s0'+str(p+1)+'.dat'\n else:\n file = '/home/csis/Documents/data_preprocessed_python/s'+str(p+1)+'.dat'\n \n with open(file, 'rb') as s_data: \n content = pickle.load(s_data, encoding='latin1')\n physio_data_all.append(content['data'])\n label_data_all.append(content['labels'])\n# for (r, d, f) in os.walk(f):\n# for i in f:\n# print(i)\n\n p_all = np.array(physio_data_all)\n l_all = np.array(label_data_all)\n EMG_all = p_all[:,:,34:36,:]\n EOG_all = p_all[:,:,32:34,:]\n GSR_all = p_all[:,:,36,:]\n \n print(EMG_all.shape,GSR_all.shape,len(label_data_all))\n \n return EMG_all,EOG_all,GSR_all,label_data_all\n\n##===================================================\n# \n##===================================================\ndef get_eog_v(i,EOG_all):\n s=EOG_all[0]\n print('shape of s',s.shape)\n t = s[i].T\n t = t[128*3:]\n t = t.reshape((-1, 128, 2))\n t_EOG = np.array(t)\n \n return t_EOG\n\ndef get_emg_v(i,EMG_all):\n s=EMG_all[0]\n \n t = s[i].T\n t = t[128*3:]\n t = t.reshape((-1, 128, 2))\n t_EMG = np.array(t)\n \n return t_EMG\n\ndef get_gsr_v(i,GSR_all):\n s=GSR_all[0]\n \n t = s[i].T\n t = t[128*3:]\n t = t.reshape((-1, 128, 1))\n t_GSR = np.array(t)\n \n return t_GSR\n\n\n##===================================================\n# \n##===================================================\ndef get_data_video(i,EMG_all,EOG_all,GSR_all,label_data_all):\n t_EOG_all = []\n t_EOG_all = get_eog_v(i,EOG_all)\n t_EMG_all = []\n t_EMG_all = get_emg_v(i,EMG_all)\n t_GSR_all = []\n t_GSR_all = get_gsr_v(i,GSR_all)\n \n y_all = []\n l=label_data_all[0]\n y = np.ones((60,1))*l[i]\n \n temp = []\n for j in y:\n y_val=[]\n for i in j:\n if i>5:\n y_val.append(1)\n else:\n y_val.append(0)\n temp.append(y_val)\n \n y_all = np.array(temp)\n y_all_concat = y_all.reshape(-1, 4)\n from keras.utils import np_utils\n\n y = np_utils.to_categorical(y_all_concat)\n\n\n from sklearn.model_selection import cross_val_score\n from sklearn.model_selection import KFold\n from sklearn.preprocessing import StandardScaler\n from sklearn.preprocessing import LabelEncoder\n\n scaler1 = StandardScaler()\n val_EMG = t_EMG_all.reshape(-1, 2)\n scaler1 = scaler1.fit(val_EMG)\n EMG = scaler1.transform(val_EMG)\n t_EMG = EMG.reshape(-1, 128, 2)\n\n scaler2 = StandardScaler()\n val_EOG = t_EOG_all.reshape(-1, 2)\n scaler2 = scaler2.fit(val_EOG)\n EOG = scaler2.transform(val_EOG)\n t_EOG = EOG.reshape(-1, 128, 2)\n\n scaler3 = StandardScaler()\n val_GSR = t_GSR_all.reshape(-1, 1)\n scaler3 = 
scaler3.fit(val_GSR)\n GSR = scaler3.transform(val_GSR)\n t_GSR = GSR.reshape(-1, 128, 1)\n \n return t_EMG,t_EOG,t_GSR,y\n","repo_name":"muskaankumar/Fed-ReMECS-mqtt","sub_path":"data_reading_utils.py","file_name":"data_reading_utils.py","file_ext":"py","file_size_in_byte":3859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"}
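The windowing in `get_eog_v` / `get_emg_v` / `get_gsr_v` above relies on the DEAP preprocessing layout: signals are sampled at 128 Hz for 63 s (a 3 s pre-trial baseline followed by a 60 s trial), so dropping the first `128*3` samples and reshaping into 128-sample windows yields exactly 60 one-second segments, matching the `np.ones((60, 1))` label block in `get_data_video`. A standalone shape walk-through (plain NumPy, no DEAP files required):

import numpy as np

# One trial of a two-channel signal, shaped like EOG_all[0][i]: (channels, samples).
two_channels = np.zeros((2, 63 * 128))
t = two_channels.T[128 * 3:]      # drop the 3 s baseline -> (7680, 2)
windows = t.reshape(-1, 128, 2)   # one-second windows at 128 Hz
assert windows.shape == (60, 128, 2)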
+{"seq_id":"38707579444","text":"\"\"\"\nTests the various Ball classes\nthat are defined in the imported m2_changers module.\n\nAuthors: David Mutchler, Mark Hays, Michael Wollowski, Amanda Stouder,\n Chandan Rupakheti, Katie Dion, Claude Anderson, Delvin Defoe,\n Curt Clifton, Matt Boutell, Dave Fisher, their colleagues,\n and PUT YOUR NAME HERE. October 2014.\n\"\"\" # TODO: 1. PUT YOUR NAME IN THE ABOVE LINE\n\nimport simulator as sim\nimport m2_changers as changers\nimport rosegraphics as rg\n\n\n# ----------------------------------------------------------------------\n# TODO: Modify this module as needed to test your Changer classes\n# as you implement them. We have supplied some tests for you.\n# ----------------------------------------------------------------------\ndef main():\n \"\"\"\n Calls the TEST functions in this module to get\n lists of Changers that are good for testing the Changer classes.\n Constructs a Simulator, sending the Changers to the Simulator.\n As such, this provides a VISUAL test of the Changer classes.\n \"\"\"\n # If you add your own classes, add to the following list.\n testers = [test_Dud, test_Mover, test_Randomizer, test_Jiggler,\n test_Follower, test_Grower, test_MoverGrower,\n test_Exploder]\n\n changers_to_test = []\n for tester in testers:\n changers = tester()\n if changers:\n changers_to_test = changers_to_test + changers\n\n sim.Simulator(changers_to_test)\n\n\ndef test_Dud():\n \"\"\" Returns a list of Dud instances good for testing. \"\"\"\n dud1 = changers.Dud(rg.Point(100, 100))\n dud2 = changers.Dud(rg.Point(150, 80))\n dud3 = changers.Dud(rg.Point(200, 240))\n\n return [dud1, dud2, dud3]\n\n\ndef test_Mover():\n \"\"\" Returns a list of Mover instances good for testing. \"\"\"\n # DONE: Implement and test this method.\n circle = rg.Circle(rg.Point(100, 100), 50)\n circle.fill_color = 'green'\n rectangle = rg.Rectangle(rg.Point(125, 125), 75, 100)\n rectangle.fill_color = 'yellow'\n mover1 = changers.Mover(circle, 20, 20)\n mover2 = changers.Mover(rectangle, 30, 50)\n\n return [mover1, mover2]\n\n\ndef test_Randomizer():\n \"\"\" Returns a list of Randomizer instances good for testing. \"\"\"\n # DONE: Implement and test this method.\n circle = rg.Circle(rg.Point(100, 100), 50)\n circle.fill_color = 'blue'\n rectangle = rg.Rectangle(rg.Point(125, 125), 75, 100)\n rectangle.fill_color = 'orange'\n random1 = changers.Randomizer(circle,)\n random2 = changers.Randomizer(rectangle)\n\n return [random1, random2]\n\n\ndef test_Jiggler():\n \"\"\" Returns a list of Jiggler instances good for testing. \"\"\"\n # DONE: Implement and test this method.\n circle = rg.Circle(rg.Point(100, 320), 25)\n circle.fill_color = 'purple'\n rectangle = rg.Rectangle(rg.Point(500, 600), 120, 200)\n rectangle.fill_color = 'red'\n jiggler1 = changers.Jiggler(circle)\n jiggler2 = changers.Jiggler(rectangle)\n\n return [jiggler1, jiggler2]\n\n\ndef test_Follower():\n \"\"\" Returns a list of Follower instances good for testing. \"\"\"\n # DONE: Implement and test this method.\n circle = rg.Circle(rg.Point(300, 320), 25)\n circle.fill_color = 'pink'\n rectangle = rg.Rectangle(rg.Point(500, 400), 100, 80)\n rectangle.fill_color = 'brown'\n follower1 = changers.Jiggler(circle)\n follower2 = changers.Mover(rectangle, 40, 40)\n test1 = changers.Follower(circle, follower1, 2)\n test2 = changers.Follower(rectangle, follower2, 4)\n\n return [test1, test2]\n\ndef test_Grower():\n \"\"\" Returns a list of Grower instances good for testing. 
\"\"\"\n # DONE: Implement and test this method.\n circle1 = rg.Circle(rg.Point(600, 320), 25)\n circle1.fill_color = 'black'\n circle2 = rg.Circle(rg.Point(200, 320), 25)\n circle2.fill_color = 'green'\n grower1 = changers.Grower(circle1, 60)\n grower2 = changers.Grower(circle2, 90)\n\n return [grower1, grower2]\n\ndef test_MoverGrower():\n \"\"\" Returns a list of MoverGrower instances good for testing. \"\"\"\n # DONE: Implement and test this method.\n circle1 = rg.Circle(rg.Point(100, 120), 25)\n circle1.fill_color = 'black'\n circle2 = rg.Circle(rg.Point(30, 20), 25)\n circle2.fill_color = 'green'\n moverg1 = changers.MoverGrower(circle1, 80, 20, 20)\n moverg2 = changers.MoverGrower(circle2, 40, 10, 30)\n\n return [moverg1, moverg2]\n\n\ndef test_Exploder():\n \"\"\" Returns a list of Exploder instances good for testing. \"\"\"\n # DONE: Implement and test this method.\n circle1 = rg.Circle(rg.Point(300, 220), 25)\n circle1.fill_color = 'black'\n circle2 = rg.Circle(rg.Point(330, 220), 25)\n circle2.fill_color = 'green'\n explode1 = changers.MoverGrower(circle1, 20, 20, 80)\n explode2 = changers.MoverGrower(circle2, 10, 30, 40)\n\n return [explode1, explode2]\n\n\n\n\n\n# ----------------------------------------------------------------------\n# If this module is running at the top level (as opposed to being\n# imported by another module), then call the 'main' function.\n# ----------------------------------------------------------------------\nif __name__ == '__main__':\n main()\n","repo_name":"Goldabj/IntroToProgramming","sub_path":"Session19_Inheritance/src/m2_test_changers.py","file_name":"m2_test_changers.py","file_ext":"py","file_size_in_byte":5159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"69822282174","text":"from book.models import (\r\n Book, Category, Author, Publisher, Contract, Market,\r\n DiscountShop, BBook, PBook, EBook, ABook, Types\r\n)\r\n\r\nfrom random import randint, choice, choices\r\nfrom string import ascii_lowercase\r\nfrom django.utils import timezone\r\n\r\nCATEGORIES_COUNT = 1500\r\nAUTHORS_COUNT = 1500\r\nPUBLISHER_COUNT = 100\r\nCONTRACT_COUNT = 100\r\nBOOK_COUNT = 1500\r\nDISCOUNT_SHOP = 100\r\n\r\n\r\ndef get_random_obj(model):\r\n random_idx = randint(0, model.objects.count() - 1)\r\n return model.objects.all()[random_idx]\r\n\r\n\r\ndef get_random_queryset(model):\r\n authors = model.objects.all().values_list('id', flat=True)\r\n return model.objects.filter(pk__in=choices(authors, k=3))\r\n\r\n\r\ndef get_book_type():\r\n return choice([PBook, EBook, ABook, BBook])\r\n\r\n\r\ndef generate_data_by_type(book_type):\r\n if book_type == PBook:\r\n return {}\r\n elif book_type == EBook:\r\n generate = f'https://{\"\".join(choice(ascii_lowercase) for _ in range(randint(2, 30)))}/'\r\n\r\n if EBook.objects.filter(source=generate).exists():\r\n return ''.join(choice(ascii_lowercase) for _ in range(randint(2, 30)))\r\n\r\n return {\r\n \"source\": generate\r\n }\r\n elif book_type == ABook:\r\n return {\r\n \"file\": None\r\n }\r\n elif book_type == BBook:\r\n return {\r\n \"symbol_type\": choice([Types.ASSOCIATIVE, Types.DIDACTIC, Types.COMBINED])\r\n }\r\n\r\n\r\ndef create_category(count):\r\n for item in range(count):\r\n params = {\r\n \"title\": ''.join(choice(ascii_lowercase) for _ in range(randint(2, 50)))\r\n }\r\n Category.objects.create(**params)\r\n\r\n\r\ndef create_author(count):\r\n for item in range(count):\r\n params = {\r\n \"first_name\": ''.join(choice(ascii_lowercase) for _ in range(randint(2, 30))),\r\n \"second_name\": ''.join(choice(ascii_lowercase) for _ in range(randint(2, 30))),\r\n 'percent': randint(2, 30)\r\n }\r\n Author.objects.create(**params)\r\n\r\n\r\ndef create_publisher(count):\r\n for item in range(count):\r\n params = {\r\n \"title\": ''.join(choice(ascii_lowercase) for _ in range(randint(2, 40))),\r\n }\r\n Publisher.objects.create(**params)\r\n\r\n\r\ndef create_contract(count):\r\n\r\n for item in range(count):\r\n params = {\r\n \"title\": ''.join(choice(ascii_lowercase) for _ in range(randint(2, 100))),\r\n \"author\": get_random_obj(Author),\r\n 'publisher': get_random_obj(Publisher)\r\n }\r\n Contract.objects.create(**params)\r\n\r\n\r\ndef create_discount_shop(count):\r\n for item in range(count):\r\n params = {\r\n \"author_discount\": randint(1, 20),\r\n \"shop_discount\": randint(1, 20),\r\n }\r\n DiscountShop.objects.create(**params)\r\n\r\n\r\ndef create_book(count):\r\n\r\n now = timezone.now()\r\n\r\n for item in range(count):\r\n book_type = get_book_type()\r\n params = {\r\n \"title\": ''.join(choice(ascii_lowercase) for _ in range(randint(10, 50))),\r\n \"price\": randint(1, 2000),\r\n \"issued\": now,\r\n \"publisher\": get_random_obj(Publisher),\r\n \"market_id\": randint(1, 2000),\r\n \"discount_market\": randint(1, 50),\r\n \"discount_shop\": get_random_obj(DiscountShop),\r\n \"available\": True,\r\n }\r\n params.update(generate_data_by_type(book_type))\r\n book = book_type.objects.create(**params)\r\n authors_queryset = get_random_queryset(Author)\r\n categories_queryset = get_random_queryset(Category)\r\n\r\n for author in authors_queryset:\r\n book.authors.add(author)\r\n\r\n for categories in categories_queryset:\r\n book.categories.add(categories)\r\n\r\n\r\ndef main():\r\n 
create_category(CATEGORIES_COUNT)\r\n create_author(AUTHORS_COUNT)\r\n create_publisher(PUBLISHER_COUNT)\r\n create_contract(CONTRACT_COUNT)\r\n create_discount_shop(DISCOUNT_SHOP)\r\n create_book(BOOK_COUNT)\r\n\r\n","repo_name":"Lyxf3/Books_Shop","sub_path":"tmp/book.py","file_name":"book.py","file_ext":"py","file_size_in_byte":3990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
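One catch in `generate_data_by_type` above: when the randomly generated `EBook` source already exists, the function returns a bare string, but `create_book` passes the return value straight to `params.update()`, which requires a mapping and would raise `ValueError`. A sketch of that branch with the contract kept (retry until the source is unique, always return a dict; assumes the `EBook` model and the `random`/`string` imports from the module above):

def generate_ebook_params():
    # Retry until the generated source URL is unused.
    while True:
        source = f'https://{"".join(choice(ascii_lowercase) for _ in range(randint(2, 30)))}/'
        if not EBook.objects.filter(source=source).exists():
            return {"source": source}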
+{"seq_id":"18201733655","text":"\"\"\"Contains the BatchClient class.\"\"\"\n\nfrom client import Client\nimport pandas as pd\nimport ast\nfrom concurrent.futures import ThreadPoolExecutor\nimport requests\nimport os\nimport configparser\nfrom csv_file_writer import CsvFileWriter \n\nclass BatchClient:\n \"\"\"Represents a Benchmarking-in-Batch Client.\n \n Responsible for sending a set of requests in batches, to \n the given API and storing the consequent responses in a CSV.\n\n Attributes:\n api_endpoint: A String for the URL/prefix of URL at which to send the request.\n request_type: A String for the HTTP Request Type of the requests to be sent (POST/GET here).\n request_dataset_path: A String for the complete path of the file containing request body/URL suffixes.\n response_file_name: A String for the complete path of the file in which to store the responses.\n \"\"\"\n\n def __init__(self, api_endpoint, request_type, request_dataset_path, response_file_name):\n \"\"\"Initializes BatchClient with api_endpoint, request_type, request_dataset_path and response_file_name.\"\"\"\n self.api_endpoint = api_endpoint\n self.request_type = request_type\n self.request_dataset_path = request_dataset_path\n self.response_file_name = response_file_name\n\n def __create_post_clients(self, count, request_body_list):\n clients = []\n \n for i in range(count):\n clients.append(Client(\n self.api_endpoint, \n self.request_type, \n request_body_list[i]\n )\n )\n\n return clients\n\n def __create_get_clients(self, count, url_path_var_list):\n clients = []\n\n for i in range(count):\n clients.append(Client(\n self.api_endpoint + url_path_var_list[i], \n self.request_type,\n None\n )\n )\n\n return clients\n \n def send_requests_store_responses(self):\n request_body_df = pd.read_csv(self.request_dataset_path)\n response_entry_all_batches = []\n \n for batch_id in range(len(request_body_df)):\n batch_metadata = ast.literal_eval(request_body_df[\"Metadata\"][batch_id])\n batch_data = ast.literal_eval(request_body_df[\"Data\"][batch_id])\n response_entry_batch = []\n latency_entry_batch = []\n\n for sub_batch_id in range(len(batch_data)):\n client_count = batch_metadata[\"qps\"]\n clients = []\n\n if self.request_type == \"GET\":\n clients = self.__create_get_clients(client_count, batch_data[sub_batch_id])\n elif self.request_type == \"POST\":\n clients = self.__create_post_clients(client_count, batch_data[sub_batch_id])\n \n with ThreadPoolExecutor(max_workers = client_count) as executor:\n responses = executor.map(Client.call_send_request, clients)\n\n response_entry_sub_batch = []\n\n for response in responses:\n latency_entry_batch.append(response.elapsed.total_seconds())\n response_entry_sub_batch.append(response.json())\n\n response_entry_batch.append(response_entry_sub_batch)\n \n response_entry_all_batches.append([batch_metadata, response_entry_batch, latency_entry_batch])\n\n csv_file_writer = CsvFileWriter(self.response_file_name, [\"Metadata\", \"Responses\", \"Latency\"], response_entry_all_batches)\n csv_file_writer.write_to_csv()\n \n","repo_name":"googleinterns/chat-service-on-gcp","sub_path":"benchmarking/batch_client.py","file_name":"batch_client.py","file_ext":"py","file_size_in_byte":3733,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"}
+{"seq_id":"70921512572","text":"\"\"\"\n* make a function where you're given a vehicle ID and an accessToken and you check a file for if it exists (structure this file as a .py that's a dict, where key is vehicle ID and value is the vehicle class instance)\n* if it exists, return the instance of the vehicle\n* if it doesn't exist, then you call a different function to initialize a Vehicle instance using the vehicle ID and accessToken\n\nhere's some helpful stuff for that second function:\n\nvehicle = smartcar.Vehicle(vehicleID, accessToken[\"access_token\"])\nlocation = vehicle.location()\nodometer = vehicle.odometer()\ninfo = vehicle.info()\n\nexamples:\n{'data': {'latitude': 37.07706832885742, 'longitude': -108.27452087402344}, 'age': datetime.datetime(2018, 11, 18, 3, 16, 12, 579000, tzinfo=tzutc())}\n{'data': {'distance': 44376.44140625}, 'unit_system': 'metric', 'age': datetime.datetime(2018, 11, 18, 3, 16, 12, 339000, tzinfo=tzutc())}\n{'id': 'bf67d922-1e8a-4eb3-bffe-475feaee8e4e', 'make': 'TESLA', 'model': 'Model S', 'year': 2016}\n\n\nAlso we need an update function (given a vehicle ID and a vehicle instance, update the dictionary's vehicle ID with the new vehicle instance)\n\"\"\"\nimport json\nimport smartcar\n\nfrom vehicle import Vehicle\n\ndef FindVehicleInstance(vehicleID, accessToken):\n \"\"\"check if it exists in json file. if so, return that. if not, make new instance in that json\"\"\"\n vehiclesDict = getVehicleDataAsDict()\n if vehicleID in vehiclesDict:\n return toVehicleInstance(vehicleID, vehiclesDict[vehicleID]) # return the vehicle instance (already in data.json)\n else:\n # doesn't exist in data.json, we'll add it into there\n vehicle = smartcar.Vehicle(vehicleID, accessToken)\n vehicleInfo = vehicle.info()\n vehicleOdometer = vehicle.odometer()['data']['distance']\n vehicleLatitude = vehicle.location()['data']['latitude']\n vehicleLongitude = vehicle.location()['data']['longitude']\n newVehicleInstance = Vehicle(vehicleID, vehicleInfo['make'], vehicleInfo['model'], vehicleInfo['year'], [vehicleOdometer], [(vehicleLatitude, vehicleLongitude)], accessToken)\n\n updateDictionary(vehicleID, newVehicleInstance.VehicleToDict())\n\n return newVehicleInstance\n\n\"\"\"Creates vehicle dictionary when data.json is empty\"\"\"\ndef vehicleInit(vehicleId, vehicle):\n storedDict = {}\n storedDict[vehicleId] = vehicle\n\n with open('data.json', 'w') as outfile:\n json.dump(storedDict, outfile)\n\n\"\"\"Returns the data from data.json as a dictionary\"\"\"\ndef getVehicleDataAsDict():\n with open('data.json', 'r') as infile:\n storedJson = json.load(infile)\n return storedJson\n\n\n\"\"\"Pushes a new vehicle (dict) on the dictionary\"\"\"\ndef updateDictionary(vechicleId, vehicle):\n \n # pull in json and read in dictionary\n storedDict = getVehicleDataAsDict()\n\n # push to dictionary \n storedDict[vechicleId] = vehicle\n\n # write to json file as json\n with open('data.json', 'w') as outfile:\n json.dump(storedDict, outfile)\n\ndef toVehicleInstance(id, vehicleDict):\n vehicle = Vehicle(vehicleDict[\"id\"], vehicleDict[\"make\"], vehicleDict[\"model\"], vehicleDict[\"year\"], vehicleDict[\"odometer\"],eval(vehicleDict[\"location\"]), vehicleDict[\"accessToken\"])\n vehicle.setTeslaAirFilterLifespan(vehicleDict[\"teslaAirFilterLifespan\"])\n vehicle.setBrakePadLifespan(vehicleDict[\"brakePadLifespan\"])\n vehicle.setBatteryLifespan(vehicleDict[\"batteryLifespan\"])\n vehicle.setWindshieldWiperLifespan(vehicleDict[\"windshieldWiperLifespan\"])\n vehicle.textSent = 
vehicleDict[\"textSent\"]\n return vehicle\n","repo_name":"karmitdandona/SacHacks2018","sub_path":"vehicleInit.py","file_name":"vehicleInit.py","file_ext":"py","file_size_in_byte":3482,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
+{"seq_id":"26307526921","text":"import tensorflow as tf\nfrom keras import Model\nfrom keras.losses import binary_crossentropy\nfrom keras.layers import Layer\nfrom keras.metrics import MeanSquaredError, Mean\n\n# Credits:\n# Sampling layer and train_step functions are directly obtained from \n# https://keras.io/examples/generative/vae/\n\n\nclass Sampling(Layer):\n \"\"\"Uses (z_mean, z_log_var) to sample z, the vector encoding\"\"\"\n\n def call(self, inputs):\n z_mean, z_log_var = inputs\n batch = tf.shape(z_mean)[0]\n dim = tf.shape(z_mean)[1]\n epsilon = tf.keras.backend.random_normal(shape=(batch, dim))\n return z_mean + tf.exp(0.5 * z_log_var) * epsilon\n\nclass VAE(Model):\n \"\"\"A VAE wrapper for ae based on VGG16\"\"\"\n def __init__(self, *args, **kwargs):\n super(VAE, self).__init__(*args, **kwargs)\n\n self.total_loss_tracker = Mean(name=\"total_loss\")\n self.reconstruction_loss_tracker = Mean(name=\"reconstruction_loss\")\n self.kl_loss_tracker = Mean(name=\"kl_loss\")\n self.mse_loss_tracker = MeanSquaredError(name=\"mean_squared_error\")\n\n def train_step(self, data):\n x, y = data\n\n with tf.GradientTape() as tape:\n reconstructed, z_mean, z_log_var, _ = self(x, training=True)\n\n reconstruction_loss = tf.reduce_mean(\n tf.reduce_sum(\n binary_crossentropy(tf.expand_dims(y, -1), reconstructed), axis=(1, 2)\n )\n )\n\n kl_loss = -0.5 * (1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var))\n kl_loss = tf.reduce_mean(tf.reduce_sum(kl_loss, axis=1))\n total_loss = reconstruction_loss + kl_loss\n grads = tape.gradient(total_loss, self.trainable_weights)\n self.optimizer.apply_gradients(zip(grads, self.trainable_weights))\n self.total_loss_tracker.update_state(total_loss)\n self.reconstruction_loss_tracker.update_state(reconstruction_loss)\n self.kl_loss_tracker.update_state(kl_loss)\n self.mse_loss_tracker.update_state(y, reconstructed)\n\n return {\n \"loss\": self.total_loss_tracker.result(),\n \"reconstruction_loss\": self.reconstruction_loss_tracker.result(),\n \"kl_loss\": self.kl_loss_tracker.result(),\n \"mean_squared_error\": self.mse_loss_tracker.result()\n }\n","repo_name":"LinasVidziunas/Unsupervised-lesion-detection-with-multi-view-MRI-and-autoencoders","sub_path":"variational.py","file_name":"variational.py","file_ext":"py","file_size_in_byte":2319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"70940239293","text":"\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nimport sys\nimport vip1\nimport paramiko\nimport time\nimport datetime as d\n\n\nclass XDialog(QDialog, vip1.Ui_Dialog):\n\n def __init__(self):\n QDialog.__init__(self)\n self.setupUi(self)\n self.setup1()\n self.button6clicked()\n self.button5clicked()\n\t\n\n def setup1(self):\n\n global table1\n table1 = self.tableWidget\n global table2\n table2 = self.tableWidget_2\n global table3\n table3 = self.tableWidget_3\n global table5\n table5 = self.tableWidget_5\n global table6\n table6 = self.tableWidget_6\n global table7\n table7 = self.tableWidget_7\n global table8\n table8 = self.tableWidget_8\n global tr1\n tr1 = self.treeWidget\n global tr2\n tr2 = self.treeWidget_2\n global tr3\n tr3 = self.treeWidget_3\n global tr4\n tr4 = self.treeWidget_4\n global tr5\n tr5 = self.treeWidget_5\n global prog\n prog = self.progressBar\n global btn6\n \n label11 = self.label_11\n pixmap = QPixmap(\"back5.jpg\")\n pixmap = pixmap.scaledToHeight(520)\n pixmap = pixmap.scaledToWidth(580)\n label11.setPixmap(pixmap)\n\n label12 = self.label_12\n pixmap2 = QPixmap(\"smu2.jpg\")\n pixmap2 = pixmap2.scaledToHeight(90)\n pixmap2 = pixmap2.scaledToWidth(80)\n label12.setPixmap(pixmap2)\n \n le1 = self.lineEdit \n self.groupBox_2.close()\n\n prog.setValue(0)\n le3 = self.lineEdit_3\n le3.displayText()\n\n #test\n \n def button6clicked(self):\n btn6 = self.pushButton_6\n btn6.clicked.connect(self.btn6click)\n\n def btn6click(self):\n self.Authentication()\n\n\n def button5clicked(self):\n btn5 = self.pushButton_5\n btn5.clicked.connect(self.btn5click)\n\n def btn5click(self):\n QMessageBox.about(self,\"제작팀\",\"지도교수 : 오 선 진 교수님\\n조장: 김 경 일\\n조원: 문 진 영, 이 시 후, 조 건 희\")\n \n \n def Authentication(self):\n\n le1 = self.lineEdit\n le2 = self.lineEdit_2\n le3 = self.lineEdit_3\n lb5 = self.label_5\n prog = self.progressBar\n tb1 = self.textBrowser\n \n #\n\n #userinfo\n tb5 = self.textBrowser_5\n tb6 = self.textBrowser_6\n tb7 = self.textBrowser_7\n tb8 = self.textBrowser_8\n\n #VERSION info\n tb9 = self.textBrowser_9\n tb10 = self.textBrowser_10\n tb11 = self.textBrowser_11\n tb12 = self.textBrowser_12\n\n #cpuinfo\n tb13 = self.textBrowser_13\n tb14 = self.textBrowser_14\n tb15 = self.textBrowser_15\n tb16 = self.textBrowser_16\n \n global client\n client = paramiko.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n \n host = str(le1.text())\n port_num= 22\n user = str(le2.text())\n pw = str(le3.text())\n\n def con():\n try:\n client.connect(hostname=host, port=port_num, username=user, password=pw)\n return True\n except:\n return False\n \n def exec_cmd(cmd) :\n (stdin, stdout, stderr) = client.exec_command(cmd)\n if stderr.read().strip() != \"\" :\n return invoke_shell(cmd)\n return stdout.read().strip()\n \n def invoke_shell(cmd) :\n channel = client.invoke_shell()\n response = channel.recv(9999)\n channel.send(cmd+\"\\n\")\n while not channel.recv_ready():\n time.sleep(3)\n response = channel.recv(9999)\n out = response.decode(\"utf-8\")\n first_enter_index = min(out.find(\"/r\"), out.find(\"\\n\"))\n out = out.replace(\"\\r\\n\", \"\\n\")\n return out.strip()\n\n con1 = con()\n\n if con1 == True :\n\n global route1\n global ospf1\n global ospfarea1\n global memory1\n global user1\n global cpu1\n global version1\n global eth0cmd\n global eth1cmd\n global eth2cmd\n global eth3cmd\n \n route1 = exec_cmd('show ip route')\n ospf1 = exec_cmd('show ip ospf neighbor')\n ospfarea1 = exec_cmd('show ip 
ospf | no-more')\n memory1 = exec_cmd('show system memory')\n user1 = exec_cmd('show system login user')\n cpu1 = exec_cmd('show hardware cpu')\n version1 = exec_cmd('show version')\n eth0cmd = exec_cmd('show interface ethernet eth0')\n eth1cmd = exec_cmd('show interface ethernet eth1')\n eth2cmd = exec_cmd('show interface ethernet eth2')\n eth3cmd = exec_cmd('show interface ethernet eth3')\n noshutdown1 = exec_cmd('/home/vyos/')\n\n QMessageBox.about(self,\"notice\",\"Login successful\")\n #\n self.groupBox.close()\n self.groupBox_2.show()\n time1 = d.datetime.now()\n lb5.setText(\"Device : {}\\nIP Address: {}\\nroot id: {}\\n\\n\\n\\n{}\".format('vyos',host,user,time1))\n\n #라우팅 테이블\n rownum = route1[188:].count('\\x1b[m\\n')\n table1.setRowCount(rownum)\n RoutingTableList = self.routeseparate(route1)\n cnt1 = 0\n cnt2 = 0\n for i in range(0,rownum):\n if RoutingTableList[i][0] == 'O':\n cnt1 = cnt1 + 1\n table3.setRowCount(cnt1)\n \n for i in range(0,rownum):\n for j in range(0,4):\n table1.setItem(i,j, QTableWidgetItem(self.makeroutelist(RoutingTableList[i])[j]))\n if self.makeroutelist(RoutingTableList[i])[0][0] == 'O':\n for k in range(0,4):\n table3.setItem(cnt2,k, QTableWidgetItem(table1.item(i,k).text()))\n cnt2 += 1\n \n #ospf테이블 \n ospfrownum = ospf1[176:].count('\\x1b[m \\x08')\n table2.setRowCount(ospfrownum)\n ospfTableList = self.ospfTable(ospf1)\n\n for i in range(0,ospfrownum):\n for j in range(0,4):\n table2.setItem(i,j, QTableWidgetItem(self.makeospflist(ospfTableList[i])[j]))\n\n #ospf 에어리어 분류\n oal = self.ospfArealist(ospfarea1)\n tr1.topLevelItem(0).setText(1, oal[0])\n tr1.topLevelItem(0).child(0).child(0).setText(1, oal[1])\n tr1.topLevelItem(0).child(0).child(1).setText(1, oal[2])\n tr1.topLevelItem(0).child(1).setText(1, oal[3])\n tr1.topLevelItem(0).child(2).setText(1, oal[4])\n tr1.topLevelItem(0).child(3).setText(1, oal[5])\n\n #메모리 관리\n mvalue = self.memorylist(memory1)\n prog.setValue(mvalue[1]/mvalue[0]*100)\n\n tb1.setText(\"총 용량(Mb): \"+str(mvalue[0])+\"\\n잔여 용량(Mb): \"\n +str(mvalue[0]-mvalue[1])+\"\\n사용된 용량(Mb): \"+str(mvalue[1]))\n\n #user정보\n us = user1.split()\n tb5.setText(us[16])\n tb6.setText(us[17])\n tb7.setText(us[19])\n tb8.setText(str(time1))\n\n #버전 정보 \n verinfo = self.verlist(version1)\n tb9.setText(verinfo[0])\n tb10.setText(verinfo[1])\n tb11.setText(verinfo[2])\n tb12.setText(verinfo[3])\n\n #cpu정보 \n cpuinfo = self.cpulist(cpu1)\n tb13.setText(cpuinfo[0])\n tb14.setText(cpuinfo[1])\n tb15.setText(cpuinfo[2])\n tb16.setText(cpuinfo[3])\n\n #Interface\n for (ethXcmd,tableX) in [(eth0cmd,table5),(eth1cmd,table6),(eth2cmd,table7),(eth3cmd,table8)]:\n for n in range(0,2):\n for m in range(0,6):\n tableX.setItem(n,m, QTableWidgetItem(self.etherlist(ethXcmd)[m+(6*n)]))\n\n #address\n for (ethXcmd, trX) in [(eth0cmd,tr2),(eth1cmd,tr3),(eth2cmd,tr4),(eth3cmd,tr5)] :\n ad = self.addrlist(ethXcmd)\n trX.topLevelItem(0).child(0).setText(1, ad[0])\n trX.topLevelItem(0).child(1).setText(1, ad[1])\n trX.topLevelItem(1).child(0).setText(1, ad[2])\n trX.topLevelItem(2).child(0).setText(1, ad[3])\n trX.topLevelItem(2).child(1).setText(1, ad[4])\n \n \n \n else :\n QMessageBox.about(self,\"notice\",\"Login failed\")\n\n \n def routeseparate(self,route):\n routex = route[188:]\n x = route[188:].count('\\x1b[m\\n')\n j = 0\n routelist = []\n for i in range(0,x):\n temp = routex.index('\\n',j,-1) + 1\n routelist.append(routex[j:temp])\n j= temp\n return routelist\n\n def makeroutelist(self, route):\n if route[0]=='S':\n protocol = 'Static'\n addr = 
route[4:route.index('[')-1]\n nexthop = route[route.index('via')+4:route.index(',')]\n nexthopif = route[route.index(',')+2:route.index('\\x1b')]\n\n elif route[0]=='C':\n protocol = 'Direct'\n addr = route[4:route.index('is')-1]\n nexthop = '-'\n nexthopif = route[route.index(',')+2:route.index('\\x1b')]\n\n elif route[0]=='O':\n addr = route[4:route.index('[')-1]\n try:\n nexthop = route[route.index('via')+4:route.index(',')]\n nexthopif = route[route.index(',')+2:route.index('\\x1b')-10]\n protocol = 'OSPF'\n except:\n nexthop = '-'\n nexthopif = route[route.index(',')+2:route.index('\\x1b')-10]\n protocol = 'OSPF(Direct)'\n\n elif route[0]=='B':\n protocol = 'BGP'\n addr = route[4:route.index('[')-1]\n nexthop = route[route.index('via')+4:route.index('(')-1]\n nexthopif = '-'\n \n else :\n protocol='x'\n addr='x'\n nexthop='x'\n nexthopif='x'\n \n \n return [protocol,addr,nexthop,nexthopif]\n\n def ospfTable (self, route):\n routex = route[176:]\n x = routex.count('\\x1b[m \\x08')\n j=0\n routelist = []\n for i in range(0,x):\n temp = routex.index('\\n',j,-1) + 1\n routelist.append(routex[j:temp])\n j= temp\n return routelist\n\n def makeospflist (self, route):\n o = route.split()\n return [o[0],o[3],o[4],o[5]]\n\n def ospfArealist(self,ospfroute):\n area1 = ospfroute[ospfroute.index(\"Area ID\"):]\n ospf_area = area1[9:area1.index('\\n')]\n\n intnum1 = ospfroute[ospfroute.index(\"Number of interfaces in this area\"):]\n ospf_intmumT = intnum1[intnum1.index('Total')+7:intnum1.index(',')]\n ospf_intmumA = intnum1[intnum1.index('Active')+8:intnum1.index('\\n')]\n\n adj1 = ospfroute[ospfroute.index(\"Number of fully adjacent neighbors in this area\"):]\n ospf_adj = adj1[48:adj1.index('\\n')]\n\n if(ospfroute.count('no authentication') == 1):\n ospf_auth = 'N'\n else:\n ospf_auth = 'Y'\n\n lsa1 = ospfroute[ospfroute.index(\"Number of LSA\"):]\n ospf_lsa = lsa1[14:lsa1.index('\\n')]\n \n return [ospf_area,ospf_intmumT,ospf_intmumA,ospf_adj,ospf_auth,ospf_lsa]\n\n def memorylist(self,memorycmd):\n total1 = memorycmd[memorycmd.index(\"Total\"):]\n totalmemory = total1[6:total1.index('\\x1b[m\\n')]\n\n used1 = memorycmd[memorycmd.index(\"Used\"):]\n usedmemory = used1[6:used1.index('\\x1b[m\\n')]\n\n return [int(totalmemory), int(usedmemory)]\n\n def cpulist(self, cpucmd):\n mhz1 = cpucmd[cpucmd.index(\"CPU MHz\"):]\n cpu_mhz = mhz1[23:mhz1.index('\\x1b[m\\n')]\n\n arc1 = cpucmd[cpucmd.index(\"Architecture\"):]\n cpu_arc = arc1[23:arc1.index('\\x1b[m\\n')]\n\n mod1 = cpucmd[cpucmd.index(\"CPU op-mode(s)\"):]\n cpu_mod = mod1[23:mod1.index('\\x1b[m\\n')]\n\n vendor1 = cpucmd[cpucmd.index(\"Vendor ID\"):]\n cpu_vendor = vendor1[23:vendor1.index('\\x1b[m\\n')]\n\n return [cpu_mhz, cpu_arc, cpu_mod, cpu_vendor]\n \n def verlist(self, vercmd):\n ver1 = vercmd[vercmd.index(\"Version\"):]\n ver = ver1[14:ver1.index('\\x1b[m\\n')]\n\n hv1 = vercmd[vercmd.index(\"Hypervisor\"):]\n hv = hv1[14:hv1.index('\\x1b[m\\n')]\n\n hwm1 = vercmd[vercmd.index(\"HW model\"):]\n hwm = hwm1[14:hwm1.index('\\x1b[m\\n')]\n\n boot1 = vercmd[vercmd.index(\"Boot via\"):]\n boot = boot1[14:boot1.index('\\x1b[m\\n')]\n\n return [ver, hv, hwm, boot]\n\n def etherlist(self, ethcmd):\n RX = ethcmd[ethcmd.index('RX'):ethcmd.index('TX')-5]\n TX = ethcmd[ethcmd.index('TX'):]\n RX1 = RX.split()\n TX1 = TX.split()\n def rateform(x,y):\n return str(float(x)/float(y)*100)\n \n return [RX1[7], RX1[8], RX1[9], RX1[10], rateform(RX1[9],RX1[8]), rateform(RX1[10],RX1[8]) ,TX1[7], TX1[8], TX1[9], TX1[10], rateform(TX1[9],TX1[8]), 
rateform(TX1[10],TX1[8])]\n\n def addrlist(self, ethcmd):\n mac1 = ethcmd[ethcmd.index('link/ether'):ethcmd.index('\\x1b[m\\n inet')]\n try:\n ip41= ethcmd[ethcmd.index('inet '):ethcmd.index('scope global')]\n ip4 = ip41.split()\n except:\n ip4=['None','None','None','None']\n ip61 = ethcmd[ethcmd.index('inet6'):ethcmd.index('scope link')]\n mac = mac1.split()\n ip6 = ip61.split()\n\n return [ ip4[1], ip4[3], ip6[1], mac[1], mac[3] ]\n \n \nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n dlg = XDialog()\n dlg.show()\n app.exec_()\n\n\n\n","repo_name":"kki7823/capston_final_vip","sub_path":"run_vip_final.py","file_name":"run_vip_final.py","file_ext":"py","file_size_in_byte":14077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
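The GUI above funnels every router query through paramiko's `exec_command` / `invoke_shell` pair; the plain exec path looks like this in isolation (host and credentials are placeholders):

import paramiko

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(hostname='192.0.2.1', port=22, username='vyos', password='secret')

# exec_command returns (stdin, stdout, stderr) file-like channel wrappers.
stdin, stdout, stderr = client.exec_command('show version')
print(stdout.read().decode())

client.close()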
+{"seq_id":"28746716575","text":"import unittest\nimport src.data_structures as ds\n\n\nclass TestSolution(unittest.TestCase):\n def test_solution_init(self):\n c = ds.Car(0, 0, 0, 0)\n s1 = ds.Station(\"A\", 10, 0, 0)\n s2 = ds.Station(\"B\", 20, 0, 0)\n sol1 = ds.Solution(c)\n sol1.add_station((s1, 10))\n sol1.add_station((s2, 20))\n\n sol2 = ds.Solution(c, [(s1, 10), (s2, 20)])\n self.assertEqual(sol1.solution, sol2.solution)\n\n def test_solution_value_1(self):\n c = ds.Car(0, 0, 0, 0)\n s1 = ds.Station(\"A\", 10, 0, 0)\n s2 = ds.Station(\"B\", 20, 0, 0)\n solution = ds.Solution(c)\n\n solution.add_station((s1, 10))\n solution.add_station((s2, 20))\n self.assertEqual(solution.solution_value(), 500)\n\n def test_solution_value_2(self):\n c = ds.Car(0, 0, 0, 0)\n s1 = ds.Station(\"A\", 11, 0, 0)\n s2 = ds.Station(\"B\", 22, 0, 0)\n solution = ds.Solution(c)\n\n solution.add_station((s1, 10))\n solution.add_station((s2, 20))\n self.assertEqual(solution.solution_value(), 550)\n\n def test_solution_gt(self):\n c = ds.Car(0, 0, 0, 0)\n s1 = ds.Station(\"A\", 11, 0, 0)\n s2 = ds.Station(\"B\", 22, 0, 0)\n\n solution1 = ds.Solution(c, [(s1, 10), (s2, 20)])\n solution2 = ds.Solution(c, [(s1, 20), (s2, 30)])\n self.assertTrue(solution2 > solution1)\n\n def test_solution_ge(self):\n c = ds.Car(0, 0, 0, 0)\n s1 = ds.Station(\"A\", 10, 0, 0)\n s2 = ds.Station(\"B\", 20, 0, 0)\n\n solution1 = ds.Solution(c)\n solution2 = ds.Solution(c)\n solution1.add_station((s1, 10))\n solution1.add_station((s2, 20))\n solution2.add_station((s1, 10))\n solution2.add_station((s2, 20))\n self.assertTrue(solution2 >= solution1)\n\n def test_solution_lt(self):\n c = ds.Car(0, 0, 0, 0)\n s1 = ds.Station(\"A\", 11, 0, 0)\n s2 = ds.Station(\"B\", 22, 0, 0)\n\n solution1 = ds.Solution(c)\n solution2 = ds.Solution(c)\n solution1.add_station((s1, 10))\n solution1.add_station((s2, 20))\n solution2.add_station((s1, 5))\n solution2.add_station((s2, 5))\n self.assertTrue(solution2 < solution1)\n\n def test_solution_le(self):\n c = ds.Car(0, 0, 0, 0)\n s1 = ds.Station(\"A\", 10, 0, 0)\n s2 = ds.Station(\"B\", 20, 0, 0)\n\n solution1 = ds.Solution(c, [(s1, 10), (s2, 20)])\n solution2 = ds.Solution(c, [(s1, 10), (s2, 20)])\n self.assertTrue(solution2 <= solution1)\n\n def test_solution_len(self):\n c = ds.Car(0, 0, 0, 0)\n s1 = ds.Station(\"A\", 10, 0, 0)\n s2 = ds.Station(\"B\", 20, 0, 0)\n\n solution1 = ds.Solution(c, [(s1, 10), (s2, 20)])\n self.assertEqual(len(solution1), 2)\n\n def test_solution_penalty_function(self):\n c = ds.Car(50, 10, 0, 40)\n s = ds.Solution(c)\n s1 = ds.Station(\"A\", 10, 10, 100)\n s2 = ds.Station(\"B\", 20, 20, 200)\n\n s.add_station((s1, 5))\n s.add_station((s2, 10))\n\n self.assertEqual(s.penalty_function, [22.5, 20])\n\n def test_solution_penalty_function2(self):\n c = ds.Car(50, 10, 0, 40)\n s = ds.Solution(c)\n s1 = ds.Station(\"A\", 10, 10, 100)\n s2 = ds.Station(\"B\", 20, 20, 200)\n\n s.add_station((s1, 5))\n s.add_station((s2, 10))\n\n self.assertEqual(s.get_penalty(), 42.5)\n\n def test_solution_remove(self):\n c = ds.Car(50, 10, 0, 40)\n s = ds.Solution(c)\n s1 = ds.Station(\"A\", 10, 10, 100)\n s2 = ds.Station(\"B\", 20, 20, 200)\n s3 = ds.Station(\"C\", 30, 30, 300)\n\n s.add_station((s1, 5))\n s.add_station((s2, 10))\n s.add_station((s3, 29))\n s.remove_station(1)\n self.assertEqual(len(s), 2)\n\n def test_get_station_position(self):\n c = ds.Car(50, 10, 0, 40)\n s = ds.Solution(c)\n s1 = ds.Station(\"A\", 10, 10, 100)\n s.add_station((s1, 20))\n 
self.assertEqual(s.get_station_position(0), 100)\n\n def test_get_station(self):\n c = ds.Car(50, 10, 0, 40)\n s = ds.Solution(c)\n s1 = ds.Station(\"A\", 10, 10, 100)\n s.add_station((s1, 20))\n self.assertEqual(s.get_station(0), s1)\n\n def test_get_stations(self):\n c = ds.Car(50, 10, 0, 40)\n s = ds.Solution(c)\n s1 = ds.Station(\"A\", 10, 10, 100)\n s2 = ds.Station(\"Z\", 30, 40, 50)\n s.add_station((s1, 20))\n s.add_station((s2, 300))\n self.assertEqual(s.get_stations(), [s1, s2])\n\n\nclass TestCar(unittest.TestCase):\n def test_move_car_position(self):\n c = ds.Car(50, 10, 0, 40)\n s1 = ds.Station(\"A\", 10, 5, 75)\n s2 = ds.Station(\"B\", 20, 15, 200)\n\n c.move_car(s1)\n c.move_car(s2)\n self.assertEqual(c.curr_position, 200)\n\n def test_move_car_fuel_level(self):\n c = ds.Car(50, 10, 0, 40)\n s1 = ds.Station(\"A\", 10, 10, 80)\n\n c.move_car(s1)\n self.assertEqual(c.curr_fuel_level, 49)\n\n def test_move_car_fuel_level2(self):\n c = ds.Car(50, 15, 0, 40)\n s1 = ds.Station(\"A\", 10, 30, 80)\n\n c.move_car(s1)\n self.assertEqual(c.curr_fuel_level, 45.5)\n\n\nclass TestStation(unittest.TestCase):\n def test_equal(self):\n s1 = ds.Station(\"A\", 30, 30, 40)\n s2 = ds.Station(\"A\", 30, 30, 40)\n self.assertEqual(s1, s2)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"13Dominik/Simulated-annealing","sub_path":"test/test_data_structures.py","file_name":"test_data_structures.py","file_ext":"py","file_size_in_byte":5410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"6642018239","text":"#!/usr/bin/env python3\n\nimport time\nimport socket\n\nfrom pyos import system_call\nfrom pyos import socket_wrapper\nfrom pyos import schedule\n\n\nclass TcpServer(object):\n def __init__(self, host='127.0.0.1', port=4444):\n self.host = host\n self.port = port\n \n def start(self):\n print(\"Server starting on port:\", self.port)\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.bind((self.host, self.port))\n self.sock.listen(5)\n\n sock = socket_wrapper.Socket(self.sock)\n while True:\n client, addr = yield sock.accept()\n yield system_call.NewTask(self.handle_client(client, addr))\n\n @staticmethod\n def handle_client(client, addr):\n print(\"Connection from\", addr)\n\n host, port = addr\n while True:\n data = yield client.recv(65536)\n if not data:\n break\n\n message = '%s [%s:%d]: %s' % (time.strftime(\"%F %H:%M:%S\"), host, port, data.decode('utf-8'))\n yield client.send(data.encode('utf-8'))\n\n client.close()\n\n print(\"Client closed\")\n yield\n\n\ndef main():\n tcpServer = TcpServer()\n sched = schedule.Scheduler()\n sched.new(tcpServer.start())\n sched.mainloop()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"raojinlin/pyos","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"29985722568","text":"\"\"\"\nAmaan Rahman\nECE 472: Deep Learning\nAssigment 2: Binary Classification\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nfrom tqdm import trange\n\n# ---- Global Variables ----\nNUM_SAMPLES = 500\nBATCH_SIZE = 32\nNUM_ITR = 2000\nSEED = 1618\nSIGMA_NOISE = 0.1\nROT_NUM = 2\n\n# class for generating data\nclass Data(object):\n def __init__(self, num_samples, sigma, id, attr):\n\n # spiral attributes\n theta = np.random.uniform(attr[\"min\"], attr[\"max\"], size=(num_samples))\n spiral = self.Spiral(attr[\"center\"], attr[\"gap\"], theta, 1)\n\n # generate data\n factor = 1 if id == 1 else -1\n noise = sigma * np.random.normal(size=(num_samples)) # gaussian noise\n self.x = (\n factor * spiral.r * np.cos(theta) / 1.5 + noise\n ) # arbitrary scaling factor\n self.y = factor * spiral.r * np.sin(theta) + noise\n\n self.spiral = spiral._data((self.x, self.y, [id] * num_samples))\n\n def _init_input(self, data):\n self.data = tf.constant(data[0 : data.shape[0] - 1], dtype=np.float32)\n self.labels = tf.constant(\n data[data.shape[0] - 1], shape=[1, data.shape[1]], dtype=np.float32\n )\n\n def _batchGet(self, batch_size):\n self.index = NUM_SAMPLES * 2\n rand_ind = np.random.choice(self.index, size=batch_size)\n batch_data = tf.squeeze(tf.gather(self.data, rand_ind, axis=1))\n batch_labels = tf.squeeze(tf.gather(self.labels, rand_ind, axis=1))\n\n # normalize data\n return (\n batch_data,\n batch_labels,\n )\n\n # https://en.wikipedia.org/wiki/Archimedean_spiral\n class Spiral(object):\n def __init__(self, a, b, theta, n):\n self.r = a + b * (theta ** (1 / n))\n\n def _data(self, xy_dat):\n self.data = xy_dat\n return self\n\n\nclass MLP(tf.Module):\n def __init__(self, X_features, depth, width_arr):\n self.W = [None] * depth\n self.B = [None] * depth\n for width, k in zip(width_arr, range(1, depth + 1)):\n self.W[k - 1] = tf.Variable(\n 0.2 * tf.random.normal(shape=[X_features, width]),\n name=(\"WEIGHTS_\" + str(k)),\n dtype=np.float32,\n )\n self.B[k - 1] = tf.Variable(\n 0.001 * tf.ones(shape=[width, 1]),\n name=(\"BIAS_\" + str(k)),\n dtype=np.float32,\n )\n\n X_features = width\n\n def __call__(self, X): # output from current layer\n X_k = X\n for W_k, B_k in zip(self.W, self.B):\n func = tf.nn.relu if W_k.shape[1] != 1 else tf.nn.sigmoid\n self.Z = tf.squeeze(func(((tf.transpose(W_k) @ X_k) + B_k)))\n X_k = tf.squeeze(self.Z)\n return self.Z # output is the predicted probabilities for input batch\n\n\ndef train(data, model):\n optimizer = tf.optimizers.Adam()\n bar = trange(NUM_ITR)\n loss_dat = [0] * NUM_ITR\n for i in bar:\n with tf.GradientTape() as tape:\n X, y_true = data._batchGet(BATCH_SIZE)\n y_hat = model(X)\n loss_dat[i] = tf.losses.binary_crossentropy(y_true, y_hat)\n\n grads = tape.gradient(loss_dat[i], model.trainable_variables)\n optimizer.apply_gradients(zip(grads, model.trainable_variables))\n bar.set_description(f\"Loss @ {i} => {loss_dat[i].numpy():0.6f}\")\n bar.refresh()\n\n return loss_dat\n\n\n# https://machinelearningmastery.com/plot-a-decision-surface-for-machine-learning/\ndef decision_surf(data, model):\n min1, max1 = data[0, :].min() - 1, data[0, :].max() + 1\n min2, max2 = data[1, :].min() - 1, data[1, :].max() + 1\n\n x1grid = np.arange(min1, max1, 0.1)\n x2grid = np.arange(min2, max2, 0.1)\n X, Y = np.meshgrid(x1grid, x2grid)\n r1, r2 = X.flatten(), Y.flatten()\n r1, r2 = r1.reshape((1, len(r1))), r2.reshape((1, len(r2)))\n G = np.vstack((r1, r2))\n Z = tf.reshape(model(G), 
shape=X.shape)\n return (X, Y, Z)\n\n\n# very messy data object setup :/\n# generating 2 seperate data objects\ndef main():\n np.random.seed(SEED)\n # generate 2 Archimidean spirals\n dataset = (\n Data(\n NUM_SAMPLES,\n SIGMA_NOISE,\n 1,\n {\"min\": -ROT_NUM * 2 * np.pi + 0.1, \"max\": -0.1, \"center\": -1, \"gap\": 1},\n ),\n Data(\n NUM_SAMPLES,\n SIGMA_NOISE,\n 0,\n {\"min\": -ROT_NUM * 2 * np.pi + 0.1, \"max\": -0.1, \"center\": -1, \"gap\": 1},\n ),\n )\n\n spiral_A = list(\n zip(\n dataset[0].spiral.data[0],\n dataset[0].spiral.data[1],\n dataset[0].spiral.data[2],\n )\n )\n spiral_B = list(\n zip(\n dataset[1].spiral.data[0],\n dataset[1].spiral.data[1],\n dataset[1].spiral.data[2],\n )\n )\n input_data = np.concatenate((spiral_A, spiral_B), axis=0)\n dataset[0]._init_input(input_data.T)\n mlp_model = MLP(dataset[0].data.shape[0], 8, [100, 75, 50, 25, 50, 75, 100, 1])\n train(dataset[0], mlp_model)\n prob_surf = decision_surf(dataset[0].data.numpy(), mlp_model)\n\n # https://stackoverflow.com/questions/49991227/pandas-matplotlib-plot-a-bar-graph-on-existing-scatter-plot-or-vice-versa\n fig = plt.figure(figsize=(5, 3), dpi=200)\n ax = fig.add_subplot(111)\n ax.contour(*prob_surf, cmap=\"RdPu\", linestyles=\"solid\", levels=1)\n ax.scatter(\n input_data[0:NUM_SAMPLES, 0],\n input_data[0:NUM_SAMPLES, 1],\n c=\"r\",\n edgecolors=\"k\",\n )\n ax.scatter(\n input_data[NUM_SAMPLES:, 0], input_data[NUM_SAMPLES:, 1], c=\"b\", edgecolors=\"k\"\n )\n ax.set_title(\"Spirals Dataset & Classification Boundary\")\n ax.set(xlabel=\"x-values\", ylabel=\"y-values\")\n plt.savefig(\"output1.pdf\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"amaan4152/ECE472-DeepLearning","sub_path":"assign2/bin_class.py","file_name":"bin_class.py","file_ext":"py","file_size_in_byte":5839,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
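`decision_surf` above is the standard meshgrid recipe for plotting a classifier boundary: evaluate the model at every grid point, reshape the flat predictions back to the grid, and contour. The recipe reduced to its minimal form (a sketch; `model` stands in for any callable, such as the trained `MLP` above, mapping a `(2, M)` array of points to `M` probabilities):

import numpy as np
import matplotlib.pyplot as plt

xs, ys = np.meshgrid(np.linspace(-3, 3, 100), np.linspace(-3, 3, 100))
grid = np.vstack([xs.ravel(), ys.ravel()])   # shape (2, 10000)
probs = np.reshape(model(grid), xs.shape)    # `model` assumed in scope
plt.contour(xs, ys, probs, levels=[0.5])     # decision boundary at p = 0.5
plt.show()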
+{"seq_id":"10010826356","text":"SCREEN_HEIGHT = 650\nSCREEN_WIDTH = 810\n\nMIN_BOARD_HEIGHT = 6\nMIN_BOARD_WIDTH = 6\nSTART_SNAKE_SIZE = 3\n\nBLOCK_SIZE = 30\nHOW_MANY_BLOCKS_WIDTH = 20\nHOW_MANY_BLOCKS_HEIGHT = 15\nGAP_SIZE = 10\n\nBACKGROUND_COLOR = (26, 26, 28)\nBORDER_COLOR = (255, 191, 94)\nSNAKE_COLOR = (121, 62, 16, 100)\nPREY_COLOR = (72, 73, 84)\nOPAQUE_ALPHA = 210\nHSV_STEP = 10\n\nACCELERATION_INTERVAL = 5\nACCELERATION = 2\nPLAYER_SPEED = 0.2\n\nFONT = \"freesansbold.ttf\"\nTEXT_COLOR = (255, 255, 255)\nGAME_OVER = \"GAME OVER\"\nGAME_OVER_SIZE = 60\nGAME_OVER_POSITION = (SCREEN_WIDTH // 2, 3 * SCREEN_HEIGHT // 15)\nSCORE_SIZE = 32\nYOUR_SCORE = \"YOUR SCORE:\"\nHIGH_SCORE = \"HIGH SCORE:\"\nYOUR_SCORE_POSITION = (SCREEN_WIDTH // 2, 5 * SCREEN_HEIGHT // 15)\nYOUR_SCORE_SCORE_POSITION = (SCREEN_WIDTH // 2, 6 * SCREEN_HEIGHT // 15)\nHIGH_SCORE_POSITION = (SCREEN_WIDTH // 2, 8 * SCREEN_HEIGHT // 15)\nHIGH_SCORE_SCORE_POSITION = (SCREEN_WIDTH // 2, 9 * SCREEN_HEIGHT // 15)\nCONTINUE = \"Press R to try again or Q to quit.\"\nCONTINUE_SIZE = 20\nCONTINUE_POSITION = (SCREEN_WIDTH // 2, 11 * SCREEN_HEIGHT // 15)\nSCORE = \"SCORE: \"\nSCORE_POSITION = (\n SCREEN_WIDTH // 40,\n SCREEN_HEIGHT - SCREEN_HEIGHT // 17,\n)\nSCORE_SCORE_POSITION = (SCORE_POSITION[0] + SCREEN_WIDTH // 5, SCORE_POSITION[1])\n\nFPS = 60\n\nSAVEFILE = \"high_score.txt\"","repo_name":"DallogFheir/snakes-prey","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"33384723241","text":"#!/usr/bin/env python\n\nfrom interbotix_xs_modules.arm import InterbotixManipulatorXS\nfrom sensor_msgs.msg import JointState\nfrom std_msgs.msg import Bool \nimport rospy\nimport time \n\nclass State_machine:\n\n\tdef __init__(self):\n\t\tself.state = \"Start\"\n\t\tself.load = 0\n\t\tself.empty_cup_load = 0\n\t\tself.moment = 0\n\t\tself.start_time = 0\n\t\tself.timer_running = False\n\t\n\tdef start_timer(self):\n\t\tself.start_time = time.time()\n\t\tself.timer_running = True\n\t\n\tdef stop_timer(self):\n\t\tself.timer_running = False\n\t\n\tdef reset_timer(self):\n\t\tself.start_time = time.time()\n\t\tself.timer_running = True\n\t\t\n\tdef time_elapsed(self):\n\t\telapsed = time.time() - self.start_time\n\t\tprint( \"Time elapsed: \" + str(elapsed) )\n\t\treturn elapsed\n\t\t\n\ndef set_wrist_pose():\n\tprint( \"Moving arm into position...\" )\n\tneutral_joint_position = [0, 0, 0.506, -0.531, 0]\n\tbot.arm.set_joint_positions( neutral_joint_position )\n\trospy.sleep( 1 )\n\t\n\ndef listener():\n\trospy.Subscriber( \"/rx150/joint_states\", JointState, check_load )\n\ndef check_load( joint_states ):\n\tglobal jointLoad\n\tjointLoad = joint_states.effort[3] \n\ndef process_state( State ):\n\tprint( \"Load: \" + str(State.load) + \"\tState: \" + State.state)\n\tif not(State.load <= -115 and State.load >= -134.5):\n\t\tprint( \"Load not initialised correctly\" )\n\t\treset_load = [0, -1.57, 0, -0.531, 0]\n\t\tbot.arm.set_joint_positions( reset_load )\n\t\trospy.sleep( 1 )\n\t\tneutral_joint_position = [0, 0, 0.506, -0.531, 0]\n\t\tbot.arm.set_joint_positions( neutral_joint_position )\n\telse:\n\t\tprint( \"Ready to receive cup\" )\n\treturn State\n\n\nif __name__=='__main__':\n\tbot = InterbotixManipulatorXS(\"rx150\", \"arm\", \"gripper\")\n\tset_wrist_pose()\n\tlistener()\n\trobot_arm = State_machine()\n\twhile not rospy.is_shutdown():\n\t\t\n\t\trospy.sleep( 0.05 )\n\t\trobot_arm.load = jointLoad\n\t\trobot_arm = process_state( robot_arm )\n\n\n\n","repo_name":"Crystal-Rose/Final_Year_Project","sub_path":"final_year_project/repo/load_testing.py","file_name":"load_testing.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"4099855260","text":"#!/usr/bin/python -t\n\n# heap\n# 使用 Heapq 的方法\n# 最快,因为不需要创建额外空间。\n# 时间复杂度和其他的算法一致,都是 \n# O(NlogK) N 是所有元素个数\n\n\nimport heapq\n\nclass Solution:\n \"\"\"\n @param arrays: k sorted integer arrays\n @return: a sorted array\n \"\"\"\n def mergekSortedArrays(self, arrays):\n # write your code here\n ret = []\n heap = []\n \n for index, array in enumerate(arrays):\n if len(array) == 0:\n continue\n heapq.heappush(heap, (array[0], index, 0))\n \n while len(heap):\n val, x, y = heapq.heappop(heap)\n ret.append(val)\n if y + 1 < len(arrays[x]):\n heapq.heappush(heap, (arrays[x][y+1], x, y+1))\n \n return ret\n \n\n# divid and conqur\n\n\nclass Solution:\n \"\"\"\n @param arrays: k sorted integer arrays\n @return: a sorted array\n \"\"\"\n def mergekSortedArrays(self, arrays):\n # write your code here\n n = len(arrays)\n \n return self.helper(arrays, 0, n-1)\n \n def helper(self, arrays, start, end):\n if start >= end:\n return arrays[start]\n \n mid = (start + end) /2\n \n left = self.helper(arrays, start, mid)\n right = self.helper(arrays, mid+1, end)\n \n return self.merge(left, right)\n \n def merge(self, l1, l2):\n ret = []\n \n len_l1 = len(l1)\n index1 = 0\n len_l2 = len(l2)\n index2 = 0\n \n while index1 < len_l1 and index2 < len_l2:\n if l1[index1] < l2[index2]:\n ret.append(l1[index1])\n index1 += 1\n else:\n ret.append(l2[index2])\n index2 += 1\n \n if index1 < len_l1:\n ret.extend(l1[index1:])\n if index2 < len_l2:\n ret.extend(l2[index2:])\n \n return ret\n \n","repo_name":"boknowswiki/mytraning","sub_path":"lintcode/python/0486_merge_k_sorted_array.py","file_name":"0486_merge_k_sorted_array.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
+{"seq_id":"2080961773","text":"import datetime\nimport logging\nimport pickle\nimport re\nimport urllib.request, urllib.error, urllib.parse\n\nimport simplejson as json\nfrom authz_group import Group\nfrom django.conf import settings\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.utils import timezone\nfrom panopto_client import PanoptoAPIException\nfrom panopto_client.remote_recorder import RemoteRecorderManagement\nfrom panopto_tools.models import PersistentData\nfrom PIL import Image\nfrom userservice.user import UserService\n\nlogger = logging.getLogger(__name__)\n\n_api = RemoteRecorderManagement()\n\n\n@login_required\ndef preview(request, **kwargs):\n user = UserService().get_original_user()\n if not Group().is_member_of_group(user, settings.PANOPTO_PREVIEW_GROUP):\n return HttpResponseRedirect(\"/\")\n\n recorder_id = kwargs.get('recorder_id')\n\n try:\n thumb = get_recorder_thumbnail(recorder_id)\n return HttpResponse(thumb.read(), content_type=\"image/jpeg\")\n except (PanoptoAPIException, IOError) as err:\n logger.exception(err)\n red = Image.new('RGBA', (1, 1), (255, 0, 0, 0))\n response = HttpResponse(content_type=\"image/jpeg\")\n red.save(response, \"JPEG\")\n return response\n\n\ndef get_api_recorder_details(api, recorder_id):\n if re.match(r'^\\d+$', recorder_id):\n recorders = api.getRemoteRecordersByExternalId(recorder_id)\n else:\n recorders = api.getRemoteRecordersById(recorder_id)\n\n if not (recorders and hasattr(recorders, 'RemoteRecorder')):\n return None\n\n return recorders.RemoteRecorder\n\n\ndef get_private_recorder_details(recorder_id):\n key = 'RecorderDetails_%s' % recorder_id\n expiration = timezone.now() - datetime.timedelta(hours=1)\n\n try:\n details = PersistentData.objects.get(name=key)\n if details.timestamp > expiration:\n return json.loads(details.value)\n except PersistentData.DoesNotExist:\n details = PersistentData(name=key)\n\n url = 'https://%s/Panopto/Api/remoteRecorders/%s' % \\\n (settings.PANOPTO_SERVER, recorder_id)\n\n request = urllib.request.Request(url)\n _add_cookies(request)\n result = urllib.request.urlopen(request)\n\n details.value = result.read()\n details.save()\n\n return json.loads(details.value)\n\n\ndef get_recorder_preview_url(recorder_id):\n key = 'ThumbnailURL_%s' % recorder_id\n expiration = timezone.now() - datetime.timedelta(hours=1)\n\n try:\n url = PersistentData.objects.get(name=key)\n if url.timestamp > expiration:\n return url.value\n except PersistentData.DoesNotExist:\n url = PersistentData(name=key)\n\n recorders = get_api_recorder_details(_api, recorder_id)\n\n if recorders is None:\n raise RecorderException(\"No Recorder Found\")\n\n for recorder in recorders:\n recorder.PrivateDetails = get_private_recorder_details(recorder.Id)\n for device in recorder.PrivateDetails['Devices']:\n if recorder.PrivateDetails['PrimaryVideoDeviceId'] == \\\n device['DeviceId']:\n url.value = device['VideoPreviewUrl']\n url.save()\n return url.value\n\n raise RecorderException(\"Recorder Preview URL Not Found\")\n\n\ndef get_recorder_thumbnail(recorder_id):\n url = get_recorder_preview_url(recorder_id)\n\n request = urllib.request.Request(url)\n _add_cookies(request)\n result = urllib.request.urlopen(request)\n\n return result\n\n\ndef _add_cookies(request):\n cookiejar = _api._api.options.transport.cookiejar\n cookiejar.add_cookie_header(request)\n\n key = 'CookieJar'\n try:\n c = PersistentData.objects.get(name=key)\n except 
PersistentData.DoesNotExist:\n c = PersistentData(name=key)\n\n if not request.has_header('Cookie'):\n # try saved cookie\n cookiejar._cookies = pickle.loads(eval(c.value))\n cookiejar.add_cookie_header(request)\n\n if not request.has_header('Cookie'):\n # make an authenticated request through public api\n _api.listRecorders()\n cookiejar.add_cookie_header(request)\n\n c.value = str(pickle.dumps(cookiejar._cookies))\n c.save()\n","repo_name":"uw-asa/django-panopto-tools","sub_path":"panopto_tools/views/recorderpreview.py","file_name":"recorderpreview.py","file_ext":"py","file_size_in_byte":4212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
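`RecorderException` is raised in `get_recorder_preview_url` above but never defined or imported in this module, so those error paths would themselves fail with `NameError`; presumably the class lives elsewhere in the package. A minimal definition that would make the module self-contained:

class RecorderException(Exception):
    """Raised when a remote recorder or its preview URL cannot be found."""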
+{"seq_id":"12193099477","text":"def main():\n\n numbers = [3, 1, 4, 1, 5, 9, 2]\n numbers.remove(3)\n numbers.insert(0, 10)\n numbers.remove(2)\n numbers.insert(6, 1)\n print(numbers)\n numbers2 = slice(2, 6)\n print(numbers[numbers2])\n if 9 in numbers:\n print(\"yes\")\n else:\n print(\"no\")\n\n# question 1: will print 3\n# question 2: starts at the end of the list and works to the front\n# question 3: will print 1\n# question 4: starts at beginning goes till end\n# question 5: will print 1 and 5\n# question 6: will look through list for the when the number 5 appears\n# question 7: will look for 7 in list\n# question 8: will look through list for 3, won't find since expecting int\n# question 9 : will add need to have equals new list as trying to combine 2 lists would need to .append if wanted to\n# add them to the original list\n\nmain()\n","repo_name":"ProperBeowulf/cp1404practicals","sub_path":"prac_04/lists_warmup.py","file_name":"lists_warmup.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"35211421545","text":"# https://www.acmicpc.net/problem/7570\n\n\nimport sys\n\ninput = lambda: sys.stdin.readline()\n\n\ndef solution(n, nums):\n dp = [0 for _ in range(n + 1)]\n result = 0\n for num in nums:\n dp[num] = dp[num - 1] + 1\n result = max(result, dp[num])\n return n - result\n\n\nif __name__ == \"__main__\":\n n = int(input())\n nums = list(map(int, input().split()))\n print(solution(n, nums))\n","repo_name":"HyungJunGoo/AlgorithmProblems","sub_path":"Baekjun/DP/7570.py","file_name":"7570.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"20785284972","text":"#!/usr/bin/env python\n\"\"\"\nInitiate the Kivy main loop.\n\"\"\"\n\nfrom typing import final\n\nimport storage\nimport ui\nimport utils\n\nfrom kivy.app import App\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.label import Label\nfrom kivy.uix.button import Button\nfrom kivy.uix.textinput import TextInput\nfrom kivy.uix.recycleview import RecycleView\nfrom kivy.properties import ListProperty, BooleanProperty\n\n# Debugging\nfrom kivy.logger import Logger\n\n\n@final\nclass Armory(BoxLayout):\n \"\"\"The starting point of the app.\"\"\"\n items = ListProperty([])\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.orientation = \"vertical\"\n\n config = utils.read_config()\n\n # Armor item button bar\n armor_button_bar = self._create_button_bar(config[\"armor\"])\n\n # Weapon button bar\n weapon_button_bar = self._create_button_bar(config[\"weapons\"])\n\n # Item header bar\n item_header = BoxLayout(size_hint=(1, None), size_hint_y=None, height=25)\n item_name = Label(text=\"Name\", bold=True)\n item_reqs = Label(text=\"Requirements\", bold=True)\n item_quality = Label(text=\"Quality\", bold=True)\n item_location = Label(text=\"Location\", bold=True)\n item_notes = Label(text=\"Notes\", bold=True)\n item_header.add_widget(item_name)\n item_header.add_widget(item_reqs)\n item_header.add_widget(item_quality)\n item_header.add_widget(item_location)\n item_header.add_widget(item_notes)\n\n # Populate item table - default item view\n armor_button_bar.children[0].trigger_action()\n\n # Item list\n item_list = BoxLayout()\n recycle_view = RecycleView()\n recycle_view.add_widget(ui.SelectableRecycleGridLayout())\n recycle_view.data=[{\"text\": str(x)} for x in self.items]\n recycle_view.orientation = \"vertical\"\n recycle_view.viewclass = \"SelectableButton\"\n item_list.add_widget(recycle_view)\n\n self.add_widget(armor_button_bar)\n self.add_widget(weapon_button_bar)\n self.add_widget(item_header)\n self.add_widget(item_list)\n\n def _get_items(self, instance):\n \"\"\"Populate the list of items with elements from the DB\"\"\"\n item_type = instance.text.lower()\n\n # Temporary DB mock data\n data = [[\"foo\", \"bar\"] for x in range(40)]\n\n for row in data:\n for item in row:\n self.items.append(item)\n\n def _create_button_bar(self, items):\n \"\"\"Create a bar with buttons for given item types.\"\"\"\n item_button_bar = BoxLayout(size_hint=(1, None), size_hint_y=None, height=25)\n\n for item in items:\n button = Button(text=item.capitalize())\n button.bind(on_press=self._get_items)\n item_button_bar.add_widget(button)\n\n return item_button_bar\n\n@final\nclass ArmoryApp(App):\n \"\"\"Main entry point into the Kivy main loop.\"\"\"\n title = \"Armory v0.1\"\n\n def build(self):\n self.icon = \"../assets/shield.ico\"\n return Armory()\n\nif __name__ == \"__main__\":\n ArmoryApp().run()\n","repo_name":"lb1wh/armory","sub_path":"armory/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"12891283668","text":"from typing import List\n\n\nclass Solution:\n def buildArray(self, target: List[int], n: int) -> List[str]:\n res = []\n s = set(target)\n for i in range(1, target[-1] + 1):\n res.append(\"Push\")\n if i not in s:\n res.append(\"Pop\")\n return res\n\n\nclass Solution2:\n def buildArray(self, target: List[int], n: int) -> List[str]:\n prev, res = 0, []\n for t in target:\n res += ['Push', 'Pop'] * (t - prev - 1)\n res.append('Push')\n prev = t\n return res\n\n\ndef test():\n sol = Solution()\n\n print('Test 1 ... ', end='')\n assert sol.buildArray(target=[1, 3], n=3) == [\"Push\", \"Push\", \"Pop\", \"Push\"]\n print('ok')\n\n print('Test 2 ... ', end='')\n assert sol.buildArray(target=[1, 2, 3], n=3) == [\"Push\", \"Push\", \"Push\"]\n print('ok')\n\n print('Test 3 ... ', end='')\n assert sol.buildArray(target=[1, 2], n=4) == [\"Push\", \"Push\"]\n print('ok')\n\n\nif __name__ == '__main__':\n test()\n","repo_name":"Vskesha/leetcode_solutions","sub_path":"leetcode_solutions/p1441_build_an_array_with_stack_operations.py","file_name":"p1441_build_an_array_with_stack_operations.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
+{"seq_id":"38750719104","text":"import sys\nimport os\n\n# This module is relatively simple, it's more or less a straightforward clear up\n\nRMDIR = 'rclone rmdirs %s/\"%s\"/ --leave-root -v'\n\nclass Colors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\ndef rmdir(config):\n\n try:\n folder = input(\"Remove Airing or Premiered folders? [a] or [p]: \")\n except:\n print(\"Exiting...\")\n sys.exit(1)\n\n if folder.lower() == \"a\": option = 1\n elif folder.lower() == \"p\": option = 2\n else: \n print(\"Please enter a valid input!\")\n return\n\n for r in config.getList():\n print(\"%sNOTICE%s: Removing from %s%s%s...\" \n % (Colors.WARNING, Colors.ENDC, Colors.OKBLUE, r[0], Colors.ENDC), end=\" \")\n sys.stdout.flush()\n\n # We don't need to check for empty folders, cause rmdir doesn't do anything\n os.system(RMDIR %(r[0], r[option]))\n print(\"Done\")\n\n return\n\n\n","repo_name":"shunjuu/Izumi","sub_path":"core/tools/src/rmdir.py","file_name":"rmdir.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
+{"seq_id":"39515157786","text":"\"\"\"\nCustom class for loading audio-visual model and extract features\nModified from https://github.com/s3prl/s3prl/blob/main/s3prl/upstream/example/expert.py\n\"\"\"\n\nimport sys\nfrom collections import OrderedDict\nfrom typing import Dict, List, Tuple, Union\n\nimport torch\nimport torch.nn as nn\n\nfrom . import replai\nfrom .replai import models\nfrom .replai.data.builder import build_transforms\n\nsys.modules[\"replai\"] = replai # create alias for unpickling\n\n\nfrom munch import DefaultMunch\nfrom torch import Tensor\nfrom torch.nn.utils.rnn import pad_sequence\n\n\nclass UpstreamExpert(nn.Module):\n def __init__(self, ckpt: str = None, model_config: str = None, **kwargs):\n \"\"\"\n Args:\n ckpt:\n checkpoint path for loading pretrained weights.\n\n model_config:\n config path for your model.\n \"\"\"\n super().__init__()\n\n # load model weights\n _weights = torch.load(ckpt)\n _weights = _weights[\"model\"]\n _weights = OrderedDict(\n {\n k[16:]: _weights[k]\n for k in _weights.keys()\n if k.startswith(\"module.backbone\")\n }\n )\n\n # hardcode model config\n model_conf = DefaultMunch.fromDict(\n {\n \"audio\": {\n \"arch\": \"avid_spec_cnn_9\",\n \"args\": {\"channels\": 1, \"pretrained\": False},\n \"sync_bn\": False,\n \"outp_dim\": 512,\n },\n \"video\": {\n \"arch\": \"avid_r2plus1d_18\",\n \"args\": {\"pretrained\": False},\n \"sync_bn\": False,\n \"outp_dim\": 512,\n },\n }\n )\n\n # create model and load weights\n self.backbone = models.build_audio_video_model(model_conf, remove_head=True)\n self.backbone.load_state_dict(\n _weights, strict=True\n )\n\n self.audio_sample_rate = 16000\n self.video_frame_size = (112, 112)\n self.video_frame_rate = 16\n\n def preprocess_video(self, video, video_frame_rate):\n \"\"\"\n Replace this function to preprocess videos into your input format\n video: (video_length, video_channels, height, width), where video_channels is usually 3 for RGB or 1 for greyscale\n in RepLAI, the default length is 0.5 secs for video, resulting in 8 frames (16FPS)\n \"\"\"\n # Resample video\n # (from https://github.com/pytorch/vision/blob/5b07d6c9c6c14cf88fc545415d63021456874744/torchvision/datasets/video_utils.py#L278)\n step = float(video_frame_rate) / self.video_frame_rate\n if step.is_integer():\n # optimization: if step is integer, don't need to perform\n # advanced indexing\n step = int(step)\n idxs = slice(None, None, step)\n else:\n num_frames = max(int(len(video) / step),1)\n idxs = torch.arange(num_frames, dtype=torch.float32) * step\n idxs = idxs.floor().to(torch.int64)\n video = video[idxs]\n\n _video_transform = build_transforms(\n cfg=DefaultMunch.fromDict(\n {\n \"video\": {\n \"name\": \"ResizeCropFlip\",\n \"args\": {\n \"min_size\": 128,\n \"max_size\": 180,\n \"crop_size\": self.video_frame_size[0],\n },\n \"data_shape\": [\n 3,\n len(video),\n self.video_frame_size[0],\n self.video_frame_size[1],\n ],\n },\n }\n ),\n augment=False,\n )\n # Original uses OpenCV for resizing numpy tensors\n clips = {\n \"video\": (video.numpy().transpose(0, 2, 3, 1), self.video_frame_rate),\n }\n\n clips = _video_transform(clips)\n\n # output video shape (channel, length, w, h)\n return clips[\"video\"]\n\n def preprocess_audio(self, audio, audio_sample_rate):\n \"\"\"\n Replace this function to preprocess audio waveforms into your input format\n audio: (audio_channels, audio_length), where audio_channels is usually 1 or 2\n In RepLAI, they use 2.0 sec audio with raw sample rate of 32khz\n It then first downsample to 
16kHz and take 128 temporal frames on mel.\n So I follow the same proportion\n \"\"\"\n if len(audio.shape) == 2:\n audio = audio.mean(dim=0)\n\n _audio_length_sec = len(audio) / audio_sample_rate\n num_temporal_frames = int(_audio_length_sec / 2.0 * 128)\n _audio_transform = build_transforms(\n cfg=DefaultMunch.fromDict(\n {\n \"audio\": {\n \"name\": \"ResampleLogMelSpectrogram\",\n \"args\": {\n \"raw_sample_rate\": audio_sample_rate,\n \"audio_rate\": self.audio_sample_rate,\n \"mel_window_size\": 32,\n \"mel_step_size\": 16,\n \"num_mels\": 80,\n \"num_temporal_frames\": num_temporal_frames,\n },\n \"data_shape\": [1, num_temporal_frames, 80],\n }\n }\n ),\n augment=False,\n )\n\n clips = {\n \"audio\": (audio, audio_sample_rate),\n }\n\n clips = _audio_transform(clips)\n\n return clips[\"audio\"]\n\n def forward(\n self, source: List[Tuple[Tensor, Tensor]]\n ) -> Dict[str, Union[Tensor, List[Tensor]]]:\n \"\"\"\n Replace this function run a forward pass with your model\n source: list of audio-video Tensor tuples\n [(wav1,vid1), (wav2,vid2), ...]\n in your input format\n \"\"\"\n bsz = len(source)\n audio, video = zip(*source)\n\n # Collate audio and video into batch\n audio = [a.squeeze() for a in audio]\n wavs = pad_sequence(audio, batch_first=True).unsqueeze(dim=1)\n # Pad video along time axis, video starts with channel x time x height x width\n video = [v.permute(1, 0, 2, 3) for v in video]\n videos = pad_sequence(video, batch_first=True).permute(0, 2, 1, 3, 4)\n # videos = torch.stack(video)\n\n assert wavs.shape[0] == bsz\n assert videos.shape[0] == bsz\n assert videos.shape[-2] == 112\n assert videos.shape[-1] == 112\n\n # Run through audio and video encoders\n video_feats = self.backbone[\"video\"](videos, return_embs=True)\n audio_feats = self.backbone[\"audio\"](wavs, return_embs=True)\n\n # use the output of last CNN layer before pooling\n video_feats = video_feats[\"conv5x\"]\n audio_feats = audio_feats[\"conv5x\"]\n\n # convert video_feats to shape (bsz, T', hid_dim)\n video_feats = video_feats.flatten(start_dim=2, end_dim=-1)\n video_feats = video_feats.permute(0, 2, 1)\n\n # convert video_feats to shape (bsz, T', hid_dim)\n audio_feats = audio_feats.flatten(start_dim=2, end_dim=-1)\n audio_feats = audio_feats.permute(0, 2, 1)\n\n # Return intermediate layer representations for potential layer-wise experiments\n return {\n \"video_feats\": [video_feats],\n \"audio_feats\": [audio_feats],\n \"fusion_feats\": [],\n }","repo_name":"roger-tseng/av-superb","sub_path":"upstream_models/replai/expert.py","file_name":"expert.py","file_ext":"py","file_size_in_byte":7582,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"78"}
+{"seq_id":"69879860091","text":"class Employee:\n \"\"\"\n A class describing an Employee for an employee management system\n\n Properties:\n first_name: string, the first name of the employee\n last_name: string, the last name of the employee\n salary: int, the employee's salary\n\n Methods:\n calculate_raise: calculates the employee's raise\n apply_raise: applies the employee's raise\n \"\"\"\n\n def __init__(self, first_name=\"\", last_name=\"\", salary=0):\n \"\"\" Initialises the properties \"\"\"\n self._first_name = first_name\n self._last_name = last_name\n self._salary = salary\n\n @property\n def first_name(self):\n \"\"\" first_name getter \"\"\"\n if self._first_name:\n return self._first_name\n else:\n return \"First name not set\"\n \n @first_name.setter\n def first_name(self, new_value):\n \"\"\"\n first_name setter method\n \n Args:\n new_value: string specifying the new first name\n \n Returns:\n None\n \n Raises:\n ValueError: if the new string has a zero length\n \"\"\"\n if len(new_value) > 0:\n self._first_name = new_value\n else:\n raise ValueError(\"Cannot set the value!\")\n \n @property\n def last_name(self):\n \"\"\" last_name getter \"\"\"\n if self._last_name:\n return self._last_name\n else:\n return \"Last name not set\"\n \n @last_name.setter\n def last_name(self, new_value):\n \"\"\"\n last_name setter method\n \n Args:\n new_value: string specifying the new last name\n \n Returns:\n None\n \n Raises:\n ValueError: if the new string has a zero length\n \"\"\"\n if len(new_value) > 0:\n self._last_name = new_value\n else:\n raise ValueError(\"Cannot set the value!\")\n \n @property\n def salary(self):\n \"\"\" salary getter \"\"\"\n return self._salary\n \n @salary.setter\n def salary(self, new_value):\n \"\"\"\n salary setter method\n \n Args:\n new_value: string specifying the new salary\n \n Returns:\n None\n \n Raises:\n Exception: if the new salary is less than zero\n ValueError: if the new salary is not an integer\n \"\"\"\n if isinstance(new_value, int):\n if new_value < 0:\n raise Exception(\"The salary cannot be less than zero\")\n else:\n self._salary = new_value\n else:\n raise ValueError(\"The new value must be a whole number!\")\n \n def calculate_raise(self):\n \"\"\"\n calculate_raise method\n\n Args:\n None\n \n Returns:\n int of 10% of the current salary\n\n Raises:\n None\n \"\"\"\n return int(self.salary * 0.1)\n \n def apply_raise(self):\n \"\"\"\n apply_raise method\n\n Args:\n None\n \n Returns:\n int of current salary plus the calculated raise\n\n Raises:\n None\n \"\"\"\n\n self.salary += self.calculate_raise()\n return self.salary\n \n def __str__(self):\n \"\"\" String representation of the object \"\"\"\n return f'Employee({self.first_name},{self.last_name},{self.salary})'\n\n\nclass Developer(Employee):\n \"\"\"\n A class describing a Developer for an employee management system\n Subclass of Employee\n\n Properties:\n language: string, the programming language the developer uses\n \"\"\"\n\n def __init__(self, first_name=\"\", last_name=\"\", salary=0):\n \"\"\" Initialises the properties \"\"\"\n super().__init__(first_name, last_name, salary)\n self._language = \"\"\n self._language_list = [\"php\", \"python\", \"javascript\"]\n \n @property\n def language(self):\n \"\"\" language getter \"\"\"\n if self._language:\n return self._language\n else:\n return \"Language not set\"\n\n @language.setter\n def language(self, new_value):\n \"\"\"\n language setter method\n \n Args:\n new_value: string specifying the new language\n \n 
Returns:\n None\n \n Raises:\n ValueError: if the new string is not in the permitted list\n \"\"\"\n if new_value.lower() not in self._language_list:\n raise Exception(\"Error: language must be in the list\")\n else:\n self._language = new_value\n \n def calculate_raise(self):\n \"\"\"\n calculate_raise polymorphic method\n\n Args:\n None\n \n Returns:\n int of a percentage of the current salary\n\n Raises:\n Exception: if the language is not set\n \"\"\"\n if self.language.lower() == \"php\":\n rate = 0.15\n elif self.language.lower() == \"javascript\":\n rate = 0.2\n elif self.language.lower() == \"python\":\n rate = 0.25\n else:\n raise Exception(\"Error: language not set\")\n \n return int(self.salary * rate)\n \n def __str__(self):\n \"\"\" String representation of the object \"\"\"\n return f'Developer({self.first_name},{self.last_name},{self.salary},{self.language})'\n \n","repo_name":"lechien73/oop_walkthrough","sub_path":"classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":5416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"41189236319","text":"import re\n\nfrom .support import PyScriptTest\n\n\nclass TestBasic(PyScriptTest):\n def test_pyscript_hello(self):\n self.pyscript_run(\n \"\"\"\n \n print('hello pyscript')\n \n \"\"\"\n )\n # this is a very ugly way of checking the content of the DOM. If we\n # find ourselves to write a lot of code in this style, we will\n # probably want to write a nicer API for it.\n inner_html = self.page.locator(\"py-script\").inner_html()\n pattern = r'
hello pyscript
'\n assert re.search(pattern, inner_html)\n\n def test_execution_in_order(self):\n \"\"\"\n Check that they py-script tags are executed in the same order they are\n defined\n \"\"\"\n self.pyscript_run(\n \"\"\"\n import js; js.console.log('one')\n js.console.log('two')\n js.console.log('three')\n js.console.log('four')\n \"\"\"\n )\n assert self.console.log.lines == [\n self.PY_COMPLETE,\n \"one\",\n \"two\",\n \"three\",\n \"four\",\n ]\n\n def test_escaping_of_angle_brackets(self):\n \"\"\"\n Check that py-script tags escape angle brackets\n \"\"\"\n self.pyscript_run(\n \"\"\"\n import js; js.console.log(1<2, 1>2)\n js.console.log(\"\")\n \"\"\"\n )\n assert self.console.log.lines == [self.PY_COMPLETE, \"true false\", \"\"]\n\n def test_paths(self):\n self.writefile(\"a.py\", \"x = 'hello from A'\")\n self.writefile(\"b.py\", \"x = 'hello from B'\")\n self.pyscript_run(\n \"\"\"\n \n paths = [\"./a.py\", \"./b.py\"]\n \n\n \n import js\n import a, b\n js.console.log(a.x)\n js.console.log(b.x)\n \n \"\"\"\n )\n assert self.console.log.lines == [\n self.PY_COMPLETE,\n \"hello from A\",\n \"hello from B\",\n ]\n\n def test_packages(self):\n self.pyscript_run(\n \"\"\"\n \n # we use asciitree because it's one of the smallest packages\n # which are built and distributed with pyodide\n packages = [\"asciitree\"]\n \n\n \n import js\n import asciitree\n js.console.log('hello', asciitree.__name__)\n \n \n \"\"\"\n )\n assert self.console.log.lines == [\n self.PY_COMPLETE,\n \"Loading asciitree\", # printed by pyodide\n \"Loaded asciitree\", # printed by pyodide\n \"hello asciitree\", # printed by us\n ]\n","repo_name":"MattStammers/PyScript","sub_path":"pyscript-main/pyscriptjs/tests/integration/test_01_basic.py","file_name":"test_01_basic.py","file_ext":"py","file_size_in_byte":3063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"54415433","text":"\"\"\"\nSmall demonstration on fisher yates algorithm\n\"\"\"\nimport random\n\n\ndef get_random(floor, ceiling):\n \"\"\"\n Gets a random number between floor and ceiling\n :rerturn: randomly selected element in the range\n \"\"\"\n return random.randrange(floor, ceiling + 1)\n\n\ndef shuffle(the_list):\n \"\"\"\n Shuffles a list in_place, this means that the input list is destroyed\n Does not return anything, as the input list is destroyed and thus will be altered. Be careful\n when using this function, it has side-effects\n :param: the_list list being used to shuffle\n :return: None or the list itself if the list is length of 0 or 1\n :rtype: None\n \"\"\"\n # if the list is 0 or 1 in length, simply return it\n if len(the_list) <= 1:\n return the_list\n\n last_index_in_list = len(the_list) - 1\n\n # walk through the list from beginning to end\n for index_we_are_choosing in range(0, last_index_in_list):\n # choose a random not-yet-placed item to place there\n # (could also be the item currently in that spot)\n # must be an item AFTER the current item, because the stuff\n # before has all already been placed\n\n random_choice_index = get_random(index_we_are_choosing, last_index_in_list)\n\n # place our random choice in the spot by swapping\n if random_choice_index != index_we_are_choosing:\n the_list[index_we_are_choosing], the_list[random_choice_index] = (\n the_list[random_choice_index],\n the_list[index_we_are_choosing],\n )\n","repo_name":"BrianLusina/PythonSnips","sub_path":"algorithms/fisher_yates_shuffle/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"}
+{"seq_id":"29669911249","text":"from . rest_helper import *\nfrom . globalValue import *\n\nimport json\n\n\ndef try_int_input(int_str):\n try:\n a = int(int_str)\n except Exception as e:\n return None\n return a\n\n\ndef try_json_input(json_str):\n try:\n # print(json_str)\n\n tmpdict = json.loads(json_str)\n # print(tmpdict)\n except Exception as e:\n return None\n return tmpdict\n\ndef try_json_file_input(json_file):\n try:\n fd = open(json_file)\n json_str = fd.read()\n fd.close()\n except Exception as e:\n return None\n return json_str\n\n######### trans_dict_to_op #######\n\n\ndef format_op_dict(path_value_dict):\n tmp_list = []\n for tmpkey in path_value_dict.keys():\n tmpvalue = path_value_dict[tmpkey]\n tmpdict = {\"op\" : \"replace\" , \"path\" : tmpkey , \"value\" : tmpvalue }\n tmp_list.append(tmpdict)\n return tmp_list\n\ndef get_path_to_value(prefix_str , src_dict):\n result_dict = {}\n for tmpkey in src_dict.keys():\n tmpvalue = src_dict[tmpkey]\n tmp_str = prefix_str + tmpkey\n if isinstance(tmpvalue , dict):\n result_dict.update(get_path_to_value(tmp_str + \"/\" , tmpvalue))\n else:\n result_dict.update({ tmp_str : tmpvalue })\n \n return result_dict\n\n\ndef trans_dict_to_path_value(src_dict):\n # path_value_dict = {}\n for tmpkey in src_dict.keys():\n if tmpkey == \"\":\n path_value_dict = { \"/\" : src_dict[tmpkey]}\n return path_value_dict\n\n path_value_dict = get_path_to_value(\"/\" , src_dict )\n # print(path_value_dict)\n return path_value_dict\n\n\ndef trans_dict_to_op(data , path):\n if path == \"\":\n exit(\"path is empty\")\n path_value_dict = {path : data}\n # print(path_value_dict)\n return format_op_dict(path_value_dict)\n \n \n######### ################ #######\n\n\n\ndef format_args(arguments):\n # resultdict = {\"params:\" : {} , \"data\" : {}}\n params = {}\n data = {}\n if arguments[\"--group\"] != None:\n params.update( { \"group\" : arguments[\"--group\"] })\n \n if arguments[\"--index\"] != None:\n params.update( { \"index\" : arguments[\"--index\"] })\n\n if arguments[\"--keys\"] != None:\n params.update( { \"keys\" : arguments[\"--keys\"] })\n\n if arguments[\"--type\"] != None:\n params.update( { \"type\" : arguments[\"--type\"] })\n \n if arguments[\"--depth\"] != None:\n params.update( { \"depth\" : arguments[\"--depth\"] })\n\n\n if arguments[\"--int-value\"]:\n if try_int_input(arguments[\"\"]) == None:\n exit(\"invalid int value : {0}\".format(arguments[\"\"]))\n data = int(arguments[\"\"])\n if arguments[\"--str-value\"]:\n data = arguments[\"\"]\n\n\n if arguments[\"--json\"]:\n json_str = arguments[\"\"]\n tmpdict = try_json_input( json_str ) \n if tmpdict == None:\n exit(\"invalid json string\")\n if isinstance( tmpdict , dict) == False:\n exit(\"invalid json string : input should be a dictionary\")\n data.update(tmpdict)\n \n if arguments[\"--json-file\"] : \n json_str = try_json_file_input(arguments[\"\"])\n # here : if json str is None , tmpdict is None too\n tmpdict = try_json_input( json_str ) \n if tmpdict == None:\n exit(\"invalid json string\")\n if isinstance( tmpdict , dict) == False:\n exit(\"invalid json string : input should be a dictionary\")\n data.update(tmpdict)\n \n if arguments[\"patch\"]:\n tmpdata = trans_dict_to_op(data , arguments[\"\"])\n data = tmpdata\n\n\n return params , data\n \n\n\n\n''' \n if arguments[\"--json\"] : \n json_str = arguments[\"\"]\n tmpdict = try_json_input( json_str ) \n if tmpdict == None:\n exit(\"invalid json string\")\n if isinstance( tmpdict , dict) == False:\n exit(\"invalid json 
string : input should be a dictionary\")\n\n data.update(tmpdict)\n \n if arguments[\"--json-file\"] : \n json_str = try_json_file_input(arguments[\"\"])\n # here : if json str is None , tmpdict is None too\n tmpdict = try_json_input( json_str ) \n if tmpdict == None:\n exit(\"invalid json string\")\n if isinstance( tmpdict , dict) == False:\n exit(\"invalid json string : input should be a dictionary\")\n data.update(tmpdict)\n'''\n\n\n\n\n","repo_name":"asterfusion/Tapplet","sub_path":"sf_cli/sf_rest_cli/sfrestcli/sf_utils.py","file_name":"sf_utils.py","file_ext":"py","file_size_in_byte":4488,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
+{"seq_id":"21836590836","text":"#!/usr/bin/env python3\n\nfrom multiprocessing import Pool\nimport numpy as np\nimport cv2\n\n# range for identifying blue cones in HSV\nbluRanges = [\n [(97, 78, 35), (130, 255, 100)], # regular cone blue\n [(112, 30, 30), (150, 80, 70)] # more of a dark gray\n]\n\n# range for identifying yellow cones in HSV\nylwRanges = [\n [(23, 60, 140), (32, 255, 255)]\n]\n\n# range for identifying orange cones in HSV\norgRanges = [\n [(0, 80, 110), (8, 180, 200)]\n]\n\nminOrgArea = 60 # only detect intersection when its cones are larger than this\n\n\ndef _findConesInImg(img, hsvRanges, minArea=0):\n cones = None\n for i in range(len(hsvRanges)):\n inRange = cv2.inRange(img, hsvRanges[i][0], hsvRanges[i][1])\n if i == 0: cones = inRange\n else: cones = cv2.bitwise_or(cones, inRange)\n\n kernel = np.ones((3, 3), np.uint8)\n erode = cv2.erode(cones, kernel, iterations=2)\n dilate = cv2.dilate(erode, kernel, iterations=2)\n\n _, contours, _ = cv2.findContours(dilate, cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n conePos = []\n largeCones = False\n for contour in contours:\n x, y, w, h = cv2.boundingRect(contour)\n conePos.append((x + int(w/2), y + h))\n if w*h > minArea:\n largeCones = True\n conePos.sort(key=lambda pt: pt[1])\n\n if minArea > 0:\n return conePos, largeCones\n return conePos\n\ndef _findCarInImg(img):\n #find the black part, the range can be calibrated in the future\n inRange = cv2.inRange(img, (0, 0, 0, 0), (30, 30, 30, 30))\n\n kernel = np.ones((3, 3), np.uint8)\n dilate = cv2.dilate(inRange, kernel, iterations=12)\n\n _, contours, _ = cv2.findContours(dilate, cv2.RETR_TREE,\n cv2.CHAIN_APPROX_NONE)\n Flag_CarFound = False\n\n if len(contours) != 0:\n #find the biggest area\n contour_sizes = [(cv2.contourArea(contour), contour) for contour in contours]\n #to make it simple, guess the largest one is the target car\n biggest_contour = max(contour_sizes, key=lambda x: x[0])[1] \n\n #filter out the false positie\n if cv2.contourArea(biggest_contour) > 750: #2800 we may even set a maxmium limit according to the test\n Flag_CarFound = True\n \n return Flag_CarFound\n\npool = Pool(processes=2)\ndef processImage(img, atIntersection):\n img = img[200:480, 0:640] # remove the top of the image\n\n blur = cv2.GaussianBlur(img, (5, 5), 0)\n hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)\n pts = np.array(((0, 280), (0, 170), (200, 130), (420, 135), (640, 190),\n (640, 280), (0, 280))).astype(np.int32)\n cv2.fillPoly(hsv, [pts], (0, 0, 0)) # black out the car\n\n if atIntersection:\n carHsv = hsv[0:130, 250:640]\n else:\n carHsv = hsv[0:130, 420:640]\n\n carRes = pool.apply_async(_findCarInImg, (carHsv,))\n bluRes = pool.apply_async(_findConesInImg, (hsv, bluRanges))\n ylwRes = pool.apply_async(_findConesInImg, (hsv, ylwRanges))\n orgRes = pool.apply_async(_findConesInImg, (hsv, orgRanges, minOrgArea))\n\n carFound = carRes.get(1000)\n bluCones = bluRes.get(1000)\n ylwCones = ylwRes.get(1000)\n orgCones, intersectionFound = orgRes.get(1000)\n\n return (bluCones, ylwCones, orgCones, img.shape[1], img.shape[0], carFound,\n intersectionFound)\n","repo_name":"nakulred1/autonomous-robots-kiwi-project","sub_path":"intersection/vision.py","file_name":"vision.py","file_ext":"py","file_size_in_byte":3282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"21162076096","text":"up = 0\nlow = 0\nst = input(\"Nhập một chuỗi: \")\nfor i in st:\n if i.isupper():\n up += 1\n if i.islower():\n low += 1\nprint(\"Chữ hoa: \", up)\nprint(\"Chữ thường: \", low)\n\n#VIDU:\n#đầu vào là: Cafedev – Kênh Thông Tin IT\n#Thì đầu ra là:\n#Chữ hoa: 7\n#Chữ thường: 15","repo_name":"Ahn12111/BT_P4","sub_path":"BT_P4/test9.py","file_name":"test9.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"9165547521","text":"import sys\nfrom unittest import result\nfrom PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QVBoxLayout, QLineEdit, QGroupBox, QLabel, QGridLayout, QTextEdit, QMainWindow\napp = QApplication(sys.argv)\nwindow = QWidget()#QMainWindow()b\nmainlayout = QGridLayout()\n\n# Create Encrypt Box\nencryptBox = QGroupBox('Encrypt')\npublicKeyLabel = QLabel('Public Key')\npublicKey = QLineEdit()\nencryptTextLabel = QLabel('Text to Encrypt')\nencryptText = QTextEdit()\nencryptButton = QPushButton('Click to Encrypt')\n# Layout Encrypt Box\nlayoutEncrypt = QVBoxLayout()\nlayoutEncrypt.addWidget(publicKeyLabel)\nlayoutEncrypt.addWidget(publicKey)\nlayoutEncrypt.addSpacing(10)\nlayoutEncrypt.addWidget(encryptTextLabel)\nlayoutEncrypt.addWidget(encryptText)\nlayoutEncrypt.addSpacing(20)\nlayoutEncrypt.addWidget(encryptButton)\nlayoutEncrypt.addStretch(1)\nencryptBox.setLayout(layoutEncrypt)\n\n# Create Decrypt Box\ndecryptBox = QGroupBox('Decrypt')\nprivateKeyLabel = QLabel('Private Key')\nprivateKey = QLineEdit()\ndecryptTextLabel = QLabel('Text to Decrypt')\ndecryptText = QTextEdit()\ndecryptButton = QPushButton('Click to Decrypt')\n# Layout Decrypt Box\nlayoutDecrypt = QVBoxLayout()\nlayoutDecrypt.addWidget(privateKeyLabel)\nlayoutDecrypt.addWidget(privateKey)\nlayoutDecrypt.addSpacing(10)\nlayoutDecrypt.addWidget(decryptTextLabel)\nlayoutDecrypt.addWidget(decryptText)\nlayoutDecrypt.addSpacing(20)\nlayoutDecrypt.addWidget(decryptButton)\nlayoutDecrypt.addStretch(1)\ndecryptBox.setLayout(layoutDecrypt)\n\n# Create Result Box\nresultBox = QGroupBox('Result')\nresultLabel = QTextEdit()\n# Layout Result Box\nlayoutResult = QVBoxLayout()\nlayoutResult.addWidget(resultLabel)\nlayoutResult.addStretch(1)\nresultBox.setLayout(layoutResult)\n\n# Layout Main Window\nmainlayout.addWidget(encryptBox, 0, 0)\nmainlayout.addWidget(decryptBox, 0, 1)\nmainlayout.addWidget(resultBox, 1, 0, 1, 2)\nmainlayout.setVerticalSpacing(30)\nwindow.setLayout(mainlayout)\n\n# # Action to Encrypt\n# encryptButton.clicked.connect(self.clickEncrypt)\n# def clickEncrypt(self):\n# self.resultLabel.setText(self.encryptText.text())\n\n# # Action to Decrypt\n# encryptButton.clicked.connect(self.clickDecrypt)\n# def clickDecrypt(self):\n# self.resultLabel.setText(self.decryptText.text())\n\nwindow.show()\napp.exec()\n\n# print('text to encrypt: ', encryptText.text())","repo_name":"izanamiah/soteris","sub_path":"encrypt-app.py","file_name":"encrypt-app.py","file_ext":"py","file_size_in_byte":2302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"18238081059","text":"import os\nfrom sklearn.utils import shuffle\n\nfrom git import *\nfrom clf import *\nfrom comp import *\n\nfilter_big = True\n\ndef get_all_fork_list(repo):\n q = [api.get('repos/%s' % repo)]\n i = 0\n while i < len(q):\n try:\n if int(q[i]['forks_count']) > 0:\n t = get_fork_list(q[i]['full_name'])\n q += t\n except:\n q[i]['forks_count'] = 0\n\n i += 1\n return q\n \n\ndef detect_dup_pr_cross_repo(upstream, q, out_file):\n pr = {}\n num = 0\n tot_len = 0\n for branch in q:\n t = get_repo_info(branch['full_name'], 'pull')\n if len(t) > 0:\n pr[branch['full_name']] = t\n \n print('start on ', out_file)\n print('number of sub repo', len(pr))\n\n out = open(out_file, 'w')\n #out2 = open(out_file + '.log', 'a')\n\n c = classify()\n\n # init_model_with_repo(upstream)\n all_pr = []\n for b in pr:\n all_pr += shuffle(pr[b])[:2000]\n save_id = out_file.replace('/', '_').replace('.txt', '')\n init_model_with_pulls(all_pr, save_id)\n\n results = []\n\n for b1 in pr:\n for b2 in pr:\n if b1 < b2:\n if len(pr[b1]) > len(pr[b2]):\n b1, b2 = b2, b1\n\n for p1 in pr[b1]:\n if filter_big and check_too_big(p1):\n continue\n\n li = []\n for p2 in pr[b2]:\n if filter_big and check_too_big(p2):\n continue\n\n # print(p2['number'])\n\n if p1[\"user\"][\"id\"] == p2[\"user\"][\"id\"]:\n continue\n\n feature_vector = get_pr_sim_vector(p1, p2)\n\n t = [p1[\"html_url\"], p2[\"html_url\"], feature_vector, c.predict_proba([feature_vector])[0][1], \\\n p1[\"user\"][\"id\"] == p2[\"user\"][\"id\"], \\\n ]\n li.append(t)\n\n # print(t, file=out2)\n\n li = sorted(li, key=lambda x: x[3], reverse=True)\n if li[0][3] > 0.55:\n print(li[0])\n print(li[0], file=out)\n\n out.close()\n #out2.close()\n\n\ndef detect_on_pr(repo):\n out_file = 'evaluation/' + repo.replace('/', '_') + '_cross_forks.txt'\n\n if os.path.exists(out_file):\n return\n \n q = list(filter(lambda x: int(x['forks_count']) > 0, get_all_fork_list(repo)))\n \n '''\n q = [{'full_name': 'MarlinFirmware/Marlin'},\\\n {'full_name': 'Ultimaker/Ultimaker2Marlin'},\\\n {'full_name': 'RichCattell/Marlin'},\\\n {'full_name': 'jcrocholl/Marlin'}]\n '''\n detect_dup_pr_cross_repo(repo, q, out_file)\n\n\n'''\ndef detect_on_commit(repo):\n init_model_with_repo(repo)\n li = get_all_fork_list(repo)\n \n for t in li:\n r = t['full_name']\n if r == repo:\n continue\n \n branch_list = get_branch_list(r)\n''' \n \ndef run_cross_repo(r1, r2):\n q =[{'full_name': r1}, {'full_name': r2}]\n \n out_file = 'evaluation/' + (r1 + '_' + r2).replace('/', '_') + '_cross_forks_version2.txt'\n\n if os.path.exists(out_file) and (os.path.getsize(out_file) > 0):\n print('Already run before =', r1, r2)\n return\n\n detect_dup_pr_cross_repo(r2, q, out_file)\n \nif __name__ == \"__main__\":\n if len(sys.argv) == 3:\n r1 = sys.argv[1].strip()\n r2 = sys.argv[2].strip()\n run_cross_repo(r1, r2)\n sys.exit()\n\n hard_forks = open('data/hard_forks.txt').readlines()\n\n for repo_pair in hard_forks:\n r1, r2 = repo_pair.strip().split()\n run_cross_repo(r1, r2)\n \n\n '''\n if len(sys.argv) == 2:\n r = sys.argv[1].strip()\n detect_on_pr(r)\n else:\n t = open('data/repoList_morethan200PR.txt').readlines()\n # t = open('data/repoList_rly.txt').readlines()\n for repo in t:\n r = repo.strip()\n detect_on_pr(r)\n '''\n","repo_name":"luyaor/INTRUDE","sub_path":"detect_on_cross_forks.py","file_name":"detect_on_cross_forks.py","file_ext":"py","file_size_in_byte":4014,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"78"}
+{"seq_id":"27088302048","text":"def convertmltodec(line):\n \n # To convert moneyline odds to decimal odds:\n # if the moneyline is positive, divide by 100 and add 1\n # if it is negative, divide 100 by the moneyline amount (without the minus sign) and add 1\n\n if line > 0:\n dec_odds = (line / 100) + 1\n return dec_odds\n else:\n dec_odds = (100 / (line * -1)) + 1\n return dec_odds\n ","repo_name":"jmholleran/cit-129-2019-fall","sub_path":"modules_lesson/mltodec.py","file_name":"mltodec.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"24805465758","text":"\r\nimport os\r\n\r\nclass EmotionVocab():\r\n def __init__(self):\r\n self.label = '' #label_name\r\n self.words_id = [] #list of words id\r\n self.times = 0\r\n\r\n\r\nclass EmotionWord():\r\n def __init__(self):\r\n self.label_id = 0 #label id\r\n self.context = '' #what is the word\r\n self.times = 0\r\n\r\ndef is_number(str):\r\n if ord(str) >= ord('0') and ord(str) <= ord('9'):\r\n return True\r\n return False\r\n\r\ndef is_letter(str):\r\n if ord(str) >= ord('a') and ord(str) <= ord('z'):\r\n return True\r\n if ord(str) >= ord('A') and ord(str) <= ord('Z'):\r\n return True\r\n return False\r\n\r\ndef get_Emotion(emotion_path):\r\n Emotion_list = []\r\n Words_list = []\r\n main_path = emotion_path\r\n dir = os.listdir(main_path)\r\n dir.sort()\r\n for filename in dir:\r\n emotion = EmotionVocab()\r\n emotion.label = filename.split('.')[0]\r\n open_path = os.path.join(main_path, filename)\r\n f = open(open_path, 'r')\r\n for line in f.readlines():\r\n str = line.split()\r\n if len(str) == 0: continue\r\n if is_number(str[0][0]):\r\n word = ''\r\n for char in str[1]:\r\n if is_letter(char):\r\n word += char\r\n else:\r\n break\r\n emotion_word = EmotionWord()\r\n emotion_word.label_id = len(Emotion_list)\r\n emotion_word.context = word\r\n emotion.words_id.append(len(Words_list))\r\n Words_list.append(emotion_word)\r\n Emotion_list.append(emotion)\r\n return Emotion_list, Words_list\r\n\r\n","repo_name":"Tongji-MIC-Lab/EmVidCap","sub_path":"FT_v1/FT/caption-eval-master/caption-eval-master/Emotion-Eval/getEmationVocab.py","file_name":"getEmationVocab.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"70816572091","text":"#!/usr/bin/pytho\n\nimport pandas as pand\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.metrics import accuracy_score\n\ndef load_data():\n df = pand.read_csv(\"yourfile1.csv\");\n return df\n\ndf = load_data()\ndf.drop(['dataId'],inplace=True, axis=1)\n\ncolumn_names = df.columns.values\nprint(len(column_names))\nindex = [len(column_names)-1]\n#index = [1,2,3,4,5,6,11]\nfeatures = np.delete(column_names, index)\n\n# separating 80% data for training\ntrain = df.sample(frac=0.8, random_state=1)\n#print(train)\n\n# rest 20% data for evaluation purpose\ntest = df.loc[~df.index.isin(train.index)]\n#print(test)\n\n#using the seperated 80% train data set devide the features and lables\nfeatures_train = train[features]\nlabels_train = train[\"result\"]\nfeatures_test = test[features]\nlabels_test = test[\"result\"]\n\n# runRate_lose = [features_train[0] for ii in range(0, len(features_train)) if labels_train==0]\n# powerPlayRuns_lose = [features_train[1] for ii in range(0, len(features_train)) if labels_train==0]\n# runRate_win = [features_train[0] for ii in range(0, len(features_train)) if labels_train==1]\n# powerPlayRuns_win = [features_train[1] for ii in range(0, len(features_train)) if labels_train==1]\n#\n# #### initial visualization\n# plt.xlim(0.0, 90.0)\n# plt.ylim(0.0, 15.0)\n# plt.scatter(powerPlayRuns_lose, runRate_lose, color = \"b\", label=\"lose\")\n# plt.scatter(powerPlayRuns_win, runRate_win, color = \"r\", label=\"win\")\n# plt.legend()\n# plt.xlabel(\"dots\")\n# plt.ylabel(\"runs\")\n# plt.show()\n# print(\"asdfasdf\")\n\nimport itertools as iter\n\ndef pset(lst):\n comb = (iter.combinations(lst, l) for l in range(len(lst) + 1))\n return list(iter.chain.from_iterable(comb))\n\nnewArray = pset(features)\nfrom sklearn.svm import SVC\nclf = SVC(kernel=\"rbf\", C=10000, gamma=1)\nprint(\"begin feature len\", len(newArray))\nmyList=[]\nfeatureArray = []\n\n#for num in range(0, len(newArray)-1): # Second Example\n\nnewArray = pset(features)#creating all possible combinations to array\nfor num in range(0, 1000):\n#for num in range(0, len(newArray)):\n print('Combination of :', newArray[num+1], ' number ', num)\n featureArray.append(newArray[num+1])\n features_train = train[np.asarray(newArray[num+1])]#selecting a combination of features\n labels_train = train[\"result\"]\n features_test = test[np.asarray(newArray[num + 1])]\n labels_test = test[\"result\"]\n clf.fit(features_train, labels_train)#fitting the data to learner\n predictions = clf.predict(features_test)#predicting for test data\n mse = accuracy_score(predictions, labels_test)\n # mse = mean_squared_error(predictions, labels_test)\n myList.append(mse)\n\n\n print(\"heee your done\")\n print(mse)\n\nprint(myList)\n\n\nmylist1 = list(range(len(myList)))\nprint(mylist1)\n\n#### initial visualization\nprint(featureArray)\nprint(features)\n#plt.xticks(x, my_xticks)\nplt.xlim(0.0, len(myList))\nplt.ylim(0.0, 1.0)\n#plt.scatter(mylist1, myList, color = \"b\", label=\"lose\")\nplt.plot(mylist1, myList, 'b-',label='Accuracy deviance')\n#plt.scatter(powerPlayRuns_win, runRate_win, color = \"r\", label=\"win\")\nplt.legend()\nplt.xlabel(\"possible combinations (1000)\")\nplt.ylabel(\"accuracy\")\nplt.show()\nprint(\"asdfasdf\")\n\n\nx = mylist1\ny = myList\n#x_ticks_labels = ['jan','feb','mar','apr','may']\nx_ticks_labels = features\n\nfig, ax = plt.subplots()\nfig.subplots_adjust(bottom=0.25)\nax.plot(x,y)\n\n# Set number of ticks 
for x-axis\nax.set_xticks(x)\n# Set ticks labels for x-axis\nax.set_xticklabels(x_ticks_labels, rotation='vertical', fontsize=10)\n# fig.suptitle('Deviation of Mean Square Error', fontsize=14)\nfig.suptitle('Deviation of accuracy', fontsize=14)\nplt.xlabel(\"single combinations\")\n#plt.ylabel(\"mean square error\")\nplt.ylabel(\"accuracy\")\nplt.show()\nprint(\"doneeee\")","repo_name":"gihanwijesinghe/FYP-PythonCode","sub_path":"Visualization&Classification/AccuracyOfFeatures.py","file_name":"AccuracyOfFeatures.py","file_ext":"py","file_size_in_byte":3781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"6356203113","text":"from sqlite3 import Row\nimport tkinter as tk\nfrom tkinter.tix import COLUMN, ROW\nimport color as cl\n#####\nunit_list = [['活动室', '室部', '校对组', '一组', '编辑组', '一体化'],\n ['学习室', ' ', '打印室', '三组', '二组', '仓库']]\ncell_list = [\n [['无'], ['无2']], #'活动室'\n [['a1'], ['a2']], #'室部'\n [['1', '2', '3'], ['', '4', '5']], #'校对组'\n [['1', '2', '3', '4', '5'], ['6', '7', '8', '9', '阿斯顿啊']], #'一组'\n [['2', '0', '3', '4', '5'], ['6', '7', '8', '9', '阿斯顿啊']], # 编辑组\n [['12', '2', '3', '4', '5'], ['6', '7', '8', '9', '阿斯顿啊']], # 一体化\n [['无5'], ['学习室']], #'学习室'\n [['无7'], ['无8']], # 无\n [['gitlab', '123', '103'], ['T1300', 'T1200', 'T1100']], # 打印室\n [['15', '2', '3', '4', '5'], ['6', '7', '8', '9', '阿斯顿啊']], #'三组'\n [['81', '2', '3', '4', '5'], ['6', '7', '8', '9', '阿斯顿啊']], #'二组对组'\n [['19', '2', '3', '4', '5'], ['6', '7', '8', '9', '阿斯顿啊', 'uy']] #'仓库'\n]\n\n##############\n\nwin = tk.Tk()\nwin.title(\"资产管理\")\nwin.geometry('800x500+300+200') # 窗口创建位置是(300,200)\n######################################################################\n# 详细信息\n\narea2 = tk.LabelFrame(win,\n text='位置',\n labelanchor=\"n\",\n bg=cl.Gray,\n bd=5,\n height=150)\narea2.place(x=0, y=0)\n\nfor i in unit_list:\n for j in i:\n ck=tk.Checkbutton(area2, text=j)\n ck.grid(column=unit_list.index(i),row=i.index(j),sticky='w')\n ck.select()\n\n\n\n# 将 selectmode 设置为多选模式,并为Listbox控件添加滚动条\nlistbox1 =tk.Listbox(win,selectmode = tk.BROWSE,height =25)\n#listbox1.pack()\nlistbox1.place(x=10,y=200,width=150,relheight=0.6)\n# 设置滚动条,使用 yview使其在垂直方向上滚动 Listbox 组件的内容,通过绑定 Scollbar 组件的 command 参数实现\ns =tk. Scrollbar(listbox1)\nlistbox1.configure( yscrollcommand = s.set)\n# 设置垂直滚动条显示的位置,使得滚动条,靠右侧;通过 fill 沿着 Y 轴填充\ns.pack(side = tk.RIGHT,fill = tk.Y)\ns.config(command = listbox1.yview)\nfor i,item in enumerate(range(1,50)):\n listbox1.insert(i,item)\n\n\n\narea3 = tk.LabelFrame(win,\n text='详细信息',\n labelanchor=\"n\",\n bg=cl.Gray,\n bd=5,\n height=150)\narea3.place(x=200, y=0)\n\nfor i in unit_list:\n for j in i:\n ck=tk.Checkbutton(area3, text=j)\n ck.grid(column=unit_list.index(i),row=i.index(j),sticky='w')\n ck.select()\n\n\n\n\n\nclass class_button():\n\n def __init__(self, _frame, _user, _pos=[0, 1]):\n # 常量\n self.frame = _frame\n self.user = _user\n if self.user != '':\n self.button = tk.Button(self.frame, text=self.user, width=6)\n self.button.grid(row=_pos[0], column=_pos[1], padx=2, pady=10)\n\n pass\n\n\nclass class_group():\n\n def __init__(self, _title, _window, _pos: list):\n self.bg = '#5CACEE'\n self.frame = tk.LabelFrame(_window,\n text=_title,\n labelanchor=\"n\",\n bg=self.bg,\n bd=5,\n height=50)\n self.frame.grid(row=_pos[1], column=_pos[0], padx=2, pady=15)\n\n # button1=class_button(self.frame,'asd',[1,1])\n\n\nf = tk.Frame(win, width=600, height=600, bg='#5CAC00')\nunits = []\nfor i in unit_list:\n for j in i:\n x = i.index(j)\n y = unit_list.index(i)\n g = class_group(j, _window=area3, _pos=[x, y])\n units.append(g)\n print(g.frame.grid_info())\nunits[-1].frame.grid(row=2, column=0, columnspan=3)\ncells = []\nfor i in cell_list: # 组 in 组列表\n for j in i: # 排 in 组\n for k in j: # 人 in 排\n unit = units[cell_list.index(i)]\n x = i.index(j)\n y = j.index(k)\n c = class_button(unit.frame, k, [x, y])\n cells.append(c)\n\nwin.mainloop()","repo_name":"bmzk/my_python_program","sub_path":"资产管理/资产管理.py","file_name":"资产管理.py","file_ext":"py","file_size_in_byte":4221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"19207925458","text":"class Solution:\n def longestPalindrome(self, s: str) -> str:\n length = len(s)\n if length < 2:\n return s\n odd_set = set(i for i in range(length))\n even_set = set(i for i in range(length - 1) if s[i] == s[i + 1])\n odd_length = 0\n even_length = 0\n tmp_set = set()\n \n for odd_length in range(1, length):\n for i in odd_set:\n if i >= odd_length and i < length - odd_length and s[i - odd_length] == s[i + odd_length]:\n tmp_set.add(i)\n if tmp_set:\n odd_set = tmp_set\n tmp_set=set()\n else:\n break\n \n odd_length -= 1\n \n for even_length in range(1, length):\n for i in even_set:\n if i >= even_length and i < length - even_length - 1 and s[i - even_length] == s[i + even_length +1]:\n tmp_set.add(i)\n if tmp_set:\n even_set = tmp_set\n tmp_set = set()\n else:\n break\n even_length -= 1\n print(odd_length, even_length)\n print(odd_set, even_set)\n if odd_set and odd_length > 0 and odd_length * 2 + 1 >= even_length * 2 + 2:\n index = odd_set.pop()\n return s[index - odd_length:index + odd_length + 1]\n elif even_set:\n index = even_set.pop()\n return s[index - even_length:index + even_length + 2]\n else:\n return s[0]\n","repo_name":"michaelhuo/pcp","sub_path":"5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"32939487761","text":"import sys\n\ninput = sys.stdin.readline\n\nn = int(input())\nbuses = list(map(int, input().split()))\n\nanswer = 0\n\n# a3 < a1 < a2\nfor i in range(n-2):\n # a1이 1이 아닐 때\n if buses[i] != 1:\n # a1 뒤에서\n for j in range(i+1, n-i):\n # a1보다 큰 a2를 찾았을 때\n if buses[i] < buses[j]:\n # a1보다 작은 a3 후보군과 a2 뒤의 숫자들을 비교하여 개수 카운트\n # 1번 방법\n answer += len(set(range(1, buses[i])) & set(buses[j+1:]))\n# # for k in range(1, buses[i]):\n# # 2번 방법\n# if k in buses[j+1:]:\n# answer += 1\n# # 3번 방법\n# answer += buses[j+1:].count(k)\n\nprint(answer)","repo_name":"cascadeffect/coding-test","sub_path":"Softeer/commute_bus.py","file_name":"commute_bus.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"74468653690","text":"#!usr/bin/python3\n\n\nimport requests,time\nfrom bs4 import BeautifulSoup\n\n\"\"\"\n http://www.woyaogexing.com/ 网站的头像图片爬取.\n\n\"\"\"\n\n\n\ndef icon_spiders(url,num):\n #主程序\n r=requests.get(url)\n if r.status_code == 200:\n r.encoding = \"utf8\"\n soup = BeautifulSoup(r.text,\"lxml\")\n img = soup.find_all(\"img\",{\"class\":\"lazy\"})\n for i in img:\n r = requests.get(i[\"src\"])\n if r.status_code == 200:\n with open(\"img/\"+str(num)+\".jpg\",\"wb\") as f:\n f.write(r.content)\n time.sleep(0.5)\n num += 1\n\n return num\n\n\nif __name__ == \"__main__\":\n num = 0\n for i in range(2,1000):\n try:\n url = \"http://www.woyaogexing.com/touxiang/index_%d.html\"%i\n num = icon_spiders(url,num)\n except:\n num += 1\n continue\n\n","repo_name":"lkk09/icon_spiders","sub_path":"icon_spiders.py","file_name":"icon_spiders.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"39078969044","text":"import sys\n\ndef t5paths(size=\"all\"):\n sizes = [\"small\", \"base\", \"large\", \"xl\", \"xxl\"] \n if size not in sizes and size != \"all\":\n print(\"Not a valid model size. Valid sizes are:\"+sizes)\n sys.exit()\n\n paths = []\n for s in sizes:\n if s==size or size==\"all\":\n #if size == \"base\":\n # private = False\n #else:\n private = True\n\n n = \"byt5_\"+s+\"_NCC\"\n p = \"gs://north-t5x/pretrained_models/\"+s+\"/norwegian_NCC_plus_English_byt5x_\"+s+\"/\"\n c = p+\"checkpoint_1500000\"\n if s!=\"xl\" and s!=\"xxl\":\n paths.append({\"name\":n,\"path\":p,\"checkpoint\":c,\"private\":private,\"size\":s})\n \n n = \"t5_\"+s+\"_scand\"\n p = \"gs://north-t5x/pretrained_models/\"+s+\"/scandinavian_t5x_\"+s+\"/\"\n c = p+\"checkpoint_1700000\"\n if s!=\"xl\" and s!=\"xxl\":\n paths.append({\"name\":n,\"path\":p,\"checkpoint\":c,\"private\":private,\"size\":s})\n \n n = \"t5_\"+s+\"_NCC_lm\"\n p = \"gs://north-t5x/pretrained_models/\"+s+\"/norwegian_NCC_plus_English_pluss100k_lm_t5x_\"+s+\"/\"\n c = p+\"checkpoint_1600000\"\n paths.append({\"name\":n,\"path\":p,\"checkpoint\":c,\"private\":False,\"size\":s})\n \n n = \"t5_\"+s+\"_NCC_modern_lm\"\n p = \"gs://north-t5x/pretrained_models/\"+s+\"/norwegian_NCC_plus_English_pluss200k_balanced_bokmaal_nynorsk_pluss100k_lm_t5x_\"+s+\"/\"\n c = p+\"checkpoint_1800000\"\n if s!=\"xxl\":\n paths.append({\"name\":n,\"path\":p,\"checkpoint\":c,\"private\":private,\"size\":s})\n \n n = \"t5_\"+s+\"_NCC_modern\"\n p = \"gs://north-t5x/pretrained_models/\"+s+\"/norwegian_NCC_plus_English_pluss200k_balanced_bokmaal_nynorsk_t5x_\"+s+\"/\"\n c = p+\"checkpoint_1700000\"\n if s!=\"xxl\":\n paths.append({\"name\":n,\"path\":p,\"checkpoint\":c,\"private\":private,\"size\":s})\n \n n = \"t5_\"+s+\"_NCC_scand\"\n p = \"gs://north-t5x/pretrained_models/\"+s+\"/norwegian_NCC_plus_English_pluss200k_scandinavian_t5x_\"+s+\"/\"\n c = p+\"checkpoint_1700000\"\n if s!=\"xxl\":\n paths.append({\"name\":n,\"path\":p,\"checkpoint\":c,\"private\":private,\"size\":s})\n \n n = \"t5_\"+s+\"_scand3M\"\n p = \"gs://north-t5x/pretrained_models/\"+s+\"/scandinavian3k_t5x_\"+s+\"/\"\n c = p+\"checkpoint_3000000\"\n if s!=\"xxl\" and s!=\"small\":\n paths.append({\"name\":n,\"path\":p,\"checkpoint\":c,\"private\":private,\"size\":s})\n \n n = \"t5_\"+s+\"_NCC\"\n p = \"gs://north-t5x/pretrained_models/\"+s+\"/norwegian_NCC_plus_English_t5x_\"+s+\"/\"\n c = p+\"checkpoint_1500000\"\n paths.append({\"name\":n,\"path\":p,\"checkpoint\":c,\"private\":False,\"size\":s})\n\n return paths\n\ndef create_index_table(target):\n mdict = dict()\n model = dict()\n \n for m in t5paths():\n mdict[m['name']] ={\"size\":m['size'], 'path': m['path'], 'private': m['private']}\n\n show_private = mdict[target]['private']\n\n sizes=['small','base','large','xl','xxl']\n types=['t5_##_NCC','t5_##_NCC_lm','t5_##_NCC_modern','t5_##_NCC_modern_lm','t5_##_NCC_scand','t5_##_scand','byt5_##_NCC','t5_##_scand3M']\n table=\"| |**Small** _60M_|**Base** _220M_|**Large** _770M_|**XL** _3B_|**XXL** _11B_|\\n|:-----------|:------------:|:------------:|:------------:|:------------:|:------------:|\\n\"\n\n for t in types:\n row = \"|\"\n for s in sizes:\n model = mdict.get(t.replace('##',s))\n if model:\n if model['private'] == False or show_private == True:\n\n if t.replace('##',s) == target:\n row += \"✔|\"\n \n else:\n if t.replace('##',s) == \"t5_base_scand3M\":\n row+='| [🤗](https://huggingface.co/north/'+t.replace('##',s)+')|'\n else:\n 
row+='[🤗](https://huggingface.co/north/'+t.replace('##',s)+')|'\n else:\n row+\" ❌|\"\n\n if row.replace(\"|\",\"\").replace(\"-\",\"\") != \"\":\n table+=\"|\"+t.replace(\"_##\",\"\").replace(\"byt5\",\"North-byT5\").replace(\"t5\",\"North-T5\").replace(\"_\",\"‑\")+row+\"|\\n\"\n \n bucket = \"\\n## T5X Checkpoint\\nThe original T5X checkpoint is also available for this model in the [Google Cloud Bucket](\"+mdict[target]['path']+\").\\n\"\n\n return table + bucket\n\n\n","repo_name":"peregilk/north-t5","sub_path":"t5paths.py","file_name":"t5paths.py","file_ext":"py","file_size_in_byte":4559,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"78"}
+{"seq_id":"30868803902","text":"from django.http import HttpResponseRedirect\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom django.template import RequestContext\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.urlresolvers import reverse\nfrom django.views.generic.list_detail import object_list, object_detail\nfrom django.contrib import messages\nfrom core.models import Customer, Product, Sale\nfrom csvimporter.models import CSV\nfrom csvimporter.forms import CSVForm, CSVAssociateForm\n\n# TODO: Make this view class based\ndef prepare_view(request, kwargs):\n if not kwargs.get(\"model\"):\n raise ValueError(\"You haven't specified the model\")\n else:\n kwargs[\"app_label\"] = kwargs[\"model\"]._meta.app_label\n kwargs[\"model_name\"] = kwargs[\"model\"]._meta.module_name\n \"\"\"\n kwargs[\"redirect_url\"] = reverse(\n \"admin:%s_%s_changelist\" % (kwargs[\"app_label\"],\n kwargs[\"model_name\"])\n )\n \"\"\"\n\n kwargs[\"extra_context\"] = {\n \"app_label\": kwargs[\"app_label\"],\n \"model_name\": kwargs[\"model_name\"],\n #\"redirect_url\": kwargs[\"redirect_url\"],\n }\n return kwargs\n\n\n@login_required\ndef csv_list(request, **kwargs):\n kwargs = prepare_view(request, kwargs)\n if not kwargs.get(\"template_name\"):\n kwargs[\"template_name\"] = 'csv_list.html'\n return object_list(request,\n queryset=CSV.objects.all(),\n template_name=kwargs[\"template_name\"],\n template_object_name='csv',\n extra_context=kwargs[\"extra_context\"],\n )\n\n\n@login_required\ndef associate(request, object_id, modelname=\"\", **kwargs):\n if not kwargs.get(\"template_name\"):\n kwargs[\"template_name\"] = 'csv_detail.html'\n if not kwargs.get(\"form_class\"):\n kwargs[\"form_class\"] = CSVAssociateForm\n if not modelname:\n raise ValueError(\n \"A model wasn't specified. This is our fault. Please let us know this happened so we can fix it, thanks.\")\n else:\n kwargs[\"model\"] = eval(modelname)\n\n kwargs = prepare_view(request, kwargs)\n instance = get_object_or_404(CSV, pk=object_id)\n if request.method == 'POST':\n form = kwargs[\"form_class\"](instance, request.POST)\n if form.is_valid():\n form.save(request)\n request.user.message_set.create(message='CSV imported.')\n return HttpResponseRedirect(\"/core/%s\" % (modelname.lower()))\n else:\n messages.info(request, 'Uploaded CSV. 
Please associate fields below.')\n form = CSVAssociateForm(instance)\n kwargs[\"extra_context\"].update({\"form\": form})\n return object_detail(request,\n queryset=CSV.objects.all(),\n object_id=object_id,\n template_name=kwargs[\"template_name\"],\n template_object_name='csv',\n extra_context=kwargs[\"extra_context\"],\n )\n\n\n@login_required\ndef new(request, **kwargs):\n if not kwargs.get(\"template_name\"):\n kwargs[\"template_name\"] = 'new.html'\n if not kwargs.get(\"form_class\"):\n kwargs[\"form_class\"] = CSVForm\n kwargs = prepare_view(request, kwargs)\n if request.method == 'POST':\n form = kwargs[\"form_class\"](kwargs[\"model\"],\n request.POST, request.FILES)\n if form.is_valid():\n modelname = kwargs[\"model\"].__name__\n instance = form.save()\n return HttpResponseRedirect(\n reverse('associate-csv', args=[instance.id, modelname]))\n else:\n form = kwargs[\"form_class\"](kwargs[\"model\"])\n kwargs[\"extra_context\"].update({\"form\": form})\n kwargs[\"extra_context\"].update({\"csv_import_type\": request.get_full_path().split('/')[3]})\n return render_to_response(kwargs[\"template_name\"],\n kwargs[\"extra_context\"],\n context_instance=RequestContext(request)\n )\n","repo_name":"moeburney/trackpattern","sub_path":"csvimporter/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3856,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
+{"seq_id":"36875251103","text":"import gluon.contrib.simplejson\n\ndef index():\n \"\"\"\n example action using the internationalization operator T and flash\n rendered by views/default/index.html or views/generic.html\n\n if you need a simple wiki simply replace the two lines below with:\n return auth.wiki()\n \"\"\"\n response.flash = T(\"Testing the pronto\")\n\n cells=[{\"type\":\"basic.Rect\",\"position\":{\"x\":200,\"y\":30},\"size\":{\"width\":100,\"height\":30},\"angle\":0,\"id\":\"test\",\"z\":1,\"attrs\":{\"rect\":{\"fill\":\"blue\"},\"text\":{\"text\":\"my box\",\"fill\":\"white\"}}}]\n\n cellsjson = gluon.contrib.simplejson.dumps(cells)\n\n return dict(message=T('Hello World'),cellsjson=cellsjson)\n\n\ndef palette():\n #This will setup the basic shapes once we can draw them\n\n response.flash = T(\"Palette\")\n\n cells=[{\"type\":\"basic.Rect\",\"position\":{\"x\":200,\"y\":30},\"size\":{\"width\":100,\"height\":30},\"angle\":0,\"id\":\"test\",\"z\":1,\"attrs\":{\"rect\":{\"fill\":\"blue\"},\"text\":{\"text\":\"my box\",\"fill\":\"white\"}}}]\n\n cellsjson = gluon.contrib.simplejson.dumps(cells)\n return dict(message=T('Hello World'),cellsjson=cellsjson)","repo_name":"DonaldMcC/pronto","sub_path":"controllers/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"8255117150","text":"# Loop through the two list at the same time, comparing current elements from both, add the smaller and non-dup one to final list\n# how to decide it is non-dup: comparing the to-be added smaller element with the last element in final list.\ndef merge_list(l1, l2):\n i = 0\n j = 0\n result = []\n while i < len(l1) and j < len(l2):\n print('l1[{}] is: {}, l2[{}] is : {}'.format(i, l1[i], j, l2[j]))\n print(l1[i] <= l2[j])\n if l1[i] <= l2[j]:\n # if result is empty, directly append.\n if len(result) == 0 or result[-1] != l1[i]:\n result.append(l1[i])\n print(i, result)\n i += 1\n else:\n if len(result) == 0 or result[-1] != l2[j]:\n result.append(l2[j])\n print(j, result)\n j += 1\n \n while i < len(l1):\n if len(result) == 0 or result[-1] != l1[i]:\n result.append(l1[i])\n i += 1\n \n while j < len(l2):\n if len(result) == 0 or result[-1] != l2[j]:\n result.append(l2[j])\n j += 1\n \n return result\n\ndef merge_list1(l1, l2):\n return sorted(set(l1+l2))\nresult = merge_list([1,1,2,4], [2,3,5])\nprint(result)\n","repo_name":"smallfishxz/Practice_Algorithm","sub_path":"List/Merge_SortedLists.py","file_name":"Merge_SortedLists.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"2366873873","text":"from flask import Blueprint, jsonify, request\n\necho = Blueprint('v1_echo', __name__, url_prefix='/v1/echo')\n\n\n@echo.route('/', methods=['GET', 'POST'])\ndef echo_():\n if request.method == 'GET':\n return jsonify({'result': 'ok'})\n\n if request.method == 'POST':\n data = request.json\n return_data = {'result': 'post message = ' + data['msg']}\n return jsonify(return_data)\n","repo_name":"airiest/flask-minimal-app","sub_path":"api/v1/echo.py","file_name":"echo.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"10664063135","text":"import torch\n\nfrom utils.pytorch_util import calculate_iou\nfrom utils.pytorch_util import convert_box_from_hw_to_yx\n\ndevice = 'cuda:0' if torch.cuda.is_available() else 'cpu'\n\n\ndef yolo_pretrain_custom_loss(predict, target):\n losses = -1 * (target * torch.log(predict + 1e-15) + (1 - target) * torch.log(1 - predict + 1e-15))\n batch = losses.shape[0]\n loss = losses.sum() / batch\n\n return loss\n\n\ndef yolov2_custom_loss_1(predict, target, anchor_boxes, num_bbox_predict, num_classes, lambda_coord=5, lambda_noobj=.5):\n \"\"\"\n :param predict: tensor, [batch, height, width, (cy, cx, h, w, p) * num bounding boxes]\n :param target: tensor, [batch, height, width, (cy, cx, h, w, p) * num bounding boxes]\n :param anchor_boxes: tensor, [height, width, (y, x, h, w)]\n :param num_bbox_predict: int\n :param num_classes: int\n :param lambda_coord: float\n :param lambda_noobj: float\n\n :return: float tensor, [float]\n \"\"\"\n NUM_BATCH, H, W = predict.shape[:3]\n\n pred = predict.reshape(NUM_BATCH, -1, 5 + num_classes) # [num batch, h * w * 5(num predict bbox), 5(num coords) + num classes)]\n tar = target.reshape(NUM_BATCH, -1, 5 + num_classes) # [num batch, h * w * 5(num predict bbox), 5(num coords) + num classes)]\n anc = anchor_boxes.reshape(-1, 4)\n\n # obj_responsible_mask = torch.zeros(NUM_BATCH, H * W, 5).to(device)\n\n # for i in range(num_bbox_predict):\n # obj_responsible_mask[:, :, :, i] = target[:, :, :, 4]\n\n # Get responsible masks\n pred_bboxes = pred[:, :, :4]\n pred_probs = pred[:, :, 4]\n\n tar_bboxes = tar[:, :, :4]\n tar_probs = tar[:, :, 4] # [num batch, h * w * 5(num predict bbox)]\n\n # pred_y1 = pred_bboxes[:, :, 0] - .5 * pred_bboxes[:, :, 2]\n # pred_x1 = pred_bboxes[:, :, 1] - .5 * pred_bboxes[:, :, 3]\n # pred_y2 = pred_bboxes[:, :, 0] + pred_bboxes[:, :, 2] * anc[:, :, 2]\n # pred_x2 = pred_bboxes[:, :, 1] + pred_bboxes[:, :, 3] * anc[:, :, 3]\n pred_y = pred_bboxes[:, :, 0] + anc[:, 0]\n pred_x = pred_bboxes[:, :, 1] + anc[:, 1]\n pred_h = pred_bboxes[:, :, 2] * anc[:, 2]\n pred_w = pred_bboxes[:, :, 3] * anc[:, 3]\n\n pred_y1 = pred_y - .5 * pred_h\n pred_x1 = pred_x - .5 * pred_w\n pred_y2 = pred_y + pred_h\n pred_x2 = pred_x + pred_w\n\n pred_bboxes = torch.cat([pred_y1.unsqueeze(2), pred_x1.unsqueeze(2), pred_y2.unsqueeze(2), pred_x2.unsqueeze(2)], dim=2)\n\n tar_y = tar_bboxes[:, :, 0] + anc[:, 0]\n tar_x = tar_bboxes[:, :, 1] + anc[:, 1]\n tar_h = tar_bboxes[:, :, 2] * anc[:, 2]\n tar_w = tar_bboxes[:, :, 3] * anc[:, 3]\n\n # tar_y1 = tar_bboxes[:, :, 0] - .5 * tar_bboxes[:, :, 2]\n # tar_x1 = tar_bboxes[:, :, 1] - .5 * tar_bboxes[:, :, 3]\n # tar_y2 = tar_bboxes[:, :, 0] + tar_bboxes[:, :, 2] * anc[:, 2]\n # tar_x2 = tar_bboxes[:, :, 1] + tar_bboxes[:, :, 3] * anc[:, 3]\n\n tar_y1 = tar_y - .5 * tar_h\n tar_x1 = tar_x - .5 * tar_w\n tar_y2 = tar_y + tar_h\n tar_x2 = tar_x + tar_w\n\n tar_bboxes = torch.cat([tar_y1.unsqueeze(2), tar_x1.unsqueeze(2), tar_y2.unsqueeze(2), tar_x2.unsqueeze(2)], dim=2)\n\n # for idx1 in range(NUM_BATCH):\n # for idx2 in range(13 * 13 * 5):\n # if tar[idx1, idx2, 0] != .5:\n # print(f'{idx1 + 1} {pred[idx1, idx2, :5].detach().cpu().numpy()}, {tar[idx1, idx2, :5].detach().cpu().numpy()}')\n\n ########## Original (start) ##########\n indices_valid = torch.where(tar_probs > 0)\n # pred_bboxes_valid = pred_bboxes[indices_valid].reshape(NUM_BATCH, -1, 4)\n # tar_bboxes_valid = tar_bboxes[indices_valid].reshape(NUM_BATCH, -1, 4)\n pred_bboxes_valid = pred_bboxes[indices_valid].reshape(-1, 4)\n 
tar_bboxes_valid = tar_bboxes[indices_valid].reshape(-1, 4)\n\n ious_valid = calculate_iou(pred_bboxes_valid, tar_bboxes_valid, dim=1).reshape(-1)\n\n ious = torch.zeros(NUM_BATCH, H * W * 5).to(device) # [num batch, h * w * 5(num predict bbox)]\n ious[indices_valid] = ious_valid\n ious = ious.reshape(NUM_BATCH, H * W, 5)\n ########## Original (end) ##########\n ########## Changed (start) ##########\n ious = calculate_iou(pred_bboxes, tar_bboxes, dim=2).reshape(NUM_BATCH, -1, 5)\n ########## Changed (end) ##########\n\n\n # ious_temp = ious.reshape(NUM_BATCH, -1)\n # for idx1 in range(NUM_BATCH):\n # for idx2 in range(13 * 13 * 5):\n # if tar_probs[idx1, idx2].detach().cpu().numpy() > 0:\n # print('[{}] {}'.format(idx1, ious_temp[idx1, idx2].detach().cpu().numpy()))\n\n indices_argmax_ious = torch.argmax(ious, dim=2)\n idx1 = []\n for i in range(indices_argmax_ious.shape[0]):\n idx1 += [i for _ in range(indices_argmax_ious.shape[1])]\n\n idx2 = []\n for i in range(indices_argmax_ious.shape[0]):\n idx2 += [j for j in range(indices_argmax_ious.shape[1])]\n\n idx3 = indices_argmax_ious.reshape(-1).squeeze()\n\n obj_responsible_mask = torch.zeros(NUM_BATCH, H * W, 5).to(device) # [num batch, h * w, 5(num predict bbox)]\n obj_responsible_mask[idx1, idx2, idx3] = 1\n ########## Added (start) ########## - 2021.03.03\n obj_responsible_mask *= tar_probs.reshape(NUM_BATCH, -1, 5)\n ########## Added (end) ########## - 2021.03.03\n # obj_responsible_mask[indices_valid] = 1\n obj_responsible_mask = obj_responsible_mask.reshape(NUM_BATCH, -1, 5)\n\n # for i in range(NUM_BATCH):\n # for j in range(13 * 13):\n # if 1 in obj_responsible_mask[i, j]:\n # print('[{}] ({}) {}'.format(i, j, obj_responsible_mask[i, j]))\n\n ########## Original (start) ########## - 2021.03.02\n # no_obj_responsible_mask = torch.zeros(NUM_BATCH, H * W, 5).to(device)\n # no_obj_responsible_mask[indices_argmax_ious[:-1]] = 1\n # no_obj_responsible_mask[indices_argmax_ious] = 0\n ########## Original (end) ########## - 2021.03.02\n ########## Changed (start) ########## - 2021.03.02\n no_obj_responsible_mask = 1 - obj_responsible_mask\n ########## Changed (end) ########## - 2021.03.02\n\n # Get coordinate loss(1)\n loss_coord = torch.square(pred[:, :, 0] - tar[:, :, 0]) + \\\n torch.square(pred[:, :, 1] - tar[:, :, 1]) + \\\n torch.square(torch.sqrt(pred[:, :, 2]) - torch.sqrt(tar[:, :, 2])) + \\\n torch.square(torch.sqrt(pred[:, :, 3]) - torch.sqrt(tar[:, :, 3]))\n loss_coord *= lambda_coord * obj_responsible_mask.reshape(NUM_BATCH, -1)\n\n # for i in range(pred.shape[1]):\n # if tar[0, i, 4] == 1:\n # print(pred[0, i, 4], tar[0, i, 4], ious.reshape(NUM_BATCH, -1)[0, i])\n\n # Get confidence loss(2)\n loss_confidence = obj_responsible_mask.reshape(NUM_BATCH, -1) * torch.square(pred[:, :, 4] - tar[:, :, 4] * ious.reshape(NUM_BATCH, -1)) + \\\n lambda_noobj * no_obj_responsible_mask.reshape(NUM_BATCH, -1) * \\\n torch.square(pred[:, :, 4] - tar[:, :, 4])\n\n # for idx1 in range(NUM_BATCH):\n # for idx2 in range(13 * 13 * 5):\n # if obj_responsible_mask.reshape(NUM_BATCH, -1)[idx1, idx2] == 1:\n # print(f'PRED: {pred[idx1, idx2, 4]}, TAR: {tar[idx1, idx2, 4] * ious.reshape(NUM_BATCH, -1)[idx1, idx2]}')\n\n # ious_temp = ious.reshape(NUM_BATCH, -1)\n # obj_mask_temp = obj_responsible_mask.reshape(NUM_BATCH, -1)\n # no_obj_mask_temp = no_obj_responsible_mask.reshape(NUM_BATCH, -1)\n # for i in range(NUM_BATCH):\n # for j in range(13 * 13 * 5):\n # if obj_mask_temp[i, j] == 1:\n # print('[{}] {:.5f} {} {:.5f}'.format(i + 1, pred[i, j, 
4].detach().cpu().item(), tar[i, j, 4].detach().cpu().item(), ious_temp[i, j].item()))\n    #         if no_obj_mask_temp[i, j] == 1:\n    #             print('{:.5f} {} / {} {}'.format(\n    #                 pred[i, j, 4].detach().cpu().item(), tar[i, j, 4].detach().cpu().item(), obj_mask_temp[i, j], no_obj_mask_temp[i, j]))\n\n\n    # Get class loss(3)\n    loss_class = torch.square(pred[:, :, 5:] - tar[:, :, 5:])\n    ########## Original (start) ########## - 2021.03.02\n    # loss_class = loss_class.reshape(NUM_BATCH, H * W, -1)\n    ########## Original (end) ########## - 2021.03.02\n    loss_class = torch.sum(loss_class, dim=2)\n    ########## Original (start) ########## - 2021.03.02\n    # loss_class *= responsible_mask.reshape(NUM_BATCH, -1)\n    ########## Original (end) ########## - 2021.03.02\n    ########## Changed (start) ########## - 2021.03.02\n    loss_class *= obj_responsible_mask.reshape(NUM_BATCH, -1)\n    ########## Changed (end) ########## - 2021.03.02\n\n    # for i in range(NUM_BATCH):\n    #     for j in range(13 * 13):\n    #         for m in range(5):\n    #             if loss_class.reshape(NUM_BATCH, -1, 5)[i, j, m] != 0:\n    #                 print(i, j, m, loss_class.reshape(NUM_BATCH, -1, 5)[i, j, m])\n\n    # Sum up all the losses\n    loss_coord = loss_coord.sum() / NUM_BATCH\n    loss_confidence = loss_confidence.sum() / NUM_BATCH\n    loss_class = loss_class.sum() / NUM_BATCH\n    loss = loss_coord + loss_confidence + loss_class\n\n    # if loss.detach().cpu().item() > 1000:\n    #     print('bbox : ', tar_bboxes_valid)\n    #     print('probs : ', tar_probs)\n\n    return loss, loss_coord, loss_confidence, loss_class\n\n\ndef yolov2_custom_loss_2(predict, target, anchor_boxes, num_bbox_predict, num_classes, lambda_coord=5, lambda_noobj=.5):\n    \"\"\"\n    :param predict: tensor, [batch, height, width, (cy, cx, h, w, p) * num bounding boxes]\n    :param target: tensor, [batch, height, width, (cy, cx, h, w, p) * num bounding boxes]\n    :param anchor_boxes: tensor, anchor boxes (not used in this implementation)\n    :param num_bbox_predict: int\n    :param num_classes: int\n    :param lambda_coord: float\n    :param lambda_noobj: float\n\n    :return: float tensor, [float]\n    \"\"\"\n\n    h, w = predict.shape[1:3]\n\n    coord_loss = torch.zeros(1).to(device)\n    confidence_loss = torch.zeros(1).to(device)\n    class_loss = torch.zeros(1).to(device)\n\n    n_batch = predict.shape[0]\n    for b in range(n_batch):\n        obj_responsible_mask = torch.zeros(h, w, num_bbox_predict).to(device)\n        no_obj_responsible_mask = torch.zeros(h, w, num_bbox_predict).to(device)\n\n        # Get responsible box masks\n        for i in range(num_bbox_predict):\n            obj_responsible_mask[:, :, i] = target[b, :, :, (5 + num_classes) * i + 4]\n            no_obj_responsible_mask[:, :, i] = target[b, :, :, (5 + num_classes) * i + 4]\n\n        # Iterate over every cell of the h x w feature-map grid\n        for s1 in range(h):\n            for s2 in range(w):\n                if obj_responsible_mask[s1, s2, 0] == 1:\n                    ious = torch.zeros(num_bbox_predict)\n\n                    for n in range(num_bbox_predict):\n                        box_temp = convert_box_from_hw_to_yx(predict[b, s1, s2, (5 + num_classes) * n:(5 + num_classes) * n + 4]).to(device)\n                        gt = target[b, s1, s2, :4]\n                        ious[n] = calculate_iou(box_temp, gt)\n\n                    idx_max_iou = ious.argmax().item()\n\n                    for n in range(num_bbox_predict):\n                        if n != idx_max_iou:\n                            obj_responsible_mask[s1, s2, n] = 0\n                        else:\n                            no_obj_responsible_mask[s1, s2, n] = 0\n\n        responsible_mask = torch.zeros(h, w).to(device)\n        for n in range(num_bbox_predict):\n            responsible_mask += obj_responsible_mask[:, :, n]\n\n        # Calculate losses\n        coord_loss_batch = torch.zeros(1).to(device)\n        confidence_loss_batch = torch.zeros(1).to(device)\n        class_loss_batch = torch.zeros(1).to(device)\n\n        for i in range(num_bbox_predict):\n            # Coordinate loss\n            coord_losses_temp = torch.square(predict[b, :, :, (5 + num_classes) * i] 
- target[b, :, :, (5 + num_classes) * i]) \\\n + torch.square(predict[b, :, :, (5 + num_classes) * i + 1] - target[b, :, :, (5 + num_classes) * i + 1]) \\\n + torch.square(torch.sqrt(predict[b, :, :, (5 + num_classes) * i + 2]) - torch.sqrt(target[b, :, :, (5 + num_classes) * i + 2])) \\\n + torch.square(torch.sqrt(predict[b, :, :, (5 + num_classes) * i + 3]) - torch.sqrt(target[b, :, :, (5 + num_classes) * i + 3]))\n coord_losses_temp *= obj_responsible_mask[:, :, i]\n coord_loss_batch += coord_losses_temp.sum()\n\n # Confidence loss\n confidence_losses_temp = torch.square(predict[b, :, :, (5 + num_classes) * i + 4] - target[b, :, :, (5 + num_classes) * i + 4])\n confidence_loss_batch += (confidence_losses_temp * obj_responsible_mask[:, :, i] \\\n + lambda_noobj * confidence_losses_temp * no_obj_responsible_mask[:, :, i]).sum()\n\n # Class loss\n class_losses_temp = torch.square(predict[b, :, :, (5 + num_classes) * i + 5:(5 + num_classes) * (i + 1)] -\n target[b, :, :, (5 + num_classes) * i + 5:(5 + num_classes) * (i + 1)]).sum(dim=2)\n class_loss_batch += (responsible_mask * class_losses_temp).sum()\n\n coord_loss += coord_loss_batch\n confidence_loss += confidence_loss_batch\n class_loss += class_loss_batch\n\n loss = (coord_loss + confidence_loss + class_loss) / n_batch\n # print(coord_loss.detach().cpu().item(), confidence_loss.detach().cpu().item(), class_loss.detach().cpu().item())\n\n return loss, coord_loss / n_batch, confidence_loss / n_batch, class_loss / n_batch\n\n","repo_name":"tjwldnjss13/yolov2-mobile-pytorch","sub_path":"loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":13336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"70249163771","text":"from django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.views import logout_then_login\nfrom django.shortcuts import render\n\nfrom .forms import CarburantForm, EntretientForm, CarburantTraitement\nfrom .models import Carburant, Entretient, TraitementCarburant\n\n\n# Create your views here.\n\n\ndef welcome(request):\n template_name='client.html'\n return render(request , template_name )\n\n@login_required\ndef welcome_admin(request):\n template_name='index.html'\n nb_carb=Carburant.objects.all().filter(traite=False).count()\n nb_carbt=Carburant.objects.all().filter(traite=True).count()\n nb_entr=Entretient.objects.all().filter(traite=False).count()\n nb_entrt=Entretient.objects.all().filter(traite=True).count()\n context = {'nb_carb': nb_carb, 'nb_carbt': nb_carbt, 'nb_entr': nb_entr, 'nb_entrt':nb_entrt}\n return render(request , template_name, context )\n\ndef login(request):\n template_name='login.html'\n return render(request , template_name )\n\ndef forgot_psw(request):\n template_name='forgot-password.html'\n return render(request , template_name )\n\n@login_required\ndef rapport_mensuel(request):\n template_name='charts.html'\n return render(request , template_name )\n\ndef error404(request):\n template_name='404.html'\n return render(request , template_name )\n\n@login_required\ndef carburant_affiche(request):\n query_results=Carburant.objects.all().filter(traite=False)\n template_name='tables2.html'\n context={\"query_results\":query_results}\n return render(request , template_name ,context)\n\n@login_required\ndef carburant_traffiche(request):\n query_results=Carburant.objects.all().filter(traite=True)\n template_name='tables2.html'\n context={\"query_results\":query_results}\n return render(request , template_name ,context)\n\n@login_required\ndef entretient_affiche(request):\n query_results=Entretient.objects.all().filter(traite=False)\n template_name='tables.html'\n context={\"query_results\":query_results}\n return render(request , template_name ,context)\n\n@login_required\ndef entretient_traffiche(request):\n query_results=Entretient.objects.all().filter(traite=True)\n template_name='tables.html'\n context={\"query_results\":query_results}\n return render(request , template_name ,context)\n\n@login_required\ndef carburant_traitement(request, id):\n form = CarburantTraitement(request.POST or None)\n result= Carburant.objects.get(id=id)\n if form.is_valid():\n obj = TraitementCarburant.objects.create(**form.cleaned_data)\n obj.save()\n form = CarburantTraitement()\n #print('data valid')\n else:\n print('data is not valid')\n context = {'form': form, 'result':result}\n template_name = 'traitement-Carburant.html'\n return render(request, template_name, context)\n\ndef carburant_save(request):\n form = CarburantForm(request.POST or None)\n if form.is_valid():\n obj=Carburant.objects.create(** form.cleaned_data)\n obj.save()\n form = CarburantForm()\n print('data valid')\n else: print('data is not valid')\n context={'form': form}\n template_name = 'carburant.html'\n return render(request, template_name, context)\n\n\ndef entretient_save(request):\n form = EntretientForm(request.POST or None)\n if form.is_valid():\n obj = Entretient.objects.create(**form.cleaned_data)\n obj.save()\n form = EntretientForm()\n print('data valid')\n else:\n print('data is not valid')\n context = {'form': form}\n template_name = 'entretient.html'\n return render(request, 
template_name, context)\n\ndef login_view(request):\n    # the module-level `login` view above shadows django.contrib.auth.login,\n    # so the auth helper is imported under an alias here\n    from django.contrib.auth import login as auth_login\n    username = request.POST['username']\n    password = request.POST['password']\n    user = authenticate(request, username=username, password=password)\n    if user is not None:\n        template_name='index.html'\n        auth_login(request, user)\n        return render(request,template_name)\n    else:\n        print('login failed: invalid credentials')\n        return render(request, 'login.html')\n\ndef logout_view(request):\n    logout(request)\n    template_name='login.html'\n    return render(request, template_name)\n\n\ndef logoutTlogin(request):\n    return logout_then_login(request, login_url='/login')\n","repo_name":"Hadjer711/Demande-carburant-entretient-pour-v-hicule-de-service-","sub_path":"demande/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4256,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
+{"seq_id":"15943158612","text":"import requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport re\nimport logging\n\ndef scrape_table_and_footnotes(url = 'https://cdn-dev.economistdatateam.com/jobs/pds/code-test/index.html'):\n \"\"\"Scrape html table and footnotes\n\n Parameters:\n url (str) : url of website containing polling data\n\n Returns:\n df_rawdata (DataFrame) : contains content of html table as strings\n footnotes (dict) : dict of footnotes (keys = markers, value = footnote text) \n \"\"\"\n\n response = requests.get(url)\n soup = BeautifulSoup(response.content, \"html.parser\")\n\n # table with poll data\n table = soup.find(\"table\")\n\n # table headers\n names_cols = [col.text.strip() for col in table.find_all(\"th\")]\n \n # loop over all the rows in the table and store in df\n rawdata = []\n for row in table.find_all(\"tr\"):\n cells = row.find_all(\"td\")\n rawdata.append({names_cols[i]: cells[i].text.strip() for i in range(len(names_cols))})\n df_rawdata = pd.DataFrame(rawdata)\n\n # footnotes as a dict\n tmp = soup.find('ul', id='notes').find_all(\"li\")\n footnotes = {}\n for li in tmp:\n footnotes[li['data-mark']] = li.text.replace('\\n', ' ').strip()\n \n return df_rawdata, footnotes\n\ndef parse_data(df_rawdata, names_candidates_and_others, footnotes, lims_sum_shares = [0.985, 1.015]):\n \"\"\"Parse poll results from html table\n\n Parameters:\n df_rawdata (DataFrame) : content of html table formatted as strings\n names_candidates_and_others (list): List of candidates in the election, incl. 'Others'\n footnotes (list) : footnotes to be removed\n lims_sum_shares (list) : lower and upper limit for sum of vote shares to determine if poll should be removed\n\n Returns:\n df_polls (DataFrame) : contains date of poll, pollster, sample size, vote shares in different formats (e.g. 
datetime, floats)\n \"\"\"\n \n df_polls = df_rawdata.copy()\n\n # remove footnotes\n for f in footnotes.keys():\n for col in df_polls.columns:\n # according to docu default is False, but without explicitly setting it to False encountered regex error!\n df_polls[col] = df_polls[col].str.replace(f, '', regex=False) \n\n # convert string columns to appropriate types\n df_polls['Date'] = pd.to_datetime(df_polls['Date'])\n\n df_polls['Sample'] = pd.to_numeric(df_polls['Sample'].str.replace(',', '')).astype('Int64') # replacing , if possible; float to int\n\n pat = re.compile(r\"[0-9\\.,]+\")\n\n for col in df_polls.columns:\n if col in names_candidates_and_others:\n # convert vote shares to numeric, removing any non-numeric characters except ',' or '.\n df_polls[col] = pd.to_numeric(df_polls[col].str.findall(pat).str.join(''), errors = 'coerce') / 100.0\n\n # remove polls that could not be parsed \n all_na = df_polls.loc[:, names_candidates_and_others].isna().all(axis=1)\n if sum(all_na) > 0:\n logging.warning('Excluded {} poll(s) because vote shares could not be converted to floats'.format(all_na.sum()))\n df_polls = df_polls.loc[~all_na, :]\n\n # remove polls whose vote shares differs from 1 by more than a given margin\n sum_shares = df_polls.loc[:, names_candidates_and_others].sum(axis=1)\n drop_row = (sum_shares < lims_sum_shares[0]) | (sum_shares > lims_sum_shares[1])\n if sum(drop_row) > 0:\n logging.warning('Excluded {} poll(s) because the sum of vote shares was smaller (larger) than {} ({}).'.format(drop_row.sum(), lims_sum_shares[0], lims_sum_shares[1]))\n df_polls = df_polls.loc[~drop_row, :]\n\n return df_polls\n\ndef calculate_trends(df_polls, \n names_candidates, \n k_days = '7D', \n method_interpolate = 'linear'):\n \"\"\"Calculate trend vote shares based on poll results\n\n Parameters:\n df_polls (DataFrame) : contains poll results\n names_candidates (list) : List of candidates in the election\n k_days (str) : rolling average window in days\n method_interpolate (str): method for interpolation of missing values\n\n Returns:\n df_trends (DataFrame) : contains trend vote shares (columns) over time (rows)\n\n \"\"\"\n # resample df_polls to daily frequency taking the mean over days\n df_trends = df_polls.set_index('Date').resample('D').mean()\n\n # 'Sample' is not needed for the trend calculations\n df_trends = df_trends.drop(columns=['Sample'])\n\n # interpolate missing values\n if method_interpolate == 'linear':\n df_trends = df_trends.interpolate(method='linear', limit_direction='both')\n else:\n raise ValueError('method_interpolate must be linear')\n \n # calculate k_days rolling average\n df_trends = df_trends.rolling(window=k_days, on = df_trends.index).mean()\n\n # overwrite trend of Others with 1 - sum of trends of all candidates (if Others is in df_trends)\n if 'Others' in df_trends.columns:\n df_trends['Others'] = 1 - df_trends.loc[:, names_candidates].sum(axis=1, skipna=False)\n\n return df_trends\n\ndef export_dfs_to_csv(df_polls, df_trends):\n \"\"\"Export dataframes to csv\"\"\"\n\n # bring columns in line with the example files\n df_polls = df_polls.rename(columns={'Date': 'date', 'Pollster': 'pollster', 'Sample': 'n'})\n df_trends.index.name = 'date'\n\n # write to csv\n df_polls.to_csv('./polls.csv', index=False)\n df_trends.to_csv('./trends.csv', index=True) # date is 
index!\n\n","repo_name":"philippotto-hauber/poll-tracker-assignment","sub_path":"tools_poll_tracker.py","file_name":"tools_poll_tracker.py","file_ext":"py","file_size_in_byte":5442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"9412769284","text":"import imp\nfrom updater.config import webhook_config\nfrom . import app\nfrom . import config\nfrom flask import request, abort\ntry:\n from . import kube\nexcept ImportError:\n kube = None\nimport json\n\n\n@app.route('//', methods=['POST'])\ndef index(webhook_name, webhook_key):\n if config.webhooks[webhook_name] is None:\n print(\"Invalid wehbook name\")\n abort(403)\n wh_config: config.WebhookConfig = config.webhooks[webhook_name]\n if not wh_config.is_key_valid(webhook_key):\n print(f\"Invalid webhook key: {webhook_key}\")\n print(f\"Expected key: {wh_config._raw_config['key']}\")\n abort(403)\n rq_json = request.get_json()\n if rq_json is None:\n print(\"Invalid JSON payload\")\n abort(403)\n if \"push_data\" not in rq_json:\n print(\"No push_data in payload\")\n abort(403)\n if \"tag\" not in rq_json[\"push_data\"]:\n print(\"No tag found in push_data\")\n abort(403)\n if rq_json[\"push_data\"][\"tag\"] != wh_config.cluster_tag:\n return \"That's cool and all but I don't care\"\n if kube is not None:\n namespace = wh_config.cluster_namespace\n label = wh_config.cluster_deployment_label\n return f\"{kube.restart_deployment(namespace, label)}\"\n return \"Pogging\"","repo_name":"bridgecrew-perf6/Deployment-Updater","sub_path":"updater/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"72835889853","text":"import time\nfrom ..utils.log import log, INFO, ERROR, PASS\nfrom ..utils.i_selenium import assert_tab\nfrom ..utils.i_selenium import wait_for_xpath_element\nfrom ..tests import TestWithDependency\nfrom selenium.common.exceptions import TimeoutException, NoSuchElementException\n\n__all__ = [\"admin_stats_analytics\"]\n\n\n#####\n# Test : Admin Stats Analytics Page\n#####\n@TestWithDependency(\"ADMIN_STATS_ANALYTICS\", [\"ADMIN_STATS_SUMMARY\"])\ndef admin_stats_analytics(driver, ISAAC_WEB, WAIT_DUR, **kwargs):\n \"\"\"Test if admin stats analyitcs page works.\n\n - 'driver' should be a Selenium WebDriver.\n - 'ISAAC_WEB' is the string URL of the Isaac website to be tested.\n - 'WAIT_DUR' is the time in seconds to wait for JavaScript to run/load.\n \"\"\"\n assert_tab(driver, ISAAC_WEB + \"/admin/stats\")\n time.sleep(WAIT_DUR)\n try:\n analytics_button = driver.find_element_by_xpath(\"//a[@ui-sref='adminStats.isaacAnalytics']\")\n analytics_button.click()\n log(INFO, \"Clicked 'View Analytics' button.\")\n wait_for_xpath_element(driver, \"//h2[contains(text(), 'Last user locations')]\")\n except NoSuchElementException:\n log(ERROR, \"Can't find 'Analytics' button; can't continue!\")\n return False\n except TimeoutException:\n log(ERROR, \"Analytics page didn't load after clicking button; can't continue!\")\n return False\n\n try:\n locations_button = driver.find_element_by_xpath(\"//a[@ng-click='getLocationData()']\")\n locations_button.click()\n log(INFO, \"Click 'Generate Location Data' button.\")\n wait_for_xpath_element(driver, \"//div[@class='angular-google-map']\", 60)\n log(INFO, \"Google Map of location data loaded successfully.\")\n except TimeoutException:\n log(ERROR, \"Google Map didn't load!\")\n # return False # Is this really a fatal error; probably not!\n except NoSuchElementException:\n log(ERROR, \"Can't find 'Generate Location Data' button; can't continue testing!\")\n return False\n\n try:\n answer_graph_button = driver.find_element_by_xpath(\"//div[@ng-show='editingGraph']//li/label[text()='ANSWER_QUESTION']/../input\")\n answer_graph_button.click()\n log(INFO, \"Added 'ANSWER_QUESTION' events to graph.\")\n graph_button = driver.find_elements_by_xpath(\"//a[@ng-click='updateGraph()']\")[0]\n graph_button.click()\n log(INFO, \"Clicked to generate graph.\")\n wait_for_xpath_element(driver, \"//div[@ng-show='questionsAnsweredOverTime']/div[@data='questionsAnsweredOverTime']\", 25)\n log(INFO, \"A graph was shown as expected.\")\n except NoSuchElementException:\n log(ERROR, \"Can't find graph tickbox for 'ANSWER_QUESTION' events; can't continue!\")\n return False\n except IndexError:\n log(ERROR, \"Can't find 'Update Graph' button; can't continue!\")\n return False\n except TimeoutException:\n log(ERROR, \"Graph didn't load after clicking 'Update Graph' button; can't continue!\")\n return False\n\n log(PASS, \"Admin Stats Analytics page contains info as expected.\")\n return True\n","repo_name":"jsharkey13/isaac-selenium-testing","sub_path":"isaactest/tests/admin_stats_analytics.py","file_name":"admin_stats_analytics.py","file_ext":"py","file_size_in_byte":3115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"12667050581","text":"import websockets\n# import socket\nimport asyncio\n# import warnings\nimport struct\nimport UGV.Packet as packet\nimport UGV.Node as node\nimport time\n\nPORT = 1234\n#url = \"192.168.244.243\"\n# url = \"localhost\"\nurl = \"192.168.0.104\"\n\n# # Example path packet data\n# x = b'15.10100'\n# y = b'34.35000'\n# ts_ms = b'10506789'\n# v = b'2.300000'\n# heading = b'19.12345'\n# path = b'2' + x + y + ts_ms + v + heading\n\n# Example path packet data\nx = '15.10100'\ny = '34.35000'\nv = '2.300000'\nheading = '19.12345'\nts_ms = '10506789'\n\ntest = '5'\n\npath = x + y + ts_ms + v + heading\npath2 = '5' + x + y\n\nasync def start_network():\n async with websockets.serve(handler, url, PORT):\n # print(type(path))\n # print(path)\n await asyncio.Future() # run forever\n\n\n# create handler for each connection\nasync def handler(websocket):\n while(1):\n await websocket.send(path2.encode())\n data = await websocket.recv()\n print(data.decode())\n await asyncio.sleep(3)\n # reply = f\"Data recieved as: {data}!\"\n\n # await websocket.send(path2)\n # await websocket.send(test.encode())\n # await asyncio.sleep(5)\n\n\n\n# start_server = websockets.serve(handler, url, PORT)\n# asyncio.get_event_loop().run_until_complete(start_server)\n# asyncio.get_event_loop().run_forever()\nasyncio.run(start_network())\n","repo_name":"40I6-Capstone/Server-Backend","sub_path":"Test_server.py","file_name":"Test_server.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"17923302575","text":"import uuid\n\nimport pytest\n\nfrom chemotion_api import Instance\n\ndef test_new_collection(logged_in_instance: Instance):\n col = logged_in_instance.get_root_collection()\n col.add_collection('Test_collection')\n col.save()\n\n\n tc = col.get_collection('Test_collection')\n assert tc.get_path() == '/Test_collection'\n tc.delete()\n col.save()\n col = logged_in_instance.get_root_collection()\n with pytest.raises(ModuleNotFoundError) as e:\n col.get_collection('Test_collection')\n\n@pytest.fixture()\ndef prepare_manipulation(logged_in_instance):\n name = uuid.uuid4().__str__()\n root_col = logged_in_instance.get_root_collection()\n new_root = root_col.add_collection(name)\n new_root.add_collection('A')\n new_root.add_collection('B')\n root_col.save()\n yield {\n 'instance': logged_in_instance,\n 'name': name,\n 'root_col': root_col,\n 'a_col': new_root.get_collection('A'),\n 'b_col': new_root.get_collection('B')\n }\n new_root.delete()\n root_col.save()\n\n\ndef test_move_collection(prepare_manipulation):\n name = prepare_manipulation['name']\n root_col = prepare_manipulation['root_col']\n b = prepare_manipulation['b_col']\n b.move('/{}/A'.format(name))\n root_col.save()\n with pytest.raises(ModuleNotFoundError) as e:\n root_col.get_collection(name + '/B')\n\n assert root_col.get_collection(name + '/A/B').label == 'B'\n assert root_col.get_collection(name).get_collection(['A', 'B']).label == 'B'\n assert root_col.get_collection(name).get_collection('A/B').label == 'B'\n assert root_col.get_collection(name).get_collection('/{}/A/B'.format(name)).label == 'B'\n\n\ndef test_rename_collection(prepare_manipulation):\n name = prepare_manipulation['name']\n root_col = prepare_manipulation['root_col']\n b = prepare_manipulation['b_col']\n b.label = 'B_NEW'\n root_col.save()\n\n assert root_col.get_collection(name + '/B_NEW').label == 'B_NEW'\n\ndef test_get_create_collection(logged_in_instance):\n root_col = logged_in_instance.get_root_collection()\n a = root_col.get_or_create_collection('A')\n b = a.get_or_create_collection('B')\n b1 = a.get_or_create_collection('B')\n\n\n assert b1.id == b.id\n\n\ndef test_sync(prepare_manipulation):\n root_col = prepare_manipulation['root_col']\n sync_root = root_col.sync_root\n with pytest.raises(Exception) as e:\n sync_root.add_collection('A')\n\n\n","repo_name":"StarmanMartin/ChemotionApi","sub_path":"tests/test_collection.py","file_name":"test_collection.py","file_ext":"py","file_size_in_byte":2428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"42286169835","text":"import time\nimport threading\nfrom baibaoxiang import baibaoxiangInterface,excel,sys_powel\nfrom baibaoxiang.sql import *\nfrom cese.数据库测试.pack.sql_user import *\nimport sys\n\n\nxxx=[]\nclass a(threading.Thread):\n def __init__(self,user_sum,sqlserver,sql,sql_name):\n threading.Thread.__init__(self)\n self.user_sum=int(user_sum)\n self.sqlserver=sqlserver\n self.sql=sql\n self.sql_name=sql_name\n\n\n def run(self):\n for i in range(self.user_sum):\n sql_b = \"SELECT * FROM `plus_users` WHERE user_id >\"+str(i+1)\n if i == 0:\n t4 = time.time()\n go=sql()\n aa = go.lianjie_sql(self.sql_name, sql_b, self.sqlserver)\n xxx.append(aa)\n # print(\"第\",i+1,\"条:\",aa)\n if i == self.user_sum - 1:\n t5 = time.time()\n kk = sys.getsizeof(xxx)\n print(\"共消耗时间:\",float(t5) - float(t4),\"秒,获取了\",kk,\"字节数据\")\n\n\n\nclass Bingfa_test:\n def bingfa_test_go(self,user_sum,b,c):\n t1=time.time()\n k1=a(user_sum,sql_226,b,c)\n k1.start()\n t2 = time.time()\n t3 = float(t2) - float(t1)\n print(\"发起\",user_sum,\"SQL请求,共耗时:\", t3,\"秒\")\n\n\n\n\nif __name__ == \"__main__\":\n sql_a = \"SELECT * FROM `plus_users` WHERE user_id >10\"\n Bingfa_test().bingfa_test_go(\"1\",sql_a,\"plus2test\")","repo_name":"woshichenya/All","sub_path":"weidong/cese/数据库测试/226.py","file_name":"226.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"40796577084","text":"\n# import nltk\n# import nltk.data\n# from nltk.corpus import brown\nfrom nltk.tokenize import word_tokenize, sent_tokenize\n# from nltk.tokenize import blankline_tokenize, LineTokenizer\n# from nltk.probability import FreqDist\n# from nltk.util import bigrams, trigrams, ngrams\n# from nltk.stem import PorterStemmer\nfrom nltk.stem import wordnet, WordNetLemmatizer\n# from nltk import ne_chunk\n# import pandas as pd\nimport nltk\nfrom nltk.corpus import stopwords\nimport numpy as np\nimport os\nimport re\nimport scipy\nfrom pathlib import Path\nfrom db import DbContext\nimport yaml\nimport os.path\nimport itertools\nclass DepClaim:\n def __init__(self, claim_no, dependency):\n self.claim_no = claim_no\n self.dependency = dependency\n \nclass Glove:\n config_path = None\n config_dict = {}\n log_substr_length = 100\n regex_exp = \"^(.+?)[\\,\\.\\)]\"\n total_operation_flag = dependency_occurence_flag = 0\n word_lem = WordNetLemmatizer()\n model = None\n def __init__(self): \n self.config_path = Path(__file__).with_name('./config.yaml') \n self.glove_path = Path(__file__).parent.joinpath('training.data/glove.6B.50d.txt')\n with open(self.config_path, \"r\") as f:\n config_dict = yaml.safe_load(f) \n self.dbContextObj = DbContext(config_dict)\n self.model = self.loadGloveModel(self.glove_path)\n \n def loadGloveModel(self, gloveFile):\n print (\"Loading Glove Model\")\n with open(gloveFile, encoding=\"utf8\" ) as f:\n content = f.readlines()\n model = {}\n for line in content:\n splitLine = line.split()\n word = splitLine[0]\n embedding = np.array([float(val) for val in splitLine[1:]])\n model[word] = embedding\n print (\"Done.\",len(model),\" words loaded!\")\n return model\n\n def preprocess(self, raw_text):\n letters_only_text = re.sub(\"[^a-zA-Z]\", \" \", raw_text)\n words = letters_only_text.lower().split()\n\n # removing stopwords and performing lemmatization\n stopword_set = set(stopwords.words(\"english\"))\n cleaned_words = set([self.word_lem.lemmatize(w) for w in words if w not in stopword_set])\n \n # selecting the words that exist in the glove model alone\n return list(cleaned_words.intersection(self.model))\n\n\n def cosine_distance_wordembedding_method(self, s1, s2):\n print(s1)\n vector_1 = np.mean([self.model[word] for word in s1], axis = 0)\n vector_2 = np.mean([self.model[word] for word in s2], axis = 0)\n return scipy.spatial.distance.cosine(vector_1, vector_2)\n \n\n def similarity_between_two_sentences(self, s1, s2):\n print(f'Claim : {s1[:20]} and Claim: {s2[:20]}')\n s1 = self.preprocess(s1)\n s2 = self.preprocess(s2)\n cosine = self.cosine_distance_wordembedding_method(s1, s2)\n percentage = round((1-cosine) * 100, 2)\n return percentage\n \n def Evaluate(self):\n print('Starting .. 
')\n os.system('clear')\n patents_df = self.dbContextObj.get_patent_ids()\n\n patents = patents_df['id'].tolist()\n print('Patent length : {}'.format(len(patents)))\n for patent in patents[:5]:\n print('Patent : {}'.format(patent))\n claims_list = []\n dependency_list = []\n dep_df = self.dbContextObj.get_dependencies_by_patent_id(patent)\n \n for patent_id, claim_id, dependency in dep_df.values.tolist():\n claims_df = self.dbContextObj.get_claims_by_id(claim_id)\n claim_text = claims_df['claim_text'][0]\n \n try:\n claim_no = self.get_claim_number(claim_text)\n except:\n continue\n \n claims_list.append(claim_text)\n dependency_list.append( DepClaim(claim_no, dependency) )\n \n print('Claims count : {}'.format(len(claims_list)))\n \n # possible_combinations = self.get_combinations(claims_list) \n every_first_and_second = zip(claims_list[0::2], claims_list[1::2]) \n for first_text, second_text in every_first_and_second: \n similarity_percentage = self.similarity_between_two_sentences(first_text, second_text)\n print('Word Embedding method with a cosine distance axes that our two sentences are similar to ', similarity_percentage,'%')\n\n try:\n first_text_claim_no = self.get_claim_number(first_text)\n second_text_claim_no = self.get_claim_number(second_text)\n except:\n continue\n \n # If score above a certain point then\n if float(similarity_percentage) > 0.75:\n print('Logging high similarity')\n # check if second claim number has dependencies\n dependency = next((x.dependency for x in dependency_list if x.claim_no == second_text_claim_no), None)\n \n if dependency:\n # if exists: check if first claim number is amongst them\n print('Logging dependency existence')\n if dependency in first_text_claim_no:\n print('Logging dependency match')\n # print('Patent_id : {} & Claim_no : {} & Dependency : ({})'.format(patent, first_text_claim_no, dependency) )\n self.dependency_occurence_flag += 1\n \n # else ignore\n self.total_operation_flag += 1\n \n print(\"{} \\t\\t {} \\t\\t Score: {:.4f}\".format(first_text, second_text, similarity_percentage))\n \n \n print('Total operations : {}'.format(self.total_operation_flag))\n print('Similarity operations : {}'.format(self.dependency_occurence_flag))\n \n \n def get_claim_number(self, claim_text):\n text = re.search(self.regex_exp, claim_text)[0]\n return text.rsplit('.', 1)[0]\n \n def get_combinations(self, input_list):\n combination_indices = list(itertools.combinations(range(len(input_list)), 2))\n print(combination_indices)\n\nGlove().Evaluate()\n","repo_name":"rahu619/Keras_playground","sub_path":"GloVe_approach.py","file_name":"GloVe_approach.py","file_ext":"py","file_size_in_byte":6383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"20233670884","text":"\"\"\"OpenWeatherMap widget for QTile\"\"\"\n\nimport requests\n\nfrom libqtile import pangocffi\nfrom libqtile.log_utils import logger\nfrom libqtile.widget import base\n\n__author__ = \"Simon Kennedy \"\n__version__ = \"0.2\"\n\nICON_FONT = \"Weather Icons\"\n\nICONS = {\n \"Weather Icons\": { # https://github.com/erikflowers/weather-icons\n \"01d\": \"\\uF00D\", # Clear sky\n \"01n\": \"\\uF02E\",\n \"02d\": \"\\uF002\", # Few clouds\n \"02n\": \"\\uF086\",\n \"03d\": \"\\uF041\", # Scattered Clouds\n \"03n\": \"\\uF041\",\n \"04d\": \"\\uF013\", # Broken clouds\n \"04n\": \"\\uF013\",\n \"09d\": \"\\uF009\", # Shower Rain\n \"09n\": \"\\uF037\",\n \"10d\": \"\\uF008\", # Rain\n \"10n\": \"\\uF036\",\n \"11d\": \"\\uF010\", # Thunderstorm\n \"11n\": \"\\uF03B\",\n \"13d\": \"\\uF00A\", # Snow\n \"13n\": \"\\uF038\",\n \"50d\": \"\\uF003\", # Mist\n \"50n\": \"\\uF04A\",\n \"sleetd\": \"\\uF0B2\",\n \"sleetn\": \"\\uF0B3\",\n },\n \"Material Design Icons\": {\n \"01d\": \"\\U000F0599\", # Clear sky\n \"01n\": \"\\U000F0594\",\n \"02d\": \"\\U000F0595\", # Few clouds\n \"02n\": \"\\U000F0F31\",\n \"03d\": \"\\U000F0595\", # Scattered Clouds\n \"03n\": \"\\U000F0F31\",\n \"04d\": \"\\U000F0590\", # Broken clouds\n \"04n\": \"\\U000F0F31\",\n \"09d\": \"\\U000F0F33\", # Shower Rain\n \"09n\": \"\\U000F0F33\",\n \"10d\": \"\\U000F0597\", # Rain\n \"10n\": \"\\U000F0597\",\n \"11d\": \"\\U000F0596\", # Thunderstorm\n \"11n\": \"\\U000F0596\",\n \"13d\": \"\\U000F0598\", # Snow\n \"13n\": \"\\U000F0598\",\n \"50d\": \"\\U000F0591\", # Mist\n \"50n\": \"\\U000F0591\",\n \"sleetd\": \"\\U000F0596\",\n \"sleetn\": \"\\U000F0596\",\n },\n}\n\nCONDITION_CODES = {\n 200: (\"thunderstorm with light rain\", \"11d\", \"11n\"),\n 201: (\"thunderstorm with rain\", \"11d\", \"11n\"),\n 202: (\"thunderstorm with heavy rain\", \"11d\", \"11n\"),\n 210: (\"light thunderstorm\", \"11d\", \"11n\"),\n 211: (\"thunderstorm\", \"11d\", \"11n\"),\n 212: (\"heavy thunderstorm\", \"11d\", \"11n\"),\n 221: (\"ragged thunderstorm\", \"11d\", \"11n\"),\n 230: (\"thunderstorm with light drizzle\", \"11d\", \"11n\"),\n 231: (\"thunderstorm with drizzle\", \"11d\", \"11n\"),\n 232: (\"thunderstorm with heavy drizzle\", \"11d\", \"11n\"),\n 300: (\"light intensity drizzle\", \"09d\", \"09n\"),\n 301: (\"drizzle\", \"09d\", \"09n\"),\n 302: (\"heavy intensity drizzle\", \"09d\", \"09n\"),\n 310: (\"light intensity drizzle rain\", \"09d\", \"09n\"),\n 311: (\"drizzle rain\", \"09d\", \"09n\"),\n 312: (\"heavy intensity drizzle rain\", \"09d\", \"09n\"),\n 313: (\"shower rain and drizzle\", \"09d\", \"09n\"),\n 314: (\"heavy shower rain and drizle\", \"09d\", \"09n\"),\n 321: (\"shower drizzle\", \"09d\", \"09n\"),\n 500: (\"light rain\", \"10d\", \"10n\"),\n 501: (\"moderatelight rain\", \"10d\", \"10n\"),\n 502: (\"heavy intensity rain\", \"10d\", \"10n\"),\n 503: (\"very heavy rain\", \"10d\", \"10n\"),\n 504: (\"extreme rain\", \"10d\", \"10n\"),\n 511: (\"freezing rain\", \"13d\", \"13n\"),\n 520: (\"light intensity shower rain\", \"09d\", \"09n\"),\n 521: (\"shower rain\", \"09d\", \"09n\"),\n 522: (\"heavy intensity shower rain\", \"09d\", \"09n\"),\n 531: (\"ragged shower rain\", \"09d\", \"09n\"),\n 600: (\"light snow\", \"13d\", \"13n\"),\n 601: (\"snow\", \"13d\", \"13n\"),\n 602: (\"heavy snow\", \"13d\", \"13n\"),\n 611: (\"sleet\", \"sleetd\", \"sleetn\"),\n 612: (\"light shower sleet\", \"13d\", \"13n\"),\n 613: (\"shower sleet\", \"13d\", \"13n\"),\n 615: (\"light rain and 
snow\", \"13d\", \"13n\"),\n 616: (\"rain and snow\", \"13d\", \"13n\"),\n 620: (\"light shower snow\", \"13d\", \"13n\"),\n 621: (\"shower snow\", \"13d\", \"13n\"),\n 622: (\"heavy shower snow\", \"13d\", \"13n\"),\n 701: (\"mist\", \"50d\", \"50n\"),\n 711: (\"smoke\", \"50d\", \"50n\"),\n 721: (\"haze\", \"50d\", \"50n\"),\n 731: (\"sand / dust swirls\", \"50d\", \"50n\"),\n 741: (\"fog\", \"50d\", \"50n\"),\n 751: (\"sand\", \"50d\", \"50n\"),\n 761: (\"dust\", \"50d\", \"50n\"),\n 762: (\"volcanic ash\", \"50d\", \"50n\"),\n 771: (\"squalls\", \"50d\", \"50n\"),\n 781: (\"tornado\", \"50d\", \"50n\"),\n 800: (\"clear sky\", \"01d\", \"01n\"),\n 801: (\"few clouds\", \"02d\", \"02n\"),\n 802: (\"scattered clouds\", \"03d\", \"03n\"),\n 803: (\"broken clouds\", \"04d\", \"04d\"),\n 804: (\"overcast clouds\", \"04d\", \"04d\"),\n}\n\n# Handle the change of widget base class in the Qtile project\ntry:\n BaseClass = base.ThreadPoolText\n NewWidgetBase = True\nexcept AttributeError:\n BaseClass = base.ThreadedPollText # pylint: disable=no-member\n NewWidgetBase = False\n\nclass OpenWeatherMap(BaseClass):\n \"\"\"OpenWeatherMap widget for QTile\"\"\"\n\n orientations = base.ORIENTATION_HORIZONTAL\n defaults = [\n (\"api_key\", \"\", \"API Key for OpenWeatherMap data\"),\n (\"icon_font\", None, \"Font to use for weather icons\"),\n (\"format\", \"{temp:.1f}{temp_units} {icon}\", \"Format string\",),\n (\"update_interval\", 3600, \"Update interval in seconds between look ups\"),\n (\"latitude\", 51.4934, \"Latitude to look up weather data for\"),\n (\"longitude\", 0.0098, \"Longitude to look up weather data for\"),\n (\"units\", \"metric\", \"Temperature units to use\"),\n ]\n\n def __init__(self, **config):\n if NewWidgetBase:\n super().__init__(\"\", **config)\n else:\n super().__init__(**config)\n\n self.add_defaults(OpenWeatherMap.defaults)\n if not self.api_key:\n logger.exception(\n \"OpenWeatherMap: An API key is required. 
Pass as the `api_key` parameter\"\n            )\n        self.url = f\"https://api.openweathermap.org/data/2.5/weather?lat={self.latitude}&lon={self.longitude}&appid={self.api_key}&units={self.units}\"\n        if not self.icon_font: # pylint: disable=access-member-before-definition # icon_font created by add_defaults\n            self.icon_font = ICON_FONT\n        self.markup = True\n\n    def poll(self):\n        resp = requests.get(self.url)\n        self.status = resp.status_code\n        if resp.status_code == 200:\n            _lookup = lambda group, key: group[key] if key in group else \"\"\n            data = resp.json()\n            owm_icon = _lookup(data[\"weather\"][0], \"icon\")\n            day = owm_icon[-1] == \"d\"\n\n            owm_condition = _lookup(data[\"weather\"][0], \"id\")\n            if owm_condition in CONDITION_CODES:\n                condition = CONDITION_CODES[owm_condition][0].capitalize()\n                if day:\n                    icon_id = CONDITION_CODES[owm_condition][1]\n                else:\n                    icon_id = CONDITION_CODES[owm_condition][2]\n            else:\n                condition = \"Unknown\"\n                logger.warning(\n                    f\"OpenWeatherMap: Unknown condition {owm_condition} received\"\n                )\n                if day:\n                    icon_id = \"01d\"\n                else:\n                    icon_id = \"01n\"\n\n            temp_units = {\"metric\": \"°C\", \"imperial\": \"°F\", \"standard\": \"K\"}[\n                self.units\n            ]\n            # wrap the icon in a pango markup span rendered with the icon font\n            # (the original span tag was lost; the 'face' attribute is assumed);\n            # use a local copy so self.format is not re-wrapped on every poll\n            fmt = self.format.replace(\"{icon}\", '<span face=\"{icon_font}\">{icon}</span>')\n            info = {\n                \"icon\": ICONS[self.icon_font][icon_id],\n                \"icon_font\": self.icon_font,\n                \"condition\": condition,\n                \"temp_units\": temp_units,\n                \"temp\": _lookup(data[\"main\"], \"temp\"),\n                \"temp_min\": _lookup(data[\"main\"], \"temp_min\"),\n                \"temp_max\": _lookup(data[\"main\"], \"temp_max\"),\n                \"temp_feels_like\": _lookup(data[\"main\"], \"feels_like\"),\n                \"pressure\": _lookup(data[\"main\"], \"pressure\"),\n                \"humidity\": _lookup(data[\"main\"], \"humidity\"),\n            }\n\n            return fmt.format(**info)\n        else:\n            return f\"OpenWeatherMap Error {resp.status_code}\"\n","repo_name":"NimbleClint/ShaiHulud","sub_path":"qtile/.config/qtile/owm.py","file_name":"owm.py","file_ext":"py","file_size_in_byte":7723,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"78"}
+{"seq_id":"29145428435","text":"import pandas as pd\r\nimport numpy as np\r\nimport psycopg2\r\nfrom psycopg2.extensions import connection\r\nfrom pyspark.sql import SparkSession\r\nfrom pyspark.sql.functions import when, expr, udf, col, size, lit, count,explode,sum\r\nfrom pyspark.sql.types import StructType, StructField, StringType,ArrayType, IntegerType\r\n\r\n\r\n\r\n\r\n# Create Spark Session\r\nspark = SparkSession.builder.appName(\"Address Ranges Count by Country\").getOrCreate()\r\n\r\ndef parse_hnr_tags(tags, tags_network, interpolation):\r\n # Initialize variables to store parsed information\r\n constant = None\r\n interpolation_value = None\r\n intermediates = None\r\n street = None\r\n\r\n # Parse tags\r\n if tags:\r\n tags_list = [tag.split(\"=>\") for tag in tags.split('\",')]\r\n for tag in tags_list:\r\n key = tag[0].strip('\" ')\r\n value = tag[1].strip('\" ')\r\n if key == \"constant\":\r\n constant = value\r\n if key == \"street\":\r\n street = value\r\n\r\n # Parse tags_network\r\n if tags_network:\r\n tags_network_list = [tag.split(\"=>\") for tag in tags_network.split('\",')]\r\n for tag in tags_network_list:\r\n key = tag[0].strip('\" ')\r\n value = tag[1].strip('\" ')\r\n if key == \"interpolation\":\r\n interpolation_value = value\r\n if key == \"intermediate\":\r\n intermediates = [hsn.strip() for hsn in value.split(\",\")]\r\n\r\n # If interpolation is not provided, use interpolation_value\r\n if interpolation is None:\r\n interpolation = interpolation_value\r\n\r\n # Return the parsed information as a tuple\r\n return (constant, interpolation_value, interpolation, intermediates, street)\r\n\r\n\r\n\r\n# Sample preprocessing logic (replace with your actual preprocessing logic)\r\ndef preprocess_hnr_hsn_udf(min_hsn, max_hsn):\r\n def process_hsn(hsn):\r\n # Sample preprocessing logic for a single house number\r\n # Replace this with your own logic to process a single house number\r\n if hsn is not None:\r\n hsn = hsn.strip() # Remove leading and trailing spaces\r\n hsn = hsn.upper() # Convert to uppercase\r\n return hsn\r\n\r\n min_hsn = process_hsn(min_hsn)\r\n max_hsn = process_hsn(max_hsn)\r\n\r\n return (min_hsn, max_hsn)\r\n\r\n# Sample preprocessing logic for get_hnr_df (replace with your actual logic)\r\ndef get_hnr_df_udf(interpolation):\r\n # Sample logic to produce the house number range\r\n if interpolation == \"alphabetic\":\r\n # Your alphabetic interpolation logic here\r\n hnr_range = \"Alphabetic Range\"\r\n else:\r\n # Your numeric interpolation logic here\r\n hnr_range = \"Numeric Range\"\r\n\r\n return hnr_range\r\n\r\n# Sample preprocessing logic for get_alphabetic_hnr_df_udf\r\n# Sample preprocessing logic for get_alphabetic_hnr_df_udf\r\ndef get_alphabetic_hnr_df_udf(min_hsn, max_hsn):\r\n # Extract the first character from min_hsn and max_hsn\r\n first_char = max_hsn[0]\r\n\r\n # Extract numeric part of hsn if present, or assume 1 as the minimum\r\n min_numeric = int(min_hsn[1:]) if min_hsn[1:].isdigit() else 1\r\n max_numeric = int(max_hsn[1:]) if max_hsn[1:].isdigit() else 1\r\n\r\n # Sample logic to produce alphabetic variance house number ranges\r\n hnr_range = [f\"{first_char}{i}\" for i in range(min_numeric, max_numeric + 1)]\r\n\r\n return hnr_range\r\n\r\n\r\n# Sample logic for correct_hnr_array (replace with your actual logic)\r\ndef correct_hnr_array_udf(arr):\r\n if isinstance(arr, float): # Check for None or float\r\n return []\r\n\r\n if isinstance(arr, list):\r\n corrected_arr = []\r\n for item in arr:\r\n if pd.notna(item): # 
Check for non-NaN items\r\n if isinstance(item, int):\r\n corrected_arr.append(item)\r\n elif isinstance(item, str) and ';' in item:\r\n corrected_arr.extend(item.split(';'))\r\n else:\r\n corrected_arr.append(item)\r\n return corrected_arr\r\n\r\n return []\r\n\r\n# Define the get_numeric_hnr_df_udf UDF\r\ndef get_numeric_hnr_df_udf(min_hsn, max_hsn, interpolation):\r\n hnr_array = []\r\n\r\n def safe_int(value):\r\n try:\r\n return int(value)\r\n except (ValueError, TypeError):\r\n return None\r\n\r\n min_hsn_numeric = safe_int(min_hsn)\r\n max_hsn_numeric = safe_int(max_hsn)\r\n\r\n if min_hsn_numeric is not None and max_hsn_numeric is not None:\r\n if interpolation == \"even\":\r\n for i in range(min_hsn_numeric, max_hsn_numeric + 1, 2):\r\n hnr_array.append(str(i))\r\n elif interpolation == \"odd\":\r\n for i in range(min_hsn_numeric + 1, max_hsn_numeric + 1, 2):\r\n hnr_array.append(str(i))\r\n else:\r\n for i in range(min_hsn_numeric, max_hsn_numeric + 1):\r\n hnr_array.append(str(i))\r\n\r\n return hnr_array\r\n\r\n\r\n\r\n\r\ndef get_country_schema(country: str, conn: connection) -> str:\r\n \"\"\"\r\n :param conn:\r\n :param country: Country Name in ISO-3 Code\r\n :return: Schema for Given ISO-3 Country Code\r\n \"\"\"\r\n # Schemas. list in PostgreSQL database.\r\n schemas_df = pd.read_sql('select * from pg_catalog.pg_namespace', con=conn)\r\n # conn.close()\r\n return schemas_df.loc[schemas_df.nspname.str.contains(f'_{country}')].nspname.iloc[0]\r\n\r\n\r\ndef adminAreaList(schema: str, admin_level: str, conn: connection) -> list:\r\n adminlist = \"\"\"\r\n SELECT \"name\" \r\n FROM {schema}.planet_osm_polygon\r\n where boundary= 'administrative' and admin_level = '{admin_level}'\"\"\".format(schema=schema, admin_level=admin_level)\r\n schemas_df = pd.read_sql(adminlist, con=conn)\r\n # conn.close()\r\n AdminNames = [i for i in schemas_df.name]\r\n return AdminNames\r\n\r\ndef format_query(schema, admin_level,admin) -> str:\r\n query = \"\"\"with sample as(SELECT osm_id as aa8_osm_id ,\"name\" as index_searched_query,ST_SetSRID(way, 4326) as coordinates\r\n FROM \"{schema}\".planet_osm_polygon\r\n where boundary= 'administrative' and admin_level = '{admin_level}' and \"name\" = '{admin}'\r\n )\r\n\r\n, tags as (\r\nselect distinct skeys(tags) keys\r\nfrom \"{schema}\".planet_osm_polygon pop\r\nwhere admin_level in ('4', '8')\r\n)\r\n\r\n, hnr_way as (\r\nselect sample.aa8_osm_id, sample.index_searched_query, pol.* \r\nfrom {schema}.planet_osm_line pol \r\njoin sample on ST_Intersects(pol.way, sample.coordinates)\r\nwhere \"addr:interpolation\" is not null \r\n\r\n)\r\n\r\n, name_tags as (\r\nselect *\r\nfrom tags\r\nwhere (keys like '%name:%' or keys like '%alt%name') and keys not like '%pronunciation%'\r\n)\r\n\r\n, hsn_tags as (\r\nselect distinct skeys(tags) keys\r\nfrom \"{schema}\".planet_osm_point\r\nwhere \"addr:housenumber\" is not null or tags::text like '%addr:housenumber%'\r\n)\r\n, hsn_keys as (\r\nselect * from hsn_tags where (keys like '%addr:housenumber%')\r\n)\r\n\r\n, address_ranges as (\r\nselect \r\n\thnr_way.index_searched_query\r\n, hnr_way.aa8_osm_id\r\n,\thnr_way.osm_id\r\n, ST_astext(hnr_way.way) way\r\n, hnr_way.\"addr:interpolation\" as interpolation\r\n, hnr_way.tags\r\n, hnr_way.tags->'addr:street' as road_name_way\r\n, hnr_way.tags->'addr:interpolation' as interpolation_tag\r\n, hnr_way.\"name\"\r\n, unnest(ways.nodes) nodes\r\n\r\nfrom hnr_way\r\njoin \"{schema}\".planet_osm_ways ways on ways.id = hnr_way.osm_id\r\n\r\n) \r\n\r\n, hsn as 
(\r\nselect\r\npop.tags as tags_hsn\r\n, array_remove(array_append(pop.tags -> array((select keys from hsn_keys )), pop.\"addr:housenumber\"), null) as range_hsn\r\n, address_ranges.*\r\nfrom address_ranges\r\nleft join \"{schema}\".planet_osm_point pop\r\non pop.osm_id = address_ranges.nodes\r\nwhere pop.tags is not null and pop.tags-> 'layer_id' = '15633'\r\n)\r\n\r\n, hsn_long as (\r\n select\r\n hsn.osm_id\r\n, hsn.index_searched_query\r\n, hsn.aa8_osm_id\r\n--, hsn.coordinates\r\n, hsn.tags as tags_network\r\n, hsn.road_name_way\r\n, hsn.interpolation\r\n, hsn.interpolation_tag\r\n, hsn.way\r\n, hsn.name\r\n, first_value(tags_hsn) over(partition by osm_id) as first_tags_hsn\r\n, unnest(range_hsn) as range_hsn\r\nfrom hsn\r\n)\r\n,addressrangesfinal as (select\r\n hsn_long.osm_id\r\n, hsn_long.way\r\n, min(range_hsn) as min_hsn\r\n, max(range_hsn) as max_hsn\r\n, hsn_long.index_searched_query\r\n, hsn_long.aa8_osm_id\r\n--, ST_AsText(hsn_long.coordinates) as coordinates\r\n, hsn_long.tags_network\r\n, hsn_long.road_name_way\r\n, hsn_long.interpolation\r\n, hsn_long.interpolation_tag\r\n, hsn_long.name\r\n, first_tags_hsn as tags\r\n, array_agg(distinct range_hsn) as intermediates\r\nfrom hsn_long\r\ngroup by\r\n hsn_long.osm_id\r\n, hsn_long.index_searched_query\r\n, hsn_long.aa8_osm_id\r\n--, coordinates\r\n, hsn_long.tags_network\r\n, hsn_long.road_name_way\r\n, hsn_long.interpolation\r\n, hsn_long.interpolation_tag\r\n, hsn_long.way\r\n, hsn_long.name\r\n, first_tags_hsn)\r\n\r\n\r\nselect * from addressrangesfinal\r\n\"\"\".format(schema=schema, admin_level=admin_level,admin = admin)\r\n\r\n return query\r\n\r\n\r\n\r\n\r\nhost = '10.137.173.42'\r\ndatabase = 'ggg'\r\nuser = 'ggg'\r\npassword = 'ok'\r\nport = 5432\r\n\r\n# establish connection\r\nconn = psycopg2.connect(host=host, database=database, user=user, password=password, port=port)\r\n\r\n# schema = get_country_schema('fra', conn)\r\n\r\n# admin = adminAreaList(schema,'8',conn)\r\n\r\n# print(admin)\r\n\r\n# Main Code\r\nall_dfs = []\r\nfor country in ['fra']:\r\n country_schema = get_country_schema(country,conn)\r\n adminOrder = adminAreaList(country_schema,'8',conn)\r\n\r\n for admin in adminOrder:\r\n adminnew = admin.replace(\"'\", '\"')\r\n formatted_query = format_query(country_schema, '8',adminnew)\r\n # Define the PGSQL server connection properties\r\n host = '10.137.173.42'\r\n database = 'ggg'\r\n user = 'ggg'\r\n password = 'ok'\r\n port = 5432\r\n\r\n # Define the PGSQL server connection properties\r\n pg_properties = {\r\n \"user\": \"ggg\",\r\n \"password\": \"ok\",\r\n \"driver\": \"org.postgresql.Driver\",\r\n \"url\": \"jdbc:postgresql://10.137.173.42:5432/ggg\"\r\n }\r\n\r\n # Step 4: Read data from PostgreSQL\r\n df = spark.read.jdbc(url=pg_properties[\"url\"], table=f\"({formatted_query}) as subquery\", properties=pg_properties)\r\n\r\n # Add a new column \"country\" with the value 'FRA' to every row\r\n df = df.withColumn(\"country\", lit(country))\r\n\r\n # Register the UDF function with PySpark\r\n udf_parse_hnr_tags = udf(parse_hnr_tags, StructType([\r\n StructField(\"constant\", StringType(), True),\r\n StructField(\"interpolation_value\", StringType(), True),\r\n StructField(\"interpolation\", StringType(), True),\r\n StructField(\"intermediates\", StringType(), True),\r\n StructField(\"street\", StringType(), True)\r\n ]))\r\n\r\n # Apply the UDF to the DataFrame \"parse_hnr_tags\" function\r\n df = df.withColumn(\"parsed_hnr_tags\", udf_parse_hnr_tags(df[\"tags\"], df[\"tags_network\"], 
df[\"interpolation\"]))\r\n\r\n # Register the UDF function with PySpark\r\n udf_preprocess_hnr_hsn = udf(preprocess_hnr_hsn_udf, StructType([\r\n StructField(\"min_hsn_numeric\", StringType(), True),\r\n StructField(\"max_hsn_numeric\", StringType(), True)\r\n ]))\r\n\r\n # Apply the UDF to the DataFrame \"udf_preprocess_hnr_hsn\"\r\n df = df.withColumn(\"preprocessed_hsn\", udf_preprocess_hnr_hsn(df[\"min_hsn\"], df[\"max_hsn\"]))\r\n\r\n # Register the UDF function with PySpark\r\n udf_get_hnr_df = udf(get_hnr_df_udf, StringType())\r\n\r\n\r\n # Apply the UDF to the DataFrame\r\n df = df.withColumn(\"hnr_range\", udf_get_hnr_df(df[\"interpolation\"]))\r\n\r\n # Register the get_alphabetic_hnr_df_udf UDF with PySpark\r\n udf_get_alphabetic_hnr_df = udf(get_alphabetic_hnr_df_udf, ArrayType(StringType()))\r\n\r\n # Apply the get_alphabetic_hnr_df_udf UDF to the DataFrame with alphabetic data\r\n df = df.withColumn(\"hnr_range\", udf_get_alphabetic_hnr_df(df[\"min_hsn\"], df[\"max_hsn\"]))\r\n # Show the results for alphabetic data\r\n # df.show(truncate=False)\r\n\r\n # Register the get_numeric_hnr_df_udf UDF with PySpark\r\n udf_get_numeric_hnr_df = udf(get_numeric_hnr_df_udf, ArrayType(StringType()))\r\n\r\n # Apply the get_numeric_hnr_df_udf UDF to the DataFrame with numeric data\r\n df = df.withColumn(\"hnr_array\",udf_get_numeric_hnr_df(df[\"min_hsn\"], df[\"max_hsn\"],df[\"interpolation\"]))\r\n\r\n # Register the UDF function with PySpark\r\n udf_correct_hnr_array = udf(correct_hnr_array_udf, ArrayType(StringType()))\r\n\r\n # # Apply the UDF to the DataFrame\r\n df = df.withColumn(\"corrected_hnr_array\", udf_correct_hnr_array(df[\"hnr_array\"]))\r\n # Count the elements in the \"corrected_hnr_array\" column and create a new column \"hnr_array_count\"\r\n df = df.withColumn(\"hnr_array_count\", size(col(\"corrected_hnr_array\")).cast(\"int\"))\r\n # Filter out rows with empty arrays in the \"corrected_hnr_array\" column\r\n df = df.filter(size(col(\"corrected_hnr_array\")) > 0)\r\n # Assuming you have a DataFrame named 'df'\r\n df = df.withColumn(\"Road_Line\", lit(1))\r\n\r\n # # # Select and keep only the specified columns\r\n df = df.select(\"country\",\"interpolation\", \"hnr_array_count\",\"hnr_array\", \"Road_Line\" )\r\n\r\n\r\n #aggregations on the columns \"hnr_array_count\" and \"Road_Line\" for each group\r\n\r\n\r\n df = df.groupBy(\"country\", \"interpolation\").agg(sum(\"hnr_array_count\").alias(\"expaned_addresses_count\"),sum(\"Road_Line\").alias(\"interpolation_line_count\"))\r\n\r\n df.show()\r\n\r\n break\r\n\r\n\r\n\r\n\r\n # # Group by \"country\" and \"interpolation,\" and count \"hnr_array_count\" for each group\r\n # df = df.groupBy(\"country\", \"interpolation\").agg(count(\"hnr_array_count\").alias(\"count\"))\r\n\r\n # To add row count, you can modify the aggregation like this:\r\n\r\n\r\n\r\n # all_dfs.append(df)\r\n\r\n# # Concatenate the DataFrames and store the result in a new DataFrame\r\n# concatenated_df = all_dfs[0]\r\n# for df in all_dfs[1:]:\r\n# concatenated_df = concatenated_df.union(df)\r\n#\r\n# # Show the concatenated DataFrame\r\n# concatenated_df.show()\r\n\r\nspark.stop()\r\n\r\n\r\n\r\n\r\n","repo_name":"amolparande-tomtom/addressranges","sub_path":"addressRangesParallel.py","file_name":"addressRangesParallel.py","file_ext":"py","file_size_in_byte":14127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"74595800250","text":"from django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom .models import Question, Choice, SerialNumber, Vote, JudgeVote\nfrom .serializers import QuestionSerializer, ChoiceCountSerializer\nfrom django.db.models import Count\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import transaction\nfrom django.contrib.auth.decorators import user_passes_test\n\n\n@api_view([\"GET\"])\ndef question_list_view(request, format=None):\n if request.method == \"GET\":\n questions = Question.objects.all()\n serializer = QuestionSerializer(questions, many=True)\n return Response(serializer.data)\n\n\n@api_view([\"GET\"])\ndef vote_count_view(request, id: int, format=None):\n if request.method == \"GET\":\n choices = Choice.objects.filter(question__id=id).annotate(count=Count('vote'))\n serializer = ChoiceCountSerializer(choices, many=True)\n return Response(serializer.data)\n\n\n@api_view([\"GET\"])\ndef judge_vote_count_view(request, id: int, format=None):\n if request.method == \"GET\":\n choices = Choice.objects.filter(question__id=id).annotate(count=Count('judge_vote'))\n serializer = ChoiceCountSerializer(choices, many=True)\n return Response(serializer.data)\n\n\n@transaction.atomic\n@api_view([\"POST\"])\ndef update_vote_view(request, question_id: int, format=None):\n if request.method == \"POST\":\n try:\n # Get question\n try:\n question = Question.objects.get(id=question_id)\n except ObjectDoesNotExist:\n raise AssertionError(\"Question doesn't exits\")\n\n if not question.enable:\n raise AssertionError(\"Question is locked\")\n\n # Assert that input is dictionary\n assert type(request.data) == dict, \"Input must be dictionary\"\n\n # Check serial number\n serial_number = request.data[\"serial_number\"]\n assert type(serial_number) == str, \"Serial number must be string\"\n try:\n serial_number_obj = SerialNumber.objects.get(serial_number=serial_number)\n except ObjectDoesNotExist:\n raise AssertionError(\"Serial number is not valid\")\n\n if not serial_number_obj.enable:\n raise AssertionError(\"Serial number valid but not eligible to vote\")\n\n # Check if choices ID are malformated\n choices_ids = request.data[\"choice_ids\"]\n for id in choices_ids:\n assert type(id) == int, \"choice ID must be integer\"\n\n # Check if choices are valid\n num_valid_choices = Choice.objects.filter(id__in=choices_ids, question__id=question_id).count()\n assert len(choices_ids) == num_valid_choices, \"Some choices are not valid\"\n\n # Check if the number of choices are valid\n assert question.min_num_chosen <= num_valid_choices <= question.max_num_chosen,\\\n \"The number of choices is not in range [%d, %d]\" % (question.min_num_chosen,\n question.max_num_chosen)\n\n vote_category = JudgeVote if serial_number_obj.is_judge else Vote\n vote_category.objects.filter(serial_number=serial_number_obj, choice__question__id=question_id).delete()\n for id in choices_ids:\n new_record = vote_category(serial_number=serial_number_obj, choice=Choice.objects.get(id=id))\n new_record.save()\n return Response({\"detail\": \"success\"})\n\n except KeyError as e:\n return Response({\"detail\": \"Malformated input\"}, status.HTTP_400_BAD_REQUEST)\n except AssertionError as e:\n return Response({\"detail\": str(e)}, status.HTTP_400_BAD_REQUEST)\n\n\n@transaction.atomic\n@api_view([\"GET\"])\ndef get_selected_view(request, serial_number, 
format=None):\n if request.method == \"GET\":\n try:\n # Check serial number\n assert type(serial_number) == str, \"Serial number must be string\"\n try:\n serial_number_obj = SerialNumber.objects.get(serial_number=serial_number)\n except ObjectDoesNotExist:\n raise AssertionError(\"Serial number is not valid\")\n\n result = Vote.objects.filter(serial_number__serial_number=serial_number)\\\n .select_related('choice')\n judge_result = JudgeVote.objects.filter(serial_number__serial_number=serial_number)\\\n .select_related('choice')\n return Response([{\"question\": item.choice.question.id,\n \"choice\": item.choice.id} for item in result] +\n [{\"question\": item.choice.question.id,\n \"choice\": item.choice.id} for item in judge_result])\n except AssertionError as e:\n return Response({\"detail\": str(e)}, status.HTTP_400_BAD_REQUEST)\n\n\ndef change_serial_number_state(serial_numbers, state):\n for serial_number in serial_numbers:\n try:\n serial_number_obj = SerialNumber.objects.get(serial_number=serial_number)\n serial_number_obj.enable = state\n serial_number_obj.save()\n except ObjectDoesNotExist:\n SerialNumber(serial_number=serial_number, enable=state).save()\n return Response({\"detail\": \"success\"})\n\n\n@user_passes_test(lambda u: u.is_superuser, login_url='/admin')\n@api_view([\"POST\"])\ndef enable_serial_number(request):\n if request.method == \"POST\":\n return change_serial_number_state(request.data, True)\n\n\n@user_passes_test(lambda u: u.is_superuser, login_url='/admin')\n@api_view([\"POST\"])\ndef disable_serial_number(request):\n if request.method == \"POST\":\n return change_serial_number_state(request.data, False)\n","repo_name":"LouYu2015/voting-server","sub_path":"voting/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
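For context, a hedged sketch of how a client would exercise the update_vote_view endpoint above; the URL pattern and port are assumptions, since the project's urls.py is not shown. The payload keys come straight from the view.

import requests

payload = {"serial_number": "ABC123", "choice_ids": [1, 3]}
# POST the ballot for question 1; the view deletes any previous votes
# tied to this serial number before saving the new choices.
resp = requests.post("http://localhost:8000/questions/1/vote/", json=payload)
print(resp.status_code, resp.json())  # 200 {"detail": "success"} on a valid ballot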
+{"seq_id":"36541545843","text":"import numpy as np\nimport pandas as pd\nimport datetime\n\n# explicitly require this experimental feature\nfrom sklearn.experimental import enable_iterative_imputer # noqa\nfrom sklearn.impute import IterativeImputer, KNNImputer\n\nfrom analyzer.utils import remove_missing\n\n# ICD9 COVID diagnosis Italian codes\nLIST_DIAGNOSIS = ['4808', '4803', 'V0182', '7982']\nLIST_REMOVE_COMORBIDITIES = [\"Immunizations and screening for infectious disease\",\n \"Pneumonia (except that caused by tuberculosis or Genderually transmitted disease)\",\n \"Respiratory failure; insufficiency; arrest (adult)\",\n \"Residual codes; unclassified\",\n \"Diabetes mellitus without complication\",\n \"Diabetes mellitus with complications\",\n \"Influenza\",\n \"Acute and unspecified renal failure\"]\n\nSWAB_WITH_LAB_COLUMNS = ['Age',\n 'Gender',\n 'Body Temperature',\n #'Systolic Blood Pressure',\n 'Respiratory Frequency',\n 'Cardiac Frequency',\n 'C-Reactive Protein (CRP)',\n 'Blood Calcium',\n 'CBC: Leukocytes',\n 'Aspartate Aminotransferase (AST)',\n 'ABG: PaO2',\n 'Prothrombin Time (INR)',\n 'CBC: Hemoglobin',\n 'ABG: pH',\n 'Cholinesterase',\n 'Blood Urea Nitrogen (BUN)',\n 'ABG: MetHb',\n 'Total Bilirubin',\n 'CBC: Mean Corpuscular Volume (MCV)',\n 'Glycemia']\n\nSUBSET_COLUMNS_WITHOUT_ABG = ['Age', 'Gender', 'Body Temperature', \n 'ABG: Oxygen Saturation (SaO2)','Cardiac Frequency', 'Respiratory Frequency', \n #'Systolic Blood Pressure', \n 'Alanine Aminotransferase (ALT)', 'Aspartate Aminotransferase (AST)', \n 'Total Bilirubin', 'Blood Calcium', 'Blood Creatinine', 'Blood Sodium', \n 'Blood Urea Nitrogen (BUN)', 'CBC: Hemoglobin', 'CBC: Mean Corpuscular Volume (MCV)', \n 'CBC: Platelets', 'CBC: Red cell Distribution Width (RDW)', 'CBC: Leukocytes', \n 'C-Reactive Protein (CRP)', 'Prothrombin Time (INR)']\n\nCOLUMNS_WITHOUT_ABG = ['Age', 'Gender', 'Body Temperature', 'Cardiac Frequency',\n 'Respiratory Frequency', 'ABG: Oxygen Saturation (SaO2)',\n #'Systolic Blood Pressure', \n 'Activated Partial Thromboplastin Time (aPTT)', 'Blood Urea Nitrogen (BUN)',\n 'Alanine Aminotransferase (ALT)', 'Aspartate Aminotransferase (AST)',\n 'Blood Amylase', 'Blood Calcium', 'Blood Creatinine', 'Blood Sodium',\n 'C-Reactive Protein (CRP)', 'CBC: Hemoglobin', 'CBC: Leukocytes',\n 'CBC: Mean Corpuscular Volume (MCV)', 'CBC: Platelets',\n 'CBC: Red cell Distribution Width (RDW)', 'Cholinesterase',\n 'Glycemia', 'Potassium Blood Level',\n 'Prothrombin Time (INR)', 'Total Bilirubin']\n\nSPANISH_ITALIAN_DATA = ['Age', 'Gender', 'Body Temperature', \n 'ABG: Oxygen Saturation (SaO2)', 'Cardiac Frequency', \n # 'Systolic Blood Pressure', 'Essential hypertension',\n 'Alanine Aminotransferase (ALT)', 'Aspartate Aminotransferase (AST)', \n 'Blood Creatinine', 'Blood Sodium', 'Blood Urea Nitrogen (BUN)', \n 'Potassium Blood Level', 'CBC: Hemoglobin', 'CBC: Mean Corpuscular Volume (MCV)', \n 'CBC: Platelets', 'CBC: Leukocytes', 'C-Reactive Protein (CRP)', 'Glycemia', \n 'Prothrombin Time (INR)', 'Cardiac dysrhythmias', 'Chronic kidney disease', \n 'Coronary atherosclerosis and other heart disease', 'Diabetes']\n\n\n# Discharge codes\n# 1,2,5,6,9 = discharged, 4 = deceased\nDISCHARGE_CODES = [1, 2, 4, 5, 6, 9]\nDISCHARGE_CODE_RELEASED = 4\n\nDIAGNOSIS_COLUMNS = ['Dia1', 'Dia2', 'Dia3', 'Dia4', 'Dia5']\nDEMOGRAPHICS_FEATURES = ['Gender', 'Age', 'Outcome']\n\n\nRENAMED_LAB_COLUMNS = {\n 'ALT: ALT': 'Alanine Aminotransferase (ALT)',\n 'AST: AST': 'Aspartate Aminotransferase (AST)',\n 'Creatinina UAR: 
CREATININA SANGUE': 'Blood Creatinine',\n 'Potassio: POTASSIEMIA': 'Potassium Blood Level',\n 'Proteina C Reattiva: PCR - PROTEINA C REATTIVA': 'C-Reactive Protein (CRP)',\n 'Glucosio ematico: GLICEMIA': 'Glycemia',\n 'Azoto ematico UAR: AZOTO UREICO EMATICO': 'Blood Urea Nitrogen (BUN)',\n 'Emogasanalisi su sangue arterioso: ACIDO LATTICO': 'ABG: Lactic Acid',\n 'Emogasanalisi su sangue arterioso: IONE BICARBONATO STD': 'ABG: standard bicarbonate (sHCO3)',\n 'Emogasanalisi su sangue arterioso: ECCESSO DI BASI': 'ABG: Base Excess',\n 'Emogasanalisi su sangue arterioso: PO2': 'ABG: PaO2',\n 'Emogasanalisi su sangue arterioso: OSSIGENO SATURAZIONE': 'ABG: Oxygen Saturation (SaO2)',\n 'Emogasanalisi su sangue arterioso: PCO2': 'ABG: PaCO2',\n 'Emogasanalisi su sangue arterioso: PH EMATICO': 'ABG: pH',\n 'Emogasanalisi su sangue arterioso: CARBOSSIEMOGLOBINA': 'ABG: COHb',\n 'Emogasanalisi su sangue arterioso: METAEMOGLOBINA': 'ABG: MetHb',\n 'Sodio: SODIEMIA': 'Blood Sodium',\n 'TEMPO DI PROTROMBINA UAR: TEMPO DI PROTROMBINA RATIO': 'Prothrombin Time (INR)',\n 'TEMPO DI TROMBOPLASTINA PARZIALE: TEMPO DI TROMBOPLASTINA PARZIALE ATTIVATO': 'Activated Partial Thromboplastin Time (aPTT)',\n 'Calcemia: CALCEMIA': 'Blood Calcium',\n 'BILIRUBINA TOTALE REFLEX: BILIRUBINA TOTALE': 'Total Bilirubin',\n 'Amilasi: AMILASI NEL SIERO' : 'Blood Amylase',\n 'Colinesterasi: COLINESTERASI': 'Cholinesterase',\n 'Emocromocitometrico (Urgenze): VOLUME CORPUSCOLARE MEDIO': 'CBC: Mean Corpuscular Volume (MCV)',\n 'Emocromocitometrico (Urgenze): PIASTRINE': 'CBC: Platelets',\n 'Emocromocitometrico (Urgenze): VALORE DISTRIBUTIVO GLOBULI ROSSI': 'CBC: Red cell Distribution Width (RDW)',\n 'Emocromocitometrico (Urgenze): LEUCOCITI': 'CBC: Leukocytes',\n 'Emocromocitometrico (Urgenze): EMOGLOBINA': 'CBC: Hemoglobin',\n }\n\nVITAL_SIGNS = ['SaO2',\n 'P. Max',\n # 'P. Min', # Keep only max because it is more precise\n 'F. Card.',\n 'F. Resp.',\n 'Temp.',\n 'Dolore',\n 'GCS',\n 'STICKGLI']\n\nRENAMED_VITALS_COLUMNS = {\n \"P. Max\": \"Systolic Blood Pressure\",\n # \"P. Min\": \"Diastolic Blood Pressure\",\n \"F. Card.\": \"Cardiac Frequency\",\n \"Temp.\": \"Body Temperature\",\n \"F. 
Resp.\": \"Respiratory Frequency\"\n }\n\n\nLAB_FEATURES_NOT_CONTAIN = ['NOTA', # Remove notes\n 'AFRO', # No normalized creatinine\n 'CAUCAS', # No normalized creatinine\n 'UREA EMATICA' # We keep BUN directly\n ]\nLAB_FEATURES_NOT_MATCH = ['IONE BICARBONATO', # We keep standard directly\n '(PT) TEMPO DI PROTROMBINA', # We keep only Prothrombin Time\n 'HCT', # Remove Hematocrit to keep Hemoglobin\n 'EMATOCRITO', # Remove Hematocrit to keep Hemoglobin\n 'ERITROCITI', # Redundant with Hemoglobin\n 'BE(ECF)', # Remove Base Excess ECF (Keep normal one BE)\n 'CTCO2', # Redundant with PaCO2\n 'FHHB', # Redundant with Hemoglobin (also with Hematocrit)\n 'FO2HB', # Redundant with Hemoglobin\n 'CALCIO IONIZZATO', # Redundant with Blood Calcium\n 'CONCENTRAZIONE HB MEDIA', # Redundant with MCV\n 'CONTENUTO HB MEDIO', # Redundant with MCV\n 'CLORUREMIA', # Redundant with Sodium\n ]\n\nCOLS_TREATMENTS = ['HOSPITAL', 'COUNTRY', 'DT_HOSPITAL_ADMISSION', 'GENDER',\n 'RACE', 'PREGNANT', 'AGE', 'DIABETES', 'HYPERTENSION',\n 'DISLIPIDEMIA', 'OBESITY', 'SMOKING', 'RENALINSUF',\n 'ANYLUNGDISEASE', 'AF', 'VIH', 'ANYHEARTDISEASE',\n 'MAINHEARTDISEASE', 'ANYCEREBROVASCULARDISEASE', 'CONECTIVEDISEASE',\n 'LIVER_DISEASE', 'CANCER', 'HOME_OXIGEN_THERAPY', 'IN_PREVIOUSASPIRIN',\n 'IN_OTHERANTIPLATELET', 'IN_ORALANTICOAGL', 'IN_ACEI_ARB', 'IN_BETABLOCKERS',\n 'IN_BETAGONISTINHALED', 'IN_GLUCORTICOIDSINHALED','IN_DVITAMINSUPLEMENT',\n 'IN_BENZODIACEPINES', 'IN_ANTIDEPRESSANT', 'FAST_BREATHING', 'MAXTEMPERATURE_ADMISSION',\n 'SAT02_BELOW92', 'DDDIMER_B', 'PROCALCITONIN_B', 'PCR_B', 'TRANSAMINASES_B', 'LDL_B',\n 'BLOOD_PRESSURE_ABNORMAL_B', 'CREATININE', 'SODIUM', 'LEUCOCYTES', 'LYMPHOCYTES',\n 'HEMOGLOBIN', 'PLATELETS', 'GLASGOW_COMA_SCORE', 'CHESTXRAY_BNORMALITY',\n 'CORTICOSTEROIDS', 'INTERFERONOR', 'TOCILIZUMAB', 'ANTIBIOTICS','ACEI_ARBS',\n 'ONSET_DATE_DIFF', 'TEST_DATE_DIFF', 'CLOROQUINE', 'ANTIVIRAL','ANTICOAGULANTS',\n 'REGIMEN', 'DEATH', 'COMORB_DEATH']\n\n# This is the list of HCUP used for the mortality paper\nCOVID_MORTALITY_PAPER_HCUP_LIST = [49,50,87,90,95,146]\n\nDIABETES = [49, 50, 174]\nHYPERTENSION = [87, 88, 171]\nDISLIPIDEMIA = [53]\nOBESITY = [58]\nRENALINSUF = [146]\nANYLUNGDISEASE = [116, 117, 121, 122]\nAF = [95]\nVIH = [5]\nANYHEARTDISEASE = [90, 92, 93, 95]\nANYCEREBROVASCULARDISEASE = [98, 100, 101, 102]\nCONECTIVEDISEASE = [198, 199]\nLIVER_DISEASE = [6, 139]\nCANCER = [11, 12, 13, 14, 15, 16, 17, 18, 19, \n 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, \n 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, \n 40, 41, 42, 43]\n# HCUP_LIST FOR THE TREATMENTS PAPER \nCOMORBS_TREATMENTS_NAMES = ['DIABETES', 'HYPERTENSION', 'DISLIPIDEMIA', 'OBESITY', 'RENALINSUF',\n 'ANYLUNGDISEASE', 'AF', 'VIH', 'ANYHEARTDISEASE', 'ANYCEREBROVASCULARDISEASE',\n 'CONECTIVEDISEASE', 'LIVER_DISEASE', 'CANCER']\n\nCOMORBS_TREATMENTS_HCUP = [DIABETES, HYPERTENSION, DISLIPIDEMIA, OBESITY, RENALINSUF,\n ANYLUNGDISEASE, AF, VIH, ANYHEARTDISEASE, ANYCEREBROVASCULARDISEASE,\n CONECTIVEDISEASE, LIVER_DISEASE, CANCER]\n\n\nHCUP_LIST = list(set(DIABETES + HYPERTENSION + DISLIPIDEMIA + OBESITY + RENALINSUF + \\\n ANYLUNGDISEASE + AF + VIH + ANYHEARTDISEASE + ANYCEREBROVASCULARDISEASE + \\\n CONECTIVEDISEASE + LIVER_DISEASE + CANCER))\n\n# HOPE TREATMENTS\n\nIN_TREATMENTS_NAME = ['HOME_OXIGEN_THERAPY', 'IN_ACEI_ARB', 'IN_BETABLOCKERS', 'IN_BETAGONISTINHALED',\n 'IN_GLUCORTICOIDSINHALED', 'IN_DVITAMINSUPLEMENT', 'IN_BENZODIACEPINES', 'IN_ANTIDEPRESSANT']\n\nIN_TREATMENTS_LONG_NAME = ['IN_PREVIOUSASPIRIN', 'IN_OTHERANTIPLATELET', 
'IN_ORALANTICOAGL']\n\nHOME_OXIGEN_THERAPY = 'V03AN01'\nIN_PREVIOUSASPIRIN = ['N02BA', 'B01AC06']\nIN_OTHERANTIPLATELET = ['B01AC24', 'B01AC04', 'B01AC05', 'B01AC11', 'B01AC30', 'B01AC10']\nIN_ORALANTICOAGL = ['B01AA', 'B01AE', 'B01AF']\nIN_ACEI_ARB = 'C09'\nIN_BETABLOCKERS = 'C07A'\nIN_BETAGONISTINHALED = 'R03AC'\nIN_GLUCORTICOIDSINHALED = 'R03BA'\nIN_DVITAMINSUPLEMENT = 'A11CC'\nIN_BENZODIACEPINES = 'N05B'\nIN_ANTIDEPRESSANT = 'N06A'\n\nIN_TREATMENTS = [HOME_OXIGEN_THERAPY, IN_ACEI_ARB, IN_BETABLOCKERS, IN_BETAGONISTINHALED,\n IN_GLUCORTICOIDSINHALED, IN_DVITAMINSUPLEMENT, IN_BENZODIACEPINES, IN_ANTIDEPRESSANT]\n\nIN_TREATMENTS_LONG = [IN_PREVIOUSASPIRIN, IN_OTHERANTIPLATELET, IN_ORALANTICOAGL]\n\nVITALS_TREAT = ['Respiratory Frequency', 'Body Temperature', 'Systolic Blood Pressure']\nLABS_TREAT = ['ABG: Oxygen Saturation (SaO2)', 'Azoto ematico UAR: D-DIMERO', 'PROCALCITONINA: PROCALCITONINA',\n 'C-Reactive Protein (CRP)', 'Alanine Aminotransferase (ALT)', 'LATTICODEIDROGENASI: (LDH) LATTICODEIDROGENASI',\n 'Blood Creatinine', 'Blood Sodium', 'CBC: Leukocytes', 'Emocromo + formula: LINFOCITI (N)', 'CBC: Hemoglobin', 'CBC: Platelets']\n\nVITALS_TREAT_RENAME = {'Respiratory Frequency': 'FAST_BREATHING', \n 'Body Temperature': 'MAXTEMPERATURE_ADMISSION', \n 'Systolic Blood Pressure': 'BLOOD_PRESSURE_ABNORMAL_B'}\n\nLABS_TREAT_RENAME = {'ABG: Oxygen Saturation (SaO2)': 'SAT02_BELOW92', \n 'Azoto ematico UAR: D-DIMERO': 'DDDIMER_B', \n 'PROCALCITONINA: PROCALCITONINA': 'PROCALCITONIN_B',\n 'C-Reactive Protein (CRP)': 'PCR_B', \n 'Alanine Aminotransferase (ALT)': 'TRANSAMINASES_B', \n 'LATTICODEIDROGENASI: (LDH) LATTICODEIDROGENASI': 'LDL_B',\n 'Blood Creatinine': 'CREATININE', \n 'Blood Sodium': 'SODIUM', \n 'CBC: Leukocytes': 'LEUCOCYTES', \n 'Emocromo + formula: LINFOCITI (N)': 'LYMPHOCYTES', \n 'CBC: Hemoglobin': 'HEMOGLOBIN', \n 'CBC: Platelets': 'PLATELETS'}\n\n# TREATMENTS\nTREATMENTS_NAME = ['CORTICOSTEROIDS', 'INTERFERONOR', 'TOCILIZUMAB', 'ANTIBIOTICS', 'ACEI_ARBS', 'CLOROQUINE', 'ANTIVIRAL', 'ANTICOAGULANTS']\nCORTICOSTEROIDS = 'H02'\nINTERFERONOR = 'L03'\nTOCILIZUMAB = 'L04AC07'\nANTIBIOTICS = 'J01'\nACEI_ARBS = 'C09'\nCLOROQUINE = 'P01BA02'\nANTIVIRAL = 'J05AR'\nANTICOAGULANTS = 'B01AB'\n\nTREATMENTS = [CORTICOSTEROIDS, INTERFERONOR, TOCILIZUMAB, ANTIBIOTICS, ACEI_ARBS, CLOROQUINE, ANTIVIRAL, ANTICOAGULANTS]\n\n# HCUP for COMORB_DEATH columns. 
SEPSIS = 2; Acute Renal Failure: 145; Heart Failure: 97; Embolic Event: 105\nCOMORB_DEATH = [2, 145, 97, 105]\nSEPSIS = [2]\nARF = [145]\nHF = [97]\nEMBOLIC = [105]\n\n# Respiratory procedures 9390 = Continuous Respiratory Pressure, 9396 = other oxygen treatment, 9671 = less than 96 hours of ventilation, 9672 = more than 96 hours of ventilation\nPROCEDURE_COLUMNS = ['Proc0', 'Proc1', 'Proc2', 'Proc3', 'Proc4', 'Proc5']\nLIST_PROCEDURES = ['9671', '9672']\n\ndef clean_lab_features(lab_feat):\n features = [x for x in lab_feat\n if all(s not in x for s in LAB_FEATURES_NOT_CONTAIN) and\n all(s != x for s in LAB_FEATURES_NOT_MATCH)]\n return features\n\n\ndef export_comorbidities(df, file_name):\n # Convert (to export for R processing)\n # TODO: Improve this code\n comorb_df = pd.DataFrame(columns=['id', 'comorb'])\n for i in range(len(df)):\n d_temp = df.iloc[i]\n df_temp = pd.DataFrame({'id': [d_temp['NumeroScheda']] * 6,\n 'comorb': [d_temp['Principale']] + \\\n [d_temp[d] for d in DIAGNOSIS_COLUMNS]})\n comorb_df = comorb_df.append(df_temp)\n\n comorb_df = comorb_df.dropna().reset_index()\n comorb_df.to_csv(file_name)\n\n\ndef comorbidities_long(df):\n # Convert (to export for R processing)\n # TODO: Improve this code\n comorb_df = pd.DataFrame(columns=['id', 'comorb'])\n for i in range(len(df)):\n d_temp = df.iloc[i]\n df_temp = pd.DataFrame({'id': [d_temp['NumeroScheda']] * 6,\n 'comorb': [d_temp['Principale']] + \\\n [d_temp[d] for d in DIAGNOSIS_COLUMNS]})\n comorb_df = comorb_df.append(df_temp)\n\n comorb_df = comorb_df.dropna().reset_index()\n return comorb_df\n\n\ndef get_lab_dates(t):\n # TODO: Find better way to do so. Nested try-except is not nice.\n try:\n date = datetime.datetime.strptime(t, '%d/%m/%Y %H:%M')\n except ValueError:\n try:\n date = datetime.datetime.strptime(t, '%d/%m/%Y')\n except ValueError:\n try:\n date = datetime.datetime.strptime(t, '%d/%m/%y %H:%M')\n except ValueError:\n date = datetime.datetime.strptime(t, '%d/%m/%y')\n\n return date\n\ndef get_age(t):\n\n try:\n today = pd.Timestamp(year=2020, month=4, day=1)\n age = np.round((today - t).days/365)\n return age\n except:\n return np.NaN\n\n\ndef cleanup_demographics(demographics):\n\n demographics = demographics[['N_SCHEDA_PS', 'PZ_SESSO_PS', \"PZ_DATA_NASCITA_PS\"]]\n try:\n demographics['PZ_DATA_NASCITA_PS'] = \\\n pd.to_datetime(demographics['PZ_DATA_NASCITA_PS'], format='%Y-%m-%d %H:%M:%S')\n except ValueError:\n demographics.loc[:, 'PZ_DATA_NASCITA_PS'] = \\\n pd.to_datetime(demographics['PZ_DATA_NASCITA_PS'], format='%m/%d/%Y')\n\n demographics.loc[:, 'Age'] = demographics['PZ_DATA_NASCITA_PS'].apply(get_age)\n demographics = demographics.drop('PZ_DATA_NASCITA_PS', axis = 1)\n demographics = demographics.rename(columns = {'N_SCHEDA_PS' : 'NOSOLOGICO', 'PZ_SESSO_PS' : 'Gender'})\n demographics['Gender'] = (demographics['Gender'] == 'F').astype(int)\n demographics['NOSOLOGICO'] = demographics['NOSOLOGICO'].astype(str)\n\n return demographics\n\n\ndef create_vitals_dataset(vitals, patients, lab_tests=True):\n vital_signs = VITAL_SIGNS.copy()\n if lab_tests:\n vital_signs.remove('SaO2') # Remove oxygen saturation if we have lab values (it is there)\n\n # Cleanup commas in numbers\n vitals.loc[:, 'VALORE_PARAMETRO'] = \\\n vitals.loc[:, 'VALORE_PARAMETRO'].apply(lambda x: x.replace(\",\", \".\"))\n\n dataset_vitals = pd.DataFrame(np.nan, columns=vital_signs, index=patients)\n for p in patients:\n vitals_p = vitals[vitals['NOSOLOGICO'] == p][['NOME_PARAMETRO_VITALE', 'VALORE_PARAMETRO']]\n for vital_name 
in vital_signs:\n # Take mean if multiple values\n vital_value = vitals_p[vitals_p['NOME_PARAMETRO_VITALE'] == vital_name]['VALORE_PARAMETRO']\n vital_value = pd.to_numeric(vital_value).mean()\n dataset_vitals.loc[p, vital_name] = vital_value\n\n #dataset_vitals['Temp.'] = fahrenheit_covert(dataset_vitals['Temp.'])\n\n # Adjust missing columns\n dataset_vitals = remove_missing(dataset_vitals, nan_threshold=100)\n\n # Rename to English\n dataset_vitals = dataset_vitals.rename(columns=RENAMED_VITALS_COLUMNS)\n\n return dataset_vitals\n\n\n\ndef create_lab_dataset(lab, patients):\n # Remove missing test (groups) with more than 40% nonzeros\n lab_tests = lab['COD_INTERNO_PRESTAZIONE'].unique().tolist()\n dataset_lab_tests = pd.DataFrame(False, columns=lab_tests, index=patients)\n\n #Unstack the dataset and transform the entries in True/False\n dataset_lab_tests = lab[['NOSOLOGICO', 'COD_INTERNO_PRESTAZIONE', 'VALORE_TESTO']].groupby(['NOSOLOGICO', 'COD_INTERNO_PRESTAZIONE']).count().unstack().notna()\n dataset_lab_tests.columns = [i[1] for i in dataset_lab_tests.columns] # because of groupby, the columns are a tuple\n\n # 30% removes tests that are not present and the COVID-19 lab test\n lab_tests_reduced = remove_missing(dataset_lab_tests, missing_type=False, nan_threshold=100, impute=False)\n\n # Filter data entries per test\n lab_reduced = lab[lab['COD_INTERNO_PRESTAZIONE'].isin(lab_tests_reduced.columns)]\n\n # Create lab features for each exam\n dataset_lab = {}\n for lab_test in lab_tests_reduced.columns:\n # Create dataset\n lab_test_temp = lab_reduced.loc[lab_reduced['COD_INTERNO_PRESTAZIONE'] == lab_test]\n lab_test_features = lab_test_temp['PRESTAZIONE'].unique().tolist()\n\n # Remove unnecessary features\n lab_test_features = clean_lab_features(lab_test_features)\n\n # Add name of lab_test\n test_name = lab[lab['COD_INTERNO_PRESTAZIONE'] == lab_test]['DESCR_PRESTAZIONE'].values[0]\n lab_test_features_names = [test_name.strip() + \": \" + x for x in lab_test_features]\n\n dataset_lab_test = pd.DataFrame(np.nan, columns=lab_test_features_names, index=patients)\n for p in patients:\n lab_p = lab_test_temp[lab_test_temp['NOSOLOGICO'] == p][['COD_INTERNO_PRESTAZIONE', 'DATA_RICHIESTA', 'PRESTAZIONE', 'VALORE']]\n for lab_name in lab_test_features:\n if any(lab_p['PRESTAZIONE'] == lab_name):\n lab_p_name = lab_p[lab_p['PRESTAZIONE'] == lab_name]\n idx = lab_p_name['DATA_RICHIESTA'].idxmin() # Pick first date of test if multiple\n dataset_lab_test.loc[p, test_name.strip() + \": \" + lab_name] = lab_p_name.loc[idx]['VALORE']\n dataset_lab[lab_test] = dataset_lab_test\n\n # Create full dataset\n dataset_lab_full = pd.concat([v for _,v in dataset_lab.items()],\n axis=1, sort=True).astype(np.float64)\n dataset_lab_full = remove_missing(dataset_lab_full, nan_threshold=100)\n\n\n # Rename dataset laboratory\n dataset_lab_full = dataset_lab_full.rename(columns=RENAMED_LAB_COLUMNS)\n\n return dataset_lab_full\n\ndef create_dataset_comorbidities(comorb_long, icd_category, patients):\n\n #Load the diagnoses dict\n if icd_category == 9:\n icd_dict = pd.read_csv('../../../analyzer/hcup_dictionary_icd9.csv')\n else:\n icd_dict = pd.read_csv('../../../analyzer/hcup_dictionary_icd10.csv')\n\n #The codes that are not mapped are mostly procedure codes or codes that are not of interest\n icd_descr = pd.merge(comorb_long, icd_dict, how='inner', left_on=['DIAGNOSIS_CODE'], right_on=['DIAGNOSIS_CODE'])\n\n #Create a list with the categories that we want\n comorb_descr = 
icd_descr.loc[icd_descr['HCUP_ORDER'].isin(HCUP_LIST)]\n\n #Limit only to the HCUP Description and drop the duplicates\n comorb_descr = comorb_descr[['NOSOLOGICO','GROUP_HCUP']].drop_duplicates()\n\n #Convert from long to wide format\n comorb_descr = pd.get_dummies(comorb_descr, columns=['GROUP_HCUP'], prefix=['GROUP_HCUP'])\n\n #Now we will remove the GROUP_HCUP_ from the name of each column\n comorb_descr = comorb_descr.rename(columns = lambda x: x.replace('GROUP_HCUP_', ''))\n\n #Let's combine the diabetes columns to one\n comorb_descr['Diabetes'] = comorb_descr[[\"Diabetes mellitus with complications\", \"Diabetes mellitus without complication\"]].max(axis=1)\n\n #Drop the other two columns\n comorb_descr = comorb_descr.drop(columns=['Diabetes mellitus with complications', 'Diabetes mellitus without complication'])\n\n dataset_comorbidities = pd.DataFrame(comorb_descr.groupby(['NOSOLOGICO'], as_index=False).max())\n\n df_patients = pd.DataFrame(patients, columns = ['NOSOLOGICO'])\n dataset_comorbidities = pd.merge(df_patients, dataset_comorbidities, how='left',\n left_on=['NOSOLOGICO'], right_on = ['NOSOLOGICO'])\n dataset_comorbidities = dataset_comorbidities.fillna(0)\n \n return dataset_comorbidities\n\ndef create_dataset_discharge(discharge, patients, icu=None):\n\n dataset_discharge = pd.DataFrame(columns=DEMOGRAPHICS_FEATURES, index=patients)\n dataset_discharge.loc[:, DEMOGRAPHICS_FEATURES] = discharge[['NOSOLOGICO'] + DEMOGRAPHICS_FEATURES].set_index('NOSOLOGICO')\n #dataset_discharge.loc[:, 'Gender'] = dataset_discharge.loc[:, 'Gender'].astype('category')\n #dataset_discharge.Gender = dataset_discharge.Gender.cat.codes.astype('category')\n dataset_discharge = dataset_discharge[['Outcome']]\n dataset_discharge.loc[:, 'Outcome'] = dataset_discharge.loc[:, 'Outcome'].astype('category')\n\n if icu is not None:\n dataset_discharge = dataset_discharge.join(icu.set_index('NOSOLOGICO'))\n\n\n return dataset_discharge\n\n\ndef cleanup_discharge_info(discharge_info):\n\n covid_patients = discharge_info['Principale'].isin(LIST_DIAGNOSIS)\n\n for d in DIAGNOSIS_COLUMNS:\n covid_patients = covid_patients | discharge_info[d].isin(LIST_DIAGNOSIS)\n\n discharge_info = discharge_info[covid_patients]\n\n # Keep discharge codes and transform the dependent variable to binary\n discharge_info = discharge_info[discharge_info['Modalità di dimissione'].isin(DISCHARGE_CODES)]\n discharge_info['Modalità di dimissione'] = \\\n (discharge_info['Modalità di dimissione'] == DISCHARGE_CODE_RELEASED).apply(int) #transform to binary\n\n # Drop Duplicated Observations\n discharge_info.drop_duplicates(['NumeroScheda', 'Modalità di dimissione'],\n inplace=True)\n discharge_info.drop_duplicates(['NumeroScheda'], inplace=True)\n\n #Keep only important columns and rename them\n discharge_info = discharge_info[['NumeroScheda', 'Sesso', 'Età', 'Modalità di dimissione']]\n discharge_info = discharge_info.rename(\n columns={'NumeroScheda': 'NOSOLOGICO',\n 'Sesso': 'Gender',\n 'Età':'Age',\n 'Modalità di dimissione':'Outcome'})\n discharge_info.NOSOLOGICO = discharge_info.NOSOLOGICO.apply(str)\n\n return discharge_info\n\n\ndef fahrenheit_covert(temp_celsius):\n temp_fahrenheit = ((temp_celsius * 9)/5)+ 32\n return temp_fahrenheit\n\ndef filter_patients(datasets):\n\n patients = datasets[0]['NOSOLOGICO'].astype(np.int64)\n\n # Get common patients\n for d in datasets[1:]:\n patients = d[d['NOSOLOGICO'].astype(np.int64).isin(patients)]['NOSOLOGICO'].unique()\n\n\n # Remove values not in patients (in place)\n for d in 
datasets:\n d.drop(d[~d['NOSOLOGICO'].astype(np.int64).isin(patients)].index, inplace=True)\n\n return patients\n\n\ndef get_swabs(lab):\n\n covid = lab[lab.COD_INTERNO_PRESTAZIONE == 'COV19']\n covid = covid[covid.VALORE_TESTO.isin(['POSITIVO', 'Negativo', 'Debolmente positivo'])]\n covid.VALORE_TESTO = covid.VALORE_TESTO.isin(['POSITIVO','Debolmente positivo']).astype(int).astype('category')\n covid = covid[~ covid.NOSOLOGICO.duplicated()] # drop duplicated values\n swab = covid[['NOSOLOGICO', 'VALORE_TESTO']]\n swab = swab.rename(columns = {'VALORE_TESTO': 'Swab'})\n swab['Swab'] = swab['Swab'].astype('int')\n\n return swab\n\ndef get_regimen(cloroquine, antiviral, anticoagulant):\n if cloroquine == 0:\n return 'Non-Chloroquine'\n\n elif cloroquine == 1 and antiviral == 1 and anticoagulant == 1:\n return 'All'\n \n elif cloroquine == 1 and antiviral == 1 and anticoagulant == 0:\n return 'Chloroquine and Antivirals'\n\n elif cloroquine == 1 and antiviral == 0 and anticoagulant == 1:\n return 'Chloroquine and Anticoagulants'\n\n elif cloroquine == 1 and antiviral == 0 and anticoagulant == 0:\n return 'Chloroquine Only'\n \ndef check_treatment(l, obs):\n c = 0 \n for i in l:\n if i in obs:\n c += 1\n return c > 0 ","repo_name":"COVIDAnalytics/covid19_hypertensive_treatments","sub_path":"calculator/analyzer/loaders/cremona/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":26792,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
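The nested try/except in get_lab_dates above carries a TODO; one flatter alternative (a sketch using the same four formats) is to loop over the candidate formats and return the first parse that succeeds.

import datetime

DATE_FORMATS = ['%d/%m/%Y %H:%M', '%d/%m/%Y', '%d/%m/%y %H:%M', '%d/%m/%y']

def parse_lab_date(t):
    # Try each known format in order; fall through to the next on failure.
    for fmt in DATE_FORMATS:
        try:
            return datetime.datetime.strptime(t, fmt)
        except ValueError:
            continue
    raise ValueError(f"Unrecognized lab date: {t!r}")

print(parse_lab_date('05/03/2020 14:30'))  # 2020-03-05 14:30:00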
+{"seq_id":"23772852769","text":"\"\"\"\n    Client side:\n    1) Accept a message from the user\n    2) Encrypt it\n    3) Build a list [ciphertext, key]\n    4) Send it to the server\n    5) Receive the decrypted message back from the server\n\"\"\"\n\nimport sys\nimport pickle\nimport socket\nserverHost = 'localhost'\nserverPort = 9010\n\nmessage = 'hello network world'\nkey = 3\nx = pickle.dumps([message, key])\n\nsockobj = socket.socket()\nsockobj.connect((serverHost, serverPort))\nsockobj.send(x)\ndata = sockobj.recv(1024)\nprint('Client received: ', pickle.loads(data))\n\nsockobj.close()\ny = pickle.loads(data)\nprint(y)","repo_name":"LeoLevin91/Labs-OC","sub_path":"Lab_2/Cript_Csezar py/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
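The docstring above describes a matching server; a minimal sketch of that counterpart follows. Only the port and the pickled [message, key] protocol come from the client; the Caesar-shift details are an assumption based on the lab's name (Cript_Csezar).

import pickle
import socket

def caesar(text, key):
    # Shift alphabetic characters by `key` positions, leaving others as-is.
    return ''.join(chr((ord(c) - 97 + key) % 26 + 97) if c.isalpha() else c
                   for c in text.lower())

server = socket.socket()
server.bind(('localhost', 9010))
server.listen(1)
conn, _ = server.accept()
message, key = pickle.loads(conn.recv(1024))
conn.send(pickle.dumps(caesar(message, key)))
conn.close()
server.close()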
+{"seq_id":"12388963833","text":"from selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException\nimport time\n\nbrowser = webdriver.Chrome(executable_path=r'E:\software\browser\chromedriver_win32\chromedriver.exe')\n\n# Send the request\nbrowser.get('http://www.runoob.com/try/try.php?filename=jqueryui-api-droppable')\ntime.sleep(2)\n\n# Switch into the frame\nbrowser.switch_to.frame('iframeResult')\n\n# Find an element inside the frame\n# res = browser.find_element_by_class_name('ui-droppable')\n\n# Look up an element that does not exist inside the frame\ntry:\n    res = browser.find_element_by_class_name('logo')\n    print(res)\nexcept NoSuchElementException as e:\n    print(e)\n    # Switch back to the parent frame\n    browser.switch_to.parent_frame()\n    res = browser.find_element_by_class_name('logo')\n    print(res)\n\n","repo_name":"theme716/small-routine","sub_path":"insect/9.nine_day/10.selenium_框架切换.py","file_name":"10.selenium_框架切换.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
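A variant of the same frame-switching flow that replaces the fixed time.sleep with an explicit wait (selenium 3 API, matching the find_element_by_* calls above); this assumes chromedriver is on PATH.

from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

browser = webdriver.Chrome()
browser.get('http://www.runoob.com/try/try.php?filename=jqueryui-api-droppable')

# Wait up to 10 s for the frame to exist, then switch into it in one step.
WebDriverWait(browser, 10).until(
    EC.frame_to_be_available_and_switch_to_it('iframeResult'))
print(browser.find_element_by_class_name('ui-droppable'))
browser.switch_to.parent_frame()
browser.quit()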
+{"seq_id":"12853594074","text":"import os\n\nimport pytest\n\nimport salt.modules.kmod as kmod\nfrom salt.exceptions import CommandExecutionError\nfrom tests.support.mock import MagicMock, patch\n\n\n@pytest.fixture\ndef configure_loader_modules():\n return {kmod: {}}\n\n\ndef test_available():\n \"\"\"\n Tests return a list of all available kernel modules\n \"\"\"\n with patch(\"salt.modules.kmod.available\", MagicMock(return_value=[\"kvm\"])):\n assert [\"kvm\"] == kmod.available()\n\n\ndef test_check_available():\n \"\"\"\n Tests if the specified kernel module is available\n \"\"\"\n with patch(\"salt.modules.kmod.available\", MagicMock(return_value=[\"kvm\"])):\n assert kmod.check_available(\"kvm\") is True\n\n\ndef test_lsmod():\n \"\"\"\n Tests return information about currently loaded modules\n \"\"\"\n ret_str = \"\"\"Module Size Used by\n kvm_intel 233472 0\n \"\"\"\n expected = [{\"size\": \"233472\", \"module\": \"kvm_intel\", \"depcount\": \"0\", \"deps\": []}]\n mock_cmd = MagicMock(return_value=ret_str)\n with patch(\n \"salt.utils.path.which\", MagicMock(side_effect=[None, \"/sbin/lsmod\"])\n ), patch.dict(kmod.__salt__, {\"cmd.run\": mock_cmd}):\n with pytest.raises(CommandExecutionError):\n kmod.lsmod()\n assert expected == kmod.lsmod()\n\n\n@pytest.mark.skipif(\n not os.path.isfile(\"/etc/modules\"), reason=\"/etc/modules not present\"\n)\ndef test_mod_list():\n \"\"\"\n Tests return a list of the loaded module names\n \"\"\"\n with patch(\n \"salt.modules.kmod._get_modules_conf\",\n MagicMock(return_value=\"/etc/modules\"),\n ):\n with patch(\n \"salt.modules.kmod._strip_module_name\", MagicMock(return_value=\"lp\")\n ):\n assert [\"lp\"] == kmod.mod_list(True)\n\n mock_ret = [{\"size\": 100, \"module\": None, \"depcount\": 10, \"deps\": None}]\n with patch(\"salt.modules.kmod.lsmod\", MagicMock(return_value=mock_ret)):\n assert [None] == kmod.mod_list(False)\n\n\ndef test_load():\n \"\"\"\n Tests to loads specified kernel module.\n \"\"\"\n mod = \"cheese\"\n err_msg = \"Module too moldy, refusing to load\"\n mock_persist = MagicMock(return_value={mod})\n mock_lsmod = MagicMock(\n return_value=[{\"size\": 100, \"module\": None, \"depcount\": 10, \"deps\": None}]\n )\n mock_run_all_0 = MagicMock(return_value={\"retcode\": 0})\n mock_run_all_1 = MagicMock(return_value={\"retcode\": 1, \"stderr\": err_msg})\n\n with patch(\"salt.modules.kmod._set_persistent_module\", mock_persist):\n with patch(\n \"salt.utils.path.which\",\n MagicMock(side_effect=[None, \"/sbin/modprobe\", \"/sbin/modprobe\"]),\n ), patch(\"salt.modules.kmod.lsmod\", mock_lsmod):\n with patch.dict(\n kmod.__salt__, {\"cmd.run_all\": mock_run_all_0}\n ), pytest.raises(CommandExecutionError):\n kmod.load(mod, True)\n\n with patch.dict(kmod.__salt__, {\"cmd.run_all\": mock_run_all_0}):\n assert [mod] == kmod.load(mod, True)\n\n with patch.dict(kmod.__salt__, {\"cmd.run_all\": mock_run_all_1}):\n assert \"Error loading module {}: {}\".format(mod, err_msg) == kmod.load(\n mod\n )\n\n\ndef test_is_loaded():\n \"\"\"\n Tests if specified kernel module is loaded.\n \"\"\"\n with patch(\"salt.modules.kmod.mod_list\", MagicMock(return_value={\"lp\"})):\n assert kmod.is_loaded(\"lp\") is True\n\n\ndef test_remove():\n \"\"\"\n Tests to remove the specified kernel module\n \"\"\"\n mod = \"cheese\"\n err_msg = \"Cannot find module: it has been eaten\"\n mock_persist = MagicMock(return_value={mod})\n mock_lsmod = MagicMock(\n return_value=[{\"size\": 100, \"module\": None, \"depcount\": 10, \"deps\": None}]\n )\n 
mock_run_all_0 = MagicMock(return_value={\"retcode\": 0})\n mock_run_all_1 = MagicMock(return_value={\"retcode\": 1, \"stderr\": err_msg})\n\n with patch(\"salt.modules.kmod._remove_persistent_module\", mock_persist):\n with patch(\n \"salt.utils.path.which\",\n MagicMock(side_effect=[None, \"/sbin/rmmod\", \"/sbin/rmmod\", \"/sbin/rmmod\"]),\n ), patch(\"salt.modules.kmod.lsmod\", mock_lsmod):\n with patch.dict(kmod.__salt__, {\"cmd.run_all\": mock_run_all_0}):\n with pytest.raises(CommandExecutionError):\n kmod.remove(mod)\n\n assert [mod] == kmod.remove(mod, True)\n\n assert [] == kmod.remove(mod)\n\n with patch.dict(kmod.__salt__, {\"cmd.run_all\": mock_run_all_1}):\n assert \"Error removing module {}: {}\".format(\n mod, err_msg\n ) == kmod.remove(mod, True)\n","repo_name":"saltstack/salt","sub_path":"tests/pytests/unit/modules/test_kmod.py","file_name":"test_kmod.py","file_ext":"py","file_size_in_byte":4632,"program_lang":"python","lang":"en","doc_type":"code","stars":13606,"dataset":"github-code","pt":"78"}
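The tests above lean on one pattern throughout: patch the collaborator, then assert on the thin wrapper around it. A stripped-down, self-contained illustration with toy functions (not salt's) is below.

from unittest.mock import MagicMock, patch

def available():
    # Stand-in for salt.modules.kmod.available; patched in the test below.
    raise RuntimeError("should be patched")

def check_available(mod):
    return mod in available()

def test_check_available():
    with patch(__name__ + ".available", MagicMock(return_value=["kvm"])):
        assert check_available("kvm") is True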
+{"seq_id":"22641842406","text":"import platform\nimport numpy as np\nimport sys\n\nsys.path.append('C:\\github_projects\\PythonPractice\\simple_CNN')\nsys.path.append('C:\\GithubProject\\PythonPractice\\simple_CNN')\n\nfrom layer import Conv2D, FC, Activations\nfrom datagen import DataGenerator\n\n\n# def change_smth(kernel):\n# kernel = np.expand_dims(kernel, 0)\n\nconv = Conv2D(3, 2, 'same', 1, 'sigmoid')\n\nif platform.system() == 'Windows':\n folder = 'C:/data/train_data'\n test_folder = 'C:/data/test_data'\nelif platform.system() == 'Linux':\n folder = '/home/shaoheng/Documents/PythonPractice/handwritedigit'\n\ndata_generator = DataGenerator(\n folder, 10, (16, 16), class_num=10)\n\ndef test_CNN_2D_with_FC():\n fc_layer = FC(10, 'sigmoid')\n conv = Conv2D(filter_size=3, channels=2, padding='same', stride=1, activation='sigmoid')\n\n x, y = data_generator.load_data()\n x = np.expand_dims(x, -1) # the data is 1-channel, add the channel to the last axis\n\n x = conv.forward_prop(x)\n x = np.reshape(x, (np.shape(x)[0], -1))\n x = fc_layer.forward_prop(x)\n\n w, delta = fc_layer.back_prop(label=y)\n w, delta = conv.back_prop(w_nextlayer=w, delta_nextlayer=delta, next_layer='FC')\n \n assert x.shape == (10, 10)\n\ndef test_put_zeros():\n matrix = np.arange(18).reshape((2, 3, 3))\n conv = Conv2D(filter_size=3, channels=2, padding='same', stride=2, activation='sigmoid')\n matrix = conv.put_zeros(matrix, 2, del_last_ele=True)\n print(matrix)\n assert matrix.shape == (2, 5, 5)\n\n\ndef test_CNN_2D_with_CNN_2D():\n fc_layer = FC(10, 'sigmoid')\n conv_1 = Conv2D(filter_size=3, channels=2, padding='same', stride=1, activation='sigmoid')\n conv_2 = Conv2D(filter_size=5, channels=4, padding='same', stride=2, activation='sigmoid')\n\n x, y = data_generator.load_data()\n x = np.expand_dims(x, -1) # the data is 1-channel, add the channel to the last axis\n\n x = conv_1.forward_prop(x)\n x = conv_2.forward_prop(x)\n\n x = np.reshape(x, (np.shape(x)[0], -1))\n x = fc_layer.forward_prop(x)\n\n w, delta = fc_layer.back_prop(label=y)\n w, delta = conv_2.back_prop(w_nextlayer=w, delta_nextlayer=delta, next_layer='FC')\n w, delta = conv_1.back_prop(w_nextlayer=w, delta_nextlayer=delta, next_layer='Conv2D')\n \n assert x.shape == (10, 10)\n\n\ndef test_relu_activation():\n matrix = np.arange(9).reshape((3, 3)) - 5\n relu = Activations().relu\n relu_deri = Activations().relu_derivative\n matrix_after_relu = relu(matrix)\n print(matrix_after_relu)\n matrix_relu_deri = relu_deri(matrix)\n print(matrix_relu_deri)\n assert matrix_after_relu.shape == (3, 3)\n \nif __name__ == '__main__':\n test_relu_activation()\n","repo_name":"hankerkuo/PythonPractice","sub_path":"simple_CNN/test_pytest.py","file_name":"test_pytest.py","file_ext":"py","file_size_in_byte":2652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
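The two hard-coded Windows paths prepended to sys.path above only work on specific machines; a portable alternative (an assumption that the test file lives one level below the repo root) derives the path from __file__.

import os
import sys

# Resolve the repository root relative to this file, then add simple_CNN.
REPO_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.join(REPO_ROOT, 'simple_CNN'))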
+{"seq_id":"2096649979","text":"import json\nfrom functools import reduce\n\nimport BaseHTTPServer\nimport requests\nfrom urlparse import parse_qs\n\n\nclass Concourse:\n def __init__(self, url, token=None):\n self.__url = url\n self.__token = token\n self.__jobs = []\n\n # started > failed > succeeded\n def __new_status(self, newStatus, oldStatus=None):\n if not oldStatus:\n return newStatus\n elif oldStatus == 'started' or newStatus == 'started':\n return 'started'\n elif oldStatus == 'failed' or newStatus == 'failed':\n return 'failed'\n elif oldStatus == 'errored' or newStatus == 'errored':\n return 'failed'\n else:\n return newStatus\n\n def __get_jobs(self):\n response = requests.get(self.__url + \"/api/v1/jobs\", headers={'Authorization': self.__token})\n return json.loads(response.content)\n\n def group_ci_status_by_teams(self):\n jobs = self.__get_jobs()\n status = {}\n\n for job in jobs:\n if job['next_build'] and job['next_build']['status']:\n status[job['team_name']] = self.__new_status(job['next_build']['status'],\n status.get(job['team_name'], None))\n if job['finished_build'] and job['finished_build']['status']:\n status[job['team_name']] = self.__new_status(job['finished_build']['status'],\n status.get(job['team_name'], None))\n\n return status\n\n def status_from_team(self, teamOrTeams):\n states = self.group_ci_status_by_teams()\n\n if type(teamOrTeams) is str:\n return states[teamOrTeams]\n else:\n teamStats = [states[team] for team in states if team in teamOrTeams]\n state = reduce((lambda x, y: self.__new_status(x, y)), teamStats)\n return state\n\n def wait_for_token(self):\n global CI_TOKEN\n\n print(\"Login to concourse\")\n print\n print(self.__url + \"/sky/login?redirect_uri=http://127.0.0.1:64354/auth/callback\")\n print\n httpd = BaseHTTPServer.HTTPServer(('127.0.0.1', 64354), self.__ReadTokenHandler)\n try:\n httpd.handle_request()\n except KeyboardInterrupt:\n pass\n\n if not CI_TOKEN:\n print(\"No token from callback. Exit\")\n else:\n self.__token = CI_TOKEN\n return self.__token\n\n def set_token(self, token):\n self.__token = token\n\n class __ReadTokenHandler(BaseHTTPServer.BaseHTTPRequestHandler):\n def do_GET(s):\n global CI_TOKEN\n\n s.send_response(200)\n s.send_header(\"Content-type\", \"text/text\")\n s.end_headers()\n\n if s.path.startswith(\"/auth/callback?\"):\n query_str = s.path[s.path.index(\"?\") + 1:]\n params = parse_qs(query_str)\n if params.get('token'):\n CI_TOKEN = params.get('token')[0]\n print(\"received token.\")\n s.wfile.write(\"Ok, received token. You can close this window now.\")\n else:\n print(\"wrong callback parameter\")\n s.wfile.write(\"Call it like '/auth/callback?token=XXX'.\")\n else:\n print(\"wrong callback parameter\")\n s.wfile.write(\"Call it like '/auth/callback?token=XXX'.\")\n","repo_name":"innogy-digital/hue-concourse","sub_path":"hueci/concourse.py","file_name":"concourse.py","file_ext":"py","file_size_in_byte":3452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
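The started > failed/errored > succeeded precedence encoded in __new_status above is easiest to see on plain data; a standalone restatement of that reduce:

from functools import reduce

def new_status(new, old=None):
    if not old:
        return new
    if 'started' in (new, old):
        return 'started'
    if 'failed' in (new, old) or 'errored' in (new, old):
        return 'failed'
    return new

# One errored build taints the whole team's status.
print(reduce(new_status, ['succeeded', 'errored', 'succeeded']))  # failed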
+{"seq_id":"44680649253","text":"from typing import *\nfrom collections import defaultdict, deque\n\n\n# Idea from 4 color theorem\nclass Solution:\n def possibleBipartition(self, n: int, dislikes: List[List[int]]) -> bool:\n planar = defaultdict(list)\n queue, label = deque(), defaultdict(int)\n\n for who, whom in dislikes:\n planar[who].append(whom)\n planar[whom].append(who)\n\n for person in range(1, n+1):\n if person not in label:\n queue.append(person)\n label[person] = 1\n\n # BFS\n while queue:\n cur = queue.popleft()\n for dislike in planar[cur]:\n if dislike not in label:\n label[dislike] = -label[cur]\n queue.append(dislike)\n # not possible to group by two label\n if dislike in label and label[dislike] != -label[cur]:\n return False\n\n return True\n\n\n# WA\n# class Solution:\n# def possibleBipartition(self, n: int, dislikes: List[List[int]]) -> bool:\n# dislike, like = collections.defaultdict(list), collections.defaultdict(list)\n# people = [l for l in range(1, n+1)]\n# person = 1\n# stack, visited = [person], []\n#\n# for who, whom in dislikes:\n# dislike[who].append(whom)\n# dislike[whom].append(who)\n#\n# for i in range(1, n + 1):\n# like[i].extend([p for p in people if p != i and p not in dislike[i]])\n#\n# # DFS\n# while stack:\n# cur = stack.pop()\n# for neighbor in like[cur]:\n# if neighbor not in visited and neighbor not in dislike[person]:\n# stack.append(neighbor)\n# visited.append(cur)\n#\n# for i, p1 in enumerate(visited[1:], start=1):\n# for p2 in visited[i+1:]:\n# if p1 in dislike[p2]:\n# return False\n#\n# others = [p for p in people if p not in visited]\n# for i, p1 in enumerate(others):\n# for p2 in others[i+1:]:\n# if p1 in dislike[p2]:\n# return False\n#\n# return True\n","repo_name":"childult-programmer/algorithm_study","sub_path":"6.graph/DAY 5/Possible_Bipartition_LSI.py","file_name":"Possible_Bipartition_LSI.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
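Quick sanity checks for the BFS two-coloring, assuming the Solution class above is in scope; the second case is an odd cycle, which no two-group split can satisfy.

print(Solution().possibleBipartition(4, [[1, 2], [1, 3], [2, 4]]))  # True
print(Solution().possibleBipartition(3, [[1, 2], [1, 3], [2, 3]]))  # False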
+{"seq_id":"24119037009","text":"from datetime import datetime, timedelta\n\nimport pytz\n\nfrom addons.discord_bot.config import Config\nfrom addons.plex.client import client\n\n\ndef append_items():\n response = \"**QUEUE ITEMS**\"\n response += \"\\n--------------------\\n\"\n timezone = pytz.timezone(Config.APP_TIMEZONE)\n play_time = datetime.now(timezone)\n for item in client.items:\n runtime = (item.duration - item.viewOffset)\n time_delta = timedelta(milliseconds=runtime)\n response += f\"{item.title} ({item.year}) **[{play_time.strftime('%b %d | %I:%M %p %Z')}]**\\n\"\n play_time = play_time + time_delta\n return response\n\n\ndef append_search(results):\n response = \"**SEARCH RESULTS**\"\n response += \"\\n--------------------\\n\"\n response += \"*(use /play command with Media ID to play item)*\\n\"\n response += \"*(use /add command with Media ID to queue item next)*\"\n response += \"\\n--------------------\\n\"\n for result in results:\n if result.type == \"collection\":\n continue\n response += f\"{result.title} ({result.year}) **[media_id: {result.ratingKey}]**\\n\"\n return response\n","repo_name":"jacobfholland/plex-discord-bot","sub_path":"addons/discord_bot/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
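The scheduling arithmetic in append_items, isolated from Plex and Discord: each item's announced start time is the previous start plus its remaining runtime (duration - viewOffset, both in milliseconds). The sample items are made up.

from datetime import datetime, timedelta

items = [{"title": "A", "duration": 7_200_000, "viewOffset": 600_000},
         {"title": "B", "duration": 5_400_000, "viewOffset": 0}]

play_time = datetime.now()
for item in items:
    print(item["title"], play_time.strftime("%I:%M %p"))
    play_time += timedelta(milliseconds=item["duration"] - item["viewOffset"])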
+{"seq_id":"70945423932","text":"# Solve time: about 20 minutes\n# Most of it went to Python syntax rather than algorithm design; I need to get more comfortable with Python.\n\nn, m, k = map(int, input().split(\" \"))\nnum = list(map(int, input().split(\" \")))\nmax_num = max(num)  # O(N)\nnum.remove(max_num)  # O(N)\nsec_max_num = max(num)  # O(N)\n\n\n# Overall complexity: O(N + i)\ndef big_num(m, k):\n\n    i = 0\n    sum_num = 0\n    while True:  # O(i)\n        j = 0\n        while j != k and i != m:\n            sum_num += max_num\n            j += 1\n            i += 1\n        if i == m:\n            break\n        sum_num += sec_max_num\n        i += 1\n\n    print(sum_num)\n\n\n# Improved code\n# Complexity: O(N)\ndef improved_big_num(m, k):\n    # Use floor division: each complete block of (k+1) terms contributes\n    # max_num*k + sec_max_num, and the remainder is all max_num.\n    sum_num = (m // (k+1)) * (max_num*k + sec_max_num) + max_num*(m % (k+1))\n    print(sum_num)\n    # e.g. add m=8 numbers with at most k=3 consecutive repeats\n    # m//(k+1) * (max*k + sec) + max*(m%(k+1))\n\n\nbig_num(m, k)\nimproved_big_num(m, k)\n","repo_name":"jeonjw95/coding-test-python","sub_path":"greedy/bigNumber.py","file_name":"bigNumber.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
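A standalone check of the closed form against an explicitly built greedy sequence; the sample numbers are the classic m=8, k=3 instance of this problem.

def greedy_sum(nums, m, k):
    a, b = sorted(nums)[-1], sorted(nums)[-2]
    return (m // (k + 1)) * (a * k + b) + a * (m % (k + 1))

# Greedy sequence for [2, 4, 5, 4, 6] with m=8, k=3: 6 6 6 5 6 6 6 5 -> 46
assert greedy_sum([2, 4, 5, 4, 6], 8, 3) == 46
print(greedy_sum([2, 4, 5, 4, 6], 8, 3))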
+{"seq_id":"21473989601","text":"import argparse\nfrom detect_card import detect_card\n\ndef main():\n\tparser = argparse.ArgumentParser(description='Card Detection')\n\tparser.add_argument('--image', help='image path')\n\tparser.add_argument('--directory', help='directory path')\n\targs = parser.parse_args()\n\t\n\tcard_obj=detect_card(Image_Path=args.image,\n\t\t Directory_Path=args.directory)\n\tcard_obj.get_cards()\n\n\nif __name__ == \"__main__\":\n\tmain()","repo_name":"saurabhbagdiya/verificient_assignment","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
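A quick demonstration (with a hypothetical path) of how the parser above maps flags onto attributes, without touching detect_card.

import argparse

parser = argparse.ArgumentParser(description='Card Detection')
parser.add_argument('--image', help='image path')
parser.add_argument('--directory', help='directory path')
args = parser.parse_args(['--image', 'samples/card.jpg'])
print(args.image, args.directory)  # samples/card.jpg None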
+{"seq_id":"32137467015","text":"import random\r\nimport time\r\nimport main\r\nimport keyboard\r\n\r\n\r\ndef math_test():\r\n    for i in range(5):\r\n        first_number = random.randint(1, 10)\r\n        second_number = random.randint(1, 10)\r\n        sign_number = random.randint(0, 3)\r\n\r\n        if sign_number == 0:\r\n            result = first_number * second_number\r\n            player_result = int(input(f'{first_number} * {second_number} '))\r\n\r\n        if sign_number == 1:\r\n            # make the dividend a multiple of the divisor so the quotient\r\n            # is an integer and int(input()) can actually match it\r\n            first_number = first_number * second_number\r\n            result = first_number // second_number\r\n            player_result = int(input(f'{first_number} / {second_number} '))\r\n\r\n        if sign_number == 2:\r\n            result = first_number + second_number\r\n            player_result = int(input(f'{first_number} + {second_number} '))\r\n\r\n        if sign_number == 3:\r\n            result = first_number - second_number\r\n            player_result = int(input(f'{first_number} - {second_number} '))\r\n\r\n        if result == player_result:\r\n            print('Correct!')\r\n        else:\r\n            print('Wrong!')\r\n        time.sleep(1)\r\n    print('1. Play again')\r\n    print('2. Exit')\r\n\r\n    if int(input()) == 1:\r\n        math_test()\r\n    else:\r\n        main.game_choice()\r\n\r\n\r\nif __name__ == '__main__':\r\n    math_test()\r\n\r\n\r\nheroes = []\r\n\r\n\r\ndef picker():\r\n    print('=== COMMANDS ===')\r\n    print('1. ADD A HERO')\r\n    print('2. Pick')\r\n    print('3. Exit')\r\n    print('4. Delete all')\r\n    time.sleep(1)\r\n    command = int(input())\r\n    if command == 1:\r\n        ap_heroes = input('Type a hero to add to the list \n')\r\n        heroes.append(ap_heroes)\r\n        picker()\r\n    if command == 2:\r\n        try:\r\n            pik_rand = random.randint(0, len(heroes) - 1)\r\n            print(heroes)\r\n            print(heroes[pik_rand])\r\n            picker()\r\n        except ValueError:\r\n            print('There are no heroes in the list')\r\n            picker()\r\n    if command == 3:\r\n        main.game_choice()\r\n    if command == 4:\r\n        heroes.clear()\r\n        picker()\r\n\r\n\r\nif __name__ == '__main__':\r\n    picker()\r\n\r\n\r\ndef letter_pick():\r\n    letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g',\r\n               'h', 'i', 'j', 'k', 'l', 'm',\r\n               'n', 'o', 'p', 'q', 'r', 's', 't',\r\n               'u', 'v', 'w', 'x', 'y', 'z']\r\n    correct_letter = 0\r\n    for i in range(1, 10):\r\n        a = random.randint(0, 25)  # valid indices for the 26 letters\r\n        time.sleep(1)\r\n        print(letters[a])\r\n        if keyboard.read_key() == letters[a]:\r\n            print('Correct!')\r\n            correct_letter += 1\r\n        else:\r\n            print('Wrong!')\r\n    print(f'You got {correct_letter} correct presses')\r\n    print('1. Play again')\r\n    print('2. Exit')\r\n    if int(input()) == 1:\r\n        letter_pick()\r\n    else:\r\n        main.game_choice()\r\n\r\n\r\nif __name__ == '__main__':\r\n    letter_pick()\r\n\r\n\r\ndef notes():\r\n    print('=== COMMANDS ===')\r\n    print('1. Add a note')\r\n    print('2. Delete notes')\r\n    print('3. Read all notes')\r\n    print('4. ESC')\r\n    command = int(input())\r\n    if command == 1:\r\n        f = open('Notes.txt', 'a')\r\n        f.write(input('What shall we write? '))\r\n        f.close()\r\n        notes()\r\n\r\n    if command == 2:\r\n        f = open('Notes.txt', 'w')\r\n        f.close()\r\n        notes()\r\n    if command == 3:\r\n        f = open('Notes.txt', 'r')\r\n        print(f.read())\r\n        f.close()\r\n        notes()\r\n    if command == 4:\r\n        main.game_choice()","repo_name":"SergeyDff/Text-assistant","sub_path":"dff/games.py","file_name":"games.py","file_ext":"py","file_size_in_byte":3649,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
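Every menu above calls int(input()) directly, so a stray letter raises ValueError and kills the program; a small helper (an addition, not in the original) would make the menus robust.

def ask_int(prompt=''):
    # Keep asking until the user types something that parses as an int.
    while True:
        try:
            return int(input(prompt))
        except ValueError:
            print('Please enter a number.')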
+{"seq_id":"71493789693","text":"from collections import OrderedDict\nimport os\nimport re\nfrom typing import (\n Dict,\n Mapping,\n MutableMapping,\n MutableSequence,\n Optional,\n Sequence,\n Tuple,\n Type,\n Union,\n cast,\n)\n\nfrom google.api_core import client_options as client_options_lib\nfrom google.api_core import exceptions as core_exceptions\nfrom google.api_core import gapic_v1\nfrom google.api_core import retry as retries\nfrom google.auth import credentials as ga_credentials # type: ignore\nfrom google.auth.exceptions import MutualTLSChannelError # type: ignore\nfrom google.auth.transport import mtls # type: ignore\nfrom google.auth.transport.grpc import SslCredentials # type: ignore\nfrom google.oauth2 import service_account # type: ignore\n\nfrom google.cloud.api_keys_v2 import gapic_version as package_version\n\ntry:\n OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]\nexcept AttributeError: # pragma: NO COVER\n OptionalRetry = Union[retries.Retry, object] # type: ignore\n\nfrom google.api_core import operation # type: ignore\nfrom google.api_core import operation_async # type: ignore\nfrom google.longrunning import operations_pb2 # type: ignore\nfrom google.protobuf import empty_pb2 # type: ignore\nfrom google.protobuf import field_mask_pb2 # type: ignore\nfrom google.protobuf import timestamp_pb2 # type: ignore\n\nfrom google.cloud.api_keys_v2.services.api_keys import pagers\nfrom google.cloud.api_keys_v2.types import apikeys, resources\n\nfrom .transports.base import DEFAULT_CLIENT_INFO, ApiKeysTransport\nfrom .transports.grpc import ApiKeysGrpcTransport\nfrom .transports.grpc_asyncio import ApiKeysGrpcAsyncIOTransport\nfrom .transports.rest import ApiKeysRestTransport\n\n\nclass ApiKeysClientMeta(type):\n \"\"\"Metaclass for the ApiKeys client.\n\n This provides class-level methods for building and retrieving\n support objects (e.g. transport) without polluting the client instance\n objects.\n \"\"\"\n\n _transport_registry = OrderedDict() # type: Dict[str, Type[ApiKeysTransport]]\n _transport_registry[\"grpc\"] = ApiKeysGrpcTransport\n _transport_registry[\"grpc_asyncio\"] = ApiKeysGrpcAsyncIOTransport\n _transport_registry[\"rest\"] = ApiKeysRestTransport\n\n def get_transport_class(\n cls,\n label: Optional[str] = None,\n ) -> Type[ApiKeysTransport]:\n \"\"\"Returns an appropriate transport class.\n\n Args:\n label: The name of the desired transport. 
If none is\n provided, then the first transport in the registry is used.\n\n Returns:\n The transport class to use.\n \"\"\"\n # If a specific transport is requested, return that one.\n if label:\n return cls._transport_registry[label]\n\n # No transport is requested; return the default (that is, the first one\n # in the dictionary).\n return next(iter(cls._transport_registry.values()))\n\n\nclass ApiKeysClient(metaclass=ApiKeysClientMeta):\n \"\"\"Manages the API keys associated with projects.\"\"\"\n\n @staticmethod\n def _get_default_mtls_endpoint(api_endpoint):\n \"\"\"Converts api endpoint to mTLS endpoint.\n\n Convert \"*.sandbox.googleapis.com\" and \"*.googleapis.com\" to\n \"*.mtls.sandbox.googleapis.com\" and \"*.mtls.googleapis.com\" respectively.\n Args:\n api_endpoint (Optional[str]): the api endpoint to convert.\n Returns:\n str: converted mTLS api endpoint.\n \"\"\"\n if not api_endpoint:\n return api_endpoint\n\n mtls_endpoint_re = re.compile(\n r\"(?P[^.]+)(?P\\.mtls)?(?P\\.sandbox)?(?P\\.googleapis\\.com)?\"\n )\n\n m = mtls_endpoint_re.match(api_endpoint)\n name, mtls, sandbox, googledomain = m.groups()\n if mtls or not googledomain:\n return api_endpoint\n\n if sandbox:\n return api_endpoint.replace(\n \"sandbox.googleapis.com\", \"mtls.sandbox.googleapis.com\"\n )\n\n return api_endpoint.replace(\".googleapis.com\", \".mtls.googleapis.com\")\n\n DEFAULT_ENDPOINT = \"apikeys.googleapis.com\"\n DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore\n DEFAULT_ENDPOINT\n )\n\n @classmethod\n def from_service_account_info(cls, info: dict, *args, **kwargs):\n \"\"\"Creates an instance of this client using the provided credentials\n info.\n\n Args:\n info (dict): The service account private key info.\n args: Additional arguments to pass to the constructor.\n kwargs: Additional arguments to pass to the constructor.\n\n Returns:\n ApiKeysClient: The constructed client.\n \"\"\"\n credentials = service_account.Credentials.from_service_account_info(info)\n kwargs[\"credentials\"] = credentials\n return cls(*args, **kwargs)\n\n @classmethod\n def from_service_account_file(cls, filename: str, *args, **kwargs):\n \"\"\"Creates an instance of this client using the provided credentials\n file.\n\n Args:\n filename (str): The path to the service account private key json\n file.\n args: Additional arguments to pass to the constructor.\n kwargs: Additional arguments to pass to the constructor.\n\n Returns:\n ApiKeysClient: The constructed client.\n \"\"\"\n credentials = service_account.Credentials.from_service_account_file(filename)\n kwargs[\"credentials\"] = credentials\n return cls(*args, **kwargs)\n\n from_service_account_json = from_service_account_file\n\n @property\n def transport(self) -> ApiKeysTransport:\n \"\"\"Returns the transport used by the client instance.\n\n Returns:\n ApiKeysTransport: The transport used by the client\n instance.\n \"\"\"\n return self._transport\n\n @staticmethod\n def key_path(\n project: str,\n location: str,\n key: str,\n ) -> str:\n \"\"\"Returns a fully-qualified key string.\"\"\"\n return \"projects/{project}/locations/{location}/keys/{key}\".format(\n project=project,\n location=location,\n key=key,\n )\n\n @staticmethod\n def parse_key_path(path: str) -> Dict[str, str]:\n \"\"\"Parses a key path into its component segments.\"\"\"\n m = re.match(\n r\"^projects/(?P.+?)/locations/(?P.+?)/keys/(?P.+?)$\",\n path,\n )\n return m.groupdict() if m else {}\n\n @staticmethod\n def common_billing_account_path(\n billing_account: 
str,\n ) -> str:\n \"\"\"Returns a fully-qualified billing_account string.\"\"\"\n return \"billingAccounts/{billing_account}\".format(\n billing_account=billing_account,\n )\n\n @staticmethod\n def parse_common_billing_account_path(path: str) -> Dict[str, str]:\n \"\"\"Parse a billing_account path into its component segments.\"\"\"\n m = re.match(r\"^billingAccounts/(?P.+?)$\", path)\n return m.groupdict() if m else {}\n\n @staticmethod\n def common_folder_path(\n folder: str,\n ) -> str:\n \"\"\"Returns a fully-qualified folder string.\"\"\"\n return \"folders/{folder}\".format(\n folder=folder,\n )\n\n @staticmethod\n def parse_common_folder_path(path: str) -> Dict[str, str]:\n \"\"\"Parse a folder path into its component segments.\"\"\"\n m = re.match(r\"^folders/(?P.+?)$\", path)\n return m.groupdict() if m else {}\n\n @staticmethod\n def common_organization_path(\n organization: str,\n ) -> str:\n \"\"\"Returns a fully-qualified organization string.\"\"\"\n return \"organizations/{organization}\".format(\n organization=organization,\n )\n\n @staticmethod\n def parse_common_organization_path(path: str) -> Dict[str, str]:\n \"\"\"Parse a organization path into its component segments.\"\"\"\n m = re.match(r\"^organizations/(?P.+?)$\", path)\n return m.groupdict() if m else {}\n\n @staticmethod\n def common_project_path(\n project: str,\n ) -> str:\n \"\"\"Returns a fully-qualified project string.\"\"\"\n return \"projects/{project}\".format(\n project=project,\n )\n\n @staticmethod\n def parse_common_project_path(path: str) -> Dict[str, str]:\n \"\"\"Parse a project path into its component segments.\"\"\"\n m = re.match(r\"^projects/(?P.+?)$\", path)\n return m.groupdict() if m else {}\n\n @staticmethod\n def common_location_path(\n project: str,\n location: str,\n ) -> str:\n \"\"\"Returns a fully-qualified location string.\"\"\"\n return \"projects/{project}/locations/{location}\".format(\n project=project,\n location=location,\n )\n\n @staticmethod\n def parse_common_location_path(path: str) -> Dict[str, str]:\n \"\"\"Parse a location path into its component segments.\"\"\"\n m = re.match(r\"^projects/(?P.+?)/locations/(?P.+?)$\", path)\n return m.groupdict() if m else {}\n\n @classmethod\n def get_mtls_endpoint_and_cert_source(\n cls, client_options: Optional[client_options_lib.ClientOptions] = None\n ):\n \"\"\"Return the API endpoint and client cert source for mutual TLS.\n\n The client cert source is determined in the following order:\n (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not \"true\", the\n client cert source is None.\n (2) if `client_options.client_cert_source` is provided, use the provided one; if the\n default client cert source exists, use the default one; otherwise the client cert\n source is None.\n\n The API endpoint is determined in the following order:\n (1) if `client_options.api_endpoint` if provided, use the provided one.\n (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is \"always\", use the\n default mTLS endpoint; if the environment variable is \"never\", use the default API\n endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise\n use the default API endpoint.\n\n More details can be found at https://google.aip.dev/auth/4114.\n\n Args:\n client_options (google.api_core.client_options.ClientOptions): Custom options for the\n client. 
Only the `api_endpoint` and `client_cert_source` properties may be used\n in this method.\n\n Returns:\n Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the\n client cert source to use.\n\n Raises:\n google.auth.exceptions.MutualTLSChannelError: If any errors happen.\n \"\"\"\n if client_options is None:\n client_options = client_options_lib.ClientOptions()\n use_client_cert = os.getenv(\"GOOGLE_API_USE_CLIENT_CERTIFICATE\", \"false\")\n use_mtls_endpoint = os.getenv(\"GOOGLE_API_USE_MTLS_ENDPOINT\", \"auto\")\n if use_client_cert not in (\"true\", \"false\"):\n raise ValueError(\n \"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`\"\n )\n if use_mtls_endpoint not in (\"auto\", \"never\", \"always\"):\n raise MutualTLSChannelError(\n \"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`\"\n )\n\n # Figure out the client cert source to use.\n client_cert_source = None\n if use_client_cert == \"true\":\n if client_options.client_cert_source:\n client_cert_source = client_options.client_cert_source\n elif mtls.has_default_client_cert_source():\n client_cert_source = mtls.default_client_cert_source()\n\n # Figure out which api endpoint to use.\n if client_options.api_endpoint is not None:\n api_endpoint = client_options.api_endpoint\n elif use_mtls_endpoint == \"always\" or (\n use_mtls_endpoint == \"auto\" and client_cert_source\n ):\n api_endpoint = cls.DEFAULT_MTLS_ENDPOINT\n else:\n api_endpoint = cls.DEFAULT_ENDPOINT\n\n return api_endpoint, client_cert_source\n\n def __init__(\n self,\n *,\n credentials: Optional[ga_credentials.Credentials] = None,\n transport: Optional[Union[str, ApiKeysTransport]] = None,\n client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None,\n client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,\n ) -> None:\n \"\"\"Instantiates the api keys client.\n\n Args:\n credentials (Optional[google.auth.credentials.Credentials]): The\n authorization credentials to attach to requests. These\n credentials identify the application to the service; if none\n are specified, the client will attempt to ascertain the\n credentials from the environment.\n transport (Union[str, ApiKeysTransport]): The\n transport to use. If set to None, a transport is chosen\n automatically.\n client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the\n client. It won't take effect if a ``transport`` instance is provided.\n (1) The ``api_endpoint`` property can be used to override the\n default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT\n environment variable can also be used to override the endpoint:\n \"always\" (always use the default mTLS endpoint), \"never\" (always\n use the default regular endpoint) and \"auto\" (auto switch to the\n default mTLS endpoint if client certificate is present, this is\n the default value). However, the ``api_endpoint`` property takes\n precedence if provided.\n (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable\n is \"true\", then the ``client_cert_source`` property can be used\n to provide client certificate for mutual TLS transport. If\n not provided, the default SSL client certificate will be used if\n present. 
If GOOGLE_API_USE_CLIENT_CERTIFICATE is \"false\" or not\n set, no client certificate will be used.\n client_info (google.api_core.gapic_v1.client_info.ClientInfo):\n The client info used to send a user-agent string along with\n API requests. If ``None``, then default info will be used.\n Generally, you only need to set this if you're developing\n your own client library.\n\n Raises:\n google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport\n creation failed for any reason.\n \"\"\"\n if isinstance(client_options, dict):\n client_options = client_options_lib.from_dict(client_options)\n if client_options is None:\n client_options = client_options_lib.ClientOptions()\n client_options = cast(client_options_lib.ClientOptions, client_options)\n\n api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(\n client_options\n )\n\n api_key_value = getattr(client_options, \"api_key\", None)\n if api_key_value and credentials:\n raise ValueError(\n \"client_options.api_key and credentials are mutually exclusive\"\n )\n\n # Save or instantiate the transport.\n # Ordinarily, we provide the transport, but allowing a custom transport\n # instance provides an extensibility point for unusual situations.\n if isinstance(transport, ApiKeysTransport):\n # transport is a ApiKeysTransport instance.\n if credentials or client_options.credentials_file or api_key_value:\n raise ValueError(\n \"When providing a transport instance, \"\n \"provide its credentials directly.\"\n )\n if client_options.scopes:\n raise ValueError(\n \"When providing a transport instance, provide its scopes \"\n \"directly.\"\n )\n self._transport = transport\n else:\n import google.auth._default # type: ignore\n\n if api_key_value and hasattr(\n google.auth._default, \"get_api_key_credentials\"\n ):\n credentials = google.auth._default.get_api_key_credentials(\n api_key_value\n )\n\n Transport = type(self).get_transport_class(transport)\n self._transport = Transport(\n credentials=credentials,\n credentials_file=client_options.credentials_file,\n host=api_endpoint,\n scopes=client_options.scopes,\n client_cert_source_for_mtls=client_cert_source_func,\n quota_project_id=client_options.quota_project_id,\n client_info=client_info,\n always_use_jwt_access=True,\n api_audience=client_options.api_audience,\n )\n\n def create_key(\n self,\n request: Optional[Union[apikeys.CreateKeyRequest, dict]] = None,\n *,\n parent: Optional[str] = None,\n key: Optional[resources.Key] = None,\n key_id: Optional[str] = None,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Union[float, object] = gapic_v1.method.DEFAULT,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> operation.Operation:\n r\"\"\"Creates a new API key.\n\n NOTE: Key is a global resource; hence the only supported value\n for location is ``global``.\n\n .. 
code-block:: python\n\n # This snippet has been automatically generated and should be regarded as a\n # code template only.\n # It will require modifications to work:\n # - It may require correct/in-range values for request initialization.\n # - It may require specifying regional endpoints when creating the service\n # client as shown in:\n # https://googleapis.dev/python/google-api-core/latest/client_options.html\n from google.cloud import api_keys_v2\n\n def sample_create_key():\n # Create a client\n client = api_keys_v2.ApiKeysClient()\n\n # Initialize request argument(s)\n request = api_keys_v2.CreateKeyRequest(\n parent=\"parent_value\",\n )\n\n # Make the request\n operation = client.create_key(request=request)\n\n print(\"Waiting for operation to complete...\")\n\n response = operation.result()\n\n # Handle the response\n print(response)\n\n Args:\n request (Union[google.cloud.api_keys_v2.types.CreateKeyRequest, dict]):\n The request object. Request message for ``CreateKey`` method.\n parent (str):\n Required. The project in which the\n API key is created.\n\n This corresponds to the ``parent`` field\n on the ``request`` instance; if ``request`` is provided, this\n should not be set.\n key (google.cloud.api_keys_v2.types.Key):\n Required. The API key fields to set at creation time.\n You can configure only the ``display_name``,\n ``restrictions``, and ``annotations`` fields.\n\n This corresponds to the ``key`` field\n on the ``request`` instance; if ``request`` is provided, this\n should not be set.\n key_id (str):\n User specified key id (optional). If specified, it will\n become the final component of the key resource name.\n\n The id must be unique within the project, must conform\n with RFC-1034, is restricted to lower-cased letters, and\n has a maximum length of 63 characters. 
In other words,\n                the id must match the regular expression:\n                ``[a-z]([a-z0-9-]{0,61}[a-z0-9])?``.\n\n                The id must NOT be a UUID-like string.\n\n                This corresponds to the ``key_id`` field\n                on the ``request`` instance; if ``request`` is provided, this\n                should not be set.\n            retry (google.api_core.retry.Retry): Designation of what errors, if any,\n                should be retried.\n            timeout (float): The timeout for this request.\n            metadata (Sequence[Tuple[str, str]]): Strings which should be\n                sent along with the request as metadata.\n\n        Returns:\n            google.api_core.operation.Operation:\n                An object representing a long-running operation.\n\n                The result type for the operation will be\n                :class:`google.cloud.api_keys_v2.types.Key` The\n                representation of a key managed by the API Keys API.\n\n        \"\"\"\n        # Create or coerce a protobuf request object.\n        # Quick check: If we got a request object, we should *not* have\n        # gotten any keyword arguments that map to the request.\n        has_flattened_params = any([parent, key, key_id])\n        if request is not None and has_flattened_params:\n            raise ValueError(\n                \"If the `request` argument is set, then none of \"\n                \"the individual field arguments should be set.\"\n            )\n\n        # Minor optimization to avoid making a copy if the user passes\n        # in a apikeys.CreateKeyRequest.\n        # There's no risk of modifying the input as we've already verified\n        # there are no flattened fields.\n        if not isinstance(request, apikeys.CreateKeyRequest):\n            request = apikeys.CreateKeyRequest(request)\n            # If we have keyword arguments corresponding to fields on the\n            # request, apply these.\n            if parent is not None:\n                request.parent = parent\n            if key is not None:\n                request.key = key\n            if key_id is not None:\n                request.key_id = key_id\n\n        # Wrap the RPC method; this adds retry and timeout information,\n        # and friendly error handling.\n        rpc = self._transport._wrapped_methods[self._transport.create_key]\n\n        # Certain fields should be provided within the metadata header;\n        # add these here.\n        metadata = tuple(metadata) + (\n            gapic_v1.routing_header.to_grpc_metadata(((\"parent\", request.parent),)),\n        )\n\n        # Send the request.\n        response = rpc(\n            request,\n            retry=retry,\n            timeout=timeout,\n            metadata=metadata,\n        )\n\n        # Wrap the response in an operation future.\n        response = operation.from_gapic(\n            response,\n            self._transport.operations_client,\n            resources.Key,\n            metadata_type=empty_pb2.Empty,\n        )\n\n        # Done; return the response.\n        return response\n\n    def list_keys(\n        self,\n        request: Optional[Union[apikeys.ListKeysRequest, dict]] = None,\n        *,\n        parent: Optional[str] = None,\n        retry: OptionalRetry = gapic_v1.method.DEFAULT,\n        timeout: Union[float, object] = gapic_v1.method.DEFAULT,\n        metadata: Sequence[Tuple[str, str]] = (),\n    ) -> pagers.ListKeysPager:\n        r\"\"\"Lists the API keys owned by a project. The key string of the API\n        key isn't included in the response.\n\n        NOTE: Key is a global resource; hence the only supported value\n        for location is ``global``.\n\n        .. 
code-block:: python\n\n # This snippet has been automatically generated and should be regarded as a\n # code template only.\n # It will require modifications to work:\n # - It may require correct/in-range values for request initialization.\n # - It may require specifying regional endpoints when creating the service\n # client as shown in:\n # https://googleapis.dev/python/google-api-core/latest/client_options.html\n from google.cloud import api_keys_v2\n\n def sample_list_keys():\n # Create a client\n client = api_keys_v2.ApiKeysClient()\n\n # Initialize request argument(s)\n request = api_keys_v2.ListKeysRequest(\n parent=\"parent_value\",\n )\n\n # Make the request\n page_result = client.list_keys(request=request)\n\n # Handle the response\n for response in page_result:\n print(response)\n\n Args:\n request (Union[google.cloud.api_keys_v2.types.ListKeysRequest, dict]):\n The request object. Request message for ``ListKeys`` method.\n parent (str):\n Required. Lists all API keys\n associated with this project.\n\n This corresponds to the ``parent`` field\n on the ``request`` instance; if ``request`` is provided, this\n should not be set.\n retry (google.api_core.retry.Retry): Designation of what errors, if any,\n should be retried.\n timeout (float): The timeout for this request.\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n\n Returns:\n google.cloud.api_keys_v2.services.api_keys.pagers.ListKeysPager:\n Response message for ListKeys method.\n\n Iterating over this object will yield results and\n resolve additional pages automatically.\n\n \"\"\"\n # Create or coerce a protobuf request object.\n # Quick check: If we got a request object, we should *not* have\n # gotten any keyword arguments that map to the request.\n has_flattened_params = any([parent])\n if request is not None and has_flattened_params:\n raise ValueError(\n \"If the `request` argument is set, then none of \"\n \"the individual field arguments should be set.\"\n )\n\n # Minor optimization to avoid making a copy if the user passes\n # in a apikeys.ListKeysRequest.\n # There's no risk of modifying the input as we've already verified\n # there are no flattened fields.\n if not isinstance(request, apikeys.ListKeysRequest):\n request = apikeys.ListKeysRequest(request)\n # If we have keyword arguments corresponding to fields on the\n # request, apply these.\n if parent is not None:\n request.parent = parent\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = self._transport._wrapped_methods[self._transport.list_keys]\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata(((\"parent\", request.parent),)),\n )\n\n # Send the request.\n response = rpc(\n request,\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )\n\n # This method is paged; wrap the response in a pager, which provides\n # an `__iter__` convenience method.\n response = pagers.ListKeysPager(\n method=rpc,\n request=request,\n response=response,\n metadata=metadata,\n )\n\n # Done; return the response.\n return response\n\n def get_key(\n self,\n request: Optional[Union[apikeys.GetKeyRequest, dict]] = None,\n *,\n name: Optional[str] = None,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Union[float, object] = gapic_v1.method.DEFAULT,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> resources.Key:\n r\"\"\"Gets the 
metadata for an API key. The key string of the API key\n isn't included in the response.\n\n NOTE: Key is a global resource; hence the only supported value\n for location is ``global``.\n\n .. code-block:: python\n\n # This snippet has been automatically generated and should be regarded as a\n # code template only.\n # It will require modifications to work:\n # - It may require correct/in-range values for request initialization.\n # - It may require specifying regional endpoints when creating the service\n # client as shown in:\n # https://googleapis.dev/python/google-api-core/latest/client_options.html\n from google.cloud import api_keys_v2\n\n def sample_get_key():\n # Create a client\n client = api_keys_v2.ApiKeysClient()\n\n # Initialize request argument(s)\n request = api_keys_v2.GetKeyRequest(\n name=\"name_value\",\n )\n\n # Make the request\n response = client.get_key(request=request)\n\n # Handle the response\n print(response)\n\n Args:\n request (Union[google.cloud.api_keys_v2.types.GetKeyRequest, dict]):\n The request object. Request message for ``GetKey`` method.\n name (str):\n Required. The resource name of the\n API key to get.\n\n This corresponds to the ``name`` field\n on the ``request`` instance; if ``request`` is provided, this\n should not be set.\n retry (google.api_core.retry.Retry): Designation of what errors, if any,\n should be retried.\n timeout (float): The timeout for this request.\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n\n Returns:\n google.cloud.api_keys_v2.types.Key:\n The representation of a key managed\n by the API Keys API.\n\n \"\"\"\n # Create or coerce a protobuf request object.\n # Quick check: If we got a request object, we should *not* have\n # gotten any keyword arguments that map to the request.\n has_flattened_params = any([name])\n if request is not None and has_flattened_params:\n raise ValueError(\n \"If the `request` argument is set, then none of \"\n \"the individual field arguments should be set.\"\n )\n\n # Minor optimization to avoid making a copy if the user passes\n # in a apikeys.GetKeyRequest.\n # There's no risk of modifying the input as we've already verified\n # there are no flattened fields.\n if not isinstance(request, apikeys.GetKeyRequest):\n request = apikeys.GetKeyRequest(request)\n # If we have keyword arguments corresponding to fields on the\n # request, apply these.\n if name is not None:\n request.name = name\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = self._transport._wrapped_methods[self._transport.get_key]\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata(((\"name\", request.name),)),\n )\n\n # Send the request.\n response = rpc(\n request,\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )\n\n # Done; return the response.\n return response\n\n def get_key_string(\n self,\n request: Optional[Union[apikeys.GetKeyStringRequest, dict]] = None,\n *,\n name: Optional[str] = None,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Union[float, object] = gapic_v1.method.DEFAULT,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> apikeys.GetKeyStringResponse:\n r\"\"\"Get the key string for an API key.\n\n NOTE: Key is a global resource; hence the only supported value\n for location is ``global``.\n\n .. 
code-block:: python\n\n # This snippet has been automatically generated and should be regarded as a\n # code template only.\n # It will require modifications to work:\n # - It may require correct/in-range values for request initialization.\n # - It may require specifying regional endpoints when creating the service\n # client as shown in:\n # https://googleapis.dev/python/google-api-core/latest/client_options.html\n from google.cloud import api_keys_v2\n\n def sample_get_key_string():\n # Create a client\n client = api_keys_v2.ApiKeysClient()\n\n # Initialize request argument(s)\n request = api_keys_v2.GetKeyStringRequest(\n name=\"name_value\",\n )\n\n # Make the request\n response = client.get_key_string(request=request)\n\n # Handle the response\n print(response)\n\n Args:\n request (Union[google.cloud.api_keys_v2.types.GetKeyStringRequest, dict]):\n The request object. Request message for ``GetKeyString`` method.\n name (str):\n Required. The resource name of the\n API key to be retrieved.\n\n This corresponds to the ``name`` field\n on the ``request`` instance; if ``request`` is provided, this\n should not be set.\n retry (google.api_core.retry.Retry): Designation of what errors, if any,\n should be retried.\n timeout (float): The timeout for this request.\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n\n Returns:\n google.cloud.api_keys_v2.types.GetKeyStringResponse:\n Response message for GetKeyString method.\n \"\"\"\n # Create or coerce a protobuf request object.\n # Quick check: If we got a request object, we should *not* have\n # gotten any keyword arguments that map to the request.\n has_flattened_params = any([name])\n if request is not None and has_flattened_params:\n raise ValueError(\n \"If the `request` argument is set, then none of \"\n \"the individual field arguments should be set.\"\n )\n\n # Minor optimization to avoid making a copy if the user passes\n # in a apikeys.GetKeyStringRequest.\n # There's no risk of modifying the input as we've already verified\n # there are no flattened fields.\n if not isinstance(request, apikeys.GetKeyStringRequest):\n request = apikeys.GetKeyStringRequest(request)\n # If we have keyword arguments corresponding to fields on the\n # request, apply these.\n if name is not None:\n request.name = name\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = self._transport._wrapped_methods[self._transport.get_key_string]\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata(((\"name\", request.name),)),\n )\n\n # Send the request.\n response = rpc(\n request,\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )\n\n # Done; return the response.\n return response\n\n def update_key(\n self,\n request: Optional[Union[apikeys.UpdateKeyRequest, dict]] = None,\n *,\n key: Optional[resources.Key] = None,\n update_mask: Optional[field_mask_pb2.FieldMask] = None,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Union[float, object] = gapic_v1.method.DEFAULT,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> operation.Operation:\n r\"\"\"Patches the modifiable fields of an API key. The key string of\n the API key isn't included in the response.\n\n NOTE: Key is a global resource; hence the only supported value\n for location is ``global``.\n\n .. 
code-block:: python\n\n # This snippet has been automatically generated and should be regarded as a\n # code template only.\n # It will require modifications to work:\n # - It may require correct/in-range values for request initialization.\n # - It may require specifying regional endpoints when creating the service\n # client as shown in:\n # https://googleapis.dev/python/google-api-core/latest/client_options.html\n from google.cloud import api_keys_v2\n\n def sample_update_key():\n # Create a client\n client = api_keys_v2.ApiKeysClient()\n\n # Initialize request argument(s)\n request = api_keys_v2.UpdateKeyRequest(\n )\n\n # Make the request\n operation = client.update_key(request=request)\n\n print(\"Waiting for operation to complete...\")\n\n response = operation.result()\n\n # Handle the response\n print(response)\n\n Args:\n request (Union[google.cloud.api_keys_v2.types.UpdateKeyRequest, dict]):\n The request object. Request message for ``UpdateKey`` method.\n key (google.cloud.api_keys_v2.types.Key):\n Required. Set the ``name`` field to the resource name of\n the API key to be updated. You can update only the\n ``display_name``, ``restrictions``, and ``annotations``\n fields.\n\n This corresponds to the ``key`` field\n on the ``request`` instance; if ``request`` is provided, this\n should not be set.\n update_mask (google.protobuf.field_mask_pb2.FieldMask):\n The field mask specifies which fields to be updated as\n part of this request. All other fields are ignored.\n Mutable fields are: ``display_name``, ``restrictions``,\n and ``annotations``. If an update mask is not provided,\n the service treats it as an implied mask equivalent to\n all allowed fields that are set on the wire. If the\n field mask has a special value \"*\", the service treats\n it equivalent to replace all allowed mutable fields.\n\n This corresponds to the ``update_mask`` field\n on the ``request`` instance; if ``request`` is provided, this\n should not be set.\n retry (google.api_core.retry.Retry): Designation of what errors, if any,\n should be retried.\n timeout (float): The timeout for this request.\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n\n Returns:\n google.api_core.operation.Operation:\n An object representing a long-running operation.\n\n The result type for the operation will be\n :class:`google.cloud.api_keys_v2.types.Key` The\n representation of a key managed by the API Keys API.\n\n \"\"\"\n # Create or coerce a protobuf request object.\n # Quick check: If we got a request object, we should *not* have\n # gotten any keyword arguments that map to the request.\n has_flattened_params = any([key, update_mask])\n if request is not None and has_flattened_params:\n raise ValueError(\n \"If the `request` argument is set, then none of \"\n \"the individual field arguments should be set.\"\n )\n\n # Minor optimization to avoid making a copy if the user passes\n # in a apikeys.UpdateKeyRequest.\n # There's no risk of modifying the input as we've already verified\n # there are no flattened fields.\n if not isinstance(request, apikeys.UpdateKeyRequest):\n request = apikeys.UpdateKeyRequest(request)\n # If we have keyword arguments corresponding to fields on the\n # request, apply these.\n if key is not None:\n request.key = key\n if update_mask is not None:\n request.update_mask = update_mask\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = 
self._transport._wrapped_methods[self._transport.update_key]\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata(((\"key.name\", request.key.name),)),\n )\n\n # Send the request.\n response = rpc(\n request,\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )\n\n # Wrap the response in an operation future.\n response = operation.from_gapic(\n response,\n self._transport.operations_client,\n resources.Key,\n metadata_type=empty_pb2.Empty,\n )\n\n # Done; return the response.\n return response\n\n def delete_key(\n self,\n request: Optional[Union[apikeys.DeleteKeyRequest, dict]] = None,\n *,\n name: Optional[str] = None,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Union[float, object] = gapic_v1.method.DEFAULT,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> operation.Operation:\n r\"\"\"Deletes an API key. Deleted key can be retrieved within 30 days\n of deletion. Afterward, key will be purged from the project.\n\n NOTE: Key is a global resource; hence the only supported value\n for location is ``global``.\n\n .. code-block:: python\n\n # This snippet has been automatically generated and should be regarded as a\n # code template only.\n # It will require modifications to work:\n # - It may require correct/in-range values for request initialization.\n # - It may require specifying regional endpoints when creating the service\n # client as shown in:\n # https://googleapis.dev/python/google-api-core/latest/client_options.html\n from google.cloud import api_keys_v2\n\n def sample_delete_key():\n # Create a client\n client = api_keys_v2.ApiKeysClient()\n\n # Initialize request argument(s)\n request = api_keys_v2.DeleteKeyRequest(\n name=\"name_value\",\n )\n\n # Make the request\n operation = client.delete_key(request=request)\n\n print(\"Waiting for operation to complete...\")\n\n response = operation.result()\n\n # Handle the response\n print(response)\n\n Args:\n request (Union[google.cloud.api_keys_v2.types.DeleteKeyRequest, dict]):\n The request object. Request message for ``DeleteKey`` method.\n name (str):\n Required. 
The resource name of the\n API key to be deleted.\n\n This corresponds to the ``name`` field\n on the ``request`` instance; if ``request`` is provided, this\n should not be set.\n retry (google.api_core.retry.Retry): Designation of what errors, if any,\n should be retried.\n timeout (float): The timeout for this request.\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n\n Returns:\n google.api_core.operation.Operation:\n An object representing a long-running operation.\n\n The result type for the operation will be\n :class:`google.cloud.api_keys_v2.types.Key` The\n representation of a key managed by the API Keys API.\n\n \"\"\"\n # Create or coerce a protobuf request object.\n # Quick check: If we got a request object, we should *not* have\n # gotten any keyword arguments that map to the request.\n has_flattened_params = any([name])\n if request is not None and has_flattened_params:\n raise ValueError(\n \"If the `request` argument is set, then none of \"\n \"the individual field arguments should be set.\"\n )\n\n # Minor optimization to avoid making a copy if the user passes\n # in a apikeys.DeleteKeyRequest.\n # There's no risk of modifying the input as we've already verified\n # there are no flattened fields.\n if not isinstance(request, apikeys.DeleteKeyRequest):\n request = apikeys.DeleteKeyRequest(request)\n # If we have keyword arguments corresponding to fields on the\n # request, apply these.\n if name is not None:\n request.name = name\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = self._transport._wrapped_methods[self._transport.delete_key]\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata(((\"name\", request.name),)),\n )\n\n # Send the request.\n response = rpc(\n request,\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )\n\n # Wrap the response in an operation future.\n response = operation.from_gapic(\n response,\n self._transport.operations_client,\n resources.Key,\n metadata_type=empty_pb2.Empty,\n )\n\n # Done; return the response.\n return response\n\n def undelete_key(\n self,\n request: Optional[Union[apikeys.UndeleteKeyRequest, dict]] = None,\n *,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Union[float, object] = gapic_v1.method.DEFAULT,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> operation.Operation:\n r\"\"\"Undeletes an API key which was deleted within 30 days.\n\n NOTE: Key is a global resource; hence the only supported value\n for location is ``global``.\n\n .. 
code-block:: python\n\n # This snippet has been automatically generated and should be regarded as a\n # code template only.\n # It will require modifications to work:\n # - It may require correct/in-range values for request initialization.\n # - It may require specifying regional endpoints when creating the service\n # client as shown in:\n # https://googleapis.dev/python/google-api-core/latest/client_options.html\n from google.cloud import api_keys_v2\n\n def sample_undelete_key():\n # Create a client\n client = api_keys_v2.ApiKeysClient()\n\n # Initialize request argument(s)\n request = api_keys_v2.UndeleteKeyRequest(\n name=\"name_value\",\n )\n\n # Make the request\n operation = client.undelete_key(request=request)\n\n print(\"Waiting for operation to complete...\")\n\n response = operation.result()\n\n # Handle the response\n print(response)\n\n Args:\n request (Union[google.cloud.api_keys_v2.types.UndeleteKeyRequest, dict]):\n The request object. Request message for ``UndeleteKey`` method.\n retry (google.api_core.retry.Retry): Designation of what errors, if any,\n should be retried.\n timeout (float): The timeout for this request.\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n\n Returns:\n google.api_core.operation.Operation:\n An object representing a long-running operation.\n\n The result type for the operation will be\n :class:`google.cloud.api_keys_v2.types.Key` The\n representation of a key managed by the API Keys API.\n\n \"\"\"\n # Create or coerce a protobuf request object.\n # Minor optimization to avoid making a copy if the user passes\n # in a apikeys.UndeleteKeyRequest.\n # There's no risk of modifying the input as we've already verified\n # there are no flattened fields.\n if not isinstance(request, apikeys.UndeleteKeyRequest):\n request = apikeys.UndeleteKeyRequest(request)\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = self._transport._wrapped_methods[self._transport.undelete_key]\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata(((\"name\", request.name),)),\n )\n\n # Send the request.\n response = rpc(\n request,\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )\n\n # Wrap the response in an operation future.\n response = operation.from_gapic(\n response,\n self._transport.operations_client,\n resources.Key,\n metadata_type=empty_pb2.Empty,\n )\n\n # Done; return the response.\n return response\n\n def lookup_key(\n self,\n request: Optional[Union[apikeys.LookupKeyRequest, dict]] = None,\n *,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Union[float, object] = gapic_v1.method.DEFAULT,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> apikeys.LookupKeyResponse:\n r\"\"\"Find the parent project and resource name of the API key that\n matches the key string in the request. If the API key has been\n purged, resource name will not be set. The service account must\n have the ``apikeys.keys.lookup`` permission on the parent\n project.\n\n .. 
code-block:: python\n\n # This snippet has been automatically generated and should be regarded as a\n # code template only.\n # It will require modifications to work:\n # - It may require correct/in-range values for request initialization.\n # - It may require specifying regional endpoints when creating the service\n # client as shown in:\n # https://googleapis.dev/python/google-api-core/latest/client_options.html\n from google.cloud import api_keys_v2\n\n def sample_lookup_key():\n # Create a client\n client = api_keys_v2.ApiKeysClient()\n\n # Initialize request argument(s)\n request = api_keys_v2.LookupKeyRequest(\n key_string=\"key_string_value\",\n )\n\n # Make the request\n response = client.lookup_key(request=request)\n\n # Handle the response\n print(response)\n\n Args:\n request (Union[google.cloud.api_keys_v2.types.LookupKeyRequest, dict]):\n The request object. Request message for ``LookupKey`` method.\n retry (google.api_core.retry.Retry): Designation of what errors, if any,\n should be retried.\n timeout (float): The timeout for this request.\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n\n Returns:\n google.cloud.api_keys_v2.types.LookupKeyResponse:\n Response message for LookupKey method.\n \"\"\"\n # Create or coerce a protobuf request object.\n # Minor optimization to avoid making a copy if the user passes\n # in a apikeys.LookupKeyRequest.\n # There's no risk of modifying the input as we've already verified\n # there are no flattened fields.\n if not isinstance(request, apikeys.LookupKeyRequest):\n request = apikeys.LookupKeyRequest(request)\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = self._transport._wrapped_methods[self._transport.lookup_key]\n\n # Send the request.\n response = rpc(\n request,\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )\n\n # Done; return the response.\n return response\n\n def __enter__(self) -> \"ApiKeysClient\":\n return self\n\n def __exit__(self, type, value, traceback):\n \"\"\"Releases underlying transport's resources.\n\n .. warning::\n ONLY use as a context manager if the transport is NOT shared\n with other clients! Exiting the with block will CLOSE the transport\n and may cause errors in other clients!\n \"\"\"\n self.transport.close()\n\n def get_operation(\n self,\n request: Optional[operations_pb2.GetOperationRequest] = None,\n *,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Union[float, object] = gapic_v1.method.DEFAULT,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> operations_pb2.Operation:\n r\"\"\"Gets the latest state of a long-running operation.\n\n Args:\n request (:class:`~.operations_pb2.GetOperationRequest`):\n The request object. 
Request message for\n `GetOperation` method.\n retry (google.api_core.retry.Retry): Designation of what errors,\n if any, should be retried.\n timeout (float): The timeout for this request.\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n Returns:\n ~.operations_pb2.Operation:\n An ``Operation`` object.\n \"\"\"\n # Create or coerce a protobuf request object.\n # The request isn't a proto-plus wrapped type,\n # so it must be constructed via keyword expansion.\n if isinstance(request, dict):\n request = operations_pb2.GetOperationRequest(**request)\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = gapic_v1.method.wrap_method(\n self._transport.get_operation,\n default_timeout=None,\n client_info=DEFAULT_CLIENT_INFO,\n )\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata(((\"name\", request.name),)),\n )\n\n # Send the request.\n response = rpc(\n request,\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )\n\n # Done; return the response.\n return response\n\n\nDEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(\n gapic_version=package_version.__version__\n)\n\n\n__all__ = (\"ApiKeysClient\",)\n","repo_name":"googleapis/google-cloud-python","sub_path":"packages/google-cloud-api-keys/google/cloud/api_keys_v2/services/api_keys/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":56193,"program_lang":"python","lang":"en","doc_type":"code","stars":4415,"dataset":"github-code","pt":"78"}
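# A minimal usage sketch for the ApiKeysClient above, assuming Application
# Default Credentials are configured; it exercises the flattened
# `parent`/`key` arguments that create_key accepts as an alternative to a
# full CreateKeyRequest. The project id and display name are placeholders.
from google.cloud import api_keys_v2

def create_key_with_flattened_args(project_id: str):
    client = api_keys_v2.ApiKeysClient()
    # Key is a global resource, so the parent location must be "global".
    parent = f"projects/{project_id}/locations/global"
    key = api_keys_v2.Key(display_name="example key")
    # Passing `request=` together with flattened fields would raise
    # ValueError (see the has_flattened_params check in create_key).
    operation = client.create_key(parent=parent, key=key)
    return operation.result()  # block until the long-running operation finishes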
+{"seq_id":"5957843569","text":"import random\n\ndef getCards() : #카드들을 생성한다.\n cards = []\n for i in range(4) :\n #수식 카드 만들기\n num1 = random.randint(1,9)\n num2 = random.randint(1,9)\n op = random.randint(1,2)\n\n content = \"\"\n correctContent = \"\"\n if op == 1 :\n content = str(num1) + \"+\" + str(num2)\n correctContent = str(num1 + num2)\n elif op == 2 :\n content = str(num1) + \"-\" + str(num2)\n correctContent = str(num1 - num2)\n \n cards.append(content)\n cards.append(correctContent)\n print(cards)\n\ngetCards()","repo_name":"xowl1596/PythonPS-basic","sub_path":"special/memoryCard.py","file_name":"memoryCard.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"15464989524","text":"from keras_preprocessing import sequence\nfrom tensorflow.python.keras import Sequential\nfrom tensorflow.python.keras.datasets import imdb\nfrom tensorflow.python.keras.layers import Embedding, SimpleRNN, Dense, Conv1D, MaxPooling1D, GlobalMaxPooling1D, CuDNNLSTM\nfrom tensorflow.python.keras.optimizers import RMSprop\n\nfrom tf_keras.keras import tools\n\n\ndef build_simple_rnn_model(max_features=10000):\n model = Sequential()\n model.add(Embedding(max_features, 32))\n model.add(SimpleRNN(32))\n model.add(Dense(1, activation='sigmoid'))\n model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])\n return model\n\n\ndef build_LSTM_model(max_features=10000):\n model = Sequential()\n model.add(Embedding(max_features, 32))\n model.add(CuDNNLSTM(32))\n model.add(Dense(1, activation='sigmoid'))\n model.compile(optimizer='rmsprop',\n loss='binary_crossentropy',\n metrics=['acc'])\n return model\n\n\ndef build_cnn_1d_model(maxlen=500):\n model = Sequential()\n model.add(Embedding(max_features, 128, input_length=maxlen))\n model.add(Conv1D(32, 7, activation='relu'))\n model.add(MaxPooling1D(5))\n model.add(Conv1D(32, 7, activation='relu'))\n model.add(GlobalMaxPooling1D())\n model.add(Dense(1))\n model.summary()\n model.compile(optimizer=RMSprop(lr=1e-4),\n loss='binary_crossentropy',\n metrics=['acc'])\n return model\n\n\nif __name__ == '__main__':\n max_features = 10000\n maxlen = 500\n batch_size = 32\n print('Loading data...')\n (input_train, y_train), (input_test, y_test) = imdb.load_data(\n num_words=max_features)\n print(len(input_train), 'train sequences')\n print(len(input_test), 'test sequences')\n print('Pad sequences (samples x time)')\n input_train = sequence.pad_sequences(input_train, maxlen=maxlen)\n input_test = sequence.pad_sequences(input_test, maxlen=maxlen)\n print('input_train shape:', input_train.shape)\n print('input_test shape:', input_test.shape)\n # rnn训练速度极慢\n # model = build_simple_rnn_model()\n\n #CuDNNLSTM, 才能使用gpu提高速度,普通LSTM很慢\n model = build_LSTM_model()\n\n # 训练速度较快\n # model = build_cnn_1d_model()\n\n\n history = model.fit(input_train, y_train,\n epochs=10,\n batch_size=128,\n validation_split=0.2)\n tools.plot_loss(history.history)\n # tools.plot_accuracy(history.history)\n","repo_name":"ljldgup/ml","sub_path":"tf_keras/keras/text_temperature/imdb_SimpleRNN_LSTM_CONV1D.py","file_name":"imdb_SimpleRNN_LSTM_CONV1D.py","file_ext":"py","file_size_in_byte":2511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"24706872016","text":"import argparse\r\nimport numpy as np\r\n\r\nindex_tag_input_file =''\r\ndef get_inputs():\r\n \"\"\"\r\n Collects all the inputs from the command line and returns the data. To use this function:\r\n\r\n train_data, words_to_index, tags_to_index, init_out, emit_out, trans_out = get_inputs()\r\n \r\n Where above the arguments have the following types:\r\n\r\n train_data --> A list of training examples, where each training example is a list\r\n of tuples train_data[i] = [(word1, tag1), (word2, tag2), (word3, tag3), ...]\r\n \r\n words_to_indices --> A dictionary mapping words to indices\r\n\r\n tags_to_indices --> A dictionary mapping tags to indices\r\n\r\n init_out --> A file path to which you should write your initial probabilities\r\n\r\n emit_out --> A file path to which you should write your emission probabilities\r\n\r\n trans_out --> A file path to which you should write your transition probabilities\r\n \r\n \"\"\"\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"train_input\", type=str)\r\n parser.add_argument(\"index_to_word\", type=str)\r\n parser.add_argument(\"index_to_tag\", type=str)\r\n parser.add_argument(\"hmmprior\", type=str)\r\n parser.add_argument(\"hmmemit\", type=str)\r\n parser.add_argument(\"hmmtrans\", type=str)\r\n\r\n args = parser.parse_args()\r\n global index_tag_input_file\r\n index_tag_input_file = args.index_to_tag\r\n train_data = list()\r\n with open(args.train_input, \"r\") as f:\r\n examples = f.read().strip().split(\"\\n\\n\")\r\n for example in examples:\r\n xi = [pair.split(\"\\t\") for pair in example.split(\"\\n\")]\r\n train_data.append(xi)\r\n \r\n with open(args.index_to_word, \"r\") as g:\r\n words_to_indices = {w: i for i, w in enumerate(g.read().strip().split(\"\\n\"))}\r\n \r\n with open(args.index_to_tag, \"r\") as h:\r\n tags_to_indices = {t: i for i, t in enumerate(h.read().strip().split(\"\\n\"))}\r\n \r\n return train_data, words_to_indices, tags_to_indices, args.hmmprior, args.hmmemit, args.hmmtrans\r\n\r\n\r\ndef wordLister(directory:str):\r\n examples = None\r\n with open(index_tag_input_file, \"r\") as f:\r\n examples = f.read().strip().split(\"\\n\")\r\n # print(examples)\r\n return examples\r\nif __name__ == \"__main__\":\r\n # Collect the input data\r\n\r\n # Initialize the initial, emission, and transition matrices\r\n\r\n # Increment the matrices\r\n\r\n # Add a pseudocount\r\n\r\n # Save your matrices to the output files --- the reference solution uses \r\n # np.savetxt (specify delimiter=\"\\t\" for the matrices)\r\n x,y,z,w,v,u = get_inputs()\r\n print(\"x is \"+str((x)))\r\n print(\"y is \"+str((y)))\r\n print(\"z is \"+str((z)))\r\n print(\"w is \"+str((w)))\r\n print(\"v is \"+str((v)))\r\n print(\"u is \"+str((u)))\r\n\r\n\r\n given_index_tags=wordLister(index_tag_input_file)\r\n # print(\"index tags are\" +str(given_index_tags))\r\n pi_len = len(given_index_tags)\r\n\r\n #non initialized np arrayfor init matrix\r\n init_1=np.ones([pi_len])\r\n # print(init_1)\r\n\r\n #initialize B matrix\r\n b_mat_1 = np.ones([pi_len,pi_len])\r\n\r\n #initialize emission mat\r\n emiss = np.ones([pi_len,len(y.keys())])\r\n x=x[:10000]\r\n for i in range(0,len(x)):\r\n # for td_j in td_i:\r\n # print(td_j)\r\n # print(x[i])\r\n for j in range(0,len(given_index_tags)):\r\n if x[i][0][1]==given_index_tags[j]:\r\n init_1[j]= init_1[j]+1\r\n for k in range(1,len(x[i])):\r\n for l in range(0, len(given_index_tags)):\r\n if x[i][k-1][1]==given_index_tags[j] and 
x[i][k][1]==given_index_tags[l]:\r\n b_mat_1[j][l]=b_mat_1[j][l]+1\r\n # for m in range(0,len(emiss[z.get(given_index_tags[j])])):\r\n for n in range(0,len(x[i])):\r\n emiss[z.get(x[i][n][1]),y.get(x[i][n][0])] = emiss[z.get(x[i][n][1]),y.get(x[i][n][0])] + 1\r\n\r\n for i in range(len(b_mat_1)):\r\n b_mat_1[i] = b_mat_1[i]/np.sum(b_mat_1[i])\r\n\r\n for i in range(len(emiss)):\r\n emiss[i] = emiss[i]/np.sum(emiss[i])\r\n total_init = np.sum(init_1)\r\n init_1 = init_1/total_init\r\n # print(\"final init matrix is \"+str(init_1))\r\n # print(\"final b matrix is \"+str(b_mat_1))\r\n # print(\"final emm matrix is \"+str(emiss))\r\n np.savetxt(w, init_1, delimiter='\\n')\r\n # np.savetxt(u, b_mat_1, delimiter='\\n', newline=\" \")\r\n np.savetxt(u, b_mat_1, delimiter=\" \", newline=\"\\n\")\r\n np.savetxt(v, emiss, delimiter=\" \", newline=\"\\n\")\r\n\r\n\r\n#python3 learnhmm.py en_data/train.txt en_data/index_to_word.txt en_data/index_to_tag.txt en_data/hmminit.txt en_data/hmmemit.txt en_data/hmmtrans.txt\r\n\r\n##python3 learnhmm.py fr_data/train.txt fr_data/index_to_word.txt fr_data/index_to_tag.txt fr_data/hmminit.txt fr_data/hmmemit.txt fr_data/hmmtrans.txt\r\n#python3 learnhmm.py toy_data/train.txt toy_data/index_to_word.txt toy_data/index_to_tag.txt toy_data/hmminit.txt toy_data/hmmemit.txt toy_data/hmmtrans.txt\r\n\r\n\r\n","repo_name":"academicnair009/ML_10_601_CMU","sub_path":"hidden_markov/learnhmm.py","file_name":"learnhmm.py","file_ext":"py","file_size_in_byte":4980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
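# A companion sketch for learnhmm.py above (an addition, not in the original
# file): reload the matrices it saves with np.savetxt and run the scaled HMM
# forward algorithm to get the log-likelihood of a sequence of word indices.
# The default file names mirror the example invocations in the trailing comments.
import numpy as np

def forward_loglik(obs, init_path='toy_data/hmminit.txt',
                   emit_path='toy_data/hmmemit.txt',
                   trans_path='toy_data/hmmtrans.txt'):
    pi = np.loadtxt(init_path)   # (num_tags,) initial probabilities
    B = np.loadtxt(emit_path)    # (num_tags, num_words) emission probabilities
    A = np.loadtxt(trans_path)   # (num_tags, num_tags) transition probabilities
    alpha = pi * B[:, obs[0]]
    scale = alpha.sum()
    loglik = np.log(scale)
    alpha = alpha / scale
    for t in range(1, len(obs)):
        alpha = (alpha @ A) * B[:, obs[t]]  # A[j, l] = P(tag l | tag j)
        scale = alpha.sum()                 # rescale each step to avoid underflow
        loglik += np.log(scale)
        alpha = alpha / scale
    return loglik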
+{"seq_id":"21878724309","text":"from django.shortcuts import render, redirect\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom .forms import AutorForm\nfrom .models import Autor\nfrom django.views.generic import TemplateView, ListView\n\n\n# Create your views here.\n\n# Templateview me permite renderizar templates. Es una vista basada en clase\nclass Inicio(TemplateView):\n template_name = 'index.html'\n\n\ndef crearautor(request):\n if request.method == 'POST':\n print(request.POST)\n nom = request.POST.get('nombre')\n ape = request.POST.get('apellidos')\n nacio = request.POST.get('nacionalidad')\n desc = request.POST.get('descripcion')\n autor = Autor(nombre=nom, apellidos=ape, nacionalidad=nacio, descripcion=desc)\n autor.save()\n return redirect('libro:listar_autor')\n return render(request, 'libro/crear_autor.html')\n\n\n# List view me permite listar contenido de modelos. Es una vista basada en clases.\nclass ListadoAutor(ListView):\n model = Autor\n template_name = 'libro/listar_autor.html'\n context_object_name = 'autores' # en el html cuando hago el if pongo este nombre \"if autores\" hace x cosa\n queryset = Autor.objects.filter(estado=True).order_by('id')\n\n\ndef editarautor(request, id):\n autor_form = None\n error = None\n try:\n autor = Autor.objects.get(id=id)\n if request.method == 'GET':\n autor_form = AutorForm(instance=autor)\n else:\n autor_form = AutorForm(request.POST, instance=autor)\n if autor_form.is_valid():\n autor_form.save()\n return redirect('index')\n except ObjectDoesNotExist as e:\n error = e\n return render(request, 'libro/crear_autor.html', {'autor_form': autor_form, 'error': error})\n\n\ndef eliminarautor(request, id):\n autor = Autor.objects.get(id=id)\n autor.estado = False\n autor.save()\n return redirect('libro:listar_autor')\n","repo_name":"angelogaliazzi/biblioteca","sub_path":"apps/libro/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"15403412798","text":"import unittest\n\nfrom tests.setup import to_dwc\n\nLABEL = \"color\"\n\n\nclass TestColor(unittest.TestCase):\n def test_color_dwc_01(self):\n self.assertEqual(\n to_dwc(LABEL, \"male leaf margin green\"),\n {\"dwc:dynamicProperties\": {\"maleLeafMarginColor\": \"green\"}},\n )\n\n def test_color_dwc_02(self):\n self.assertEqual(\n to_dwc(LABEL, \"flower petals not purple-spotted\"),\n {\"dwc:dynamicProperties\": {\"missingFlowerPetalColor\": \"purple-spotted\"}},\n )\n","repo_name":"rafelafrance/FloraTraiter","sub_path":"tests/dwc/test_color.py","file_name":"test_color.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
+{"seq_id":"39998513259","text":"\"\"\"Perform tasks against a remote host.\"\"\"\nfrom config import (host,\n user,\n ssh_key_filepath,\n local_file_directory,\n remote_path)\nfrom files import fetch_local_files\nfrom client import RemoteClient\n\n\ndef main():\n \"\"\"Initialize remote host client and execute actions.\"\"\"\n remote = RemoteClient(host, user, ssh_key_filepath, remote_path)\n #upload_files_to_remote(remote)\n execute_command_on_remote(remote)\n remote.disconnect()\n\n\ndef upload_files_to_remote(remote):\n \"\"\"Upload files to remote via SCP.\"\"\"\n files = fetch_local_files(local_file_directory)\n remote.bulk_upload(files)\n\n\ndef execute_command_on_remote(remote):\n \"\"\"Execute UNIX command on the remote host.\"\"\"\n remote.execute_commands(['python3 query_db.py > results.txt', 'cat results.txt'])\n","repo_name":"mfranzon/Python-ssh","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"41868932002","text":"import os\nimport subprocess\nimport sys\n\nimport joblib\n\nRUNNER_PATH = 'runner.py'\nDEBUG = True\n\nHARD_TIMEOUT = 3600 * 300\nNTHREADS = 8\nSEED = 42\n\n\ndef run_task(name, gpu, benchmark_path, data_path, dataset, task, fold, trial_name, params, rewrite=False):\n run_name = dataset + '_' + task + '_' + trial_name + '_' + str(fold)\n\n print('Processing', run_name)\n\n benchmark_path = os.path.abspath(benchmark_path)\n data_path = os.path.abspath(data_path)\n output = os.path.join(benchmark_path, name, dataset, task, str(trial_name), 'fold_{0}'.format(fold))\n\n os.makedirs(output, exist_ok=True)\n\n success_flg = os.path.join(output, 'SUCCESS')\n\n if os.path.exists(success_flg) and not rewrite:\n return\n\n # clean folder\n for f in (x for x in os.listdir(output) if not x.startswith('.')):\n os.remove(os.path.join(output, f))\n\n joblib.dump(params, os.path.join(output, 'params.pkl'))\n\n # TRAIN\n try:\n\n script = \"\"\n\n log = subprocess.check_output(script + ' '.join([\n sys.executable,\n RUNNER_PATH,\n '-b', benchmark_path,\n '-p', data_path,\n '-k', dataset,\n '-f', str(fold),\n '-n', str(NTHREADS),\n '-s', str(SEED),\n '-d', ','.join(map(str, gpu)),\n '-o', output,\n '-r', task\n\n ]), shell=True, stderr=subprocess.STDOUT, executable='/bin/bash').decode()\n\n if DEBUG:\n print(log)\n\n with open(success_flg, 'w') as f:\n pass\n\n with open(os.path.join(output, 'train_log.txt'), 'w') as f:\n f.write(log)\n\n except subprocess.CalledProcessError as e:\n\n print(e.output.decode())\n\n with open(os.path.join(output, 'ERROR'), 'w') as f:\n pass\n\n with open(os.path.join(output, 'train_log.txt'), 'w') as f:\n f.write(e.output.decode())\n\n except subprocess.TimeoutExpired:\n\n with open(os.path.join(output, 'TIMEOUT'), 'w') as f:\n pass\n\n print('HARD TIMEOUT!')\n\n results = joblib.load(os.path.join(output, 'results.pkl'))\n return results\n\n\ndef run_cv_loop(name, gpu, benchmark_path, data_path, dataset, task, trial_name, params, rewrite=False):\n res = []\n\n for i in range(5):\n results = run_task(name, gpu, benchmark_path, data_path, dataset, task, i, trial_name, params, rewrite=rewrite)\n\n res.append(results)\n\n return res\n","repo_name":"sb-ai-lab/sketchboost-paper","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"78"}
+{"seq_id":"73890981690","text":"import math\nimport os\nimport pickle\nimport re\nimport bisect\n\nfrom base import Frequency\nfrom data import mokassar\n\n\nclass Dictionary:\n main_dict = {}\n \"\"\"\n {\n token1:{\n doc_1:{\n 'frequency': 2,\n 'list':[23, 94],\n }\n doc_2:{\n 'frequency': 1,\n 'list':[35],\n }\n }\n }\n \"\"\"\n\n token_doc_frequency_dict = {}\n\n tf_idf_dict = {}\n \"\"\"\n {\n doc_1: {\n token1: 0.5,\n token2: 0.1,\n token3: 0.8,\n }\n }\n \"\"\"\n\n idf_dict = {}\n \"\"\"\n {\n doc_1: 0.8,\n }\n \"\"\"\n\n champion_dict = {}\n \"\"\"\n {\n word1: [],\n }\n \"\"\"\n doc_element_squares_dict = {}\n token_term_frequency_dict = {}\n stop_words = []\n docs_num = 0\n\n docs_dir = 'sampleDoc/'\n\n main_all_tokens_num = 0\n\n def __init__(self, docs_dir: str = None, main_dict_dir: str = None,\n stop_words_dir: str = None):\n self.docs_dir = docs_dir\n self.main_dict_dir = main_dict_dir\n self.stop_words_dir = stop_words_dir\n self.docs_num = self.docs_num\n self.id2path = {}\n self.path2id = {}\n self.generate_file_paths()\n\n def generate_file_paths(self):\n doc_id = 1\n for root, d_names, f_names in os.walk(self.docs_dir):\n for f in f_names:\n d = os.path.join(root, f)\n self.id2path[doc_id] = d\n self.path2id[d] = doc_id\n doc_id += 1\n self.docs_num = doc_id\n\n\n def get_doc_path_by_id(self, doc_id):\n return self.id2path.get(doc_id, 'Not included')\n\n def get_doc_id_by_path(self, doc_path):\n return self.path2id.get(doc_path, 'Not included')\n\n def make_dictionary(self):\n all_tokens_num = 0\n for doc_id in self.id2path.keys():\n with open(self.id2path[doc_id], encoding='utf8') as f:\n line = f.readline()\n cnt = 1\n position = 0\n while line:\n line = line.strip()\n normalized_text = self.normalization(line)\n # print(normalized_text)\n # tokens = self.tokenization(normalized_text)\n tokens = self.tokenization(normalized_text)\n # stemmed = self.stemmer(tokens)\n for token in tokens:\n if len(token) < 2:\n continue\n # print(token)\n self.token_term_frequency_dict[token] = self.token_term_frequency_dict.get(token, 0) + 1\n all_tokens_num = all_tokens_num + 1\n self.update_dictionary(doc_id, token, position)\n position += 1\n\n line = f.readline()\n cnt += 1\n self.main_all_tokens_num = all_tokens_num\n\n # self.remove_stop_words()\n # normalized_text = normalization(text)\n # tokens = tokenization(normalized_text)\n # stemmed = stemmer(tokens)\n # tokens = remove_stop_words(stemmed)\n # for position in range(0, len(tokens)):\n # word = tokens[position]\n # update_dictionary(doc_id, word, position)\n\n def update_dictionary(self, doc_id, word, position):\n if self.main_dict.get(word, None) is None:\n self.main_dict[word] = {}\n if self.main_dict[word].get(doc_id, None) is None:\n self.main_dict[word][doc_id] = {'frequency': 0, 'list': []}\n\n self.main_dict[word][doc_id]['frequency'] += 1\n self.main_dict[word][doc_id]['list'].append(position)\n\n def normalization(self, data):\n normal_data = re.sub('\\u200c|\\u200b|\\u200d|\\u200e|\\u200f|\\u202c|\\xad|\\ufeff|_|\\u2067|\\u2069|\\x7f', ' ', data)\n normal_data = re.sub('[|{}=;&«»%/+*!@#$.؛:\",،)(?؟]|-|\\d+|[a-zA-Z]', ' ', normal_data)\n normal_data = normal_data.replace(\"'\", \" \")\n normal_data = normal_data.replace(\"ـ\", \" \")\n normal_data = normal_data.replace(\"]\", \" \")\n normal_data = normal_data.replace(\"[\", \" \")\n normal_data = normal_data.replace('\\n', \" \")\n normal_data = normal_data.replace('\\r', \" \")\n normal_data = re.sub('[ء]', ' ', normal_data)\n normal_data = re.sub('[ؤ]', 'و', 
normal_data)\n normal_data = re.sub('[ۀ]', 'ه', normal_data)\n normal_data = re.sub('[َ ِ ُ ّ ً ]', ' ', normal_data)\n normal_data = re.sub('[ْْ ]', ' ', normal_data)\n normal_data = re.sub('[ئ]', 'ی', normal_data)\n normal_data = re.sub('[ْي]', 'ی', normal_data)\n normal_data = re.sub('[ك]', 'ک', normal_data)\n normal_data = re.sub('[إاٌآأ]', 'ا', normal_data)\n normal_data = re.sub('[ْ…]', ' ', normal_data)\n\n return normal_data\n\n def find_stop_words(self):\n abundance_rate = 1 / 100\n for word, frequency in self.token_term_frequency_dict.items():\n if frequency / self.main_all_tokens_num > abundance_rate and len(\n self.main_dict[word].keys()) / self.docs_num > 0.6:\n self.stop_words.append(word)\n\n def remove_stop_words_from_dictionary(self):\n for word in self.stop_words:\n self.main_dict.pop(word)\n\n def remove_stop_words(self, tokens):\n new_tokens = []\n for token in tokens:\n if token not in self.stop_words:\n new_tokens.append(token)\n return new_tokens\n\n def stemmer(self, tokens):\n return tokens\n verbAffix = [\"*ش\", \"*نده\", \"*ا\", \"*ار\", \"وا*\", \"اثر*\", \"فرو*\", \"پیش*\", \"گرو*\", \"*ه\", \"*گار\", \"*ن\"]\n ends = ['ات',\n 'ان',\n 'ترین',\n 'تر',\n 'م', 'ت', 'ش', 'یی', 'ی', 'ها', 'ٔ', 'ا', '']\n\n suffix = [\"كار\", \"ناك\", \"وار\", \"آسا\", \"آگین\", \"بار\", \"بان\", \"دان\", \"زار\", \"سار\", \"سان\", \"لاخ\", \"مند\", \"دار\",\n \"مرد\",\n \"کننده\", \"گرا\", \"نما\", \"متر\"]\n\n prefix = [\"بی\", \"با\", \"پیش\", \"غیر\", \"فرو\", \"هم\", \"نا\", \"یک\"]\n\n def stem(word):\n for end in ends:\n if word.endswith(end):\n word = word[:-len(end)]\n\n if word.endswith('ۀ'):\n word = word[:-1] + 'ه'\n\n return word\n\n new_tokens = []\n for token in tokens:\n if token in mokassar:\n new_tokens.append(mokassar[token])\n else:\n new_tokens.append(token)\n j = 0\n # for affix in verbAffix:\n # if (j == 0 and (token[-1] == 'ا' or token[-1] == 'و')):\n # sTemp = affix.replace(\"*\", token + \"ی\")\n # else:\n # sTemp = affix.replace(\"*\", token)\n #\n # if normalizeValidation(sTemp, True):\n # return affix\n # j = j + 1\n # return \"\"\n return new_tokens\n\n def tokenization(self, text):\n return text.split(' ')\n\n def get_token_docs_ids(self, token):\n token_info = self.main_dict.get(token, None)\n if token_info:\n return token_info.keys()\n return []\n\n def get_token_champion_docs_ids(self, token, threshold=1):\n return [freq.doc for freq in self.champion_dict.get(token, []) if freq.frequency >= threshold]\n\n def save_main_dict(self, save_dir, name):\n a_file = open(save_dir + '/' + name + '.pkl', \"wb\")\n pickle.dump(self.main_dict, a_file)\n a_file.close()\n\n def load_main_dict(self, save_dir, name):\n a_file = open(save_dir + '/' + name + '.pkl', \"rb\")\n self.main_dict = pickle.load(a_file)\n a_file.close()\n\n def save_stop_words(self, save_dir, name):\n a_file = open(save_dir + '/' + name + '.pkl', \"wb\")\n pickle.dump(self.stop_words, a_file)\n a_file.close()\n\n def load_stop_words(self, save_dir, name):\n a_file = open(save_dir + '/' + name + '.pkl', \"rb\")\n self.stop_words = pickle.load(a_file)\n a_file.close()\n\n def save_frequency_dict(self, save_dir, name):\n a_file = open(save_dir + '/' + name + '.pkl', \"wb\")\n pickle.dump(self.token_term_frequency_dict, a_file)\n a_file.close()\n\n def load_frequency_dict(self, save_dir, name):\n a_file = open(save_dir + '/' + name + '.pkl', \"rb\")\n self.token_term_frequency_dict = pickle.load(a_file)\n a_file.close()\n\n def fill_tf_idf_empty_dict(self):\n for word in self.main_dict.keys():\n 
for doc_id in self.main_dict[word]:\n                self.tf_idf_dict[doc_id] = {}\n\n    def fill_token_doc_frequency(self):\n        for word in self.main_dict.keys():\n            self.token_doc_frequency_dict[word] = len(self.main_dict[word].keys())\n            self.idf_dict[word] = self.calculate_idf(self.token_doc_frequency_dict[word])\n\n    def fill_tf_idf_dict(self):\n        self.fill_token_doc_frequency()\n        self.fill_tf_idf_empty_dict()\n        for word in self.main_dict.keys():\n            for doc_id in self.main_dict[word].keys():\n                tfidf = self.calculate_tf(self.main_dict[word][doc_id]['frequency']) \\\n                        * self.calculate_idf(self.token_doc_frequency_dict[word])\n\n                self.tf_idf_dict[doc_id][word] = tfidf\n                # if tfidf < 0:\n                #     print('tf_idf is negative')\n                self.doc_element_squares_dict[doc_id] = self.doc_element_squares_dict.get(doc_id, 0) + tfidf ** 2\n\n    def normalize_tf_idf(self):\n        for doc_id in self.tf_idf_dict.keys():\n            doc_vector_size = math.sqrt(self.doc_element_squares_dict[doc_id])\n            for word in self.tf_idf_dict[doc_id].keys():\n                self.tf_idf_dict[doc_id][word] /= doc_vector_size\n\n    def calculate_tf(self, frequency):\n        tf = float(1) + math.log10(frequency)\n        if tf < 0:\n            print('tf is negative', frequency)\n        return tf\n\n    def calculate_idf(self, frequency):\n        idf = math.log10(self.docs_num / frequency)\n        if idf <= 0:\n            print('idf is non-positive', self.docs_num / frequency)\n        return idf\n\n    def fill_champion_dict(self, champion_list_size):\n        for token, token_info in self.main_dict.items():\n            l = []\n            for doc_id, tok_doc_info in token_info.items():\n                bisect.insort(l, Frequency(doc_id, tok_doc_info['frequency']))\n            l.reverse()\n            self.champion_dict[token] = l[:champion_list_size]\n\n    def add_doc(self):\n        \"\"\"use only when adding just a few docs\"\"\"\n        pass\n\n\nif __name__ == '__main__':\n    d = Dictionary(docs_dir='sampleDoc/')\n    d.make_dictionary()\n    # print(d.frequency_dict)\n    # print(d.main_dict)\n    # print(d.main_dict.get('ریال', ''))\n    # print(d.get_token_docs_ids('ریال'))\n    # print(sorted(d.frequency_dict.items(), key=lambda x: x[1], reverse=False))\n    d.find_stop_words()\n    # print(d.stop_words)\n    d.remove_stop_words_from_dictionary()\n    d.fill_tf_idf_dict()\n    d.normalize_tf_idf()\n    d.fill_champion_dict(10)\n    print(d.token_doc_frequency_dict['جام'])\n    print(d.tf_idf_dict[1])\n    print(d.champion_dict['ریال'])\n    # d.save_main_dict(d.main_dict_dir, 'DICT')\n","repo_name":"HosseinMohammadii/IR_SearchEngine","sub_path":"collect.py","file_name":"collect.py","file_ext":"py","file_size_in_byte":11583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
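# A hedged sketch of how a query could be ranked against the index built by
# the Dictionary class above (the shown file has no search method): weight
# the query terms with tf-idf, restrict candidates to the champion lists,
# and score by dot product with the normalized vectors in tf_idf_dict.
import math
from collections import Counter

def score_query(d, query_tokens, k=5):
    counts = Counter(t for t in query_tokens if t in d.main_dict)
    scores = {}
    for token, freq in counts.items():
        q_weight = (1 + math.log10(freq)) * d.idf_dict.get(token, 0.0)
        for doc_id in d.get_token_champion_docs_ids(token):
            doc_weight = d.tf_idf_dict.get(doc_id, {}).get(token, 0.0)
            scores[doc_id] = scores.get(doc_id, 0.0) + q_weight * doc_weight
    # highest cosine-style score first
    return sorted(scores.items(), key=lambda kv: kv[1], reverse=True)[:k]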
+{"seq_id":"73795347451","text":"import threading\nimport json\nimport requests\nfrom ui_main_window import *\nfrom PyQt5.QtCore import QTimer,QDateTime\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5.QtGui import QImage\nfrom PyQt5.QtWidgets import QToolTip\nfrom PyQt5.QtWidgets import QWidget\nfrom PyQt5.QtWidgets import QMessageBox\nfrom PyQt5.QtWidgets import QApplication\nfrom PyQt5.QtWidgets import QFileDialog\nfrom PyQt5.QtWidgets import *\n\nfrom firebase import firebase\nimport yaml\nimport matplotlib as mp\nimport urllib\nimport http.client\nimport math\nimport datetime\nimport time\nimport numpy as np\nimport cv2\nimport sys\n\n\n# import some PyQt5 modules\n\n\nclass MainWindow(QWidget):\n # class constructor\n def __init__(self):\n # call QWidget constructor\n super().__init__()\n self.ui = Ui_Form()\n self.ui.setupUi(self)\n '''\n self.topFiller = QWidget()\n self.topFiller.setMinimumSize(250, 2000)\n self.scroll = QScrollArea()\n self.scroll.setWidget(self.topFiller)\n self.ui.Entrance.addWidget(self.scroll)\n '''\n self.detec = []\n self.count = 34\n #detec = []\n self.detect_y = []\n self.num = [0]\n # total remain parking space count\n self.remainA = 17\n self.remainB = 17\n # caculate center point's position\n self.offset = 2\n self.line_pos = 130\n # AVOID continuous counting\n # 1: allow_count\n # 0: disallow_count\n self.Iscount = 1\n # AVOID continuous counting in this time range\n self.restTime = 2\n # AVOID detec(x,y) remove again\n self.IsDOWNdetectRemove = 0\n self.IsUPdetectRemove = 0\n # allow upload to Cloud when count != pre_count\n # Assignment previous value(count) to pre_count\n self.pre_count = 34\n self.pre_countA = 17\n self.pre_countB = 17\n # Assignment cascadeDetect result to cars\n self.cars = 0\n\n # define da car in or out for traffic light (optional)\n self.Iscarin = 0\n\n # to check whether the carin or out\n self.framecount = 0\n self.Frame = 0\n\n # Car's direction\n # IN : 1\n # OUT : -1\n self.direction = 0\n self.starttime = 0\n self.detail = [\"\",\"\",\"\",\"\",\"\",\"\"]\n self.movement = \"Initialization\"\n self.lastFrame = 0\n \n self.firebase_url = 'https://test-7f2de.firebaseio.com/'\n self.key=\"Z61Y6gfIJzqhCWI5RHre35Xgsld8tvLZUWCWQ2Lo\"\n self.authentication = firebase.FirebaseAuthentication(self.key, 'g0930421313@gmail.com')\n firebase.authentication = self.authentication \n self.user = self.authentication.get_user() #獲取使用者資訊\n self.firebase = firebase.FirebaseApplication('https://test-7f2de.firebaseio.com/', authentication=self.authentication)\n \n self.firebase.put(\"/remain\",\"Entrance\",self.count)\n self.firebase.put(\"/remain\",\"area_A\",self.remainA)\n self.firebase.put(\"/remain\",\"area_B\",self.remainB)\n \n \n #print(datetime.toString())\n # load face cascade classifier\n self.car_cascade = cv2.CascadeClassifier('./car.xml')\n if self.car_cascade.empty():\n QMessageBox.information(self, \"Error Loading cascade classifier\",\n \"Unable to load the Car cascade classifier xml file :(\")\n #sys.exit()\n\n # create a timer\n self.timer = QTimer()\n self.timerA = QTimer()\n self.timerB = QTimer()\n self.timer2 = QTimer()\n self.timer_texttime = QTimer()\n self.timer_texttime.start()\n self.timer_texttime.timeout.connect(self.texttime)\n\n # set control_bt callback clicked function\n \n self.cap = cv2.VideoCapture(\"./Entrance.mp4\")\n self.capA = cv2.VideoCapture(\"./areaA.mp4\")\n self.capB = cv2.VideoCapture(\"./areaB.mp4\")\n \n self.ui.control_bt.clicked.connect(self.controlTimer)\n\n # set timer timeout callback 
function\n self.timer.timeout.connect(\n lambda: self.detectCarE(self.cap, self.ui.Entrance))\n self.timerA.timeout.connect(\n lambda: self.detectCarA(self.capA, self.ui.areaA))\n self.timerB.timeout.connect(\n lambda: self.detectCarB(self.capB, self.ui.areaB))\n\n self.ui.close_bt.clicked.connect(self.close_btn)\n #read YML\n self.ui.selectYML_btn.clicked.connect(self.getYMLpath)\n self.ui.confirm_path_btn.clicked.connect(self.confirmYMLpath)\n #####################space#########################################\n self.YMLPath = \"\"\n self.IsreadYML = False\n self.spaceopen = True\n if self.spaceopen == True:\n self.ui.control_bt.clicked.connect(self.detectspace)\n self.timer2.timeout.connect(self.detectspace)\n self.fn = \"./parkinglot_1_480p.mp4\"\n self.fn_yaml = \"\"\n self.capspace = cv2.VideoCapture(self.fn)\n self.config = {'save_video': False,\n 'text_overlay': True,\n 'parking_overlay': True,\n 'parking_id_overlay': True,\n 'parking_detection': True,\n 'motion_detection': False,\n 'pedestrian_detction': False,\n 'min_area_motion_contour': 200,\n 'park_laplacian_th': 1.8,\n 'park_sec_to_wait': 5,\n 'start_frame': 0} # 35000\n self.video_info = {'fps': self.capspace.get(cv2.CAP_PROP_FPS),\n 'width': int(self.capspace.get(cv2.CAP_PROP_FRAME_WIDTH)),\n 'height': int(self.capspace.get(cv2.CAP_PROP_FRAME_HEIGHT)),\n 'fourcc': self.capspace.get(cv2.CAP_PROP_FOURCC),\n 'num_of_frames': int(self.capspace.get(cv2.CAP_PROP_FRAME_COUNT))}\n self.parking_contours = [] # Parking spaces four points\n self.parking_bounding_rects = []\n self.parking_mask = []\n self.pre_countSpace = int('100000000000000000',2)\n #self.countSpace = int('100000000000000000',2)\n self.capspace.set(cv2.CAP_PROP_POS_FRAMES,\n self.config['start_frame']) # jump to frame\n '''\n if self.config['pedestrian_detction']: # tracePeople\n self.hog = cv2.HOGDescriptor()\n self.hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())\n if self.config['motion_detection']:\n self.fgbg = cv2.createBackgroundSubtractorMOG2(\n history=300, varThreshold=16, detectShadows=True)\n '''\n \n #####################space#########################################\n def getYMLpath(self):\n fname = QFileDialog.getOpenFileName(self, 'Open file',\n 'home/stu/', \"yml files (*.yml )\")\n self.YMLPath = fname[0]\n \n self.ui.displayYMLpath.setText(self.YMLPath)\n def confirmYMLpath(self):\n self.fn_yaml = self.YMLPath\n with open(self.fn_yaml, 'r') as stream:\n self.parking_data = yaml.load(stream) \n for park in self.parking_data:\n points = np.array(park['points'])\n rect = cv2.boundingRect(points)\n points_shifted = points.copy()\n points_shifted[:,0] = points[:,0] - rect[0] # shift contour to roi\n points_shifted[:,1] = points[:,1] - rect[1]\n self.parking_contours.append(points)\n\n self.parking_bounding_rects.append(rect)\n\n mask = cv2.drawContours(np.zeros((rect[3], rect[2]), dtype=np.uint8), [points_shifted], contourIdx=-1,\n color=255, thickness=-1, lineType=cv2.LINE_8)\n mask = mask == 255\n self.parking_mask.append(mask)\n #print(self.parking_bounding_rects)\n kernel_erode = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3)) # morphological kernel\n\n kernel_dilate = cv2.getStructuringElement(cv2.MORPH_RECT,(5,19))\n self.parking_status = [False]*len(self.parking_data)\n self.parking_buffer = [None]*len(self.parking_data)\n #print(self.parking_status) \n def close_btn(self):\n # stop timer\n self.timer.stop()\n self.timerA.stop()\n self.timerB.stop()\n self.timer2.stop()\n # release video capture\n self.cap.release()\n 
self.capA.release()\n self.capB.release()\n self.capspace.release()\n QtWidgets.qApp.quit() \n def textdetail(self):\n \n self.ui.text_detail.setText(str(self.detail[0])+\"\\n\"+str(self.detail[1])+\"\\n\"\n +str(self.detail[2])+\"\\n\"+str(self.detail[3])+\"\\n\"\n +str(self.detail[4])+\"\\n\"+str(self.detail[5])+\"\\n\")\n def texttime(self):\n t = time.time()\n date = datetime.datetime.fromtimestamp(t).strftime('%Y/%m/%d , %H:%M:%S')\n self.ui.Text_time.setText(\"現在時間 : \"+date)\n\n def textCount(self,value): \n self.ui.textCount.setText(str(\"入口 : \")+format(value))\n\n def textAremain(self, value):\n self.ui.textAremain.setText(str(\"A區 : \")+format(value))\n\n def textBremain(self, value):\n self.ui.textBremain.setText(str(\"B區 : \")+format(value))\n\n def Display(self, Qimg, Textlabel):\n Textlabel.setPixmap(QPixmap.fromImage(Qimg))\n\n def detectCarE(self, capV, labelshow):\n #print(self.text_detail)\n def catch_center(x, y, w, h):\n x1 = int(w / 2)\n y1 = int(h / 2)\n cx = x + x1\n cy = y + y1\n return cx, cy\n\n def center_y(x, y, w, h):\n x1 = int(w / 2)\n y1 = int(h / 2)\n cx = x + x1\n cy = y + y1\n return cy\n\n def post():\n '''\n \n self.detail.insert(0,str(\"Entrance : \")+post_to_thingspeak.post_to_thingspeak(params)+self.movement)\n self.textdetail()\n '''\n t = time.time()\n date = datetime.datetime.fromtimestamp(t).strftime('%Y/%m/%d , %H:%M:%S')\n #data = {'area':'A','remain':self.count} \n self.firebase.put(\"/remain\",\"Entrance\",self.count)\n self.detail.insert(0,str(\"入口\")+self.movement+'\\n時間:'+date+'\\n========================')\n self.textdetail()\n print(\"post_to_field1...\")\n \n ret, frame = capV.read()\n \n if ret == False:\n QMessageBox.information(\n self, \"Error Loading video\", \"Unable to load the Entrace video\")\n self.timer.stop() \n \n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n self.cars = self.car_cascade.detectMultiScale(gray, 1.1, 10)\n\n cv2.line(frame, (25, self.line_pos),\n (1200, self.line_pos), (255, 0, 0), 2)\n\n nowframetime = datetime.datetime.now()\n self.Iscarin = 0\n self.Frame += 1\n \n for (x, y, w, h) in self.cars:\n\n cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)\n\n self.Iscarin = 1\n center = catch_center(x, y, w, h)\n # print(self.detec)\n centery = center_y(x, y, w, h)\n self.detec.append(center)\n self.detect_y.append(centery)\n cv2.circle(frame, center, 4, (0, 0, 255), -1)\n startframetime = datetime.datetime.now()\n nowframetime = startframetime+datetime.timedelta(seconds=5)\n\n self.framecount += 1\n beforecount = self.framecount-3\n f1 = self.detect_y[self.framecount-1:self.framecount+1]\n f2 = self.detect_y[beforecount:beforecount+1]\n\n # 303,40\n num = list(map(lambda x: x[0]-x[1], zip(f1, f2)))\n\n self.direction = np.sign(num)\n\n for (x, y) in self.detec:\n\n if y < (self.line_pos+self.offset) and y > (self.line_pos-self.offset) and self.direction == 1:\n\n if self.Iscount == 0: # 0 remove x_y\n\n endtime = datetime.datetime.now()\n nowtime = self.starttime + \\\n datetime.timedelta(seconds=self.restTime)\n self.detec.remove((x, y))\n self.IsDOWNdetectRemove = 1\n if endtime >= nowtime:\n self.Iscount = 1\n if self.Iscount == 1:\n\n self.count -= 1\n\n cv2.line(frame, (25, self.line_pos),\n (1200, self.line_pos), (0, 127, 255), 3)\n if self.IsDOWNdetectRemove == 0:\n self.detec.remove((x, y))\n self.Iscount = 0\n self.starttime = datetime.datetime.now()\n #cv2.imwrite('frame%d.jpg'%Frame , frame)\n # time.sleep(3)\n self.lastFrame = self.Frame\n print(\"Cars detected so far: 
\"+str(self.count))\n self.movement = \"進來一輛車\"\n\n if y > (self.line_pos-self.offset) and y < (self.line_pos+self.offset) and self.direction == -1:\n\n if self.Iscount == 0:\n\n endtime = datetime.datetime.now()\n nowtime = self.starttime + \\\n datetime.timedelta(seconds=self.restTime)\n self.detec.remove((x, y))\n self.IsUPdetectRemove = 1\n if endtime >= nowtime:\n self.Iscount = 1\n if self.Iscount == 1:\n\n self.count += 1\n\n cv2.line(frame, (25, self.line_pos),\n (1200, self.line_pos), (0, 127, 255), 3)\n if self.IsUPdetectRemove == 0:\n self.detec.remove((x, y))\n self.Iscount = 0\n self.starttime = datetime.datetime.now()\n #cv2.imwrite('frame%d.jpg'%Frame , frame)\n # time.sleep(3)\n print(\"Cars detected so far: \"+str(self.count))\n self.movement =\"出去一輛車\"\n\n if self.pre_count != self.count:\n p = threading.Thread(target=post)\n p.start()\n # t.join()\n\n self.pre_count = self.count\n\n # time.sleep(0.15)\n if self.Iscarin == 1:\n cv2.circle(frame, (303, 40), 10, (0, 0, 255), -1)\n\n else:\n cv2.circle(frame, (303, 40), 10, (0, 255, 0), -1)\n self.Iscarin = 0\n if self.Frame == self.lastFrame+300:\n self.movement = \"\"\n self.lastFrame = 0\n\n # display the resulting frame\n\n self.textCount(self.count)\n # self.textAremain(self.remainA)\n # self.textBremain(self.remainB)\n cv2.putText(frame, \"E\", (27, 50),\n cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 3)\n \n # get frame infos\n height, width, channel = frame.shape\n\n step = channel * width\n\n # create QImage from RGB frame\n qImg = QImage(frame.data, width, height, step, QImage.Format_RGB888)\n # show frame in img_label\n labelshow.setPixmap(QPixmap.fromImage(qImg))\n\n def detectCarA(self, capV, labelshow):\n\n def catch_center(x, y, w, h):\n x1 = int(w / 2)\n y1 = int(h / 2)\n cx = x + x1\n cy = y + y1\n return cx, cy\n\n def center_y(x, y, w, h):\n x1 = int(w / 2)\n y1 = int(h / 2)\n cx = x + x1\n cy = y + y1\n return cy\n\n def post():\n t = time.time()\n date = datetime.datetime.fromtimestamp(t).strftime('%Y/%m/%d , %H:%M:%S')\n #data = {'area':'A','remain':self.count} \n self.firebase.put(\"/remain\",\"area_A\",self.remainA)\n self.detail.insert(0,str(\"A區\")+self.movement+'\\n時間:'+date+'\\n========================')\n self.textdetail()\n print(\"post_to_field2...\")\n\n \n\n ret, frame = capV.read()\n if ret == False:\n \n QMessageBox.information(\n self, \"Error Loading video\", \"Unable to load the areaA video\")\n self.timerA.stop() \n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n self.cars = self.car_cascade.detectMultiScale(gray, 1.1, 10)\n\n cv2.line(frame, (25, self.line_pos),\n (1200, self.line_pos), (255, 0, 0), 2)\n\n nowframetime = datetime.datetime.now()\n self.Iscarin = 0\n self.Frame += 1\n\n for (x, y, w, h) in self.cars:\n\n cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)\n\n self.Iscarin = 1\n center = catch_center(x, y, w, h)\n # print(self.detec)\n centery = center_y(x, y, w, h)\n self.detec.append(center)\n self.detect_y.append(centery)\n cv2.circle(frame, center, 4, (0, 0, 255), -1)\n startframetime = datetime.datetime.now()\n nowframetime = startframetime+datetime.timedelta(seconds=5)\n\n self.framecount += 1\n beforecount = self.framecount-3\n f1 = self.detect_y[self.framecount-1:self.framecount+1]\n f2 = self.detect_y[beforecount:beforecount+1]\n\n # 303,40\n num = list(map(lambda x: x[0]-x[1], zip(f1, f2)))\n\n self.direction = np.sign(num)\n\n for (x, y) in self.detec:\n\n if y < (self.line_pos+self.offset) and y > (self.line_pos-self.offset) and self.direction == 1:\n\n 
if self.Iscount == 0: # 0 remove x_y\n\n endtime = datetime.datetime.now()\n nowtime = self.starttime + \\\n datetime.timedelta(seconds=self.restTime)\n self.detec.remove((x, y))\n self.IsDOWNdetectRemove = 1\n if endtime >= nowtime:\n self.Iscount = 1\n if self.Iscount == 1:\n\n self.remainA -= 1\n\n cv2.line(frame, (25, self.line_pos),\n (1200, self.line_pos), (0, 127, 255), 3)\n if self.IsDOWNdetectRemove == 0:\n self.detec.remove((x, y))\n self.Iscount = 0\n self.starttime = datetime.datetime.now()\n #cv2.imwrite('frame%d.jpg'%Frame , frame)\n # time.sleep(3)\n self.lastFrame = self.Frame\n print(\"Cars detected so far: \"+str(self.remainA))\n self.movement = \"進來一輛車\"\n\n if y > (self.line_pos-self.offset) and y < (self.line_pos+self.offset) and self.direction == -1:\n\n if self.Iscount == 0:\n\n endtime = datetime.datetime.now()\n nowtime = self.starttime + \\\n datetime.timedelta(seconds=self.restTime)\n self.detec.remove((x, y))\n self.IsUPdetectRemove = 1\n if endtime >= nowtime:\n self.Iscount = 1\n if self.Iscount == 1:\n\n self.remainA += 1\n\n cv2.line(frame, (25, self.line_pos),\n (1200, self.line_pos), (0, 127, 255), 3)\n if self.IsUPdetectRemove == 0:\n self.detec.remove((x, y))\n self.Iscount = 0\n self.starttime = datetime.datetime.now()\n #cv2.imwrite('frame%d.jpg'%Frame , frame)\n # time.sleep(3)\n print(\"Cars detected so far: \"+str(self.remainA))\n self.movement = \"出去一輛車\"\n\n if self.pre_countA != self.remainA:\n p = threading.Thread(target=post)\n p.start()\n # t.join()\n\n self.pre_countA = self.remainA\n\n # time.sleep(0.15)\n if self.Iscarin == 1:\n cv2.circle(frame, (303, 40), 10, (0, 0, 255), -1)\n\n else:\n cv2.circle(frame, (303, 40), 10, (0, 255, 0), -1)\n self.Iscarin = 0\n if self.Frame == self.lastFrame+300:\n self.movement = \"\"\n self.lastFrame = 0\n\n # display the resulting frame\n\n # self.textCount(self.count)\n self.textAremain(self.remainA)\n # self.textBremain(self.remainB)\n cv2.putText(frame, \"A\", (27, 50),\n cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 3)\n \n # get frame infos\n height, width, channel = frame.shape\n step = channel * width\n\n # create QImage from RGB frame\n qImg = QImage(frame.data, width, height, step, QImage.Format_RGB888)\n # show frame in img_label\n labelshow.setPixmap(QPixmap.fromImage(qImg))\n\n def detectCarB(self, capV, labelshow):\n\n def catch_center(x, y, w, h):\n x1 = int(w / 2)\n y1 = int(h / 2)\n cx = x + x1\n cy = y + y1\n return cx, cy\n\n def center_y(x, y, w, h):\n x1 = int(w / 2)\n y1 = int(h / 2)\n cx = x + x1\n cy = y + y1\n return cy\n\n def post():\n t = time.time()\n date = datetime.datetime.fromtimestamp(t).strftime('%Y/%m/%d , %H:%M:%S')\n #data = {'area':'A','remain':self.count} \n self.firebase.put(\"/remain\",\"area_B\",self.remainB)\n self.detail.insert(0,str(\"B區\")+self.movement+'\\n時間:'+date+'\\n========================')\n self.textdetail()\n print(\"post_to_field3...\")\n\n \n\n #ini = threading.Thread(target = init)\n # ini.start()\n\n # resize frame image\n # while True:\n\n ret, frame = capV.read()\n if ret == False:\n \n QMessageBox.information(\n self, \"Error Loading video\", \"Unable to load the areaB video\")\n self.timerB.stop() \n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n self.cars = self.car_cascade.detectMultiScale(gray, 1.1, 10)\n\n cv2.line(frame, (25, self.line_pos),\n (1200, self.line_pos), (255, 0, 0), 2)\n\n nowframetime = datetime.datetime.now()\n self.Iscarin = 0\n self.Frame += 1\n \n for (x, y, w, h) in self.cars:\n\n cv2.rectangle(frame, (x, y), (x+w, 
y+h), (0, 255, 0), 2)\n\n self.Iscarin = 1\n center = catch_center(x, y, w, h)\n # print(self.detec)\n centery = center_y(x, y, w, h)\n self.detec.append(center)\n self.detect_y.append(centery)\n cv2.circle(frame, center, 4, (0, 0, 255), -1)\n startframetime = datetime.datetime.now()\n nowframetime = startframetime+datetime.timedelta(seconds=5)\n\n self.framecount += 1\n beforecount = self.framecount-3\n f1 = self.detect_y[self.framecount-1:self.framecount+1]\n f2 = self.detect_y[beforecount:beforecount+1]\n\n # 303,40\n num = list(map(lambda x: x[0]-x[1], zip(f1, f2)))\n\n self.direction = np.sign(num)\n\n for (x, y) in self.detec:\n\n if y < (self.line_pos+self.offset) and y > (self.line_pos-self.offset) and self.direction == 1:\n\n if self.Iscount == 0: # 0 remove x_y\n\n endtime = datetime.datetime.now()\n nowtime = self.starttime + \\\n datetime.timedelta(seconds=self.restTime)\n self.detec.remove((x, y))\n self.IsDOWNdetectRemove = 1\n if endtime >= nowtime:\n self.Iscount = 1\n if self.Iscount == 1:\n\n self.remainB -= 1\n\n cv2.line(frame, (25, self.line_pos),\n (1200, self.line_pos), (0, 127, 255), 3)\n if self.IsDOWNdetectRemove == 0:\n self.detec.remove((x, y))\n self.Iscount = 0\n self.starttime = datetime.datetime.now()\n #cv2.imwrite('frame%d.jpg'%Frame , frame)\n # time.sleep(3)\n self.lastFrame = self.Frame\n print(\"Cars detected so far: \"+str(self.remainB))\n self.movement = \"進來一輛車\"\n\n if y > (self.line_pos-self.offset) and y < (self.line_pos+self.offset) and self.direction == -1:\n\n if self.Iscount == 0:\n\n endtime = datetime.datetime.now()\n nowtime = self.starttime + \\\n datetime.timedelta(seconds=self.restTime)\n self.detec.remove((x, y))\n self.IsUPdetectRemove = 1\n if endtime >= nowtime:\n self.Iscount = 1\n if self.Iscount == 1:\n\n self.remainB += 1\n\n cv2.line(frame, (25, self.line_pos),\n (1200, self.line_pos), (0, 127, 255), 3)\n if self.IsUPdetectRemove == 0:\n self.detec.remove((x, y))\n self.Iscount = 0\n self.starttime = datetime.datetime.now()\n #cv2.imwrite('frame%d.jpg'%Frame , frame)\n # time.sleep(3)\n print(\"Cars detected so far: \"+str(self.remainB))\n self.movement = \"出去一輛車\"\n\n if self.pre_countB != self.remainB:\n p = threading.Thread(target=post)\n p.start()\n # t.join()\n\n self.pre_countB = self.remainB\n\n # time.sleep(0.15)\n if self.Iscarin == 1:\n cv2.circle(frame, (303, 40), 10, (0, 0, 255), -1)\n\n else:\n cv2.circle(frame, (303, 40), 10, (0, 255, 0), -1)\n self.Iscarin = 0\n if self.Frame == self.lastFrame+300:\n self.movement = \"\"\n self.lastFrame = 0\n\n # display the resulting frame\n\n # self.textCount(self.count)\n self.textBremain(self.remainB)\n # self.textBremain(self.remainB)\n cv2.putText(frame, \"B\", (27, 50),\n cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 3)\n \n # get frame infos\n height, width, channel = frame.shape\n step = channel * width\n\n # create QImage from RGB frame\n qImg = QImage(frame.data, width, height, step, QImage.Format_RGB888)\n # show frame in img_label\n labelshow.setPixmap(QPixmap.fromImage(qImg))\n\n def detectspace(self):\n #print(self.config)\n def job():\n self.firebase.put(\"/parkingspace\",\"space\",int(countSpace))\n print(\"post_to_field4...\")\n \n \n \n \n \n # Create Background subtractor\n \n \n \n video_cur_pos = self.capspace.get(cv2.CAP_PROP_POS_MSEC) / 1000.0 # Current position of the video file in seconds\n video_cur_frame = self.capspace.get(cv2.CAP_PROP_POS_FRAMES) # Index of the frame to be decoded/captured next\n ret, frame = self.capspace.read() \n if ret == False:\n 
\n QMessageBox.information(\n self, \"Error Loading video\", \"Unable to load the parkingspace video\")\n self.timer2.stop()\n\n \n \n #frame_gray = cv2.cvtColor(frame.copy(), cv2.COLOR_BGR2GRAY)\n # Background Subtraction\n frame_blur = cv2.GaussianBlur(frame.copy(), (5,5), 3)\n frame_gray = cv2.cvtColor(frame_blur, cv2.COLOR_BGR2GRAY)\n frame_out = frame.copy()\n \n # Draw Overlay\n '''\n if self.config['text_overlay']:\n str_on_frame = \"%d/%d\" % (video_cur_frame, self.video_info['num_of_frames'])\n #textframecount\n cv2.putText(frame_out, str_on_frame, (5,30), cv2.FONT_HERSHEY_SIMPLEX,\n 0.8, (0,0,255), 2, cv2.LINE_AA)\n '''\n \n if self.config['motion_detection']:\n fgmask = self.fgbg.apply(frame_blur)\n bw = np.uint8(fgmask==255)*255 \n bw = cv2.erode(bw, kernel_erode, iterations=1)\n bw = cv2.dilate(bw, kernel_dilate, iterations=1)\n (_, cnts, _) = cv2.findContours(bw.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n # loop over the contours\n for c in cnts:\n # if the contour is too small, ignore it\n if cv2.contourArea(c) < self.config['min_area_motion_contour']:\n continue\n (x, y, w, h) = cv2.boundingRect(c)\n cv2.rectangle(frame_out, (x, y), (x + w, y + h), (255, 255, 255), 2) \n \n if self.config['parking_detection']: \n for ind, park in enumerate(self.parking_data):\n points = np.array(park['points'])\n rect = self.parking_bounding_rects[ind]\n roi_gray = frame_gray[rect[1]:(rect[1]+rect[3]), rect[0]:(rect[0]+rect[2])] # crop roi for faster calcluation \n laplacian = cv2.Laplacian(roi_gray, cv2.CV_64F)\n points[:,0] = points[:,0] - rect[0] # shift contour to roi\n points[:,1] = points[:,1] - rect[1]\n delta = np.mean(np.abs(laplacian * self.parking_mask[ind]))\n status = delta < self.config['park_laplacian_th']\n # If detected a change in parking status, save the current time\n if status != self.parking_status[ind] and self.parking_buffer[ind]==None:\n self.parking_buffer[ind] = video_cur_pos\n # If status is still different than the one saved and counter is open\n elif status != self.parking_status[ind] and self.parking_buffer[ind]!=None:\n if video_cur_pos - self.parking_buffer[ind] > self.config['park_sec_to_wait']:\n self.parking_status[ind] = status\n self.parking_buffer[ind] = None\n # If status is still same and counter is open \n elif status == self.parking_status[ind] and self.parking_buffer[ind]!=None:\n #if video_cur_pos - parking_buffer[ind] > config['park_sec_to_wait']:\n self.parking_buffer[ind] = None \n #print(\"#%d: %.2f\" % (ind, delta))\n #print(self.parking_buffer)\n \n if self.config['parking_overlay']: \n \n countSpace = int('100000000000000000',2)\n for ind, park in enumerate(self.parking_data):\n points = np.array(park['points'])\n if self.parking_status[ind]:\n countSpace+=pow(2,ind)\n color = (255,0,0)#if no car change color\n #print(parking_status[ind])\n else: color = (0,0,255)\n \n \n countSpace << 1\n cv2.drawContours(frame_out, [points], contourIdx=-1,\n color=color, thickness=2, lineType=cv2.LINE_8) \n moments = cv2.moments(points)\n ##textID \n \n centroid = (int(moments['m10']/moments['m00'])-3, int(moments['m01']/moments['m00'])+3)\n cv2.putText(frame_out, str(park['id']), (centroid[0]+1, centroid[1]+1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 1, cv2.LINE_AA)\n cv2.putText(frame_out, str(park['id']), (centroid[0]-1, centroid[1]-1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 1, cv2.LINE_AA)\n cv2.putText(frame_out, str(park['id']), (centroid[0]+1, centroid[1]-1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 1, cv2.LINE_AA)\n 
cv2.putText(frame_out, str(park['id']), (centroid[0]-1, centroid[1]+1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,0,0), 1, cv2.LINE_AA)\n cv2.putText(frame_out, str(park['id']), centroid, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,0), 1, cv2.LINE_AA)\n #print(str(ind)+\" : \") \n #print(str(countSpace))\n \n \n cv2.putText(frame_out,format(countSpace,'b'),(7,97), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0), 3)\n \n if self.pre_countSpace!=countSpace:\n p = threading.Thread(target = job)\n p.start()\n \n \n self.pre_countSpace = countSpace \n if self.config['pedestrian_detction']: \n # detect people in the image\n (rects, weights) = self.hog.detectMultiScale(frame, winStride=(4, 4), padding=(8, 8), scale=1.05)\n \n # draw the original bounding boxes\n for (x, y, w, h) in rects:\n cv2.rectangle(frame_out, (x, y), (x + w, y + h), (255, 0, 0), 2)\n #print(frame_out.shape)\n height, width, channel = frame_out.shape\n step = channel * width\n #print(self.countSpace)\n # create QImage from RGB frame\n spaceqImg = QImage(frame_out.data, width, height, step, QImage.Format_RGB888)\n # show frame in img_label\n self.ui.parkspace.setPixmap(QPixmap.fromImage(spaceqImg)) \n # start/stop timer\n\n def controlTimer(self):\n # if timer is stopped\n if not self.timer.isActive():\n # create video capture\n #self.cap = self.cap\n \n self.textdetail()\n self.cap = cv2.VideoCapture(\"./Entrance.mp4\")\n self.capA = cv2.VideoCapture(\"./areaA.mp4\")\n self.capB = cv2.VideoCapture(\"./areaB.mp4\")\n if self.spaceopen ==True:\n self.fn = r\"./parkinglot_1_480p.mp4\"\n self.fn_yaml = r\"./parking2.yml\"\n self.capspace = cv2.VideoCapture(self.fn)\n # start timer\n self.timer.start(5)\n self.timerA.start(5)\n self.timerB.start(5)\n self.timer2.start(5)\n # update control_bt text\n self.ui.control_bt.setText(\"暫停\")\n # if timer is started\n else:\n # stop timer\n self.timer.stop()\n self.timerA.stop()\n self.timerB.stop()\n self.timer2.stop()\n # release video capture\n self.cap.release()\n self.capA.release()\n self.capB.release()\n self.capspace.release()\n # update control_bt text\n self.ui.control_bt.setText(\"開始\")\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n\n # create and show mainWindow\n mainWindow = MainWindow()\n mainWindow.show()\n\n sys.exit(app.exec_())\n\n","repo_name":"gitwetguy/Smart_parkinglot","sub_path":"back-end/main_window.py","file_name":"main_window.py","file_ext":"py","file_size_in_byte":35452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
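detectspace in the parking-lot record above decides occupancy by thresholding the mean absolute Laplacian inside each masked parking polygon (park_laplacian_th = 1.8). A stripped-down sketch of just that test; the synthetic frame and polygon coordinates below are assumptions for illustration:

import cv2
import numpy as np

LAPLACIAN_TH = 1.8  # mirrors config['park_laplacian_th'] in the record

def spot_is_free(frame_gray, points, threshold=LAPLACIAN_TH):
    """Return True when the polygonal parking spot looks empty.

    Empty asphalt is locally smooth, so a second-derivative filter
    responds weakly; a parked car adds edges and raises the response.
    """
    x, y, w, h = cv2.boundingRect(points)
    roi = frame_gray[y:y + h, x:x + w]
    # Rasterise the polygon into a mask, shifted into ROI coordinates.
    shifted = (points - [x, y]).astype(np.int32)
    mask = np.zeros((h, w), dtype=np.uint8)
    cv2.drawContours(mask, [shifted], contourIdx=-1, color=255, thickness=-1)
    laplacian = cv2.Laplacian(roi, cv2.CV_64F)
    # Same statistic as the record: mean |Laplacian| over the masked rect.
    delta = np.mean(np.abs(laplacian * (mask == 255)))
    return delta < threshold

# Illustrative usage on a synthetic frame (coordinates are assumptions).
frame = np.full((240, 320), 127, dtype=np.uint8)   # flat "asphalt"
spot = np.array([[10, 10], [90, 10], [90, 60], [10, 60]], dtype=np.int32)
print(spot_is_free(frame, spot))  # True: a flat patch has ~zero response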
+{"seq_id":"33654005557","text":"import multiprocessing\nimport os\nimport sys\nimport threading\nimport time\nimport traceback\n\nimport testlib.helper as helper\nimport testlib.log as log\nimport testlib.result as result\nimport testlib.state as state\nimport testlib.terminal as terminal\n\nfrom queue import Queue, Empty\nfrom testlib.configuration import constants\n\n\nclass _TestStreamManager(object):\n def __init__(self):\n self._writers = {}\n\n def open_writer(self, test_result):\n if test_result in self._writers:\n raise ValueError('Cannot have multiple writters on a single test.')\n self._writers[test_result] = _TestStreams(test_result.stdout,\n test_result.stderr)\n\n def get_writer(self, test_result):\n if test_result not in self._writers:\n self.open_writer(test_result)\n return self._writers[test_result]\n\n def close_writer(self, test_result):\n if test_result in self._writers:\n writer = self._writers.pop(test_result)\n writer.close()\n\n def close(self):\n for writer in self._writers.values():\n writer.close()\n self._writers.clear()\n\nclass _TestStreams(object):\n def __init__(self, stdout, stderr):\n helper.mkdir_p(os.path.dirname(stdout))\n helper.mkdir_p(os.path.dirname(stderr))\n self.stdout = open(stdout, 'w')\n self.stderr = open(stderr, 'w')\n\n def close(self):\n self.stdout.close()\n self.stderr.close()\n\nclass ResultHandler(object):\n '''\n Log handler which listens for test results and output saving data as\n it is reported.\n\n When the handler is closed it writes out test results in the python pickle\n format.\n '''\n def __init__(self, schedule, directory):\n '''\n :param schedule: The entire schedule as a :class:`LoadedLibrary`\n object.\n\n :param directory: Directory to save test stdout/stderr and aggregate\n results to.\n '''\n self.directory = directory\n self.internal_results = result.InternalLibraryResults(schedule,\n directory)\n self.test_stream_manager = _TestStreamManager()\n self._closed = False\n\n self.mapping = {\n log.LibraryStatus.type_id: self.handle_library_status,\n\n log.SuiteResult.type_id: self.handle_suite_result,\n log.TestResult.type_id: self.handle_test_result,\n\n log.TestStderr.type_id: self.handle_stderr,\n log.TestStdout.type_id: self.handle_stdout,\n }\n\n def handle(self, record):\n if not self._closed:\n self.mapping.get(record.type_id, lambda _:None)(record)\n\n def handle_library_status(self, record):\n if record['status'] in (state.Status.Complete, state.Status.Avoided):\n self.test_stream_manager.close()\n\n def handle_suite_result(self, record):\n suite_result = self.internal_results.get_suite_result(\n record['metadata'].uid)\n suite_result.result = record['result']\n\n def handle_test_result(self, record):\n test_result = self._get_test_result(record)\n test_result.result = record['result']\n\n def handle_stderr(self, record):\n self.test_stream_manager.get_writer(\n self._get_test_result(record)\n ).stderr.write(record['buffer'])\n\n def handle_stdout(self, record):\n self.test_stream_manager.get_writer(\n self._get_test_result(record)\n ).stdout.write(record['buffer'])\n\n def _get_test_result(self, test_record):\n return self.internal_results.get_test_result(\n test_record['metadata'].uid,\n test_record['metadata'].suite_uid)\n\n def _save(self):\n #FIXME Hardcoded path name\n result.InternalSavedResults.save(\n self.internal_results,\n os.path.join(self.directory, constants.pickle_filename))\n result.JUnitSavedResults.save(\n self.internal_results,\n os.path.join(self.directory, constants.xml_filename))\n\n def 
close(self):\n if self._closed:\n return\n self._closed = True\n self._save()\n\n def unsuccessful(self):\n '''\n Performs an or reduce on all of the results.\n Returns true if at least one test is unsuccessful, false when all tests\n pass\n '''\n for suite_result in self.internal_results:\n if suite_result.unsuccessful:\n return True\n # If all are successful, then this wasn't \"unsuccessful\"\n return False\n\n\n#TODO Change from a handler to an internal post processor so it can be used\n# to reprint results\nclass SummaryHandler(object):\n '''\n A log handler which listens to the log for test results\n and reports the aggregate results when closed.\n '''\n color = terminal.get_termcap()\n reset = color.Normal\n colormap = {\n state.Result.Errored: color.Red,\n state.Result.Failed: color.Red,\n state.Result.Passed: color.Green,\n state.Result.Skipped: color.Cyan,\n }\n\n def __init__(self):\n self.mapping = {\n log.TestResult.type_id: self.handle_testresult,\n log.LibraryStatus.type_id: self.handle_library_status,\n }\n self._timer = helper.Timer()\n self.results = []\n\n def handle_library_status(self, record):\n if record['status'] == state.Status.Building:\n self._timer.restart()\n\n def handle_testresult(self, record):\n result = record['result'].value\n if result in (state.Result.Skipped, state.Result.Failed,\n state.Result.Passed, state.Result.Errored):\n self.results.append(result)\n\n def handle(self, record):\n self.mapping.get(record.type_id, lambda _:None)(record)\n\n def close(self):\n print(self._display_summary())\n\n def _display_summary(self):\n most_severe_outcome = None\n outcome_fmt = ' {count} {outcome}'\n strings = []\n\n outcome_count = [0] * len(state.Result.enums)\n for result in self.results:\n outcome_count[result] += 1\n\n # Iterate over enums so they are in order of severity\n for outcome in state.Result.enums:\n outcome = getattr(state.Result, outcome)\n count = outcome_count[outcome]\n if count:\n strings.append(outcome_fmt.format(count=count,\n outcome=state.Result.enums[outcome]))\n most_severe_outcome = outcome\n string = ','.join(strings)\n if most_severe_outcome is None:\n string = ' No testing done'\n most_severe_outcome = state.Result.Passed\n else:\n string = ' Results:' + string + ' in {:.2} seconds '.format(\n self._timer.active_time())\n string += ' '\n return terminal.insert_separator(\n string,\n color=self.colormap[most_severe_outcome] + self.color.Bold)\n\nclass TerminalHandler(object):\n color = terminal.get_termcap()\n verbosity_mapping = {\n log.LogLevel.Warn: color.Yellow,\n log.LogLevel.Error: color.Red,\n }\n default = color.Normal\n\n def __init__(self, verbosity=log.LogLevel.Info, machine_only=False):\n self.stream = verbosity >= log.LogLevel.Trace\n self.verbosity = verbosity\n self.machine_only = machine_only\n self.mapping = {\n log.TestResult.type_id: self.handle_testresult,\n log.SuiteStatus.type_id: self.handle_suitestatus,\n log.TestStatus.type_id: self.handle_teststatus,\n log.TestStderr.type_id: self.handle_stderr,\n log.TestStdout.type_id: self.handle_stdout,\n log.TestMessage.type_id: self.handle_testmessage,\n log.LibraryMessage.type_id: self.handle_librarymessage,\n }\n\n def _display_outcome(self, name, outcome, reason=None):\n print(self.color.Bold\n + SummaryHandler.colormap[outcome]\n + name\n + ' '\n + state.Result.enums[outcome]\n + SummaryHandler.reset)\n\n if reason is not None:\n log.test_log.info('')\n log.test_log.info('Reason:')\n log.test_log.info(reason)\n log.test_log.info(terminal.separator('-'))\n\n def 
handle_teststatus(self, record):\n if record['status'] == state.Status.Running:\n log.test_log.debug('Starting Test Case: %s' %\\\n record['metadata'].name)\n\n def handle_testresult(self, record):\n self._display_outcome(\n 'Test: %s' % record['metadata'].name,\n record['result'].value)\n\n def handle_suitestatus(self, record):\n if record['status'] == state.Status.Running:\n log.test_log.debug('Starting Test Suite: %s ' %\\\n record['metadata'].name)\n\n def handle_stderr(self, record):\n if self.stream:\n print(record.data['buffer'], file=sys.stderr, end='')\n\n def handle_stdout(self, record):\n if self.stream:\n print(record.data['buffer'], file=sys.stdout, end='')\n\n def handle_testmessage(self, record):\n if self.stream:\n print(self._colorize(record['message'], record['level']))\n\n def handle_librarymessage(self, record):\n if not self.machine_only or record.data.get('machine_readable', False):\n print(self._colorize(record['message'], record['level'],\n record['bold']))\n\n def _colorize(self, message, level, bold=False):\n return '%s%s%s%s' % (\n self.color.Bold if bold else '',\n self.verbosity_mapping.get(level, ''),\n message,\n self.default)\n\n def handle(self, record):\n if record.data.get('level', self.verbosity) > self.verbosity:\n return\n self.mapping.get(record.type_id, lambda _:None)(record)\n\n def close(self):\n pass\n\nclass MultiprocessingHandlerWrapper(object):\n '''\n A handler class which forwards log records to subhandlers, enabling\n logging across multiprocessing python processes.\n\n The 'parent' side of the handler should execute either\n :func:`async_process` or :func:`process` to forward\n log records to subhandlers.\n '''\n def __init__(self, *subhandlers):\n # Create thread to spin handing recipt of messages\n # Create queue to push onto\n self.queue = multiprocessing.Queue()\n self.queue.cancel_join_thread()\n self._shutdown = threading.Event()\n\n # subhandlers should be accessed with the _handler_lock\n self._handler_lock = threading.Lock()\n self._subhandlers = subhandlers\n\n def add_handler(self, handler):\n self._handler_lock.acquire()\n self._subhandlers = (handler, ) + self._subhandlers\n self._handler_lock.release()\n\n def _with_handlers(self, callback):\n exception = None\n self._handler_lock.acquire()\n for handler in self._subhandlers:\n # Prevent deadlock when using this handler by delaying\n # exception raise until we get a chance to unlock.\n try:\n callback(handler)\n except Exception as e:\n exception = e\n break\n self._handler_lock.release()\n\n if exception is not None:\n raise exception\n\n def async_process(self):\n self.thread = threading.Thread(target=self.process)\n self.thread.daemon = True\n self.thread.start()\n\n def process(self):\n while not self._shutdown.is_set():\n try:\n item = self.queue.get(timeout=0.1)\n self._handle(item)\n except (KeyboardInterrupt, SystemExit):\n raise\n except EOFError:\n return\n except Empty:\n continue\n\n def _drain(self):\n while True:\n try:\n item = self.queue.get(block=False)\n self._handle(item)\n except (KeyboardInterrupt, SystemExit):\n raise\n except EOFError:\n return\n except Empty:\n return\n\n def _handle(self, record):\n self._with_handlers(lambda handler: handler.handle(record))\n\n def handle(self, record):\n self.queue.put(record)\n\n def _close(self):\n if hasattr(self, 'thread'):\n self.thread.join()\n _wrap(self._drain)\n self._with_handlers(lambda handler: _wrap(handler.close))\n\n # NOTE Python2 has an known bug which causes IOErrors to be raised\n # if this 
shutdown doesn't go cleanly on both ends.\n # This sleep adds some time for the sender threads on this process to\n # finish pickling the object and complete shutdown after the queue is\n # closed.\n time.sleep(.2)\n self.queue.close()\n time.sleep(.2)\n\n def close(self):\n if not self._shutdown.is_set():\n self._shutdown.set()\n self._close()\n\n\ndef _wrap(callback, *args, **kwargs):\n try:\n callback(*args, **kwargs)\n except:\n traceback.print_exc()\n","repo_name":"gem5/gem5","sub_path":"ext/testlib/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":13202,"program_lang":"python","lang":"en","doc_type":"code","stars":1196,"dataset":"github-code","pt":"78"}
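MultiprocessingHandlerWrapper in the gem5 record above lets child processes log by pushing pickled records onto a multiprocessing.Queue that a daemon thread in the parent drains into subhandlers. A minimal sketch of that pattern, assuming a hypothetical PrintHandler and omitting the original's drain-on-close and error-isolation details:

import multiprocessing
import threading
import time
from queue import Empty

class PrintHandler:
    """Hypothetical subhandler: just echoes records."""
    def handle(self, record):
        print("handled:", record)

class QueueFanout:
    """Forward records produced in any process to local subhandlers."""
    def __init__(self, *subhandlers):
        self.queue = multiprocessing.Queue()
        self._subhandlers = subhandlers
        self._shutdown = threading.Event()

    def handle(self, record):    # safe to call from child processes
        self.queue.put(record)

    def async_process(self):     # parent side: drain in a daemon thread
        self._thread = threading.Thread(target=self._process, daemon=True)
        self._thread.start()

    def _process(self):
        while not self._shutdown.is_set():
            try:
                record = self.queue.get(timeout=0.1)
            except Empty:
                continue
            for handler in self._subhandlers:
                handler.handle(record)

    def close(self):
        self._shutdown.set()
        self._thread.join()

def producer(queue):
    """Runs in the child; only the picklable Queue crosses the boundary."""
    queue.put("record from child process")

if __name__ == "__main__":
    fanout = QueueFanout(PrintHandler())
    fanout.async_process()
    child = multiprocessing.Process(target=producer, args=(fanout.queue,))
    child.start()
    child.join()
    time.sleep(0.5)  # let the drain thread forward the record before closing
    fanout.close()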
+{"seq_id":"37153171078","text":"from argparse import ArgumentParser\nfrom collections import defaultdict\nfrom enum import Enum\nfrom functools import partial\nimport itertools\nimport os\nfrom typing import Dict, List, Optional, Union\n\nimport numpy as np\n\nfrom .cli import CLIBuilder, register_command\nfrom .constants import DIFF_MODEL\nfrom .data import FileReducer, RepoMapping\nfrom .io_constants import (\n BOW_DIR,\n DOC_FILENAME,\n DOCWORD_FILENAME,\n LABELS_FILENAME,\n REF_FILENAME,\n TOPICS_DIR,\n VOCAB_FILENAME,\n WORDTOPIC_FILENAME,\n)\nfrom .reduce import (\n concat_reducer,\n diff_to_hall_reducer,\n last_ref_reducer,\n max_reducer,\n mean_reducer,\n median_reducer,\n)\nfrom .utils import (\n check_file_exists,\n check_range,\n check_remove,\n create_logger,\n load_refs_dict,\n)\n\n\ndef _define_parser(parser: ArgumentParser) -> None:\n cli_builder = CLIBuilder(parser)\n cli_builder.add_bow_arg(required=True)\n cli_builder.add_experiment_arg(required=True)\n cli_builder.add_force_arg()\n parser.add_argument(\n \"--mu\",\n help=\"Weights how discriminative we want the label to be relative to other\"\n \" topics , defaults to %(default)s.\",\n default=1.0,\n type=float,\n )\n parser.add_argument(\n \"--label-size\",\n help=\"Number of words in a label, defaults to %(default)s.\",\n default=2,\n type=int,\n )\n parser.add_argument(\n \"--min-prob\",\n help=\"Admissible words for a topic label must have a topic probability over \"\n \"this value, defaults to %(default)s.\",\n default=0.001,\n type=float,\n )\n parser.add_argument(\n \"--max-topics\",\n help=\"Admissible words for a topic label must be admissible for less then this\"\n \" amount of topics, defaults to %(default)s.\",\n default=10,\n type=int,\n )\n parser.add_argument(\n \"--no-smoothing\",\n help=\"To ignore words that don't cooccur with a given label rather then use \"\n \"Laplace smoothing on the joint word/label probabilty.\",\n dest=\"smoothing\",\n action=\"store_false\",\n )\n parser.add_argument(\n \"--context\",\n help=\"Context creation method.\",\n choices=list(Context),\n type=Context.from_string,\n required=True,\n )\n\n\nclass Context(Enum):\n last = partial(last_ref_reducer)\n max = partial(max_reducer)\n mean = partial(mean_reducer)\n median = partial(median_reducer)\n concat = partial(concat_reducer)\n hall = None\n\n def __str__(self) -> str:\n return self.name\n\n @staticmethod\n def from_string(s: str) -> \"Context\":\n try:\n return Context[s]\n except KeyError:\n raise ValueError()\n\n @property\n def reducer(self) -> Optional[FileReducer]:\n return self.value\n\n\n@register_command(parser_definer=_define_parser)\ndef label(\n bow_name: str,\n exp_name: str,\n force: bool,\n log_level: str,\n mu: float,\n label_size: int,\n min_prob: float,\n max_topics: int,\n smoothing: bool,\n context: Context,\n) -> None:\n \"\"\"Infer a label for each topic automatically given a topic model.\"\"\"\n logger = create_logger(log_level, __name__)\n input_dir_bow = os.path.join(BOW_DIR, bow_name)\n doc_input_path = os.path.join(input_dir_bow, DOC_FILENAME)\n check_file_exists(doc_input_path)\n docword_input_path = os.path.join(input_dir_bow, DOCWORD_FILENAME)\n check_file_exists(docword_input_path)\n refs_input_path = os.path.join(input_dir_bow, REF_FILENAME)\n check_file_exists(refs_input_path)\n vocab_input_path = os.path.join(input_dir_bow, VOCAB_FILENAME)\n check_file_exists(vocab_input_path)\n\n dir_exp = os.path.join(TOPICS_DIR, bow_name, exp_name)\n wordtopic_input_path = os.path.join(dir_exp, 
WORDTOPIC_FILENAME)\n check_file_exists(wordtopic_input_path)\n\n labels_output_path = os.path.join(dir_exp, LABELS_FILENAME)\n check_remove(labels_output_path, logger, force)\n\n check_range(min_prob, \"min-prob\")\n\n refs_dict = load_refs_dict(logger, refs_input_path)\n\n logger.info(\"Loading word index ...\")\n with open(vocab_input_path, \"r\", encoding=\"utf-8\") as fin:\n word_index: Dict[int, str] = {\n i: word.replace(\"\\n\", \"\") for i, word in enumerate(fin)\n }\n num_words = len(word_index)\n logger.info(\"Loaded word index, found %d words.\", num_words)\n\n repo_mapping = RepoMapping()\n repo_mapping.build(logger, doc_input_path)\n corpus = repo_mapping.create_corpus(logger, docword_input_path)\n if repo_mapping.topic_model == DIFF_MODEL:\n logger.info(\"Recreating hall model corpus (we can't use delta-documents) ...\")\n corpus = repo_mapping.reduce_corpus(\n corpus, logger, refs_dict, diff_to_hall_reducer\n )\n num_docs = corpus.shape[0]\n logger.info(\"Recreated hall model corpus, found %d documents ...\", num_docs)\n\n if context.reducer is not None:\n logger.info(\"Creating %s context ...\", str(context))\n corpus = repo_mapping.reduce_corpus(corpus, logger, refs_dict, context.reducer)\n num_docs = corpus.shape[0]\n logger.info(\"Created context, found %d documents ...\", num_docs)\n\n logger.info(\"Loading word topic distributions ...\")\n topic_words = np.load(wordtopic_input_path)\n num_topics = topic_words.shape[0]\n logger.info(\"Loaded distributions, found %d topics.\", num_topics)\n\n logger.info(\"Finding common words for each topic ...\")\n common_words = np.argwhere(np.sum(topic_words > min_prob, axis=0) > max_topics)\n mask = np.ones(num_words, dtype=bool)\n mask[common_words] = False\n logger.info(\n \"Found %d words with probability over %.4f for more then %d topics, \"\n \"they will not be considered for labels.\",\n len(common_words),\n min_prob,\n max_topics,\n )\n if len(common_words) == num_words:\n logger.info(\"All words were excluded, cannot infer label.\")\n return\n coeff = mu / (num_topics - 1)\n words_counts = np.sum(corpus, axis=0)\n logger.info(\"Inferring labels for each topic ...\")\n best_labels_per_topic: Dict[int, Dict[str, float]] = {}\n best_scores: Dict[str, float] = defaultdict(lambda: -np.inf)\n for cur_topic in range(num_topics):\n logger.info(\"Topic %d:\", cur_topic + 1)\n num_admissible = len(np.argwhere(topic_words[cur_topic] > min_prob).flatten())\n admissible_words = np.argwhere(\n topic_words[cur_topic, mask] > min_prob\n ).flatten()\n if not len(admissible_words):\n logger.info(\"No admissible words where found, cannot infer label.\")\n return\n logger.info(\n \"\\tFound %d words with probability over %.4f, %d remained after removing \"\n \"common words.\",\n num_admissible,\n min_prob,\n len(admissible_words),\n )\n candidates = []\n candidates_names = []\n candidates_counts: Union[List, np.array] = []\n candidates_sizes = []\n for candidate in itertools.combinations(admissible_words, label_size):\n if np.min(corpus[:, candidate], axis=1).any():\n candidates.append(candidate)\n candidates_names.append(\" \".join(word_index[w] for w in candidate))\n candidates_counts.append(np.prod(corpus[:, list(candidate)], axis=1))\n candidates_sizes.append(len(candidate))\n num_cand = len(candidates_names)\n if not num_cand:\n logger.info(\"No candidates where found, cannot infer label.\")\n return\n logger.info(\"\\tFound %d candidate labels, computing their scores ...\", num_cand)\n candidates_counts = np.array(candidates_counts)\n 
joint_counts = candidates_counts @ corpus\n candidates_counts = np.sum(candidates_counts, axis=1)\n if smoothing:\n joint_counts += 1\n else:\n inds = np.argwhere(joint_counts == 0)\n joint_counts[joint_counts == 0] = (\n candidates_counts[inds[:, 0]] * words_counts[inds[:, 1]]\n )\n for cand_ind, candidate in enumerate(candidates):\n joint_counts[cand_ind, list(candidate)] = candidates_counts[cand_ind]\n\n # denominator = constant term > so we use counts instead of probs to compute PMI\n\n pmi = np.log(\n joint_counts / (candidates_counts[:, None] @ words_counts[:, None].T)\n )\n topic_probs = np.copy(topic_words).T\n topic_probs[:, cur_topic] *= coeff + 1\n topic_probs[:, [t for t in range(num_topics) if t != cur_topic]] *= -coeff\n scores = {\n name: score\n for name, score in zip(candidates_names, np.sum(pmi @ topic_probs, axis=1))\n }\n logger.info(\"\\tTop 5 candidates:\")\n best_labels = sorted(scores, key=scores.get, reverse=True)[:num_topics]\n best_labels_per_topic[cur_topic] = {}\n for label in best_labels:\n if scores[label] > best_scores[label]:\n for topic in best_labels_per_topic:\n if label in best_labels_per_topic[topic]:\n best_labels_per_topic[topic].pop(label)\n best_labels_per_topic[cur_topic][label] = scores[label]\n best_scores[label] = scores[label]\n for i, label_name in enumerate(best_labels[:5]):\n logger.info(\"\\t\\t %d. %s : %.4f\", i + 1, label_name, scores[label_name])\n\n topic_labels: List[str] = []\n for cur_topic in range(num_topics):\n scores = best_labels_per_topic[cur_topic]\n topic_labels.append(sorted(scores, key=scores.get, reverse=True)[0])\n\n logger.info(\"Selected the following labels:\")\n for ind_label, label in enumerate(topic_labels):\n logger.info(\n \"\\tTopic %d : %s (score: %.4f)\", ind_label + 1, label, best_scores[label]\n )\n\n logger.info(\"Saving topic labels ...\")\n with open(labels_output_path, \"w\", encoding=\"utf-8\") as fout:\n fout.write(\"\\n\".join(label for label in topic_labels))\n logger.info(\"Saved topic labels in '%s'.\", labels_output_path)\n","repo_name":"src-d/tm-experiments","sub_path":"tmexp/label.py","file_name":"label.py","file_ext":"py","file_size_in_byte":10116,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"}
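The label-scoring loop above ranks candidates by pointwise mutual information with corpus words (computed from raw counts, since the true denominator is a constant per topic), then weights each word's PMI by (1 + mu/(T-1)) for the target topic and -mu/(T-1) for every other topic. The same arithmetic on made-up inputs; every number below is an illustrative assumption:

import numpy as np

# Toy inputs (illustrative assumptions, not the record's real data).
num_topics, mu = 3, 1.0
# joint_counts[c, w]: co-occurrence count of candidate label c with word w.
joint_counts = np.array([[8., 1., 1., 2.],
                         [1., 9., 2., 1.]])
cand_counts = joint_counts.sum(axis=1)        # marginal count per label
word_counts = np.array([10., 12., 5., 6.])    # marginal count per word
topic_words = np.array([[.6, .1, .2, .1],     # P(word | topic), rows sum to 1
                        [.1, .7, .1, .1],
                        [.2, .2, .3, .3]])

# PMI up to an additive constant, exactly as in the record above.
pmi = np.log(joint_counts / (cand_counts[:, None] @ word_counts[None, :]))

cur_topic = 0
coeff = mu / (num_topics - 1)
# Reward relevance to cur_topic, penalise relevance to the other topics.
weights = topic_words.T.copy()                # shape (num_words, num_topics)
weights[:, cur_topic] *= coeff + 1
weights[:, [t for t in range(num_topics) if t != cur_topic]] *= -coeff
scores = (pmi @ weights).sum(axis=1)
print(scores)  # higher score = more discriminative label for topic 0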
+{"seq_id":"72140189693","text":"import logging\nimport os\nimport yaml\nfrom pprint import pformat\nfrom contextlib import contextmanager\n\nfrom marshmallow.exceptions import ValidationError\n\nfrom reactive_robot.config.schema import RxRobotConfigSchema\nfrom reactive_robot.exceptions import (\n InvalidConfigurationFileException,\n ConfigurationFileNotExists,\n)\n\nlog = logging.getLogger(\"reactive_robot.config\")\n\n\n@contextmanager\ndef _open_config_file(config_file):\n \"\"\"\n A context manager which yields an open file descriptor ready to be read.\n\n Accepts a filename as a string, an open or closed file descriptor, or None.\n When None, it defaults to `reactive-robot.yml` in the CWD. If a closed file descriptor\n is received, a new file descriptor is opened for the same file.\n\n The file descriptor is automatically closed when the context manager block is existed.\n \"\"\"\n\n # Default to the standard config filename.\n if config_file is None:\n paths_to_try = [\"reactive-robot.yml\", \"reactive-robot.yaml\"]\n # If it is a string, we can assume it is a path and attempt to open it.\n elif isinstance(config_file, str):\n paths_to_try = [config_file]\n # If closed file descriptor, get file path to reopen later.\n elif getattr(config_file, \"closed\", False):\n paths_to_try = [config_file.name]\n else:\n paths_to_try = None\n\n if paths_to_try:\n # config_file is not a file descriptor, so open it as a path.\n for path in paths_to_try:\n path = os.path.abspath(path)\n log.info(f\"Trying to load configuration file: {path}\")\n try:\n config_file = open(path, \"rb\")\n break\n except FileNotFoundError:\n log.info(f\"Config file '{path}' does not exist.\")\n continue\n else:\n log.error(f\"Config file '{paths_to_try[0]}' does not exist.\")\n raise ConfigurationFileNotExists(\n f\"Config file '{paths_to_try[0]}' does not exist.\"\n )\n else:\n log.debug(f\"Trying to load configuration file: {config_file}\")\n\n try:\n yield config_file\n finally:\n if hasattr(config_file, \"close\"):\n config_file.close()\n\n\ndef load_config(config_file=None, **kwargs):\n \"\"\"\n Load the configuration for a given file object or name\n\n The config_file can either be a file object, string or None. If it is None\n the default `reactive-robot.yml` filename will loaded.\n\n Extra kwargs are passed to the configuration to replace any default values\n unless they themselves are None.\n \"\"\"\n options = kwargs.copy()\n\n # Filter None values from the options. This usually happens with optional\n # parameters from Click.\n for key, value in options.copy().items():\n if value is None:\n options.pop(key)\n\n config_schema = RxRobotConfigSchema()\n with _open_config_file(config_file) as fd:\n data = yaml.safe_load(fd)\n log.debug(f\"Parsed configuration -> \\n{pformat(data)}\")\n try:\n dump = config_schema.load(data, unknown=True)\n except ValidationError as e:\n raise InvalidConfigurationFileException(e.messages)\n\n log.debug(\"Config object created -> %s \" % dump)\n return dump\n","repo_name":"yusufcanb/reactive-robot","sub_path":"reactive_robot/config/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3260,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"78"}
+{"seq_id":"19480358033","text":"\"\"\"\r\n CreateInflowFileFromLDASRunoff.py\r\n RAPIDpy\r\n\r\n Created by Alan D. Snow, 2016\r\n Adapted from CreateInflowFileFromWRFHydroRunoff.py.\r\n License: BSD-3-Clause\r\n\"\"\"\r\nfrom .CreateInflowFileFromLDASRunoff import CreateInflowFileFromLDASRunoff\r\n\r\n\r\nclass CreateInflowFileFromWRFHydroRunoff(CreateInflowFileFromLDASRunoff):\r\n \"\"\"Create Inflow File From WRF-Hydro Runoff\r\n\r\n Base class for creating RAPID NetCDF input\r\n of water inflow based on WRF-Hydro\r\n runoff and previously created weight table.\r\n\r\n According to David Gochis, underground runoff is\r\n \"a major fraction of total river flow in most places\"\r\n \"\"\"\r\n land_surface_model_name = \"WRF-Hydro\"\r\n header_wt = ['rivid', 'area_sqm', 'west_east', 'south_north', 'npoints']\r\n\r\n def __init__(self, lat_dim=\"south_north\",\r\n lon_dim=\"west_east\",\r\n lat_var=\"XLAT\",\r\n lon_var=\"XLONG\",\r\n surface_runoff_var=\"SFROFF\",\r\n subsurface_runoff_var=\"UDROFF\"):\r\n\r\n \"\"\"Define the tool (tool name is the name of the class).\"\"\"\r\n self.dims_oi = ['Time', lat_dim, lon_dim]\r\n\r\n super(CreateInflowFileFromWRFHydroRunoff, self).\\\r\n __init__(lat_dim, lon_dim, lat_var, lon_var,\r\n [surface_runoff_var, subsurface_runoff_var])\r\n","repo_name":"erdc/RAPIDpy","sub_path":"RAPIDpy/inflow/CreateInflowFileFromWRFHydroRunoff.py","file_name":"CreateInflowFileFromWRFHydroRunoff.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"78"}
+{"seq_id":"41225613185","text":"# Store the data\nred = []\nnon_red = []\n\n# Ask for input\ncolor = input(\"Is your student red-headed (Y/N): \")\nage = int(input(\"Enter your student's age: \"))\n\n# Add to lists\nif age >= 0:\n if color == \"Y\":\n red.append(age)\n else:\n non_red.append(age)\n\n# Keep asking\nwhile age >= 0:\n # Ask for input\n color = input(\"Is your student red-headed (Y/N): \")\n age = int(input(\"Enter your student's age: \"))\n\n # Add to lists\n if age >= 0:\n if color == \"Y\":\n red.append(age)\n else:\n non_red.append(age)\n\n# Calculate the stats\nred_min = str(min(red))\nred_max = str(max(red))\nred_len = str(len(red))\n\nnon_red_min = str(min(non_red))\nnon_red_max = str(max(non_red))\nnon_red_len = str(len(non_red))\n\n# Write to the file\nwith open(\"Student_Data_23Fa.txt\", \"w\") as output_file:\n # Write the column names\n output_file.write(\"Gender Number of Students Minimum Age Maximum Age\\n\")\n\n # Write the other lines\n red_line = f\"Redheaded {red_len:<{len('Number of Students ')}}{red_min:<{len('Minimum Age ')}}{red_max}\\n\"\n output_file.write(red_line)\n\n non_red_line = f\"Not Red {non_red_len:<{len('Number of Students ')}}{non_red_min:<{len('Minimum Age ')}}{non_red_max}\\n\"\n output_file.write(non_red_line)","repo_name":"RicePandaaaa/F23Exam2PracticeSolutions","sub_path":"More Exam 2 Practice Problems/q15.py","file_name":"q15.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"17760654485","text":"from vyper.exceptions import (\n CompilerPanic,\n NamespaceCollision,\n UndeclaredDefinition,\n)\n\n\nclass Namespace(dict):\n \"\"\"\n Dictionary subclass that represents the namespace of a contract.\n\n Attributes\n ----------\n _scopes : List[Set]\n List of sets containing the key names for each scope\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self._scopes = []\n from vyper.context import environment\n from vyper.context.types import get_types\n from vyper.functions.functions import get_builtin_functions\n\n self.update(get_types())\n self.update(environment.get_constant_vars())\n self.update(get_builtin_functions())\n\n def __eq__(self, other):\n return self is other\n\n def __setitem__(self, attr, obj):\n self.validate_assignment(attr)\n\n if self._scopes:\n self._scopes[-1].add(attr)\n super().__setitem__(attr, obj)\n\n def __getitem__(self, key):\n if key not in self:\n raise UndeclaredDefinition(f\"'{key}' has not been declared\")\n return super().__getitem__(key)\n\n def __enter__(self):\n if not self._scopes:\n raise CompilerPanic(\"Context manager must be invoked via namespace.enter_scope()\")\n\n def __exit__(self, exc_type, exc_value, traceback):\n if not self._scopes:\n raise CompilerPanic(\"Bad use of namespace as a context manager\")\n for key in self._scopes.pop():\n del self[key]\n\n def enter_scope(self):\n \"\"\"\n Enter a new scope within the namespace.\n\n Called as a context manager, e.g. `with namespace.enter_scope():`\n All items that are added within the context are removed upon exit.\n \"\"\"\n from vyper.context import environment\n\n self._scopes.append(set())\n\n if len(self._scopes) == 1:\n # add mutable vars (`self`) to the initial scope\n self.update(environment.get_mutable_vars())\n\n return self\n\n def update(self, other):\n for key, value in other.items():\n self.__setitem__(key, value)\n\n def clear(self):\n super().clear()\n self.__init__()\n\n def validate_assignment(self, attr):\n if attr in self:\n if attr not in [x for i in self._scopes for x in i]:\n raise NamespaceCollision(f\"Cannot assign to '{attr}', it is a builtin\")\n obj = super().__getitem__(attr)\n raise NamespaceCollision(f\"'{attr}' has already been declared as a {obj}\")\n\n\ndef get_namespace():\n \"\"\"\n Get the active namespace object.\n \"\"\"\n global _namespace\n try:\n return _namespace\n except NameError:\n _namespace = Namespace()\n return _namespace\n","repo_name":"nbrown1337/testflash","sub_path":"vyper-env/lib/python3.8/site-packages/vyper/context/namespace.py","file_name":"namespace.py","file_ext":"py","file_size_in_byte":2748,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
+{"seq_id":"37613625","text":"import cv2 #--------------------------------Importing Librabies\nimport time #--------------------------------Importing Librabies\nimport pandas #--------------------------------Importing Librabies\nfrom datetime import datetime #--------------------------------Importing Librabies\n\nvideo = cv2.VideoCapture(0) #--------------------------------Laptop Camera Start\nstatus_list = [None, None] #----------------Connected to status so that time will be obtained with the refrence of this\ntimes = [] #--------------------------------List of time will be obtained from this\nfirst_frame = None #--------------------------------Delcaring Variable\ndf = pandas.DataFrame(columns = [\"Start\", \"End\"]) #--------------------------------Declaration of DataFrame\n\nwhile True:\n check, frame = video.read() #--------------------------------Read the images from the Camera\n status = 0 #--------------------------------Delcaring Variable\n\n gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY) #--------------------------------Transfers the images to gray color\n gray = cv2.GaussianBlur(gray, (21, 21), 0) #--------------------------------Blur the image\n\n if first_frame is None: #--------------------------------Continues the telecast\n first_frame = gray\n continue\n\n delta_frame = cv2.absdiff(first_frame, gray) #--------------------------------Make the difference\n thresh_frame = cv2.threshold(delta_frame, 30, 255, cv2.THRESH_BINARY)[1] #--------------------------------Black and white image screen\n thresh_frame = cv2.dilate(thresh_frame, None, iterations = 2) #--------------------------------Smooth the B & W screen\n\n (_,cnts,_) = cv2.findContours(thresh_frame.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) #--------------------------------Face recognitions\n for contour in cnts:\n if cv2.contourArea(contour) < 9500:\n continue\n status = 1\n (x, y, w, h) = cv2.boundingRect(contour)\n cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 255), 3)\n\n status_list.append(status) #--------------------------------Writing in Status_list\n status_list = status_list[-2:]\n if status_list[-1] == 1 and status_list[-2] == 0: #--------------------------------Jyare achanak bdlai che tyare e time record kre che\n times.append(datetime.now()) #--------------------------------Only two times will be recordedthat is incoming & outgoing\n if status_list[-1] == 0 and status_list[-2] == 1:\n times.append(datetime.now())\n\n cv2.imshow(\"Gray Frame\", gray) #--------------------------------Image showing in the screen\n cv2.imshow(\"Delta Frame\", delta_frame) #--------------------------------Image showing in the screen\n cv2.imshow(\"Threshold Frame\", thresh_frame) #--------------------------------Image showing in the screen\n cv2.imshow(\"Color Frame\", frame) #--------------------------------Image showing in the screen\n\n key = cv2.waitKey(1) #--------------------------------Trigger to stop\n if key == ord('q'):\n if status==1:\n times.append(datetime.now())\n break\n\n\nprint(status_list)\nfor z in times:\n print(z)\n\nfor i in range(0, len(times), 2): #--------------------------------Writing in DataFrame\n df = df.append({\"Start\": times[i], \"End\": times[i+1]}, ignore_index = True)\n#--------------------------------Range will start with 0 and will end at n-1 and 3rd digit is for between gap which will added in the first place of the Range i.e. \"0\"\n#--------------------------------\"ignore_index = True\" <--- is applied when there is not available any Series name. 
If dout, Remove it and try\n\ndf.to_csv(\"Times.csv\") #--------------------------------Formation of csv file. It will erase previous details and make new details\n\nvideo.release()\ncv2.destroyAllWindows()\n","repo_name":"manthan-ladva/Language-Practice","sub_path":"Python/App 5 Webcam Motion Detector/video.py","file_name":"video.py","file_ext":"py","file_size_in_byte":4988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"15490867386","text":"def subexpression_printer(expr):\r\n stack_indexes = []\r\n\r\n for index, char in enumerate(expr):\r\n if char == '(':\r\n stack_indexes.append(index)\r\n elif char == ')':\r\n end_index = index\r\n start_index = stack_indexes.pop()\r\n subexpr = expr[start_index:end_index + 1]\r\n print(subexpr)\r\n\r\n\r\nsubexpression_printer(input())","repo_name":"ayk-dev/python-advanced","sub_path":"01-stacks-and-queus/matching_brackets.py","file_name":"matching_brackets.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"72452008253","text":"from django.contrib.auth import get_user_model\nfrom django.core.management.base import BaseCommand\n\nfrom alexandria.users.models import AccountType, USLocation\nfrom alexandria.utils.management.commands import (\n bootstrap_system_branches,\n bootstrap_types,\n create_permissions_groups,\n)\n\n\nclass Command(BaseCommand):\n help = \"Creates everything needed for the site to be functional.\"\n\n def handle(self, *args, **options):\n bootstrap_types.Command().handle()\n bootstrap_system_branches.Command().handle()\n create_permissions_groups.Command().handle()\n\n location, created = USLocation.objects.get_or_create(\n address_1=\"123 Sesame St.\",\n city=\"Kaufman Astoria Studios\",\n state=\"NY\",\n zip_code=\"11106\",\n )\n user, created = get_user_model().objects.get_or_create(\n card_number=\"1234\",\n email=\"adminvonadmin@example.com\",\n birth_year=1900,\n account_type=AccountType.objects.get(name=\"Superuser\"),\n title=\"Admin Extraordinaire\",\n legal_first_name=\"Admin\",\n legal_last_name=\"von Admin\",\n address=location,\n notes=\"It's the admin.\",\n )\n if created:\n user.set_password(\"asdf\")\n user.save()\n","repo_name":"AlexandriaILS/Alexandria","sub_path":"alexandria/utils/management/commands/bootstrap_site.py","file_name":"bootstrap_site.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"33888549348","text":"# coding=utf-8\nfrom wordcloud import WordCloud\nimport pandas as pd\nimport ast\nfrom matplotlib import pyplot as plt\nimport os\npd.options.mode.chained_assignment = None\nimport sys \nfrom pathlib import Path\n\nrails_root = sys.argv[1]\n\nfor p in Path(f\"{rails_root}/public/images\").glob(\"wordcloud.png\"):\n p.unlink()\n\ntxt = pd.read_csv(f\"{rails_root}/data/cloud_text.csv\",names=[\"id\", \"no_stop\"])\ntxt_str = \"\"\n\nfor i in range(len(txt)):\n try:\n txt[\"no_stop\"][i] = ast.literal_eval(txt[\"no_stop\"][i])\n txt_str += ' '.join(txt[\"no_stop\"][i])\n except:\n continue\n\nif len(txt_str) < 50:\n print(\"您所選擇區間資料過少,請重新選擇\")\nelse:\n cloud = WordCloud(width=960, height=400,background_color='white',font_path=f\"{rails_root}/app/assets/fonts/TaipeiSansTCBeta-Regular.ttf\").generate(txt_str)\n cloud.to_file(f'{rails_root}/public/images/wordcloud.png')\n\n\n","repo_name":"demo-6th/SociView","sub_path":"lib/tasks/Wordcloud/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"}
+{"seq_id":"18972641919","text":"success = False # в будущем, если победим, примет значение True\nmas = list(range(100000)) # создаём массив и наполняем его значениями от 0 до 99999\nkey = int(input('Введите число для поиска: ')) # вводим число с клавиатуры и преобразуем его в тип int\nl = 0 # индекс левой границы массива mas. 0, потому что исчесление элементов в массиве начинается имеенно с нуля\nr = mas[len(mas)-1] # правая граница. В отличие от левой, она может меняться в зависимости от количества элементов в массиве, поэтому мы её вычисляем с помощью функции определения длины len(). Единицу вычитаем, потому что первый элемент в массиве у нас с индексом 0. Например, при длинне массива 100000, последний элемент будет 99999\niterateCounter = 0 # счётчик (англ. counter) повторений (англ. iterations) цикла поиска\nmid = None # задаём пустую переменную, заполним её чуть позже. None означает \"ничего\" \nwhile ((l <= r)): # выполняем цикл до тех пор, пока выражение (l <= r) не будет равно False\n iterateCounter += 1 # увеличиваем значение счётчика на 1. Ещё можно написать так: iterateCounter = iterateCounter + 1, смысл тот же\n mid = (l + r) // 2 # считываем срединный индекс отрезка [l,r]\n if (mas[mid] == key): # сверяем введённый нами с клавиатуры key со стрединным значением\n success = True # победа! Теперь наш успех равен True\n if (mas[mid] > key): # проверяем, какую часть нужно отбросить\n r = mid - 1 # если срединное значение больше искомого, двигаем правую границу r влево и ставим на месте срединной переменной \n # l - - - - mid <- - - - r\n # l - - - - r\n else:\n l = mid + 1 \n # l - - - -> mid - - - - - r\n # l - - - - - r\n\nif (success == True): # в случае успеха печатаем статистику \n print(\"Индекс элемента \" + str(key) + \" в массиве равен: \" + str(mid))\n print(\"\")\n print(\"Количество циклов поиска искомого числа: \"+str(iterateCounter))\nelse: # иначе извиняемся\n print(\"Извините, но такого элемента в массиве нет\")\n","repo_name":"Tamerlanchiques/usb","sub_path":"практика/K1/binarySearchSimple.py","file_name":"binarySearchSimple.py","file_ext":"py","file_size_in_byte":3025,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"11674432509","text":"from django.conf.urls.defaults import patterns, url\nfrom django.contrib.auth.decorators import login_required\nfrom walls.views import AddWallView, EditWallView,\\\n CommentedWallDetailView, DeleteWallView, WallImagesListView,\\\n WallImagesDetailView, WallImagesEditView, WallImagesDeleteView,\\\n WallImagesAddView, WallImagesListEditView, MyWallsListView\n\nurlpatterns = patterns('',\n url(r'^my/$',\n login_required(MyWallsListView.as_view()),\n name='walls_my_list'\n ),\n url(r'^(?P\\d+)/$',\n CommentedWallDetailView.as_view(),\n name='walls_detail'\n ),\n url(r'^(?P\\d+)/edit/$',\n login_required(EditWallView.as_view()),\n name='walls_edit'\n ),\n url(r'^(?P\\d+)/delete/$',\n login_required(DeleteWallView.as_view()),\n name='walls_delete'\n ),\n url(r'^add/$',\n login_required(AddWallView.as_view()),\n name='walls_add'\n ),\n\n url(r'^(?P\\d+)/images/$',\n WallImagesListView.as_view(),\n name='walls_images_list'\n ),\n url(r'^(?P\\d+)/images/edit/$',\n login_required(WallImagesListEditView.as_view()),\n name='walls_images_list_edit'\n ),\n url(r'^(?P\\d+)/images/(?P\\d+)/$',\n WallImagesDetailView.as_view(),\n name='walls_images_detail'\n ),\n url(r'^(?P\\d+)/images/(?P\\d+)/edit/$',\n login_required(WallImagesEditView.as_view()),\n name='walls_images_edit'\n ),\n url(r'^(?P\\d+)/images/(?P\\d+)/delete/$',\n login_required(WallImagesDeleteView.as_view()),\n name='walls_images_delete'\n ),\n url(r'^(?P\\d+)/images/add/$',\n login_required(WallImagesAddView.as_view()),\n name='walls_images_add'\n ),\n\n url(r'^bbox.json/$',\n 'walls.views.bbox',\n name='walls_bbox'\n ),\n)\n","repo_name":"unpatioli/tenniswall","sub_path":"walls/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"40790637495","text":"import win32com.client as win32\nimport os\n\n\nclass ExcelDataBase(object):\n def __init__(self):\n self._excel_app = None\n self._work_book = None\n self._work_sheets = None\n self._work_sheet = None\n self._sheet = None\n self._xls_file = None\n self._is_open = False\n self._is_new = False\n\n def start(self) -> bool:\n try:\n self._excel_app = win32.Dispatch('Excel.Application')\n print(\"EXCEL Application start.\")\n except Exception as value:\n print(\"Exception occurred, value = \", value)\n return False\n return True\n\n def quit(self):\n \"\"\"\n 退出EXCEL,\n :return:\n \"\"\"\n self.close()\n if self._excel_app:\n self._excel_app.Quit()\n self._excel_app = None\n self._is_open = False\n self._xls_file = None\n self._is_open = False\n self._is_new = False\n return\n\n def open_book(self, file_name, not_exist_new) -> bool:\n \"\"\" \"\"\"\n # 如果指向的文件不存在,则需要新建一个\n if not (os.path.exists(file_name) and os.path.isfile(file_name)):\n if not not_exist_new:\n return False\n\n # 得到绝对路径,因为ActiveX只支持绝对路径,包括Open,包括 Save as,\n # 不光必须用绝对路径,还需要实用原生的路径分割符号'\\'\n\n self._work_book = self._excel_app.Workbooks.Open(file_name)\n if not self._work_book:\n return False\n # self._work_book = self.excel_app_.ActiveWorkBook\n self._xls_file = os.path.abspath(file_name)\n self._is_open = True\n self._work_sheets = self._work_book.Worksheets\n return True\n\n def new_book(self):\n \"\"\" \"\"\"\n # 新建一个xls,添加一个新的工作薄\n self._excel_app.WorkBooks.Add()\n self._work_book = self._excel_app.ActiveWorkBook\n self._is_open = True\n self._is_new = True\n return True\n\n def close(self):\n \"\"\"关闭打开的EXCEL,\"\"\"\n self.save()\n if not self._excel_app and not self._work_book:\n self._work_book.Close(True)\n self._work_book = None\n self._is_open = False\n self._xls_file = \"\"\n return\n\n def save(self):\n if not self._work_book or self._xls_file == \"\":\n return\n if self.book_saved:\n return\n if self._is_new:\n self._work_book.SaveAs(self._xls_file)\n self._is_new = False\n else:\n self._work_book.Save()\n\n @property\n def book_saved(self) -> bool:\n saved = self._work_book.Saved\n return bool(saved)\n\n def sheets_count(self):\n count = self._work_sheets.Count\n return count\n\n def sheets_name(self):\n \"\"\"返回sheets 的名称列表\"\"\"\n name_list = []\n count = self._work_sheets.Count\n i = 0\n while i < count:\n name_list.append(self._work_book.Worksheets(i + 1).Name)\n i += 1\n return name_list\n\n def load_sheet_byindex(self, sheet_index: int):\n self._work_sheet = self._work_book.Worksheets(sheet_index)\n if not self._work_sheet:\n return False\n return True\n\n def load_sheet_byname(self, sheet_name: str):\n \"\"\"\n\n :param sheet_name: \n :return: \n \"\"\"\n self._work_sheet = self._work_book.Worksheets(sheet_name)\n if not self._work_sheet:\n return False\n return True\n\n @staticmethod\n def _range_coord(read_range):\n \"\"\"\"\"\"\n row_count = read_range.Rows.Count\n column_count = read_range.Columns.Count\n # 因为excel可以从任意行列填数据而不一定是从1, 1 开始,因此要获取首行列下标\n # 第一行,列的起始位置\n row_start = read_range.Row\n column_start = read_range.Column\n return row_start, column_start, row_count, column_count\n\n @staticmethod\n def _range_data(read_range):\n \"\"\"\"\"\"\n if not read_range:\n return 0, 0, 0, 0, []\n else:\n row_start, column_start, row_count, column_count = \\\n ExcelDataBase._range_coord(read_range)\n return row_start, column_start, row_count, column_count, read_range.Value\n\n def used_range_coord(self):\n \"\"\"\n 取得UsedRange的各种坐标,包括起始行号,列号,以及占用的行总数,列总数\n 
注意UsedRange并不一定是从0,0开始的\n :return:UsedRange的行起始,列起始,行总数,列总数\n \"\"\"\n used_range = self._work_sheet.UsedRange\n if not used_range:\n return 0, 0, 0, 0\n else:\n return self._range_coord(used_range)\n\n def used_range_data(self):\n used_rg = self._work_sheet.UsedRange\n return self._range_data(used_rg)\n\n def sheet_cell(self, row, column):\n # 如果预加载了数据,\n return self._work_sheet.Cells(row, column).Value\n\n @staticmethod\n def column_name(column_num):\n \"\"\"\"\"\"\n assert column_num > 0\n n = column_num\n lst = []\n while True:\n if n > 0:\n # EXCEL 奇特的规则导致的这个地方,没有0,和一般的转码不太一样\n n -= 1\n m = n % 26\n n //= 26\n lst.append(chr(m + ord('A')))\n if n <= 0:\n break\n lst.reverse()\n return \"\".join(lst)\n\n def range(self, cell1: str, cell2: str = None):\n return self._work_sheet.Range(cell1, cell2)\n\n def range2(self, cell1_row: int, cell1_column: int, cell2_row: int, cell2_column: int):\n cell1 = str(cell1_row) + ExcelDataBase.column_name(cell1_column)\n cell2 = str(cell2_row) + ExcelDataBase.column_name(cell2_column)\n return self._work_sheet.Range(cell1, cell2)\n\n def range_coord(self, cell1: str, cell2: str = None):\n get_range = self.range(cell1, cell2)\n return self._range_coord(get_range)\n\n def range_data(self, cell1: str, cell2: str = None):\n get_range = self.range(cell1, cell2)\n return self._range_data(get_range)\n\n\nif __name__ == '__main__':\n print(\"Hello world!{}\".format(__file__))\n print(\"column_name {} {}\".format(1, ExcelDataBase.column_name(1)))\n print(\"column_name {} {}\".format(26, ExcelDataBase.column_name(26)))\n print(\"column_name {} {}\".format(27, ExcelDataBase.column_name(27)))\n print(\"column_name {} {}\".format(52, ExcelDataBase.column_name(52)))\n print(\"column_name {} {}\".format(200, ExcelDataBase.column_name(200)))\n print(\"column_name {} {}\".format(888, ExcelDataBase.column_name(888)))\n","repo_name":"sailzeng/OAMail","sub_path":"source/office/excel_database.py","file_name":"excel_database.py","file_ext":"py","file_size_in_byte":6801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"8820350710","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 2 18:30:55 2016\n\n@author: yiyuezhuo\n\"\"\"\n\nimport requests\nimport webbrowser\nimport json\nimport datetime\nfrom bs4 import BeautifulSoup\nimport re\n\ndef fff(content, temp_name = 'temp.html'):\n if type(content) == bytes:\n content = str(content)\n elif type(content) == requests.models.Response:\n content = str(content.content)\n with open(temp_name, 'w')as f:\n f.write(content)\n webbrowser.open(temp_name)\n \ndef get_user_info(config_path = 'config.json'):\n with open(config_path) as f:\n config = json.load(f)\n return config\n \ndef get_date(delta_days):\n date = datetime.date.today() - datetime.timedelta(days = delta_days)\n return datetime.datetime.strftime(date,'%Y-%m-%d')\n\ndata_url = 'http://steamspy.com/ajax/slowdata.php'\nlogin_url = 'http://steamspy.com/login/'\nroot_url = 'http://steamspy.com/app/'\n\ndef need_login(method):\n def _method(self, *args, **kwargs):\n if self.logined == False:\n self.login()\n return method(self, *args, **kwargs)\n return _method\n\nclass Scraper(object):\n def __init__(self, info = None):\n self.session = requests.session()\n if info == None:\n self.info = get_user_info()\n else:\n self.info = info\n self.logined = False\n \n def login(self):\n self.session.get(login_url)\n form =self.info.copy()\n form.update({'keeplogged' : 1,\n 'submit' : '',\n 'doLogin' : 1})\n self.logined = True\n return self.session.post(login_url, data = form)\n \n @need_login\n def get_geography(self, appid):\n form = { 'request' : 'App Geography',\n 'appid' : appid,\n 'YesterdayD' : get_date(1),\n 'FreeDateD' : get_date(91)}\n res = self.session.post(data_url, data = form)\n rd = json.loads(res.content.decode('utf8'))\n assert rd['result'] == 'Success'\n data = json.loads(rd['html'][rd['html'].index('['):rd['html'].rindex(']')+1])\n return data\n \n def get_general(self, appid):\n url = root_url + appid\n html = self.session.get(url).content\n return enhance_general(parse_general(html))\n \nclass list_map_escape:\n pass\n \ndef list_map(term_map):\n # 还是另请高明吧\n def _func(tree, *args, **kwargs):\n rl = []\n for node in tree:\n if type(node) == list:\n rl.append(_func(node, *args, **kwargs))\n else:\n r = term_map(node, *args, **kwargs)\n if r != list_map_escape:\n rl.append(r)\n return rl\n return _func\n \n@list_map\ndef to_text(node):\n if hasattr(node,'text'):\n s = node.text\n else:\n s = node.__str__()\n if s.strip() == '':\n return list_map_escape\n else:\n return s\n\ndef test():\n scraper = Scraper()\n scraper.login()\n print(scraper.get_geography())\n \nif __name__ == '__main__':\n '''\n scraper = Scraper()\n scraper.login()\n data = scraper.get_geography('315810')\n '''\n\n''' test record\nsession = requests.session()\nres1 = session.get(login_url)\nform1 = get_user_info() # get username and password by json file config.json\nform1.update({'keeplogged' : 1,\n 'submit' : '',\n 'doLogin' : 1})\nres2 = session.post(login_url, data = form1)\nform2 = {'request' : 'App Geography',\n 'appid' : '315810',\n 'YesterdayD' : '2016-08-01',\n 'FreeDateD' : '2016-05-03'}\nres3 = session.post(data_url, data = form2)\nrd = json.loads(res3.content.decode('utf8'))\nassert rd['result'] == 'Success'\nrd2 = json.loads(rd['html'][rd['html'].index('['):rd['html'].rindex(']')+1])\n'''\n\ndef parse_general(html):\n soup = BeautifulSoup(html, 'lxml')\n el = list(soup.find(attrs = {'class' : 'p-r-30'}))[2]\n dic = {}\n cut = []\n for child in el.children:\n if child.name == 'strong':\n 
dic[cut[0].text] = to_text(cut[1:])\n cut = [child]\n else:\n cut.append(child)\n \n rd = {}\n # remove vervose key char :\n for key in dic.keys():\n if len(key)>0:\n value = dic[key]\n if key[-1] == ':':\n rd[key[:-1]] = value\n else:\n rd[key] = value\n # reduction list \n one_term_list = ['Owners',\n 'Peak concurrent players yesterday',\n 'Players in the last 2 weeks',\n 'Players total',\n 'Playtime in the last 2 weeks',\n 'Price',\n 'Release date',\n 'Score rank',\n 'Userscore',\n 'YouTube stats']\n for key in one_term_list:\n value = rd[key][0]\n if value[0] == ':':\n value = value[1:]\n rd[key] = value.strip()\n \n # special process\n rd['Tags'] = [value for i,value in enumerate(rd['Tags']) if i%2==0]\n rd['Category'] = [cat.strip() for cat in rd['Category'][0].split(',')]\n return rd\n \ndef enhance_general(string_dic, time_process = True):\n \n rd = string_dic.copy()\n \n for key in ['Owners',\n 'Players in the last 2 weeks',\n 'Players total']:\n value = string_dic[key]\n index = value.find('(')\n if index != -1:\n value = value[:index]\n # default is mean/average\n rd[key],rd[key + '_std'] = value.replace(',', '').split('±')\n rd[key],rd[key + '_std'] = int(rd[key]),int(rd[key + '_std'])\n \n for key in ['Score rank','Userscore']:\n value = float(string_dic[key][:-1])/100\n rd[key] = value\n rd['Price'] = float(string_dic['Price'][1:]) # remove $ char\n rd['Peak concurrent players yesterday'] = int(rd['Peak concurrent players yesterday'])\n \n if time_process:\n rd['Release date'] = datetime.datetime.strptime(string_dic['Release date'],'%b %d, %Y')\n \n average, median = re.match(r'(.+)\\(average\\)(.+)\\(median\\)',string_dic['Playtime in the last 2 weeks']).groups()\n average_m,average_s = average.strip().split(':')\n median_m,median_s = median.strip().split(':')\n rd['Playtime in the last 2 weeks'] = datetime.timedelta(seconds = int(average_m) * 60 + int(average_s))\n rd['Playtime in the last 2 weeks_median'] = datetime.timedelta(seconds = int(median_m) * 60 + int(median_s))\n \n return rd\n","repo_name":"yiyuezhuo/steam-research","sub_path":"steamspy.py","file_name":"steamspy.py","file_ext":"py","file_size_in_byte":6547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"10183513463","text":"import sys\n\ncurrent_working_directory = r\"C:\\Users\\Hoo\\Documents\\workspace\\python\\tencent\"\nsys.path.append(current_working_directory)\n\nimport numpy as np\nimport pandas as pd\n\nfrom matplotlib import font_manager as fm\nfrom matplotlib import cm\nfrom matplotlib import pyplot as plt\nfrom txvideo.txvideo.analysis.db_getDate import Data\n\nif __name__ == '__main__':\n data = Data()\n plt.rcParams['font.sans-serif'] = ['SimHei']\n shapes = list(sys.argv[1:])\n values = data.categoryDate(shapes)\n s = pd.Series(values, index=shapes)\n labels = s.index\n sizes = s.values\n # explode = (0.1, 0, 0) # \"explode\" , show the selected slice\n explode = tuple(float(item / 1000) for item in values)\n fig, axes = plt.subplots(figsize=(8, 5), ncols=2) # 设置绘图区域大小\n ax1, ax2 = axes.ravel()\n\n colors = cm.rainbow(np.arange(len(sizes)) / len(sizes)) # colormaps: Paired, autumn, rainbow, gray,spring,Darks\n patches, texts, autotexts = ax1.pie(sizes, labels=labels, autopct='%1.0f%%', explode=explode,\n shadow=False, startangle=170, colors=colors, labeldistance=1.2,\n pctdistance=1.1, radius=0.4)\n # labeldistance: 控制labels显示的位置\n # pctdistance: 控制百分比显示的位置\n # radius: 控制切片突出的距离\n\n ax1.axis('equal')\n\n # 重新设置字体大小\n proptease = fm.FontProperties()\n proptease.set_size('medium')\n # font size include: ‘xx-small’,x-small’,'small’,'medium’,‘large’,‘x-large’,‘xx-large’ or number, e.g. '12'\n plt.setp(autotexts, fontproperties=proptease)\n plt.setp(texts, fontproperties=proptease)\n\n ax1.set_title('腾讯视频电影分类统计', loc='center')\n\n # ax2 只显示图例(legend)\n ax2.axis('off')\n ax2.legend(patches, labels, loc='center left')\n\n plt.tight_layout()\n # plt.savefig(\"pie_shape_ufo.png\", bbox_inches='tight')\n plt.savefig('Demo_project_final.png')\n plt.show()\n","repo_name":"JayHooooooo/Python","sub_path":"txvideo/txvideo/analysis/platPic.py","file_name":"platPic.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"12087323301","text":"from random import sample\n\nclass Card():\n\t\"\"\"Class for single card, storing question, answer, \n\tand boolean flag status.\"\"\"\n\n\tdef __init__(self, question='', answer=''):\n\t\t\"\"\"Instantiate Card class.\"\"\"\n\t\tself.question = question\n\t\tself.answer = answer\n\t\tself.flag = False\n\t\tself.remove = False\n\n\tdef add_question(self, q): \t# Add question and answer, stored as string\n\t\tself.question = str(q)\n\n\tdef add_answer(self, a):\t###\n\t\tself.answer = str(a)\n\n\tdef get_question(self): \t# Returns question or answer string\n\t\treturn self.question\n\n\tdef get_answer(self):\t\t###\n\t\treturn self.answer\n\n\tdef ch_flag(self): \t\t\t# Change flag status\n\t\tself.flag *= -1\n\n\tdef get_flag(self): \t\t# Returns flag status (default is False)\n\t\treturn self.flag\n\n\tdef remove_card():\t\t\t# Sets card for removal from flagging\n\t\tself.remove = True\t\n\n\nclass Deck(Card):\n\t\"\"\"Class for deck of Card objects, including 'run_menu' function, \n\twhich drills cards on either side, allows user to flag for further review.\"\"\"\n\n\tdef __init__(self):\n\t\tself.cards = []\t\t\t\t\t# Empty list\n\t\tself.hr = '\\n'+'-'*25\t\t\t# Horizontal rule -\n\t\tself.m_hr = '\\n'+'*'*25\t\t\t# Horizontal rule *\n\t\tself.gate = False\t\t\t\t# To alternate handling of flags across runs\n\t\tself.yn = ''\t\t\t\t\t# Flag request input value\n\t\tself.new_flag_round = False\t\t# To indicate new flag round\n\t\tself.menu_response = ''\t\t\t# Input receiver\n\n\tdef add_card(self, c):\n\t\tself.cards.append(c)\t\t\t# Adds card to deck list\n\n\tdef shuffle(self):\n\t\tself.cards = sample(self.cards, len(self.cards))\t# Shuffles deck\n\n\tdef reset_flags(self):\t\t\t# Resets flagged cards in deck to False\n\t\tfor card in self.cards:\n\t\t\tif card.get_flag(): card.ch_flag()\n\n\tdef reset_remove(self):\t\t\t# Reset remove status to False\n\t\tfor card in self.cards:\n\t\t\tif card.remove: card.remove = False\n\n\tdef drill(self, rev=False):\t\t# Runs a drill of all cards and allows flagging\n\t\tself.reset_flags()\t\t\t# Resets flags for new run\n\t\tself.reset_remove()\t\t\t# Resets remove status to False for new run\n\t\tself.shuffle()\t\t\t\t# Shuffles deck\n\t\tfor card in self.cards:\n\t\t\tprint(self.hr)\n\t\t\tif rev: print(card.get_answer())\t# If running answers\n\t\t\telse: print(card.get_question())\t# Otherwise just do questions\n\t\t\tinput('\\n--(press return to flip)--\\n')\n\t\t\tif rev: print(card.get_question())\t# If running answers\n\t\t\telse: print(card.get_answer())\t\t# Otherwise just show the answer\n\t\t\tprint(self.hr)\n\t\t\tself.yn = input('\\nFlag card for review? 
(y/n)\\n')\n\t\t\tif self.yn == 'y' or self.yn == 'Y':\n\t\t\t\tcard.ch_flag()\t\t\t\t\t# Flag card for review if 'Y/y'\n\t\tprint(self.hr)\n\t\tprint(\"End of deck\\n\")\n\t\tprint(self.hr)\n\n\tdef drill_flags(self, rev=False):\n\t\tself.new_flag_round = False\t\t\t\t# Reset new round indicator\n\t\tself.shuffle()\t\t\t\t\t\t\t# Shuffle for new flag round\n\n\t\tif self.gate == False:\t\n\t\t\tfor card in self.cards:\t\t\t\t# If no gate: Remove unflagged cards\n\t\t\t\tif card.get_flag() == False: card.remove_card()\n\t\t\t\t\t\t\t\t\t\t\t\t# Generates list alternating\n\t\t\tself.flagged_cards = [card for card in self.cards \t# flagged and\n\t\t\t\tif card.get_flag()]\t\t\t\t\t\t\t\t# unflagged\n\t\telse:\t\t\t\t\t\t\t\t\t# If gate: Remove flagged cards\n\t\t\tfor card in self.cards:\n\t\t\t\tif card.get_flag(): card.remove_card()\n\t\t\tself.flagged_cards = [card for card in self.cards \n\t\t\t\tif not card.get_flag()]\n\t\tfor card in self.flagged_cards:\t\t\t# For flagged cards to run\n\t\t\tprint(self.hr)\n\t\t\tif rev: print(card.get_answer())\n\t\t\telse: print(card.get_question())\n\t\t\tinput('\\n---(press return to flip)---\\n')\n\t\t\tif rev: print(card.get_question())\n\t\t\telse: print(card.get_answer())\n\t\t\tprint(self.hr)\n\t\t\tself.yn = input('\\nFlag card for review (y/n)\\n')\n\t\t\tif self.yn == 'y' or self.yn == 'Y':\n\t\t\t\tnew_flag_round = True\t\t\t# Sets to remove unflagged cards\n\t\t\t\tcard.ch_flag()\t\t\t\t\t# Change flag to differentiate\n\t\tprint(self.hr)\n\t\tprint(\"End of deck\\n\")\n\t\tprint(self.hr)\t\t\t\t\n\t\tif new_flag_round: self.gate *= -1\t\t# Change gate to look at other flag\n\n\n\tdef run_menu(self):\n\t\twhile True:\t\t\t\t\t\t\t\t# Run until 'break'\n\t\t\tprint(self.m_hr)\n\t\t\tprint(\"FLASHCARDS MENU\")\n\t\t\tprint(\"1: Run Questions\")\n\t\t\tprint(\"2: Run Answers\")\n\t\t\tprint(\"Q: Quit\")\n\t\t\tprint(self.m_hr)\n\t\t\tself.menu_response = input()\n\t\t\ttry:\n\t\t\t\tif int(self.menu_response) == 1: \n\t\t\t\t\tself.drill()\n\t\t\t\t\tself.run_flags = input(\"Review flagged cards? (y/n)\\n\")\n\t\t\t\t\twhile self.run_flags == 'y' or self.run_flags == 'Y':\n\t\t\t\t\t\tself.drill_flags()\n\t\t\t\telif int(self.menu_response) == 2: \n\t\t\t\t\tself.drill(rev=True)\n\t\t\t\t\tself.run_flags = input(\"Review flagged cards? 
(y/n)\\n\")\n\t\t\t\t\twhile self.run_flags == 'y' or self.run_flags == 'Y':\n\t\t\t\t\t\tself.drill_flags(rev=True)\n\t\t\texcept:\n\t\t\t\tif self.menu_response == 'q' or self.menu_response == 'Q': break\n\npy_study = Deck()\nnew_cards = []\n\n# Definitions for card objects\nnew_cards.append(Card('List Comprehension', \n\t'[i for i in range(min, max)]'))\nnew_cards.append(Card('Bernoulli Distribution', \n\t'Probability distribution with binary outcomes over a single trial'))\n\"\"\"\nnew_cards.append(Card('Binomial Distribution', \n\t'Probability distribution with binary outcomes over multiple trials'))\nnew_cards.append(Card('Poisson Distribution', \n\t'Probability distribution with binary outcomes given lambda over time'))\nnew_cards.append(Card('Discrete Probability Distribution', \n\t'Probability distribution with finite (countable) possible outcomes'))\nnew_cards.append(Card('Continuous Probability Distribution', \n\t'Probability distribution with infinite (float) possible outcomes'))\nnew_cards.append(Card('Probability Mass Function', \n\t'Probability that discrete outcome will occur over X number of trials'))\nnew_cards.append(Card('Cumulative Density Function',\n\t'Probability that discrete/continuous outcome X or less will occur'))\n\"\"\"\n\n# Adds all cards to master deck\nfor card in new_cards:\n\tpy_study.add_card(card)\n\npy_study.run_menu()\t\t\t\t\t\t\t# Runs program\n\n","repo_name":"twludlow/flashcards","sub_path":"flashcards.py","file_name":"flashcards.py","file_ext":"py","file_size_in_byte":5744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"15210472479","text":"\"\"\"\n\n# TODO\n\n[32] Longest Valid Parentheses\n\n\nGiven a string containing just the characters '(' and ')', find the length of the longest valid (well-formed) parentheses substring.\n\n\n--------------------------------------------------\n\nExample 1:\n\n\nInput: s = \"(()\"\nOutput: 2\nExplanation: The longest valid parentheses substring is \"()\".\n\n\n--------------------------------------------------\n\nExample 2:\n\n\nInput: s = \")()())\"\nOutput: 4\nExplanation: The longest valid parentheses substring is \"()()\".\n\n\n--------------------------------------------------\n\nExample 3:\n\n\nInput: s = \"\"\nOutput: 0\n\n\n\nConstraints:\n\n\n\t0 <= s.length <= 3 * 10⁴\n\ts[i] is '(', or ')'.\n\n################################################################\n\n\n32. 最长有效括号\n给你一个只包含 '(' 和 ')' 的字符串,找出最长有效(格式正确且连续)括号子串的长度。\n\n\n\n示例 1:\n\n输入:s = \"(()\"\n输出:2\n解释:最长有效括号子串是 \"()\"\n\n\n示例 2:\n\n输入:s = \")()())\"\n输出:4\n解释:最长有效括号子串是 \"()()\"\n\n\n示例 3:\n\n输入:s = \"\"\n输出:0\n\n\n提示:\n\n0 <= s.length <= 3 * 104\ns[i] 为 '(' 或 ')'\n\n\n\"\"\"\n\nimport sys\nimport inspect\nimport os\nimport unittest\nfrom os.path import abspath, join, dirname\nfrom typing import *\n\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparentdir = os.path.dirname(currentdir)\nparentdir = os.path.dirname(parentdir) # algo\nparentdir = os.path.dirname(parentdir) # leetcode\nparentdir = os.path.dirname(parentdir) # algo\nsys.path.insert(0, parentdir)\n# print(sys.path)\n\n\nfrom algo.tree.builder import *\n\n\nclass Solution:\n \"\"\"\n 对于遇到的每个 '(' ,我们将它的下标放入栈中\n 对于遇到的每个 ')' ,我们先弹出栈顶元素表示匹配了当前右括号:\n 如果栈为空,说明当前的右括号为没有被匹配的右括号,我们将其下标放入栈中来\n 更新我们之前提到的「最后一个没有被匹配的右括号的下标」\n 如果栈不为空,当前右括号的下标减去栈顶元素即为\n 「以该右括号为结尾的最长有效括号的长度」\n\n 作者:LeetCode-Solution\n 链接:https://leetcode.cn/problems/longest-valid-parentheses/solution/zui-chang-you-xiao-gua-hao-by-leetcode-solution/\n 来源:力扣(LeetCode)\n 著作权归作者所有。商业转载请联系作者获得授权,非商业转载请注明出处。\n\n \"\"\"\n\n def s1(self, s):\n stack = [-1]\n ans = 0\n for i in range(len(s)):\n if s[i] == \"(\":\n stack.append(i)\n elif s[i] == \")\":\n stack.pop()\n if stack:\n ans = max(ans, i - stack[-1])\n else:\n stack.append(i)\n\n return ans\n\n def s2(self, s):\n if not s:\n return 0\n\n N = len(s)\n # dp[i] 表示以 s[i] 结尾的,最长有效括号长度\n dp = [0] * N\n stack = []\n for i in range(N):\n if s[i] == \"(\":\n stack.append(s[i])\n else: # ')'\n if stack:\n stack.pop()\n pair_count = 2 + dp[i - 1]\n # 查看当前有效括号长度之前的 dp 数组结果\n pre_index = i - pair_count\n if pre_index > 0:\n pair_count += dp[pre_index]\n dp[i] = pair_count\n\n return max(dp)\n\n def longestValidParentheses(self, s: str) -> int:\n # return self.s1(s)\n\n return self.s2(s)\n\n\nclass TestSolution(unittest.TestCase):\n def setUp(self):\n self.sl = Solution()\n\n def test_sl(self):\n s = \"()(())\"\n self.assertEqual(\n self.sl.longestValidParentheses(s),\n 6,\n )\n print(\"################\")\n\n def test_sl2(self):\n s = \"()(()\"\n self.assertEqual(\n self.sl.longestValidParentheses(s),\n 2,\n )\n print(\"################\")\n\n def test_sl3(self):\n s = \")()())\"\n self.assertEqual(\n self.sl.longestValidParentheses(s),\n 4,\n )\n print(\"################\")\n\n def test_sl4(self):\n s = \"(()\"\n self.assertEqual(\n self.sl.longestValidParentheses(s),\n 2,\n )\n print(\"################\")\n\n def test_sl5(self):\n s = \")()())\"\n self.assertEqual(\n self.sl.longestValidParentheses(s),\n 4,\n )\n print(\"################\")\n\n\nif __name__ == \"__main__\":\n 
unittest.main()\n","repo_name":"shiyang07ca/lab","sub_path":"algo/oj/leetcode/algo/00032/sl.py","file_name":"sl.py","file_ext":"py","file_size_in_byte":4647,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
+{"seq_id":"72174793851","text":"import json\nimport requests\nfrom helper import *\nimport system_status\nFEEDER_PROCESS_URL = \"http://172.16.13.61:6100/feeder_processes\"\n\n\ndef watch():\n statuses = []\n for url in SYSTEMS_TO_WATCH:\n status = system_status.SystemStatus(url, getCurrentStatus(url))\n statuses.append(status)\n\n return statuses\n\n\ndef getCurrentStatus(url):\n response = requests.head(url)\n return response.status_code\n\n\nSYSTEMS_TO_WATCH = [\n 'http://fund-clients-service-qa.aws.guideinvestimentos.com.br:7005/service_status',\n 'http://fund-data-service-qa.aws.guideinvestimentos.com.br:7000/service_status',\n 'http://localhost:7014/service_status'\n]\n","repo_name":"rlnascimento05/slackbot-guide","sub_path":"app/watcher.py","file_name":"watcher.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"35687914561","text":"import openai\nfrom dotenv import load_dotenv\nimport os\n\nload_dotenv()\n\n#definicao da chave da API\nopenai.api_key = os.getenv(\"OPENAI_API_KEY\")\n\ndef ler_arquivo(arquivo):\n with open(arquivo, 'r') as file:\n return file.read()\n \ndef resumo(texto):\n response = openai.Completion.create(\n engine=\"text-davinci-003\",\n #campo onde é inserido a requisição para o chatGPT\n prompt=f\"Resuma o seguinte texto: {texto}\",\n #temperature é o atributo que indica o nivel de ousadia na resposta, quanto mais próximo de 1, mais ousado é, e quanto mais proximo de 0, mais conservador é.\n temperature=1,\n max_tokens=2048,\n top_p=1,\n stop=None\n )\n resposta = response['choices'][0]['text'].strip()\n resposta.encode(\"utf-8\").decode()\n return resposta\n\narquivo = 'artigo.txt'\ntexto = ler_arquivo(arquivo)\n# print(texto)\nprint(resumo(texto))","repo_name":"mateuskienzle/openai_project","sub_path":"resumo.py","file_name":"resumo.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"12851369444","text":"\"\"\"\nTest the ssh_known_hosts states\n\"\"\"\n\nimport os\nimport shutil\n\nimport pytest\n\nfrom tests.support.case import ModuleCase\nfrom tests.support.mixins import SaltReturnAssertsMixin\nfrom tests.support.runtests import RUNTIME_VARS\n\nGITHUB_FINGERPRINT = \"b8:d8:95:ce:d9:2c:0a:c0:e1:71:cd:2e:f5:ef:01:ba:34:17:55:4a:4a:64:80:d3:31:cc:c2:be:3d:ed:0f:6b\"\nGITHUB_IP = \"140.82.121.4\"\n\n\n@pytest.mark.skip_if_binaries_missing(\"ssh\", \"ssh-keygen\", check_all=True)\nclass SSHKnownHostsStateTest(ModuleCase, SaltReturnAssertsMixin):\n \"\"\"\n Validate the ssh state\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.known_hosts = os.path.join(RUNTIME_VARS.TMP, \"known_hosts\")\n\n def tearDown(self):\n if os.path.isfile(self.known_hosts):\n os.remove(self.known_hosts)\n super().tearDown()\n\n @pytest.mark.slow_test\n def test_present(self):\n \"\"\"\n ssh_known_hosts.present\n \"\"\"\n kwargs = {\n \"name\": \"github.com\",\n \"user\": \"root\",\n \"enc\": \"ssh-rsa\",\n \"fingerprint\": GITHUB_FINGERPRINT,\n \"config\": self.known_hosts,\n }\n # test first\n ret = self.run_state(\"ssh_known_hosts.present\", test=True, **kwargs)\n self.assertSaltNoneReturn(ret)\n\n # save once, new key appears\n ret = self.run_state(\"ssh_known_hosts.present\", **kwargs)\n try:\n self.assertSaltTrueReturn(ret)\n except AssertionError:\n self.assertInSaltComment(\"Unable to receive remote host key\", ret)\n self.skipTest(\"Unable to receive remote host key\")\n\n self.assertSaltStateChangesEqual(\n ret, GITHUB_FINGERPRINT, keys=(\"new\", 0, \"fingerprint\")\n )\n\n # save twice, no changes\n self.run_state(\"ssh_known_hosts.present\", **kwargs)\n\n # test again, nothing is about to be changed\n ret = self.run_state(\"ssh_known_hosts.present\", test=True, **kwargs)\n self.assertSaltTrueReturn(ret)\n\n # then add a record for IP address\n # pylint: disable=repeated-keyword\n ret = self.run_state(\"ssh_known_hosts.present\", **dict(kwargs, name=GITHUB_IP))\n # pylint: enable=repeated-keyword\n try:\n self.assertSaltStateChangesEqual(\n ret, GITHUB_FINGERPRINT, keys=(\"new\", 0, \"fingerprint\")\n )\n except AssertionError:\n self.assertInSaltComment(\"Unable to receive remote host key\", ret)\n self.skipTest(\"Unable to receive remote host key\")\n\n # record for every host must be available\n ret = self.run_function(\n \"ssh.get_known_host_entries\",\n [\"root\", \"github.com\"],\n config=self.known_hosts,\n )[0]\n try:\n self.assertNotIn(ret, (\"\", None))\n except AssertionError:\n raise AssertionError(\"Salt return '{}' is in ('', None).\".format(ret))\n ret = self.run_function(\n \"ssh.get_known_host_entries\", [\"root\", GITHUB_IP], config=self.known_hosts\n )[0]\n try:\n self.assertNotIn(ret, (\"\", None, {}))\n except AssertionError:\n raise AssertionError(\n \"Salt return '{}' is in ('', None,\".format(ret) + \" {})\"\n )\n\n @pytest.mark.slow_test\n def test_present_fail(self):\n # save something wrong\n ret = self.run_state(\n \"ssh_known_hosts.present\",\n name=\"github.com\",\n user=\"root\",\n fingerprint=\"aa:bb:cc:dd\",\n config=self.known_hosts,\n )\n self.assertSaltFalseReturn(ret)\n\n @pytest.mark.slow_test\n def test_absent(self):\n \"\"\"\n ssh_known_hosts.absent\n \"\"\"\n known_hosts = os.path.join(RUNTIME_VARS.FILES, \"ssh\", \"known_hosts\")\n shutil.copyfile(known_hosts, self.known_hosts)\n if not os.path.isfile(self.known_hosts):\n self.skipTest(\n \"Unable to copy {} to {}\".format(known_hosts, self.known_hosts)\n )\n\n kwargs = 
{\"name\": \"github.com\", \"user\": \"root\", \"config\": self.known_hosts}\n # test first\n ret = self.run_state(\"ssh_known_hosts.absent\", test=True, **kwargs)\n self.assertSaltNoneReturn(ret)\n\n # remove once, the key is gone\n ret = self.run_state(\"ssh_known_hosts.absent\", **kwargs)\n self.assertSaltStateChangesEqual(\n ret, GITHUB_FINGERPRINT, keys=(\"old\", 0, \"fingerprint\")\n )\n\n # remove twice, nothing has changed\n ret = self.run_state(\"ssh_known_hosts.absent\", **kwargs)\n self.assertSaltStateChangesEqual(ret, {})\n\n # test again\n ret = self.run_state(\"ssh_known_hosts.absent\", test=True, **kwargs)\n self.assertSaltTrueReturn(ret)\n","repo_name":"saltstack/salt","sub_path":"tests/integration/states/test_ssh_known_hosts.py","file_name":"test_ssh_known_hosts.py","file_ext":"py","file_size_in_byte":4753,"program_lang":"python","lang":"en","doc_type":"code","stars":13606,"dataset":"github-code","pt":"78"}
+{"seq_id":"36405316039","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport logging\nfrom abc import ABCMeta, abstractmethod\nfrom typing import Callable, Union\n\nimport colorlog\nfrom pylfi.distances import euclidean\nfrom pylfi.utils.checks import check_distance_str\n\n\ndef setup_logger(name):\n \"\"\"Return a logger with a default ColoredFormatter.\"\"\"\n formatter = colorlog.ColoredFormatter(\n \"%(log_color)s%(levelname)-8s%(reset)s %(blue)s%(message)s\",\n datefmt=None,\n reset=True,\n log_colors={\n \"DEBUG\": \"cyan\",\n \"INFO\": \"green\",\n \"WARNING\": \"yellow\",\n \"ERROR\": \"red\",\n \"CRITICAL\": \"red\",\n },\n )\n\n logger = logging.getLogger(name)\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(logging.INFO)\n\n return logger\n\n\n# class ABCBase(metaclass=ABCMeta):\nclass ABCBase:\n def __init__(self, observation, simulator, statistics_calculator, priors, distance_metric, seed):\n \"\"\"\n simulator : callable\n simulator model\n summary_calculator : callable, defualt None\n summary statistics calculator. If None, simulator should output\n sum stat\n distance : str\n Can be a custom function or one of l1, l2, mse\n distance_metric : callable\n discrepancy measure\n \"\"\"\n self._obs_data = observation\n self._stat_calc = statistics_calculator\n self._simulator = simulator\n self._priors = priors\n self._seed = seed\n\n if isinstance(self._obs_data, tuple):\n self._obs_sumstat = self._stat_calc(*self._obs_data)\n else:\n self._obs_sumstat = self._stat_calc(self._obs_data)\n\n # Select distance function.\n if callable(distance_metric):\n self._distance_metric = distance_metric\n elif isinstance(distance_metric, str):\n check_distance_str(distance_metric)\n self._distance_metric = self._choose_distance(distance_metric)\n else:\n raise TypeError()\n\n #self.logger = setup_logger(self.__class__.__name__)\n #self.logger = colorlog.getLogger(self.__class__.__name__)\n\n @abstractmethod\n def sample(self):\n \"\"\"To be overwritten by sub-class: should implement sampling from\n inference scheme and return journal.\n\n Returns\n -------\n pylfi.journal\n Journal\n \"\"\"\n\n raise NotImplementedError\n\n @staticmethod\n def _choose_distance(distance):\n \"\"\"Return distance function for given distance type.\"\"\"\n if distance == 'l1':\n return None\n elif distance == 'l2':\n return euclidean\n elif distance == 'mse':\n return None\n\n @staticmethod\n def run_lra():\n \"\"\"Linear regression adjustment as in Beaumont et al. 2002.\n \"\"\"\n pass\n","repo_name":"nicolossus/pylfi","sub_path":"_dev/inferences/abc_base2.py","file_name":"abc_base2.py","file_ext":"py","file_size_in_byte":2940,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
+{"seq_id":"27464823968","text":"class Solution(object):\n def findKthLargest(self, nums, k):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :rtype: int\n \"\"\"\n\n \"\"\"\n # Method 1: O(k+(n-k)lgk) time, min-heap\n heap = []\n for num in nums:\n heapq.heappush(heap, num)\n for _ in xrange(len(nums)-k):\n heapq.heappop(heap)\n return heapq.heappop(heap)\n \"\"\"\n\n # Method 2 O(nlogk)\n h = []\n for n in nums:\n if len(h) < k:\n heapq.heappush(h, n)\n else:\n heapq.heappushpop(h, n)\n return h[0]","repo_name":"DanishKhan14/DumbCoder","sub_path":"Python/heap/kthLargestNumber.py","file_name":"kthLargestNumber.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"30409807","text":"import argparse\n\nimport matplotlib.pyplot as plt\n\nfrom . import huber, hypres, ibm\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--draw\",\n action=\"store_true\",\n help=\"Use Device.draw() instead of Device.plot()\",\n )\n parser.add_argument(\n \"--same-scale\",\n action=\"store_true\",\n help=\"Whether to plot all devices on the same scale.\",\n )\n parser.add_argument(\n \"--no-terminals\", action=\"store_true\", help=\"Set with_terminals=False\"\n )\n args = parser.parse_args()\n\n squid_funcs = [\n hypres.small.make_squid,\n ibm.small.make_squid,\n ibm.medium.make_squid,\n ibm.large.make_squid,\n ibm.xlarge.make_squid,\n huber.make_squid,\n ]\n\n plt.rcParams[\"savefig.dpi\"] = 200\n\n fig, axes = plt.subplots(\n 1,\n len(squid_funcs),\n figsize=(len(squid_funcs) * 3, 3),\n sharex=args.same_scale,\n sharey=args.same_scale,\n constrained_layout=True,\n )\n\n for ax, make_squid in zip(axes, squid_funcs):\n squid = make_squid(with_terminals=(not args.no_terminals))\n if args.draw:\n squid.draw(ax=ax, legend=False)\n else:\n squid.plot_polygons(ax=ax, legend=False)\n ax.set_title(make_squid.__module__)\n plt.show()\n","repo_name":"loganbvh/superscreen","sub_path":"docs/notebooks/squids/show_all.py","file_name":"show_all.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"78"}
+{"seq_id":"3787683510","text":"# print(6,[1,2,3,4,5,4,7])\n\ndef solve(n, arr):\n l2=list()\n for i in range(len(arr)):\n if arr[i] not in l2:\n l2.append(arr[i])\n\n if len(l2) == len(arr):\n print(\"0\")\n else:\n print(\"1\")\n\n \n # print(\"1\") if len(l2) != n else print(\"0\")\n \n\n\n\nsolve(6,[1,2,3,4,5,4,7])","repo_name":"MrTypeError/DSA","sub_path":"Time Complexity/Finding_Duplicate.py","file_name":"Finding_Duplicate.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
+{"seq_id":"39784617781","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# Trying to gin up a human-readable, simple-minded (bilinear interpolation) algorithm for de-mosaicing a\n# sensor readout that has an RGGB color filter array (CFA).\n\n# Red filters lie over cells whose x coordinate is even and whose y coordinate is even: even, even\n# Blue filters: odd, odd\n# Green filters: even, odd *and* odd, even.\n\n\n# In[2]:\n\n\nimport numpy as np\nfrom PIL import Image\n\n\n# In[13]:\n\n\n# Image dimensions\nwidth = 255\nheight = 255\n\n# Dummy image data is grayscale - single component, 0..255.\n# Build it up as a gradient.\n# Give it a demosaiced red tinge by boosting pixels that should be\n# under a red filter in the Bayer image pattern.\ndummy_image_data = []\nfor y in range(height):\n row = []\n for x in range(width):\n red_boost = 100 if (x % 2, y % 2) == (0, 0) else 0\n row.append(min(255, x + red_boost))\n dummy_image_data.append(row)\n\n\ngray_image_data = np.array(dummy_image_data, dtype=np.uint8)\nprint(\"Dummy image data:\", gray_image_data)\n# PIL seems to be ignoring my mode, dangit.\ngray_img = Image.fromarray(gray_image_data, mode=\"L\")\ngray_img.show()\n\nprint(\"Converted back to numpy array:\")\nprint(np.asarray(gray_img))\n\n\n# In[14]:\n\n\n# Offset of each color component within a pixel:\nR = 0\nG = 1\nB = 2\n\n# filter pattern, addressable as [y][x]\npattern = [\n [R, G],\n [G, B]\n]\n\n# Demosaiced image data is RGB - three components.\ndemosaiced = []\nfor y in range(height):\n row = [[0, 0, 0] for x in range(width)]\n demosaiced.append(row)\n\n\ndef indices(v, limit):\n result = []\n for offset in [-1, 0, 1]:\n index = v + offset\n if 0 <= index < limit:\n result.append(index)\n return result\n\n\ndef channel(x, y):\n x_pattern = x % 2\n y_pattern = y % 2\n return pattern[y_pattern][x_pattern]\n\n\ndef demosaic(sensor_image, demosaiced, width, height):\n for x_image in range(width):\n x_indices = indices(x_image, width)\n for y_image in range(height):\n y_indices = indices(y_image, height)\n\n sums = {R: 0, G: 0, B: 0}\n counts = {R: 0, G: 0, B: 0}\n\n for x in x_indices:\n for y in y_indices:\n c = channel(x, y)\n sums[c] += sensor_image[y][x]\n counts[c] += 1\n for c in [R, G, B]:\n intensity = sums[c] / counts[c] if counts[c] > 0 else 0\n # May as well convert to 8-bit integer.\n pixel_value = min(255, max(0, int(intensity)))\n demosaiced[y_image][x_image][c] = pixel_value\n\n\ndemosaic(dummy_image_data, demosaiced, width, height)\n\n\n# In[15]:\n\n\ncolor_img = Image.fromarray(np.array(demosaiced, dtype=np.uint8), mode=\"RGB\")\ncolor_img.show()\n","repo_name":"mchapman87501/go_mars_2020_img_utils","sub_path":"notebooks/De-mosaicing.py","file_name":"De-mosaicing.py","file_ext":"py","file_size_in_byte":2765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"33078396737","text":"class Solution:\n def checkInclusion(self, s1: str, s2: str) -> bool:\n\n s1 = [ord(i) - ord('a') for i in s1]\n s2 = [ord(j) - ord('a') for j in s2]\n\n end=len(s1)\n start = 0\n while end <= len(s2):\n if sorted(s1) == sorted(s2[start:end]):\n return True\n else:\n start += 1\n end += 1\n return False\n","repo_name":"himanshu1214/Tech-interview-LeetCode-Blogs","sub_path":"Sorting/permutation_in_string.py","file_name":"permutation_in_string.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"40104676802","text":"from functools import partial\nfrom itertools import repeat\nfrom torch._six import container_abcs\n\nimport logging\nimport os\n\nimport numpy as np\nimport scipy\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom timm.models.layers import DropPath, trunc_normal_\n\n\n# From PyTorch internals\ndef _ntuple(n):\n def parse(x):\n if isinstance(x, container_abcs.Iterable):\n return x\n return tuple(repeat(x, n))\n return parse\n\n\nto_1tuple = _ntuple(1)\nto_2tuple = _ntuple(2)\nto_3tuple = _ntuple(3)\nto_4tuple = _ntuple(4)\nto_ntuple = _ntuple\n\n\nclass Mlp(nn.Module):\n def __init__(self,\n in_features,\n hidden_features=None,\n out_features=None,\n act_layer=nn.GELU,\n drop=0.):\n super().__init__()\n out_features = out_features or in_features\n hidden_features = hidden_features or in_features\n self.fc1 = nn.Linear(in_features, hidden_features)\n self.act = act_layer()\n self.fc2 = nn.Linear(hidden_features, out_features)\n self.drop = nn.Dropout(drop)\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.act(x)\n x = self.drop(x)\n x = self.fc2(x)\n x = self.drop(x)\n return x\n\n\n\nclass FastFoodWrap(nn.Module):\n def __init__(self, module, intrinsic_dimension, device=0):\n \"\"\"\n Wrapper to estimate the intrinsic dimensionality of the\n objective landscape for a specific task given a specific model using FastFood transform\n :param module: pytorch nn.Module\n :param intrinsic_dimension: dimensionality within which we search for solution\n :param device: cuda device id\n \"\"\"\n super(FastFoodWrap, self).__init__()\n\n # Hide this from inspection by get_parameters()\n self.m = [module]\n\n self.name_base_localname = []\n\n # Stores the initial value: \\theta_{0}^{D}\n self.initial_value = dict()\n\n # Fastfood parameters\n self.fastfood_params = {}\n\n # Parameter vector that is updated\n # Initialised with zeros as per text: \\theta^{d}\n V = nn.Parameter(torch.zeros((intrinsic_dimension)).to(device))\n self.register_parameter(\"V\", V)\n v_size = (intrinsic_dimension,)\n\n # Iterate over layers in the module\n for name, param in module.named_parameters():\n # If param requires grad update\n if param.requires_grad:\n\n # Saves the initial values of the initialised parameters from param.data and sets them to no grad.\n # (initial values are the 'origin' of the search)\n self.initial_value[name] = v0 = (\n param.clone().detach().requires_grad_(False).to(device)\n )\n\n # Generate fastfood parameters\n DD = np.prod(v0.size())\n self.fastfood_params[name] = fastfood_vars(DD, device)\n\n base, localname = module, name\n while \".\" in localname:\n prefix, localname = localname.split(\".\", 1)\n base = base.__getattr__(prefix)\n self.name_base_localname.append((name, base, localname))\n\n for name, base, localname in self.name_base_localname:\n delattr(base, localname)\n\n def forward(self, x):\n # Iterate over layers\n for name, base, localname in self.name_base_localname:\n\n init_shape = self.initial_value[name].size()\n DD = np.prod(init_shape)\n\n # Fastfood transform te replace dence P\n ray = fastfood_torched(self.V, DD, self.fastfood_params[name]).view(\n init_shape\n )\n\n param = self.initial_value[name] + ray\n\n setattr(base, localname, param)\n\n # Pass through the model, by getting hte module from a list self.m\n module = self.m[0]\n x = module(x)\n return x\n\n\ndef fast_walsh_hadamard_torched(x, axis=0, normalize=False):\n \"\"\"\n Performs fast Walsh Hadamard transform\n :param x:\n :param axis:\n :param 
normalize:\n :return:\n \"\"\"\n orig_shape = x.size()\n assert axis >= 0 and axis < len(orig_shape), (\n \"For a vector of shape %s, axis must be in [0, %d] but it is %d\"\n % (orig_shape, len(orig_shape) - 1, axis)\n )\n h_dim = orig_shape[axis]\n h_dim_exp = int(round(np.log(h_dim) / np.log(2)))\n assert h_dim == 2 ** h_dim_exp, (\n \"hadamard can only be computed over axis with size that is a power of two, but\"\n \" chosen axis %d has size %d\" % (axis, h_dim)\n )\n\n working_shape_pre = [int(np.prod(orig_shape[:axis]))] # prod of empty array is 1 :)\n working_shape_post = [\n int(np.prod(orig_shape[axis + 1 :]))\n ] # prod of empty array is 1 :)\n working_shape_mid = [2] * h_dim_exp\n working_shape = working_shape_pre + working_shape_mid + working_shape_post\n\n ret = x.view(working_shape)\n\n for ii in range(h_dim_exp):\n dim = ii + 1\n arrs = torch.chunk(ret, 2, dim=dim)\n assert len(arrs) == 2\n ret = torch.cat((arrs[0] + arrs[1], arrs[0] - arrs[1]), axis=dim)\n\n if normalize:\n ret = ret / torch.sqrt(float(h_dim))\n\n ret = ret.view(orig_shape)\n\n return ret\n\n\ndef fastfood_vars(DD, device=0):\n \"\"\"\n Returns parameters for fast food transform\n :param DD: desired dimension\n :return:\n \"\"\"\n ll = int(np.ceil(np.log(DD) / np.log(2)))\n LL = 2 ** ll\n\n # Binary scaling matrix where $B_{i,i} \\in \\{\\pm 1 \\}$ drawn iid\n BB = torch.FloatTensor(LL).uniform_(0, 2).type(torch.LongTensor)\n BB = (BB * 2 - 1).type(torch.FloatTensor).to(device)\n BB.requires_grad = False\n\n # Random permutation matrix\n Pi = torch.LongTensor(np.random.permutation(LL)).to(device)\n Pi.requires_grad = False\n\n # Gaussian scaling matrix, whose elements $G_{i,i} \\sim \\mathcal{N}(0, 1)$\n GG = torch.FloatTensor(LL,).normal_().to(device)\n GG.requires_grad = False\n\n divisor = torch.sqrt(LL * torch.sum(torch.pow(GG, 2)))\n\n return [BB, Pi, GG, divisor, LL]\n\n\ndef fastfood_torched(x, DD, param_list=None, device=0):\n \"\"\"\n Fastfood transform\n :param x: array of dd dimension\n :param DD: desired dimension\n :return:\n \"\"\"\n dd = x.size(0)\n\n if not param_list:\n\n BB, Pi, GG, divisor, LL = fastfood_vars(DD, device=device)\n\n else:\n\n BB, Pi, GG, divisor, LL = param_list\n\n # Padd x if needed\n dd_pad = F.pad(x, pad=(0, LL - dd), value=0, mode=\"constant\")\n\n # From left to right HGPiH(BX), where H is Walsh-Hadamard matrix\n mul_1 = torch.mul(BB, dd_pad)\n # HGPi(HBX)\n mul_2 = fast_walsh_hadamard_torched(mul_1, 0, normalize=False)\n\n # HG(PiHBX)\n mul_3 = mul_2[Pi]\n\n # H(GPiHBX)\n mul_4 = torch.mul(mul_3, GG)\n\n # (HGPiHBX)\n mul_5 = fast_walsh_hadamard_torched(mul_4, 0, normalize=False)\n\n ret = torch.div(mul_5[:DD], divisor * np.sqrt(float(DD) / LL))\n\n return ret\n\n\n\nclass Activation_Function_Class(nn.Module):\n \"\"\"\n Implementation of various activation function.\n \"\"\"\n\n def __init__(self, hidden_act):\n\n if hidden_act.lower() == \"relu\":\n self.f = nn.functional.relu\n elif hidden_act.lower() == \"tanh\":\n self.f = torch.tanh\n elif hidden_act.lower() == \"swish\":\n\n def swish(x):\n return x * torch.sigmoid(x)\n\n self.f = swish\n elif hidden_act.lower() == \"gelu\":\n\n def gelu_new(x):\n \"\"\"\n Implementation of the gelu activation function currently in Google Bert repo (identical to OpenAI GPT).\n Also see https://arxiv.org/abs/1606.08415\n \"\"\"\n return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n\n self.f = gelu_new\n elif hidden_act.lower() == \"leakyrelu\":\n self.f = nn.functional.leaky_relu\n\n 
super().__init__()\n\n def forward(self, x):\n return self.f(x)\n\n\n\n# Single Adapter\nclass Adapter(nn.Module):\n \"\"\"\n Implementation of a single Adapter block.\n \"\"\"\n\n def __init__(\n self,\n input_size,\n down_sample=None,\n non_linearity=\"relu\",\n init_bert_weights=True,\n add_layer_norm_before=True,\n add_layer_norm_after=False,\n residual_before_ln=True,\n ):\n super().__init__()\n\n self.input_size = input_size\n self.add_layer_norm_before = add_layer_norm_before\n self.add_layer_norm_after = add_layer_norm_after\n self.residual_before_ln = residual_before_ln\n\n # list for all modules of the adapter, passed into nn.Sequential()\n seq_list = []\n\n # If we want to have a layer norm on input, we add it to seq_list\n if self.add_layer_norm_before:\n self.adapter_norm_before = nn.LayerNorm(self.input_size)\n seq_list.append(self.adapter_norm_before)\n\n # if a downsample size is not passed, we just half the size of the original input\n self.down_sample = down_sample\n if down_sample is None:\n self.down_sample = self.input_size // 2\n\n # Linear down projection of the input\n seq_list.append(nn.Linear(self.input_size, self.down_sample))\n\n # select non-linearity\n self.non_linearity = Activation_Function_Class(non_linearity.lower())\n\n seq_list.append(self.non_linearity)\n\n # sequential adapter, first downproject, then non-linearity then upsample. In the forward pass we include the\n # residual connection\n self.adapter_down = nn.Sequential(*seq_list)\n\n # Up projection to input size\n self.adapter_up = nn.Linear(self.down_sample, self.input_size)\n\n # If we want to have a layer norm on output, we apply it later after a separate residual connection\n # This means that we learn a new output layer norm, which replaces another layer norm learned in the bert layer\n if self.add_layer_norm_after:\n self.adapter_norm_after = nn.LayerNorm(self.input_size)\n\n # if we want to initialize with the bert strategy then this function is called for all the linear layers\n if init_bert_weights:\n self.adapter_down.apply(self.init_bert_weights)\n self.adapter_up.apply(self.init_bert_weights)\n\n def forward(self, x): # , residual_input=None):\n residual_input = x\n down = self.adapter_down(x)\n up = self.adapter_up(down)\n\n output = up\n\n # apply residual connection before layer norm if configured in this way\n if self.residual_before_ln:\n output = output + residual_input\n\n # apply layer norm if available\n if self.add_layer_norm_after:\n output = self.adapter_norm_after(output)\n\n # if residual should be applied after layer norm, apply it here\n if not self.residual_before_ln:\n output = output + residual_input\n\n return output, down, up\n\n # This is copied from the BertPreTrainedModel class to make this a self containing class.\n @staticmethod\n def init_bert_weights(module):\n \"\"\"Initialize the weights.\"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # std defaults to 0.02, this might need to be changed\n module.weight.data.normal_(mean=0.0, std=0.02)\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n\n \nclass Attention(nn.Module):\n def __init__(self,\n dim,\n num_heads=8,\n qkv_bias=False,\n qk_scale=None,\n attn_drop=0.,\n proj_drop=0.,\n res_score=False):\n super().__init__()\n\n self.num_heads = num_heads\n head_dim = dim // num_heads\n # NOTE scale factor was wrong in my original version, can set manually to be 
compat with prev weights\n self.scale = qk_scale or head_dim ** -0.5\n\n self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n self.attn_drop = nn.Dropout(attn_drop)\n self.proj = nn.Linear(dim, dim)\n self.proj_drop = nn.Dropout(proj_drop)\n self.res_score = res_score\n\n def forward(self, x, prev=None):\n B, N, C = x.shape\n qkv = self.qkv(x) \\\n .reshape(B, N, 3, self.num_heads, C // self.num_heads) \\\n .permute(2, 0, 3, 1, 4)\n q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)\n\n attn_score = (q @ k.transpose(-2, -1)) * self.scale\n\n if prev is not None and self.res_score:\n attn_score = attn_score + prev\n\n if self.res_score:\n prev = attn_score\n\n attn = F.softmax(attn_score, dim=-1)\n\n attn = self.attn_drop(attn)\n\n x = (attn @ v).transpose(1, 2).reshape(B, N, C)\n x = self.proj(x)\n x = self.proj_drop(x)\n return x\n\n\nclass Block(nn.Module):\n\n def __init__(self,\n dim,\n num_heads,\n mlp_ratio=4.,\n qkv_bias=False,\n qk_scale=None,\n drop=0.,\n attn_drop=0.,\n drop_path=0.,\n act_layer=nn.GELU,\n norm_layer=nn.LayerNorm,\n pre_norm=True,\n res_score=False,\n dintrinsic = 100):\n super().__init__()\n self.norm1 = norm_layer(dim)\n self.attn = Attention(\n dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,\n attn_drop=attn_drop, proj_drop=drop, res_score=res_score\n )\n # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here\n self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()\n self.norm2 = norm_layer(dim)\n mlp_hidden_dim = int(dim * mlp_ratio)\n self.mlp = Mlp(\n in_features=dim, hidden_features=mlp_hidden_dim,\n act_layer=act_layer, drop=drop\n )\n self.pre_norm = pre_norm\n self.res_score = res_score \n\n #add adapter\n self.adapter = Adapter(dim,\n down_sample=64,\n non_linearity=\"relu\",\n init_bert_weights=True,\n add_layer_norm_before=True,\n add_layer_norm_after=False,\n residual_before_ln=True,\n )\n\n logging.info(f\"intrinsic dimension {dintrinsic}.\")\n self.intrinsic_attn = FastFoodWrap(Attention(\n dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,\n attn_drop=attn_drop, proj_drop=drop, res_score=res_score\n ), intrinsic_dimension=dintrinsic)\n self.intrinsic_mlp = FastFoodWrap(Mlp(\n in_features=dim, hidden_features=mlp_hidden_dim,\n act_layer=act_layer, drop=drop\n ), intrinsic_dimension=dintrinsic)\n self.intrinsic_adapter = FastFoodWrap(Adapter(dim,\n down_sample=64,\n non_linearity=\"relu\",\n init_bert_weights=True,\n add_layer_norm_before=True,\n add_layer_norm_after=False,\n residual_before_ln=True,\n ), intrinsic_dimension=dintrinsic)\n\n def forward(self, x, prev = None, measure_idim = None):\n if measure_idim == \"attention\":\n if self.pre_norm:\n attn = self.intrinsic_attn(self.norm1(x))\n x = x + self.drop_path(attn)\n x = x + self.drop_path(self.mlp(self.norm2(x)))\n else:\n attn = self.intrinsic_attn(x)\n x = self.norm1(x + self.drop_path(attn))\n x = self.norm2(x + self.drop_path(self.mlp(x)))\n elif measure_idim == \"adapter\":\n if self.pre_norm:\n attn = self.attn(self.norm1(x))\n x = x + self.drop_path(attn)\n x = x + self.intrinsic_adapter(self.drop_path(self.mlp(self.norm2(x))))[0]\n else:\n attn = self.attn(x)\n x = self.norm1(x + self.drop_path(attn))\n x = self.norm2(x + self.intrinsic_adapter(self.drop_path(self.mlp(x))))[0]\n elif measure_idim == \"mlp\":\n if self.pre_norm:\n attn = self.attn(self.norm1(x))\n x = x + self.drop_path(attn)\n x = x + self.drop_path(self.intrinsic_mlp(self.norm2(x)))\n else:\n attn 
= self.attn(x)\n x = self.norm1(x + self.drop_path(attn))\n x = self.norm2(x + self.drop_path(self.intrinsic_mlp(x)))\n else:\n if self.pre_norm:\n attn = self.attn(self.norm1(x), prev)\n x = x + self.drop_path(attn)\n x = x + self.drop_path(self.mlp(self.norm2(x)))\n else:\n attn = self.attn(x, prev)\n x = self.norm1(x + self.drop_path(attn))\n x = self.norm2(x + self.drop_path(self.mlp(x)))\n\n return x\n\n\nclass PatchEmbed(nn.Module):\n \"\"\" Image to Patch Embedding\n \"\"\"\n def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):\n super().__init__()\n img_size = to_2tuple(img_size)\n patch_size = to_2tuple(patch_size)\n num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])\n self.img_size = img_size\n self.patch_size = patch_size\n self.num_patches = num_patches\n\n self.proj = nn.Conv2d(\n in_chans, embed_dim, kernel_size=patch_size, stride=patch_size\n )\n\n def forward(self, x):\n B, C, H, W = x.shape\n # FIXME look at relaxing size constraints\n assert H == self.img_size[0] and W == self.img_size[1], \\\n f\"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).\"\n x = self.proj(x).flatten(2).transpose(1, 2)\n return x\n\n\nclass HybridEmbed(nn.Module):\n \"\"\" CNN Feature Map Embedding\n Extract feature map from CNN, flatten, project to embedding dim.\n \"\"\"\n def __init__(self,\n backbone,\n img_size=224,\n feature_size=None,\n in_chans=3,\n embed_dim=768):\n super().__init__()\n assert isinstance(backbone, nn.Module)\n img_size = to_2tuple(img_size)\n self.img_size = img_size\n self.backbone = backbone\n if feature_size is None:\n with torch.no_grad():\n # FIXME this is hacky, but most reliable way of determining the exact dim of the output feature\n # map for all networks, the feature metadata has reliable channel and stride info, but using\n # stride to calc feature dim requires info about padding of each stage that isn't captured.\n training = backbone.training\n if training:\n backbone.eval()\n o = self.backbone(\n torch.zeros(1, in_chans, img_size[0], img_size[1])\n )[-1]\n feature_size = o.shape[-2:]\n feature_dim = o.shape[1]\n backbone.train(training)\n else:\n feature_size = to_2tuple(feature_size)\n feature_dim = self.backbone.feature_info.channels()[-1]\n self.num_patches = feature_size[0] * feature_size[1]\n self.proj = nn.Linear(feature_dim, embed_dim)\n\n def forward(self, x):\n x = self.backbone(x)[-1]\n x = x.flatten(2).transpose(1, 2)\n x = self.proj(x)\n return x\n\n\nclass VisionTransformer(nn.Module):\n \"\"\" Vision Transformer with support for patch or hybrid CNN input stage\n \"\"\"\n def __init__(self,\n img_size=224,\n patch_size=16,\n in_chans=3,\n num_classes=1000,\n embed_dim=768,\n depth=12,\n num_heads=12,\n mlp_ratio=4.,\n qkv_bias=False,\n qk_scale=None,\n drop_rate=0.,\n attn_drop_rate=0.,\n drop_path_rate=0.,\n hybrid_backbone=None,\n norm_layer=nn.LayerNorm,\n use_cls_tocken=True,\n norm_embed=False,\n pre_norm=True,\n res_score=False,\n init='trunc_norm',\n dintrinsic = 100,\n layerType = \"mlp\",\n layernum = 100):\n super().__init__()\n self.num_classes = num_classes\n self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models\n\n if hybrid_backbone is not None:\n self.patch_embed = HybridEmbed(\n hybrid_backbone, img_size=img_size, in_chans=in_chans,\n embed_dim=embed_dim\n )\n else:\n self.patch_embed = PatchEmbed(\n img_size=img_size, patch_size=patch_size, in_chans=in_chans,\n embed_dim=embed_dim\n )\n\n 
self.norm_embed = norm_layer(embed_dim) if norm_embed else None\n num_patches = self.patch_embed.num_patches\n\n if use_cls_tocken:\n self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))\n self.pos_embed = nn.Parameter(\n torch.zeros(1, num_patches+1, embed_dim)\n )\n else:\n self.cls_token = None\n self.pos_embed = nn.Parameter(\n torch.zeros(1, num_patches, embed_dim)\n )\n\n self.pos_drop = nn.Dropout(p=drop_rate)\n\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule\n self.blocks = nn.ModuleList([\n Block(\n dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio,\n qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate,\n attn_drop=attn_drop_rate, drop_path=dpr[i],\n norm_layer=norm_layer, pre_norm=pre_norm,\n res_score=res_score,\n dintrinsic = dintrinsic\n )\n for i in range(depth)\n ])\n self.norm = norm_layer(embed_dim) if pre_norm else None\n\n # NOTE as per official impl, we could have a pre-logits representation dense layer + tanh here\n #self.repr = nn.Linear(embed_dim, representation_size)\n #self.repr_act = nn.Tanh()\n\n # Classifier head\n self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()\n\n if self.cls_token is not None:\n trunc_normal_(self.cls_token, std=.02)\n trunc_normal_(self.pos_embed, std=.02)\n\n if init == 'xavier':\n self.apply(self._init_weights_xavier)\n else:\n self.apply(self._init_weights_trunc_normal)\n\n self.layerType = layerType\n self.layernum = layernum\n\n def _init_weights_trunc_normal(self, m):\n if isinstance(m, nn.Linear):\n logging.info('=> init weight of Linear from trunc norm')\n trunc_normal_(m.weight, std=0.02)\n if m.bias is not None:\n logging.info('=> init bias of Linear to zeros')\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)\n\n def _init_weights_xavier(self, m):\n if isinstance(m, nn.Linear):\n logging.info('=> init weight of Linear from xavier uniform')\n nn.init.xavier_uniform_(m.weight)\n if m.bias is not None:\n logging.info('=> init bias of Linear to zeros')\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)\n\n def init_weights(self, pretrained='', pretrained_layers=[], verbose=True):\n if os.path.isfile(pretrained):\n pretrained_dict = torch.load(pretrained, map_location='cpu')\n logging.info(f'=> loading pretrained model {pretrained}')\n model_dict = self.state_dict()\n pretrained_dict = {\n k: v for k, v in pretrained_dict.items()\n if k in model_dict.keys()\n }\n need_init_state_dict = {}\n for k, v in pretrained_dict.items():\n need_init = (\n k.split('.')[0] in pretrained_layers\n or pretrained_layers[0] == '*'\n )\n if need_init:\n if verbose:\n logging.info(f'=> init {k} from {pretrained}')\n print(k, v.size(), model_dict[k].size())\n if 'pos_embed' == k and v.size() != model_dict[k].size():\n size_pretrained = v.size()\n size_new = model_dict[k].size()\n logging.info(\n '=> load_pretrained: resized variant: {} to {}'\n .format(size_pretrained, size_new)\n )\n\n ntok_new = size_new[1]\n ntok_new -= 1\n\n posemb_tok, posemb_grid = v[:, :1], v[0, 1:]\n\n gs_old = int(np.sqrt(len(posemb_grid)))\n gs_new = int(np.sqrt(ntok_new))\n\n logging.info(\n '=> load_pretrained: grid-size from {} to {}'\n .format(gs_old, gs_new)\n )\n\n posemb_grid = posemb_grid.reshape(gs_old, gs_old, -1)\n zoom = (gs_new / gs_old, gs_new / gs_old, 1)\n posemb_grid = scipy.ndimage.zoom(\n 
posemb_grid, zoom, order=1\n )\n posemb_grid = posemb_grid.reshape(1, gs_new**2, -1)\n v = torch.tensor(\n np.concatenate([posemb_tok, posemb_grid], axis=1)\n )\n\n need_init_state_dict[k] = v\n self.load_state_dict(need_init_state_dict, strict=False)\n\n @torch.jit.ignore\n def no_weight_decay(self):\n return {'pos_embed', 'cls_token'}\n\n def forward_features(self, x):\n layerType = self.layerType\n\n B = x.shape[0]\n x = self.patch_embed(x)\n\n if self.norm_embed:\n x = self.norm_embed(x)\n\n if self.cls_token is not None:\n # stole cls_tokens impl from Phil Wang, thanks\n cls_tokens = self.cls_token.expand(B, -1, -1)\n x = torch.cat((cls_tokens, x), dim=1)\n\n x = x + self.pos_embed\n x = self.pos_drop(x)\n\n prev = None\n for id, blk in enumerate(self.blocks):\n # if id ==11:\n # if id ==0:\n if id ==self.layernum:\n x = blk(x, prev, layerType)\n else:\n x = blk(x, prev, None)\n\n # if id ==0:\n # # x = blk(x, prev, \"adapter\")\n # # x = blk(x, prev, \"attention\")\n # x = blk(x, prev, \"mlp\")\n # else:\n # x = blk(x, prev, None)\n\n if self.norm:\n x = self.norm(x)\n\n if self.cls_token is not None:\n x = x[:, 0]\n else:\n x = torch.mean(x, dim=1)\n\n return x\n\n def forward(self, x):\n x = self.forward_features(x)\n x = self.head(x)\n return x\n\n\ndef get_cls_model(config, dintrinsic, layerType, layernum, **kwargs):\n vit_spec = config.MODEL.SPEC\n vit = VisionTransformer(\n img_size=config.TRAIN.IMAGE_SIZE[0],\n patch_size=vit_spec.PATCH_SIZE,\n num_classes=config.MODEL.NUM_CLASSES,\n embed_dim=vit_spec.EMBED_DIM,\n qkv_bias=vit_spec.QKV_BIAS,\n depth=vit_spec.DEPTH,\n num_heads=vit_spec.NUM_HEADS,\n mlp_ratio=vit_spec.MLP_RATIO,\n drop_rate=vit_spec.DROP_RATE,\n attn_drop_rate=vit_spec.ATTN_DROP_RATE,\n drop_path_rate=vit_spec.DROP_PATH_RATE,\n norm_layer=partial(nn.LayerNorm, eps=1e-6),\n use_cls_tocken=vit_spec.USE_CLS_TOKEN,\n norm_embed=getattr(vit_spec, 'NORM_EMBED', False),\n pre_norm=getattr(vit_spec, 'PRE_NORM', True),\n res_score=getattr(vit_spec, 'RES_SCORE', False),\n init=getattr(vit_spec, 'INIT', 'trunc_norm'),\n dintrinsic = dintrinsic,\n layerType = layerType,\n layernum = layernum\n )\n\n if config.MODEL.INIT_WEIGHTS:\n vit.init_weights(\n config.MODEL.PRETRAINED,\n config.MODEL.PRETRAINED_LAYERS,\n config.VERBOSE\n )\n\n return vit\n","repo_name":"jkooy/Parameter-efficient-Fine-tuning-for-Vision-Transformers","sub_path":"full_shot/main/lib/models/cls_intrinsic_dimension.py","file_name":"cls_intrinsic_dimension.py","file_ext":"py","file_size_in_byte":29199,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"}
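The FastFoodWrap above freezes every original weight at its initial value and re-materialises it on each forward pass as theta_0 + fastfood(V), so only the intrinsic vector V trains. A minimal usage sketch, assuming the classes in this file are importable; the sizes and the "cpu" device string are illustrative (the file itself defaults to cuda device id 0):

import torch

base = Mlp(in_features=16, hidden_features=32)                       # Mlp as defined above
wrapped = FastFoodWrap(base, intrinsic_dimension=50, device="cpu")   # "cpu" assumed to work with .to()

# After wrapping, the only registered (trainable) parameter is V.
assert [name for name, _ in wrapped.named_parameters()] == ["V"]

x = torch.randn(4, 16)
y = wrapped(x)     # forward rebuilds the full weights from V via the fastfood transform
print(y.shape)     # torch.Size([4, 16])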
+{"seq_id":"70229080893","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nfrom .shapefun import shapefun\n\n\n\nclass FE_PostProcessing:\n\n\tdef __init__(self,name):\n\t\tself.node_strain = []\n\t\tself.node_stress = []\n\t\tself.nodes = []\n\t\tself.u =[]\n\t\tself.conn = []\n\t\tself.name_shapef, self.shape_f = name, None\n\t\tself.plot_type = 'e11'\n\n\n\tdef stress_strain(self):\n\t\tprint('\\n** Post process the data')\n\t# (pre-allocate space for nodal stress and strain)\n\t\tfor ni in range(len(self.nodes)):\n\t\t\tself.node_strain.append([0.0, 0.0, 0.0])\n\t\t\tself.node_stress.append([0.0, 0.0, 0.0])\n\t\tnode_strain = np.array(self.node_strain)\n\t\tnode_stress = np.array(self.node_stress)\n\n\t\tprint(f' min displacements: u1={min(self.u[0::2]):.4g}, u2={min(self.u[1::2]):.4g}')\n\t\tprint(f' max displacements: u1={max(self.u[0::2]):.4g}, u2={max(self.u[1::2]):.4g}')\n\t\temin = np.array([ 9.0e9, 9.0e9, 9.0e9])\n\t\temax = np.array([-9.0e9, -9.0e9, -9.0e9])\n\t\tsmin = np.array([ 9.0e9, 9.0e9, 9.0e9])\n\t\tsmax = np.array([-9.0e9, -9.0e9, -9.0e9])\n\n\t\tfor n_el in self.mesh_dict['Elem_num']: # loop through each element\n\t\t\t # for each element (conn is Nx4)\n\t\t\t\t\t\t\t\t\t\t # c is like [2,5,22,53]\t\t\t\n\t\t\tc = self.conn[n_el-1] # connectivtiy\t\t\t\t\t\t\n\t\t\tself.shape_f=getattr(shapefun,self.mesh_dict['shape_fun'][n_el-1])\n\t\t\tnodePts = self.nodes[c,:]\t\t\t# 4x2, eg: [[1.1,0.2], [1.2,0.3], [1.3,0.4], [1.4, 0.5]]\n\t\t\n\t\t\tif len(nodePts) == 4:\n\t\t\t\tB = np.zeros((3,8)) # \n\t\t\t\tfor q in self.q4:\t\t\t\t\t# for each integration pt, eg: [-0.7,-0.7]\n\t\t\t\t\tN,dN = self.shape_f(q) # 2x4\n\t\t\t\t\tJ = np.dot(dN, nodePts).T\t\t\t# 2x2\n\t\t\t\t\tdN = np.dot(np.linalg.inv(J), dN)\t# 2x4\n\t\t\t\t\tB[0,0::2] = dN[0,:]\t\t\t\t\t# 3x8\n\t\t\t\t\tB[1,1::2] = dN[1,:]\n\t\t\t\t\tB[2,0::2] = dN[1,:]\n\t\t\t\t\tB[2,1::2] = dN[0,:]\n\n\t\t\t\t\tUU = np.zeros((8,1))\t\t\t\t# 8x1\n\t\t\t\t\tUU[0] = self.u[2*c[0]]\n\t\t\t\t\tUU[1] = self.u[2*c[0] + 1]\n\t\t\t\t\tUU[2] = self.u[2*c[1]]\n\t\t\t\t\tUU[3] = self.u[2*c[1] + 1]\n\t\t\t\t\tUU[4] = self.u[2*c[2]]\n\t\t\t\t\tUU[5] = self.u[2*c[2] + 1]\n\t\t\t\t\tUU[6] = self.u[2*c[3]]\n\t\t\t\t\tUU[7] = self.u[2*c[3] + 1]\n\t\t\t\t\t# get the strain and stress at the integration point\n\t\t\t\t\tstrain = B @ UU\t\t# (B is 3x8) (UU is 8x1) \t\t=> (strain is 3x1)\n\t\t\t\t\tstress = self.C @ strain\t# (C is 3x3) (strain is 3x1) \t=> (stress is 3x1)\n\t\t\t\t\temin[0] = min(emin[0], strain[0][0])\n\t\t\t\t\temin[1] = min(emin[1], strain[1][0])\n\t\t\t\t\temin[2] = min(emin[2], strain[2][0])\n\t\t\t\t\temax[0] = max(emax[0], strain[0][0])\n\t\t\t\t\temax[1] = max(emax[1], strain[1][0])\n\t\t\t\t\temax[2] = max(emax[2], strain[2][0])\n\n\t\t\t\t\tnode_strain[c[0]][:] = strain.T[0]\n\t\t\t\t\tnode_strain[c[1]][:] = strain.T[0]\n\t\t\t\t\tnode_strain[c[2]][:] = strain.T[0]\n\t\t\t\t\tnode_strain[c[3]][:] = strain.T[0]\n\t\t\t\t\tnode_stress[c[0]][:] = stress.T[0]\n\t\t\t\t\tnode_stress[c[1]][:] = stress.T[0]\n\t\t\t\t\tnode_stress[c[2]][:] = stress.T[0]\n\t\t\t\t\tnode_stress[c[3]][:] = stress.T[0]\n\t\t\t\t\tsmax[0] = max(smax[0], stress[0][0])\n\t\t\t\t\tsmax[1] = max(smax[1], stress[1][0])\n\t\t\t\t\tsmax[2] = max(smax[2], stress[2][0])\n\t\t\t\t\tsmin[0] = min(smin[0], stress[0][0])\n\t\t\t\t\tsmin[1] = min(smin[1], stress[1][0])\n\t\t\t\t\tsmin[2] = min(smin[2], stress[2][0])\n\n\t\t\tif len(nodePts) == 2:\n\t\t\t\t\tB = np.zeros((1,4)) #\n\t\t\t\t\tfor q in self.q2:\t\t# for each Gauss point\n\t\t\t\t\t\t# 
q is 1x2, N(xi,eta)\n\t\t\t\t\t\t# dN = self.gradshapefun(q) # partial derivative of N wrt (xi): 1x4\n\t\t\t\t\t\tN,dN = self.shape_f(q) # N and partial derivatives dN\n\t\t\t\t\t\tJ = np.dot(dN[0::2].T, nodePts).T # Jacobian - J is 1\n\t\t\t\t\t\tL = np.linalg.norm(nodePts[0,:]-nodePts[1,:])\n\t\t\t\t\t\t# assemble B matrix [1x4]\n\t\t\t\t\t\tB[0,0] = -6*q/L**2\n\t\t\t\t\t\tB[0,1] = (3*q-1)/L\n\t\t\t\t\t\tB[0,2] = -6*q/L**2\n\t\t\t\t\t\tB[0,3] = (3*q+1)/L \n\n\t\t\t\t\t\tUU = np.zeros((4,1))\t\t\t\t# 4x1\n\t\t\t\t\t\tUU[0] = self.u[2*c[0]]\n\t\t\t\t\t\tUU[1] = self.u[2*c[0] + 1]\n\t\t\t\t\t\tUU[2] = self.u[2*c[1]]\n\t\t\t\t\t\tUU[3] = self.u[2*c[1] + 1]\n\n\t\t\t\t\t\t\t\t\t\t\t# get the strain and stress at the integration point\n\t\t\t\t\t\tstrain = B @ UU\t\t# (B is 1x4) (UU is 4x1) \t\t=> (strain is 1x1)\n\t\t\t\t\t\tstress = self.C_beam * strain\t# (C is 1x1) (strain is 1x1) \t=> (stress is 1x1)\n\t\t\t\t\t\tprint(strain)\n\t\t\t\t\t\temin[0] = min(emin[0], strain[0])\n\t\t\t\t\t\temax[0] = max(emax[0], strain[0])\n\n\t\t\t\t\t\tnode_strain[c[0]][:] = strain.T[0]\n\t\t\t\t\t\tnode_strain[c[1]][:] = strain.T[0]\n\t\t\t\t\t\tsmax[0] = max(smax[0], stress[0][0])\n\n\t\tprint(f' min strains: e11={emin[0]:.4g}, e22={emin[1]:.4g}, e12={emin[2]:.4g}')\n\t\tprint(f' max strains: e11={emax[0]:.4g}, e22={emax[1]:.4g}, e12={emax[2]:.4g}')\n\t\tprint(f' min stress: s11={smin[0]:.4g}, s22={smin[1]:.4g}, s12={smin[2]:.4g}')\n\t\tprint(f' max stress: s11={smax[0]:.4g}, s22={smax[1]:.4g}, s12={smax[2]:.4g}')\n\t\t\n\t\t\n\t\t###############################\n\t\tprint('\\n** Plot displacement')\n\t\txvec = []\n\t\tyvec = []\n\t\tres = []\n\t\tfor ni,pt in enumerate(self.nodes):\n\t\t\txvec.append(pt[0] + self.u[2*ni])\n\t\t\tyvec.append(pt[1] + self.u[2*ni+1])\n\t\t\tif self.plot_type=='u1': res.append(self.u[2*ni])\t\t\t# x-disp\n\t\t\tif self.plot_type=='u2': res.append(self.u[2*ni+1])\t\t# y-disp\n\t\t\tif self.plot_type=='s11': res.append(node_stress[ni][0])\t\t# s11 component\n\t\t\tif self.plot_type=='s22': res.append(node_stress[ni][1])\t\t# s22 component\n\t\t\tif self.plot_type=='s12': res.append(node_stress[ni][2])\t\t# s12 component\n\t\t\tif self.plot_type=='e11': res.append(node_strain[ni][0])\t\t# e11 component\n\t\t\tif self.plot_type=='e22': res.append(node_strain[ni][1])\t\t# e22 component\n\t\t\tif self.plot_type=='e12': res.append(node_strain[ni][2])\t\t# e12 component (scalar needed by tricontourf)\n\t\ttri = []\n\t\tif len(nodePts) == 4:\n\t\t\tfor c in self.conn:\n\t\t\t\tif len(c) == 4:\n\t\t\t\t\ttri.append( [c[0], c[1], c[2]] )\n\t\t\t\t\ttri.append( [c[0], c[2], c[3]] )\n\t\t\tt = plt.tricontourf(xvec, yvec, res, triangles=tri, levels=14, cmap=plt.cm.jet)\n\t\t \t#plt.scatter(xvec, yvec, marker='o', c='b', s=0.5) # (plot the nodes)\n\t\t\tplt.grid()\n\t\t\tplt.colorbar(t)\n\t\t\tplt.title(self.plot_type)\n\t\t\tplt.axis('equal')\n\t\t\tplt.show()\n\t\t\tprint('Done.')\n\t\tbi = []\n\t\tif len(nodePts) == 2:\n\t\t\tfor c in self.conn:\n\t\t\t\tif len(c) == 2:\n\t\t\t\t\tbi.append( [c[0], c[1]])\n\t\t\tprint('plotting beam')\n\t\t\tfig, ax = plt.subplots()\n\t\t\tax.plot(xvec, res)\n\t\t \t#plt.scatter(xvec, yvec, marker='o', c='b', s=0.5) # (plot the nodes)\n\t\t\tax.grid()\n\t\t\tplt.title(self.plot_type)\n\t\t\tax.set_ylim(min(res),max(res))\n\t\t\tplt.show()\n\t\t\tprint('Done.')","repo_name":"LaYenka/KratoSim","sub_path":"src/FEM/FE_PostProcessing.py","file_name":"FE_PostProcessing.py","file_ext":"py","file_size_in_byte":6068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
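The Q4 branch of stress_strain assembles a 3x8 B-matrix and recovers strain as B @ U. A self-contained numpy check of that recovery; the unit-square element, centre Gauss point and the linear displacement field u1 = 0.1*x are illustrative, not taken from the class:

import numpy as np

nodePts = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])   # unit-square Q4 element
# Q4 shape-function gradients w.r.t. (xi, eta), evaluated at the element centre
dN = np.array([[-0.25,  0.25, 0.25, -0.25],
               [-0.25, -0.25, 0.25,  0.25]])
J = np.dot(dN, nodePts).T                 # Jacobian, as in the class above
dN = np.dot(np.linalg.inv(J), dN)         # gradients w.r.t. (x, y)

B = np.zeros((3, 8))                      # same layout as the Q4 branch
B[0, 0::2] = dN[0, :]
B[1, 1::2] = dN[1, :]
B[2, 0::2] = dN[1, :]
B[2, 1::2] = dN[0, :]

UU = np.array([0., 0., 0.1, 0., 0.1, 0., 0., 0.]).reshape(8, 1)   # u1 = 0.1*x, u2 = 0
strain = B @ UU
print(strain.ravel())                     # [0.1 0.  0. ] -> e11 = du1/dx, as expected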
+{"seq_id":"1633388811","text":"from flask import Flask, request, render_template, send_file\r\nfrom PIL import Image\r\nimport cv2\r\nimport csv\r\nimport os\r\nimport numpy as np\r\nfrom datetime import datetime\r\nfrom ultralytics import YOLO\r\nimport pytesseract\r\n\r\n# C:\\Program Files\\Tesseract-OCR\r\npytesseract.pytesseract.tesseract_cmd = 'C:\\\\Program Files\\\\Tesseract-OCR\\\\tesseract.exe'\r\nmyconfig = r'--oem 3 --psm 6 outputbase digits'\r\n\r\n# Settings\r\nclass_name = 'railway-train-id'\r\ndetection_color = (255, 50, 255)\r\nmodel = YOLO(\"models/railway-train-id.pt\", \"v8\")\r\n\r\n# Vals to resize frames\r\nframe_wid = 640\r\nframe_hyt = 480\r\n\r\n# Function to Add new Wagon_ID to the Database\r\ndef add_record(wagon_id, datetime):\r\n # Check Wagon ID for Validness:\r\n if len(wagon_id) < 8:\r\n return\r\n\r\n # Check if the Wagon_id already exists or not\r\n with open('records.csv', 'r') as file:\r\n reader = csv.reader(file)\r\n for row in reader:\r\n if row[1] == wagon_id:\r\n print(f\"Wagon ID {wagon_id} already exists.\")\r\n return\r\n\r\n # Get the last ID from the existing records (auto-increment)\r\n try:\r\n with open('records.csv', 'r') as file:\r\n reader = csv.DictReader(file)\r\n records = list(reader)\r\n last_id = int(records[-1]['id'])\r\n except IndexError:\r\n last_id = 0\r\n\r\n # Increment the ID and append the new record\r\n new_id = last_id + 1\r\n new_record = {'id': str(new_id), 'wagon_id': wagon_id, 'datetime': datetime}\r\n\r\n with open('records.csv', 'a', newline='') as file:\r\n writer = csv.DictWriter(file, fieldnames=['id', 'wagon_id', 'datetime'])\r\n writer.writerow(new_record)\r\n\r\n print(f\"Record with ID {new_id} added successfully.\")\r\n\r\n\r\n# Process Image Function\r\ndef process_image_func(image):\r\n frame = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)\r\n\r\n # Resize the frame to optimize the run\r\n frame = cv2.resize(frame, (frame_wid, frame_hyt))\r\n result = frame\r\n accuracy_score = 0\r\n id_number = 0\r\n\r\n # Convert tensor array to numpy\r\n detect_params = model.predict(source=[frame], conf=0.45, save=False)\r\n DP = detect_params[0].numpy()\r\n print(DP)\r\n\r\n if len(DP) != 0:\r\n for i in range(len(detect_params[0])):\r\n boxes = detect_params[0].boxes\r\n box = boxes[i]\r\n conf = box.conf.numpy()[0]\r\n bb = box.xyxy.numpy()[0]\r\n\r\n # Cropping wagon number from the main Image\r\n result = frame[int(bb[1]):int(bb[3]), int(bb[0]):int(bb[2])]\r\n\r\n # Processing image number through OCR engine\r\n boxes = pytesseract.image_to_data(result, config=myconfig)\r\n for x, b in enumerate(boxes.splitlines()):\r\n if x != 0:\r\n b = b.split()\r\n if len(b) == 12:\r\n x, y, w, h = int(b[6]), int(b[7]), int(b[8]), int(b[9])\r\n cv2.rectangle(result, (x, y), (w + x, h + y), (0, 0, 255), 1)\r\n cv2.putText(result, b[11], (x + 45, y + h + 30), cv2.FONT_HERSHEY_COMPLEX, 0.8, (50, 50, 255),\r\n 1)\r\n accuracy_score = b[10]\r\n id_number = b[11]\r\n add_record(id_number, datetime.now().strftime(\"%D/%M/%Y %H:/%M:%S\"))\r\n\r\n return result, accuracy_score, id_number\r\n\r\n\r\n# Flask Web App things...\r\napp = Flask(__name__)\r\n\r\n@app.route(\"/\")\r\ndef main_page():\r\n return render_template('index.html', process_image='url')\r\n\r\n@app.route('/process', methods=['POST'])\r\ndef process_image():\r\n if 'image' not in request.files:\r\n return 'No image file uploaded'\r\n\r\n file = request.files['image']\r\n\r\n if file.filename == '':\r\n return 'No selected image'\r\n\r\n try:\r\n image = 
Image.open(file)\r\n        processed_image, accuracy_score, number_id = process_image_func(image)\r\n\r\n        # Save the processed image to a temporary file\r\n        temp_filename = 'processed_image.jpg'\r\n        processed_image = cv2.cvtColor(processed_image, cv2.COLOR_BGR2RGB)\r\n        cv2.imwrite(os.path.join(app.static_folder, temp_filename), processed_image)\r\n\r\n        return render_template('result.html', filename=temp_filename, accuracy_score=accuracy_score, number_id=number_id)\r\n    except Exception as e:\r\n        return f'Error processing image: {str(e)}'\r\n\r\n@app.route('/display_image/<filename>')  # the route needs the <filename> converter matching the view argument\r\ndef display_image(filename):\r\n    return send_file(os.path.join(app.static_folder, filename), mimetype='image/jpeg')  # serve from where it was written\r\n\r\n@app.route('/table')\r\ndef display_table():\r\n    records = []\r\n    with open('records.csv', 'r') as file:\r\n        reader = csv.DictReader(file)\r\n        for row in reader:\r\n            records.append(row)\r\n\r\n    return render_template('table.html', records=records)\r\n\r\nif __name__ == '__main__':\r\n    app.run(debug=True)\r\n","repo_name":"silvermete0r/CargoScan","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
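A quick client-side check of the /process endpoint above with requests; the image path and host/port are placeholders:

import requests

with open('wagon.jpg', 'rb') as fh:                       # placeholder image file
    resp = requests.post('http://127.0.0.1:5000/process',
                         files={'image': ('wagon.jpg', fh, 'image/jpeg')})
print(resp.status_code)
print(resp.text[:200])   # rendered result.html with the accuracy score and wagon id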
+{"seq_id":"25328869820","text":"# encoding=utf-8\nfrom django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.chart_list),\n # 图表\n # http://127.0.0.1:9000/chart/kline/BCH-USD-SWAP/5T/2020-01-05%2014:55:00/\n path('kline////', views.chart_kline),\n\n # macd\n # http://127.0.0.1:9000/chart/kline_macd/BCH-USD-SWAP/5T/2020-01-05%2014:55:00/\n path('kline_macd////', views.chart_macd),\n]\n","repo_name":"royee820926/bitango","sub_path":"chart/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"421387858","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport os\n\nimport numpy as np\nimport pathlib\nimport pickle\n\n# Make train, validation and test splits deterministic from one run to another\nnp.random.seed(2017 + 5 + 17)\n\n# Dataset split\n# 00 'aquatic_mammals'\n# 01 'fish'\n# 02 'flowers'\n# 03 'food_containers'\n# 04 'fruit_and_vegetables'\n# 05 'household_electrical_devices'\n# 06 'household_furniture'\n# 07 'insects'\n# 08 'large_carnivores'\n# 09 'large_man-made_outdoor_things'\n# 10 'large_natural_outdoor_scenes'\n# 11 'large_omnivores_and_herbivores'\n# 12 'medium_mammals'\n# 13 'non-insect_invertebrates'\n# 14 'people'\n# 15 'reptiles'\n# 16 'small_mammals'\n# 17 'trees'\n# 18 'vehicles_1'\n# 19 'vehicles_2'\n\n# CIFAR100_PATH = '/mnt/datasets/public/cifar100'\n# CIFAR100_PATH = '/home/boris/Downloads/cifar-100-python'\nclass_split = {'train': {1, 2, 3, 4, 5, 6, 9, 10, 15, 17, 18, 19}, 'val': {8, 11, 13, 16}, 'test': {0, 7, 12, 14}}\n\ndef main(data_dir, output_dir):\n # load the full CFAR100 dataset, including train and test\n with open(os.path.join(data_dir, 'train'), 'rb') as fo:\n dict = pickle.load(fo, encoding='bytes')\n images = dict[b'data']\n fine_labels = dict[b'fine_labels']\n coarse_labels = dict[b'coarse_labels']\n\n with open(os.path.join(data_dir, 'test'), 'rb') as fo:\n dict = pickle.load(fo, encoding='bytes')\n images = np.concatenate((images, dict[b'data']))\n fine_labels = np.concatenate((fine_labels,dict[b'fine_labels']))\n coarse_labels = np.concatenate((coarse_labels,dict[b'coarse_labels']))\n\n images = images.reshape((-1, 3, 32, 32))\n images = images.transpose((0, 2, 3, 1))\n\n for split_name, split_coarse_classes in class_split.items():\n split_images=[]\n split_fine_labels=[]\n split_coarse_labels=[]\n for current_coarse_label in split_coarse_classes:\n idxs = coarse_labels == current_coarse_label\n split_images.append(images[idxs])\n split_fine_labels.append(fine_labels[idxs])\n split_coarse_labels.append(coarse_labels[idxs])\n\n split_images = np.concatenate(split_images)\n split_fine_labels = np.concatenate(split_fine_labels)\n split_coarse_labels = np.concatenate(split_coarse_labels)\n\n # Save dataset to disk\n permutation = np.random.permutation(len(split_images))\n features = split_images[permutation]\n targets = split_fine_labels[permutation]\n pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True)\n np.savez(\n os.path.join(output_dir, 'few-shot-{}.npz'.format(split_name)),\n features=features, targets=targets)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--data-dir', type=str,\n default=os.path.join(os.sep, 'mnt', 'datasets', 'public', 'cifar100', 'raw-data'),\n help='Path to the raw data')\n parser.add_argument(\n '--output-dir', type=str, default=os.path.join(os.sep, 'mnt', 'datasets', 'public', 'cifar100'),\n help='Output directory')\n\n args = parser.parse_args()\n main(args.data_dir, args.output_dir)\n","repo_name":"ServiceNow/TADAM","sub_path":"datasets/create_dataset_cifar100.py","file_name":"create_dataset_cifar100.py","file_ext":"py","file_size_in_byte":3234,"program_lang":"python","lang":"en","doc_type":"code","stars":103,"dataset":"github-code","pt":"78"}
+{"seq_id":"180804947","text":"import sys\r\nimport speech_recognition as sr\r\nimport pyttsx3\r\nimport pywhatkit\r\nimport datetime\r\nimport wikipedia\r\nimport pyjokes\r\nimport cv2\r\nimport os\r\nimport webbrowser as wb\r\nimport pyautogui as py\r\nimport time\r\nfrom PyQt5 import QtWidgets, QtCore, QtGui\r\nfrom PyQt5.QtCore import QTimer, QTime, QDate, Qt\r\nfrom PyQt5.QtGui import QMovie # for gif\r\nfrom PyQt5.QtCore import *\r\nfrom PyQt5.QtGui import *\r\nfrom PyQt5.QtWidgets import *\r\nfrom PyQt5.uic import loadUiType\r\nfrom jarvis__ui import Ui_MainWindow\r\n\r\n# i dont know how to interact with os then came to know that its a packge callef os\r\n# os .system to run any shell command\r\n# in os .system(\"contains the name of application with the same name as that in pc\")\r\n\r\nlistener = sr.Recognizer()\r\nengine = pyttsx3.init()\r\nvoices = engine.getProperty('voices')\r\nengine.setProperty('voice', voices[0].id)\r\ncommand = 'b'\r\n\r\n\r\ndef speak(text):\r\n engine.say(text)\r\n engine.runAndWait()\r\n\r\n\r\n# init got stuck ** key to this is to use dummy ** inside init (dummy)\r\n# my task is to know how does this wayresolve our problem\r\n# dummy doesnt solve the problem its just a way to test\r\n# now we need to unistall pyttsx3 and install pyttsx3==2.7 and it worked :)\r\n\r\n# lib folder contains the downloaded packages.\r\n\r\n\r\n\r\n\r\ndef wiki(p):\r\n person = p.replace('who is', '')\r\n info = wikipedia.summary(person)\r\n # 1 after represents the person is the limit of words in info\r\n print(info)\r\n speak(info)\r\n\r\n\r\ndef notepad():\r\n print(\"bgr\")\r\n py.press('win', interval=0.2)\r\n # press to automatically press the key\r\n py.typewrite('Notepad', interval=0.2)\r\n # typewrite\r\n py.press('enter', interval=0.2)\r\n speak(\"Please tell your content sir\")\r\n\r\n time.sleep(3)\r\n while True:\r\n x = take_command()\r\n if \"QUIT\" in x:\r\n break\r\n py.typewrite(x, interval=0.2)\r\n py.typewrite('\\n', interval=0.2)\r\n\r\n speak(\"Would you like to save it\")\r\n time.sleep(3)\r\n x = take_command()\r\n if \"YES\" in x:\r\n print('vt')\r\n py.press('ctrl + s', interval=0.2)\r\n\r\n\r\ndef camera():\r\n cap = cv2.VideoCapture(0)\r\n while True:\r\n res, frame = cap.read()\r\n cv2.imshow('cam_star', frame)\r\n if cv2.waitKey(10) == ord('q'):\r\n break\r\n\r\n\r\ndef chrome():\r\n # os.startfile()\r\n # i found difficulty over here bcz initially it was telling that no such program found but when i used '%s' then it was working perfectly fine\r\n\r\n speak(\"Please say what do you want to search\")\r\n x = take_command()\r\n # time.sleep(3)\r\n path = 'C:/Program Files/Google/Chrome/Application/chrome.exe %s'\r\n wb.get(path).open_new_tab(x)\r\n\r\n\r\n# I made a mistake that i didnt converted the whole text into a single case 'upper or lower' and provided me unusual results\r\n# i wanted to make a loop that could run until all the q key is pressed\r\n\r\n\r\nclass MainThread(QThread):\r\n def __init__(self):\r\n super(MainThread, self).__init__()\r\n\r\n def run(self):\r\n self.run_alexa()\r\n\r\n def take_command(self):\r\n try:\r\n with sr.Microphone() as source:\r\n print('listening...')\r\n\r\n listener.adjust_for_ambient_noise(source)\r\n voice = listener.listen(source)\r\n voice.pause_threshold = 3000\r\n self.command = listener.recognize_google(voice)\r\n self.command = self.command.upper()\r\n if 'alexa' in self.command:\r\n self.command = self.command.replace('alexa', '')\r\n print(self.command)\r\n except:\r\n pass\r\n return 
self.command\r\n\r\n def run_alexa(self):\r\n speak('Hello i am jarvis how can i help you')\r\n while True:\r\n self.p = self.take_command()\r\n print(command)\r\n\r\n if 'PLAY' in self.p:\r\n song = self.p.replace('play', '')\r\n speak('playing ' + song)\r\n print(song)\r\n pywhatkit.playonyt(song)\r\n\r\n\r\n elif 'TIME' in self.p:\r\n time = datetime.datetime.now().strftime('%I:%M %p')\r\n speak('Current time is ' + time)\r\n\r\n elif 'WHO IS' in self.p:\r\n wiki(self.p)\r\n\r\n\r\n\r\n elif ('SELFIE' in self.p) or ('CAMERA' in self.p):\r\n camera()\r\n\r\n\r\n elif (\"GOOGLE\" in self.p) or (\"SEARCH\" in self.p) or (\"CHROME\" in self.p) or (\"BROWSER\" in self.p) :\r\n speak(\"Opening\")\r\n speak(\"GOOGLE CHROME\")\r\n print(\".\")\r\n print(\".\")\r\n chrome()\r\n\r\n\r\n\r\n elif (\"IE\" in self.p) or (\"MSEDGE\" in self.p) or (\"EDGE\" in self.p) :\r\n speak(\"Opening\")\r\n speak(\"MICROSOFT EDGE\")\r\n print(\".\")\r\n print(\".\")\r\n os.startfile(\"C:\\Program Files (x86)\\Microsoft\\Edge\\Application\\msedge.exe\")\r\n\r\n elif (\"NOTEPAD\" in self.p) or (\"NOTES\" in self.p) or (\"NOTEPAD\" in self.p) :\r\n speak(\"Opening\")\r\n speak(\"NOTEPAD\")\r\n print(\".\")\r\n print(\".\")\r\n i = 0\r\n notepad()\r\n\r\n\r\n elif (\"VLCPLAYER\" in self.p) or (\"PLAYER\" in self.p) or (\"VIDEO PLAYER\" in self.p) :\r\n speak(\"Opening\")\r\n speak(\"VLC PLAYER\")\r\n print(\".\")\r\n print(\".\")\r\n os.startfile(\"C:\\Program Files (x86)\\VideoLAN\\VLC\\vlc.exe\")\r\n\r\n elif (\"ILLUSTRATOR\" in self.p) or (\"AI\" in self.p) :\r\n speak(\"Opening\")\r\n speak(\"ADOBE ILLUSTRATOR\")\r\n print(\".\")\r\n print(\".\")\r\n os.system(\"illustrator\")\r\n\r\n\r\n\r\n elif (\"WORD\" in self.p) or (\"MSWORD\" in self.p) :\r\n speak(\"Opening\")\r\n speak(\"MICROSOFT WORD\")\r\n print(\".\")\r\n print(\".\")\r\n os.system(\"C:\\Program Files\\Microsoft Office\\root\\Office16\\WINWORD.EXE\")\r\n\r\n\r\n elif \"QUIT\" in self.p:\r\n speak(\"Thank you for using me sir!\")\r\n break\r\n\r\n\r\n else:\r\n speak(\"please Type Again\")\r\n print(\".\")\r\n print(\".\")\r\n continue\r\n\r\n\r\nstartExecution = MainThread()\r\n\r\nclass Main(QMainWindow):\r\n def __init__(self):\r\n super().__init__()\r\n self.ui = Ui_MainWindow()\r\n self.ui.setupUi(self) # To display the ui by itself\r\n self.ui.pushButton.clicked.connect(self.startTask)\r\n self.ui.pushButton_2.clicked.connect(self.close)\r\n\r\n def startTask(self):\r\n self.ui.movie = QtGui.QMovie(\"7LP8.gif\")\r\n self.ui.label.setMovie(self.ui.movie)\r\n self.ui.movie.start()\r\n \r\n self.ui.movie = QtGui.QMovie('Iron Man Jarvis Live Wallpaper This jarvis boot animation (1).gif')\r\n self.ui.label_2.setMovie(self.ui.movie)\r\n self.ui.movie.start()\r\n\r\n self.ui.movie = QtGui.QMovie('Jarvis_Loading_Screen.gif')\r\n self.ui.label_3.setMovie(self.ui.movie)\r\n self.ui.movie.start()\r\n\r\n startExecution.start()\r\n \r\n\r\n def showTime(self):\r\n current_time = QTime.currentTime()\r\n current_date = QDate.currentDate()\r\n label_time = current_time.toString('hh:mm:ss')\r\n label_date = current_date.toString(Qt.ISODate)\r\n self.ui.textBrowser.setText(label_date)\r\n self.ui.textBrowser_2.setText(label_time)\r\n\r\n\r\n\r\napp = QApplication(sys.argv)\r\njarvis = 
Main()\r\njarvis.show()\r\nexit(app.exec_())\r\n","repo_name":"choudhary-robin/Cool_Buddy","sub_path":"Cool_Buddy/jarvis.py","file_name":"jarvis.py","file_ext":"py","file_size_in_byte":7611,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
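One recurring pitfall in the assistant above: the recognised text is uppercased, yet several literals ('alexa', 'play', 'who is') stay lowercase and so can never match. A minimal case-consistent listen-and-dispatch sketch (standalone, not wired into the Qt UI; microphone access assumed):

import speech_recognition as sr

def take_command_upper(recognizer):
    with sr.Microphone() as source:
        recognizer.adjust_for_ambient_noise(source)
        audio = recognizer.listen(source)
    try:
        command = recognizer.recognize_google(audio).upper()
    except (sr.UnknownValueError, sr.RequestError):
        return ''
    return command.replace('ALEXA', '').strip()   # strip the wake word in the normalised case

r = sr.Recognizer()
cmd = take_command_upper(r)
if cmd.startswith('PLAY'):
    print('playing', cmd.replace('PLAY', '', 1).strip())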
+{"seq_id":"20827485825","text":"import re\nimport json\nimport requests\nfrom ftplib import FTP\nfrom io import BytesIO\nfrom zipfile import ZipFile\nfrom datetime import datetime\nfrom dateutil import parser\nfrom typing import Dict, Union, List\n\ntry:\n from typing_extensions import TypedDict\nexcept ModuleNotFoundError:\n pass\n\n\nclass Entry(TypedDict):\n version: Union[str, None]\n files: Dict[str, Union[str, None]]\n latest: bool\n\n\nDEFAULT: List[Entry] = []\n\nMONTHS_SHORT = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']\n\n\ndef get_website_source(url: str) -> str:\n headers = {'User-Agent': 'DataSource-Status Fetcher'}\n return requests.get(url, headers=headers).content.decode('utf-8')\n\n\ndef get_obo_ontology_version_line(url: str) -> Union[str, None]:\n r = requests.get(url, stream=True)\n for line in r.iter_lines():\n if line.decode('utf-8').strip().startswith(\"data-version:\"):\n return line.decode('utf-8')\n return None\n\n\ndef get_aact_entry() -> List[Entry]:\n source = get_website_source('https://aact.ctti-clinicaltrials.org/pipe_files')\n pattern = re.compile(r'(/static/exported_files/monthly/([0-9]{8})_pipe-delimited-export\\.zip)')\n matches = pattern.findall(source)\n entry: Entry = {\n 'version': matches[0][1][0:4] + '.' + matches[0][1][4:6] + '.' + matches[0][1][6:8],\n 'files': {\n 'pipe-delimited-export.zip': 'https://aact.ctti-clinicaltrials.org' + matches[0][0]\n },\n 'latest': True\n }\n return [entry]\n\n\ndef get_canadian_nutrient_file_entry() -> List[Entry]:\n source = get_website_source('https://www.canada.ca/en/health-canada/services/food-nutrition/healthy-eating/' +\n 'nutrient-data/canadian-nutrient-file-2015-download-files.html')\n pattern = re.compile(r'dateModified\">\\s*([0-9]{4})-([0-9]{2})-([0-9]{2})\\s*')\n matches = pattern.findall(source)\n entry: Entry = {\n 'version': matches[0][0] + '.' + matches[0][1] + '.' + matches[0][2],\n 'files': {\n 'cnf-fcen-csv.zip': 'https://www.canada.ca/content/dam/hc-sc/migration/hc-sc/fn-an/alt_formats/zip/' +\n 'nutrition/fiche-nutri-data/cnf-fcen-csv.zip'\n },\n 'latest': True\n }\n return [entry]\n\n\ndef get_cancer_drugs_db_entry() -> List[Entry]:\n source = get_website_source('https://www.anticancerfund.org/en/cancerdrugs-db')\n pattern = re.compile(r'Database build date:\\s+([0-9]{2})/([0-9]{2})/([0-9]{2})', re.IGNORECASE)\n matches = pattern.findall(source)\n entry: Entry = {\n 'version': matches[0][2] + '.' + matches[0][1] + '.' 
+ matches[0][0],\n 'files': {\n 'cancerdrugsdb.txt': 'https://acfdata.coworks.be/cancerdrugsdb.txt'\n },\n 'latest': True\n }\n return [entry]\n\n\ndef get_dgidb_entry() -> List[Entry]:\n # TODO\n return DEFAULT\n\n\ndef get_drugbank_entry() -> List[Entry]:\n releases = json.loads(get_website_source('http://go.drugbank.com/releases.json'))\n latest_version = sorted([release['version'] for release in releases], reverse=True)[0]\n versions = []\n for release in releases:\n url = release['url']\n entry: Entry = {\n 'version': release['version'],\n 'files': {\n 'drugbank_all_full_database.xml.zip': url + '/downloads/all-full-database',\n 'drugbank_all_structures.sdf.zip': url + '/downloads/all-structures',\n 'drugbank_all_metabolite-structures.sdf.zip': url + '/downloads/all-metabolite-structures',\n },\n 'latest': release['version'] == latest_version\n }\n versions.append(entry)\n return versions\n\n\ndef get_drugcentral_entry() -> List[Entry]:\n source = get_website_source('https://drugcentral.org/ActiveDownload')\n pattern = re.compile(\n r'(https://unmtid-shinyapps\\.net/download/drugcentral\\.dump\\.([0-9]+)_([0-9]+)_([0-9]{4})\\.sql\\.gz)')\n matches = pattern.findall(source)\n entry: Entry = {\n 'version': matches[0][3] + '.' + matches[0][1] + '.' + matches[0][2],\n 'files': {\n 'drugcentral.dump.sql.gz': matches[0][0]\n },\n 'latest': True\n }\n return [entry]\n\n\ndef get_ema_entry() -> List[Entry]:\n # EMA updates the medicine data tables once a day\n entry: Entry = {\n 'version': datetime.today().strftime('%Y.%m.%d'),\n 'files': {\n 'Medicines_output_european_public_assessment_reports.xlsx':\n 'https://www.ema.europa.eu/sites/default/files/' +\n 'Medicines_output_european_public_assessment_reports.xlsx',\n 'Medicines_output_herbal_medicines.xlsx':\n 'https://www.ema.europa.eu/sites/default/files/Medicines_output_herbal_medicines.xlsx'\n },\n 'latest': True\n }\n return [entry]\n\n\ndef get_gene2phenotype_entry() -> List[Entry]:\n source = get_website_source('https://www.ebi.ac.uk/gene2phenotype')\n pattern = re.compile(r'([0-9]{4})-([0-9]{2})-([0-9]{2})')\n matches = pattern.findall(source)\n entry: Entry = {\n 'version': matches[0][0] + '.' + matches[0][1] + '.' + matches[0][2],\n 'files': {\n 'CancerG2P.csv.gz': 'https://www.ebi.ac.uk/gene2phenotype/downloads/CancerG2P.csv.gz',\n 'DDG2P.csv.gz': 'https://www.ebi.ac.uk/gene2phenotype/downloads/DDG2P.csv.gz',\n 'EyeG2P.csv.gz': 'https://www.ebi.ac.uk/gene2phenotype/downloads/EyeG2P.csv.gz',\n 'SkinG2P.csv.gz': 'https://www.ebi.ac.uk/gene2phenotype/downloads/SkinG2P.csv.gz'\n },\n 'latest': True\n }\n return [entry]\n\n\ndef get_gene_ontology_entry() -> List[Entry]:\n obo_url = 'http://current.geneontology.org/ontology/go.obo'\n version_line = get_obo_ontology_version_line(obo_url)\n pattern = re.compile(r'([0-9]{4})-([0-9]{2})-([0-9]{2})')\n matches = pattern.findall(version_line)\n entry: Entry = {\n 'version': matches[0][0] + '.' + matches[0][1] + '.' 
+ matches[0][2],\n 'files': {\n 'go.obo': obo_url,\n 'goa_human.gaf.gz': 'http://current.geneontology.org/annotations/goa_human.gaf.gz',\n 'goa_human_complex.gaf.gz': 'http://current.geneontology.org/annotations/goa_human_complex.gaf.gz',\n 'goa_human_isoform.gaf.gz': 'http://current.geneontology.org/annotations/goa_human_isoform.gaf.gz',\n 'goa_human_rna.gaf.gz': 'http://current.geneontology.org/annotations/goa_human_rna.gaf.gz'\n },\n 'latest': True\n }\n return [entry]\n\n\ndef get_gwas_catalog_entry() -> List[Entry]:\n headers = {'User-Agent': 'DataSource-Status Fetcher'}\n request = requests.get('https://www.ebi.ac.uk/gwas/api/search/downloads/alternative', headers=headers, stream=True)\n disposition = request.headers['content-disposition']\n file_name = re.findall(\"filename=(.+)\", disposition)[0].strip()\n pattern = re.compile('([0-9]{4})-([0-9]{2})-([0-9]{2})')\n matches = pattern.findall(file_name)\n entry: Entry = {\n 'version': matches[0][0] + '.' + matches[0][1] + '.' + matches[0][2],\n 'files': {\n 'gwas_catalog_associations.tsv': 'https://www.ebi.ac.uk/gwas/api/search/downloads/alternative',\n 'gwas_catalog_studies.tsv': 'https://www.ebi.ac.uk/gwas/api/search/downloads/studies_alternative',\n 'gwas_catalog_ancestry.tsv': 'https://www.ebi.ac.uk/gwas/api/search/downloads/ancestry'\n },\n 'latest': True\n }\n return [entry]\n\n\ndef get_hgnc_entry() -> List[Entry]:\n ftp = FTP('ftp.ebi.ac.uk')\n ftp.login()\n modified_datetime = parser.parse(\n ftp.voidcmd('MDTM pub/databases/genenames/new/tsv/hgnc_complete_set.txt')[4:].strip())\n ftp.close()\n entry: Entry = {\n 'version': modified_datetime.strftime('%Y.%m.%d'),\n 'files': {\n 'hgnc_complete_set.txt': 'https://ftp.ebi.ac.uk/pub/databases/genenames/new/tsv/hgnc_complete_set.txt'\n },\n 'latest': True\n }\n return [entry]\n\n\ndef get_hpo_entry() -> List[Entry]:\n obo_url = 'https://raw.githubusercontent.com/obophenotype/human-phenotype-ontology/master/hp.obo'\n version_line = get_obo_ontology_version_line(obo_url)\n pattern = re.compile(r'([0-9]{4})-([0-9]{2})-([0-9]{2})')\n matches = pattern.findall(version_line)\n entry: Entry = {\n 'version': matches[0][0] + '.' + matches[0][1] + '.' + matches[0][2],\n 'files': {\n 'hp.obo': obo_url,\n 'phenotype.hpoa': 'http://purl.obolibrary.org/obo/hp/hpoa/phenotype.hpoa',\n 'genes_to_phenotype.txt': 'http://purl.obolibrary.org/obo/hp/hpoa/genes_to_phenotype.txt',\n 'phenotype_to_genes.txt': 'http://purl.obolibrary.org/obo/hp/hpoa/phenotype_to_genes.txt'\n },\n 'latest': True\n }\n return [entry]\n\n\ndef get_itis_entry() -> List[Entry]:\n source = get_website_source('https://www.itis.gov/downloads/index.html')\n pattern = re.compile(\n r'files are currently from the ([0-9]{2})-(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)-([0-9]{4})')\n matches = pattern.findall(source)\n entry: Entry = {\n 'version': matches[0][2] + '.' + str(MONTHS_SHORT.index(matches[0][1]) + 1) + '.' 
+ matches[0][0],\n 'files': {\n 'itisMySQLTables.tar.gz': 'https://www.itis.gov/downloads/itisMySQLTables.tar.gz'\n },\n 'latest': True\n }\n return [entry]\n\n\ndef get_kegg_entry() -> List[Entry]:\n ftp = FTP('ftp.genome.jp')\n ftp.login()\n modified_datetimes = [parser.parse(ftp.voidcmd('MDTM pub/kegg/medicus/' + x)[4:].strip()) for x in\n ['dgroup/dgroup', 'disease/disease', 'drug/drug', 'network/network']]\n ftp.close()\n version = sorted([x.strftime('%Y.%m.%d') for x in modified_datetimes], reverse=True)[0]\n entry: Entry = {\n 'version': version,\n 'files': {\n 'dgroup': 'ftp://ftp.genome.jp/pub/kegg/medicus/dgroup/dgroup',\n 'drug': 'ftp://ftp.genome.jp/pub/kegg/medicus/drug/drug',\n 'disease': 'ftp://ftp.genome.jp/pub/kegg/medicus/disease/disease',\n 'network': 'ftp://ftp.genome.jp/pub/kegg/medicus/network/network',\n 'variant': 'ftp://ftp.genome.jp/pub/kegg/medicus/network/variant',\n 'human_genes_list.tsv': 'http://rest.kegg.jp/list/hsa',\n 'compounds_list.tsv': 'http://rest.kegg.jp/list/compound',\n 'organisms_list.tsv': 'http://rest.kegg.jp/list/organism',\n },\n 'latest': True\n }\n return [entry]\n\n\ndef get_med_rt_entry() -> List[Entry]:\n ftp = FTP('ftp1.nci.nih.gov')\n ftp.login()\n file_paths = ftp.nlst('/pub/cacore/EVS/MED-RT/Archive')\n file_names = [x.split('/')[-1] for x in file_paths if\n x.split('/')[-1].startswith('Core_MEDRT_') and x.split('/')[-1].endswith('_XML.zip')]\n file_names = sorted(file_names, reverse=True)\n ftp.close()\n pattern = re.compile(r'([0-9]{4}\\.[0-9]{2}\\.[0-9]{2})')\n versions = []\n for file_name in file_names:\n matches = pattern.findall(file_name)\n entry: Entry = {\n 'version': matches[0],\n 'files': {\n 'Core_MEDRT_XML.zip': 'https://evs.nci.nih.gov/ftp1/MED-RT/Archive/' + file_name\n },\n 'latest': file_name == file_names[0]\n }\n versions.append(entry)\n return versions\n\n\ndef get_mondo_entry() -> List[Entry]:\n obo_url = 'http://purl.obolibrary.org/obo/mondo.obo'\n version_line = get_obo_ontology_version_line(obo_url)\n pattern = re.compile(r'([0-9]{4})-([0-9]{2})-([0-9]{2})')\n matches = pattern.findall(version_line)\n entry: Entry = {\n 'version': matches[0][0] + '.' + matches[0][1] + '.' + matches[0][2],\n 'files': {\n 'mondo.obo': obo_url\n },\n 'latest': True\n }\n return [entry]\n\n\ndef get_ndf_rt_entry() -> List[Entry]:\n ftp = FTP('ftp1.nci.nih.gov')\n ftp.login()\n file_paths = ftp.nlst('/pub/cacore/EVS/NDF-RT/Archive')\n file_names = [x.split('/')[-1] for x in file_paths if x.split('/')[-1].startswith('NDFRT_Public_All')]\n file_names = sorted(file_names, reverse=True)\n ftp.close()\n pattern = re.compile(r'([0-9]{4})-([0-9]{2})-([0-9]{2})')\n versions = []\n for file_name in file_names:\n matches = pattern.findall(file_name)\n entry: Entry = {\n 'version': matches[0][0] + '.' + matches[0][1] + '.' 
+ matches[0][2],\n 'files': {\n 'NDFRT_Public_All.zip': 'https://evs.nci.nih.gov/ftp1/NDF-RT/Archive/' + file_name\n },\n 'latest': file_name == file_names[0]\n }\n versions.append(entry)\n return versions\n\n\ndef get_open_targets_entry() -> List[Entry]:\n # TODO\n return DEFAULT\n\n\ndef get_pathway_commons_entry() -> List[Entry]:\n version_source = get_website_source('https://www.pathwaycommons.org/archives/PC2/')\n version_pattern = re.compile(r'v([0-9]+)/')\n # findall with a single group returns strings, so convert each whole match, not its first character\n matches = sorted([int(x) for x in version_pattern.findall(version_source)], reverse=True)\n versions = []\n for version in matches:\n if version < 9:\n break\n url_prefix = 'https://www.pathwaycommons.org/archives/PC2/v%s/' % version\n entry: Entry = {\n 'version': str(version),\n 'files': {\n 'pathways.txt.gz': url_prefix + 'pathways.txt.gz',\n 'datasources.txt': url_prefix + 'datasources.txt',\n 'PathwayCommons.All.uniprot.gmt.gz': url_prefix + 'PathwayCommons%s.All.uniprot.gmt.gz' % version,\n 'PathwayCommons.All.hgnc.txt.gz': url_prefix + 'PathwayCommons%s.All.hgnc.txt.gz' % version,\n 'PathwayCommons.All.hgnc.sif.gz': url_prefix + 'PathwayCommons%s.All.hgnc.sif.gz' % version,\n 'PathwayCommons.All.hgnc.gmt.gz': url_prefix + 'PathwayCommons%s.All.hgnc.gmt.gz' % version,\n 'PathwayCommons.All.BIOPAX.owl.gz': url_prefix + 'PathwayCommons%s.All.BIOPAX.owl.gz' % version,\n },\n 'latest': version == matches[0]\n }\n versions.append(entry)\n return versions\n\n\ndef get_pharmgkb_entry() -> List[Entry]:\n pattern = re.compile(r'([0-9]{4})-([0-9]{2})-([0-9]{2})')\n version = None\n r = requests.get('https://s3.pgkb.org/data/drugLabels.zip', stream=True)\n with ZipFile(BytesIO(r.content)) as zip_file:\n for item in zip_file.filelist:\n if item.filename.startswith('CREATED'):\n match = pattern.findall(item.filename)[0]\n version = match[0] + '.' + match[1] + '.' + match[2]\n break\n entry: Entry = {\n 'version': version,\n 'files': {\n 'genes.zip': 'https://s3.pgkb.org/data/genes.zip',\n 'drugs.zip': 'https://s3.pgkb.org/data/drugs.zip',\n 'chemicals.zip': 'https://s3.pgkb.org/data/chemicals.zip',\n 'variants.zip': 'https://s3.pgkb.org/data/variants.zip',\n 'phenotypes.zip': 'https://s3.pgkb.org/data/phenotypes.zip',\n 'clinicalAnnotations.zip': 'https://s3.pgkb.org/data/clinicalAnnotations.zip',\n 'variantAnnotations.zip': 'https://s3.pgkb.org/data/variantAnnotations.zip',\n 'relationships.zip': 'https://s3.pgkb.org/data/relationships.zip',\n 'dosingGuidelines.json.zip': 'https://s3.pgkb.org/data/dosingGuidelines.json.zip',\n 'drugLabels.zip': 'https://s3.pgkb.org/data/drugLabels.zip',\n 'pathways-tsv.zip': 'https://s3.pgkb.org/data/pathways-tsv.zip',\n 'clinicalVariants.zip': 'https://s3.pgkb.org/data/clinicalVariants.zip',\n 'occurrences.zip': 'https://s3.pgkb.org/data/occurrences.zip',\n 'automated_annotations.zip': 'https://s3.pgkb.org/data/automated_annotations.zip'\n },\n 'latest': True\n }\n return [entry]\n\n\ndef get_redo_db_entry() -> List[Entry]:\n source = get_website_source('https://www.anticancerfund.org/en/redo-db')\n pattern = re.compile(r'Database build date:\s+([0-9]{2})/([0-9]{2})/([0-9]{2})', re.IGNORECASE)\n matches = pattern.findall(source)\n entry: Entry = {\n 'version': matches[0][2] + '.' + matches[0][1] + '.' \n + matches[0][0],\n 'files': {\n 'redo_db.txt': 'https://acfdata.coworks.be/redo_db.txt'\n },\n 'latest': True\n }\n return [entry]\n\n\ndef get_redo_trials_db_entry() -> List[Entry]:\n source = get_website_source('https://www.anticancerfund.org/en/redo-trials-db')\n pattern = re.compile(r'\\s*([0-9]{2})/([0-9]{2})/([0-9]{4})', re.IGNORECASE)\n matches = pattern.findall(source)\n entry: Entry = {\n 'version': matches[0][2] + '.' + matches[0][1] + '.' + matches[0][0],\n 'files': {\n 'ReDO_Trials_DB.txt': 'https://acfdata.coworks.be/ReDO_Trials_DB.txt'\n },\n 'latest': True\n }\n return [entry]\n\n\ndef get_sider_entry() -> List[Entry]:\n ftp = FTP('xi.embl.de')\n ftp.login()\n modified_datetime = parser.parse(ftp.voidcmd('MDTM /SIDER/latest/meddra_all_label_se.tsv.gz')[4:].strip())\n ftp.close()\n entry: Entry = {\n 'version': modified_datetime.strftime('%Y.%m.%d'),\n 'files': {\n 'drug_names.tsv': 'http://sideeffects.embl.de/media/download/drug_names.tsv',\n 'drug_atc.tsv': 'http://sideeffects.embl.de/media/download/drug_atc.tsv',\n 'meddra_all_label_indications.tsv.gz': 'ftp://xi.embl.de/SIDER/latest/meddra_all_label_indications.tsv.gz',\n 'meddra_all_label_se.tsv.gz': 'ftp://xi.embl.de/SIDER/latest/meddra_all_label_se.tsv.gz',\n 'meddra_freq.tsv.gz': 'ftp://xi.embl.de/SIDER/latest/meddra_freq.tsv.gz',\n },\n 'latest': True\n }\n return [entry]\n\n\ndef get_unii_entry() -> List[Entry]:\n source = get_website_source('https://fdasis.nlm.nih.gov/srs/jsp/srs/uniiListDownload.jsp')\n pattern = re.compile(r'Last updated: (Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) ([0-9]{4})')\n matches = pattern.findall(source)\n entry: Entry = {\n 'version': matches[0][1] + '.' + str(MONTHS_SHORT.index(matches[0][0]) + 1),\n 'files': {\n 'UNIIs.zip': 'https://fdasis.nlm.nih.gov/srs/download/srs/UNIIs.zip',\n 'UNII_Data.zip': 'https://fdasis.nlm.nih.gov/srs/download/srs/UNII_Data.zip'\n },\n 'latest': True\n }\n return [entry]\n\n\ndef get_uniprot_entry() -> List[Entry]:\n ftp = FTP('ftp.uniprot.org')\n ftp.login()\n modified_datetime = parser.parse(ftp.voidcmd(\n 'MDTM pub/databases/uniprot/current_release/knowledgebase/taxonomic_divisions/uniprot_sprot_human.xml.gz')[\n 4:].strip())\n ftp.close()\n entry: Entry = {\n 'version': modified_datetime.strftime('%Y.%m.%d'),\n 'files': {\n 'uniprot_sprot_human.xml.gz': 'https://ftp.uniprot.org/pub/databases/uniprot/current_release/' +\n 'knowledgebase/taxonomic_divisions/uniprot_sprot_human.xml.gz'\n },\n 'latest': True\n }\n return [entry]\n\n\ndef get_usda_plants_entry() -> List[Entry]:\n entry: Entry = {\n # No version available\n 'version': None,\n 'files': {\n 'plantlst.txt': 'https://plants.sc.egov.usda.gov/assets/docs/CompletePLANTSList/plantlst.txt'\n },\n 'latest': True\n }\n return [entry]\n\n\ndef try_get_data_source_entry(log, data_source_id, func) -> List[Entry]:\n try:\n versions = func()\n print('Retrieved ' + str(len(versions)) + ' versions for data source \"' + data_source_id + '\"', file=log)\n return versions\n except Exception as e:\n print('Failed to retrieve data source \"' + data_source_id + '\" status', e, file=log)\n return DEFAULT\n\n\nif __name__ == '__main__':\n with open('update-log.txt', 'w', encoding='utf-8') as log:\n print('Updating data sources at ' + datetime.now().isoformat(), file=log)\n result = {\n 'AACT': try_get_data_source_entry(log, 'AACT', get_aact_entry),\n 'CanadianNutrientFile': try_get_data_source_entry(log, 'CanadianNutrientFile',\n get_canadian_nutrient_file_entry),\n 'CancerDrugsDB': \n try_get_data_source_entry(log, 'CancerDrugsDB', get_cancer_drugs_db_entry),\n 'DGIdb': try_get_data_source_entry(log, 'DGIdb', get_dgidb_entry),\n 'DrugBank': try_get_data_source_entry(log, 'DrugBank', get_drugbank_entry),\n 'DrugCentral': try_get_data_source_entry(log, 'DrugCentral', get_drugcentral_entry),\n 'EMA': try_get_data_source_entry(log, 'EMA', get_ema_entry),\n 'Gene2Phenotype': try_get_data_source_entry(log, 'Gene2Phenotype', get_gene2phenotype_entry),\n 'GeneOntology': try_get_data_source_entry(log, 'GeneOntology', get_gene_ontology_entry),\n 'GWASCatalog': try_get_data_source_entry(log, 'GWASCatalog', get_gwas_catalog_entry),\n 'HGNC': try_get_data_source_entry(log, 'HGNC', get_hgnc_entry),\n 'HPO': try_get_data_source_entry(log, 'HPO', get_hpo_entry),\n 'ITIS': try_get_data_source_entry(log, 'ITIS', get_itis_entry),\n 'KEGG': try_get_data_source_entry(log, 'KEGG', get_kegg_entry),\n 'MED-RT': try_get_data_source_entry(log, 'MED-RT', get_med_rt_entry),\n 'Mondo': try_get_data_source_entry(log, 'Mondo', get_mondo_entry),\n 'NDF-RT': try_get_data_source_entry(log, 'NDF-RT', get_ndf_rt_entry),\n 'OpenTargets': try_get_data_source_entry(log, 'OpenTargets', get_open_targets_entry),\n 'PathwayCommons': try_get_data_source_entry(log, 'PathwayCommons', get_pathway_commons_entry),\n 'PharmGKB': try_get_data_source_entry(log, 'PharmGKB', get_pharmgkb_entry),\n 'ReDO-DB': try_get_data_source_entry(log, 'ReDO-DB', get_redo_db_entry),\n 'ReDOTrialsDB': try_get_data_source_entry(log, 'ReDOTrialsDB', get_redo_trials_db_entry),\n 'Sider': try_get_data_source_entry(log, 'Sider', get_sider_entry),\n 'UNII': try_get_data_source_entry(log, 'UNII', get_unii_entry),\n 'UniProt': try_get_data_source_entry(log, 'UniProt', get_uniprot_entry),\n 'USDA-PLANTS': try_get_data_source_entry(log, 'USDA-PLANTS', get_usda_plants_entry),\n }\n with open('result.json', 'w', encoding='utf-8') as f:\n json.dump(result, f, indent=2, sort_keys=True)\n with open('result.min.json', 'w', encoding='utf-8') as f:\n json.dump(result, f)\n","repo_name":"BioDWH2/DataSource-Status","sub_path":"update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":22527,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
+{"seq_id":"39225786488","text":"# input 값으로 score 리스트 생성\nscore = []\n\nfor i in range(10):\n score.append(int(input())) # [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]\n\n\nresult = 0\n\nfor j in score:\n result += j\n if result >= 100: # 계산 결과 100 이상 if문 시작\n if result - 100 > 100 - (result - j): # \"계산 이후 - 100\" > \"100 - 계산 이전\"\n result -= j #계산 이후 값이 크다면(100에서 멀다면), 계산 이전 출력\n break # 값이 같다면, 계산 이후 출력\n\nprint(result)","repo_name":"unboxing96/ALGO","sub_path":"백준/Bronze/2851. 슈퍼 마리오/슈퍼 마리오.py","file_name":"슈퍼 마리오.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"35525487049","text":"import argparse\nimport re\n\narg_parser = argparse.ArgumentParser()\n\narg_parser.add_argument(\"alignment_file\")\n\nargs = arg_parser.parse_args()\n\ninfile = open(args.alignment_file, \"r\")\noutfile = open(args.alignment_file + \".aligned\", \"w\")\nfor line in infile:\n if line.startswith(\"NULL \"):\n alignment = []\n als = re.findall(\"\\(\\{(.*?)\\}\\)\", line.strip())\n for i, entry in enumerate(als[1:]):\n entry = entry.strip()\n if entry:\n for j in entry.split(\" \"):\n alignment.append((i, int(j)-1))\n\n alignment = [str(i)+\"-\"+str(j)for (i,j) in alignment]\n outfile.write(\" \".join(alignment) + \"\\n\")\n\ninfile.close()\noutfile.close()\n","repo_name":"mrmutator/alignment","sub_path":"word_alignment/giza/Parse.py","file_name":"Parse.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"27302708428","text":"\ndef create_tabular_data(scorecard_data, num_of_sets):\n table_data = []\n while len(scorecard_data) != num_of_sets+1:\n scorecard_data.append(('-','-'))\n rows = list(zip(*scorecard_data))\n sets_names = [\"Team Name\"]\n sets_names.extend([f\"Set-{str(i+1)}\" for i in range(num_of_sets)])\n table_data.append(sets_names)\n table_data.extend(rows)\n return table_data\n\ndef disply_table(table_data):\n for row in table_data:\n msg = \"{: >6}\"*len(table_data[0])\n print(f\"{msg}\".format(*row))\n\n","repo_name":"patil-ashutosh/TableTennis","sub_path":"src/game/common/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"14332851911","text":"import urllib2 #change to python3 \nimport datetime\n\n\ndef extractInfo(url):\n\t\n\tpage = urllib2.urlopen(url).read()\n\tpage = page[page.find(\"Booking Schedule\"):page.find(\"thedate\")]\n\tpalce = page.split(\"