diff --git "a/4436.jsonl" "b/4436.jsonl" new file mode 100644--- /dev/null +++ "b/4436.jsonl" @@ -0,0 +1,1975 @@ +{"seq_id":"74342334592","text":"import numpy as np\nimport agents\n\n\nCHECKPOINTS = 'checkpoints'\n\n\nclass Player(object):\n\n def __init__(self, env, monitor='output/', seed=None):\n self.env = env\n self.agents = {'universe': agents.A3C(env, monitor+'universe/',\n CHECKPOINTS+'/universe/'+env+'/', 1),\n 'tensorpack': agents.TPAgent(env, monitor+'tensorpack/',\n CHECKPOINTS+'/tensorpack/'+env+'/'+env, 1),\n 'random': agents.RandomAgent()}\n self.seed = seed\n self.best = ''\n \n \n def choose(self, num_episodes_eval=100):\n scores = {}\n for agent in self.agents.keys():\n scores[agent] = self.agents[agent].play(num_episodes_eval,\n env=self.env,\n record=False,\n seed=self.seed)\n \n self.best = max(scores, key=scores.get)\n\n\n def choose_and_record(self, num_episodes_eval=100, num_episodes_run=100):\n scores = {}\n for agent in self.agents.keys():\n scores[agent] = self.agents[agent].play(num_episodes_eval,\n env=self.env,\n record=False,\n seed=self.seed)\n \n self.best = max(scores, key=scores.get)\n self.agents[max(scores, key=scores.get)].play(num_episodes_run,\n env=self.env,\n record=True,\n seed=self.seed)\n\n def upload(self, outputm, api_key=''):\n self.agents[max(scores, key=scores.get)].do_submit(output, api_key)\n","repo_name":"libfun/deephack3","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"60"} +{"seq_id":"28228345063","text":"import cv2\nimport numpy as np\nfrom absl.logging import info\n\n\ndef visualize_image(image, scale, path):\n image = image.permute(1, 2, 0).cpu().numpy() * 255\n image = cv2.resize(image, (0, 0), fx=scale, fy=scale)\n info(f\"Saving image with scale({scale}) to {path}.\")\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n cv2.imwrite(filename=path, img=image)\n\n\ndef visualize_event(event, scale, path):\n event = event.permute(1, 2, 0).cpu().numpy()\n event_image = np.zeros((event.shape[0], event.shape[1], 3)) + 255\n event_image[event[:, :, 0] > 0] = [0, 0, 255]\n event_image[event[:, :, 1] > 0] = [255, 0, 0]\n event_image = event_image.astype(np.uint8)\n event_image = cv2.resize(event_image, (0, 0), fx=scale, fy=scale)\n info(f\"Saving event with scale({scale}) to {path}.\")\n cv2.imwrite(filename=path, img=event_image)\n\n\ndef visualize_event_alpx(event, path):\n event = event.squeeze().permute(1, 2, 0).cpu().numpy()\n event_abs = np.abs(event)\n event_abs = np.sum(event_abs, axis=2)\n event = np.sum(event, axis=2)\n event_image = np.zeros((event.shape[0], event.shape[1], 3)) + 255\n event_image[event_abs > 0] = [0, 0, 0]\n event_image[event > 0] = [0, 0, 255]\n event_image[event < 0] = [255, 0, 0]\n event_image = event_image.astype(np.uint8)\n info(f\"Saving event to {path}.\")\n cv2.imwrite(filename=path, img=event_image)\n","repo_name":"yunfanLu/INR-Event-VSR","sub_path":"egvsr/utils/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"60"} +{"seq_id":"1330376013","text":"import streamlit as st\nimport osmnx as ox\nimport networkx as nx\nimport pandas as pd\n\n# Load the graph (assuming you have saved it as 'delhi_ev_graph.graphml')\nG = ox.load_graphml('delhi_ev_graph.graphml')\n\ndef shortest_path_with_constraints(G, origin, destination, battery_range, charging_time, 
weight='length'):\n # Find the shortest path between origin and destination\n path = nx.shortest_path(G, origin, destination, weight=weight)\n\n # Calculate the path length\n path_length = nx.path_weight(G, path, weight=weight)\n\n # Check if the path length exceeds the battery range\n if path_length > battery_range:\n # Find charging stations on the path\n charging_stations_on_path = [node for node in path if G.nodes[node].get('charging_station', False)]\n\n # Check if there are charging stations on the path\n if charging_stations_on_path:\n # Find the nearest charging station to the origin\n nearest_charging_station = charging_stations_on_path[0]\n nearest_charging_station_distance = nx.shortest_path_length(G, origin, nearest_charging_station, weight=weight)\n\n # Compute the remaining battery range after reaching the nearest charging station\n remaining_battery_range = battery_range - nearest_charging_station_distance\n\n # Recursively compute the shortest path from the nearest charging station to the destination\n sub_path = shortest_path_with_constraints(G, nearest_charging_station, destination, remaining_battery_range + charging_time * 100, charging_time, weight)\n\n # Combine the sub-path with the current path\n path = path[:path.index(nearest_charging_station) + 1] + sub_path[1:]\n\n return path\n\n\n# Define a function to compute the battery range gained by charging for a specified time\ndef compute_battery_range(charging_time):\n # Compute the battery range gained by charging for the specified time\n battery_range_gained = charging_time * 60 # Assume 60 km of range gained per hour of charging\n \n return battery_range_gained\n\n# Streamlit app layout\nst.title(\"EV Routing Application\")\n\nstart_address = st.text_input(\"Enter starting address:\", value=\"\")\nend_address = st.text_input(\"Enter destination address:\", value=\"\")\nbattery_charge = st.number_input(\"Enter current battery charge (in kilometers):\", value=0)\ncharging_time = st.number_input(\"Enter charging time (in hours):\", value=0)\n\nif start_address and end_address and battery_charge is not None and charging_time is not None:\n # Geocode the addresses to get coordinates\n start_location = ox.geocode(start_address)\n end_location = ox.geocode(end_address) \n\n # Find the nearest nodes to the starting and ending points\n start_node = ox.distance.nearest_nodes(G, X=[start_location[1]], Y=[start_location[0]], return_dist=False)[0]\n end_node = ox.distance.nearest_nodes(G, X=[end_location[1]], Y=[end_location[0]], return_dist=False)[0]\n\n # Compute the battery range gained by charging for the specified time\n battery_range_gained = compute_battery_range(charging_time)\n\n # Compute the battery range considering the current battery charge and charging time\n battery_range = battery_charge + battery_range_gained\n\n # Compute the shortest path using Dijkstra's algorithm based on distance and battery constraints\n shortest_path = shortest_path_with_constraints(G, start_node, end_node, battery_range, charging_time, weight='length')\n shortest_path_distance = nx.shortest_path_length(G, source=start_node, target=end_node, weight='length', method='dijkstra')\n # Get the route geometry\n route_edges = ox.utils_graph.get_route_edge_attributes(G, shortest_path, attribute='geometry')\n route_geometry = [item for sublist in route_edges for item in sublist]\n \n # Plot the route\n fig, ax = ox.plot_graph_route(G, route=shortest_path, route_linewidth=6, node_size=0, bgcolor='k', edge_color='gray', edge_alpha=0.2, 
route_color='b')\n\n# Plot the origin and destination nodes\n orig_node_geom = G.nodes[start_node]['route_geometry']\n dest_node_geom = G.nodes[end_node]['route_geometry']\n ax.scatter(orig_node_geom.x, orig_node_geom.y, c='r', s=100, zorder=3)\n ax.scatter(dest_node_geom.x, dest_node_geom.y, c='r', s=100, zorder=3)\n \n# # Plot the route\n# fig, ax = ox.plot_graph_route(G, route=route_geometry, route_linewidth=6, node_size=0, bgcolor='k', edge_color='gray', edge_alpha=0.2, orig_dest_node_color='r', route_color='b')\n\n# # Show the route map\n# st.pyplot(route_map)\n\n# Display the route distance\n st.write(f\"Shortest path distance: {shortest_path_distance} meters\")\n","repo_name":"RhythmBindal/Predictive_Routing_for_EV_Charging_Stations","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4660,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"37650503089","text":"from enum import Enum, auto\nimport random\nimport sys\n\n\nclass BoardState(Enum):\n BLANK = auto()\n PLAYER = auto()\n CPU = auto()\n\n\nclass GameState(Enum):\n PLAYING = auto()\n PLAYER_WIN = auto()\n CPU_WIN = auto()\n DRAW = auto()\n\n\nclass Board():\n def __init__(self):\n # ボードの幅\n self.board_length = 3\n # マスの数\n self.board_area = self.board_length ** 2\n # 初期化\n self.board = [BoardState.BLANK] * self.board_area\n # 先攻:True、後攻:False\n self.is_player_turn = True\n # ゲームの状態\n self.state = GameState.PLAYING\n\n def display(self):\n print_length = len(str(self.board_area)) + 1\n for i in range(self.board_area):\n if self.board[i] == BoardState.PLAYER:\n print('o'.rjust(print_length), end='')\n elif self.board[i] == BoardState.CPU:\n print('x'.rjust(print_length), end='')\n else:\n print('{0:{print_length}d}'.format(\n i, print_length=print_length), end='')\n if (i + 1) % self.board_length == 0:\n print()\n\n def input_player(self):\n while True:\n i = int(input())\n if i < 0 or i >= self.board_area or self.stone_exists(i) is True:\n print('error: invalid number')\n continue\n self.set_stone(i)\n break\n\n def input_cpu(self):\n idxs = [i for i, x in enumerate(self.board) if x == BoardState.BLANK]\n i = random.randint(0, len(idxs) - 1)\n self.set_stone(idxs[i])\n\n def stone_exists(self, i):\n return True if self.board[i] != BoardState.BLANK else False\n\n def set_stone(self, i):\n self.board[i] = BoardState.PLAYER if self.is_player_turn else BoardState.CPU\n self.update_state()\n self.next_turn()\n\n def next_turn(self):\n self.is_player_turn = not self.is_player_turn\n\n def judge(self, board_sub):\n return True if board_sub[0] != BoardState.BLANK and all([board_sub[0] == i for i in board_sub]) else False\n\n def update_state(self):\n if self.judge(self.board[0: self.board_area: self.board_length + 1]) or \\\n self.judge(self.board[self.board_length - 1: (self.board_length - 1) * self.board_length + 1: self.board_length - 1]):\n if self.is_player_turn:\n self.state = GameState.PLAYER_WIN\n else:\n self.state = GameState.CPU_WIN\n return\n for i in range(self.board_length):\n if self.judge(self.board[i: self.board_area: self.board_length]) or \\\n self.judge(self.board[self.board_length * i: self.board_length * (i + 1)]):\n if self.is_player_turn:\n self.state = GameState.PLAYER_WIN\n else:\n self.state = GameState.CPU_WIN\n return\n if BoardState.BLANK not in self.board:\n self.state = GameState.DRAW\n return\n self.state = GameState.PLAYING\n\n\nclass TicTacToe(Board):\n def input_cpu(self):\n self.set_stone(self.minimax(0))\n\n def 
unset_stone(self, i):\n self.board[i] = BoardState.BLANK\n self.next_turn()\n\n def evaluate(self, depth):\n if self.state == GameState.CPU_WIN:\n self.state = GameState.PLAYING\n return 10 - depth\n elif self.state == GameState.PLAYER_WIN:\n self.state = GameState.PLAYING\n return depth - 10\n else:\n self.state = GameState.PLAYING\n return 0\n\n def minimax(self, depth):\n if self.state != GameState.PLAYING:\n return self.evaluate(depth)\n best_i = 0\n evaluation_value = sys.maxsize if self.is_player_turn else (\n -1) * sys.maxsize\n for i in range(self.board_area):\n if self.stone_exists(i) is False:\n self.set_stone(i)\n evaluation_value_tmp = self.minimax(depth + 1)\n if self.is_player_turn:\n if evaluation_value_tmp > evaluation_value:\n evaluation_value = evaluation_value_tmp\n best_i = i\n else:\n if evaluation_value_tmp < evaluation_value:\n evaluation_value = evaluation_value_tmp\n best_i = i\n self.unset_stone(i)\n if depth == 0:\n return best_i\n else:\n return evaluation_value\n\n def run(self):\n self.display()\n while self.state == GameState.PLAYING:\n msg = 'PLAYER' if self.is_player_turn else 'CPU'\n print(msg)\n if self.is_player_turn:\n self.input_player()\n else:\n self.input_cpu()\n self.display()\n msg = 'player win' if self.state == GameState.PLAYER_WIN else 'cpu win' if self.state == GameState.CPU_WIN else 'draw'\n print(msg)\n\n\nif __name__ == \"__main__\":\n tictactoe = TicTacToe()\n tictactoe.run()\n","repo_name":"ogyogy/tic-tac-toe","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"24801274237","text":"#!/usr/bin/env python3\n\n\"\"\"\nRun with Psy.0.1.typelib on a default path or specify the following\nenvironmental variables before starting this script:\n GI_TYPELIB_PATH = \"path/to/Psy-0.1.typelib\"\nWhen the suitable libpsy-1.0.so .dll is available on the path you\nshould be able to run it otherwise use:\n LD_LIBRARY_PATH = \"path/to/libpsy.so/folder\"\n\"\"\"\n\nimport gi\nimport typing\nimport math as m\nimport time as t\nimport argparse as ap\n\ngi.require_versions({\"Psy\": \"0.1\", \"GLib\": \"2.0\"})\n\nfrom gi.repository import Psy\nfrom gi.repository import GLib\n\n\nclass MyCross(Psy.Cross):\n \"\"\"\n In order to override a virtual method you have to prepend your\n method with do_, so PsyCrossClass->update is called in python\n by MyCross.do_update\n \"\"\"\n\n def do_update(self, timepoint, frame_num):\n self.props.x += 1\n self.props.y += 1\n self.set_color(\n Psy.Color(r=(m.sin(t.time()) / 2 + 0.5), g=m.cos(t.time()) / 2 + 0.5, b=0.5)\n )\n\n\nclass MyRect(Psy.Rectangle):\n def __init__(self, nth_frame, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.is_white = True\n self.white = Psy.Color.new_rgb(1, 1, 1)\n self.black = Psy.Color()\n self.nth_frame = nth_frame\n self.counter = 0\n\n def do_update(self, timepoint, frame_num):\n if self.counter % self.nth_frame == 0:\n if self.is_white:\n self.set_color(self.white)\n else:\n self.set_color(self.black)\n self.is_white = not self.is_white\n self.counter += 1\n\n\ndef stop_loop(\n circle: Psy.Circle,\n time_point: Psy.TimePoint,\n tup: typing.Tuple[GLib.MainLoop, Psy.TimePoint],\n):\n \"\"\"\n Exit from the mainloop and exit the program\n \"\"\"\n loop = tup[0]\n time_start = tup[1]\n\n try:\n print(\n \"Circle.start = {}\".format(\n circle.props.start_time.subtract(time_start).props.seconds\n )\n )\n print(\n \"Circle.stop = {}\".format(\n 
circle.props.stop_time.subtract(time_start).props.seconds\n )\n )\n print(\n \"The circle is presented for roughly = {} seconds\".format(\n circle.props.stop_time.subtract(circle.props.start_time).props.seconds\n )\n )\n except Exception as e:\n print(\"We shouldn't get here\", e)\n pass\n\n loop.quit()\n\n\ndef circle_update(circle: Psy.Circle, nth_frame, data):\n \"\"\"\n Do something nice with the circle\n \"\"\"\n circle.props.radius = circle.props.radius + 1\n circle.props.x = circle.props.x - 1\n circle.props.num_vertices = circle.props.num_vertices + 1\n\n\ndef main(args: ap.Namespace):\n \"\"\"run the program using the parameters in args\"\"\"\n loop = GLib.MainLoop()\n clock = Psy.Clock()\n start = clock.now()\n dur = Psy.Duration.new_ms(args.start_dur)\n stim_dur = Psy.Duration.new_ms(args.duration)\n window = Psy.GtkWindow(n_monitor=args.monitor)\n circle = Psy.Circle.new(window)\n rect = Psy.Rectangle.new(window)\n flikker = MyRect(\n args.swap,\n canvas=window,\n width=100,\n height=100,\n x=-1920 / 2 + 50,\n y=1080 / 2 - 50,\n )\n flikker.set_color(Psy.Color.new_rgb(1, 0, 0))\n rect.props.x, rect.props.y = -200, -200\n rect.props.width, rect.props.height = 100, 100\n rect.set_color(Psy.Color.new_rgb(1.0, 0.0, 0.0))\n cross = MyCross(canvas=window, x=200, y=200, line_length=100, line_width=30)\n circle.play_for(start.add(dur), stim_dur)\n cross.play_for(start.add(dur), stim_dur)\n rect.play_for(start.add(dur), stim_dur)\n flikker.play_for(start.add(dur), stim_dur)\n\n circle.connect(\"stopped\", stop_loop, (loop, start))\n circle.connect(\"update\", circle_update)\n\n loop.run()\n\n\nif __name__ == \"__main__\":\n cmd_parser = ap.ArgumentParser(\n \"python-test.py\",\n description=\"show some test stimuli\",\n epilog=\"Happy Experimenting\",\n )\n\n cmd_parser.add_argument(\n \"-m\", \"--monitor\", help=\"Choose a monitor by number\", type=int, default=0\n )\n cmd_parser.add_argument(\n \"-s\",\n \"--start-dur\",\n help=\"The start duration before onset of the stimuli in milliseconds\",\n type=int,\n default=500,\n )\n cmd_parser.add_argument(\n \"-d\",\n \"--duration\",\n help=\"Duration of the stimuli in milliseconds\",\n type=int,\n default=4000,\n )\n cmd_parser.add_argument(\n \"--swap\",\n help=\"num frames between successive swaps of the rectangle\",\n type=int,\n default=1,\n )\n\n args = cmd_parser.parse_args()\n main(args)\n","repo_name":"UiL-OTS-labs/psy-lib","sub_path":"tests/python-test.py","file_name":"python-test.py","file_ext":"py","file_size_in_byte":4667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"23703488085","text":"class RunnerUp:\n if __name__ == '__main__':\n print(\"Please enter how many number do you want to put int? 
: \")\n n = int(input())\n arr = map(int, input().split(' '))\n runner_up_list = arr\n reversed_num = list(runner_up_list)\n\n print(reversed_num)\n\n print(reversed_num.sort(reverse=True))\n\n n = [2, 3, 4, 5, 6]\n n.sort(reverse=True)\n print(n[1])","repo_name":"MemoNano/Webinar","sub_path":"HackerRank/RunnerUp.py","file_name":"RunnerUp.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"10353305917","text":"from http.server import BaseHTTPRequestHandler, HTTPServer\nimport os\nimport json\n\nclass S(BaseHTTPRequestHandler):\n\n def do_GET(self):\n # read title from path\n title = self.path[1:]\n rss_file = os.path.join('rss', f'{title}.jsonl')\n\n # check if file exists\n if not os.path.exists(rss_file):\n self.send_response(404)\n self.end_headers()\n return\n \n # load rss and send\n with open(rss_file) as f:\n rss = [json.loads(i) for i in f.read().splitlines()]\n rss.reverse()\n\n # create xml and send response\n xml = make_xml(title, rss)\n self.send_response(200)\n self.send_header(\"Content-type\", 'text/xml')\n self.send_header(\"Content-Length\", len(xml))\n self.end_headers()\n self.wfile.write(xml.encode('utf-8'))\n\n def do_POST(self):\n # read title from path\n title = self.path[1:]\n rss_file = os.path.join('rss', f'{title}.jsonl')\n\n # read data from POST body\n content_length = int(self.headers['Content-Length'])\n data = self.rfile.read(content_length)\n\n # check if valid json\n try:\n js = json.loads(data)\n if not ('name' in js and 'magnet' in js):\n raise Exception('Bad request')\n except:\n self.send_response(400)\n self.end_headers()\n return\n \n # fix escaping\n js['magnet'] = js['magnet'].replace('&', '&')\n\n # append to jsonl\n with open(rss_file, 'a') as f:\n json.dump(js, f)\n f.write('\\n')\n\n # send OK\n self.send_response(200)\n self.end_headers()\n\ndef make_xml(title, items):\n content = ''\n for item in items:\n content += f'{item[\"name\"]}{item[\"guid\"]}'\n return f'{title}{content}' \n\ndef start_server(port=8080):\n server_address = ('', port)\n httpd = HTTPServer(server_address, S)\n try:\n httpd.serve_forever()\n except KeyboardInterrupt:\n pass\n httpd.server_close()","repo_name":"Haroon96/TorrenterBot","sub_path":"rss_server.py","file_name":"rss_server.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"22739486150","text":"def bubble_sort(arr):\n n = len(arr)\n for i in range(n-1):\n # Flag to check if any swaps are made in this pass\n swapped = False\n\n # Last i elements are already in place\n for j in range(0, n - i - 1):\n # Swap if the element found is greater than the next element\n if arr[j] > arr[j + 1]:\n arr[j], arr[j + 1] = arr[j + 1], arr[j]\n swapped = True\n\n # If no swaps are made in this pass, the array is already sorted\n if not swapped:\n break\n\n# Example usage:\nif __name__ == \"__main__\":\n arr = [64, 34, 25, 12, 22, 11, 90]\n bubble_sort(arr)\n print(\"Sorted array:\", arr)\n","repo_name":"harshpujari/DSAbyThomasCormen","sub_path":"2.2bubbleSort.py","file_name":"2.2bubbleSort.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"19067160013","text":"import datetime\n\nimport requests\nfrom datetime import datetime\nfrom django.utils import timezone\nfrom django.views.decorators.csrf import csrf_exempt\nfrom 
django.shortcuts import render, redirect\nimport hashlib\nimport psycopg2\nimport json\nfrom django.http import JsonResponse\n\nfrom common.models import Xfactor_Log\n\nwith open(\"setting.json\", encoding=\"UTF-8\") as f:\n SETTING = json.loads(f.read())\nDBHost = SETTING['DB']['DBHost']\nDBPort = SETTING['DB']['DBPort']\nDBName = SETTING['DB']['DBName']\nDBUser = SETTING['DB']['DBUser']\nDBPwd = SETTING['DB']['DBPwd']\nUserTNM = SETTING['DB']['UserTNM']\nLogin_Method = SETTING['PROJECT']['LOGIN']\napiUrl = SETTING['API']['apiUrl']\nSesstionKeyPath = SETTING['API']['PATH']['SessionKey']\n\n\n# hi\n@csrf_exempt\ndef signup(request):\n if request.method == \"GET\":\n return render(request, 'common/signup.html')\n\n elif request.method == \"POST\":\n page = request.POST.get('page')\n x_id = request.POST.get('x_id')\n x_pw = request.POST.get('x_pw')\n re_x_pw = request.POST.get('re_x_pw')\n x_name = request.POST.get('x_name')\n x_email = request.POST.get('x_email')\n x_auth = request.POST.get('x_auth')\n res_data = {}\n\n RS = createUsers(x_id, x_pw, x_name, x_email, x_auth)\n if RS == \"1\":\n if page == 'um':\n res_data['error'] = \"회원가입에 성공하였습니다.\"\n redirect_url = '../user_management'\n function = 'Add User' # 분류 정보를 원하시는 텍스트로 변경해주세요.\n item = 'Add user '+ x_id\n result = '성공'\n user = request.session.get('sessionid')\n date = timezone.now()\n Xfactor_log = Xfactor_Log(\n log_func=function,\n log_item=item,\n log_result=result,\n log_user=user,\n log_date=date\n )\n Xfactor_log.save()\n return redirect(redirect_url)\n #return render(request, 'user_management.html', res_data)\n else :\n function = 'Signup User' # 분류 정보를 원하시는 텍스트로 변경해주세요.\n item = 'Signup user ' + x_id\n result = '성공'\n user = x_id\n date = timezone.now()\n Xfactor_log = Xfactor_Log(\n log_func=function,\n log_item=item,\n log_result=result,\n log_user=user,\n log_date=date\n )\n Xfactor_log.save()\n res_data['error'] = \"회원가입에 성공하였습니다.\"\n return render(request, 'common/login.html', res_data)\n else:\n res_data['error'] = \"아이디가 존재합니다.\"\n res_data['x_id'] = x_id\n res_data['x_name'] = x_name\n res_data['x_email'] = x_email\n res_data['x_auth'] = x_auth\n return render(request, 'common/signup.html', res_data)\n\n@csrf_exempt\ndef login(request):\n if Login_Method == \"WEB\":\n if request.method == 'GET':\n return render(request, 'common/login.html')\n\n # POST 방식 요청 -> 사용자가 보내는 데이터와 데이터베이스의 정보 일치여부 확인\n elif request.method == 'POST':\n x_id = request.POST.get('x_id', None)\n x_pw = request.POST.get('x_pw', None)\n\n # 응답 데이터\n res_data = {}\n\n # 모든 필드를 채우지 않았을 경우\n if not (x_id and x_pw):\n res_data['error'] = '아이디 또는 비밀번호를 입력해 주세요.'\n return render(request, 'common/login.html', res_data)\n # 모든 필드를 채웠을 경우\n else:\n RS = selectUsers(x_id, x_pw)\n print(RS)\n if RS == None:\n res_data['error'] = '아이디 또는 비밀번호가 일치하지 않습니다'\n return render(request, 'common/login.html', res_data)\n\n else:\n request.session['sessionid'] = RS[0]\n request.session['sessionname'] = RS[2]\n request.session['sessionemail'] = RS[3]\n function = 'Login' # 분류 정보를 원하시는 텍스트로 변경해주세요.\n item = 'admin 계정'\n result = '성공'\n user = RS[0]\n now = timezone.now().replace(microsecond=0)\n date = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n print(date)\n Xfactor_log = Xfactor_Log(\n log_func=function,\n log_item=item,\n log_result=result,\n log_user=user,\n log_date=date\n )\n Xfactor_log.save()\n return redirect('../dashboard')\n elif Login_Method == \"Tanium\":\n if request.method == 'GET':\n returnData = {'Login_Method': Login_Method}\n return render(request, 
'common/login.html', returnData)\n\n elif request.method == 'POST':\n\n x_id = request.POST.get('x_id', None)\n x_pw = request.POST.get('x_pw', None)\n\n # 응답 데이터\n res_data = {}\n\n # 모든 필드를 채우지 않았을 경우\n if not (x_id and x_pw):\n res_data['error'] = '아이디 또는 비밀번호를 입력해 주세요.'\n return render(request, 'common/login.html', res_data)\n # 모든 필드를 채웠을 경우\n else:\n TRS = taniumUsers(x_id, x_pw)\n if TRS == None:\n res_data['error'] = '아이디 또는 비밀번호가 일치하지 않습니다'\n return render(request, 'common/login.html', res_data)\n else:\n request.session['sessionid'] = x_id\n return redirect('../dashboard')\n\n@csrf_exempt\ndef updateform(request):\n try:\n if request.method == \"GET\":\n # print(request.session.sessionid)\n return render(request, 'common/updateform.html')\n\n elif request.method == \"POST\":\n x_id = request.POST.get('x_id')\n x_pw = request.POST.get('x_pw')\n hashpassword = hashlib.sha256(x_pw.encode()).hexdigest()\n Conn = psycopg2.connect('host={0} port={1} dbname={2} user={3} password={4}'.format(DBHost, DBPort, DBName, DBUser, DBPwd))\n Cur = Conn.cursor()\n\n query = \"\"\"\n select\n *\n from\n \"\"\" + UserTNM + \"\"\"\n where\n x_id = '\"\"\" + x_id + \"\"\"'\n and\n x_pw = '\"\"\" + hashpassword + \"\"\"'\n\n \"\"\"\n Cur.execute(query)\n RS = Cur.fetchall()\n res_data = {}\n print(RS)\n if RS[0] != None:\n res_data['x_id'] = RS[0][0]\n res_data['x_name'] = RS[0][2]\n res_data['x_email'] = RS[0][3]\n res_data['x_auth'] = RS[0][4]\n # print(res_data)\n return render(request, 'common/update.html', res_data)\n except:\n res_data['error'] = '비밀번호를 다시한번 확인 해 주세요.'\n return render(request, 'common/updateform.html', res_data)\n\n@csrf_exempt\ndef update(request):\n if request.method == \"GET\":\n # 404 에러페이지 넣을것\n return render(request, '')\n\n elif request.method == \"POST\":\n x_id = request.POST.get('x_id')\n x_pw = request.POST.get('x_pw')\n re_x_pw = request.POST.get('re_x_pw')\n x_name = request.POST.get('x_name')\n x_email = request.POST.get('x_email')\n x_auth = request.POST.get('x_auth')\n res_data = {}\n if not (x_id and x_pw and x_name and x_email and x_auth):\n res_data['x_id'] = x_id\n res_data['x_pw'] = x_pw\n res_data['x_name'] = x_name\n res_data['x_email'] = x_email\n res_data['x_auth'] = x_auth\n res_data['error'] = \"모든 값을 입력해야 합니다.\"\n return render(request, 'common/update.html', res_data)\n if x_pw != re_x_pw:\n res_data['x_id'] = x_id\n res_data['x_pw'] = x_pw\n res_data['x_name'] = x_name\n res_data['x_email'] = x_email\n res_data['x_auth'] = x_auth\n res_data['error'] = '비밀번호가 다릅니다.'\n return render(request, 'common/update.html', res_data)\n else:\n RS = updateUsers(x_id, x_pw, x_name, x_email, x_auth)\n if RS == \"1\":\n request.session['sessionname'] = x_name\n request.session['sessionemail'] = x_email\n return redirect('../dashboard')\n else:\n res_data['error'] = '회원정보 변경이 실패했습니다.'\n return render(request, 'common/update.html', res_data)\n\n@csrf_exempt\ndef logout(request):\n if Login_Method == \"WEB\":\n if 'sessionid' in request.session:\n\n function = 'Logout' # 분류 정보를 원하시는 텍스트로 변경해주세요.\n item = 'admin 계정'\n result = '성공'\n user = request.session.get('sessionid')\n date = timezone.now()\n Xfactor_log = Xfactor_Log(\n log_func=function,\n log_item=item,\n log_result=result,\n log_user=user,\n log_date=date\n )\n Xfactor_log.save()\n del (request.session['sessionid'])\n del (request.session['sessionname'])\n del (request.session['sessionemail'])\n return render(request, 'common/login.html')\n else:\n return render(request, 'common/login.html')\n elif Login_Method 
== \"Tanium\":\n if request.method == 'GET':\n returnData = {'Login_Method': Login_Method}\n return render(request, 'common/login.html', returnData)\n if 'sessionid' in request.session:\n del (request.session['sessionid'])\n return render(request, 'common/login.html')\n else:\n return render(request, 'common/login.html')\n\n@csrf_exempt\ndef selectUsers(x_id, x_pw):\n try:\n hashpassword = hashlib.sha256(x_pw.encode()).hexdigest()\n # print(hashpassword)\n\n Conn = psycopg2.connect('host={0} port={1} dbname={2} user={3} password={4}'.format(DBHost, DBPort, DBName, DBUser, DBPwd))\n Cur = Conn.cursor()\n\n query = \"\"\"\n select \n *\n from\n \"\"\" + UserTNM + \"\"\"\n where\n x_id = '\"\"\" + x_id + \"\"\"'\n and\n x_pw = '\"\"\" + hashpassword + \"\"\"' \n\n \"\"\"\n\n Cur.execute(query)\n RS = Cur.fetchone()\n # print(RS)\n return RS\n except:\n print(UserTNM + ' Table connection(Select) Failure')\n\n@csrf_exempt\ndef createUsers(x_id, x_pw, x_name, x_email, x_auth):\n try:\n hashpassword = hashlib.sha256(x_pw.encode()).hexdigest()\n Conn = psycopg2.connect('host={0} port={1} dbname={2} user={3} password={4}'.format(DBHost, DBPort, DBName, DBUser, DBPwd))\n Cur = Conn.cursor()\n query = \"\"\" \n INSERT INTO \n common_xfactor_xuser\n (x_id, x_pw, x_name, x_email, x_auth) \n VALUES ( \n '\"\"\" + x_id + \"\"\"',\n '\"\"\" + hashpassword + \"\"\"' ,\n '\"\"\" + x_name + \"\"\"',\n '\"\"\" + x_email + \"\"\"',\n '\"\"\" + x_auth + \"\"\"'\n );\n \"\"\"\n Cur.execute(query)\n Conn.commit()\n Conn.close()\n a = \"1\"\n return a\n except:\n print(UserTNM + ' Table connection(Select) Failure')\n a = \"0\"\n return a\n\n\n@csrf_exempt\ndef updateUsers(x_id, x_pw, x_name, x_email, x_auth):\n try:\n hashpassword = hashlib.sha256(x_pw.encode()).hexdigest()\n Conn = psycopg2.connect('host={0} port={1} dbname={2} user={3} password={4}'.format(DBHost, DBPort, DBName, DBUser, DBPwd))\n Cur = Conn.cursor()\n query = \"\"\" \n UPDATE\n common_xfactor_xuser \n SET\n x_pw= '\"\"\" + hashpassword + \"\"\"',\n x_name= '\"\"\" + x_name + \"\"\"',\n x_email= '\"\"\" + x_email + \"\"\"',\n x_auth= '\"\"\" + x_auth + \"\"\"'\n WHERE\n x_id = '\"\"\" + x_id + \"\"\"';\n \"\"\"\n # print(query)\n Cur.execute(query)\n Conn.commit()\n Conn.close()\n a = \"1\"\n return a\n except:\n print(UserTNM + ' Table connection(Update) Failure')\n a = \"0\"\n return a\n\n@csrf_exempt\ndef taniumUsers(x_id, x_pw):\n try:\n path = SesstionKeyPath\n urls = apiUrl + path\n headers = '{\"username\" : \"' + x_id + '\",\"domain\":\"\", \"password\":\"' + x_pw + '\"}'\n response = requests.post(urls, data=headers, verify=False)\n code = response.status_code\n if code == 200:\n a = response.json()\n sessionKey = a['data']['session']\n returnList = sessionKey\n return returnList\n elif code == 403:\n print()\n\n except ConnectionError as e:\n print(e)\n\n@csrf_exempt\ndef delete(request):\n x_ids_str = request.POST.get('x_id') # 쉼표로 구분된 문자열을 얻음\n x_ids = x_ids_str.split(',')\n try:\n Conn = psycopg2.connect('host={0} port={1} dbname={2} user={3} password={4}'.format(DBHost, DBPort, DBName, DBUser, DBPwd))\n Cur = Conn.cursor()\n for x_id in x_ids:\n query = \"\"\" \n DELETE FROM\n common_xfactor_xuser\n WHERE\n x_id = %s;\n \"\"\"\n Cur.execute(query, (x_id,))\n\n Conn.commit()\n Conn.close()\n\n function = 'User Delete' # 분류 정보를 원하시는 텍스트로 변경해주세요.\n item = 'Delete user ' + x_id\n result = '성공'\n user = request.session.get('sessionid')\n now = datetime.now().replace(microsecond=0)\n date = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n 
print(date)\n Xfactor_log = Xfactor_Log(\n log_func=function,\n log_item=item,\n log_result=result,\n log_user=user,\n log_date=date\n )\n Xfactor_log.save()\n\n return JsonResponse({'result': 'success'}, status=200) # 성공적으로 삭제되었을 때 응답\n except Exception as e:\n print(str(e)) # 에러 메시지 출력 (디버깅 용)\n return JsonResponse({'result': 'failure'}, status=400) # 삭제 중 오류가 발생했을 때 응답\n","repo_name":"XionITS/X-Factor-SM-nc","sub_path":"common/views_user.py","file_name":"views_user.py","file_ext":"py","file_size_in_byte":15196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"13392801473","text":"from hierarchybuilder.topic_clustering import main_clustering as main_clustering\nfrom hierarchybuilder.expansions import parse_medical_data\nimport torch\nimport os\nimport sys\nfrom transformers import AutoTokenizer, AutoModel\nimport statistics\n\ncos = torch.nn.CosineSimilarity(dim=0, eps=1e-08)\nnlp = parse_medical_data.nlp\ndevice = torch.device(\"cpu\")\n# device = torch.device(\"cuda:1\" if torch.cuda.is_available() else \"cpu\")\nsapBert_tokenizer = AutoTokenizer.from_pretrained('cambridgeltl/SapBERT-from-PubMedBERT-fulltext')\nsapBert_model = AutoModel.from_pretrained('cambridgeltl/SapBERT-from-PubMedBERT-fulltext')\nmodel = sapBert_model\nmodel = model.eval()\n\ndict_span_to_lemma_lst = {}\ntopics_dict = {}\ndict_span_to_counter = {}\ndict_word_to_lemma = {}\ndict_lemma_to_synonyms = {}\ndict_longest_span_to_counter = {}\ndict_noun_lemma_to_synonyms = {}\ndict_noun_lemma_to_noun_words = {}\ndict_noun_lemma_to_counter = {}\ndict_noun_word_to_counter = {}\ndict_full_np_to_sentences = {}\nentries_number_limit = 50\nhost_and_port = \"127.0.0.1:5000\"\n\n\ndef initialize_data(examples, host_val, port_val, ignore_words, entries_number,\n device_type, has_umls_server):\n global topics_dict, dict_span_to_counter, dict_word_to_lemma, dict_lemma_to_synonyms, \\\n dict_longest_span_to_counter, dict_noun_lemma_to_synonyms, dict_noun_lemma_to_noun_words, \\\n dict_noun_lemma_to_counter, dict_noun_word_to_counter, entries_number_limit, device, model, \\\n host_and_port, dict_full_np_to_sentences\n host_and_port = \"http://\" + host_val + \":\" + str(port_val)\n if device_type:\n device = device_type\n model = model.to(device)\n model = model.eval()\n entries_number_limit = entries_number\n if ignore_words is None:\n ignore_words = set()\n collection_format_examples = parse_medical_data.get_examples_as_all_optional_answers_format(examples)\n topics_dict, dict_span_to_counter, dict_word_to_lemma, dict_lemma_to_synonyms, \\\n dict_longest_span_to_counter, dict_noun_lemma_to_synonyms, dict_noun_lemma_to_noun_words, dict_noun_lemma_to_counter, \\\n dict_noun_word_to_counter, dict_full_np_to_sentences = \\\n main_clustering.convert_examples_to_clustered_data(collection_format_examples, ignore_words, host_and_port,\n has_umls_server)\n dict_span_to_counter.update(dict_noun_word_to_counter)\n dict_span_to_counter.update(dict_noun_lemma_to_counter)\n\n\ndef dfs_for_cyclic(visited, helper, node):\n visited.append(node)\n helper.append(node)\n children = node.children\n for child in children:\n if child not in visited:\n ans = dfs_for_cyclic(visited, helper, child)\n if ans == True:\n print(child.span_lst)\n return True\n elif child in helper:\n print(child.span_lst)\n return True\n helper.remove(node)\n return False\n\n\ndef isCyclic(nodes_lst):\n visited = []\n helper = []\n for i in nodes_lst:\n if i not in visited:\n ans = dfs_for_cyclic(visited, helper, i)\n 
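# dfs_for_cyclic reports True when a child is already on the helper recursion stack (a back edge), so a True result here means the candidate DAG actually contains a cycle.\n            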
if ans == True:\n print(i.span_lst)\n return True\n return False\n\n\ndef update_nodes_labels(nodes_lst, visited=set()):\n labels_lst = set()\n for node in nodes_lst:\n if node in visited:\n continue\n visited.add(node)\n desc_labels = update_nodes_labels(node.children, visited)\n node.label_lst.update(desc_labels)\n labels_lst.update(node.label_lst)\n return labels_lst\n\n\ndef get_all_spans(np_object_lst, all_spans, visited=set()):\n for np_object in np_object_lst:\n if np_object in visited:\n continue\n visited.add(np_object)\n all_spans.update(np_object.span_lst)\n get_all_spans(np_object.children, all_spans, visited)\n\n\ndef dfs(visited, node):\n if node not in visited:\n visited.append(node)\n for neighbour in node.children:\n dfs(visited, neighbour)\n\n\ndef depth_dag(node, counter=0, visited=set()):\n max_depth = counter\n for child in node.children:\n current_max_depth = depth_dag(child, counter + 1, visited)\n if current_max_depth > max_depth:\n max_depth = current_max_depth\n return max_depth\n\n\ndef get_leaves(node, leaves, visited=set()):\n if node in visited:\n return\n visited.add(node)\n if not node.children:\n leaves.add(node)\n return\n for child in node.children:\n get_leaves(child, leaves, visited)\n\n\ndef get_all_nodes(nodes_lst, visited):\n for node in nodes_lst:\n if node in visited:\n continue\n visited.add(node)\n get_all_nodes(node.children, visited)\n\n\ndef calculation_for_paper(topic_object_lst, top_k_topics):\n visited = set()\n get_all_nodes(topic_object_lst, visited)\n all_dag_nodes = list(visited)\n print(\"number of nodes is \" + str(len(all_dag_nodes)))\n max_depth = 0\n for topic in topic_object_lst:\n depth = depth_dag(topic)\n if depth > max_depth:\n max_depth = depth\n print(\"the depth of the entire DAG is \" + str(max_depth))\n min_leaves = 10000\n max_leaves = 0\n total_leaves = 0\n max_depth = 0\n min_depth = 1000\n total_depth = 0\n all_leaves = set()\n for entry in top_k_topics:\n leaves = set()\n get_leaves(entry, leaves)\n all_leaves.update(leaves)\n leaves_number = len(leaves)\n total_leaves += leaves_number\n if leaves_number < min_leaves:\n min_leaves = leaves_number\n if leaves_number > max_leaves:\n max_leaves = leaves_number\n depth = depth_dag(entry)\n total_depth += depth\n if depth > max_depth:\n max_depth = depth\n if depth < min_depth:\n min_depth = depth\n # top k leaves\n print(\"average number of leaves is \")\n print(total_leaves / len(top_k_topics))\n print(\"minimal leaves for top k entry is \" + str(min_leaves))\n print(\"maximal leaves for top k entry is \" + str(max_leaves))\n # top k depth\n print(\"average number of depth is \")\n print(total_depth / len(top_k_topics))\n print(\"minimal depth for top k entry is \" + str(min_depth))\n print(\"maximal depth for top k entry is \" + str(max_depth))\n all_dag_nodes_in_top_k = set()\n get_all_nodes(top_k_topics, all_dag_nodes_in_top_k)\n number_of_children_array = []\n for node in all_dag_nodes_in_top_k:\n if node in top_k_topics:\n continue\n if node in all_leaves:\n continue\n number_of_children_array.append(len(node.children))\n # internal nodes\n print(\"minimal val of internal nodes is \" + str(min(number_of_children_array)))\n print(\"maximal val of internal nodes is \" + str(max(number_of_children_array)))\n print(\"average number of internal nodes \")\n print(len(number_of_children_array) / len(top_k_topics))\n ans = statistics.variance(number_of_children_array)\n print(\"The variance of list is : \")\n 
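# statistics.variance computes the sample (n-1) variance of the per-node child counts collected above.\n    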
print(ans)\n","repo_name":"itayair/hierarchybuilder","sub_path":"hierarchybuilder/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7037,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"34752256802","text":"import torch\nimport numpy as np\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport copy\nfrom stale import compute_stale_grad_alpha\n\nclass Architect(object):\n def __init__(self, model, args):\n self.network_momentum = args.momentum\n self.network_weight_decay = args.weight_decay\n self.model = model\n self.optimizer = torch.optim.Adam(self.model.arch_parameters(),\n lr=args.arch_learning_rate, betas=(0.5, 0.999), weight_decay=args.arch_weight_decay)\n self.baseline = None\n self.baseline_decay = args.arch_baseline_decay\n\n\n def step(self, epoch_acc, epoch_index_normal, epoch_index_reduce):\n self._compute_grad(self.model.alphas_normal, epoch_acc, epoch_index_normal)\n self._compute_grad(self.model.alphas_reduce, epoch_acc, epoch_index_reduce)\n self.optimizer.step()\n self.optimizer.zero_grad()\n\n def _compute_grad(self, alphas, accuracy_list, index_list):\n grad = torch.zeros(alphas.size())\n prob = F.softmax(alphas, dim=-1)\n rewards = self._compute_reward(accuracy_list)\n for client_idx in range(len(rewards)):\n reward = rewards[client_idx]\n index = index_list[client_idx]\n client_grad = torch.Tensor(prob.shape)\n client_grad.copy_(prob)\n # nabla _alpha { log(p(g_i)) } = (p_1, ..., p_i-1, ..., p_N)\n for edge_idx in range(client_grad.shape[0]):\n index_prob = client_grad[edge_idx][index[edge_idx]]\n client_grad[edge_idx][index[edge_idx]] = index_prob -1\n grad += reward * client_grad\n grad /= len(rewards)\n alphas.grad = grad\n\n def _compute_reward(self,accuracy_list):\n # scale accuracy to 0-1\n avg_acc = torch.mean(torch.Tensor(accuracy_list)) / 100\n if self.baseline is None:\n self.baseline = avg_acc\n else:\n self.baseline += self.baseline_decay * (avg_acc - self.baseline)\n # reward = accuracy - baseline\n return [accuracy_list[i]/100 - self.baseline for i in range(len(accuracy_list))]\n\n def stale_step(self, epoch_acc, epoch_index_normal, epoch_index_reduce, stale_alphas_normal, stale_alphas_reduce, stale_acc, stale_index_normal, stale_index_reduce):\n self._compute_stale_grad(self.model.alphas_normal, epoch_acc, epoch_index_normal, stale_alphas_normal, stale_acc, stale_index_normal)\n self._compute_stale_grad(self.model.alphas_reduce, epoch_acc, epoch_index_reduce, stale_alphas_reduce , stale_acc, stale_index_reduce)\n self.optimizer.step()\n self.optimizer.zero_grad()\n\n def _compute_stale_grad(self, alphas, accuracy_list, index_list, old_alphas, old_accuracy, old_index):\n grad = torch.zeros(alphas.size())\n prob = F.softmax(alphas, dim=-1)\n rewards = self._compute_reward(accuracy_list)\n for client_idx in range(len(rewards)):\n reward = rewards[client_idx]\n index = index_list[client_idx]\n client_grad = torch.Tensor(prob.shape)\n client_grad.copy_(prob)\n # nabla _alpha { log(p(g_i)) } = (p_1, ..., p_i-1, ..., p_N)\n for edge_idx in range(client_grad.shape[0]):\n index_prob = client_grad[edge_idx][index[edge_idx]]\n client_grad[edge_idx][index[edge_idx]] = index_prob -1\n grad += reward * client_grad\n\n # stale update\n old_reward = self._compute_reward(old_accuracy)\n for stale_idx in range(len(old_alphas)):\n stale_grad = compute_stale_grad_alpha(old_index[stale_idx], old_alphas[stale_idx], alphas)\n grad += 
old_reward[stale_idx] * stale_grad\n grad /= (len(rewards)+len(old_alphas))\n alphas.grad = grad\n\n\n\n\n\nif __name__ == '__main__':\n from model_search import Network\n class TMP:\n def __init__(self):\n self.momentum = 0.9\n self.weight_decay = 3E-4\n self.arch_learning_rate = 1E-3\n self.arch_weight_decay = 3e-4\n args = TMP()\n criterion = nn.CrossEntropyLoss()\n model = Network(16, 10, 8, criterion)\n architect = Architect(model,args)\n pass","repo_name":"dixiyao/CS385","sub_path":"project2/rl_federated_nas/architect.py","file_name":"architect.py","file_ext":"py","file_size_in_byte":3869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"75561732352","text":"from itertools import combinations\nimport matplotlib.gridspec as grsp\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport random\nfrom SALib.sample import saltelli\nfrom SALib.analyze import sobol\nfrom scipy.special import binom\nimport seaborn as sns\nimport sys\nimport timeit\nimport pickle\nfrom scipy.stats import multivariate_normal\n\nstart_time = timeit.default_timer()\n\ndef f_emul(gp,poly,X_new,n_samples,seed,svm,ranges,N): \n\tX_whole = np.zeros((X_new.shape[0],X_new.shape[1]))\n\tfor i in range(X_new.shape[1]):\n\t\tX_whole[:,i] = X_new[:,i]/ranges[i,1]\n\n\tif svm != 0:\n\t\tlabel = svm.predict(X_whole)\n\n\t\tconta1 = 0\n\t\tcontameno1 = 0\n\t\tfor i in range(label.shape[0]):\n\t\t\tif label[i] == 1:\n\t\t\t\tconta1 = conta1 + 1\n\t\t\telse:\n\t\t\t\tcontameno1 = contameno1 + 1\n\n\t\tprint('With {} input points (N), X Sobol has dimensions: {}'.format(N,X_new.shape))\t\t\n\t\tprint('SVM makes {} predictions, which should be equal to {}, the number of X Sobol rows'.format(label.shape[0],X_whole.shape[0]))\n\t\tprint('SVM predicts {} points to be discarded over {} points, which is {} per cent'.format(contameno1,label.shape[0],np.round(contameno1/label.shape[0]*100,1) ) )\n\t\t# print('conta1 {}'.format(conta1))\n\t\t# print('contameno1 {}'.format(contameno1))\n\n\t\tlista = np.where(label==-1)[0]\n\n\tres, cov = gp.predict(X_new, return_cov=True)\n\tif svm != 0:\n\t\tres[lista,:] = np.zeros((len(lista),1)) \n\n\tmean = poly.predict(X_new)\n\tif svm != 0:\n\t\tmean[lista,:] = np.zeros((len(lista),1))\n\n\ty_sample = multivariate_normal.rvs( np.ndarray.flatten(res+mean) , cov, size=n_samples, random_state=seed).T\n\n\t# if etic == 'SIsvm':\n\t# \tfor jj in lista:\n\t# \t\ty_sample[jj,:] = np.zeros((1,n_samples))\n\n\treturn y_sample\n\n\nSEED = 8\n\ndef main():\n\tseed = SEED\n\trandom.seed(seed)\n\tnp.random.seed(seed)\n\n\t# Usage:\n\t# python3 run_gsa_mio.py ranges, num_par, GP, poly, folder to save indexes, num points Sobol sequence, SVM.\n\t# THe script can be called with 5 or 6 parameters, depending whether or not there's the SVM!\n\n\tranges = np.load(sys.argv[1])\n\t\n\tif int(sys.argv[2]) == 10: # EPI_ENDO - ENDO_EPI\n\t\tlabels = ['Scar radius','Scar depth','Scar conductivity','Internal bath','EHT thickness','EHT conductivity','CP thickness','CP conductivity','EHT-tissue contact area','Delta thickness'] # 10\n\tif int(sys.argv[2]) == 9: # TRANSMURAL\n\t\tlabels = ['Scar radius','Scar conductivity','Internal bath','EHT thickness','EHT conductivity','CP thickness','CP conductivity','EHT-tissue contact area','Delta thickness'] # 9\n\tif int(sys.argv[2]) == 8: # BLOCKED\n\t\tlabels = ['Scar radius','Internal bath','EHT thickness','EHT conductivity','CP thickness','CP conductivity','EHT-tissue contact area','Delta thickness'] 
# 8\n\tif int(sys.argv[2]) == 7: # FIXED\n\t\tlabels = ['Internal bath','EHT thickness','EHT conductivity','CP thickness','CP conductivity','EHT-tissue contact area','Delta thickness'] # 7\n\t\n\tGP_path = sys.argv[3]\n\tpoly_path = sys.argv[4]\n\n\ttag = sys.argv[5]\n\n\tsobol_seq_points = int(sys.argv[6])\n\n\tif len(sys.argv) == 8:\n\t\tsvm_path = sys.argv[7]\n\t\twith open(svm_path, 'rb') as f:\n\t\t\tsvm = pickle.load(f)\n\telse:\n\t\tsvm = 0\n\n\t#========================\n\t# GPE loading\n\t#========================\n\n\twith open(GP_path, 'rb') as f:\n\t\tgp = pickle.load(f)\n\n\twith open(poly_path, 'rb') as f:\n\t\tpoly = pickle.load(f)\n\t\n\t#========================\n\t# SA LIB\n\t#========================\n\tN = sobol_seq_points # deafult 1000, try 2000, 3000 and 5000 \n\tD = len(labels)\n\tn_draws = 1000\n\n\tI = ranges\n\tindex_i = labels\n\tindex_ij = ['({}, {})'.format(c[0], c[1]) for c in combinations(index_i, 2)]\n\n\tproblem = {\n\t\t'num_vars': D,\n\t\t'names': index_i,\n\t\t'bounds': I\n\t}\n\n\tX_sobol = saltelli.sample(problem, N, calc_second_order=True) # N x (2D + 2)\n\n\n\tY = f_emul(gp, poly, X_sobol, n_draws, seed, svm, I, N)\n\n\n\tST = np.zeros((0, D), dtype=float)\n\tS1 = np.zeros((0, D), dtype=float)\n\tS2 = np.zeros((0, int(binom(D, 2))), dtype=float)\n\tfor i in range(n_draws):\n\t\tS = sobol.analyze(problem, Y[:,i], calc_second_order=True, parallel=True, n_processors=24, seed=seed)\n\t\ttotal_order, first_order, (_, second_order) = sobol.Si_to_pandas_dict(S)\n\t\tST = np.vstack((ST, total_order['ST'].reshape(1, -1)))\n\t\tS1 = np.vstack((S1, first_order['S1'].reshape(1, -1)))\n\t\tS2 = np.vstack((S2, np.array(second_order['S2']).reshape(1, -1)))\n\n\tprint('GSA - Elapsed time: {:.1f} min'.format( (timeit.default_timer() - start_time)/60 ))\n\n\tnp.savetxt(str(tag) + '/STi.txt', ST, fmt='%.6f')\n\tnp.savetxt(str(tag) + '/Si.txt', S1, fmt='%.6f')\n\tnp.savetxt(str(tag) + '/Sij.txt', S2, fmt='%.6f')\n\n\tdf_ST = pd.DataFrame(data=ST, columns=index_i)\n\tdf_S1 = pd.DataFrame(data=S1, columns=index_i)\n\tdf_S2 = pd.DataFrame(data=S2, columns=index_ij)\n\n\t# gs = grsp.GridSpec(2, 2)\n\t# fig = plt.figure(figsize=(2*8.27, 4*11.69/3))\n\t# ax0 = fig.add_subplot(gs[0, 0])\n\t# ax1 = fig.add_subplot(gs[0, 1])\n\t# ax2 = fig.add_subplot(gs[1, :])\n\t# sns.boxplot(ax=ax0, data=df_S1)\n\t# sns.boxplot(ax=ax1, data=df_ST)\n\t# sns.boxplot(ax=ax2, data=df_S2)\n\t# ax0.set_ylim(0, 1)\n\t# ax0.set_title('First-order effect', fontweight='bold', fontsize=12)\n\t# ax0.set_xticklabels(ax0.get_xticklabels(), rotation=45, horizontalalignment='right')\n\t# ax1.set_ylim(0, 1)\n\t# ax1.set_title('Total effect', fontweight='bold', fontsize=12)\n\t# ax1.set_xticklabels(ax1.get_xticklabels(), rotation=45, horizontalalignment='right')\n\t# ax2.set_ylim(0, 1)\n\t# ax2.set_title('Second-order effect', fontweight='bold', fontsize=12)\n\t# ax2.set_xticklabels(ax2.get_xticklabels(), rotation=45, horizontalalignment='right')\n\t# plt.savefig('si_distr_salib_{}_{}_2.png'.format(N, etichette[lol]))\n\nif __name__ == '__main__':\n\tmain()\n\n","repo_name":"DamiFass/GP-GSA","sub_path":"run_GSA.py","file_name":"run_GSA.py","file_ext":"py","file_size_in_byte":5630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"10553783390","text":"from __future__ import absolute_import, division, print_function\n\nimport codecs\nimport re\n\nimport numpy as np\n\nfrom six.moves import range\n\n\nclass Alphabet(object):\n def __init__(self, 
config_file):\n self._config_file = config_file\n self._label_to_str = []\n self._str_to_label = {}\n self._size = 0\n with codecs.open(config_file, 'r', 'utf-8') as fin:\n for line in fin:\n if line[0:2] == '\\\\#':\n line = '#\\n'\n elif line[0] == '#':\n continue\n self._label_to_str += line[:-1] # remove the line ending\n self._str_to_label[line[:-1]] = self._size\n self._size += 1\n\n def string_from_label(self, label):\n return self._label_to_str[label]\n\n def label_from_string(self, string):\n try:\n return self._str_to_label[string]\n except KeyError as e:\n raise KeyError(\n '''ERROR: Your transcripts contain characters which do not occur in data/alphabet.txt! Use util/check_characters.py to see what characters are in your {train,dev,test}.csv transcripts, and then add all these to data/alphabet.txt.'''\n ).with_traceback(e.__traceback__)\n\n def decode(self, labels):\n res = []\n for label in labels:\n res.append(label)\n return res\n\n # def decode(self, labels):\n # return labels\n\n def size(self):\n return self._size\n\n def config_file(self):\n return self._config_file\n\n\ndef text_to_char_array(original, alphabet):\n r\"\"\"\n Given a Python string ``original``, remove unsupported characters, map characters\n to integers and return a numpy array representing the processed string.\n \"\"\"\n return np.asarray([alphabet.label_from_string(c) for c in original])\n\n\n# The following code is from: http://hetland.org/coding/python/levenshtein.py\n\n# This is a straightforward implementation of a well-known algorithm, and thus\n# probably shouldn't be covered by copyright to begin with. But in case it is,\n# the author (Magnus Lie Hetland) has, to the extent possible under law,\n# dedicated all copyright and related and neighboring rights to this software\n# to the public domain worldwide, by distributing it under the CC0 license,\n# version 1.0. This software is distributed without any warranty. For more\n# information, see \n\ndef levenshtein(a, b):\n \"Calculates the Levenshtein distance between a and b.\"\n n, m = len(a), len(b)\n if n > m:\n # Make sure n <= m, to use O(min(n,m)) space\n a, b = b, a\n n, m = m, n\n\n current = list(range(n+1))\n for i in range(1, m+1):\n previous, current = current, [i]+[0]*n\n for j in range(1, n+1):\n add, delete = previous[j]+1, current[j-1]+1\n change = previous[j-1]\n if a[j-1] != b[i-1]:\n change = change + 1\n current[j] = min(add, delete, change)\n\n return current[n]\n\n# Validate and normalize transcriptions. 
Returns a cleaned version of the label\n# or None if it's invalid.\n\n\ndef validate_label(label):\n # For now we can only handle [a-z ']\n if re.search(r\"[0-9]|[(<\\[\\]&*{]\", label) is not None:\n return None\n\n label = label.replace(\"-\", \"\")\n label = label.replace(\"_\", \"\")\n label = label.replace(\".\", \"\")\n label = label.replace(\",\", \"\")\n label = label.replace(\"?\", \"\")\n label = label.replace(\"\\\"\", \"\")\n label = label.strip()\n label = label.lower()\n\n return label if label else None\n","repo_name":"Chung-I/tsm-rnnt","sub_path":"stt/data/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":3603,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"60"} +{"seq_id":"44682235711","text":"from charms.reactive import (\n is_state, set_state,\n when, when_not,\n hookenv, hook,\n)\nfrom charmhelpers.core.hookenv import config\nfrom subprocess import check_call\nfrom charmhelpers.fetch import apt_install, apt_update\nfrom rubylib import bundle\nimport os\nimport stat\nimport pwd\n\ntry:\n from Crypto.PublicKey import RSA\nexcept ImportError:\n apt_update()\n apt_install('python3-crypto')\n from Crypto.PublicKey import RSA\n\n@when('ruby.available')\ndef setup_tests():\n apt_install(['git'])\n if not os.path.exists(config('app-path')):\n clone()\n bundle('install')\n gen_sshkey()\n\ndef clone():\n cmd = \"git clone {} {}\".format('https://github.com/hardening-io/tests-ssh-hardening.git', config('app-path'))\n res = check_call(cmd, shell=True)\n if res != 0:\n status_set('error', 'has a problem with git, try `resolved --retry')\n sys.exit(1)\n\ndef gen_sshkey():\n key = RSA.generate(2048)\n priv_key_file = '/home/ubuntu/.ssh/id_rsa'\n pub_key_file = '/home/ubuntu/.ssh/id_rsa.pub'\n uid = pwd.getpwnam(\"ubuntu\").pw_uid\n if os.path.exists(priv_key_file):\n return\n with open(priv_key_file, 'w') as content_file:\n os.chmod(priv_key_file, stat.S_IREAD)\n os.chown(priv_key_file, uid, -1)\n content_file.write(key.exportKey('PEM').decode('utf-8'))\n pubkey = key.publickey()\n with open(pub_key_file, 'w') as content_file:\n os.chown(pub_key_file, uid, -1)\n content_file.write(pubkey.exportKey('OpenSSH').decode('utf-8'))\n","repo_name":"CanonicalLtd/hardening-ssh-tests","sub_path":"reactive/hardening-ssh-tests.py","file_name":"hardening-ssh-tests.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"37479308793","text":"import pandas as pd\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\nfrom random import randint\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.preprocessing import StandardScaler\nfrom pandas.plotting import register_matplotlib_converters\nregister_matplotlib_converters()\n\nimport techindic.indicator as ti\nimport utils.stats as stats\nimport econometric.utils as model\nimport rl.objective_function as of\nimport rl.policy as policy\n#import net.variational_autoencoder as vae\n\nfilename=\"./model_backups\"\n#stockfile=\"./btc_year_summary.csv\"\nstockfile=\"./bitcoin.csv\"\n\n# Parse arguments from command line\nparser = argparse.ArgumentParser(description='Reinforcement Learning Model.')\nparser.add_argument('-e', '--epochs', dest='epochs', type=int, default=500, help='The number of epochs to train the model')\nparser.add_argument('-t', '--timesteps', dest='past_timesteps', type=int, default=5, 
help='Numbers of past data to take into account')\nparser.add_argument('-l', '--learning_rate', dest='learning_rate', type=float, choices=[0.1, 0.01, 0.001], default=0.01, help='Learning rate')\nparser.add_argument('--objective_function', dest='objective_function', choices=['Sharpe', 'Dsharpe'], default='Dsharpe', help='Objective function to maximize')\nparser.add_argument('--weights_init', dest='weights_init', choices=['Ones', 'Xavier'], default='Ones', help='How to initialize features weights')\nparser.add_argument('-v', '--verbose', action='store_true', help='Display more log messages')\nparser.add_argument('--version', action='version', version='1.0')\n#parser.add_argument('Windows Length', dest='accumulate', action='store_const', const=sum, default=max, help='sum the integers (default: find the max)')\n\nargs = parser.parse_args()\n\n# Hyperparameter setting\nepochs = args.epochs\npast_timesteps = args.past_timesteps\nlearning_rate = args.learning_rate\nverbose = args.verbose\nobjective_function = args.objective_function\n\nprint(\"Chosen hyperparameters: epochs:{}, windows:{}, learning rate:{}, objective function:{}, weights init scheme:{}, verbose:{}\".format(epochs, past_timesteps, learning_rate, objective_function, args.weights_init, verbose))\n\n# Import data, replace unwanted coma for float numbers, and convert to numeric number\ndata = pd.read_csv(stockfile)\n\n# No need for the new dataset\ndata.iloc[:, 1:].replace(',','', regex=True, inplace=True)\ndata_ordered = data.iloc[::-1].reset_index(drop=True)\ndata_processed = pd.concat([data_ordered.iloc[:,0], data_ordered.iloc[:, 1:].apply(pd.to_numeric, errors='coerce')], axis=1)\n\n# So we need this in ordrer to keep the next lines unchanged\n#data_processed = data\n\nprint(data.head())\n\n# Add Technical Indicators as features\nrsi = ti.RSIInidcator(data_processed['Fermeture']).rsi()\nmacd = ti.MACDIndicator(data_processed['Fermeture']).macd()\nbb = ti.BollingerBands(data_processed['Fermeture']).bollinger()\nma = ti.MovingAverage(data_processed['Fermeture']).movingAverage()\nwr = ti.WilliamsRIndicator(data_processed['Haut'], data_processed['Bas'], data_processed['Fermeture']).wr()\n\ndata_with_TI = pd.concat([data_processed, rsi, macd, bb, ma, wr], axis=1)\n\n# Available Features\n# [['Ouverture', 'Haut', 'Bas', 'ma7', 'ma21', '26ema', '12ema', 'MACD', 'upper_band', 'lower_band', 'ema', 'wr']]\n\n#selected_feature = data_with_TI[['Fermeture', 'Ouverture', 'VWP', 'MACD', 'ema', 'wr']]\nselected_feature = data_with_TI[['Fermeture', 'Ouverture','MACD', 'ema', 'wr']]\n\n# Numbers of selected features * (past_timesteps + 1 (because index start at 0)) + 1=Last_position\nnb_features = (past_timesteps + 1) * selected_feature.shape[1] + 1\n\n\n# Parameters initialization using Xavier initialization for tanh activation function\n# Or just using basic all ones initialization\nif args.weights_init == \"Xavier\":\n # normal distribution with mean=0 and variance= sqrt(2/(fan-in+fan-out))\n xavier_weights=np.random.normal(0, np.sqrt(2/(nb_features+1)), nb_features)\n theta=xavier_weights.flatten()\nelif args.weights_init == \"Ones\":\n theta = np.ones(nb_features)\n\n# Initialize sharpe ratios\nsharpes = np.zeros(epochs)\n\n# Split train/test sets, without shuffle as it is a time serie.\nselected_feature_train, selected_feature_test = train_test_split(selected_feature, test_size=0.2, shuffle=False)\n# With Scaling\nscaler = StandardScaler() # Or scaler = MinMaxScaler()\nselected_feature_scaled = 
scaler.fit_transform(selected_feature)\nselected_feature_scaled = pd.DataFrame(selected_feature_scaled, columns=selected_feature.columns)\nselected_feature_train_scaled, selected_feature_test_scaled = train_test_split(selected_feature_scaled, test_size=0.2, shuffle=False)\n\n#vae_data = vae.VariationalAutoencoder(selected_feature_train_scaled, selected_feature_test_scaled).generate()\n\n# Train the model\nfor i in range(epochs):\n grad, sharpe, positions, returns = policy.DirectReinforcementLearning(selected_feature_train_scaled, past_timesteps, nb_features, theta).gradientAscent(objective_function)\n theta = theta + grad * learning_rate\n if verbose:\n print(\"epochs:{} -> Gradients are:{} - Params:{}\".format(i, grad, theta))\n print(\"Sharpe: {}\".format(sharpe))\n sharpes[i] = sharpe\n\nprint(\"------- Training is over -------\")\nwith open(filename, 'a+') as f:\n f.write(\"epochs:{} \\nwindows:{} \\nlearning_rate:{} \\nobjective_function:{} \\nweights_init_scheme:{} \\ntheta:{}\\n\\n\".format(epochs, past_timesteps, learning_rate, objective_function, args.weights_init, theta))\n f.close()\nprint(\"------- Model Hyperparameters and parameters saved -------\")\n\n# Display sharpe ratio improvements over epochs\nif verbose:\n plt.figure()\n pd.Series(sharpes).plot()\n plt.legend(['Sharpe ratio'])\n plt.show()\n'''\n# parameters weight after 500 epochs for btc/usd min. VERY SUCCESSFULL\ntheta = [1.14979244, 1.15079199, 1.1502789, 0.80698264, 1.15029399, 0.33909104, 1.14928899, 1.14973558, 1.14949215, 0.76181637, 1.14962482, 0.29303071, 1.14896727, 1.1492582, 1.14909631, 0.72275713, 1.14918728, 0.28653372, 1.14881231, 1.14896827, 1.14888284, 0.69221627, 1.14893813, 0.30126941, 1.14928387, 1.14895654, 1.14912066, 0.68105205, 1.14916945, 0.37206205, 1.14969238, 1.14940828, 1.14954657, 0.68395287, 1.1495189, 0.49042269, 1.45383504]\n'''\n\n# Adjust indexes for the test set\nselected_feature_test_scaled.reset_index(drop=True, inplace=True)\nselected_feature_test.reset_index(drop=True, inplace=True)\n\n\n# Add an ARIMA prediction as a feature. For test set only as ARIMA is used for predicting test set's closed price\n# Warning: add too much time to be viable, in an online manner.\n# Need to find a way to add it. 
Currently we can't because we have not a theta for it.\nadd_arima = False\nif(add_arima):\n y_train = selected_feature_train[\"Fermeture\"]\n y_test = selected_feature_test[\"Fermeture\"]\n arima = model.FitARIMA(y_train, y_test).get_arima_values()\n arima_scaled = scaler.fit_transform(arima)\n arima_scaled_series = pd.Series( (v[0] for v in arima_scaled) )\n print(\"Arima {}\".format(arima_scaled_series))\n arima_scaled_series.reset_index(drop=True, inplace=True)\n\n # Add scaled ARIMA values to feature dataframe\n #selected_feature_test_scaled['ARIMA'] = arima_scaled_series\n selected_feature_test_scaled = selected_feature_test_scaled.assign(ARIMA=arima_scaled_series.values)\n print(selected_feature_test_scaled)\n\n# Run the model with the found parameters, on the test set\ngrad, sharpe, positions, returns = policy.DirectReinforcementLearning(selected_feature_test_scaled, past_timesteps, nb_features, theta).gradientAscent(objective_function)\n\n# Get profits to estimate wealth\nadd_profits, add_returns, _= of.Returns(selected_feature_test_scaled['Fermeture'], pd.Series(positions).round(), 0.025).getAdditiveProfits()\n\n# Actualise parameters\ntheta = theta + grad * learning_rate\n\n# Plot the changing positions\n# Separate buy signals from sell signals\nchanging_positions = (pd.Series(positions).round()).diff()\nchanging_positions.fillna(0, inplace=True)\nxbuy = [i for i in range(len(changing_positions)) if changing_positions[i] > 0]\nxsell = [i for i in range(len(changing_positions)) if changing_positions[i] < 0]\nybuy = []\nysell = []\nfor i in range(len(selected_feature_test_scaled['Fermeture'])):\n for j in range(len(xbuy)):\n if i == xbuy[j]:\n ybuy.append(selected_feature_test_scaled['Fermeture'].iloc[i])\n\n for k in range(len(xsell)):\n if i == xsell[k]:\n ysell.append(selected_feature_test_scaled['Fermeture'].iloc[i])\n\n# Plot the results\nplt.figure()\nplt.plot(pd.Series(add_returns).cumsum(), label=\"RLModel Add returns\", linewidth=1)\nplt.plot((selected_feature_test_scaled['Fermeture'].diff()).cumsum(), label=\"Buy and Hold\", linewidth=1)\nplt.plot(selected_feature_test_scaled['Fermeture'], label=\"Closing Price\", linewidth=1)\nplt.scatter(xbuy, ybuy, s=7, c='red', label=\"Buy Signal\")\nplt.scatter(xsell, ysell, s=7, c='blue', label=\"Sell Signal\")\nplt.xlabel('Ticks')\nplt.ylabel('Cumulative Returns');\nplt.legend()\nplt.title(\"RL Model vs. 
Buy and Hold - Test Data\");\nplt.show()\n","repo_name":"CarliKevn/Deep-Learning","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"35239800406","text":"#!/usr/bin/python3\n\"\"\"\nSend a POST request to the passed URL with an email as a parameter\n\"\"\"\nimport urllib.request\nimport urllib.parse\nimport sys\n\narg = sys.argv\nif __name__ == \"__main__\":\n url = arg[1]\n value = {}\n value['email'] = arg[2]\n data = urllib.parse.urlencode(value)\n data = data.encode('ascii')\n req = urllib.request.Request(url, data)\n with urllib.request.urlopen(req) as response:\n html = response.read()\n print(html.decode('utf-8'))\n","repo_name":"carvanino/alx-higher_level_programming","sub_path":"0x11-python-network_1/2-post_email.py","file_name":"2-post_email.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"15924726225","text":"import requests\nimport datetime\nfrom bs4 import BeautifulSoup\nfrom data_sources import source\n\n@source('live', name='Canada')\ndef import_data():\n for result in import_news():\n yield result\n\n for result in import_gov():\n yield result\n\ndef import_news():\n import string\n sourceURL = \"https://www.ctvnews.ca/health/coronavirus/tracking-every-case-of-covid-19-in-canada-1.4852102\"\n jsonURL = \"https://stats.ctvnews.ca/covidDapi/getAllCovidData\"\n\n # referer is required for authorization\n headers = {\n \"referer\": \"https://www.ctvnews.ca/health/coronavirus/tracking-every-case-of-covid-19-in-canada-1.4852102\"\n }\n\n datapoints = []\n content = requests.get(jsonURL, headers=headers, timeout=10).json()\n for row in content:\n entryDate = datetime.datetime.strptime(row['date'], \"%Y-%m-%d\").date()\n provinces = row['data']\n for data in provinces:\n province = string.capwords(data['provinceLabel'])\n\n total = data['totalCases']\n recovered = data.get('recoveries', None)\n deaths = data.get('deaths', None)\n tests = data.get('totalTests', None)\n yield {\n \"entry_date\": entryDate,\n \"country\": \"Canada\",\n \"province\": province,\n \"total\": total,\n \"recovered\": recovered,\n \"deaths\": deaths,\n \"tests\": tests\n }\n\ndef import_gov():\n url = \"https://www.canada.ca/en/public-health/services/diseases/2019-novel-coronavirus-infection.html\"\n soup = BeautifulSoup(requests.get(url, timeout=10).text, 'html.parser')\n stats = soup.select(\"#dataTable tbody tr\")\n datapoints = []\n for row in stats:\n tds = row.select(\"td\")\n province = tds[0].text\n total = int(tds[1].text.replace(\",\", \"\"))\n deaths = int(tds[3].text.replace(\",\", \"\"))\n if province != \"Canada\":\n yield {\n 'country': \"Canada\",\n 'province': province,\n 'total': total,\n 'deaths': deaths\n }\n\nif __name__ == \"__main__\":\n import_data()","repo_name":"myfatemi04/Corona-Vision","sub_path":"data_collection/data_sources/north_america/canada.py","file_name":"canada.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"42471591100","text":"import argparse\nfrom typing import List, Dict, Any\n\n\ndef add(x, y):\n return x+y\n\n\ndef mul(x, y):\n return x*y\n\n\nopcodes: Dict[int, Any] = dict(((1, add), (2, mul), (99, None)))\n\n\ndef sol1(data: List[int]) -> int:\n i = 0\n size = len(data)\n while i < size:\n opcode = data[i]\n 
method = opcodes[opcode]\n if method is not None:\n data[data[i + 3]] = method(data[data[i + 1]], data[data[i + 2]])\n else:\n break\n i += 4\n return data[0]\n\n\ndef sol2(data: List[str]) -> int:\n expected_data = 19690720\n for noun in range(100):\n for verb in range(100):\n new_data = [int(x) for x in data[0].split(',')]\n new_data[1] = noun\n new_data[2] = verb\n if expected_data == sol1(new_data):\n return 100 * noun + verb\n return -1\n\n\ndef get_input(filename: str) -> List[str]:\n with open(filename) as f:\n lines = f.readlines()\n return lines\n\n\ndef main() -> int:\n parser = argparse.ArgumentParser()\n parser.add_argument('--filename', default='input.txt')\n args = parser.parse_args()\n data = get_input(args.filename)\n new_data = [int(x) for x in data[0].split(',')]\n new_data[1] = 12\n new_data[2] = 2\n print(sol1(new_data))\n print(sol2(data))\n return 0\n\n\nif __name__ == '__main__':\n exit(main())\n","repo_name":"yoavcaspi/aoc2019","sub_path":"day2/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"23623140476","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport random\n\narr_1D = np.array([1,2,3,4])\n# print(arr_1D)\n# print(type(arr_1D))\n# print(arr_1D.ndim)\n\narr_2D = np.array([[1,2,3,4], [5,6,7,8]])\n# print(arr_2D)\n# print(type(arr_2D))\nprint(arr_2D[:, 2:3])\n\n# print(arr_2D.size)\n# print(arr_2D.shape)\n# print(arr_2D.dtype)\n\nones_arr = np.ones((4,6), dtype=int)\n# print(ones_arr)\n\nzero_arr = np.zeros((2,3), dtype=int)\n# print(zero_arr)\n\nempty_arr = np.empty((2,3))\n# print(empty_arr)\n\narange_arr = np.arange(1,17, 2)\n# print(my_arr)\n\nreshape_arr = np.arange(1,17).reshape(4,4)\n# print(reshape_arr)\n\nrevel_arr = np.arange(1,11).reshape(5,2).ravel()\n# print(revel_arr)\n\nlinespace_arr = np.linspace(1,11,12)\n# print(linespace_arr)\n\ntranspose_arr = arr_2D.transpose()\n# print(transpose_arr)\n\narr1 = np.arange(1,10).reshape(3,3)\narr2 = np.arange(1,10).reshape(3,3)\n\nadd = arr1 + arr2\nsub = arr1 - arr2\nmul = arr1 * arr2\nmatric_multiplication = arr1 @ arr2\n\nmax_digit = arr1.argmax(axis= 1)\n# print(max_digit)\n\nmini_digit = arr2.min(axis=0)\n# print(mini_digit)\n\nsum_of_arr = np.sum(arr1)\n# print(sum_of_arr)\n\nnp.mean(arr1)\nnp.sqrt(arr1)\nnp.std(arr1)\nnp.exp(arr1)\nnp.log(arr2)\nnp.log10(arr2)\n\narr_slicing = np.arange(1,101).reshape(10,10)\n\n# print(arr_slicing[:, 0:1])\n# print(arr_slicing[6,9])\n# print(arr_slicing[1:4, 1:4])\n# print(arr_slicing.shape)\n\nConnection_arr = np.concatenate((arr1, arr2), axis=1)\n# print(Connection_arr)\n\nsplit_arr = np.array([1,2,3,4,5])\n# print(np.split(split_arr, [1,3]))\n\nx_value = np.arange(0,3*np.pi, 0.1)\ny_sin = np.sin(x_value)\n\n# plt.plot(x_value, y_sin)\n# plt.show()\n\ny_cos = np.cos(x_value)\n# plt.plot(x_value, y_cos)\n# plt.show()\n\ny_tan = np.tan(x_value)\n# plt.plot(x_value,y_tan)\n# plt.show()\n\nrandom_arr = np.random.random((3,3))\n# print(random_arr)\n\nrandint_arr = np.random.randint(1,100, (5,4))\n# print(randint_arr)\n\nx = [1,2,3,4,5,6]\nchoice_arr = np.random.choice(x)\n# print(choice_arr)\n\npermutation_arr = np.random.permutation(x)\n# print(permutation_arr)\n\n# if we want to print same number than we'll use seed()\n\nnp.random.seed(10)\nx = [1,2,3,4,5,6]\nchoice_arr = np.random.choice(x)\n# print(choice_arr)\n\nperson_name = 'Ali Rehan Codes'\nstr1 = 'Hello'\n\n# print(np.char.add(person_name, str1))\n# 
print(np.char.center(str1, 60, fillchar=\"*\"))\n\nnp.char.lower(str1)\nnp.char.upper(str1)\nnp.char.title(str1)\nnp.char.split(person_name)\n\nstr2 = 'dmy'\n# print(np.char.join(':', str2))\n\n","repo_name":"AliRehanCodes/Python-NumPy","sub_path":"NumPy.py","file_name":"NumPy.py","file_ext":"py","file_size_in_byte":2415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"31799312086","text":"import re\nimport json\nimport pickle\n\n#NOTE: Not very readable for the basic people U_U\n\n# Save python objects in binary format\n# !IMPORTANT: 'movies' can relate to series as well... That's IMDB folks!\n# - obj/acts.pkl: Dict where key is an actor, and the values the movies\n# - obj/directs.pkl: Dict where key is director, and the values the movies\n# - obj/movies_actors.pkl: All actors which participated in the movies from acts AND directs\n# - obj/movies_directors.pkl: All directors which participated in the movies from acts AND directs\ndef save_obj(obj, name):\n with open('obj/'+ name + '.pkl', 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\n\ndef load_obj(name):\n with open('obj/' + name + '.pkl', 'rb') as f:\n return pickle.load(f)\n\nclass Act(object):\n def wsearch(w):\n return re.compile(r'\\b({0})\\b'.format(w), flags=re.IGNORECASE).search\n\n def __init__(self, first, last):\n self.f = first\n self.l = last\n\n def regexp(self):\n return (Act.wsearch(self.f), Act.wsearch(self.l))\n\n def __repr__(self):\n return f\"f: {self.f} l: {self.l}\"\n\n## Finding Actors Information!\n#act_n = [\"Uma Thurman\", \"Harvey Keitel\", \"Bill Murray\", \"Frances McDormand\"]\n#gen_st = \"acts\"\nact_n = [\"Quentin Tarantino\", \"Wes Anderson\"]\ngen_st = \"directs\"\n#act_man_f = \"actors.list\"\n#act_woman_f = \"actresses.list\"\nact_man_f = \"directors.list\"\nact_woman_f = \"t\"\nact_l = [(lambda n: Act(n[0], n[1]))([t.lower() for t in s.split()]) for s in act_n]\nact_d = {}\nmov_d = {}\n\ndef st_op(t):\n s = t\n p = s.find(\"(\")\n count = 0\n while p < len(s) and p + 1 < len(s) and not s[p+1].isdigit() and not s[p+1] == '?':\n tlist = list(s)\n tlist[p] = '_'\n s = \"\".join(tlist)\n p = s.find(\"(\")\n count += 1\n if count > 20:\n return len(t)\n return p\n\ndef find_person_movie(l, f):\n if l and l[0] != '\\t':\n for i, a in enumerate(act_l):\n if all(x(l[:l.find('\\t')].replace(\",\",\"\")) for x in a.regexp()) and not act_d.get(act_n[i], None):\n cs = [l[l.find('\\t'):st_op(l)].strip().replace(\"\\t\",\"\").replace(\"\\n\",\"\")]\n l = f.readline()\n while l and l[0] == '\\t':\n mv = l.strip().replace(\"\\t\", \"\").replace(\"\\n\",\"\")\n cs.append(mv[:st_op(mv)].strip())\n l = f.readline()\n act_d[act_n[i]] = cs\n return l\n\ndef act_d_builder(l, f):\n if l and l[0] != '\\t':\n print(l[:l.find('\\t')].strip())\n if not act_d.get(l[:l.find('\\t')].strip(), None):\n idx = l[:l.find('\\t')].strip()\n if not list(filter(None, idx)):\n return\n cs = [l[l.find('\\t'):st_op(l)].strip().replace(\"\\t\",\"\").replace(\"\\n\",\"\")]\n l = f.readline()\n while l and l[0] == '\\t':\n mv = l.strip().replace(\"\\t\", \"\").replace(\"\\n\",\"\")\n cs.append(mv[:st_op(mv)])\n l = f.readline()\n act_d[idx] = cs\n return l\n\ndef mov_actors_builder(l, f, movd):\n if l and l[0] != '\\t':\n idx = l[:l.find('\\t')].strip()\n if not list(filter(None, idx)):\n return\n cs = [l[l.find('\\t'):st_op(l)].strip().replace(\"\\t\",\"\").replace(\"\\n\",\"\")]\n l = f.readline()\n while l and l[0] == '\\t':\n mv = l.strip().replace(\"\\t\", 
\"\").replace(\"\\n\",\"\")\n mv = mv[:st_op(mv)].strip()\n if mv in movd:\n if not mov_d.get(mv, None):\n mov_d[mv] = set()\n mov_d[mv].add(idx)\n else:\n mov_d[mv].add(idx)\n if(mv == \"Plain Pleasures\"):\n print(f\"Adicionado em {mv} = {idx}\")\n l = f.readline()\n return l\n\ndef build_movieDict():\n actd = load_obj(\"acts\")\n drcd = load_obj(\"directs\")\n movd = set()\n for block in list(actd.values()) + list(drcd.values()):\n for mov in block:\n movd.add(mov)\n return movd\n\ndef print_dict(name):\n \"DON'T INCLUDE THE .pkl IN THE 'name' ARG!!\"\n t = load_obj(name)\n for k in t.keys():\n print(k)\n for m in t[k]:\n print(f\"\\t\\t{m}\")\n print(\"\")\n\ndef main():\n print_dict(\"directs\")\n exit()\n movd = build_movieDict()\n with open(act_man_f, \"r\", encoding='latin-1') as m, open(act_woman_f, \"r\", encoding='latin-1') as w:\n c = 0\n lm = m.readline()\n lw = w.readline()\n while lm or lw:\n lm, lw = find_person_movie(lm, m), find_person_movie(lw, w)\n lm, lw = m.readline(), w.readline()\n if c%50000 == 0:\n print(f\"{c}º iteration...\")\n #input(\"continua...\")\n c+=1\n\n #save_obj(act_d,\"directs\")\nif __name__ == '__main__':\n main()\n","repo_name":"robotenique/movies-ontology","sub_path":"imdbAnalyzer.py","file_name":"imdbAnalyzer.py","file_ext":"py","file_size_in_byte":4868,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"60"} +{"seq_id":"21401711484","text":"with open('./day1_input.txt', 'r') as file:\r\n\tdata = []\r\n\tfor line in file:\r\n\t\t\tif line[0] == '+':\r\n\t\t\t\tdata.append(int(line[1:]))\r\n\t\t\tif line[0] == '-':\r\n\t\t\t\tdata.append(-int(line[1:]))\r\n\tfreq = 0\r\n\tseen_freqs = [0]\r\n\titteration = 0\r\n\twhile True:\r\n\t\t#print('Itteration: ' + str(itteration))\r\n\t\tif(itteration%1 == 0):\r\n\t\t\tprint(str(itteration) + ', ' + str(len(seen_freqs)))\r\n\t\tfor change in data:\r\n\t\t\tfreq += change\r\n\r\n\t\t\tif freq in seen_freqs:\r\n\t\t\t\tprint('Repeated frequency: ' + str(freq))\r\n\t\t\t\tquit()\r\n\t\t\telse:\r\n\t\t\t\tseen_freqs.append(freq)\r\n\t\t\t\t#print(len(seen_freqs))\r\n\r\n\t\titteration += 1\r\n\r\n\t\tprint(freq)\r\n\t\t#print(seen_freqs)\r\n","repo_name":"Emil-IT/AoC18","sub_path":"day1.py","file_name":"day1.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"28464629212","text":"import time\nfrom dotenv import dotenv_values\nfrom autockt_client import (\n start as start_client,\n Config,\n)\nfrom autockt_shared import auto_ckt_sim_hdl21, OpAmpInput, auto_ckt_sim\n\nENABLE_HTTPS = True\n\n\ndef main():\n \"\"\"\n Not picked up by pytest\n \"\"\"\n\n # Load the .env file\n env = dotenv_values()\n\n # And get the server URL\n THE_SERVER_URL = env.get(\"THE_SERVER_URL\", None)\n if not THE_SERVER_URL:\n raise ValueError(\"THE_SERVER_URL not set in .env file\")\n cfg = Config(server_url=\"34.83.44.225\", enable_https=ENABLE_HTTPS)\n\n start_client(cfg)\n\n to_test = OpAmpInput(3, 3, 3, 3, 3, 3, 1e-12)\n\n start_time = time.time()\n auto_ckt_sim(to_test)\n end_time = time.time()\n print(\"total time (auto_ckt_sim): \" + str(end_time - start_time))\n\n start_time = time.time()\n auto_ckt_sim_hdl21(to_test)\n end_time = time.time()\n print(\"total time (auto_ckt_sim_hdl21): \" + str(end_time - start_time))\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"BWRC-AMS-ML-Discovery/BwrcAmsMlDiscovery","sub_path":"scripts/speed_benchmark.py","file_name":"speed_benchmark.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"86272167792","text":"import tenpy.utility.operators as op\r\nimport math\r\n\r\nclass CreateVector:\r\n\r\n def __init__(self, info = []):\r\n self.info = info\r\n\r\n########################################################################################################################\r\n### VECTOR PROPERTIES\r\n########################################################################################################################\r\n\r\n #Returns value of vector as an array\r\n def val(self):\r\n return self.info\r\n\r\n #Returns magnitude of vector\r\n def mag(self):\r\n mag = 0\r\n for i in range(len(self.info)):\r\n mag += math.pow(self.info[i], len(self.info))\r\n return math.sqrt(mag)\r\n\r\n #Returns angle of vector\r\n def angle(self):\r\n self.x = self.info[0]\r\n self.y = self.info[1]\r\n if len(self.info) > 2:\r\n self.z = self.info[2]\r\n\r\n if len(self.info) == 2:\r\n if (self.x < 0) and (self.y > 0):\r\n return math.pi + math.atan(self.y/self.x)\r\n elif (self.x < 0) and (self.y < 0):\r\n return math.pi - math.atan(self.y/self.x)\r\n elif (self.x > 0) and (self.y < 0):\r\n return 2*math.pi + math.atan(self.y/self.x)\r\n elif (self.x > 0) and (self.y > 0):\r\n return math.atan(self.y/self.x)\r\n elif (self.x > 0) and (self.y == 0):\r\n return 0.0\r\n elif (self.x < 0) and (self.y == 0):\r\n return math.pi\r\n elif (self.x == 0) and (self.y > 0):\r\n return math.pi/2\r\n elif (self.x == 0) and (self.y < 0):\r\n return 3*math.pi/2\r\n else:\r\n return math.atan(self.y/self.x)\r\n elif len(self.info) == 3:\r\n pass\r\n else:\r\n return \"Error: Tenpy does not support non-two dimensional vector angles\"\r\n\r\n def angle_between(self, other):\r\n angle = math.acos(self.dot_prod(other)/(self.mag()*other.mag()))\r\n return angle\r\n\r\n########################################################################################################################\r\n### GENERAL VECTOR EVALUATION\r\n########################################################################################################################\r\n\r\n # Operates on vectors by element\r\n def element_eval(self, other=[], sign=\"+\"):\r\n if len(self.info) == len(other.info):\r\n result = []\r\n for i in range(len(self.info)):\r\n result.append(op.get_operator_fn(sign)(self.info[i], other.info[i]))\r\n return CreateVector(result)\r\n else:\r\n return \"Vector Not Operatable\"\r\n\r\n########################################################################################################################\r\n### VECTOR MULTIPLICATION\r\n########################################################################################################################\r\n\r\n def dot_prod(self, other):\r\n prod = 0\r\n if len(self.info) == len(other.info):\r\n for i in range(len(self.info)):\r\n prod += self.info[i] * other.info[i]\r\n else:\r\n return \"Error: Vector Length Mismatch\"\r\n return prod\r\n\r\n def cross_prod(self, other):\r\n prod = []\r\n\r\n a = self.info\r\n b = other.info\r\n if len(self.info) == len(other.info):\r\n if len(self.info) == 3 and len(other.info) == 3:\r\n #angle = math.acos(self.dot_prod(other)/self.mag()*other.mag())\r\n prod.append(a[1]*b[2]-a[2]*b[1])\r\n prod.append(a[2]*b[0]-a[0]*b[2])\r\n 
prod.append(a[0]*b[1]-a[1]*b[0])\r\n else:\r\n return \"Sorry, cross product is only applicable for 3d vectors\"\r\n else:\r\n return \"Error: Vector Length Mismatch\"\r\n return CreateVector(prod)\r\n\r\n########################################################################################################################\r\n### VECTOR RELATIONS\r\n########################################################################################################################\r\n\r\n def is_parallel(self, other):\r\n tolerance = 2.0e-5\r\n if (self.angle_between(other) < 0.0+tolerance):\r\n print(self.angle_between(other))\r\n return True\r\n else:\r\n return False\r\n\r\n def is_orthogonal(self, other):\r\n if (self.dot_prod(other) == 0):\r\n return True\r\n else:\r\n return False","repo_name":"SoftLocked/Tenpy","sub_path":"tenpy/vector.py","file_name":"vector.py","file_ext":"py","file_size_in_byte":4548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"19892667142","text":"class Solution(object):\n def hammingWeight(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n count = 0\n flag = 1\n\n while flag <= n:\n if n & flag:\n count += 1\n flag = flag << 1\n print(flag)\n print(count)\n\n return count\n\n def hammingWeight2(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n count = 0\n\n while n:\n count += 1\n n = (n - 1) & n\n\n return count\n\n\ns = Solution()\ns.hammingWeight(0o0000000000000000000000000001011)\n","repo_name":"Dooooooooo21/leetcode","sub_path":"offer/15.py","file_name":"15.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"41382942805","text":"import sys\nfrom PIL import Image\n\n# check for args\nif len(sys.argv) < 7:\n print(' ')\n print('Incorrect arguments provided.')\n exit()\n\n# get original\nimage_original = Image.open( sys.argv[1] )\n# make sure its rgba\nimage_original = image_original.convert('RGBA')\n\n# calc crop args\nx_start = int( sys.argv[3] )\ny_start = int( sys.argv[4] )\nx_end = int( sys.argv[3] ) + int( sys.argv[5] )\ny_end = int( sys.argv[4] ) + int( sys.argv[6] )\n\n# crop\nimage_crop = image_original.crop( ( x_start, y_start, x_end, y_end ) )\n\n# save crop\nimage_crop.save( sys.argv[2] )\nprint(' ')\nprint(f\"Cropped image stored as {sys.argv[2]}\")\n\n","repo_name":"Brugman/dgg-place-image-tools","sub_path":"crop.py","file_name":"crop.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"38762629787","text":"# indexation : 0 = lieu vide, 1 = arbre, 2 = arbre en feu : rajouter un état final \"brûlé\" ? (totalement optionnel ceci dit)\n# manque de jolies couleurs QQ (trouver LA colormap des familles) (ou coloriser tout ça après coup aussi ça marche bien)\n\n# cf. 
propagation directionnelle pour les bails ajoutés (vent selon une direction, humidité, phénomène d'étincelle)\n\nfrom matplotlib.pyplot import matshow\nimport matplotlib as mpl\nimport matplotlib.pylab as plt\nimport matplotlib.animation as animation\nimport random\nimport numpy as np\n\n# 1) CREATION DE LA MATRICE ASSOCIEE A LA FORET\n \ndef bool_p(p):\n # renvoie True avec une probabilite p et False avec une probabilité 1-p\n return random.random() <= p\n \ndef matgen(n,m,d):\n # cree une forest de dimensions n*m avec des arbres places aléatoirements à une densité d (facteur de percolation par site ici)\n forest = np.zeros((n,m))\n for i in range(n):\n for j in range(m):\n if bool_p(d):\n forest[i,j] = 1.\n else:\n forest[i,j] = 0.\n return forest\n \n \n# 2) FONCTIONS UTILES\n\np_l = 1. # proba de percolation par lien, modifiée par l'utilisateur en cas de besoin\nw_dir = 'null' # direction éventuelle du vent\n \n\"\"\" \ndef burn_spot_R(forest): # nécessite d'être fix... pb de float ?\n # met le feu à un arbre aléatoirement\n n,m = forest.shape\n x=random.randint(0,n)\n y=random.randint(0,m)\n while(forest[x,y] != 1.):\n x=random.randint(0,n)\n y=random.randint(0,m)\n forest[x,y] = 2.F\n return(forest) \"\"\"\n\ndef start_R(forest): # alternative, renvoie un couple (i,j) tel que la case soit verte (au hasard)\n n,m = forest.shape\n x=random.randint(0,n-1)\n y=random.randint(0,m-1)\n while(forest[x,y] != 1.):\n x=random.randint(0,n-1)\n y=random.randint(0,m-1)\n return(x,y)\n\ndef burn_spot(forest,i,j):\n # met le feu à l'arbre en position (i,j)\n if forest[i,j] == 1.:\n forest[i,j] = 2. # on fout le feu la case d'indice (i,j)\n return forest\n \n\"\"\" def neighbors_from(x,y,forest): # renvoie un voisinage à 4 ou 8 cases dans la mesure du possible, du lieu en question\n # n,m = forest.shape\n # return [(x, y + 1 if y + 1 < SIZE else 0), (x, y - 1), (x + 1 if x + 1 < SIZE else 0, y),(x - 1, y)]\n \"\"\"\n \ndef nextToFire(forest,i,j):\n # existence d'un arbre en feu au voisinage de l'arbre (i,j)\n # tenter d'utiliser neighbors_from plutôt ? revoir le voisinage\n \n n,m=forest.shape\n if forest[i,j] == 1.:\n if (i > 0 and forest[i - 1,j] == 2.):\n return True\n if (i < n - 1 and forest[i + 1,j] == 2.):\n return True\n if (j > 0 and forest[i,j - 1] == 2.):\n return True\n if (j < m - 1 and forest[i,j + 1] == 2.):\n return True\n return False\n \n\"\"\" Autre procédé de vérification pour les alentours...\n\n if forest[i,j] == 1.:\n for y in range(max(0,i-1),min(n,i+2)):\n if forest[y,j] == 2.:\n return True\n for x in range(max(0,j-1),min(m,j+2)):\n if forest[i,x] == 2.:\n return True\n return False\n \n\"\"\"\n \ndef propagateFire(forest):\n \"\"\"les arbres qui peuvent bruler autour d'un arbre en feu prennent feu\n rq. 
: on se place dans un cadre de percolation par site, la probabilité variante est celle de densité de placement, pas celle d'ouverture des liens dans L^d...\n ainsi, un arbre à proximité du feu prend systématiquement feu mskn\n pour tenir compte du phénomène de percolation par lien, implémenter une probabilité que le lien entre l'arbre en question et le voisin soit ouvert \"\"\"\n \n n,m=forest.shape # en pratique toujours une grille carrée pour simplifier les choses\n for i in range(n): # densité brute et méchante ici (percolation par site)\n for j in range(m):\n if nextToFire(forest,i,j):\n if random.random() <= p_l: # intervention des liens du graphe ouverts / fermés ici \n forest[i,j] = 2.\n return forest\n \ndef stillOnFire(forest):\n # vérifie s'il existe encore un arbre susceptible de cramer\n n,m=forest.shape\n for i in range(n):\n for j in range(m):\n if nextToFire(forest,i,j):\n return True\n return False\n \ndef burnForest(forest,i,j):\n # démarre le feu aux coordonnées (i,j) et propage le feu jusqu'à ce que ça ne soit plus possible\n forest = burn_spot(forest,i,j)\n while stillOnFire(forest):\n forest = propagateFire(forest)\n return forest\n\ndef count(forest,f): # compte le nombre de zones indicées f\n n,m = forest.shape\n C = 0 # compteur de zones \n for k in range(n):\n for i in range(m):\n if forest[k,i] == f:\n C+=1\n return C\n\n# 2.2) COEUR DU PROGRAMME ET STATS\n\ndef simulation(n,m,d,p,mode,w): # rajouter un argument p_l pour la percolation par lien ici (n,m = dimensions, d,p = densité et proba de lien, mode = animé ou non, w = direction du vent (ou non))\n\n p_l = p # percolation par lien définie\n w_dir = w\n forest = matgen(n,m,d) \n green = count(forest,1.) # nb d'arbres à l'état initial\n void = count(forest,0.) # nb de zones vide au départ\n \n i,j = start_R(forest)\n if (mode == 1):\n forest = animate(forest,i,j) # procedé de percolation animé\n elif (mode == 0): # pour effectuer des centaines d'essais, mieux vaux désactiver l'animation\n forest = animate_nofilm(forest,i,j) # sans image\n \n bnt = count(forest,2.) 
# nb d'arbres brûlés\n bntPA = bnt / (n*m) # proportion brûlé / total\n bntPR = bnt / green # proportion brûlé / nb d'arbres qu'il y avait au début mskn\n \n return([bnt,bntPA,bntPR]) # certaines expériences à d > 0.5 sont plutôt étranges...\n\n\ndef stat_density(n,m,p): # STAT : proba par sites (paramètre variant : densité d)\n d = 0.1 # d va évoluer de 0.1 à 0.95 par pas de 0.05\n result=[] # matrice qui contiendra les listes (proba, bnt, bntPA et PR)\n while (d <= 0.95):\n interm=[] # contient les résultats intermédiaires dont on va prendre la moyenne à la fin\n k = 1\n while (k <= 100): # on fait 100 essais par valeur de densité d\n interm.append(simulation(n,m,d,p,0))\n k+=1\n # ici, interm contient [[brulé à l'essai 1, proportions à l'essai 1],[brulé à l'essai 2, proportions à l'essai 2],...]\n bntM = 0\n bntPAM = 0\n bntPRM = 0\n for k in range(len(interm)):\n bntM += interm[k][0]\n bntPAM += interm[k][1]\n bntPRM += interm[k][2]\n bntM /= len(interm)\n bntPAM /= len(interm)\n bntPRM /= len(interm)\n \n result.append([d,bntM,bntPAM,bntPRM]) # result contient un tableau avec densité (variante), nb de brulés et proportions\n d += 0.05\n \n return(result)\n\n\"\"\" def stat_areasonfire(n,m,p): # STAT : en percolation par sites, évalue le nb de zones en feu en fonction du nb d'étapes\n d = 0.6 # d évolue cette fois de 0.6 à 0.9 par pas de 0.05\n result=[] # la liste va contenir cette fois le nombre d'arbres en feu à chaque étape en moyenne sur 20 essais cette fois\n while (d <= 0.95):\n interm=[]\n k = 1\n while (k <= 100):\n F = matgen(n,m,d)\n interm.append \"\"\"\n \n# 3) ANIMATION # ajouter cmap=plt.cm.nom_de_la_map pour les couleurs dans matshow\n \ndef animate(forest,i,j):\n fig = plt.figure() # nouvelle figure\n film = []\n # Initialisation\n forestOnFire = burn_spot(forest,i,j)\n film.append([matshow(forestOnFire, fignum=False, animated=True)])\n plt.draw()\n \n # Propagation\n while stillOnFire(forest):\n forestOnFire = propagateFire(burn_spot(forest,i,j))\n film.append([matshow(forestOnFire, fignum=False, animated=True)])\n plt.draw()\n \n # Animation\n ani = animation.ArtistAnimation(fig, film, interval=100, blit=True, repeat=False)\n \n plt.draw()\n plt.show()\n \n return(forest)\n \ndef animate_nofilm(forest,i,j): # pour les stats\n forestOnFire = burn_spot(forest,i,j)\n while stillOnFire(forest):\n forestOnFire = propagateFire(burn_spot(forest,i,j))\n \n return (forest)\n \n ","repo_name":"dylanankrah/fire-prediction-TIPE","sub_path":"Code source/sauvegardes/source principale (sauvegarde, propagation isotrope).py","file_name":"source principale (sauvegarde, propagation isotrope).py","file_ext":"py","file_size_in_byte":8312,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"42587209990","text":"from abc import ABC\n\nimport torch\nimport torch.nn as nn\n\n\nclass BimodalFusion(nn.Module, ABC):\n \"\"\"Bimodal fusion combines features from different modalities into\n a single tensor.\n\n The input modalities' feature tensors are expected to have matching\n sizes [N x C_1] and [N x C_2]. For residual fusion, we further\n require C_1 = C_2.\n\n By convention, the second features are fused into the first, main\n modality. 
This matters as the output format will match that of the\n main modality\n \"\"\"\n\n MODES = ['residual', 'concatenation', 'both', 'modality']\n\n def __init__(self, mode='residual', **kwargs):\n super(BimodalFusion, self).__init__()\n self.mode = mode\n if self.mode == 'residual':\n self.f = lambda a, b: a + b\n elif self.mode == 'concatenation':\n self.f = lambda a, b: torch.cat((a, b), dim=-1)\n elif self.mode == 'both':\n self.f = lambda a, b: torch.cat((a, a + b), dim=-1)\n elif self.mode == 'modality':\n self.f = lambda a, b: b\n else:\n raise NotImplementedError(\n f\"Unknown fusion mode='{mode}'. Please choose among \"\n f\"supported modes: {self.MODES}.\")\n\n def forward(self, x_main, x_mod):\n if x_main is None:\n return x_mod\n if x_mod is None:\n return x_main\n\n # If the x_mod is a sparse tensor, we only keep its features\n x_mod = x_mod if isinstance(x_mod, torch.Tensor) else x_mod.F\n\n # Update the x_main while respecting its format\n x_main = self.f(x_main, x_mod)\n\n return x_main\n\n def extra_repr(self) -> str:\n return f\"mode={self.mode}\"\n","repo_name":"drprojects/DeepViewAgg","sub_path":"torch_points3d/modules/multimodal/fusion.py","file_name":"fusion.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","stars":205,"dataset":"github-code","pt":"60"} +{"seq_id":"31436347534","text":"import pyopencl as cl\nimport pyopencl.array as cl_array\nfrom pyopencl.tools import SVMAllocator, SVMPool\nimport numpy as np\n\nn = 50000\na = np.random.rand(n).astype(np.float32)\nb = np.random.rand(n).astype(np.float32)\n\n\nctx = cl.create_some_context()\nqueue = cl.CommandQueue(ctx)\n\nalloc = SVMAllocator(ctx, alignment=0, queue=queue)\nalloc = SVMPool(alloc)\n\na_dev = cl_array.to_device(queue, a, allocator=alloc)\nb_dev = cl_array.to_device(queue, b, allocator=alloc)\ndest_dev = cl_array.empty_like(a_dev)\n\nprg = cl.Program(ctx, \"\"\"\n __kernel void sum(__global const float *a,\n __global const float *b, __global float *c)\n {\n int gid = get_global_id(0);\n c[gid] = a[gid] + b[gid];\n }\n \"\"\").build()\n\nknl = prg.sum\nknl(queue, a.shape, None, a_dev.data, b_dev.data, dest_dev.data)\n\nnp.testing.assert_allclose(dest_dev.get(), (a_dev+b_dev).get())\n","repo_name":"inducer/pyopencl","sub_path":"examples/demo_array_svm.py","file_name":"demo_array_svm.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":998,"dataset":"github-code","pt":"60"} +{"seq_id":"28959390151","text":"import numpy as np\nimport ADmetrics as adm\nimport tensorflow as tf\nfrom sklearn import metrics\nimport tensorflow_probability as tfp\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.datasets import make_classification\nfrom sklearn.model_selection import train_test_split\n\ntfd = tfp.distributions\ntfpl = tfp.layers\ntfk = tf.keras\ntfkl = tf.keras.layers\n\ntf.keras.backend.set_floatx('float64')\n\n\nclass MargMLVIEstimation(tf.keras.Model):\n\n \"\"\" Marginalized Maximum Likelihood Estimation using Black-Box Variational Inference (Bayes) \"\"\"\n\n def __init__(self, n_units, n_features, n_classes, dense_layer_type, name='theta', **kwargs):\n super(MargMLVIEstimation, self).__init__(name=name, **kwargs)\n self.n_units = n_units\n self.n_classes = n_classes\n self.n_features = n_features\n self.dense_layer_type = dense_layer_type\n\n c = np.log(np.expm1(1.))\n scale = 1e-5 + tf.nn.softplus(c)\n\n if self.dense_layer_type.lower() == \"pw-re\".lower() \\\n or self.dense_layer_type.lower() 
== \"pw-li\".lower():\n if self.dense_layer_type.lower() == \"pw-re\".lower():\n self.dense_1 = tfpl.DenseReparameterization(self.n_features, activation=tf.nn.relu,\n trainable=True, name='pw_dense_1',)\n else:\n self.dense_1 = tfpl.DenseReparameterization(self.n_features, activation=None,\n trainable=True, name='pw_dense_1',)\n # In order to be able to compute the log likelihood\n # a distribution is needed, and it is instantiated as follow:\n self.pz_x = tfpl.DistributionLambda(lambda t: tfd.Normal(loc=t, scale=scale,),\n trainable=True, name='pz_x')\n self.py_x_z = tfpl.DenseReparameterization(self.n_classes,\n activation=None, name='py_x_z')\n\n elif self.dense_layer_type.lower() == \"fo-re\".lower()\\\n or self.dense_layer_type.lower() == \"fo-li\".lower():\n if self.dense_layer_type.lower() == \"fo-re\".lower():\n # With Relu activation\n self.dense_1 = tfpl.DenseFlipout(self.n_features, activation=tf.nn.relu,\n name='fl_dense_1')\n else:\n # With Linear activation\n self.dense_1 = tfpl.DenseFlipout(self.n_features, activation=None,\n name='fl_dense_1')\n\n # In order to be able to compute the log likelihood\n # a distribution is needed, and it is instantiated as follow:\n self.pz_x = tfpl.DistributionLambda(lambda t: tfd.Normal(loc=t, scale=scale, ),\n trainable=True, name='pz_x')\n self.py_x_z = tfpl.DenseFlipout(self.n_classes, activation=None,\n name='py_x_z')\n\n def call(self, inputs, training=False):\n x = self.dense_1(inputs, training=training)\n if training:\n pz_x = self.pz_x(x, training=training)\n py_x_z_logits = self.py_x_z(x, training=training)\n return pz_x, py_x_z_logits\n # py_x_z_logits\n return self.py_x_z(x, training=training)\n\n\ndef apply_mmle(n_units, n_features, n_classes, n_epochs, batch_size, learning_rate,\n dense_layer_type='path_wise', x_train=None, y_train=None, x_val=None,\n y_val=None, x_test=None, y_test=None, verbose=False):\n\n if x_train is not None:\n train_dataset = tf.data.Dataset.from_tensor_slices(\n (x_train, y_train)).batch(batch_size)\n else:\n print(\"No training set is provided!\")\n return None\n\n if x_val is not None:\n valid_dataset = tf.data.Dataset.from_tensor_slices(\n (x_val, y_val)).batch(batch_size)\n\n if x_test is not None:\n test_dataset = tf.data.Dataset.from_tensor_slices(\n (x_test, y_test)).batch(batch_size)\n\n # metrics for monitoring the training and validation procedure\n train_loss = tf.keras.metrics.Mean(name='train_loss')\n train_accuracy = tf.keras.metrics.BinaryAccuracy(name='train_accuracy')\n\n test_loss = tf.keras.metrics.Mean(name='test_loss')\n test_accuracy = tf.keras.metrics.BinaryAccuracy(name='test_accuracy')\n\n model_adv = MargMLVIEstimation(n_units=n_units, n_features=n_features,\n n_classes=n_classes,\n dense_layer_type=dense_layer_type)\n\n optimizer = tf.optimizers.Adam(learning_rate=learning_rate)\n\n @tf.function\n def training_step(x_batch, y_batch, model):\n with tf.GradientTape() as tape:\n\n dist_pz_x, py_x_z_logits = model(x_batch, training=True)\n\n neg_log_lik_pz_x = -tf.reduce_sum(\n dist_pz_x.log_prob(x_batch))\n\n neg_log_lik_py_x_z = tf.nn.softmax_cross_entropy_with_logits(\n labels=y_batch, logits=py_x_z_logits)\n\n neg_log_lik_py_x_z_cls = -tf.reduce_sum(\n tf.nn.softmax_cross_entropy_with_logits(labels=y_batch, logits=py_x_z_logits))\n kl_loss = sum(model.losses) # /(x_train.shape[0])\n\n total_loss = tf.math.multiply(neg_log_lik_pz_x, neg_log_lik_py_x_z) + neg_log_lik_py_x_z_cls # + kl_loss\n\n gradients = tape.gradient(total_loss, model.trainable_variables)\n 
optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n\n _ = train_loss.update_state(total_loss)\n predictions = tf.nn.softmax(py_x_z_logits)\n _ = train_accuracy.update_state(y_batch, predictions)\n\n @tf.function\n def testing_step(x_batch_t, y_batch_t, model):\n dist_pz_x_t, py_x_z_logits_t = model(x_batch_t, training=True)\n neg_log_lik_pz_x_t = -tf.reduce_sum(dist_pz_x_t.log_prob(x_batch_t))\n neg_log_lik_py_x_z_t = tf.nn.softmax_cross_entropy_with_logits(labels=y_batch_t, logits=py_x_z_logits_t)\n neg_log_lik_py_x_z_cls_t = -tf.reduce_sum(\n tf.nn.softmax_cross_entropy_with_logits(labels=y_batch_t, logits=py_x_z_logits_t))\n kl_loss = sum(model.losses) # /(x_train.shape[0])\n\n total_loss_t = tf.math.multiply(neg_log_lik_pz_x_t, neg_log_lik_py_x_z_t) + neg_log_lik_py_x_z_cls_t\n\n _ = test_loss.update_state(total_loss_t)\n predictions_t = tf.nn.softmax(py_x_z_logits_t)\n _ = test_accuracy.update_state(y_batch_t, predictions_t)\n\n for epoch in range(n_epochs):\n\n # Reset the metrics at the start of the next epoch\n train_loss.reset_states()\n train_accuracy.reset_states()\n test_loss.reset_states()\n test_accuracy.reset_states()\n\n for step, (x_batch, y_batch) in enumerate(train_dataset):\n training_step(x_batch=x_batch, y_batch=y_batch, model=model_adv,)\n\n if x_val is not None:\n for step, (x_batch_v, y_batch_v) in enumerate(valid_dataset):\n testing_step(x_batch_t=x_batch_v, y_batch_t=y_batch_v, model=model_adv, )\n\n if x_test is not None:\n for step, (x_batch_t, y_batch_t) in enumerate(test_dataset):\n testing_step(x_batch_t=x_batch_t, y_batch_t=y_batch_t, model=model_adv, )\n\n # Log every 100 batches.\n if step % 100 == 0 and verbose:\n template = 'Epoch {}, Train Loss: {}, Train Accuracy: {}, Valid Loss: {}, Valid Accuracy: {}'\n print(template.format(epoch + 1,\n train_loss.result(),\n train_accuracy.result(),\n test_loss.result(),\n test_accuracy.result()))\n return model_adv\n\n\nif __name__ == '__main__':\n\n n_units = 10\n n_epochs = 500\n n_classes = 2\n n_features = 10\n batch_size = 100\n n_samples = 1000 # 100\n learning_rate = 1e-2\n\n inside_call = True\n dense_layer_type = 'path_wise'\n\n # Generate synthetic data / load data sets\n x_in, y_in = make_classification(n_samples=n_samples, n_features=n_features, n_informative=n_classes, n_redundant=0,\n n_repeated=0, n_classes=n_classes, n_clusters_per_class=1,\n weights=[0.2, 0.8], flip_y=0.4, class_sep=1.0, hypercube=True,\n shift=0.0, scale=1.0, shuffle=True, random_state=42)\n print(\"y_in:\", set(y_in))\n\n # Normalizing the data points\n x_in = np.divide(x_in, np.ptp(x_in, axis=0))\n x_in = x_in.astype('float64')\n y_in = y_in.astype('float64').reshape(-1, 1)\n\n one_hot_encoder = OneHotEncoder(sparse=False)\n y_in = one_hot_encoder.fit_transform(y_in)\n y_in = y_in.astype('float64')\n\n x_train, x_test, y_train, y_test = train_test_split(x_in, y_in, test_size=0.4, random_state=42, shuffle=True)\n x_test, x_val, y_test, y_val = train_test_split(x_test, y_test, test_size=0.5, random_state=42, shuffle=True)\n\n print(\"shapes:\", x_train.shape, y_train.shape, x_test.shape, y_test.shape, x_val.shape, y_val.shape)\n\n x_min = np.min(x_train, axis=0)\n x_max = np.max(x_train, axis=0)\n x_range = x_max - x_min\n\n model_adv = apply_mmle(n_units=n_units, n_features=n_features, n_classes=n_classes,\n n_epochs=n_epochs, batch_size=batch_size, learning_rate=learning_rate,\n dense_layer_type=dense_layer_type, x_train=x_train, y_train=y_train,\n x_val=x_val, y_val=y_val, x_test=None, y_test=None, 
verbose=False)\n\n py_x_z_logits = model_adv(x_test, training=False)\n py_x_z_probs = tf.nn.softmax(py_x_z_logits)\n labels_pred = tf.argmax(py_x_z_probs, axis=1)\n labels_true = one_hot_encoder.inverse_transform(y_test)\n\n if inside_call:\n adm.plot_roc_auv_curve_of_an_algorithm(alg_ms=labels_pred, gt_ms=labels_true,\n alg_probs=py_x_z_probs, gt_ms_onehot=y_test,\n data_name='make_cls', alg_name='fpvi-pw',\n name_of_auc_roc_fig=dense_layer_type, sample_weight=None, case=0)\n prf = metrics.precision_recall_fscore_support(y_true=labels_true, y_pred=labels_pred, average='weighted')\n print(\"PRF:\", prf)\n","repo_name":"Sorooshi/MMLE-by-BBVI","sub_path":"codes/marginalized_maximum_likelihood_estimation_by_bbvi.py","file_name":"marginalized_maximum_likelihood_estimation_by_bbvi.py","file_ext":"py","file_size_in_byte":10417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"33098377646","text":"import argparse\nfrom fabrics import create_frame_reader, create_output_saver, \\\n create_detector, create_tracker\nfrom full_video_detector import FullVideoDetector\nfrom stepwise_video_detector import StepwiseVideoDetector\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n # Video to detect and track vehicles\n parser.add_argument('-q', '--frame_sequence', action = 'store_true',\n help = 'flag that allows to process image sequence')\n parser.add_argument('-v', '--video', help = 'video to detect vehicles or \\\n directory name containing image sequence')\n \n # Video-based detection algorithm\n parser.add_argument('-a', '--algorithm', default = 'FD', help = 'type \\\n of video-based detection algorithm (\\'FD\\' - full detection \\\n frame-by-frame, FDT - full detection + creating tracks)')\n \n # Detector parameters\n parser.add_argument('-d', '--detector', default = 'OpenCV',\n help = 'detector name (\\'OpenCV\\' mode supports DNN-based detectors)')\n #default = 'caffe'\n parser.add_argument('-f', '--framework', default = 'darknet',\n help = 'deep learning framework (\\'caffe\\', \\'darknet\\') \\\n are supported')\n parser.add_argument('-l', '--labels', default = '../tests/voc_classes.txt',\n help = 'file containing object classes for object detection \\\n in format \\' \\'')\n #default = '../tests/resnet50_rfcn_final.caffemodel',\n parser.add_argument('-w', '--weights',\n default = '../tests/yolo-voc.weights',\n help = 'model trained to detect objects')\n #default = '../tests/rfcn_pascal_voc_resnet50.prototxt',\n parser.add_argument('-p', '--representation',\n default = '../tests/yolo-voc.cfg',\n help = 'model description')\n #default = '102.9801 115.9465 122.7717',\n parser.add_argument('-m', '--mean', default = '0 0 0',\n help = 'mean intensity value')\n #default = 800,\n parser.add_argument('-c', '--cols', default = 416,\n help = 'input width (cols)')\n #default = 600,\n parser.add_argument('-r', '--rows', default = 416,\n help = 'input height (rows)')\n #default = 1.0,\n parser.add_argument('-s', '--scale_factor', default = 0.00392,\n help = 'scale factor for the input blob')\n #default = 'bgr',\n parser.add_argument('-bgr', default = 'rgb',\n help = 'flag to set the sequence of channels (\\'bgr\\' or \\'rgb\\')')\n parser.add_argument('-e', '--confidence_threshold', default = 0.5,\n help = 'confidence threshold')\n \n # Tracker parameters\n parser.add_argument('-t', '--tracker', default = None, help = 'tracker \\\n name supported by OpenCV (\\'BOOSTING\\', \\'MIL\\', \\'KCF\\', \\'TLD\\', \\\n \\'MEDIANFLOW\\', 
\\'GOTURN\\', \\'MOSSE\\', \\'CSRT\\')')\n\n # Options\n parser.add_argument('-so', '--std_output', action = 'store_true',\n help = 'redirect output information about detected objects \\\n to the standard output')\n parser.add_argument('-o', '--output', default = 'output.txt',\n help = 'output file containing list of detected vehicles')\n\n args = parser.parse_args()\n\n try:\n # Prepare video, detector and tracker\n video = create_frame_reader(args.frame_sequence, args.video)\n output_saver = create_output_saver(args.std_output, args.output)\n detector = create_detector(args.detector, args.labels, args.framework,\n args.weights, args.representation, args.mean, args.cols,\n args.rows, args.scale_factor, args.bgr, args.confidence_threshold)\n tracker = create_tracker(args.tracker)\n # Detect and track vehicles\n if (args.algorithm == 'FD'):\n video_detector = FullVideoDetector(video, detector,\n output_saver)\n elif (args.algorithm == 'FDT'):\n video_detector = StepwiseVideoDetector(video, detector, tracker,\n output_saver)\n else:\n raise ValueError('Video-based detection method {} \\\n is not supported'.format(args.algorithm))\n video_detector.process()\n except Exception as ex:\n print('ERROR: {}'.format(str(ex)))\n","repo_name":"valentina-kustikova/dnn-object-detectors-comp","sub_path":"vehicle-detector/video-detector/video_analyzer.py","file_name":"video_analyzer.py","file_ext":"py","file_size_in_byte":4170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"1569718857","text":"import frappe\nfrom frappe import _, bold\nfrom frappe.model.document import Document\n\n\nclass PartyLink(Document):\n\tdef validate(self):\n\t\tif self.primary_role not in [\"Customer\", \"Supplier\"]:\n\t\t\tfrappe.throw(\n\t\t\t\t_(\n\t\t\t\t\t\"Allowed primary roles are 'Customer' and 'Supplier'. 
Please select one of these roles only.\"\n\t\t\t\t),\n\t\t\t\ttitle=_(\"Invalid Primary Role\"),\n\t\t\t)\n\n\t\texisting_party_link = frappe.get_all(\n\t\t\t\"Party Link\",\n\t\t\t{\"primary_party\": self.primary_party, \"secondary_party\": self.secondary_party},\n\t\t\tpluck=\"primary_role\",\n\t\t)\n\t\tif existing_party_link:\n\t\t\tfrappe.throw(\n\t\t\t\t_(\"{} {} is already linked with {} {}\").format(\n\t\t\t\t\tself.primary_role, bold(self.primary_party), self.secondary_role, bold(self.secondary_party)\n\t\t\t\t)\n\t\t\t)\n\n\t\texisting_party_link = frappe.get_all(\n\t\t\t\"Party Link\", {\"primary_party\": self.secondary_party}, pluck=\"primary_role\"\n\t\t)\n\t\tif existing_party_link:\n\t\t\tfrappe.throw(\n\t\t\t\t_(\"{} {} is already linked with another {}\").format(\n\t\t\t\t\tself.secondary_role, self.secondary_party, existing_party_link[0]\n\t\t\t\t)\n\t\t\t)\n\n\t\texisting_party_link = frappe.get_all(\n\t\t\t\"Party Link\", {\"secondary_party\": self.primary_party}, pluck=\"primary_role\"\n\t\t)\n\t\tif existing_party_link:\n\t\t\tfrappe.throw(\n\t\t\t\t_(\"{} {} is already linked with another {}\").format(\n\t\t\t\t\tself.primary_role, self.primary_party, existing_party_link[0]\n\t\t\t\t)\n\t\t\t)\n\n\n@frappe.whitelist()\ndef create_party_link(primary_role, primary_party, secondary_party):\n\tparty_link = frappe.new_doc(\"Party Link\")\n\tparty_link.primary_role = primary_role\n\tparty_link.primary_party = primary_party\n\tparty_link.secondary_role = \"Customer\" if primary_role == \"Supplier\" else \"Supplier\"\n\tparty_link.secondary_party = secondary_party\n\n\tparty_link.save(ignore_permissions=True)\n\n\treturn party_link\n","repo_name":"frappe/erpnext","sub_path":"erpnext/accounts/doctype/party_link/party_link.py","file_name":"party_link.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","stars":15303,"dataset":"github-code","pt":"60"} +{"seq_id":"34980979528","text":"#!/usr/bin/env python3\n\nimport os\nfrom subprocess import call\nimport argparse\nimport multiprocessing\n\ndef run_unit_tests(octopus_build_dir, use_verbose_output):\n octopus_test_dir = octopus_build_dir + \"/test\"\n os.chdir(octopus_test_dir)\n ctest_options = []\n if use_verbose_output:\n ctest_options.append(\"--verbose\")\n call([\"ctest\"] + ctest_options)\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--type',\n help='C++ compiler path',\n default=\"unit\")\nparser.add_argument('--verbose',\n help='Output verbose test information',\n action='store_true')\nparser.add_argument('--compiler',\n help='C++ compiler path')\nparser.add_argument('--threads',\n help='The number of threads to use for building',\n type=int)\nargs = vars(parser.parse_args())\n\nif args[\"type\"] not in [\"unit\", \"valgrind\", \"regression\"]:\n print(\"Unknown test type \" + type)\n exit()\n\n# This file is in octopus-dir/test\noctopus_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n\nroot_cmake = octopus_dir + \"/CMakeLists.txt\"\n\nif not os.path.exists(root_cmake):\n print(\"octopus source directory corrupted: root CMakeLists.txt is missing. Please re-download source code.\")\n exit()\n\noctopus_build_dir = octopus_dir + \"/build\"\n\nif not os.path.exists(octopus_build_dir):\n print(\"octopus source directory corrupted: build directory is missing. 
Please re-download source code.\")\n exit()\n\nos.chdir(octopus_build_dir) # so cmake doesn't pollute root directory\n\ncmake_options = []\n\nif args[\"type\"] == \"unit\":\n cmake_options.extend([\"-DBUILD_TESTING=ON\", octopus_dir])\nelif args[\"type\"] == \"valgrind\":\n cmake_options.append(\"-DCMAKE_BUILD_TYPE=Debug\")\n\nif args[\"compiler\"]:\n cmake_options.append(\"-DCMAKE_CXX_COMPILER=\" + args[\"compiler\"])\n\nret = call([\"cmake\"] + cmake_options + [\"..\"])\n\nmake_options = []\n\nif args[\"threads\"]:\n if (args[\"threads\"] > 1):\n make_options.append(\"-j\" + str(args[\"threads\"]))\nelse:\n make_options.append(\"-j\" + str(multiprocessing.cpu_count()))\n\nif ret == 0:\n ret = call([\"make\"] + make_options)\n if ret == 0:\n if args[\"type\"] == \"unit\":\n run_unit_tests(octopus_build_dir, args[\"verbose\"])\n elif args[\"type\"] == \"valgrind\":\n call([\"make\", \"install\"])\n","repo_name":"luntergroup/octopus","sub_path":"test/install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","stars":288,"dataset":"github-code","pt":"60"} +{"seq_id":"74601972989","text":"# Desenvolva um programa que pergunte a distância de uma viagem em KM. \n# Calcule o preço da passagem, cobrando 0,50 centavos por km para viagens de até 200km e 0,45 centavos para viagens mais longas.\n\nkm = float (input('Digite quantos KM terá a sua viagem: '))\npr1 = km*0.45\npr2 = km*0.50\nif km >= 200:\n print('O preço da sua viagem é: {:.2f} reais'.format(pr2))\nelse:\n print('O preço da sua viagem é: {:.2f} reais'.format(pr1))","repo_name":"luizhmfonseca/Estudos-Python","sub_path":"EXERCÍCIOS - meus códigos/EX31.py","file_name":"EX31.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"22999297102","text":"# -- coding: UTF-8 --\n\n\"\"\"\nGiven an input dictionary in the form of a JSON how will you construct an\ninstance of a class without explicitly calling the class constructor.\n\"\"\"\n\nimport logging.config\n\n\n__author__ = 'saranya@gyandata.com'\n\nLOGGER = logging.getLogger('root')\nLOGGER_CONFIG_PATH = 'config/logging.json'\n\n\nclass JsonData:\n \"\"\" A class contains the co ordinates of a point\"\"\"\n def __init__(self, x, y):\n \"\"\"\n Constructor\n :ivar x: The x coordinate of the point\n :ivar y: The y coordinate of the point\n \"\"\"\n self.x = x\n self.y = y\n\n def __str__(self):\n return \"Value of x: %d and Value of y: %d\" % (self.x, self.y)\n","repo_name":"saranyasivam98/Classes_Objects","sub_path":"classes/without_constructor.py","file_name":"without_constructor.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"1353902684","text":"# insertion sort(挿入ソート)\n\ndef insertion_sort(a: list[int]) -> None:\n for i in range(1, len(a)):\n v: int = a[i]\n\n # 挿入する場所jを探す\n j: int = i\n while j > 0:\n if a[j - 1] > v:\n a[j] = a[j - 1] # vより大きいものは1つ後ろに移す\n else:\n break\n j -= 1\n a[j] = v\n print(a)\n\n\nif __name__ == '__main__':\n a: list[int] = [5, 9, 2, 0, 4]\n\n insertion_sort(a)\n\n print(a)\n","repo_name":"yukinakanaka/AlgorithmAndDataStructure","sub_path":"chap12/12.1.py","file_name":"12.1.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"ja","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"6645261967","text":"import config.package\nimport os\n\nclass 
Configure(config.package.CMakePackage):\n def __init__(self, framework):\n config.package.CMakePackage.__init__(self, framework)\n self.gitcommit = 'master'\n # using fork of Sherry's branch to work around bug in handling of BLAS, pull request made \n self.download = ['git://https://github.com/petsc/superlu']\n# self.download = ['git://https://github.com/xiaoyeli/superlu']\n# self.download = ['git://https://bitbucket.org/petsc/pkg-superlu.git',\n# 'http://ftp.mcs.anl.gov/pub/petsc/externalpackages/superlu_5.1.tar.gz']\n self.functions = ['set_default_options']\n self.includes = ['slu_ddefs.h']\n self.liblist = [['libsuperlu.a']]\n # SuperLU has NO support for 64 bit integers, use SuperLU_Dist if you need that\n self.requires32bitint = 1; # 1 means that the package will not work with 64 bit integers\n self.excludedDirs = ['SuperLU_DIST','SuperLU_MT']\n # SuperLU does not work with --download-fblaslapack with Compaqf90 compiler on windows.\n # However it should work with intel ifort.\n self.downloadonWindows= 1\n self.hastests = 1\n self.hastestsdatafiles= 1\n return\n\n def setupDependencies(self, framework):\n config.package.CMakePackage.setupDependencies(self, framework)\n self.blasLapack = self.framework.require('config.packages.BlasLapack',self)\n self.deps = [self.blasLapack]\n return\n\n def formCMakeConfigureArgs(self):\n args = config.package.CMakePackage.formCMakeConfigureArgs(self)\n args.append('-DUSE_XSDK_DEFAULTS=YES')\n\n args.append('-DTPL_BLAS_LIBRARIES=\"'+self.libraries.toString(self.blasLapack.dlib)+'\"')\n\n # Tests are broken on Apple since they depend on a shared library that is not resolved against BLAS\n args.append('-Denable_tests=0')\n # CMake in SuperLU should set this; but like many other packages it does not\n args.append('-DCMAKE_INSTALL_NAME_DIR:STRING=\"'+os.path.join(self.installDir,self.libdir)+'\"')\n return args\n\n","repo_name":"taupalosaurus/petscAdapt","sub_path":"config/BuildSystem/config/packages/SuperLU.py","file_name":"SuperLU.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"1136252163","text":"import discord\nimport random\nimport os\nfrom dotenv import load_dotenv\n\nload_dotenv(verbose=True)\n\n#디스코드 개발 토큰\ntoken = os.getenv('TOKEN')\n\n\n#msg 초기화\nmsg = None \nx = None\n\n\nclient = discord.Client()\n\n#타자연습 영어 문장들 15개\nt = [\"Experience is not what happens to a man; it is what a man does with what happens to him.\",\n \"There is no feeling, except the extremes of fear and grief, that does not find relief in music.\",\n \"I hear and I forget. I see and I remember. 
I do and I understand.\",\n \"If you want to see what children can do, you must stop giving them things.\",\n \"The man of virtue makes the difficulty to be overcome his first business, and success only a subsequent consideration.\",\n \"People fail forward to success.\",\n \"It is wise to apply the oil of refined politeness to the mechanisms of friendship.\",\n \"Without friends no one would choose to live, though he had all other goods.\",\n \"Man's feelings are always purest and most glowing in the hour of meeting and of farewell.\",\n \"This bud of love, by summer's ripening breath, May prove a beauteous flower when next we meet.\",\n \"Running cross country is the closest man will ever get to flying.\",\n \"My philosophy is that not only are you responsible for your life, but doing the best at this moment puts you in the best place for the next moment.\",\n \"He that lives upon hope will die fasting.\",\n \"It has never been my object to record my dreams, just to realize them.\"\n ]\n\n#타자연습 한글 문장들 15개\nT = [\"사랑은 언제까지나 지속되어야 하는 것인가, 아니면 이런 저런 정거장에 멈춰서는 여러 열차와 같은 것인가? 내가 그녀를 사랑한다면 어떻게 그녀를 떠날 수 있나? 그 때 내가 그렇게 느꼈다면, 지금은 왜 아무 것도 느끼지 못할까?\",\n \"죄를 미워하되 죄인은 사랑하라.\",\n \"젊은이들은 젊음이 얼마나 힘들고 무시무시할 수 있는지 안다. 그들의 젊음은 다른 모든 사람들에게 허비되는데 그야말로 끔찍한 일이다. 젊은이들에게는 권위도 존경도 없다.\",\n \"들은 것은 잊어버리고, 본 것은 기억하고 직접 해본 것은 이해한다\",\n \"아이들이 무엇을 할 수 있는지 확인해보고 싶다면 주는 것을 멈추어 보면 된다.\",\n \"어진 사람은 난관의 극복을 제일 중요한 일로 생각하고, 성공 여부는 부차적인 것으로 본다.\",\n \"실패하는 것은 곧 성공으로 한 발짝 더 나아가는 것이다.\",\n \"우정이라는 기계에 잘 정제된 예의라는 기름을 바르는 것은 현명하다.\",\n \"모든 것을 가졌다 해도 친구가 없다면, 아무도 살길 원치 않을 것이다.\",\n \"책은 가장 조용하고 변함 없는 벗이다. 책은 가장 쉽게 다가갈 수 있고 가장 현명한 상담자이자, 가장 인내심 있는 교사이다.\",\n \"그것은 죽었으면 하고 바라는 사람들이 시간을 죽이기 위해 읽는 책이었다.\",\n \"인간의 감정은 누군가를 만날 때와 헤어질 때 가장 순수하며 가장 빛난다.\",\n \"이 사랑의 꽃봉오리는 여름날 바람에 마냥 부풀었다가, 다음 만날 때엔 예쁘게 꽃필 거예요.\",\n \"일부 과학자들에 따르면 미래는 과거와 똑같을 것이다. 
단지 훨씬 값비쌀 뿐이다.\",\n \"우아함이란 이제 갖 사춘기를 벗어난 이들의 특권이 아니라, 이미 스스로의 미래를 꽉 잡고 있는 이들의 것이다.\"]\n\n#디스코드 봇 실행시 터미널로 보여주기\nclass Typewriterbot(discord.Client):\n chatTest = \"False\"\n channel = \"NULL\"\n q = \"NULL\"\n \n print(\"debug ready\")\n async def on_ready(self):\n game = discord.Game(\"!문제를 해결\")\n\n await client.change_presence(status=discord.Status.online, activity=game)\n print(\"Ready to Action...\")\n\n async def on_message(self, message):\n \n if message.author.bot:\n return None\n\n\n #기본적인 명령어\n if message.content == '!안녕':\n channel = message.channel\n msg = \"안녕\"\n await channel.send(msg)\n return None\n\n if message.content == '!잘가':\n channel = message.channel\n msg = \"잘가 ㅠㅠ\"\n await channel.send(msg)\n return None\n \n \n #명령어 보여주기 \n if message.content == '!명령어': \n channel = message.channel\n msg = \"```\\n!안녕 - 안녕\\n\"\n msg += \"!잘가 - 잘가 ㅠㅠ\\n\"\n msg += \"!타자연습 - 타자 연습 시작합니다.\\n\"\n msg += \"!타자연습 영어 - 타자 연습 영어 시작합니다.\\n\"\n msg += \"!타자연습 한글 - 타자 연습 한글 시작합니다.\\n\"\n msg += \"!c언어 - 필요한 문법 책 링크를 보여줍니다.\\n\"\n msg += \"!c언어 OO - OO 보여줌\\n 예시:!c언어 for\\n```\"\n await channel.send(msg)\n return None\n\n \n #c언어 명령어\n if message.content == '!c언어':\n channel = message.channel\n msg = \"```\\nC언어\\n\"\n msg += \"비쥬얼 스튜디오 설치하기\\n https://visualstudio.microsoft.com/ko/downloads/\\n\"\n msg += \"DEV C++ 설치하기\\n https://gabii.tistory.com/entry/Dev-C-Dev-C-%EB%8B%A4%EC%9A%B4%EB%A1%9C%EB%93%9C-%EB%B0%8F-%EC%84%A4%EC%B9%98\\n\"\n msg += \"변수\\n https://thebook.io/006989/ch02/01/\\n\"\n msg += \"함수\\n https://thebook.io/006989/ch03/\\n\"\n msg += \"연산자\\n https://thebook.io/006989/ch04/\\n\"\n msg += \"조건문\\n https://thebook.io/006989/ch05/\\n\"\n msg += \"반복문\\n https://thebook.io/006989/ch06/\\n\"\n msg += \"배열\\n https://thebook.io/006989/ch07/\\n\"\n msg += \"포인터\\n https://thebook.io/006989/ch08/\\n\\n\"\n msg += \"링크는 복사 붙여넣기로 사용하실 수 있습니다\\n```\"\n await channel.send(msg)\n return None\n\n if message.content == '!c언어 변수':\n channel = message.channel\n msg = \"변수\\n https://thebook.io/006989/ch02/01/\\n\"\n await channel.send(msg)\n return None\n\n if message.content == '!c언어 함수':\n channel = message.channel\n msg = \"함수\\n https://thebook.io/006989/ch03/\\n\"\n await channel.send(msg)\n return None\n\n if message.content == '!c언어 연산자':\n channel = message.channel\n msg = \"연산자\\n https://thebook.io/006989/ch04/\\n\"\n await channel.send(msg)\n return None \n\n if message.content == '!c언어 조건문':\n channel = message.channel\n msg = \"조건문\\n https://thebook.io/006989/ch05/\\n\"\n await channel.send(msg)\n return None \n \n if message.content == '!c언어 반복문':\n channel = message.channel\n msg = \"반복문\\n https://thebook.io/006989/ch06/\\n\"\n await channel.send(msg)\n return None \n\n if message.content == '!c언어 배열':\n channel = message.channel\n msg = \"배열\\n https://thebook.io/006989/ch07/\\n\"\n await channel.send(msg)\n return None \n \n if message.content == '!c언어 포인터':\n channel = message.channel\n msg = \"포인터\\n https://thebook.io/006989/ch08/\\n\"\n await channel.send(msg)\n return None \n \n #타자 연습 \n #타자연습 영어 시작\n if message.content == '!타자연습 영어': \n channel = message.channel\n msg = \"시작\\n\"\n msg += \"<문장>\"\n self.chatTest = \"True\"\n await channel.send(msg)\n \n self.q = random.choice(t)\n await channel.send(\"============= 문제 ============\\n!정답 입력후 정답을 입력해주세요 예(!정답 Hello world)\")\n await channel.send(self.q)\n return None\n\n if self.chatTest == \"True\": \n print(self.chatTest)\n channel = message.channel\n msg = message.content\n \n print(self.q)\n 
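The correctness test in the typing-practice handler here reduces to one exact string comparison; a minimal standalone form (the function name is hypothetical):

```python
def is_correct_answer(question: str, message: str) -> bool:
    # The bot only accepts "!정답 " followed by the full sentence, verbatim.
    return message == '!정답 ' + question
```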
print(message.content)\n \n if '!정답 ' + self.q == message.content: #정답 입력받기\n await channel.send(\"정답\")\n print(\"정답\")\n else:\n await channel.send(\"땡\")\n print(\"땡\")\n\n self.chatTest = \"False\"\n return None\n\n\n #타자연습 한글 시작\n if message.content == '!타자연습 한글': \n channel = message.channel \n msg = \"시작\\n\"\n msg += \"<문장>\"\n self.chatTest = \"True\"\n await channel.send(msg)\n \n self.q = random.choice(T)\n await channel.send(\"============= 문제 ============\\n!정답 입력후 정답을 입력해주세요 예(!정답 안녕하세요)\")\n await channel.send(self.q)\n return None\n\n if self.chatTest == \"True\": \n print(self.chatTest)\n channel = message.channel\n msg = message.content\n \n print(self.q)\n print(message.content)\n \n if '!정답 ' + self.q == message.content: #정답 입력받기\n await channel.send(\"정답\")\n print(\"정답\")\n \n else:\n await channel.send(\"땡\")\n print(\"땡\")\n \n\n self.chatTest = \"False\"\n return None\n \nif __name__ == \"__main__\":\n client = Typewriterbot()\n client.run(token) ","repo_name":"potatovllage/Discord_Bot","sub_path":"discordbot.py","file_name":"discordbot.py","file_ext":"py","file_size_in_byte":10186,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"31333734867","text":"from django.urls import path\nfrom home.views import *\nfrom admin_dashboard.views import contact_us_mail\n\nurlpatterns = [\n path('', HomePageView.as_view(), name='home'),\n path('about', AboutPageView.as_view(), name='about'),\n path('services', ServicePageView.as_view(), name='service'),\n path('contact-us', ContactPageView.as_view(), name='contact'),\n path('gallery', GalleryPageView.as_view(), name='gallery'),\n path('mail/send', contact_us_mail, name='send_mail'),\n\n\n]\n\n","repo_name":"Ziko278/poultry","sub_path":"home/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"35723376397","text":"from flask import Flask, request\nfrom flask_restful import Resource, Api, reqparse, inputs\nfrom celery.result import AsyncResult\n\nfrom tasks import get_text, get_images, celery_app\n\napp = Flask(__name__)\napi = Api(app)\n\nparser = reqparse.RequestParser()\nparser.add_argument('site')\n\nclass Text(Resource):\n def post(self):\n args = parser.parse_args()\n result = get_text.delay(args['site'])\n return {'task_id': result.task_id}\n\nclass Images(Resource):\n def post(self):\n args = parser.parse_args()\n result = get_images.delay(args['site'])\n return {'task_id': result.task_id}\n\nclass Status(Resource):\n def get(self, task_id):\n status = AsyncResult(task_id, app=celery_app).status\n return {'status': status}\n\n\napi.add_resource(Text, '/text')\napi.add_resource(Images, '/images')\napi.add_resource(Status, '/status/')\n\n\nif __name__ == \"__main__\":\n app.run()\n","repo_name":"DominikZabron/machine-learning-assistance","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"3922067364","text":"# configs\n\n# --- streaming ---\n# access token\nACCESS_TOKEN = '1440425207190024196-iSOaQja7rMiCMbZwoslmfOe0RX0XPu'\n# access secret\nACCESS_SECRET = '2eOScxtfc6aUqfZc4xEDfMTRlxJUIkavB1i0Shfbi6fIt'\n# consumer key\nCONSUMER_KEY = 'GnCIBr4YWVSFH65ZbOFvXJEpB'\n# consumer secret\nCONSUMER_SECRET = 'Q15jf7i3M7JcOtevi4EYhCjLblKUrJOdDBKgUAxz5npWVmifVB'\n\n# twtter checkpoint path\ncp_path = 
'./data/checkpoint_TwitterApp'\n# twitter data output path\noutput_directory = './data/twitter/movie'\n\n# client IP\nIP = 'localhost'\n# client port\nPORT = 9001\n\n# the tags to track\ntags = ['movie']\n\n\n# --- movies ---\n# tmdb api key\napi_key = '94b42385a681053cab08a06553dcfa19'\n# tmdb language\nlanguage = 'en'\n# tmdb mode\ndebug = True\n\n# feature to collect\nfeatures_default = [\n 'id', 'title', 'release_date', 'vote_average', 'vote_count', \n 'genres', 'budget', 'popularity', 'revenue'\n]\n\n# raw data path\npath = './data/movies.csv'\n\n\n# --- preprocess --\n# preprocessed data path\npp_path = './data/movies_pp.csv'\n# lda num topics\nnum_topics=10\n# lda max iters\nmax_iterations=50\n# lda num of words for each topic\nwordNumbers=10\n\n\n# --- analysis ---\n# predicted results path\npred_path = './data/predicts.csv'\ntopic_path = './data/topics.txt'\n\n","repo_name":"harrypotter1501/movie-sentiment","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"27493246001","text":"# learning how to split \n\n#line = \"This is a string\"\n \n#print(line.split())\n#print(line.split(' '))\n\n#print('-'.join(line.split(' ')))\n\n#line = line.split(\" \")\n#line = \"-\".join(line)\n#print(line)\n\n\n#var = \"If there is a will there is a way\"\n\n#var = var.split()\n#var= \"/\".join(line)\n\n#print(var)\n\n#steve = \"starting to get a hang of this split thing\"\n\n#steve = steve.split(\" \")\n#steve = \"$\".join(steve)\n#print(steve)\n\n#cando = \"Want me to split a string and then join it with a - ?\"\n#print(cando)\n\n#cando = cando.split()\n#print(cando)\n#cando = \"-\".join(cando)\n#print(cando)\n\n#park = '-'.join(park)\n\n#print(park)\n\n#today = \"Is goin to be a good day\"\n#today = today.split()\n#today = \"-\".join(today)\n#print(today)\n\n#wisconsin = \"Really cold this time of year\"\n#wisconsin = wisconsin.split()\n#wisconsin = '-'.join(wisconsin)\n#print(wisconsin)\n\n#france = \"its nice and warm this time of year\"\n#france = france.split()\n#france = '-'.join(france)\n#print(france)\n\nlast_one = \"This is my last one as I think I've done it enough times to remember\"\n\nlast_one = last_one.split() \nprint(last_one) #print\n\nlast_one = \"-\".join(last_one) \nprint(last_one)\n\n\n\n\n\n\n\n\n\n","repo_name":"steve0c/red_python","sub_path":"spliting-lab.py","file_name":"spliting-lab.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"22515570637","text":"from django.contrib.auth.forms import UserCreationForm\nfrom django import forms\nfrom django.contrib.auth.models import User\n\nclass RegisterForm(UserCreationForm):\n\temail = forms.EmailField(label='Email address', max_length=75)\n\t\n\tclass Meta:\n\t\tmodel = User\n\t\tfields = ('username', 'email',) \t\n\t\t\n\tdef clean_email(self):\n\t\temail = self.cleaned_data[\"email\"]\n\t\t\n\t\ttry:\n\t\t\tUser.objects.get(email__iexact=email)\n\t\texcept User.DoesNotExist:\n\t\t\treturn email\n\t\t\n\t\traise forms.ValidationError(\"A user with that email address already exists.\")\n\t\n\tdef save(self, commit=True):\n\t\tuser = super(UserCreationForm, self).save(commit=False)\n\t\tuser.set_password(self.cleaned_data[\"password1\"])\n\t\tuser.email = self.cleaned_data[\"email\"]\n\t\tuser.is_active = True\n\t\tif commit:\n\t\t\tuser.save()\n\t\t\t\n\t\treturn 
user","repo_name":"tfaris/pyabetic","sub_path":"account_forms.py","file_name":"account_forms.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"70710823873","text":"import numpy as np\r\n\r\n\r\nclass WordDict():\r\n def __init__(self, embeddingPath):\r\n self.word2ID, self.ID2word, self.wordVector = self.loadEmbedding(embeddingPath)\r\n\r\n def loadEmbedding(self, Path):\r\n word2ID = {'UNK': 0}\r\n ID2word = {0: 'UNK'}\r\n wordVector = []\r\n\r\n with open(Path, encoding='utf-8') as f:\r\n idx = 1\r\n while True:\r\n line = f.readline()\r\n if not line:\r\n break\r\n if idx == 1:\r\n dim = len(line.split(' ')) - 1\r\n unk_embedding = np.random.normal(0.0, 0.5, dim).astype('float32').tolist()\r\n wordVector.append(unk_embedding)\r\n\r\n lineList = line.split(' ')\r\n word2ID[lineList[0]] = idx\r\n ID2word[idx] = lineList[0]\r\n wordVector.append([float(x) for x in lineList[1:]])\r\n\r\n idx += 1\r\n\r\n return word2ID, ID2word, wordVector\r\n\r\n def get_word2ID(self):\r\n return self.word2ID\r\n\r\n def get_ID2word(self):\r\n return self.ID2word\r\n\r\n def get_wordVector(self):\r\n return self.wordVector\r\n\r\nif __name__ == '__main__':\r\n wordDict = WordDict('./glove.6B.100d.txt')\r\n","repo_name":"chipinzhen/BiLSTM-CRF","sub_path":"data/word_dict.py","file_name":"word_dict.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"21953694105","text":"from typing import List\n\n\nclass Solution:\n def countDistinct(self, nums: List[int], k: int, p: int) -> int:\n for i in range(len(nums)):\n if nums[i] % p == 0:\n nums[i] = str(nums[i]//p) + 'x'\n nums = [str(i) for i in nums]\n final_set = set()\n for i in range(len(nums)):\n for j in range(i+1, len(nums)+1):\n string = ' '.join(nums[i:j])\n if string.count('x') <= k:\n final_set.add(string)\n return len(final_set)\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n #print(Solution().countDistinct(nums = [2,3,3,2,2], k = 2, p = 2))\n #print(Solution().countDistinct(nums = [1,2,3,4], k = 4, p = 1))\n print(Solution().countDistinct([1,9,8,7,19], 1, 6))\n","repo_name":"liuyuhanalex/Leetcode","sub_path":"contest/6049. K Divisible Elements Subarrays.py","file_name":"6049. K Divisible Elements Subarrays.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"72067172030","text":"import os\nimport struct\nimport shutil\nimport tempfile\nimport unittest\n\nimport numpy as np\n\nfrom mibidata import pseudodepths\n\n# File variant 1, 8 bins per spectrum, 16 = 4x4 pixels.\nNUM_PIXELS = 4\nHEADER = (1, 8, NUM_PIXELS)\n# Assume 10 cycles per pixel, so each data sub-list has 10 zeros and the rest\n# of the counts are bin numbers from 1 to 8. 
The (0, 0) entry to indicate\n# new cycle comes at the _end_ of the data for each cycle.\n#\n# This following data was generated with:\n#\n# pixel_lengths = np.random.randint(9, 20, 16)\n# DATA = []\n# for pl in pixel_lengths:\n# entries = np.zeros(pl, int)\n# num_counts = pl - 9\n# inds = np.random.choice(np.arange(pl), num_counts, replace=False)\n# bins = np.random.randint(1, 8, num_counts)\n# entries[inds] = bins\n# DATA.append(list(entries) + [0])\nDATA = [\n [0, 3, 0, 1, 1, 0, 3, 0, 0, 0, 3, 0, 3, 7, 3, 4, 0, 0, 5, 0],\n [0, 5, 4, 2, 0, 0, 0, 0, 0, 0, 3, 0, 2, 2, 0, 2, 0],\n [0, 0, 0, 6, 7, 0, 0, 0, 0, 0, 0, 2, 2, 0],\n [0, 1, 1, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0],\n]\n# How many counts to expect if we manually split into two depths.\nDEPTH0 = [\n [0, 3, 0, 1, 1, 0, 3, 0, 0],\n [0, 5, 4, 2, 0, 0, 0, 0],\n [0, 0, 0, 6, 7, 0, 0],\n [0, 1, 1, 0, 0, 3, 0, 0],\n]\nDEPTH1 = [\n [0, 3, 0, 3, 7, 3, 4, 0, 0, 5, 0],\n [0, 0, 3, 0, 2, 2, 0, 2, 0],\n [0, 0, 0, 0, 2, 2, 0],\n [0, 0, 0, 0, 0],\n]\n\n\nclass TestMsdf(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n fh, fn = tempfile.mkstemp()\n cls.msdf = fn\n os.close(fh)\n sat = np.zeros((NUM_PIXELS, 2), int)\n cls.header = struct.pack(pseudodepths.HEADER_FORMAT, 1, 8, NUM_PIXELS)\n with open(fn, 'wb') as infile:\n infile.write(cls.header)\n end_sat = pseudodepths.HEADER_SIZE + \\\n NUM_PIXELS * pseudodepths.SAT_ENTRY_SIZE\n infile.seek(end_sat)\n for i, pixel in enumerate(DATA):\n sat[i, 0] = infile.tell()\n sat[i, 1] = len(pixel)\n for timestamp in pixel:\n count = int(timestamp > 0)\n infile.write(\n struct.pack(pseudodepths.DATA_FORMAT, timestamp, count))\n infile.seek(pseudodepths.HEADER_SIZE)\n for offset, length in sat:\n infile.write(\n struct.pack(pseudodepths.SAT_ENTRY_FORMAT, offset, length))\n cls.data_start = end_sat\n\n def setUp(self):\n self.tempdir = tempfile.mkdtemp()\n\n @classmethod\n def tearDownClass(cls):\n os.remove(cls.msdf)\n\n def tearDown(self):\n shutil.rmtree(self.tempdir)\n\n def _pack_sat(self, depth):\n sat = b''\n offset = self.data_start\n for d in depth:\n sat += struct.pack(\n pseudodepths.SAT_ENTRY_FORMAT, offset, len(d))\n offset += pseudodepths.DATA_SIZE * len(d)\n return sat\n\n def _pack_data(self, depth):\n data = b''\n for pixel in depth:\n for i in pixel:\n data += struct.pack(\n pseudodepths.DATA_FORMAT, i, int(i > 0))\n return data\n\n def test_split_into_one(self):\n \"\"\"If we split into one output file, we should get the same file out.\n \"\"\"\n cycles_per_pixel, cycles_per_scan = pseudodepths.divide(\n self.msdf, 1, self.tempdir)\n self.assertEqual(cycles_per_pixel, 10)\n self.assertEqual(cycles_per_scan, 10)\n new_file = os.path.join(self.tempdir, 'Depth0', 'Image.msdf')\n self.assertTrue(os.path.exists(new_file))\n with open(self.msdf, 'rb') as infile:\n expected_buffer = infile.read()\n with open(new_file, 'rb') as infile:\n new_buffer = infile.read()\n self.assertEqual(new_buffer, expected_buffer)\n\n def test_split_into_two(self):\n cycles_per_pixel, cycles_per_scan = pseudodepths.divide(\n self.msdf, 2, self.tempdir)\n self.assertEqual(cycles_per_pixel, 10)\n self.assertEqual(cycles_per_scan, 5)\n depth0 = os.path.join(self.tempdir, 'Depth0', 'Image.msdf')\n depth1 = os.path.join(self.tempdir, 'Depth1', 'Image.msdf')\n with open(depth0, 'rb') as infile:\n depth0_header = infile.read(pseudodepths.HEADER_SIZE)\n depth0_sat = infile.read(NUM_PIXELS * pseudodepths.SAT_ENTRY_SIZE)\n depth0_data = infile.read()\n with open(depth1, 'rb') as infile:\n depth1_header = 
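The commented-out snippet above that produced DATA can be made runnable with only a wrapper and an optional seed; this sketch keeps the same np.random calls:

```python
import numpy as np

def make_msdf_test_data(num_pixels=16, seed=None):
    # Pixel lengths of 9..19 leave exactly 9 zero entries per pixel; the
    # appended 0 is the tenth, matching the "10 cycles per pixel" note above.
    if seed is not None:
        np.random.seed(seed)
    data = []
    for pl in np.random.randint(9, 20, num_pixels):
        entries = np.zeros(pl, int)
        num_counts = pl - 9
        inds = np.random.choice(np.arange(pl), num_counts, replace=False)
        entries[inds] = np.random.randint(1, 8, num_counts)
        data.append(list(entries) + [0])
    return data
```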
infile.read(pseudodepths.HEADER_SIZE)\n depth1_sat = infile.read(NUM_PIXELS * pseudodepths.SAT_ENTRY_SIZE)\n depth1_data = infile.read()\n\n self.assertEqual(depth0_header, self.header)\n self.assertEqual(depth1_header, self.header)\n self.assertEqual(depth0_sat, self._pack_sat(DEPTH0))\n self.assertEqual(depth1_sat, self._pack_sat(DEPTH1))\n self.assertEqual(depth0_data, self._pack_data(DEPTH0))\n self.assertEqual(depth1_data, self._pack_data(DEPTH1))\n\n def test_split_into_three(self):\n \"\"\"This should raise because the number of cycles is not divisible by\n the number of desired pseudo-depths.\"\"\"\n with self.assertRaises(ValueError):\n pseudodepths.divide(self.msdf, 3, self.tempdir)\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"ionpath/mibilib","sub_path":"mibidata/tests/test_pseudodepths.py","file_name":"test_pseudodepths.py","file_ext":"py","file_size_in_byte":5304,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"60"} +{"seq_id":"25377520657","text":"import os\nfrom random import randint\n\ndef update_computer_brain(x, y):\n with open('FirstLastWord.txt', 'a+', encoding='UTF-8') as f:\n for value in x:\n if value not in y:\n f.write(f\"{value}\\n\")\n\n\n\nos.chdir('K:\\\\Python Group Study\\\\프로그래밍 기초 in Python\\\\GroupStudy')\n\nf = open('FirstLastWord.txt','r', encoding='UTF-8') # Brings Words that Computer has\n\nsys_wdbook = [] # List of words that computer has\ngame_wdbook = [] # List of words that used in game\nturn_count = 0 # How many turns went\nnxt_turn = \"\"\n\nfor line in f : # Read the file that has the words\n sys_wdbook.append(line.strip())\n\nwhile True: # Select who attacks first\n try:\n print(\"게임을 종료하시려면 언제든지 2번을 눌러주세요.\")\n turn = int(input(\"선공 후공을 정해주세요.(0: 선공, 1: 후공, 2: 종료) : \"))\n if turn == 0 or turn == 1 or turn == 2:\n break\n else:\n print(\"번호가 잘못되었습니다.\")\n except ValueError: #Error exception while choosing the turns\n print(\"숫자를 입력해주세요.\")\n\n\nwhile True: # Game first turn starts\n \n if int(turn) == 0 : # User's First Attack\n user_input = input(\"세상에 존재하는 단어(2~3자) 하나를 입력해주세요 :\") # User will input word\n if user_input == \"2\": \n print(\"사용자에 의해 게임이 종료되었습니다.\")\n exit()\n\n if len(user_input) > 3 or len(user_input) < 2:\n print(\"글자수를 맞춰주세요.\")\n\n elif len(user_input) <= 3 and len(user_input) >= 2:\n if turn_count == 0: # 1st user's input\n game_wdbook.append(user_input)\n turn_count += 1\n nxt_turn = \"com\"\n break\n\n\n elif int(turn) == 1: # Computer's First Attack\n sys_input = sys_wdbook[randint(0,len(sys_wdbook)-1)]\n print(sys_input)\n game_wdbook.append(sys_input)\n turn_count += 1\n nxt_turn = \"user\"\n break\n\n \n elif int(turn) == 2:\n print(\"게임 종료!!\")\n exit() \n\n","repo_name":"Wooil96/PythonStudy","sub_path":"프로그래밍 기초 in Python/GroupStudy/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"46226537979","text":"import tensorflow as tf\n\n# --------------------------------- General -----------------------------------\n# The random seed to use. Seed or False. 
Kind of pointless at this point,\n# relict from earlier version.\nRANDOM_SEED = False\n\n# The number of layers to fit, which needs to be the same as in the respective\n# PPC configuration.\nN_LAYERS = 171\nN_DOMS = 5160\n\n# -------------------------------- TensorFlow ---------------------------------\nTF_FLOAT_PRECISION = tf.float32\nTF_CPU_ONLY = False\n# The length of the simulated photons array, this is the amount of photons we\n# optimize on each step. More is better, but it is limited by available GPU\n# memory. With the current program 700000 seems to be about the maximum a Tesla\n# P40 can handle without running out of memory.\nTF_HITLIST_LEN = 700000\n\n# ----------------------------- Ice Mocel Config ------------------------------\n# This is the path to the ice model to use to calculate the intrinsic\n# absorption coefficient and wavelength dependency of the dust absorption\n# coefficient. So the relevevant files are icemodel.dat for the delta\n# temperature values and icemodel.par for the 6 parameter ice model parameters.\nICE_MODEL_PATH = '/home/aharnisch/modded-PPC/real/ice/'\n\n# ------------------------------- Flasher Data --------------------------------\nDATA_PATH = '/net/big-tank/POOL/users/aharnisch/fake_flasher_data/'\n\n# ----------------------------- Simulation Data -------------------------------\n# The simulated photon data directory\nPHOTON_PATH = '/net/big-tank/POOL/users/aharnisch/iceopt_photons/'\n\n# --------------------------------- Training ----------------------------------\n# Flashing string, for now we only flash this one string. Should not make a\n# difference when comparing to simulation anyways since there are no model\n# errors. String 36 is in the middle of deep core. String 69 is in the top\n# right vorner of the second to last xy layer of strings (minimally effected by\n# deep core.)\nFLASHER_STRINGS = [36]\n\n# If this flag is set to true, the gradient is averaged over an entire string\n# before fed to the optimizer. This is not the same as evaluating string\n# batches. The batches are still performed on individual emitter DOMs but\n# instead of applying the gradient each time we evaluate it for all DOMs on a\n# string and then feed the acuumulated gradient to the optimizer. This might\n# make the gradient more stable becaue it includes information on all layers.\n# It also smooths out the loss significantly which is helpful when debugging.\n# If it is set to False the gradient is applied on every dom batch each time.\nGRADIENT_AVERAGING = True\n\n# The initial absorption coefficients to start with.\nINITIAL_ABS = [0.01 for i in range(N_LAYERS)]\n\n# The smallest allowed absorption coeffizient, values below are clipped on\n# every step\nMIN_ABS = 0.001\n\n# The maximum number of training steps to perform.\nMAX_STEPS = 200\n\n# The number of hits to rescale to. We rescale to this fixed amount of hits\n# every time to make the loss more comparable for different emitter DOMs. 
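A minimal NumPy illustration of the accumulation that GRADIENT_AVERAGING enables; the real code works on TensorFlow gradients, so the equally shaped array stand-ins here are an assumption:

```python
import numpy as np

def string_averaged_gradient(per_dom_gradients):
    # Accumulate the gradient over every emitter DOM on a string and return
    # the single mean that is then fed to the optimizer, instead of applying
    # each per-DOM gradient as soon as it is computed.
    return np.mean(np.stack(per_dom_gradients), axis=0)
```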
The\n# reason we have to rescale at all is the fact that we don't know how many\n# photons have been emitted on data.\nRESCALED_HITS = 100000\n\n\n# -------------------------------- Optimizer ----------------------------------\n# The initial learning rate\nINITIAL_LEARNING_RATE = 0.001\n# True or False to activate/deactivate learning rate decay\nLEARNING_DECAY = False\n# Decay modes: Linear or Exponential\nLEARNING_DECAY_MODE = 'Exponential'\n# decrease the INITIAL_LEARNING_RATE every LEARNING_STEPS steps by\n# LEARNING_DECR linearly or exponentially\nLEARNING_DECR = 0.95\nLEARNING_STEPS = 10\n\n# supported optimizers: Adam, GradientDescent\nOPTIMIZER = 'Adam'\nADAM_SETTINGS = dict(beta1=0.9, beta2=0.999, epsilon=1e-08)\n\n# --------------------------------- Logging -----------------------------------\nWRITE_INTERVAL = 1 # how many steps between each write\n","repo_name":"AlexHarn/tf-ppc-iceopt","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"36137579909","text":"# https://codeforces.com/problemset/problem/727/A\n\nfrom collections import defaultdict\n\n\ndef solve():\n    a, b = map(int, input().split())\n    target = b\n    graph = defaultdict(lambda: [None, None, None])\n\n    flag = True\n    while b > a:\n        parent = b\n        if b % 2 == 0:\n            b //= 2\n            graph[parent][0] = b\n        else:\n            if (b - 1) % 10 != 0:\n                flag = False\n                break\n            b = b // 10\n            graph[parent][1] = b\n\n        graph[b][2] = parent\n\n    if a not in graph or not flag:\n        print(\"NO\")\n        return\n\n    path = []\n    while a < target:\n        path.append(a)\n        a = graph[a][2]\n    path.append(target)\n\n    print(f\"YES\\n{len(path)}\")\n    print(*path)\n\n\nsolve()\n","repo_name":"Son-OfAnton/Competitive-Programming","sub_path":"Contest/Contest_15/transformationFromAtoB.py","file_name":"transformationFromAtoB.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"10301604117","text":"from flask import Flask, render_template\n\nfrom flight_radar.flight_radar import process, get_flight_info\n\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef index():\n    return render_template('index.html')\n\n\n@app.route(\"/flights\")\ndef flights():\n    flights_info = process()\n    return render_template('flights.html', flights=flights_info)\n\n\n@app.route(\"/flight/<ads_hex>\")\ndef flight(ads_hex):\n    flight_info = get_flight_info(ads_hex)\n    return render_template('flight.html', flight=flight_info)\n\n\nif __name__ == \"__main__\":\n    app.run(host='0.0.0.0', port=5000)\n","repo_name":"ercanse/flight-track","sub_path":"ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"}
{"seq_id":"39031565228","text":"# 골드4\n# 소수 경로\n# 에라토스테네스의 체\ndef et_prime(dp):\n    for i in range(2, 10000):\n        if dp[i] == False:\n            if i >= 1000: sosu.append(i) # 1000 이상만 저장\n            for j in range(i*2, 10000, i): dp[j] = True\n    \n    del dp\n\ndef bfs(start, end):\n\n    if start == end:\n        return 0\n    else:\n        dq = deque([[start, 1]])\n        visited[int(start)] = True\n        \n        while dq:\n            standard, depth = map(str, dq.popleft())\n            standard = list(standard)\n            \n            for s in range(4): # 한번에 한 자릿수만 변경이 되므로 각 자릿수별로 연산\n                for i in range(10):\n                    if standard[s] != str(i): # 기존값과 비교값이 같으면 패스\n                        temp = standard[s] # 기존값 임시저장\n                        standard[s] = str(i) # 기존값을 연산을 위해 비교값으로 대체\n                        ch = ''.join(standard) # 문자열 연결 \n\n                        if 
visited[int(ch)] == False: # 방문 체크 - 방문 안한경우만\n visited[int(ch)] = True \n if end == ch:\n return int(depth) # 최초 발견이 곧 최소횟수\n elif sosu.count(int(ch)) > 0: \n dq.append([ch, int(depth)+1]) \n\n standard[s] = temp # 기존값 복원\n\nif __name__ == \"__main__\":\n from collections import deque\n N = int(input())\n DP = [False for _ in range(10000)]\n sosu = []\n et_prime(DP)\n \n for i in range(N):\n a, b = input().split()\n visited = [False for _ in range(10000)]\n result = bfs(a, b)\n\n if result is None: print(\"Impossible\")\n else: print(result)\n\n\n\"\"\"\n3\n1033 8179\n1373 8017\n1033 1033\n\"\"\"","repo_name":"woghks778803/algorithm-study","sub_path":"backjoon/Gold/1963.py","file_name":"1963.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"31488606236","text":"import enum\n\nfrom django.db import models\nfrom django.core.exceptions import ValidationError\nfrom django.contrib.postgres.fields import JSONField\nfrom django.utils.translation import ugettext_lazy as _\nfrom .organization_model import Organization\n\nfrom os2datascanner.utils.system_utilities import time_now\nfrom os2datascanner.engine2.pipeline.messages import MatchesMessage\n\n\nclass DocumentReport(models.Model):\n scan_time = models.DateTimeField(null=True, db_index=True,\n verbose_name=_('scan time'))\n\n created_timestamp = models.DateTimeField(null=True,\n verbose_name=_('created timestamp'))\n\n organization = models.ForeignKey(Organization,\n null=True, blank=True,\n verbose_name=_('organization'),\n on_delete=models.PROTECT)\n\n path = models.CharField(max_length=2000, verbose_name=_(\"path\"),\n db_index=True)\n # It could be that the meta data should not be part of the jsonfield...\n data = JSONField(null=True)\n\n source_type = models.CharField(max_length=2000,\n verbose_name=_(\"source type\"))\n\n sensitivity = models.IntegerField(null=True, verbose_name=_(\"sensitivity\"))\n\n probability = models.FloatField(null=True, verbose_name=_(\"probability\"))\n\n # datasource_last_modified stores when the scanned file/email/element itself, has last been updated.\n # This timestamp is collected during scan and is from the datasource.\n datasource_last_modified = models.DateTimeField(null=True)\n\n def _str_(self):\n return self.path\n\n @property\n def matches(self):\n matches = self.data.get(\"matches\")\n return MatchesMessage.from_json_object(matches) if matches else None\n\n @enum.unique\n class ResolutionChoices(enum.Enum):\n # Future simplification note: the behaviour of the enumeration values\n # of this class is modelled on Django 3's model.Choices\n OTHER = 0, \"Andet\"\n EDITED = 1, \"Redigeret\"\n MOVED = 2, \"Flyttet\"\n REMOVED = 3, \"Slettet\"\n NO_ACTION = 4, \"Intet foretaget\"\n\n def __new__(cls, *args):\n obj = object.__new__(cls)\n # models.Choices compatibility: the last element of the enum value\n # tuple, if there is one, is a human-readable label\n obj._value_ = args[0] if len(args) < 3 else args[:-1]\n return obj\n\n def __init__(self, *args):\n self.label = args[-1] if len(args) > 1 else self.name\n\n # This is a class *property* in model.Choices, but that would require\n # sinister metaclass sorcery\n @classmethod\n def choices(cls):\n return [(k.value, k.label) for k in cls]\n\n resolution_status = models.IntegerField(choices=ResolutionChoices.choices(),\n null=True, blank=True, db_index=True,\n verbose_name=_(\"resolution status\"))\n\n resolution_time = models.DateTimeField(blank=True, 
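The labelled-value trick used by ResolutionChoices above can be exercised on its own; a runnable miniature of the same pattern:

```python
import enum

@enum.unique
class MiniChoices(enum.Enum):
    # Same tuple-value pattern as ResolutionChoices: (value, label).
    OTHER = 0, "Andet"
    EDITED = 1, "Redigeret"

    def __new__(cls, *args):
        obj = object.__new__(cls)
        # The last tuple element is the human-readable label, the rest the value.
        obj._value_ = args[0] if len(args) < 3 else args[:-1]
        return obj

    def __init__(self, *args):
        self.label = args[-1] if len(args) > 1 else self.name

    @classmethod
    def choices(cls):
        return [(k.value, k.label) for k in cls]

print(MiniChoices.choices())  # [(0, 'Andet'), (1, 'Redigeret')]
print(MiniChoices(1).label)   # 'Redigeret' -- lookup by bare value still works
```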
null=True,\n verbose_name=_(\"resolution time\"))\n\n custom_resolution_status = models.CharField(max_length=1024, blank=True,\n verbose_name=_(\"justification\"))\n \n def clean(self):\n self.clean_custom_resolution_status()\n\n def clean_custom_resolution_status(self):\n self.custom_resolution_status = self.custom_resolution_status.strip()\n if self.resolution_status == 0 and not self.custom_resolution_status:\n raise ValidationError(\n {\n \"custom_resolution_status\":\n \"Resolution status 0 requires an\"\n \" explanation\"\n })\n\n def __init__(self, *args, **kwargs):\n # TODO: move to property/model method\n super().__init__(*args, **kwargs)\n self.__resolution_status = self.resolution_status\n\n def save(self, *args, **kwargs):\n now = time_now()\n\n # If Resolution status goes from not handled to handled - change resolution_time to now\n if self.__resolution_status == None and (self.resolution_status or self.resolution_status == 0):\n self.resolution_time = now\n\n # Adds a timestamp if it's a new match:\n if not self.pk:\n self.created_timestamp = now\n\n super().save(*args, **kwargs)\n\n # Add DocumentReport to Alias.match_relation, when it's saved to the db.\n from .aliases.alias_model import Alias\n try:\n metadata = self.data['metadata']['metadata'].values()\n value = list(metadata)[0]\n\n aliases = Alias.objects.select_subclasses()\n \n for alias in aliases:\n if str(alias) == value:\n try:\n tm = Alias.match_relation.through\n tm.objects.bulk_create([tm(documentreport_id=self.pk, alias_id=alias.pk)], ignore_conflicts=True)\n except:\n print(\"Failed to create match_relation\")\n except:\n print(self, \" has no metadata\")\n\n from ..views.views import send_socket_message\n send_socket_message()\n\n class Meta:\n verbose_name_plural = _(\"document reports\")\n ordering = ['-sensitivity', '-probability']\n","repo_name":"mBoegvald/bachelor-os2datascanner","sub_path":"src/os2datascanner/projects/report/reportapp/models/documentreport_model.py","file_name":"documentreport_model.py","file_ext":"py","file_size_in_byte":5563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"73247043391","text":"from django.urls import path\nfrom . import views\nfrom . 
import api\n\n\napp_name = 'quiz'\n\nurlpatterns = [\n path(r'', views.quizside, name='quiz'),\n path(r'quiz/', views.quizcode, name='quizcode'),\n path(r'quiz/lag-quiz/', views.CreateQuiz.as_view(), name='create_quiz'),\n path(r'quiz//', views.play_quiz, name='play_quiz'),\n path(r'quiz//api/', api.QuizAPI.as_view(), name='play_quiz_api'),\n]\n","repo_name":"Fredrik3B/quiz","sub_path":"nettside/quiz/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"60"} +{"seq_id":"15924637455","text":"import pandas as pd\nimport numpy as np\nimport io\nfrom upload import upload\nfrom datetime import timedelta, date\nimport requests\n\ndef import_csv_data(csv_text, entry_date):\n\t# load the CSV data\n\tstring_io = io.StringIO(csv_text)\n\tdf = pd.read_csv(string_io, keep_default_na=False, na_values=['___'])\n\t\n\t# define the columns\n\tlat_col = lng_col = \"\"\n\tcountry_col = province_col = county_col = \"\"\n\ttotal_col = death_col = recovered_col = \"\"\n\t\n\t# future-proofing\n\tfor col in df.columns:\n\t\tif 'lat' in col.lower(): lat_col = col\n\t\telif 'long' in col.lower(): lng_col = col\n\t\telif 'country' in col.lower(): country_col = col\n\t\telif 'province' in col.lower(): province_col = col\n\t\telif 'county' in col.lower(): county_col = col\n\t\telif \"death\" in col.lower(): death_col = col\n\t\telif \"dead\" in col.lower(): death_col = col\n\t\telif \"confirm\" in col.lower(): total_col = col\n\t\telif \"recover\" in col.lower(): recovered_col = col\n\t\n\tcontent = {\n\t\t'datapoint': [],\n\t\t'location': []\n\t}\n\n\tdf.sort_values(by=[col for col in [county_col, province_col, country_col] if col], ascending=False)\n\t\n\tfor _, row in df.iterrows():\n\t\t# Steps\n\t\t# 1. Find country, province, and county name\n\t\t# 2. Get the actual coronavirus numbers\n\t\t# 3. 
Estimate the location if we can\n\n\t\t# STEP 1 #\n\t\tcountry = row[country_col]\n\t\tprovince = row[province_col] if not pd.isnull(row[province_col]) else ''\n\t\tcounty = row[county_col] if county_col else ''\n\n\t\t## FOR DEBUG ##\n\t\tif (\"Korea\" not in country):\n\t\t\tcontinue\n\n\t\t# STEP 2 #\n\t\ttotal = row[total_col]\n\t\tdeaths = row[death_col]\n\t\trecovered = row[recovered_col]\n\n\t\tif not total: total = 0\n\t\tif not deaths: deaths = 0\n\t\tif not recovered: recovered = 0\n\n\t\tlocation_row = {}\n\t\tdatapoint_row = {}\n\n\t\tif province == 'Recovered':\n\t\t\tdatapoint_row = {\n\t\t\t\t\"country\": country,\n\t\t\t\t\"recovered\": recovered,\n\t\t\t\t\"entry_date\": entry_date\n\t\t\t}\n\n\t\t\tlocation_row = { \"country\": country }\n\t\telse:\n\t\t\tdatapoint_row = {\n\t\t\t\t\"country\": country,\n\t\t\t\t\"province\": province,\n\t\t\t\t\"county\": county,\n\t\t\t\t\"total\": total,\n\t\t\t\t\"deaths\": deaths,\n\t\t\t\t\"recovered\": recovered,\n\t\t\t\t\"entry_date\": entry_date\n\t\t\t}\n\n\t\t\tlocation_row = {\n\t\t\t\t\"country\": country,\n\t\t\t\t\"province\": province,\n\t\t\t\t\"county\": county\n\t\t\t}\n\n\t\t# Save the primary location data if we can\n\t\tif lat_col and lng_col:\n\t\t\tlat, lng = row[lat_col], row[lng_col]\n\t\t\tif lat and lng:\n\t\t\t\tlocation_row['latitude'] = lat\n\t\t\t\tlocation_row['longitude'] = lng\n\n\t\tcontent['location'].append(location_row)\n\t\tcontent['datapoint'].append(datapoint_row)\n\treturn content\n\ndef import_jhu_date(entry_date):\n\tdate_formatted = entry_date.strftime(\"%m-%d-%Y\")\n\tprint(\"\\rLoading data from JHU \" + date_formatted + '...', end='\\r')\n\t\n\t# download from Github\n\tgithub_raw_url = f\"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/{date_formatted}.csv\"\n\tresponse = requests.get(github_raw_url, timeout=10)\n\t\n\tif response.status_code == 200:\n\t\treturn import_csv_data(response.text, entry_date)\n\telse:\n\t\tprint(\"404 not found\")\n\ndef import_jhu_date_range(date_1, date_2):\n\tnext_date = timedelta(days=1)\n\tcurrent_date = date_1\n\t\n\twhile current_date <= date_2:\n\t\tprint(\"Loading JHU data for\", current_date, \" \")\n\t\tresult = import_jhu_date(current_date)\n\t\tif result:\n\t\t\tyield result\n\t\tcurrent_date += next_date\n\ndef import_jhu_historical():\n\treturn import_jhu_date_range(date_1=date(2020, 3, 10), date_2=date.today())\n","repo_name":"myfatemi04/Corona-Vision","sub_path":"data_collection/data_imports/import_jhu.py","file_name":"import_jhu.py","file_ext":"py","file_size_in_byte":3419,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"21834804270","text":"import requests\r\n\r\ndef pegar_cotacoes():\r\n # Função que requisita a API informações sobre cotação atual \r\n requisicao = requests.get(\"https://economia.awesomeapi.com.br/last/USD-BRL,EUR-BRL,BTC-BRL\")\r\n\r\n requisicao_dic = requisicao.json()\r\n\r\n cotacao_dolar = requisicao_dic['USDBRL']['bid']\r\n cotacao_euro = requisicao_dic['EURBRL']['bid']\r\n cotacao_btc = requisicao_dic['BTCBRL']['bid']\r\n\r\n # estou chamando a label com a variável \"text\" que será completada com a geração do texto\r\n texto_cotacao[\"text\"] = f'''\r\n Dólar: {cotacao_dolar}\r\n Euro: {cotacao_euro}\r\n BTC: {cotacao_btc}'''\r\n\r\n\r\nimport tkinter as tk\r\n\r\n# inicia a janela \r\njanela1 = tk.Tk() \r\n\r\n# alterar titulo da janela\r\njanela1.title('Cotação atual') \r\n\r\n# label cria 
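The "future-proofing" column loop in import_csv_data above is a case-insensitive substring heuristic; isolated into a helper (the name detect_columns is hypothetical):

```python
def detect_columns(columns):
    # Substring matching with the same precedence as the loop above.
    found = {}
    for col in columns:
        low = col.lower()
        if 'lat' in low: found['lat'] = col
        elif 'long' in low: found['lng'] = col
        elif 'country' in low: found['country'] = col
        elif 'province' in low: found['province'] = col
        elif 'county' in low: found['county'] = col
        elif 'death' in low or 'dead' in low: found['deaths'] = col
        elif 'confirm' in low: found['total'] = col
        elif 'recover' in low: found['recovered'] = col
    return found

# detect_columns(['Province/State', 'Country_Region', 'Lat', 'Confirmed'])
# -> {'province': 'Province/State', 'country': 'Country_Region',
#     'lat': 'Lat', 'total': 'Confirmed'}
```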
o texto na janela (param1 = janela, param2 = texto)\r\ntexto_janela = tk.Label(janela1, text=\"Clique no botão para ver a cotação das moedas\") \r\n# o grid define onde a Label do texto ficara (param1 = coluna, param2 = linha, param3 = espaçamento largura, param4 = espaçamento altura)\r\ntexto_janela.grid(column=1, row=0, padx=50, pady=20) \r\n\r\n# criando o botão (param1 = janela, param2 = texto, param3 = comando que o botão executara)\r\nbotao = tk.Button(janela1, text=\"Buscar Cotações\", command=pegar_cotacoes) \r\nbotao.grid(column=1, row=2)\r\n\r\ntexto_cotacao = tk.Label(janela1, text=\"\") # deixo o texto em branco (será preenchido na função)\r\ntexto_cotacao.grid(column=1, row=3, pady=20)\r\n\r\n\r\njanela1.mainloop() # mantém a janela aberta (última linha do código)","repo_name":"kakanetwork/Estudos-Linguagens","sub_path":"Prog-Py/Tkinter/Cotacao.py","file_name":"Cotacao.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"26095231648","text":"\"\"\"\ncolors = [\"red\", \"blue\", \"green\", \"purple\"]\nages = [14, 15, 15, 16, 16, 13, 18, 14, 15, 15, 14]\n\n\n# 5 - how do we remove an item from the list?\nprint(colors)\ndel colors[2]\nprint(colors)\n\"\"\"\ncolors = [\"red\", \"blue\", \"green\", \"purple\"]\nages = [14, 15, 15, 16, 16, 13, 18, 14, 15, 15, 14]\n\n\n# 6 - what happens when we use the append() function?\nprint(colors)\ncolors.append(\"orange\")\nprint(colors)\n","repo_name":"CNieves121/lps_compsci","sub_path":"class_samples/3-7_forloops/PartnerPractice.py","file_name":"PartnerPractice.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"3310874999","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/4/20 \n# @File : lr_scheduler.py\n# @Software: PyCharm\nimport tensorflow as tf\n\n\n\ndef MultiStepLR(initial_learning_rate, lr_steps, lr_rate, name='MultiStepLR'):\n \"\"\"Multi-steps learning rate scheduler.\"\"\"\n lr_steps_value = [initial_learning_rate]\n for _ in range(len(lr_steps)):\n lr_steps_value.append(lr_steps_value[-1] * lr_rate)\n return tf.keras.optimizers.schedules.PiecewiseConstantDecay(\n boundaries=lr_steps, values=lr_steps_value)\n\n\ndef MultiStepWarmUpLR(initial_learning_rate, lr_steps, lr_rate,\n warmup_steps=0., min_lr=0.,\n name='MultiStepWarmUpLR'):\n \"\"\"Multi-steps warm up learning rate scheduler.\"\"\"\n assert warmup_steps <= lr_steps[0]\n assert min_lr <= initial_learning_rate\n lr_steps_value = [initial_learning_rate]\n for _ in range(len(lr_steps)):\n lr_steps_value.append(lr_steps_value[-1] * lr_rate)\n return PiecewiseConstantWarmUpDecay(\n boundaries=lr_steps, values=lr_steps_value, warmup_steps=warmup_steps,\n min_lr=min_lr)\n\n\ndef CosineAnnealingLR_Restart(initial_learning_rate, t_period, lr_min):\n \"\"\"Cosine annealing learning rate scheduler with restart.\"\"\"\n return tf.keras.experimental.CosineDecayRestarts(\n initial_learning_rate=initial_learning_rate,\n first_decay_steps=t_period, t_mul=1.0, m_mul=1.0,\n alpha=lr_min / initial_learning_rate)\n\n\nclass PiecewiseConstantWarmUpDecay(\n tf.keras.optimizers.schedules.LearningRateSchedule):\n \"\"\"A LearningRateSchedule wiht warm up schedule.\n Modified from tf.keras.optimizers.schedules.PiecewiseConstantDecay\"\"\"\n\n def __init__(self, boundaries, values, warmup_steps, min_lr,\n name=None):\n super(PiecewiseConstantWarmUpDecay, self).__init__()\n\n if len(boundaries) 
!= len(values) - 1:\n raise ValueError(\n \"The length of boundaries should be 1 less than the\"\n \"length of values\")\n\n self.boundaries = boundaries\n self.values = values\n self.name = name\n self.warmup_steps = warmup_steps\n self.min_lr = min_lr\n\n def __call__(self, step):\n with tf.name_scope(self.name or \"PiecewiseConstantWarmUp\"):\n step = tf.cast(tf.convert_to_tensor(step), tf.float32)\n pred_fn_pairs = []\n warmup_steps = self.warmup_steps\n boundaries = self.boundaries\n values = self.values\n min_lr = self.min_lr\n\n pred_fn_pairs.append(\n (step <= warmup_steps,\n lambda: min_lr + step * (values[0] - min_lr) / warmup_steps))\n pred_fn_pairs.append(\n (tf.logical_and(step <= boundaries[0],\n step > warmup_steps),\n lambda: tf.constant(values[0])))\n pred_fn_pairs.append(\n (step > boundaries[-1], lambda: tf.constant(values[-1])))\n\n for low, high, v in zip(boundaries[:-1], boundaries[1:],\n values[1:-1]):\n # Need to bind v here; can do this with lambda v=v: ...\n pred = (step > low) & (step <= high)\n pred_fn_pairs.append((pred, lambda: tf.constant(v)))\n\n # The default isn't needed here because our conditions are mutually\n # exclusive and exhaustive, but tf.case requires it.\n return tf.case(pred_fn_pairs, lambda: tf.constant(values[0]),\n exclusive=True)\n\n def get_config(self):\n return {\n \"boundaries\": self.boundaries,\n \"values\": self.values,\n \"warmup_steps\": self.warmup_steps,\n \"min_lr\": self.min_lr,\n \"name\": self.name\n }","repo_name":"PureHing/face-mask-detection-tf2","sub_path":"components/lr_scheduler.py","file_name":"lr_scheduler.py","file_ext":"py","file_size_in_byte":3816,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"60"} +{"seq_id":"9234704222","text":"# -*- coding: utf-8 -*-\n# !/usr/bin/env python\n\"\"\"\n-------------------------------------------------\n File Name: GetFreeProxy.py\n Description : 抓取免费代理\n Author : JHao\n date: 2016/11/25\n-------------------------------------------------\n Change Activity:\n 2016/11/25:\n-------------------------------------------------\n\"\"\"\nimport re\nimport sys\nimport requests\n\ntry:\n from importlib import reload # py3 实际不会实用,只是为了不显示语法错误\nexcept:\n reload(sys)\n sys.setdefaultencoding('utf-8')\n\nsys.path.append('..')\n\nfrom Util.WebRequest import WebRequest\nfrom Util.utilFunction import getHtmlTree\nfrom Util.utilFunction import verifyProxyFormat\n\n# for debug to disable insecureWarning\nrequests.packages.urllib3.disable_warnings()\n\n\"\"\"\n data5u.com\n 66ip.cn\n 31f.cn\n xicidaili.com\n goubanjia.com\n kxdaili.com\n kuaidaili.com\n xsdaili.com\n zdaye.com\n ip3366.net\n iphai.com\n jiangxianli.com\n feiyiproxy.com\n qydaili.com\n\"\"\"\n\n\nclass GetFreeProxy(object):\n \"\"\"\n proxy getter\n \"\"\"\n\n def __init__(self):\n pass\n\n @staticmethod\n def freeProxyFirst():\n \"\"\"\n 无忧代理 http://www.data5u.com/\n 几乎没有能用的\n :return:\n \"\"\"\n url_list = [\n 'http://www.data5u.com/',\n 'http://www.data5u.com/free/gngn/index.shtml',\n 'http://www.data5u.com/free/gnpt/index.shtml'\n ]\n for url in url_list:\n html_tree = getHtmlTree(url)\n ul_list = html_tree.xpath('//ul[@class=\"l2\"]')\n for ul in ul_list:\n try:\n yield ':'.join(ul.xpath('.//li/text()')[0:2])\n except Exception as e:\n print(e)\n\n @staticmethod\n def freeProxySecond(area=34):\n \"\"\"\n 代理66 http://www.66ip.cn/\n :param area: 抓取代理页数,page=1北京代理页,page=2上海代理页......\n :return:\n \"\"\"\n area = 34 if area > 34 else area\n for area_index in range(1, area + 1):\n url = 
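The warm-up rule implemented by PiecewiseConstantWarmUpDecay.__call__ above can be sanity-checked against a pure-Python mirror; as in the constructor, values must be one element longer than boundaries:

```python
def warmup_piecewise(step, boundaries, values, warmup_steps, min_lr):
    # Linear ramp from min_lr to values[0] during warm-up, then the usual
    # piecewise-constant lookup over the boundaries.
    if step <= warmup_steps:
        return min_lr + step * (values[0] - min_lr) / warmup_steps
    for boundary, value in zip(boundaries, values):
        if step <= boundary:
            return value
    return values[-1]

# e.g. boundaries=[1000, 2000], values=[1e-2, 1e-3, 1e-4], warmup_steps=500:
# step 1500 -> 1e-3, step 2500 -> 1e-4
```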
\"http://www.66ip.cn/areaindex_{}/1.html\".format(str(area_index))\n html_tree = getHtmlTree(url)\n tr_list = html_tree.xpath(\"//div[@id='footer']//table//tr[position()>1]\")\n if len(tr_list) == 0:\n continue\n for tr in tr_list:\n yield tr.xpath(\"./td[1]/text()\")[0] + \":\" + tr.xpath(\"./td[2]/text()\")[0]\n\n @staticmethod\n def freeProxyThird():\n \"\"\"\n 31代理 http://31f.cn/http-proxy/\n :return:\n \"\"\"\n urls = ['http://31f.cn/http-proxy/', 'http://31f.cn/https-proxy/']\n for url in urls:\n html_tree = getHtmlTree(url)\n try:\n tr_list = html_tree.xpath('//table[@class=\"table table-striped\"]//tr[position()>1]')\n for tr in tr_list:\n try:\n if '天' in tr.xpath('./td[9]/text()')[0]:\n continue\n yield tr.xpath('./td[2]/text()')[0] + ':' + tr.xpath('./td[3]/text()')[0]\n except Exception as e:\n print(e)\n except Exception as e:\n print(e)\n\n @staticmethod\n def freeProxyFourth(page_count=2):\n \"\"\"\n 西刺代理 http://www.xicidaili.com\n :return:\n \"\"\"\n url_list = [\n 'http://www.xicidaili.com/nn/', # 高匿\n 'http://www.xicidaili.com/nt/', # 透明\n ]\n for each_url in url_list:\n for i in range(1, page_count + 1):\n page_url = each_url + str(i)\n tree = getHtmlTree(page_url)\n proxy_list = tree.xpath('.//table[@id=\"ip_list\"]//tr[position()>1]')\n for proxy in proxy_list:\n try:\n yield ':'.join(proxy.xpath('./td/text()')[0:2])\n except Exception as e:\n pass\n\n @staticmethod\n def freeProxyFifth():\n \"\"\"\n guobanjia http://www.goubanjia.com/\n :return:\n \"\"\"\n url = \"http://www.goubanjia.com/\"\n tree = getHtmlTree(url)\n proxy_list = tree.xpath('//td[@class=\"ip\"]')\n # 此网站有隐藏的数字干扰,或抓取到多余的数字或.符号\n # 需要过滤掉

的内容\n xpath_str = \"\"\".//*[not(contains(@style, 'display: none'))\n and not(contains(@style, 'display:none'))\n and not(contains(@class, 'port'))\n ]/text()\n \"\"\"\n for each_proxy in proxy_list:\n try:\n # :符号裸放在td下,其他放在div span p中,先分割找出ip,再找port\n ip_addr = ''.join(each_proxy.xpath(xpath_str))\n port = each_proxy.xpath(\".//span[contains(@class, 'port')]/text()\")[0]\n yield '{}:{}'.format(ip_addr, port)\n except Exception as e:\n pass\n\n @staticmethod\n def freeProxySixth():\n \"\"\"\n 开心代理 http://ip.kxdaili.com/dailiip.html\n :return:\n \"\"\"\n urls = ['http://ip.kxdaili.com/dailiip/1/{}.html#ip'.format(str(page)) for page in range(1, 8)]\n for url in urls:\n try:\n html_tree = getHtmlTree(url)\n tr_list = html_tree.xpath('//table[@class=\"ui table segment\"]//tbody//tr')\n for tr in tr_list:\n try:\n yield tr.xpath('./td[1]/text()')[0] + ':' + tr.xpath('./td[2]/text()')[0]\n except Exception as e:\n print(e)\n except Exception as e:\n print(e)\n\n @staticmethod\n def freeProxySeventh():\n \"\"\"\n 快代理 https://www.kuaidaili.com\n \"\"\"\n url_list = [\n 'https://www.kuaidaili.com/free/inha/{page}/',\n 'https://www.kuaidaili.com/free/intr/{page}/'\n ]\n for url in url_list:\n for page in range(1, 3):\n page_url = url.format(page=page)\n tree = getHtmlTree(page_url)\n proxy_list = tree.xpath('.//table//tr')\n for tr in proxy_list[1:]:\n yield ':'.join(tr.xpath('./td/text()')[0:2])\n\n @staticmethod\n def freeProxyEight():\n \"\"\"\n 小舒代理 http://www.xsdaili.com/\n \"\"\"\n url = 'http://www.xsdaili.com/'\n html_tree = getHtmlTree(url)\n new_url = url + html_tree.xpath('//div[@class=\"col-md-12\"]/div[1]//a[1]/@href')[0]\n new_html_tree = getHtmlTree(new_url)\n proxy_list = new_html_tree.xpath('//div[@class=\"cont\"]/text()')\n for proxy in proxy_list:\n try:\n yield proxy.split('@')[0].strip()\n except Exception as e:\n print(e)\n\n @staticmethod\n def freeProxyNinth():\n \"\"\"\n 站大爷代理 http://ip.zdaye.com/\n :return:\n \"\"\"\n url = 'http://ip.zdaye.com/'\n html_tree = getHtmlTree(url)\n item_list = html_tree.xpath('//div[@class=\"Loglist\"]/div[2]/div[@class=\"panel-body\"]//a/text()')\n for item in item_list:\n try:\n yield item.split('@')[0].strip()\n except Exception as e:\n print(e)\n\n header = {\n 'Referer': 'http://ip.zdaye.com/',\n }\n new_urls = html_tree.xpath('//div[@class=\"Loglist\"]/div[1]/div[@class=\"panel-body\"]//a/@href')\n for new_url in new_urls:\n try:\n new_html_tree = getHtmlTree(url + new_url, header=header)\n new_item_list = new_html_tree.xpath('//div[@class=\"cont\"]/text()')\n for new_item in new_item_list:\n try:\n yield new_item.split('@')[0].strip()\n except Exception as e:\n print(e)\n except Exception as e:\n print(e)\n\n @staticmethod\n def freeProxyTen():\n \"\"\"\n 云代理 http://www.ip3366.net/free/\n :return:\n \"\"\"\n urls = ['http://www.ip3366.net/free/?stype=1&page={}'.format(str(i)) for i in range(1, 4)]\n request = WebRequest()\n for url in urls:\n r = request.get(url)\n proxies = re.findall(r'(\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})[\\s\\S]*?(\\d+)', r.text)\n for proxy in proxies:\n yield \":\".join(proxy)\n\n @staticmethod\n def freeProxyEleven():\n \"\"\"\n IP海 http://www.iphai.com/free/ng\n :return:\n \"\"\"\n urls = [\n 'http://www.iphai.com/free/ng',\n 'http://www.iphai.com/free/np',\n 'http://www.iphai.com/free/wg',\n 'http://www.iphai.com/free/wp'\n ]\n request = WebRequest()\n for url in urls:\n r = request.get(url)\n proxies = re.findall(r'\\s*?(\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})\\s*?[\\s\\S]*?\\s*?(\\d+)\\s*?',\n 
r.text)\n for proxy in proxies:\n yield \":\".join(proxy)\n\n @staticmethod\n def freeProxyTwelve(page_count=8):\n \"\"\"\n guobanjia http://ip.jiangxianli.com/?page=\n 免费代理库\n 超多量\n :return:\n \"\"\"\n for i in range(1, page_count + 1):\n url = 'http://ip.jiangxianli.com/?page={}'.format(i)\n html_tree = getHtmlTree(url)\n tr_list = html_tree.xpath(\"/html/body/div[1]/div/div[1]/div[2]/table/tbody/tr\")\n if len(tr_list) == 0:\n continue\n for tr in tr_list:\n yield tr.xpath(\"./td[2]/text()\")[0].strip() + \":\" + tr.xpath(\"./td[3]/text()\")[0].strip()\n\n @staticmethod\n def freeProxyThirteen():\n \"\"\"\n 飞蚁代理 http://www.feiyiproxy.com/?page_id=1457\n :return:\n \"\"\"\n url = 'http://www.feiyiproxy.com/?page_id=1457'\n html_tree = getHtmlTree(url)\n tr_list = html_tree.xpath('//div[@class=\"et_pb_code et_pb_module et_pb_code_1\"]//tr[position()>1]')\n for tr in tr_list:\n yield tr.xpath('./td[1]/text()')[0].strip() + ':' + tr.xpath('./td[2]/text()')[0].strip()\n\n @staticmethod\n def freeProxyFourteen():\n \"\"\"\n 旗云代理 http://www.qydaili.com/free/?action=china&page=\n :return:\n \"\"\"\n urls = ['http://www.qydaili.com/free/?action=china&page={}'.format(page) for page in range(1, 4)]\n for url in urls:\n html_tree = getHtmlTree(url)\n tr_list = html_tree.xpath('//table[@class=\"table table-bordered table-striped\"]//tbody//tr')\n for tr in tr_list:\n yield tr.xpath('./td[1]/text()')[0].strip() + ':' + tr.xpath('./td[2]/text()')[0].strip()\n\n @staticmethod\n def freeProxyWallFirst():\n \"\"\"\n 墙外网站 cn-proxy\n :return:\n \"\"\"\n urls = ['http://cn-proxy.com/', 'http://cn-proxy.com/archives/218']\n request = WebRequest()\n for url in urls:\n r = request.get(url)\n proxies = re.findall(r'(\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})[\\w\\W](\\d+)', r.text)\n for proxy in proxies:\n yield ':'.join(proxy)\n\n @staticmethod\n def freeProxyWallSecond():\n \"\"\"\n https://proxy-list.org/english/index.php\n :return:\n \"\"\"\n urls = ['https://proxy-list.org/english/index.php?p=%s' % n for n in range(1, 10)]\n request = WebRequest()\n import base64\n for url in urls:\n r = request.get(url)\n proxies = re.findall(r\"Proxy\\('(.*?)'\\)\", r.text)\n for proxy in proxies:\n yield base64.b64decode(proxy).decode()\n\n @staticmethod\n def freeProxyWallThird():\n urls = ['https://list.proxylistplus.com/Fresh-HTTP-Proxy-List-1']\n request = WebRequest()\n for url in urls:\n r = request.get(url)\n proxies = re.findall(r'(\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})[\\s\\S]*?(\\d+)', r.text)\n for proxy in proxies:\n yield ':'.join(proxy)\n\n\nif __name__ == '__main__':\n from CheckProxy import CheckProxy\n\n CheckProxy.checkGetProxyFunc(GetFreeProxy.freeProxyFourteen)\n # CheckProxy.checkGetProxyFunc(GetFreeProxy.freeProxySecond)\n #\n # CheckProxy.checkAllGetProxyFunc()\n\n","repo_name":"AndrewZStore/private_proxy_pool","sub_path":"ProxyGetter/getFreeProxy.py","file_name":"getFreeProxy.py","file_ext":"py","file_size_in_byte":12603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"36548730197","text":"from unittest import TestCase\n\nfrom unittest.mock import Mock, patch, sentinel as s\nfrom xivo import config_helper\n\nfrom ..controller import Controller\nfrom ..http_server import api, app\n\n\nclass TestController(TestCase):\n def setUp(self):\n self.http_server = (\n patch('wazo_phoned.controller.HTTPServer').start().return_value\n )\n self.plugin_manager = patch('wazo_phoned.controller.plugin_helpers').start()\n self.token_renewer = (\n 
patch('wazo_phoned.controller.TokenRenewer').start().return_value\n )\n self.bus_consumer = (\n patch('wazo_phoned.controller.CoreBusConsumer').start().return_value\n )\n self.status_aggregator = (\n patch('wazo_phoned.controller.StatusAggregator').start().return_value\n )\n config_helper.get_xivo_uuid = Mock(return_value='VALID-UUID')\n\n def tearDown(self):\n patch.stopall()\n\n def test_run_starts_http_server(self):\n config = self._create_config(**{'rest_api': {}, 'debug': s.debug})\n controller = Controller(config)\n controller.run()\n self.http_server.run.assert_called_once_with()\n\n def test_run_loads_plugins(self):\n config = self._create_config(\n **{'enabled_plugins': {'cisco': True, 'aastra': False}}\n )\n\n controller = Controller(config)\n controller.run()\n\n self.plugin_manager.load.assert_called_once_with(\n namespace='wazo_phoned.plugins',\n names=config['enabled_plugins'],\n dependencies={\n 'config': config,\n 'api': api,\n 'app': app,\n 'token_changed_subscribe': self.token_renewer.subscribe_to_token_change,\n 'bus_consumer': self.bus_consumer,\n 'status_aggregator': self.status_aggregator,\n 'phone_plugins': controller.phone_plugins,\n },\n )\n\n def _create_config(self, **kwargs):\n config = dict(kwargs)\n config.setdefault(\n 'auth',\n {\n 'host': 'localhost',\n 'port': 9497,\n 'verify_certificate': False,\n 'service_id': 'phoned',\n 'service_key': '123',\n },\n )\n config.setdefault('dird', {})\n config['dird'].setdefault('host', '')\n config['dird'].setdefault('port', '')\n config.setdefault(\n 'bus',\n {\n 'username': 'guest',\n 'password': 'guest',\n 'host': 'localhost',\n 'port': 5672,\n 'subscribe_exchange_name': 'wazo-headers',\n 'subscribe_exchange_type': 'headers',\n },\n )\n config.setdefault('rest_api', {})\n config['rest_api'].setdefault('authorized_subnets', [])\n config.setdefault('enabled_plugins', {})\n return config\n","repo_name":"wazo-platform/wazo-phoned","sub_path":"wazo_phoned/tests/test_controller.py","file_name":"test_controller.py","file_ext":"py","file_size_in_byte":2932,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"60"} +{"seq_id":"26210473775","text":"import os \r\nimport logging\r\n\r\ndef create_logger(name, log_file, level=logging.INFO):\r\n\r\n folder = log_file.replace(log_file.split('/')[-1], \"\")\r\n if os.path.isdir(folder) is False:\r\n os.mkdir(folder)\r\n\r\n \"\"\"Function setup as many loggers as you want\"\"\"\r\n logging.basicConfig(filename = log_file, level = level, format = '%(asctime)s %(levelname)s %(message)s')\r\n handler = logging.StreamHandler() \r\n\r\n logger = logging.getLogger(name)\r\n logger.addHandler(handler)\r\n\r\n return logger\r\n ","repo_name":"Baron1014/credict_card_recsys","sub_path":"logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"16399145480","text":"import os\r\n\r\nFILE_LOC = \"data.txt\"\r\n\r\n# color related code for better view on terminal\r\ncolors = {\r\n 'DANGER': '\\033[91m',\r\n 'WARNING': '\\033[33m',\r\n 'MILD': '\\033[94m',\r\n 'OK': '\\033[92m',\r\n 'RESET': '\\033[0m',\r\n 'BLUE': '\\033[94m',\r\n 'CYAN': '\\033[96m',\r\n}\r\n\r\nformat = {0: 5, 1:25, 2:10, 3:25, 4:5}\r\n\r\n# function to get the colored text\r\ndef color(text, text_color):\r\n if text_color in colors:\r\n return ''.join([colors[text_color], text, colors['RESET']])\r\n return text\r\n\r\n# the help text\r\nprint(\"\"\"Usage of the 
ToDo App CLI\\nWe support following commands as of now\\n\"\"\")\r\nprint('Command List')\r\nprint(\"\\t\" + color(\"Add item\", 'CYAN') + \" \" + color('--add', 'OK'))\r\n# print(\"\\t\" + color(\"Add due date\", 'CYAN') + \" \" + color('--due', 'OK'))\r\nprint(\"\\t\" + color(\"Remove an Item\", 'CYAN') + \" \" + color('--remove', 'OK'))\r\nprint(\"\\t\" + color(\"Mark item done\", 'CYAN') + \" \" + color('--done', 'OK'))\r\nprint(\"\\t\" + color(\"Mark item not done\", 'CYAN') + \" \" + color('--undone', 'OK'))\r\nprint(\"\\t\" + color(\"View items\", 'CYAN') + \" \" + color('--view', 'OK'))\r\n\r\ncmd = input(\"Enter the command \")\r\n\r\nif cmd:\r\n # Add the item\r\n if cmd == \"--add\":\r\n print(color(\"Add your task in following format\", 'BLUE'))\r\n print(color(\"'Title', 'Due Date', 'Description'\\n\", 'CYAN'))\r\n data = input()\r\n data = data.split(\",\")\r\n data = \", \".join(data)\r\n if data:\r\n with open(FILE_LOC, 'r+') as f:\r\n last_line = f.readlines()[-1]\r\n if last_line:\r\n try:\r\n count = int(last_line.split(\",\")[0])+1\r\n except ValueError:\r\n count = 1\r\n f.write(str(count)+\", \"+data+\", [ ]\"\"\\n\")\r\n print(color(\"Item successfully added!\", 'CYAN'))\r\n # remove the item\r\n elif cmd == \"--remove\":\r\n print(\"\\n\"+color(\"Remove your task by providing the id of the task\", 'BLUE'))\r\n try:\r\n id = int(input(\"Enter the id of the task you want to delete \"))\r\n except ValueError:\r\n raise(\"Please enter the correct id value\")\r\n with open(FILE_LOC, 'r') as f:\r\n lines = f.readlines()\r\n with open(FILE_LOC, 'w+') as f:\r\n count = 0\r\n for line in lines:\r\n if count == 0:\r\n f.write(line)\r\n count += 1\r\n continue\r\n no = int(line.split(\", \")[0])\r\n if no != id:\r\n f.write(line)\r\n count += 1\r\n print(color(\"Item successfully removed!\", 'CYAN')) \r\n # mark the item as done \r\n elif cmd == \"--done\":\r\n print(\"\\n\"+color(\"Mark your task as done by providing the id of the task\", 'BLUE'))\r\n try:\r\n id = int(input(\"Enter the id of the task you want to mark as done \"))\r\n except ValueError:\r\n raise(\"Please enter the correct id value\")\r\n with open(FILE_LOC, 'r') as f:\r\n lines = f.readlines()\r\n with open(FILE_LOC, 'w+') as f:\r\n count = 0\r\n for line in lines:\r\n if count == 0:\r\n f.write(line)\r\n count += 1\r\n continue\r\n no = int(line.split(\", \")[0])\r\n if no == id:\r\n line_text = line.split(\", \")\r\n line_text[-1] = \"[x]\\n\"\r\n line = \", \".join(line_text)\r\n f.write(line)\r\n else:\r\n f.write(line)\r\n count += 1\r\n print(color(\"Item successfully updated!\", 'CYAN')) \r\n # mark the item as undone\r\n elif cmd == \"--undone\":\r\n print(\"\\n\"+color(\"Mark your task as undone by providing the id of the task\", 'BLUE'))\r\n try:\r\n id = int(input(\"Enter the id of the task you want to mark as undone \"))\r\n except ValueError:\r\n raise(\"Please enter the correct id value\")\r\n with open(FILE_LOC, 'r') as f:\r\n lines = f.readlines()\r\n with open(FILE_LOC, 'w+') as f:\r\n count = 0\r\n for line in lines:\r\n if count == 0:\r\n f.write(line)\r\n count += 1\r\n continue\r\n no = int(line.split(\", \")[0])\r\n if no == id:\r\n line_text = line.split(\", \")\r\n line_text[-1] = \"[ ]\\n\"\r\n line = \", \".join(line_text)\r\n f.write(line)\r\n else:\r\n f.write(line)\r\n count += 1\r\n print(color(\"Item successfully updated!\", 'CYAN')) \r\n # list all the todo items\r\n elif cmd == \"--view\":\r\n with open(FILE_LOC, 'r') as f:\r\n lines = f.readlines()\r\n count = 0\r\n for line 
in lines:\r\n data = []\r\n line = line.split(\", \")\r\n for i in range(len(line)):\r\n data.append(line[i].strip().ljust(format[i]))\r\n if count == 0:\r\n print(color(\" \".join(data), 'OK'))\r\n else:\r\n print(color(\" \".join(data), 'RESET'))\r\n count += 1 \r\n else:\r\n print(\"Command not found, please run the script again and check the help\")\r\n","repo_name":"inovizz/python-for-devops","sub_path":"apps/todo-cli/cli0.py","file_name":"cli0.py","file_ext":"py","file_size_in_byte":5462,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"60"} +{"seq_id":"12767091250","text":"import sys\nimport socket\nfrom datetime import datetime\n\n\ndef main():\n run = True\n port = sys.argv[1]\n ip_parent = sys.argv[2]\n parent_port = sys.argv[3]\n ips_file = sys.argv[4]\n parent_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # connecting to parent server\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # connecting to client (to me)\n client_socket.bind(('', int(port)))\n\n ips_list = []\n file = open(ips_file)\n for lines in file:\n lines = lines.strip(\"\\n\")\n lines = lines.split(\",\")\n # the time - alive remaining from the ttl.\n lines.append(datetime(1, 1, 1, 1, 1, 1)) # initial time.\n lines.append(\"0\") # if its 0 - from ips.txt, don't consider ttl\n ips_list.append(lines)\n\n ips_dict = {x[0]: x[1:] for x in ips_list} # convert the list of lists to dictionary\n\n while run:\n flag = 0\n data, addr = client_socket.recvfrom(1024)\n data = data.decode()\n\n if data in ips_dict:\n desired_ip = ips_dict[data][0]\n desired_ttl = ips_dict[data][1]\n ip_and_ttl_reply = desired_ip + \",\" + desired_ttl\n # if the ttl not finished then we can sent the entry to the client, o.w we need to ask parent\n if is_this_entry_relevant(desired_ttl, ips_dict[data][2], ips_dict[data][3]):\n client_socket.sendto(ip_and_ttl_reply.encode(), addr)\n update_file(ips_file, ips_dict)\n else:\n flag = 1\n else:\n flag = 1\n\n if flag == 1: # need to communicate with parent server tp get the entry\n parent_socket.sendto(data.encode(), (ip_parent, int(parent_port)))\n parent_data, parent_add = parent_socket.recvfrom(1024) # receive answer from parent\n parent_data = parent_data.decode()\n # add to dictionary\n first_add, second_add = parent_data.split(',') #ip and ttl accordingly.\n third_add = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')\n fourth_add = \"1\" # because this entry come from parent\n ips_dict[data] = first_add, second_add, third_add, fourth_add\n reply = ips_dict[data][0] + \",\" + ips_dict[data][1]\n client_socket.sendto(reply.encode(), addr)\n update_file(ips_file, ips_dict)\n client_socket.close()\n parent_socket.close()\n\n\ndef is_this_entry_relevant(ttl, remaining_time, is_from_parent):\n if is_from_parent == \"0\":\n # that means this entry is from txt\n return True\n # else - we check the ttl\n remaining_time = datetime.strptime(remaining_time, '%Y-%m-%d %H:%M:%S.%f') # convert str to datetime.\n result = (datetime.now() - remaining_time).total_seconds()\n return result < float(ttl)\n\n\ndef update_file(file_name, dic):\n with open(file_name, 'w') as file:\n file.truncate() # delete all file content.\n for name in dic:\n ip_address = dic[name][0]\n ttl = dic[name][1]\n remaining = dic[name][2]\n parent_flag = dic[name][3]\n if not is_this_entry_relevant(ttl, remaining, parent_flag):\n continue\n str_write = name + \",\" + ip_address + \",\" + ttl + \"\\n\"\n file.write(str_write) \n for name in list(dic.keys()):\n 
ip_address = dic[name][0]\n ttl = dic[name][1]\n remaining = dic[name][2]\n parent_flag = dic[name][3]\n if not is_this_entry_relevant(ttl, remaining, parent_flag):\n del dic[name]\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"YaelSim/ex1-networks","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"15029483394","text":"from fastapi import FastAPI, BackgroundTasks, Path\nimport asyncio\nfrom threading import Thread, Lock\n\nfrom worker import Worker\n\napp = FastAPI()\nworker = Worker()\n\nThread(target=worker.run_worker).start()\n\n@app.get(\"/add\")\nasync def add_item(\n background_tasks: BackgroundTasks, \n num: int = Path(..., title=\"The num might be positive integer\", ge=0, le=1000), \n timeout: int = Path(5, title=\"The num might be in seconds\", ge=0, le=1000)\n):\n background_tasks.add_task(worker.add_task, num, timeout)\n return {\"message\": \"task has added\"}\n\n@app.get(\"/list\")\nasync def show_list():\n return worker._list\n\n@app.get(\"/queue\")\nasync def show_queue():\n return worker._queue","repo_name":"kordimsan/async-api-web-app","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"71950170111","text":"import ipfblock\nimport ioport\nimport ipf.ipfblock.processing\nfrom ipf.ipftype.ipfimage3ctype import IPFImage3cType\n\n\nclass Arithmetic(ipfblock.IPFBlock):\n \"\"\" Abstract arithmetic block, \n \n Works with two images, produces one result image of same size \n \n \"\"\"\n type = \"Arithmetic\"\n category = \"Arithmetic and logic\"\n is_abstract_block = True\n \n def __init__(self):\n super(Arithmetic, self).__init__()\n self.input_ports[\"input_image_1\"] = ioport.IPort(self, IPFImage3cType)\n self.input_ports[\"input_image_2\"] = ioport.IPort(self, IPFImage3cType)\n self.output_ports[\"output_image\"] = ioport.OPort(self, IPFImage3cType)\n \n\n def get_preview_image(self):\n return self.output_ports[\"output_image\"]._value \n \n \n \n\n","repo_name":"anton-golubkov/Garland","sub_path":"src/ipf/ipfblock/arithmetic.py","file_name":"arithmetic.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"34991489356","text":"from collections import defaultdict\nfrom timeit import default_timer as timer\nimport sys\n\n\n# sys.setrecursionlimit(50000)\nwith open(\"knapsack1.txt\", \"r\") as f:\n first_line = [int(i) for i in f.readline().split()]\n W = first_line[0]\n n = first_line[1]\n data = f.readlines()\n v = [0]\n w = [0]\n for line in data:\n l = [int(j) for j in line.split()]\n v.append(l[0])\n w.append(l[1])\n\n\ndef multi_dict(k):\n if k == 1:\n return defaultdict()\n else:\n return defaultdict(lambda: multi_dict(k-1))\n\n\n# direct method\nA = multi_dict(2)\nfor x in range(W+1):\n A[0][x] = 0\nfor i in range(1, n+1):\n if i > 1:\n del A[i-2]\n for x in range(0, W+1):\n if x-w[i] >= 0:\n A[i][x] = max(A[i-1][x], A[i-1][x-w[i]]+v[i])\n else:\n A[i][x] = A[i-1][x]\n\nprint(A[n][W])\n\n\n# def knapSack(Wt, wt, vt, nt):\n# if nt == 0 or Wt == 0:\n# return 0\n# if wt[n-1] > Wt:\n# return knapSack(Wt, wt, vt, n-1)\n# else:\n# return max(\n# vt[n-1]+knapSack(Wt-wt[nt-1], wt, vt, n-1),\n# knapSack(Wt, wt, vt, n-1)\n# )\n#\n#\n# print(knapSack(W, w, v, 
n))","repo_name":"zhzeshu/Algorithms_Coursera","sub_path":"week12.py","file_name":"week12.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"75310281792","text":"import csv\nfrom matplotlib import pyplot as plt\nfrom matplotlib.ticker import MultipleLocator\nfrom matplotlib import rcParams\n\nconfig = {\n \"font.family\": \"serif\",\n # \"font.size\": 24,\n \"mathtext.fontset\": \"stix\",\n \"font.serif\": [\"SimHei\"],\n}\nrcParams.update(config)\n\n\ndef get_data(fliename, i, rounds):\n train_accuracy = []\n with open(\"{}.csv\".format(fliename), \"r\", encoding=\"utf-8\") as f:\n f_read = csv.reader(f)\n num = 0\n for v in f_read:\n\n if len(v) != 0 and num != 0:\n train_accuracy.append(float(v[i]))\n num += 1\n if num == rounds + 1:\n break\n return train_accuracy\n\n\ndef plot_MI(file, order, name, title, save_name, rounds, index='Testing Accuracy'):\n \"\"\"\n Verify the validity of mutual information\n \"\"\"\n test_accuracy = get_data(file, order[0], rounds)\n test_accuracy1 = get_data(file, order[1], rounds)\n test_accuracy2 = get_data(file, order[2], rounds)\n plt.figure()\n\n plt.plot(range(len(test_accuracy)), test_accuracy, marker=\".\", markersize=3,\n label=u'' + name[0], linewidth=1.0, color='r')\n plt.plot(range(len(test_accuracy1)), test_accuracy1, marker=\".\", markersize=3,\n label=u'' + name[1], linewidth=1.0, color='b')\n plt.plot(range(len(test_accuracy2)), test_accuracy2, marker=\".\", markersize=3,\n label=u'' + name[2], linewidth=1.0, color='purple')\n\n plt.tick_params(axis='both', which='major', direction='in', width=1, labelsize=10) # 刻度width=2\n\n ax = plt.gca()\n\n ax.yaxis.set_major_locator(MultipleLocator(0.01))\n ax.xaxis.set_major_locator(MultipleLocator(rounds / 10))\n\n # ax.set_ylim(0.64, 0.76)\n ax.set_ylim(0.92, 0.97)\n ax.set_xlim(0, rounds)\n\n ax.spines['bottom'].set_linewidth(0.6) # 设置底部坐标轴的粗细\n ax.spines['left'].set_linewidth(0.6) # 设置左边坐标轴的粗细\n ax.spines['right'].set_linewidth(0.6) # 设置右边坐标轴的粗细\n ax.spines['top'].set_linewidth(0.6)\n\n plt.legend(loc='lower right', fontsize=10)\n\n plt.text(0.02, 0.94, s='$\\mathrm{(b)}$', fontsize=10, transform=ax.transAxes)\n\n ax.set_ylabel(index, fontsize=10)\n ax.set_xlabel('Communication Rounds', fontsize=10)\n plt.title(title, fontsize=10)\n\n plt.rcParams['savefig.dpi'] = 1000\n plt.rcParams['figure.dpi'] = 1000\n plt.savefig('zn' + save_name, format='jpg', transparent=True)\n plt.show()\n\n\ndef plot(file, order, name, title, save_name, rounds, index='Testing Accuracy'):\n \"\"\"\n Compare the difference weighting schemes(Loss and Avg)\n \"\"\"\n test_accuracy = get_data(file, order[0], rounds)\n test_accuracy1 = get_data(file, order[1], rounds)\n plt.figure()\n\n plt.plot(range(len(test_accuracy)), test_accuracy, marker=\".\", markersize=3, markevery=3,\n label=u'' + name[0], linewidth=1.0, color='r')\n plt.plot(range(len(test_accuracy1)), test_accuracy1, marker=\".\", markersize=3, markevery=3,\n label=u'' + name[1], linewidth=1.0, color='b')\n\n plt.tick_params(axis='both', which='major', direction='in', width=1, labelsize=10) # 刻度width=2\n\n ax = plt.gca()\n\n ax.yaxis.set_major_locator(MultipleLocator(0.02))\n ax.xaxis.set_major_locator(MultipleLocator(rounds / 10))\n\n plt.ylim(0.64, 0.76)\n plt.xlim(0, rounds)\n ax.spines['bottom'].set_linewidth(0.6)\n ax.spines['left'].set_linewidth(0.6)\n ax.spines['right'].set_linewidth(0.6)\n ax.spines['top'].set_linewidth(0.6)\n\n plt.legend(loc='lower 
right', fontsize=10)\n plt.text(0.02, 0.94, s='$\\mathrm{(a)}$', fontsize=10, transform=ax.transAxes)\n\n plt.ylabel(index, fontsize=10)\n plt.xlabel('Communication Rounds', fontsize=13) # 'Communication Rounds'\n plt.title(title, fontsize=10)\n\n # 保存\n plt.rcParams['savefig.dpi'] = 1000\n plt.rcParams['figure.dpi'] = 1000\n plt.savefig('zn' + save_name, format='jpg', transparent=True)\n plt.show()\n\n\nif __name__ == '__main__':\n rounds = 50\n # file = '../exp/fmnist15_w'\n # order = [1, 2, 0]\n # name = ['$\\mathrm{MI}$', '$\\mathrm{Fomo}$', '$\\mathrm{Avg}$']\n # title = r\"$\\mathrm{FMNIST}$数据集, $\\mathrm{15}$个客户机\"\n # save_name = \"fmnist15_w.jpg\"\n\n # file = '../exp/cifar15_w'\n # order = [1, 0, 2]\n # name = ['$\\mathrm{MI}$', '$\\mathrm{Fomo}$', '$\\mathrm{Avg}$']\n # title = r\"$\\mathrm{CIFAR}$-10数据集, $\\mathrm{15}$个客户机\"\n # save_name = \"cifar15_w.jpg\"\n # plot_MI(file, order, name, title, save_name, rounds,'平均测试准确率')\n\n # order = [1, 0]\n # name = ['$\\mathrm{Avg}$', '$\\mathrm{Loss}$']\n\n # CIFAR\n # rounds = 100\n # file = '../exp/avg_w/cifar50_avg_w'\n # title = r\"$\\mathrm{CIFAR}$-10数据集, $\\mathrm{50}$个客户机\"\n # save_name = \"cifar50_avg_w.jpg\"\n\n # rounds = 50\n # file = '../exp/avg_w/cifar15_avg_w'\n # title = r\"$\\mathrm{CIFAR}$-10数据集, $\\mathrm{15}$个客户机\"\n # save_name = \"cifar15_avg_w.jpg\"\n # FMNIST\n\n # rounds = 50\n # file = '../exp/avg_w/fmnist15_avg_w'\n # title = r\"$\\mathrm{FMNIST}$数据集, $\\mathrm{15}$个客户机\"\n # save_name = \"fmnist15_avg_w.jpg\"\n #\n # rounds = 100\n # file = '../exp/avg_w/fmnist50_avg_w'\n # title = r\"$\\mathrm{FMNIST}$数据集, $\\mathrm{50}$个客户机\"\n # save_name = \"fmnist50_avg_w.jpg\"\n # plot(file, order, name, title, save_name, rounds, '平均测试准确率')\n","repo_name":"zhengLabs/pFedMI","sub_path":"plot/plot2.py","file_name":"plot2.py","file_ext":"py","file_size_in_byte":5416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"4160124205","text":"import random\nimport tkinter as tk\n\n\nm = tk.Tk()\n''' \nwidgets are added here \n'''\n\n\ndef roll(y):\n result = random.randint(1, y)\n print(result)\n # return result\n\n\nprint('enter the maximum number')\n\n# print(roll(int(input())))\n\n\nm.title('Dice roller')\nbutton = tk.Button(m, text='Roll', width=25, command=roll(6))\nbutton.pack()\n\nm.mainloop()\n\n\n\n","repo_name":"DuncanSage/dice","sub_path":"dice.py","file_name":"dice.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"3954503384","text":"import zmq\nimport time\nfrom ppp import *\nfrom queue import WRouteQueue\nfrom queue import WRouteItem\nfrom threading import Thread\n\nimport sys\nsys.path.append('../../lib')\nfrom default import WROUTE_ADDR, WROUTE_BACKEND_ADDR\nfrom log import log_err\n\nclass WRoutePoller(Thread):\n def _init_sock(self):\n self._context = zmq.Context(1)\n self._frontend = self._context.socket(zmq.ROUTER)\n self._backend = self._context.socket(zmq.ROUTER)\n self._frontend.bind(WROUTE_ADDR)\n self._backend.bind(WROUTE_BACKEND_ADDR)\n self._poller_backend = zmq.Poller()\n self._poller_backend.register(self._backend, zmq.POLLIN)\n self._poller = zmq.Poller()\n self._poller.register(self._frontend, zmq.POLLIN)\n self._poller.register(self._backend, zmq.POLLIN)\n \n def __init__(self):\n Thread.__init__(self)\n self._queue = WRouteQueue()\n self._init_sock()\n self._active = True\n \n @property\n def name(self):\n return 
self.__class__.__name__\n \n def run(self):\n timeout = time.time() + PPP_HEARTBEAT_INTERVAL\n while self._active:\n if len(self._queue.queue) > 0:\n poller = self._poller\n else:\n poller = self._poller_backend\n socks = dict(poller.poll(PPP_HEARTBEAT_INTERVAL * 1000))\n if socks.get(self._backend) == zmq.POLLIN:\n frames = self._backend.recv_multipart()\n if not frames:\n break\n identity = frames[0]\n self._queue.add(WRouteItem(identity))\n msg = frames[1:]\n if len(msg) == 1:\n if msg[0] not in (PPP_READY, PPP_HEARTBEAT):\n log_err(self, \"invalid message, %s\" % msg)\n else:\n self._frontend.send_multipart(msg)\n \n if time.time() >= timeout:\n for identity in self._queue.queue:\n msg = [identity, PPP_HEARTBEAT]\n self._backend.send_multipart(msg)\n timeout = time.time() + PPP_HEARTBEAT_INTERVAL\n \n if socks.get(self._frontend) == zmq.POLLIN:\n frames = self._frontend.recv_multipart()\n if not frames:\n break\n frames.insert(0, self._queue.pop())\n self._backend.send_multipart(frames)\n \n self._queue.purge()\n \n def stop(self):\n self._active = False\n ","repo_name":"sitian/wing","sub_path":"services/wroute/poller.py","file_name":"poller.py","file_ext":"py","file_size_in_byte":2595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"24675497902","text":"\"\"\"Optimal expert that additionally uses substitution actions.\"\"\"\nfrom typing import Any, Iterable, List, Sequence\n\nimport numpy as np\n\nfrom trans import actions\nfrom trans import optimal_expert\nfrom trans.actions import Copy, Del, Edit, EndOfSequence, Ins, Sub\n\n\nclass EditDistanceAligner(actions.Aligner):\n def __init__(self, del_cost=1.0, ins_cost=1.0, sub_cost=1.0):\n self.del_cost = del_cost\n self.ins_cost = ins_cost\n self.sub_cost = sub_cost\n\n def action_sequence_cost(\n self, x: Sequence[Any], y: Sequence[Any], x_offset: int, y_offset: int\n ) -> float:\n ed = optimal_expert.edit_distance(\n x,\n y,\n del_cost=self.del_cost,\n ins_cost=self.ins_cost,\n sub_cost=self.sub_cost,\n x_offset=x_offset,\n y_offset=y_offset,\n )\n return ed[-1, -1]\n\n def action_cost(self, action: Edit):\n if isinstance(action, Copy) or isinstance(action, EndOfSequence):\n return 0\n if isinstance(action, Del):\n return self.del_cost\n if isinstance(action, Ins):\n return self.ins_cost\n if isinstance(action, Sub):\n return self.sub_cost\n raise ValueError(f\"Unexpected action: {action}!\")\n\n\nclass NoSubstitutionAligner(EditDistanceAligner):\n def __init__(self):\n super().__init__(del_cost=1.0, ins_cost=1.0, sub_cost=1.0)\n\n def action_cost(self, action: Edit):\n if isinstance(action, Sub):\n return np.inf\n return super().action_cost(action)\n\n\nclass OptimalSubstitutionExpert(optimal_expert.OptimalExpert):\n def __init__(\n self, aligner: actions.Aligner, maximum_output_length: int = 150\n ):\n super().__init__(maximum_output_length)\n self.aligner = aligner\n\n def find_valid_actions(\n self,\n x: Sequence[Any],\n i: int,\n y: Sequence[Any],\n prefixes: Iterable[optimal_expert.Prefix],\n ):\n if len(y) >= self.maximum_output_length:\n return {EndOfSequence()}\n input_not_empty = i < len(x)\n attention = x[i] if input_not_empty else None\n actions_prefixes: List[optimal_expert.ActionsPrefix] = []\n for prefix in prefixes:\n prefix_insert = prefix.leftmost_of_suffix\n if prefix_insert is None:\n valid_actions = {EndOfSequence()}\n else:\n valid_actions = {Ins(prefix_insert)}\n if input_not_empty:\n if prefix_insert is not None:\n if prefix_insert == attention:\n 
valid_actions.add(Copy(attention, prefix_insert))\n else:\n valid_actions.add(\n Sub(old=attention, new=prefix_insert)\n )\n valid_actions.add(Del(attention))\n actions_prefix = optimal_expert.ActionsPrefix(\n valid_actions, prefix\n )\n actions_prefixes.append(actions_prefix)\n return actions_prefixes\n\n def roll_out(\n self,\n x: Sequence[Any],\n t: Sequence[Any],\n i: int,\n actions_prefixes: Iterable[optimal_expert.ActionsPrefix],\n ):\n costs_to_go = dict()\n for actions_prefix in actions_prefixes:\n suffix_begin = actions_prefix.prefix.j\n for action in actions_prefix.actions:\n if isinstance(action, Del):\n x_offset = i + 1\n t_offset = suffix_begin\n elif isinstance(action, Ins):\n x_offset = i\n t_offset = suffix_begin + 1\n elif isinstance(action, Sub):\n x_offset = i + 1\n t_offset = suffix_begin + 1\n elif isinstance(action, EndOfSequence):\n x_offset = i\n t_offset = suffix_begin\n else:\n raise ValueError(f\"Unknown action: {action}\")\n sequence_cost = self.aligner.action_sequence_cost(\n x, t, x_offset, t_offset\n )\n action_cost = self.aligner.action_cost(action)\n cost = action_cost + sequence_cost\n if action not in costs_to_go or costs_to_go[action] > cost:\n costs_to_go[action] = cost\n return costs_to_go\n","repo_name":"sigmorphon/2021-task1","sub_path":"baseline/trans/optimal_expert_substitutions.py","file_name":"optimal_expert_substitutions.py","file_ext":"py","file_size_in_byte":4428,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"60"} +{"seq_id":"36306909958","text":"#Import libraries\r\nfrom itertools import permutations\r\nfrom matplotlib import style \r\nimport math\r\nimport matplotlib.pyplot as plt \r\n\r\n####################################################################################\r\n#Declare empty arrays to store x,y coordinates\r\nleast_distance = math.inf\r\nbestx = []\r\nbesty = []\r\nbest_path = []\r\nx = []\r\ny = []\r\n\r\n####################################################################################\r\n#function definitions\r\n\r\n#read and parse data file\r\ndef read_datafile(path):\r\n #Import data file\r\n i = 0\r\n x = []\r\n y = []\r\n with open((path), \"r\") as file:\r\n for line in file:\r\n split_line = line.strip().split(\" \")\r\n \r\n #Track line number to remove header info\r\n if i > 6:\r\n #Populate x,y coordinate pairs into arrays\r\n x.append(float(split_line[1]))\r\n y.append(float(split_line[2]))\r\n #increment line counter\r\n i += 1 \r\n return x, y\r\n\r\n#######################################################\r\n#graph sets of xy coordinates\r\ndef graph_coords(x, y, min_dist):\r\n #Define graph style\r\n style.use('dark_background')\r\n \r\n # plotting the points\r\n plt.plot(x, y,'ro-')\r\n for i in range(len(x) - 1):\r\n plt.annotate(i + 1, (x[i], y[i]), textcoords=\"offset points\", xytext=(0,5), ha = 'center')\r\n \r\n # naming the axes \r\n plt.xlabel('x - axis') \r\n plt.ylabel('y - axis') \r\n \r\n # giving a title to my graph \r\n plt.title((\"Optimum Path Length: \" + str(min_dist)))\r\n \r\n # function to show the plot \r\n plt.pause(.05)\r\n plt.show() \r\n\r\n return\r\n\r\n#######################################################\r\n#Convert the indices to actual point numbers as in the tsp file\r\ndef index2point(path):\r\n out = []\r\n for element in path:\r\n out.append(element+1) \r\n return out\r\n\r\n#######################################################\r\n#Calculate distance for the trip\r\ndef calculate_trip_dist(tripx, tripy):\r\n dist = 0\r\n 
for i in range(len(tripx) - 1):\r\n dist = dist + (math.hypot(tripx[i] - tripx[(i+1)], tripy[i] - tripy[i+1]))\r\n return dist\r\n\r\n####################################################################################\r\n\r\n############\r\n#INPUT \r\n###########\r\n\r\n#data file path\r\nfile_path = str(r'C:\\Users\\burkh\\OneDrive\\Desktop\\AI\\Project1\\datasets\\Random4.tsp')\r\n#used to read and parse the tsp file\r\nx, y = read_datafile(file_path)\r\n\r\n\r\n############\r\n#PROCESSING\r\n###########\r\n\r\n#Create all possible paths\r\nperm = permutations(range(len(x)))\r\n\r\n#iterate through permutations\r\nfor p in perm: \r\n tripx = []\r\n tripy = []\r\n \r\n #record trip using permutations of the range of 0 -> length(x) as pointers\r\n for element in p:\r\n tripx.append(x[element])\r\n tripy.append(y[element])\r\n \r\n #take trip back to starting node \r\n tripx.append(x[p[0]])\r\n tripy.append(y[p[0]])\r\n \r\n #Calculate distance for the trip\r\n temp_dist = 0\r\n temp_dist = calculate_trip_dist(tripx, tripy)\r\n \r\n #store lowest distance and trip\r\n if(temp_dist < least_distance):\r\n least_distance = temp_dist\r\n #Store current optimums\r\n bestx = tripx\r\n besty = tripy\r\n best_path = p\r\n \r\n############\r\n#OUTPUT\r\n###########\r\n \r\n#Create graph of the optimum path \r\ngraph_coords(bestx, besty, least_distance)\r\n\r\n#Send output for report\r\nprint(least_distance)\r\nprint(index2point(best_path)) #Write best path using point numbers","repo_name":"BurkhardtMicah/Artificial-Intelligence","sub_path":"BruteForce_TSP.py","file_name":"BruteForce_TSP.py","file_ext":"py","file_size_in_byte":3538,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"39539041797","text":"#build path dictionary from pathout.txt\n\n'''\nfile = open('./output_data/pathout.txt','rU')\nlines = file.readlines()\nfile.close()\n\npathDict = {}\nfor l in range(1,len(lines)):\n x = lines[l].split()\n pathID = x[0]\n path = x[1]\n pathNodes = x[1].split('|')\n pathTarget = pathNodes[len(pathNodes)-1]\n pathDict[pathID]=pathTarget\n'''\n\nfile = open('./gams_intermediates/results.txt','rU')\nlines = file.readlines()\nfile.close()\n\ntargetSet = set()\nfor l in lines:\n #pathID = l.split()[0].replace('\"','')\n #targetSet.add(pathDict[pathID])\n targetSet.add(l.split()[0].replace('\"',''))\n\noutfile = open('./gams_intermediates/truetargets.gms','w')\n\noutfile.write('Set trueTargets(node)\\t\"targets\"\\n')\noutfile.write('/'+'\\t')\nfor t in targetSet:\n outfile.write(t+'\\n')\noutfile.write('/;\\n')\noutfile.close()\nprint(len(targetSet))\n\nfile = open('./input_data/targetntype2.tab','w')\nfile.write('#node\\tntype=discrete(source|target)\\n')\nfile.write('7025\\tsource\\n')\nfor t in targetSet:\n file.write(t+'\\ttarget\\n')\nfile.close()\n\n","repo_name":"Craven-Biostat-Lab/subnetwork_inference","sub_path":"NR2F1/scripts/target_ntype2_builder.py","file_name":"target_ntype2_builder.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"1158883225","text":"import question_bank\r\nimport time\r\n\r\nq_list=list(question_bank.all_questions)\r\na_list=list(question_bank.all_questions.values())\r\nrounds = 0\r\nscore = 0\r\n\r\ndef quiz():\r\n print(\"Welcome to the Videogame Trivia Quiz\")\r\n for questions in q_list:\r\n question() \r\n print(\"Thank you for playing! 
Your final score is:\", score)\r\n\r\ndef question():\r\n global a_list\r\n global q_list\r\n global rounds\r\n global score\r\n while rounds < 10:\r\n print(q_list[0])\r\n print(a_list[0])\r\n user_answer = input(\"Answer: \").upper()\r\n if user_answer not in [\"A\",\"B\",\"C\",\"D\"]:\r\n print(\"You have entered an invalid option\")\r\n continue\r\n elif user_answer == question_bank.answers[0]:\r\n print(\"Correct!\")\r\n a_list.pop(0)\r\n question_bank.answers.pop(0)\r\n q_list.pop(0)\r\n rounds += 1\r\n score += 1\r\n else:\r\n print(\"Sorry, the correct answer was\", question_bank.answers[0])\r\n a_list.pop(0)\r\n question_bank.answers.pop(0)\r\n q_list.pop(0)\r\n rounds +=1\r\n time.sleep(1)\r\n\r\nif __name__ == \"__main__\":\r\n quiz()","repo_name":"baconeggers/PersonalProjects","sub_path":"Trivia Game.py","file_name":"Trivia Game.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"40344094232","text":"# -*- coding: utf-8 -*-\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\nfrom pathlib import Path\n\nimport pandas as pd\nimport pytest\n\nfrom halfpipe.design import group_design, prepare_data_frame\n\ntsv_str = \"\"\"SubjID\\tSex\\tSite.\\tRecur\\tAD\n11111111\\t0\\t1\\t0\\tNA\n11111112\\t1\\t1\\t1\\tNA\n11111113\\t0\\t1\\t2 \\tNA\n11111114\\t1\\t1\\tNA \\tNA\n11111115\\t0\\t1\\t1\\tNA\n\"\"\"\n\n\n@pytest.mark.parametrize(\n \"tsv_str\",\n [tsv_str, tsv_str.replace(\"\\t\", \",\"), tsv_str.replace(\"\\t\", \" \")],\n)\ndef test_prepare_data_frame(tmp_path: Path, tsv_str: str):\n spreadsheet_path = tmp_path / \"spreadsheet.tsv\"\n\n with open(spreadsheet_path, \"w\") as file_handle:\n file_handle.write(tsv_str)\n\n variables: list[dict] = [\n {\"name\": \"SubjID\", \"type\": \"id\"},\n {\"name\": \"Sex\", \"type\": \"categorical\", \"levels\": [\"0\", \"1\"]},\n {\"name\": \"Site.\", \"type\": \"categorical\", \"levels\": [\"1\"]},\n {\"name\": \"Recur\", \"type\": \"categorical\", \"levels\": [\"0\", \"1\", \"2\"]},\n {\"name\": \"AD\", \"type\": \"categorical\", \"levels\": [\"0\", \"1\", \"2\"]},\n ]\n subjects = [\"11111111\", \"11111112\", \"11111113\", \"11111114\", \"11111115\"]\n\n df = prepare_data_frame(spreadsheet_path, variables, subjects)\n assert df.shape == (5, 4)\n assert df[\"Recur\"].dtype == \"category\"\n assert df[\"Recur\"].notnull().sum() == 4\n assert df[\"AD\"].dtype == \"category\"\n\n\ndef test_group_design():\n data_frame = pd.DataFrame(\n {\n \"subject\": [\"s1\", \"s2\", \"s3\", \"s4\", \"s5\", \"s6\"],\n \"group\": [\"A\", \"A\", \"B\", \"B\", \"C\", \"C\"],\n }\n )\n data_frame = data_frame.set_index(\"subject\")\n data_frame[\"group\"] = data_frame[\"group\"].astype(\"category\")\n\n subjects = [\"s1\", \"s2\", \"s3\", \"s4\"]\n contrasts = [\n dict(type=\"infer\", variable=[\"group\"], levels=[\"A\", \"B\", \"C\"]),\n ]\n\n design = group_design(\n data_frame,\n contrasts,\n subjects,\n )\n assert len(design.regressor_list) == 2\n","repo_name":"HALFpipe/HALFpipe","sub_path":"tests/test_design.py","file_name":"test_design.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"60"} +{"seq_id":"14824526990","text":"from django.urls import path\n\nfrom accounts import views\n\nurlpatterns = [\n path('login', views.login_view, name='login'),\n path('logout', views.logout_view, name='logout'),\n 
path('profile', views.profile_view, name='profile'),\n path('division:', views.division_view, name='division'),\n path('profile:', views.profile_view, name='profile'),\n path('add_profile', views.add_profile, name='add_profile'),\n path('add_division', views.add_division, name='add_division'),\n path('profiles', views.profiles_view, name='profiles'),\n]\n","repo_name":"Qvineox/accessControl_app","sub_path":"accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"2554164615","text":"#import library\r\nfrom tkinter import *\r\nimport random\r\n\r\n\r\n#initialize window\r\nroot = Tk()\r\nroot.geometry('500x500')\r\nroot.resizable(0,0)\r\nroot.title('Rock,Paper,Scissors')\r\nroot.config(bg ='lightskyblue')\r\n\r\n#heading\r\nLabel(root, text = 'Rock, Paper ,Scissors' , font='arial 30 bold', bg = 'white').pack()\r\n\r\n##user choice\r\nuser_take = StringVar()\r\nLabel(root, text = 'choose any one: rock, paper ,scissors' , font='arial 19 bold', bg = 'white').place(x = 20,y=70)\r\nEntry(root, font = 'arial 15', textvariable = user_take , bg = 'antiquewhite2').place(x=140 , y = 130)\r\n\r\n\r\n#computer choice\r\nc_pick = random.randint(1,3)\r\nif c_pick == 1:\r\n c_pick = 'rock'\r\nelif c_pick ==2:\r\n c_pick = 'paper'\r\nelse:\r\n c_pick = 'scissors'\r\n \r\n\r\n#play\r\nResult = StringVar()\r\n\r\ndef play():\r\n user_pick = user_take.get()\r\n if user_pick == c_pick:\r\n Result.set('Tie,both are same')\r\n elif user_pick == 'rock' and c_pick == 'paper':\r\n Result.set('You loose,computer selected paper')\r\n elif user_pick == 'rock' and c_pick == 'scissors':\r\n Result.set('You win,computer selected scissors')\r\n elif user_pick == 'paper' and c_pick == 'scissors':\r\n Result.set('You loose,computer selected scissors')\r\n elif user_pick == 'paper' and c_pick == 'rock':\r\n Result.set('You win,computer selected rock')\r\n elif user_pick == 'scissors' and c_pick == 'rock':\r\n Result.set('You loose,computer selected rock')\r\n elif user_pick == 'scissors' and c_pick == 'paper':\r\n Result.set('You win ,computer selected paper')\r\n else:\r\n Result.set('invalid, Try again')\r\n \r\n \r\n#reset\r\ndef Reset():\r\n Result.set(\"\") \r\n user_take.set(\"\")\r\n\r\n##exit\r\ndef Exit():\r\n root.destroy()\r\n\r\n\r\n#button\r\nEntry(root, font = 'arial 10 bold', textvariable = Result, bg ='antiquewhite2',width = 64,).place(x=25, y = 250)\r\n\r\nButton(root, font = 'arial 13 bold', text = 'PLAY' ,padx =6,bg ='seashell4' ,command = play).place(x=213,y=190)\r\n\r\nButton(root, font = 'arial 13 bold', text = 'RESET' ,padx =6,bg ='seashell4' ,command = Reset).place(x=110,y=310)\r\n\r\nButton(root, font = 'arial 13 bold', text = 'EXIT' ,padx =6,bg ='seashell4' ,command = Exit).place(x=310,y=310)\r\n\r\nroot.mainloop()\r\n","repo_name":"taimoorfahim/Rock-Paper-Scissors_With-UI","sub_path":"rock-paper-scissors.py","file_name":"rock-paper-scissors.py","file_ext":"py","file_size_in_byte":2236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"8285185336","text":"from Scene.scene import *\nimport pygame\nimport ctypes\nfrom Data.rec_data import *\n\nclass GameScene(Scene):\n\n def __init__(self, display, id, w_size, connector, resources):\n\n print(\"Game Scene\")\n \n super().__init__(display, id, w_size, connector, resources)\n \n # Constants\n self.GET_DATA_TIME = 1.25\n self.FONT_PATH = 
r\"..\\Resource\\OpenSans-Regular.ttf\"\n\n self._is_in_intro = True\n self._intro_font = pygame.font.Font(self.FONT_PATH, 40)\n self._info_text = self._intro_font.render(\"You think you can beat my bot? Let's try\", 0, (0, 0, 0))\n\n self._trans_to_next_scene = False\n\n self._data_trainer = DataTrainer()\n self.NUM_OF_INPUT_SAMPLE = DataTrainer.NUM_OF_INPUT_SAMPLE\n self._input_data = []\n\n self._connector = connector\n\n self._ptr_accel = self._connector.get_accel()\n self._ptr_emg_data = self._connector.get_emg_data()\n\n self._clock = pygame.time.Clock()\n self._timer = 0\n self._movement_accel = [(0, 0, 0), (0, 0, 0)]\n self._has_wave = False\n\n self._turn_number = 0\n\n for i in range(len(self._resources)):\n self._resources[i] = pygame.transform.scale(self._resources[i], (125, 125))\n\n self._render_imgs = []\n offset = 450, (self._w_size[1] - self._resources[3].get_rect().size[1] * 2 * 1.2) // 2\n for i in range(6): \n original_pos = (i % 3) * self._resources[3].get_rect().size[0] * 1.2, (i // 3) * self._resources[3].get_rect().size[1] * 1.2\n pos = tuple(map(lambda x, y: x + y, offset, original_pos))\n self._render_imgs.append([self._resources[3], pos])\n\n def update(self):\n\n super().update()\n\n if self._is_in_intro:\n self._timer += self._clock.tick() / 1000\n if self._timer >= 3.0:\n self._timer = 0\n self._is_in_intro = False\n \n elif self._trans_to_next_scene:\n self._timer += self._clock.tick() / 1000\n if self._timer >= 3.0:\n self._isEndScene = True\n\n else:\n if not self._has_wave:\n if self._timer <= 0.25:\n if self._timer == 0:\n temp = (ctypes.c_float * 3).from_address(self._ptr_accel)\n self._movement_accel[0] = [temp[i] for i in range(3)]\n self._timer += self._clock.tick() / 1000\n \n else:\n self._timer = 0\n temp = (ctypes.c_float * 3).from_address(self._ptr_accel)\n self._movement_accel[1] = [temp[i] for i in range(3)]\n ox = self._movement_accel[0][0]\n x = self._movement_accel[1][0]\n # Wave down\n if x - ox < -0.7 and ox != 0:\n self._has_wave = True\n \n else:\n if self._timer <= self.GET_DATA_TIME:\n self._timer += self._clock.tick() / 1000\n temp = (ctypes.c_int * DataTrainer.NUM_OF_SENSORS).from_address(self._ptr_emg_data)\n self._input_data.append([temp[i] for i in range(DataTrainer.NUM_OF_SENSORS)])\n else:\n # Finish delta_time second\n\n self._timer = 0\n self._has_wave = False\n\n print(len(self._input_data))\n\n predict_result = self._data_trainer.predict(self._input_data[:DataTrainer.NUM_OF_INPUT_SAMPLE])\n self._render_imgs[self._turn_number + 3][0] = self._resources[predict_result]\n self._render_imgs[self._turn_number][0] = self._resources[(predict_result + 1) % 3]\n \n if self._turn_number < 2:\n self._turn_number = self._turn_number + 1\n \n else:\n self._trans_to_next_scene = True\n\n self._input_data = []\n \n\n # Event handling\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n print(\"Exiting...\")\n self._connector.close()\n pygame.quit()\n exit()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n # Change Scene to Game Scene:\n self._isEndScene = True\n else: pass\n else: pass\n\n def render(self):\n super().render()\n\n if self._is_in_intro:\n self._display.blit(self._info_text, fix_pos([self._w_size[0] // 2, self._w_size[1] // 2], self._info_text.get_size()))\n\n else:\n self._display.blit(self._resources[4], (120, 300))\n self._display.blit(self._resources[5], (120, 140))\n\n for i in range(6):\n self._display.blit(self._render_imgs[i][0], self._render_imgs[i][1])\n\n \n 
","repo_name":"ngthluu/RPS-Myogame","sub_path":"Scene/game_scene.py","file_name":"game_scene.py","file_ext":"py","file_size_in_byte":5040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"71970817152","text":"alphabet = ('A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z')\ndecryptedWord = []\n\nword = list(str(input('Write the ciphered word: ')).upper().strip())\nfactor = int(input('Write the factor: '))\n\nfor char in word:\n actualIndex = alphabet.index(char)\n newIndex = actualIndex - factor\n if newIndex > len(alphabet):\n print(\"I don't know how it happened, but 'newIndex' > 'alphabet'\")\n exit()\n elif newIndex < 0: newIndex = len(alphabet) - (newIndex * -1)\n decryptedWord.append(alphabet[newIndex])\n\nfor char in decryptedWord: print(char, end='')\n","repo_name":"Icaro-G-Silva/AlmostNothingUseful","sub_path":"CesarCipher/Python/CesarCipher.py","file_name":"CesarCipher.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"10240207395","text":"from flask import Flask,render_template,redirect,request,flash,make_response\nimport os\nimport hashlib\nimport datetime\nfrom cloudant.client import Cloudant\nfrom cloudant import query,result,document\n\n\napp = Flask(__name__)\napp.secret_key = 'lionel'\n\nPORT = os.getenv('PORT', '5000');\n\nUSERNAME=\"\";\nPASSWORD=\"\";\nURL=\"\";\nclient = Cloudant(USERNAME, PASSWORD, url=\"\");\n\nUPLOAD_FOLDER=os.path.dirname(__file__)+\"/tmp/\";\napp.config['UPLOAD_FOLDER']=UPLOAD_FOLDER;\n\n@app.route('/')\ndef home():\n filelist=retrieve_files();\n return render_template(\"upload.html\",files=filelist);\n\n\n@app.route('/upload',methods=['POST'])\ndef upload():\n f = request.files['fileToUpload'];\n file_name = f.filename;\n f.save(os.path.join(app.config['UPLOAD_FOLDER'], file_name));\n contents = '';\n hash_content = hashlib.md5();\n with open(UPLOAD_FOLDER + file_name, 'rb') as content_file:\n contents = content_file.read();\n hash_content.update(contents);\n print(hash_content.hexdigest());\n\n client.connect();\n\n db = client['lionelfilestorage'];\n file_version_query=query.Query(db,selector={'file_name':file_name});\n version_no=-1;\n is_file_exists=False;\n with query.Query.custom_result(file_version_query) as query_result:\n for doc in query_result:\n version_no = doc[\"version_no\"];\n if doc['file_hashvalue'] == hash_content.hexdigest():\n is_file_exists=True;\n break;\n msg =''\n if is_file_exists==True:\n msg='File already exists,cant upload the same file.'\n else :\n version_no=1 if version_no==-1 else version_no+1;\n file_doc = {\n '_id':file_name+str(version_no),\n 'file_name': file_name,\n 'file_content': contents,\n 'version_no': version_no,\n 'file_hashvalue':hash_content.hexdigest(),\n 'last_modified':str(datetime.datetime.now()),\n 'type':'filedoc'\n }\n db.create_document(file_doc);\n msg='File uploaded sucessfully';\n client.disconnect();\n os.remove(UPLOAD_FOLDER + file_name);\n flash(msg);\n return redirect(\"/\");\n\n@app.route('/delete',methods=['GET','POST'])\ndef delete():\n if request.method=='POST':\n msg='';\n try :\n file_name = request.form['file_name'];\n version_no = request.form['version_no'];\n client.connect();\n db = client['lionelfilestorage'];\n my_doc = db[file_name + version_no];\n my_doc.delete();\n msg='File deleted successfully';\n except KeyError:\n msg='File not found for deletion';\n 
except Exception:\n msg='Some issue occured';\n finally:\n client.disconnect();\n flash(msg);\n filelist=retrieve_files();\n return render_template(\"delete.html\",files=filelist);\n\n@app.route('/download',methods=['GET','POST'])\ndef download():\n if request.method=='POST':\n try:\n file_name = request.form['file_name'];\n version_no = request.form['version_no'];\n client.connect();\n db = client['lionelfilestorage'];\n my_doc = db[file_name + version_no];\n response = make_response(my_doc['file_content']);\n response.headers[\"Content-Disposition\"] = \"attachment; filename=\" +file_name;\n msg = 'File downloaded successfully';\n flash(msg);\n return response;\n except KeyError:\n msg='File not found for download';\n except Exception:\n msg='Some issue occured';\n finally:\n client.disconnect();\n flash(msg);\n filelist = retrieve_files();\n return render_template(\"download.html\",files=filelist);\n\ndef retrieve_files():\n list=[];\n client.connect();\n db = client['lionelfilestorage'];\n file_version_query = query.Query(db, selector={'type':'filedoc'});\n with query.Query.custom_result(file_version_query) as query_result:\n for doc in query_result:\n list.append(doc);\n client.disconnect();\n return list;\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=int(PORT))\n #app.run()\n","repo_name":"lionel-ronald-sequeira/IBM-Bluemix2","sub_path":"welcome.py","file_name":"welcome.py","file_ext":"py","file_size_in_byte":4224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"19844268668","text":"import matplotlib.pyplot as plt\nimport torch\nimport numpy as np\nimport scipy.io as sio\nfrom gan import Generator256\n\n\n\ndef load_data(DATA_PATH, device):\n data = sio.loadmat(DATA_PATH)\n \n S = torch.from_numpy(data['S']).type(torch.float32)\n T = torch.from_numpy(data['T']).type(torch.float32)\n C = torch.from_numpy(data['C']).type(torch.float32)\n \n S_true = torch.from_numpy(data['S_true']).type(torch.float32)\n C_true = torch.from_numpy(data['C_true']).type(torch.float32)\n T_true = torch.from_numpy(data['T_true']).type(torch.float32)\n \n # Permutation for compatibility with matlab generated arrays\n T = T.permute(2,0,1).to(device)\n T_true = T_true.permute(2,0,1).to(device)\n try:\n S = S.permute(2,0,1).to(device)\n except:\n S = S.unsqueeze(dim=0)\n \n try:\n S_true = S_true.permute(2,0,1).to(device)\n except:\n S_true = S_true.unsqueeze(dim=0)\n \n \n C = C.permute(1,0).to(device)\n C_true = C_true.permute(1,0).to(device)\n \n return S, C, T, S_true, C_true, T_true\n\n\n \n \ndef load_generator(GAN_PATH, device):\n generator = Generator256()\n \n checkpoint = torch.load(GAN_PATH, map_location=torch.device('cpu'))\n \n generator.load_state_dict(checkpoint['g_model_state_dict'])\n generator.eval()\n return generator.to(device)\n\ndef load_all( datapath, z_dimension, device, visualize_data = True, k=25):\n\n S, C, T, S_true, C_true, T_true = load_data(datapath, device)\n\n # C_true[0:,50:] = 0\n # T_true = get_tensor(S_true.unsqueeze(dim=1), C_true, device)\n\n if visualize_data:\n print(S.shape[0], \"emitters.\")\n fig, ax = plt.subplots(1, C.shape[0]+1, figsize=(5*C.shape[0], 3))\n \n for i in range(C.shape[0]):\n ax[i].plot(C_true[i,:].cpu().detach().numpy())\n \n ax[i+1].imshow(torch.log(T_true[k,...]).cpu() )\n\n # parameters\n R, I, J = S.shape # R is the number of emitters, I,J is the size of the SLF\n K = C.shape[-1] # K is the number of frequency bands\n\n # Initialize the latent vectors for each emitter\n 
Z_init = torch.randn((R, z_dimension), dtype=torch.float32).to(device) \n Z = torch.zeros((R, z_dimension), dtype=torch.float32).to(device) \n\n # zero start \n S_init = torch.zeros( (R, 1, I, J) ).to(device) \n C_init = torch.zeros(C.shape).to(device)\n\n T = T.unsqueeze(dim=1)\n\n return S_init, C_init, T, S_true, C_true, T_true, Z_init, Z","repo_name":"XiaoFuLab/Quantized-Radio-Map-Estimation-BTD-and-DGM","sub_path":"qmc_utils.py","file_name":"qmc_utils.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"60"} +{"seq_id":"41081490560","text":"import numpy as np\nimport pytesseract as pt\nimport cv2\nimport win32gui\nimport mss\nimport mss.tools\nfrom CNN import CNN\nfrom PIL import Image\nfrom PIL import ImageGrab\nfrom pynput.mouse import Button, Controller\nfrom grabscreen import grab_screen\nfrom directkeys import *\nimport time\nfrom grabImageUsingCV2 import fetchScoreFromImage\n\nmouse=Controller()\nclass Dave(object):\n cnn_graph = CNN()\n def __init__(self):\n self.rward=0\n # self.reset()\n self.visit=0\n self.previous_screen=[]\n self.previous_compare_screen=[]\n # self.previous_gold_screen=[]\n self.previous_screen_score = 0\n self.curr_score = 0\n self.prev_unprocessed = 0\n self.flagTrophy = False\n\n def _get_reward(self, action):\n flag1=False\n flag2=False\n flag3=False\n flagTrophy = False\n screen = mss.mss().grab((0,0,1366,768))\n mss.tools.to_png(screen.rgb,screen.size,output='screen.jpg')\n self.visit+=1\n screen = np.asarray(screen)\n if self.visit > 1 :\n self.curr_score = 0\n reward_scrn = mss.mss().grab((310,0,470,47))\n mss.tools.to_png(reward_scrn.rgb,reward_scrn.size,output='reward.jpg')\n # reward_scrn = Image.open(\"reward.jpg\")\n reward_scrn = cv2.imread(\"reward.jpg\")\n self.curr_score = fetchScoreFromImage(reward_scrn) \n prev_score = self.previous_screen_score\n if self.curr_score != self.prev_unprocessed:\n self.prev_unprocessed = self.curr_score\n if self.curr_score < self.previous_screen_score:\n self.curr_score = self.previous_screen_score + 100\n elif(self.curr_score - self.previous_screen_score >= 1000):\n self.flagTrophy = True\n\n diff_in_score = self.curr_score - self.previous_screen_score\n if(diff_in_score >= 50):\n flag1 = True\n self.previous_screen_score = self.curr_score\n else:\n self.curr_score = self.previous_screen_score\n print(\"\\nPrevious Score was %d and Current score is %d \\n\" % (prev_score, self.curr_score))\n\n compare_screen=screen[50:,0:]\n if self.visit>1:\n diff2=cv2.subtract(compare_screen,self.previous_compare_screen)\n b2,g2,r2,a2=cv2.split(diff2)\n #print(cv2.countNonZero(b1) , cv2.countNonZero(g1) , cv2.countNonZero(r1))\n max2=max(cv2.countNonZero(b2) , cv2.countNonZero(g2) , cv2.countNonZero(r2))\n print(\"difference\",max2)\n if max2<1300:\n flag3=True \n self.previous_compare_screen=compare_screen \n \n # lives_screen= screen[55:75,70:150]\n # if self.visit>1:\n # diff1=cv2.subtract(lives_screen,self.previous_screen)\n # b1,g1,r1=cv2.split(diff1) \n # if cv2.countNonZero(b1) > 0 or cv2.countNonZero(g1) > 0 or cv2.countNonZero(r1) > 0:\n # flag2=True \n # self.previous_screen=lives_screen\n \n # flag 2 is for lives\n # if flag2:\n # return -1\n # flag 1 is true if any gem is collected\n if flagTrophy:\n return 1\n if flag1:\n return 0.2\n # flag 3 is true if there is no change in new frame.\n if flag3:\n return -0.1\n ingame_reward = 0.01\n return ingame_reward\n\n def _is_over(self, reward):\n if self.curr_score == 2200:\n 
is_over = True\n else:\n is_over = False\n return is_over\n \n \n def observe(self):\n screen = np.asarray(ImageGrab.grab(bbox=(0,50,1336,668))) \n state = self.cnn_graph.get_image_feature_map(screen)\n return state\n\n def act(self, action):\n keys_to_press = [[uparrow,leftarrow],[leftarrow],[uparrow,leftarrow],[uparrow,rightarrow],[rightarrow],[uparrow,rightarrow]]\n PressKey(keys_to_press[action][0])\n time.sleep(0.3)\n ReleaseKey(keys_to_press[action][0])\n i=1\n if(len(keys_to_press[action]) >=1):\n while i \\\n \\\n '\n\nif __name__ == \"__main__\":\n app.run()\n\n","repo_name":"Miti56/Smart-Alarm-Flask","sub_path":"Prueba Flask.py","file_name":"Prueba Flask.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"39250063234","text":"import pytest\nimport saltext.credstash.sdbs.credstash as credstash_sdb\n\n\n@pytest.fixture\ndef configure_loader_modules():\n module_globals = {\n \"__salt__\": {\"this_does_not_exist.please_replace_it\": lambda: True},\n }\n return {\n credstash_sdb: module_globals,\n }\n\n\ndef test_replace_this_this_with_something_meaningful():\n assert \"this_does_not_exist.please_replace_it\" in credstash_sdb.__salt__\n assert credstash_sdb.__salt__[\"this_does_not_exist.please_replace_it\"]() is True\n","repo_name":"major0/salt-credstash-extension","sub_path":"tests/unit/sdbs/test_credstash.py","file_name":"test_credstash.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"26435355250","text":"#!/usr/bin/env python3\nimport newspaper\nimport os\nimport sys\nimport datetime\nimport traceback\n\nos.chdir(sys.path[0]) \npath = os.getcwd()\n\ndef symlink():\n if not os.path.exists('/tmp/.newspaper_scraper'):\n origem = os.path.join(path,'newspaper_scraper')\n print(origem)\n #os.symlink(origem,'/tmp/.newspaper_scraper/')\n\ndef log(log : str, logsFile):\n now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n logsFile.write(f'{log}[{now}]\\n')\n\ndef create_dirs():\n #create date dirs\n today = datetime.date.today()\n year = today.year\n month = today.month\n day = today.day\n\n outFile = f'{path}/articles/{year}/{month}'\n os.makedirs(outFile,exist_ok=True)\n return outFile + f'/{day}.html'\n\ndef main():\n symlink()\n\n url = 'https://www.jn.pt'\n #url = 'https://www.jornaldeangola.ao/ao/'\n #url = 'https://novojornal.co.ao'\n\n #create log file\n logsFile = open(f'{path}/logs.txt','w')\n\n log('Geting articles',logsFile)\n \n j = newspaper.build(url,memoized_articles=False)\n\n nArticles = j.size()\n log(f'{nArticles} articles',logsFile)\n\n outFile = create_dirs()\n output = open(outFile,'a')\n\n output.write('\\n')\n \n na = 0\n for article in j.articles:\n log(f'Article {na} of {nArticles}',logsFile)\n na+=1\n try:\n ar = newspaper.Article(article.url())\n ar.download()\n ar.parse()\n output.write(\n f'''\n
\n                \n                    {ar.title}\n                \n                \n                    {ar.url}\n                \n                \n                    {ar.authors}\n                \n                \n                    {ar.publish_date}\n                \n                \n                    {ar.tags}\n                \n                \n                    {ar.text}\n                \n            
\n '''\n )\n except Exception:\n print(traceback.format_exc)\n log(traceback.format_exc,logsFile)\n\n output.write('\\n')\n output.close()\n\n log('Finished writing',logsFile)\n\n logsFile.close()\n\nif __name__ == \"__main__\":\n main()","repo_name":"surumkata/spln-2223","sub_path":"TPC6/corpus.py","file_name":"corpus.py","file_ext":"py","file_size_in_byte":2447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"14001908779","text":"import pygame, sys\nfrom pygame.locals import *\n\nimport brain\nfrom brain import *\n\nimport bird\nfrom bird import *\nimport pipe\nfrom pipe import *\n\n# Initializing\npygame.init()\n\n# Setting up FPS\nFPS = 60\nFramePerSec = pygame.time.Clock()\n\n# Creating colors\n# BLUE = (0, 0, 255)\nRED = (255, 0, 0)\n# GREEN = (0, 255, 0)\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\n\n# Other Variables for use in the program\nSCREEN_WIDTH = 400\nSCREEN_HEIGHT = 600\n\n# Create a black screen\nDISPLAYSURF = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\nDISPLAYSURF.fill(BLACK)\npygame.display.set_caption(\"Game\")\n\n# Create player\nplayer = bird.Bird(SCREEN_HEIGHT, DISPLAYSURF, WHITE)\n\n# Create pipe array\npipes = []\npipes.append(pipe.Pipe(SCREEN_HEIGHT, SCREEN_WIDTH, DISPLAYSURF, WHITE))\n\n# Pipe spawner\npipe_delay = 2000 # 2 seconds\nnew_pipe = pygame.USEREVENT + 1\npygame.time.set_timer(new_pipe, pipe_delay)\n\n# Create brain\nbrain = brain.Brain()\n\n# Sprite groups\n# all_sprites = pygame.sprit\n\n# Game Loop\nwhile True:\n # Refresh background\n DISPLAYSURF.fill(BLACK)\n\n # Cycles through all occurring events\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n player.up()\n if event.type == new_pipe:\n pipes.append(pipe.Pipe(SCREEN_HEIGHT, SCREEN_WIDTH, DISPLAYSURF, WHITE))\n # all_sprites.add(pipes[-1])\n\n player.update()\n player.show()\n\n for p in pipes:\n p.update()\n p.show()\n\n # Check if pipe hits bird\n if p.top_rect.colliderect(player.bird_rect) or p.bottom_rect.colliderect(player.bird_rect):\n p.colour = RED\n else:\n p.colour = WHITE\n if player.y == SCREEN_HEIGHT:\n DISPLAYSURF.fill(RED)\n if p.offscreen():\n # pipes.pop(pipes.index(p))\n pipes = [pipes[1]]\n\n pygame.display.flip()\n FramePerSec.tick(FPS)\n","repo_name":"jjacobgreen/projects","sub_path":"flappy/flappy_playable.py","file_name":"flappy_playable.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"5849481958","text":"from rdflib.plugins.serializers.turtle import TurtleSerializer\nfrom rdflib.namespace import Namespace, FOAF, SKOS, RDF\nfrom rdflib import BNode\nimport logging\nimport re\n\nSD = Namespace('http://www.w3.org/ns/sparql-service-description#')\nISOTHES = Namespace('http://purl.org/iso25964/skos-thes#')\n\nlogger = logging.getLogger(__name__)\n\n\nclass OrderedTurtleSerializer(TurtleSerializer):\n\n short_name = \"ots\"\n\n def __init__(self, store):\n super(OrderedTurtleSerializer, self).__init__(store)\n\n # Class order:\n self.class_order = []\n\n # Sort key generators for specific classes :\n self.sorters_by_class = {}\n\n # Default sort key generators\n self.sorters = [\n ('^(.+)$', lambda x: str(x[0])),\n ]\n\n def getSorters(self, class_uri):\n return self.sorters_by_class.get(class_uri, self.sorters)\n\n def getSortKeyFunction(self, class_uri):\n sorters = 
self.getSorters(class_uri)\n\n # Order of instances:\n def sortKeyFn(x):\n # Check if the instances match any special pattern:\n for pattern, func in sorters:\n m1 = re.search(pattern, x)\n if m1:\n return func(m1.groups())\n logging.warning('%s did not match any sorters', x)\n\n return sortKeyFn\n\n def orderSubjects(self):\n seen = {}\n subjects = []\n\n # Find classes not included in self.class_order and sort them alphabetically\n other_classes = [x for x in set(self.store.objects(predicate=RDF.type)) if x not in self.class_order]\n other_classes = sorted(other_classes)\n\n # Loop over all classes\n for class_uri in self.class_order + other_classes:\n\n # Sort the members of each class\n members = sorted(self.store.subjects(RDF.type, class_uri),\n key=self.getSortKeyFunction(class_uri))\n\n for member in members:\n subjects.append(member)\n self._topLevels[member] = True\n seen[member] = True\n\n # Include anything not seen yet\n recursable = [\n (isinstance(subject, BNode),\n self._references[subject], subject)\n for subject in self._subjects\n if subject not in seen\n ]\n\n recursable.sort()\n subjects.extend([subject for (isbnode, refs, subject) in recursable])\n\n return subjects\n","repo_name":"scriptotek/otsrdflib","sub_path":"otsrdflib/ots.py","file_name":"ots.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"60"} +{"seq_id":"43809174699","text":"#\n# Expert distribution for all datasets.\n#\nfrom airflow import DAG\nfrom airflow.utils.dates import days_ago\nfrom datetime import timedelta\nfrom ala import cluster_setup, ala_config, ala_helper\nfrom ala.ala_helper import step_bash_cmd, s3_cp, get_default_args\n\nDAG_ID = 'Expert_distribution'\n\nSPARK_STEPS = [\n s3_cp(\"a. Copy IndexRecord to S3\", f\"s3://{ala_config.S3_BUCKET_AVRO}/pipelines-all-datasets/index-record\", \"hdfs:///pipelines-all-datasets/index-record\"),\n s3_cp(\"b. Copy existing Outlier Distribution Cache\", f\"s3://{ala_config.S3_BUCKET_AVRO}/pipelines-outlier/\", \"hdfs:///pipelines-outlier/\", action_on_failure=\"CONTINUE\"),\n step_bash_cmd(\"c. Outlier detection\", f\" la-pipelines outlier all --cluster\"),\n step_bash_cmd(\"d. Delete AVRO output on S3\", f\" sudo -u hadoop aws s3 rm s3://{ala_config.S3_BUCKET_AVRO}/pipelines-outlier/all --recursive\"),\n step_bash_cmd(\"e. Delete temp SCP directories created by s3-dist-cp\", f\" sudo -u hadoop hdfs dfs -rm -f /pipelines-outlier/pipelines-outlier_$folder$\"),\n s3_cp(\"f. 
Copy Outliers results to S3\", \"hdfs:///pipelines-outlier\", f\"s3://{ala_config.S3_BUCKET_AVRO}/pipelines-outlier\")\n]\n\nwith DAG(\n dag_id=DAG_ID,\n description=\"Expert distribution\",\n default_args=get_default_args(),\n dagrun_timeout=timedelta(hours=4),\n start_date=days_ago(1),\n schedule_interval=None,\n tags=['emr', 'all-datasets']\n) as dag:\n cluster_setup.run_large_emr(dag, SPARK_STEPS, \"bootstrap-index-actions.sh\")\n","repo_name":"AtlasOfLivingAustralia/pipelines-airflow","sub_path":"dags/expert_distribution_dag.py","file_name":"expert_distribution_dag.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"18395881208","text":"#https://leetcode.com/problems/unique-paths/discuss/22953/Java-DP-solution-with-complexity-O(n*m)\nclass Solution:\n def uniquePaths(self, m: int, n: int) -> int:\n #unique paths using 2-D array\n dp = [[0] * (n) for _ in range(m)]\n for i in range(m):\n dp[i][0] = 1\n for j in range(n):\n dp[0][j] = 1\n for i in range(1, m):\n for j in range(1, n):\n dp[i][j] = dp[i][j-1] + dp[i-1][j]\n\n return dp[m-1][n-1]\n \n def printdp(self, dp):\n for i in (dp):\n print(i)\n \n\n\ns = Solution()\nres = s.uniquePaths(3,2)\nprint(res)\n ","repo_name":"ArunAaryan/LeetCodePlaylist","sub_path":"LeetCodeMedium/UniquePaths.py","file_name":"UniquePaths.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"9689822798","text":"import sys\nimport csv\nimport json\nimport argparse\nimport pandas as pd\nimport numpy as np\n\nimport serial\nfrom os import path\nfrom io import StringIO\n\nfrom PyQt5.Qt import *\nfrom pyqtgraph import PlotWidget\nfrom PyQt5 import QtCore\nimport pyqtgraph as pq\n\nimport threading\nimport time\n\nDATA_COLUMNS_NAMES = [\"type\", \"mac\", \"rssi\", \"channel\", \"secondary channel\" ,\"sig_mode\", \"bandwith\", \"STBC\", \"Length(bytes)\"]\n\n\nclass csi_data_graphical_window(QWidget):\n def __init__(self):\n super().__init__()\n\n self.resize(1280, 720)\n self.plotWidget_ted = PlotWidget(self)\n self.plotWidget_ted.setGeometry(QtCore.QRect(0, 0, 1280, 720))\n\n self.plotWidget_ted.setYRange(-20, 100)\n self.plotWidget_ted.addLegend()\n\n \n self.curve_list = []\n\n self.timer = pq.QtCore.QTimer()\n self.timer.timeout.connect(self.update_data)\n self.timer.start(100)\n\n def update_data(self):\n return\n\ndef csi_data_read_parse(port: str, csv_writer):\n ser = serial.Serial(port=port, baudrate=921600,\n bytesize=8, parity='N', stopbits=1)\n if ser.isOpen():\n print(\"open success\")\n else:\n print(\"open failed\")\n return\n\n while True:\n strings = str(ser.readline())\n if not strings:\n break\n\n strings = strings.lstrip('b\\'').rstrip('\\\\r\\\\n\\'')\n index = strings.find('CSI_RIG')\n\n if index == -1:\n continue\n\n csv_reader = csv.reader(StringIO(strings))\n csi_data = next(csv_reader)\n\n #print(csi_data)\n #print(len(csi_data))\n\n if csi_data[1] != \"a6:6f:82:3d:89:99\":\n continue\n\n if len(csi_data) != len(DATA_COLUMNS_NAMES):\n print(\"element number is not equal\")\n continue\n\n print(csi_data)\n\n csv_writer.writerow(csi_data)\n\n\n ser.close()\n return\n\n\nclass SubThread (QThread):\n def __init__(self, serial_port, save_file_name):\n super().__init__()\n self.serial_port = serial_port\n\n save_file_fd = open(save_file_name, 'w')\n self.csv_writer = csv.writer(save_file_fd)\n 
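# write the header row first so the CSV file is self-describing (descriptive comment added for clarity)\n        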
self.csv_writer.writerow(DATA_COLUMNS_NAMES)\n\n    def run(self):\n        csi_data_read_parse(self.serial_port, self.csv_writer)\n\n    def __del__(self):\n        self.wait()\n\n\nif __name__ == '__main__':\n    if sys.version_info < (3, 6):\n        print(\" Python version should >= 3.6\")\n        exit()\n\n    parser = argparse.ArgumentParser(\n        description=\"Read CSI data from serial port and display it graphically\")\n    parser.add_argument('-p', '--port', dest='port', action='store', required=True,\n                        help=\"Serial port number of csv_recv device\")\n    parser.add_argument('-s', '--store', dest='store_file', action='store', default='./csi_recv_rigor.csv',\n                        help=\"Save the data printed by the serial port to a file\")\n\n    args = parser.parse_args()\n    serial_port = args.port\n    file_name = args.store_file\n\n    app = QApplication(sys.argv)\n\n    subthread = SubThread(serial_port, file_name)\n    subthread.start()\n\n    window = csi_data_graphical_window()\n    window.show()\n\n    sys.exit(app.exec())\n","repo_name":"orkunispir/wifi-csi-sniffer","sub_path":"tools/csi_data_read.py","file_name":"csi_data_read.py","file_ext":"py","file_size_in_byte":3193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"71951843071","text":"import time\n\n# Relationship between speed, distance and time:\n# time = distance / speed\n\ndistance = input(\"What is the total amount of metres you would like to travel:\\n\")\nspeed = input(\"What is the estimated average speed? Give answer in metre/second:\\n\")\n\nt = float(distance) / float(speed)\nformatted_time = time.strftime(\"%H:%M:%S\", time.gmtime(t))\n\nprint(\n    f\"It will take approximately {formatted_time} (hh,mm,ss) to arrive at your destination.\"\n)\n","repo_name":"Anton-Ca/Courses","sub_path":"Python/Pyth_intro/l03/t5.py","file_name":"t5.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"5046406506","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n- Two-pointer (shakutori) technique\n- Well, it still gets WA though.\n\"\"\"\n\nimport sys\nfrom collections import deque, Counter, defaultdict\nfrom math import sqrt, hypot, factorial, pi, sin, cos, radians\nfrom heapq import heappop, heappush, heapify, heappushpop\nfrom bisect import bisect_left, bisect_right\nfrom itertools import permutations, product, combinations, combinations_with_replacement\nfrom operator import itemgetter, mul\nfrom copy import deepcopy\nfrom functools import reduce, partial\n\ndef input(): return sys.stdin.readline().strip()\nsys.setrecursionlimit(10 ** 9)\nINF = float('inf')\nMOD = 10 ** 9 + 7\n\nS = input()\nN = len(S)\n\nl,r = 0,1\nmx = 0\n# The outer loop advances the left pointer\nwhile l < N:\n    # The inner loop advances the right pointer as long as the condition holds\n    while r < N and S[l] != S[r]:\n        r += 1\n    # Track the maximum gap until the same character appears\n    if r < N:\n        mx = max(r - l - 1, mx)\n    if l == r:\n        # If left catches up with right, advance right together with left\n        r += 1\n    l += 1\n\nif mx == 0:\n    if N % 2 == 0:\n        print('Second')\n    else:\n        print('First')\nelse:\n    if mx % 2 == 0:\n        print('First')\n    else:\n        print('Second')\n","repo_name":"Coki628/kyopro_submissions","sub_path":"AtCoder/ABC048d.py","file_name":"ABC048d.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"41582360579","text":"__author__ = \"soopercool101\"\r\n__version__ = \"1.0.0\"\r\n\r\nfrom BrawlCrate.API import *\r\nfrom BrawlLib.SSBB.ResourceNodes import *\r\nfrom BrawlCrate.NodeWrappers import PluginWrapper\r\nfrom System.Windows.Forms import ToolStripMenuItem\r\nimport struct\r\n\r\n# 
Wrapper for UVs\r\nclass UVWrapper(PluginWrapper):\r\n # This function returns a new instance of the class.\r\n # Necessary in order to properly call necessary functions\r\n def GetInstance(self):\r\n return UVWrapper()\r\n\r\ndef shiftUV_handler(sender, event_args):\r\n # Gather the required inputs from the user\r\n scaleX = BrawlAPI.UserFloatInput(\"X Scale multiplier to apply to the UVs (Applied first)\", \"X-Scale\", 1.0)\r\n scaleY = BrawlAPI.UserFloatInput(\"Y Scale multiplier to apply to the UVs (Applied first)\", \"Y-Scale\", 1.0)\r\n transX = BrawlAPI.UserFloatInput(\"X Translation offset to apply to the UVs (Applied second)\", \"X-Translation\", 0.0)\r\n transY = BrawlAPI.UserFloatInput(\"Y Translation offset to apply to the UVs (Applied second)\", \"Y-Translation\", 0.0)\r\n shiftUV(scaleX, scaleY, transX, transY)\r\n\r\ndef shiftUV(scaleX, scaleY, transX, transY):\r\n i = 0\r\n # Apply the modifiers to each UV point\r\n for vec2 in BrawlAPI.SelectedNode.Points:\r\n vec2.X = vec2.X * scaleX\r\n vec2.X = vec2.X + transX\r\n vec2.Y = vec2.Y * scaleY\r\n vec2.Y = vec2.Y + transY\r\n # Due to some quirk, necessary to set this manually to ensure saving works\r\n BrawlAPI.SelectedNode.Points[i] = vec2\r\n i += 1\r\n # Due to some quirk, necessary to set this manually to ensure saving works\r\n BrawlAPI.SelectedNode.Points = BrawlAPI.SelectedNode.Points\r\n # Flag node as needing saving\r\n BrawlAPI.SelectedNode.SignalPropertyChange()\r\n\r\n# Create an instance of our wrapper class and add it to the API wrapper cache\r\nwrapper = UVWrapper()\r\nBrawlAPI.AddWrapper[MDL0UVNode](wrapper)\r\n# Add a context menu item to our new wrapper\r\nBrawlAPI.AddContextMenuItem(UVWrapper, ToolStripMenuItem(\"Shift UVs\", None, shiftUV_handler))\r\n","repo_name":"soopercool101/BrawlCrateSamplePlugins","sub_path":"Loaders/Utility/Shift UVs.py","file_name":"Shift UVs.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"34102075826","text":"import os, json\nfrom langchain.llms import Databricks, OpenAI\nfrom dotenv import load_dotenv\nfrom langchain.prompts import ChatPromptTemplate\n\nclass RoleBasedAdvisor:\n def __init__(self, language_model='openai', config_file_path=None):\n self.template_string = \"\"\"{role_name} \\\nRespond to the user question that is delimited in triple backticks \\\nwith thoughtful and concise instructions that the user can easily implement in their \\\nday to day life.\nuser_question: ```{user_question}```\n\"\"\"\n self.role_description = {}\n self.role_description['doctor'] = \"\"\"You are a doctor (primary care physician) with 25 years of experience practicing in California. \\\nYou emphasize the importance of a healthy lifestyle that includes nutritious food and vigorous exercise.\"\"\"\n self.role_description['father'] = \"\"\"You are the user's father and cares deeply about their well being. You emphasize the importance of \\\nworking hard and getting a good education.\"\"\"\n self.role_description['business_partner'] = \"\"\"You are the user's business partner. You share a mutual interest in the success of your \\\ncompany. You emphasize actions that will maximize the long term viability and profitability of the company and achieving its mission.\"\"\"\n self.role_description['career_coach'] = \"\"\"You are the user's manager at work. You see great potential in the user to progress in their \\\ncareer. 
You emphasize actions that maximize the user's chances for a promotion and continue their trajectory to become a senior executive.\"\"\"\n        self.user_question = \"I want to live a life that maximizes happiness and creates a positive impact on the world. What \\\nare the top 5 things I should do in the next week towards these goals?\"\n\n        self.language_model = language_model\n        if config_file_path is not None:\n            with open(config_file_path) as f:\n                self.config = json.load(f)\n        self.llm = self.get_llm(language_model)\n\n    def get_llm(self, language_model='openai'):\n        load_dotenv()\n        if 'DATABRICKS_RUNTIME_VERSION' in os.environ and language_model == 'openai': # Running in Databricks\n            if 'OPENAI_API_KEY' not in os.environ:\n                os.environ['OPENAI_API_KEY'] = dbutils.secrets.get('vbalasu', 'openai-databricks')\n\n        if language_model == 'openai':\n            llm = OpenAI(temperature=0.0, max_tokens=500)\n            return llm\n        elif language_model == 'llamav2':\n            llm = Databricks(cluster_driver_port=self.config['port'], cluster_id=self.config['cluster_id'],\n                model_kwargs={'temperature':0.0, 'max_new_tokens':500})\n            return llm\n        else:\n            print('Unknown language model')\n            return False\n    \n    def answer_as_role(self, user_question, role, verbose=False):\n        prompt_template = ChatPromptTemplate.from_template(self.template_string)\n        prompt = prompt_template.format_prompt(role_name=role, user_question=user_question)\n        question = prompt.messages[0].content\n        if verbose:\n            print('/*\\n', f'LANGUAGE MODEL: {self.language_model}\\n\\n', question, '*/\\n\\n')\n        return self.llm(question)","repo_name":"vbalasu/databricks-goodies","sub_path":"role-based-advisor/advisor.py","file_name":"advisor.py","file_ext":"py","file_size_in_byte":3201,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"}
{"seq_id":"2070378967","text":"# tuple from an iterator object\nL = [1, 2, 3]\niterator = iter(L)\nt = tuple(iterator)\nprint(t)\n\n# genexps and listcomps\n\"\"\"\nGenerator expressions are surrounded by parentheses (“()”) and list comprehensions are surrounded by square brackets (“[]”)\n\"\"\"\nxs = [4, 6, 7, 9, 10]\nxss = (x * 2 for x in xs)\nprint(xss) # xss is an iterobj\n\nxss_list = [x*2 for x in xs]\nprint(xss_list) # xss_list is a list\n\n# Generator functions and yields\n\ndef agen():\n    i = 10\n    yield [i, i, i]\n\na, b, c = next(agen())  # unpack the single yielded list (the generator yields once, so next() is needed)\n","repo_name":"nonomino/bad_python","sub_path":"noob/functional.py","file_name":"functional.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"45928289609","text":"import time\nimport numpy as np\n\nfrom rich.console import Console\nfrom rich import print\n\nimport cro_dt.VectorTree as vt\nfrom cro_dt.sup_configs import get_config, load_dataset\nconsole = Console()\n\nif __name__ == \"__main__\":\n    # X, y = load_dataset(\"breast_cancer\")\n    # X = np.array([[-3, -2], [-4, -2], [3, 3]])\n    # y = np.array([1, 1, 0])\n    W = np.array([[2, 3, -4], [5, -5, -4], [6, -2, 3]])\n    X = np.array([[-3, -2], [4, -2], [3, 3], [-3, 1], [-2, -2], [-4, -4]])\n    y = np.array([1, 1, 0, 1, 0, 0])\n    # W = np.array([[2, 3, -4], [5, -5, -4], [4, 0, 2], [2, 0, 1], [6, -2, 3], [1, 3, 2], [3, 2, 4]])\n    depth = int(np.log2(len(W) + 1))\n    n_classes = np.max(y) + 1\n\n    M = vt.create_mask_dx(depth)\n    X_ = np.vstack((np.ones(len(X)).T, X.T)).T\n    Y_ = np.tile(y, (len(W) + 1, 1))\n\n    start_time = time.time()\n    for _ in range(10000):\n        acc1, labels1 = vt.dt_matrix_fit(X, y, W, -M, X_, Y_ + vt.MAGIC_NUMBER)\n    end_time = 
time.time()\n\n    console.rule(\"[red]Original vector tree implementation[/red]\")\n    print(f\"Labels: {labels1}\")\n    print(f\"Accuracy: {acc1}\")\n    print(f\"Elapsed time: {'{:.3f}'.format(end_time - start_time)} seconds\")\n\n    start_time = time.time()\n    for _ in range(10000):\n        acc2, labels2 = vt.dt_matrix_fit_dx(X, y, W, depth, n_classes, X_, Y_, M)\n    end_time = time.time()\n\n    console.rule(\"[red]New vector tree implementation[/red]\")\n    print(f\"Labels: {labels2}\")\n    print(f\"Accuracy: {acc2}\")\n    print(f\"Elapsed time: {'{:.3f}'.format(end_time - start_time)} seconds\")\n\n    start_time = time.time()\n    for _ in range(10000):\n        acc2, labels2 = vt.dt_matrix_fit_dx2(X, y, W, depth, n_classes, X_, Y_, M)\n    end_time = time.time()\n\n    console.rule(\"[red]Vector tree implementation but using numpy methods instead of pure matrix stuff[/red]\")\n    print(f\"Labels: {labels2}\")\n    print(f\"Accuracy: {acc2}\")\n    print(f\"Elapsed time: {'{:.3f}'.format(end_time - start_time)} seconds\")\n    \n    # solution.update_leaves_by_dataset(X_train, y_train)\n    # y_pred = solution.predict_batch(X_train)\n    # accuracy = np.mean([(1 if y_pred[i] == y_train[i] else 0) for i in range(len(X_train))])\n    # return accuracy","repo_name":"vgarciasc/CRO-DT","sub_path":"cro_dt/time_test.py","file_name":"time_test.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"20899542216","text":"import asyncio\n\n\nasync def count():\n    print(\"One\")\n    await asyncio.sleep(1)\n    print(\"Two\")\n\n\nasync def main():\n    await asyncio.gather(count(), count(), count())\n\n\nif __name__ == \"__main__\":\n    import time\n\n    s = time.perf_counter()\n    asyncio.run(main())\n    elapsed = time.perf_counter() - s\n    print(f\"{__file__} executed in {elapsed:0.2f} seconds.\")\n\n\n#local branch 2 development branch\n\n\n# trying out git rebase\n# second commit for trying out rebase","repo_name":"axecalibur-dev/AsycnIOPython","sub_path":"async.py","file_name":"async.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"24886324124","text":"# encoding:utf-8\n\n# -*- Python singleton pattern example -*-\n\nclass Singleton(object):\n    def __new__(cls):\n        if not hasattr(cls, 'instance'):\n            cls.instance = super(Singleton, cls).__new__(cls)\n        return cls.instance\n\nif __name__ == '__main__':\n    a = Singleton() \n    b = Singleton() \n    print(id(a))\n    print(id(b))\n","repo_name":"tanteng/learn-python","sub_path":"singleton.py","file_name":"singleton.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","stars":358,"dataset":"github-code","pt":"60"}
{"seq_id":"14780755399","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2022 Dec 5\n\n@author: kblackw1\n\"\"\"\n\n############ reward ################\nfrom BanditTaskParam import params,rwd,validate_T,validate_R\n\nrwd['reward']=(rwd['reward']+2*rwd['base']+2*rwd['partial']) #for equivalence to 3 step task?\nact={'left':0,'right':1} \nstates={'loc':{'Pport':1},\n        'tone':{'6kHz':6}} \nparams['state_units']={'loc':False,'tone':False} #Try false/true\nstart=(states['loc']['Pport'],states['tone']['6kHz']) #used many times\nenv_params={'start':start}\nloc=states['loc'] #used only to define R and T\ntone=states['tone'] #used only to define R and T\nparams['events_per_trial']=1\n\nRbandit={};Tbandit={} #dictionaries to improve readability/prevent mistakes\nprwdR=0.8; prwdL=0.5 #initial values. 
These change with each block of trials\n\nTbandit={start:{act['left']:[(start,1)],act['right']:[(start,1)]}}\nRbandit={start:{act['left']:[(rwd['reward'],prwdL),(rwd['base'],1-prwdL)], \\\n                act['right']: [(rwd['reward'],prwdR),(rwd['base'],1-prwdR)]}}\n\nif __name__== '__main__':\n    ######## Make sure all needed transitions have been created\n    validate_T(Tbandit,msg='validate bandit T')\n    validate_R(Tbandit,Rbandit,msg='validate bandit R')\n","repo_name":"neurord/TD2Q","sub_path":"Bandit1stepParam.py","file_name":"Bandit1stepParam.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"20736283493","text":"# print even numbers\n# for i in range(2,11,2):\n#     print(i,end=\" \")\n\n# sum of even numbers\n# sum=0\n# for i in range(2,10,2):\n#     print(i)\n#     sum=sum+i\n# print(\"sum is \",sum)\n#\n# the sum of even numbers up to n\n# sum=0\n# n=int(input(\"enter the limit=\"))\n# for i in range(2,n,2):\n#     print(i)\n# sum=sum+i\n# print(\"sum is\",sum)\n# the sum of odd numbers up to n\nsum=0\nn=int(input(\"enter the limit=\"))\nfor i in range(1,n,2):\n    print(i)\n    sum = sum + i\nprint(\"sum is\", sum)\n\n# multiplication table of a number\n# n=int(input(\"enter the number=\"))\n# for i in range(1,11,1):\n#     print(i,\"* \",n,\"=\",i*n)\n\n","repo_name":"Muhammedshaijal/python-works","sub_path":"even numbers.py","file_name":"even numbers.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"39533758637","text":"from tkinter import *\r\njanela = Tk()\r\n#creates the window object\r\njanela.title('Cainho')\r\n#title of the opened window\r\njanela.geometry('280x218')\r\n#fixes the window resolution\r\nlabel = Label(janela, text='Fontenelle perde a OBR')\r\nlabel.grid(column = 18,row = 8)\r\njanela.mainloop()\r\n#keeps the window open ","repo_name":"Craveir0/Program_Fontnelson_FtCraveiro","sub_path":"Aula_1 - FontCrav.py","file_name":"Aula_1 - FontCrav.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"23455462014","text":"\"\"\"\r\nTakes 12 images and outputs copies that each have a calendar-month table pasted onto them.\r\n\r\nUsage: calendize.py <year> <input dir> <output dir> [options]\r\n\r\nThe options are:\r\n\r\n[-a --alpha - The transparency of the calendar (0..1)]\r\n[-b --bottom - The bottom margin of the calendar (by default is auto-calculated)]\r\n[-c --borderColor - The color of the table borders]\r\n[--dpi] - DPI to render (by default is auto-calculated from the image size)\r\n[-h --help]\r\n[-m --month - Output for one month only (1..12)]\r\n[-r --right - The right margin of the calendar (by default is auto-calculated)]\r\n[-t --textColor - The color of the text]\r\n[-v --verbose - Verbose output]\r\n\r\nExamples:\r\ncalendize.py 2022 my-12-images temp\r\ncalendize.py 2022 my-12-images temp --dpi 150\r\ncalendize.py 2022 my-12-images temp --dpi 150 -c blue\r\ncalendize.py 2022 my-12-images temp --dpi 150 --borderColor blue --alpha 0.7\r\n\"\"\"\r\nfrom PIL import Image\r\nfrom os.path import isfile, join\r\nfrom os import listdir\r\nfrom calendar import month\r\nfrom optparse import OptionParser\r\nimport os\r\nfrom pathlib import Path\r\n\r\nimport _date_utils\r\nimport _figure_renderer\r\nimport service_auto_dpi_calculator\r\n\r\n# usage() - prints out the usage text, from the top of this file :-)\r\n\r\n\r\ndef usage():\r\n    
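# __doc__ here is the usage text defined at the top of this module (descriptive comment added)\r\n    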
print(__doc__)\r\n\r\n\r\n# optparse - parse the args\r\nparser = OptionParser(\r\n    usage=__doc__)\r\nparser.add_option('-a', '--alpha', dest='alpha', default=1.0,\r\n                  help=\"The transparency of the calendar (0..1). Defaults to 1 (fully opaque).\")\r\nparser.add_option('-b', '--bottom', dest='bottom_margin', default=50,\r\n                  help=\"The bottom margin of the calendar. By default is auto-calculated.\")\r\nparser.add_option('-c', '--borderColor', dest='borderColor', default=\"black\",\r\n                  help=\"The color of the table borders - for example black or red or blue\")\r\nparser.add_option('--dpi', dest='dpi', default=None,\r\n                  help=\"The DPI to render (Dots Per Inch). By default is auto-calculated for image size.\")\r\nparser.add_option('-m', '--month', dest='month', default=-1,\r\n                  help=\"Output for one month only (1..12)\")\r\nparser.add_option('-r', '--right', dest='right_margin', default=50,\r\n                  help=\"The right margin of the calendar. By default is auto-calculated.\")\r\nparser.add_option('-t', '--textColor', dest='textColor', default=\"black\",\r\n                  help=\"The color of the text - for example black or red or blue\")\r\nparser.add_option('-v', '--verbose', dest='is_verbose',\r\n                  action='store_const',\r\n                  const=True, default=False,\r\n                  help=\"Turn on verbose output\")\r\n\r\n(options, args) = parser.parse_args()\r\nif (len(args) != 3):\r\n    usage()\r\n    exit(2)\r\n\r\nYEAR = int(args[0])\r\nINPUTDIR = args[1]\r\nOUTDIR = args[2]\r\n\r\ndpi_options = options.dpi\r\nif options.dpi is not None:\r\n    dpi_options = int(options.dpi)\r\n\r\n\r\ndef is_supported_file_type(filepath):\r\n    file_extensions = [\".jpg\", \".jpeg\", \".png\"]\r\n    return any(map(lambda ext: filepath.lower().endswith(ext), file_extensions))\r\n\r\n\r\ndef get_input_files(input_dir):\r\n    # Assumption: files are sorted already - this allows user to decide which is for which month...\r\n    files = [f for f in listdir(input_dir) if isfile(\r\n        join(input_dir, f)) and is_supported_file_type(f)]\r\n    files = map((lambda f: join(input_dir, f)), files)\r\n    return list(files)\r\n\r\n\r\ndef get_image_dimensions(image_file_path):\r\n    image = Image.open(image_file_path)\r\n    image_width, image_height = image.size\r\n    return (image_width, image_height)\r\n\r\n\r\ndef calculate_bottom_right_offset_for(calender_image, background_image, bottom_margin, right_margin):\r\n    background_image_width, background_image_height = background_image.size\r\n    calender_image_width, calender_image_height = calender_image.size\r\n\r\n    margin_tuple_from_topleft = (background_image_width - calender_image_width -\r\n                                 right_margin, background_image_height - calender_image_height - bottom_margin)\r\n\r\n    return margin_tuple_from_topleft\r\n\r\n\r\ndef paste_with_transparency(fg_img, bg_img, alpha=1.0, box=(0, 0)):\r\n    fg_img_trans = Image.new(\"RGBA\", fg_img.size)\r\n    fg_img_trans = Image.blend(fg_img_trans, fg_img, alpha)\r\n    bg_img.paste(fg_img_trans, box, fg_img_trans)\r\n    return bg_img\r\n\r\n\r\ndef paste_calendar_into_image(calendar_image_file_path, input_image_path, output_image_path, bottom_margin, right_margin, alpha):\r\n    calender_image = Image.open(calendar_image_file_path)\r\n    background_image = Image.open(input_image_path)\r\n    offset = calculate_bottom_right_offset_for(\r\n        calender_image, background_image, bottom_margin, right_margin)\r\n    background_image = paste_with_transparency(\r\n        calender_image, background_image, alpha, offset)\r\n    background_image.save(output_image_path)\r\n\r\n\r\ndef generate_output_image_filename(input_image_path, month, year):\r\n    
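# illustrative example (assumed inputs): month=3, year=2022, 'photo.jpg' -> '2022-03-March--photo.png'\r\n    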
input_image_name = os.path.basename(input_image_path)\r\n    month_2_digits = f\"{month:02d}\"\r\n    month_name = _date_utils.month_name(month)\r\n    output_filename = f\"{year}-{month_2_digits}-{month_name}--{input_image_name}\"\r\n    # output to PNG since repeatedly saving JPG will affect quality\r\n    return Path(output_filename).with_suffix('.png')\r\n\r\n\r\nfiles = get_input_files(INPUTDIR)\r\nfiles_count = len(files)\r\nif (files_count != 12):\r\n    print(\r\n        f\"The input folder '{INPUTDIR}' should contain 12 images but found {files_count}\")\r\n    exit(3)\r\n\r\nPath(OUTDIR).mkdir(parents=True, exist_ok=True)\r\n\r\n\r\ndef calculate_dpi_and_margins(input_image_path):\r\n    input_width, input_height = get_image_dimensions(input_image_path)\r\n    dpi_and_margins = service_auto_dpi_calculator.DpiAndMargins(\r\n        dpi_options, int(options.bottom_margin), int(options.right_margin))\r\n    if (options.dpi is None):\r\n        dpi_and_margins = service_auto_dpi_calculator.calculate_dpi_and_margins_from_image_size(\r\n            input_width, input_height, options.is_verbose)\r\n    return dpi_and_margins\r\n\r\n\r\ndef generate_for_month(month):\r\n    # month: 1 = January\r\n    print(f\"Generating {_date_utils.month_name(month)} {YEAR} ...\")\r\n\r\n    input_image_path = files[month - 1]\r\n\r\n    dpi_and_margins = calculate_dpi_and_margins(input_image_path)\r\n\r\n    calendar_image_file_path = _figure_renderer.render_table_for_month(\r\n        month, YEAR, OUTDIR, options.borderColor, options.textColor, dpi_and_margins.dpi)\r\n\r\n    output_image_path = os.path.join(\r\n        OUTDIR, generate_output_image_filename(input_image_path, month, YEAR))\r\n    paste_calendar_into_image(calendar_image_file_path, input_image_path, output_image_path,\r\n                              dpi_and_margins.bottom_margin, dpi_and_margins.right_margin, float(options.alpha))\r\n    os.unlink(calendar_image_file_path)\r\n    print(f\"  - calendized image saved to {output_image_path} [OK]\")\r\n\r\n\r\nif (int(options.month) >= 1):\r\n    generate_for_month(int(options.month))\r\nelse:\r\n    for month in range(1, 12 + 1):\r\n        generate_for_month(month)\r\n\r\nprint(\"[done]\")\r\n","repo_name":"mrseanryan/calendizer","sub_path":"calendize.py","file_name":"calendize.py","file_ext":"py","file_size_in_byte":7061,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"}
{"seq_id":"12381304821","text":"machine = [{'s': (0, 'Dispense change: 0 r'), '1': (1, 'Money accepted'), '2': (2, 'Money accepted'), '5': (0, 'Dispense a chocolate bar')},
           {'s': (0, 'Dispense change: 1 r'), '1': (2, 'Money accepted'), '2': (3, 'Money accepted'), '5': (1, 'Dispense a chocolate bar')},
           {'s': (0, 'Dispense change: 2 r'), '1': (3, 'Money accepted'), '2': (4, 'Money accepted'), '5': (2, 'Dispense a chocolate bar')},
           {'s': (0, 'Dispense change: 3 r'), '1': (4, 'Money accepted'), '2': (0, 'Dispense a chocolate bar'), '5': (3, 'Dispense a chocolate bar')},
           {'s': (0, 'Dispense change: 4 r'), '1': (0, 'Dispense a chocolate bar'), '2': (1, 'Dispense a chocolate bar'), '5': (4, 'Dispense a chocolate bar')}]\nall_commands = set().union(*(d.keys() for d in machine))\nstate = 0\n\ncommand = input()\n\nwhile command in all_commands:\n    output = machine[state][command][1]\n    state = machine[state][command][0]\n    print(output)\n    command = input()\nelse:\n    print('An unknown signal was received. The vending machine has stopped.')\n","repo_name":"shinkai-tester/python_beginner","sub_path":"Lesson10/choc.py","file_name":"choc.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"18946553957","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 26 10:05:27 2018\n\n@author: willalex\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\n\ntest1 = pd.read_csv('test_input.txt', sep=',', nrows = 180000, header = None, names = ['UserID', 'Bal', 'Date', 'Balance']) \ntest2 = pd.read_csv('test_input.txt', sep=',', skiprows = 180000, header = None , \n                    names =['UserID', 'Trx', 'CardID', 'Date', '2GAbstract', 'TranscationName', 'Classify', 'Amount'])\n# Fix the column misalignment issue; this approach is not great, more to learn later\ntest2.loc[ test2['Amount'].isnull(), 'Amount'] = test2['Classify']\ntest2.loc[ test2['Amount'] == test2['Classify'], 'Classify' ] = test2['TranscationName']\ntest2.loc[ test2['Classify'] == test2['TranscationName'], 'TranscationName'] = None\ntest2['Amount'] = test2['Amount'].astype(float)\n\ntrain1 = pd.read_csv('train.txt', sep=',', nrows = 3638790, header = None, names = ['UserID', 'Bal', 'Date', 'Balance'])\ntrain2 =pd.read_csv('train.txt', sep=',', skiprows = 3638790, header = None, \n                    names =['UserID', 'Trx', 'CardID', 'Date', '2GAbstract', 'TranscationName', 'Classify', 'Amount'])\n\ntrain2.loc[ train2['Amount'].isnull(), 'Amount'] = train2['Classify']\ntrain2.loc[ train2['Amount'] == train2['Classify'], 'Classify' ] = train2['TranscationName']\ntrain2.loc[ train2['Classify'] == train2['TranscationName'], 'TranscationName'] = None\ntrain2['Amount'] = train2['Amount'].astype(float)\n\n#Merge\ntestdata = pd.merge(test1, test2, how = 'left', on = ['UserID', 'Date'], suffixes = ['_bal', '_trx'])\ntraindata = pd.merge(train1, train2, how = 'left', on = ['UserID', 'Date'])\n\n# Take the last day per user\nnewdf = test1.groupby('UserID')\nnewdf2 = newdf['Date'].max()\n#sortedbyBalance = grouped['Date']\n\nnewddf = pd.DataFrame({'UserID': newdf2.index, 'Date':newdf2.values})\nnewddf.drop(3000, inplace = True)\n\ntemp = pd.merge(test1, newddf, how = 'inner', on = ['UserID', 'Date'])\ntemp['Date'] = temp['Date'] + 7\n\n#submission\ntemp.to_csv('demo.txt',sep = ',',index = False, header = False)\n","repo_name":"WillAlex2017/18ZhaoShangBank","sub_path":"moneyflow/demo1.py","file_name":"demo1.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"}
{"seq_id":"31915850958","text":"# printing out the count of numbers co-prime to each input\ndef factor(n):\n    factorlist=[]\n    for i in range(2,n):\n        if n%i==0:\n            factorlist.append(i)\n    return factorlist\n\nuser=input()\ninputlist=[]\nwhile user !=\"0\":\n    user=int(user)\n    if user<1000000000:\n        inputlist.append(user)\n    user=input()\n\nfor x in inputlist:\n    z=factor(x)\n    final=[]\n    for i in range(2,x):\n        k=0\n        for m in z:\n            if i%m==0:\n                k+=1\n        if k==0:\n            final.append(i)\n    print(len(final))\n","repo_name":"ahammadshawki8/Fun-Coding","sub_path":"co-prime.py","file_name":"co-prime.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"38364240860","text":"from recog import recog\nfrom say import say\nfrom search_package import searchpac\nfrom install_package import install\nimport os\nimport datetime\nimport wikipedia\nimport webbrowser as wb\nimport subprocess\nimport pyautogui\nimport screen_brightness_control as sbc\nfrom AppOpener import 
run\nfrom lxml import html\nimport requests\nimport time\n\n\n# Function to Greet the user based on time\ndef Greeting():\n    hour = int(datetime.datetime.now().hour)\n    if hour>=0 and hour<12:\n        say(\"Good Morning !!\")\n    elif hour>=12 and hour<18:\n        say(\"Good Afternoon!!\")\n    else:\n        say(\"Good Evening !!\")\n    print(\"I am Thor, How may I help you?\")\n    say(\"I am Thor, How may I help you?\")\n\n\n\nif __name__ == \"__main__\":\n    Greeting() \n    while True:\n        query = recog().lower() # converting the query to all lowercase\n        # searching for keywords inside the text query\n        if \"wikipedia\" in query: # if we get a keyword as wikipedia\n            say(\"Searching Wikipedia\")\n            # replacing wikipedia with \"\"\n            query = query.replace(\"wikipedia\",\"\") \n            # searching the new query directly on wikipedia using the wikipedia library\n            results = wikipedia.summary(query,sentences = 2)\n            say(\"According to Wikipedia\")\n            print(results)\n            say(results)\n        elif \"increase volume by\" in query: # if we get a keyword as increase volume by\n            say(\"Increasing Volume\")\n            # replacing increase volume by with \"\"\n            query = query.replace(\"increase volume by\",\"\")\n            # converting left query to integer\n            query = int(query)\n            # loop for query times\n            for i in range(query):\n                # pressing the volume up button query times\n                pyautogui.press('volumeup')\n        elif \"decrease volume by\" in query:# if we get a keyword as decrease volume by\n            say(\"Decreasing Volume\")\n            # replacing decrease volume by with \"\"\n            query = query.replace(\"decrease volume by\",\"\")\n            # converting left query to integer\n            query = int(query)\n            # loop for query times\n            for i in range(query):\n                # pressing the volume down button query times\n                pyautogui.press('volumedown')\n        \n        elif \"open youtube\" in query:# if we get a keyword as open youtube\n            # opening youtube.com using webbrowser library\n            wb.open(\"youtube.com\")\n        elif \"open google\" in query:# if we get a keyword as open google\n            # opening google.com using webbrowser library\n            wb.open(\"google.com\")\n        elif \"open chess\" in query:# if we get a keyword as open chess\n            # opening chess.com using webbrowser library\n            wb.open(\"chess.com\")\n        elif \"play music\" in query:# if we get a keyword as play music\n            # address of the directory where the music is \n            music_Dir = \"\"\n            songs = os.listdir(music_Dir)\n            # playing the first song in the directory\n            os.startfile(os.path.join(music_Dir,songs[0]))\n        elif \"the time\" in query:# if we get a keyword as the time\n            # Time in H M S format\n            Time = datetime.datetime.now().strftime(\"%H:%M:%S\")\n            say(\"The time is\")\n            say(Time) \n        elif \"open\" in query:# if we get a keyword as open\n            # replacing open with \"\"\n            query = query.replace(\"open \",\"\")\n            run(query)\n        elif \"set brightness to\" in query:# if we get a keyword as set brightness to\n            say(\"setting brightness\")\n            # replacing set brightness to with \"\"\n            query = query.replace(\"set brightness to\",\"\")\n            # converting left query to integer\n            query = int(query)\n            # using the screen_brightness_control to set the brightness to query\n            sbc.set_brightness(query)\n        elif \"click\" in query:# if we get a keyword as click\n            # holding down the win button\n            pyautogui.keyDown(\"win\")\n            # pressing the prtscn button\n            pyautogui.press(\"printscreen\")\n            # releasing up the win button\n            pyautogui.keyUp(\"win\")\n        elif \"search on youtube\" in query:# if we get a keyword as search on youtube\n            # replacing search on youtube with \"\"\n            query = query.replace(\"search on youtube\",\"\")\n            # appending the query to 
the youtube url\n            query = 'https://www.youtube.com/results?search_query='+query\n            print(query)\n            # opening the newly formed url \n            wb.open(query)\n        elif \"search on google\" in query:# if we get a keyword as search on google\n            # replacing search on google with \"\"\n            query = query.replace(\"search on google\",\"\")\n            # appending the query to the google url\n            query = 'https://www.google.com/search?q='+query\n            print(query)\n            # opening the newly formed url \n            wb.open(query)\n        else:\n            print(\"I did not recognize that\") \n            say(\"I did not recognize that!!\")","repo_name":"NamanHH99/Thor-Voice-Assistant","sub_path":"scripts/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"24620471334","text":"from django.test import TestCase\nfrom django.urls import reverse, resolve\nfrom .models import Product, ProductCategory\nfrom products.views import products_view, products_by_category_view, product_detail_view\n\n# Create your tests here.\n\n\nclass ProductTests(TestCase):\n    \"\"\"Product name test\"\"\"\n\n    def test_str(self):\n        test_name = Product(product_name='Pork chops')\n        self.assertEqual(str(test_name), 'Pork chops')\n\n\nclass test_url(TestCase):\n    \"\"\"Test urls are resolved\"\"\"\n\n    def test_product_url_resolves(self):\n        url = reverse('products')\n        self.assertEquals(resolve(url).func, products_view)\n\n    def test_product_category_url_resolves(self):\n        url = reverse('products_by_category', args=['path'])\n        self.assertEquals(resolve(url).func, products_by_category_view)\n    \n    def test_product_detail_view_url_resolves(self):\n        url = reverse('product_detail_view', args=['path'])\n        self.assertEquals(resolve(url).func, product_detail_view)\n\n\nclass BasicTest(TestCase):\n    \"\"\"Check fields post\"\"\"\n\n    def test_fields(self):\n        category = ProductCategory(category_name=\"Pork\")\n        category.save()\n        product = Product()\n        product.category_name = category\n        product.product_category = category\n        product.product_name = \"Test Product\"\n        product.product_price = 2.99\n        product.product_weight = 350\n        product.product_serves = 4\n        product.product_description = \"A new test product\"\n        product.product_image_name = \"donkey.jpg\"\n        product.product_stock_qty = 10\n        product.product_live = True\n        product.save()\n        record = Product.objects.get(id=1)\n        self.assertEqual(record, product)\n","repo_name":"JayPeaa/msproject5","sub_path":"products/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"9043455983","text":"#Ascii sort of works, but it sucks, so all ascii functions have been commented out by default. 
You can uncomment them all to enable ascii conversion\n\nimport os\nimport tkinter as tk\nimport time\nimport pyperclip\n\ndef bin_pretty(binum):\n    x = len(binum)\n    r = x%4\n    if r == 0:\n        r = 4\n    r = 4 - r\n    while r > 0:\n        binum = \"0\" + binum\n        r -= 1\n    tempbin = binum\n    fbin = \"\"\n    while len(tempbin) > 0:\n        fbin = fbin + tempbin[:4] + \" \"\n        tempbin = tempbin[4:]\n    return fbin\n\n#Handle Events\n#Convert on Return:\ndef dec_key(event):\n    copiedtext.pack_forget()\n    try:\n        #Convert from dec\n        number = ent_dec.get()\n        if number == \"\":\n            ent_bin.delete(0, tk.END)\n            ent_hex.delete(0, tk.END)\n#            ent_ascii.delete(0, tk.END)\n            return\n        number = number.replace(\" \", \"\")\n        number = int(number)\n        decinum = number\n        hexinum = hex(number)[2:]\n        binanum = bin(number)[2:]\n#        try:\n#            ascinum = chr(decinum)\n#        except OverflowError:\n#            ascinum = \"Too big!\"\n#            pass\n        fbin = bin_pretty(binanum)\n    except Exception as e:\n        ent_bin.delete(0, tk.END)\n        ent_bin.insert(0, \"ERROR: \" + str(e))\n        ent_hex.delete(0, tk.END)\n        ent_hex.insert(0, \"ERROR: \" + str(e))\n#        ent_ascii.delete(0, tk.END)\n#        ent_ascii.insert(0, \"ERROR: \" + str(e))\n        return\n\n    #Print Output\n    ent_bin.delete(0, tk.END)\n    ent_bin.insert(0, fbin)\n    ent_hex.delete(0, tk.END)\n    ent_hex.insert(0, hexinum.upper())\n#    ent_ascii.delete(0, tk.END)\n#    ent_ascii.insert(0, ascinum)\ndef bin_key(event):\n    copiedtext.pack_forget()\n    try:\n        #Convert from bin\n        number = ent_bin.get()\n        if number == \"\":\n            ent_dec.delete(0, tk.END)\n            ent_hex.delete(0, tk.END)\n#            ent_ascii.delete(0, tk.END)\n            return\n        number = number.replace(\" \", \"\")\n        number = int(number)\n        binanum = number\n        decinum = int(str(number), 2)\n        hexinum = hex(decinum)[2:]\n#        try:\n#            ascinum = chr(decinum)\n#        except OverflowError:\n#            ascinum = \"Too big!\"\n#            pass\n        fbin = bin_pretty(str(binanum))\n    except Exception as e:\n        ent_dec.delete(0, tk.END)\n        ent_dec.insert(0, \"ERROR: \" + str(e))\n        ent_hex.delete(0, tk.END)\n        ent_hex.insert(0, \"ERROR: \" + str(e))\n#        ent_ascii.delete(0, tk.END)\n#        ent_ascii.insert(0, \"ERROR: \" + str(e))\n        return\n\n    #Print Output\n    ent_bin.delete(0, tk.END)\n    ent_bin.insert(0, fbin)\n    ent_dec.delete(0, tk.END)\n    ent_dec.insert(0, decinum)\n    ent_hex.delete(0, tk.END)\n    ent_hex.insert(0, hexinum.upper())\n#    ent_ascii.delete(0, tk.END)\n#    ent_ascii.insert(0, ascinum)\ndef hex_key(event):\n    copiedtext.pack_forget()\n    try:\n        #Convert from hex\n        number = ent_hex.get()\n        if number == \"\":\n            ent_bin.delete(0, tk.END)\n            ent_dec.delete(0, tk.END)\n#            ent_ascii.delete(0, tk.END)\n            return\n        number = number.replace(\" \", \"\")\n        hexinum = number\n        decinum = int(number, 16)\n        binanum = bin(decinum)[2:]\n#        try:\n#            ascinum = chr(decinum)\n#        except OverflowError:\n#            ascinum = \"Too big!\"\n#            pass\n        fbin = bin_pretty(binanum)\n    except Exception as e:\n        ent_bin.delete(0, tk.END)\n        ent_bin.insert(0, \"ERROR: \" + str(e))\n        ent_dec.delete(0, tk.END)\n        ent_dec.insert(0, \"ERROR: \" + str(e))\n#        ent_ascii.delete(0, tk.END)\n#        ent_ascii.insert(0, \"ERROR: \" + str(e))\n        return\n\n    #Print Output\n    ent_hex.delete(0, tk.END)\n    ent_hex.insert(0, hexinum.upper())\n    ent_bin.delete(0, tk.END)\n    ent_bin.insert(0, fbin)\n    ent_dec.delete(0, tk.END)\n    ent_dec.insert(0, decinum)\n#    ent_ascii.delete(0, tk.END)\n#    ent_ascii.insert(0, ascinum)\n#def askey(event):\n#    copiedtext.pack_forget()\n#    try:\n#        #Convert from ascii\n#        number = ent_ascii.get()\n#        if number == \"\":\n#            ent_bin.delete(0, tk.END)\n#            ent_hex.delete(0, tk.END)\n#            ent_dec.delete(0, tk.END)\n#            return\n#        number = 
number.replace(\" \", \"\")\n# ascinum = number.upper()\n# decinum = \"\"\n# for i in range(len(number)):\n# decinum = decinum + str(ord(number[i]))\n# decinum = int(decinum)\n# binanum = bin(decinum)[2:]\n# hexinum = hex(decinum)[2:]\n# fbin = bin_pretty(binanum)\n# except Exception as e:\n# ent_bin.delete(0, tk.END)\n# ent_bin.insert(0, \"ERROR: \" + str(e))\n# ent_hex.delete(0, tk.END)\n# ent_hex.insert(0, \"ERROR: \" + str(e))\n# ent_dec.delete(0, tk.END)\n# ent_dec.insert(0, \"ERROR: \" + str(e))\n# return\n#\n#\n# #Print Output\n# ent_bin.delete(0, tk.END)\n# ent_bin.insert(0, fbin)\n# ent_dec.delete(0, tk.END)\n# ent_dec.insert(0, decinum)\n# ent_hex.delete(0, tk.END)\n# ent_hex.insert(0, hexinum.upper())\n#ASCII FUNCTIONALITY REMOVED!\n\n#Copy values on click:\ndef dec_copy(event):\n copyme = ent_dec.get()\n pyperclip.copy(copyme)\n copiedtext.pack()\n\ndef bin_copy(event):\n copyme = ent_bin.get()\n pyperclip.copy(copyme.replace(\" \", \"\"))\n copiedtext.pack()\n\ndef hex_copy(event):\n copyme = ent_hex.get()\n pyperclip.copy(copyme)\n copiedtext.pack()\n\n#def ascopy(event):\n# copyme = ent_ascii.get()\n# pyperclip.copy(copyme)\n\n#Define Window\nwindow = tk.Tk()\nwindow.geometry(\"960x215\")\nwindow.title(\"Conversion Tool\")\n\n#Define Widgets and Actions\nentertext = tk.Label(text=\"Press ENTER to Convert\")\ncopytext = tk.Label(text=\"Right Click to Copy a Value\")\ncopiedtext = tk.Label(text=\"Value Copied!\")\nlbl_dec = tk.Label(text=\"Decimal\")\nbutt_dec = tk.Button(text=\"Copy\")\nlbl_bin = tk.Label(text=\"Binary\")\nlbl_hex = tk.Label(text=\"Hexidecimal\")\n#lbl_ascii = tk.Label(text=\"Ascii\")\nent_dec = tk.Entry()\nent_bin = tk.Entry()\nent_hex = tk.Entry()\n#ent_ascii = tk.Entry()\n#KeyRelease would be so cool, but the bin_key func would need some serious rework\nent_dec.bind(\"\", dec_key)\nent_bin.bind(\"\", bin_key)\nent_hex.bind(\"\", hex_key)\n#ent_ascii.bind(\"\", askey)\n#ent_dec.bind(\"\", dec_key)\n#ent_bin.bind(\"\", bin_key)\n#ent_hex.bind(\"\", hex_key)\n#ent_ascii.bind(\"\", askey)\nent_dec.bind(\"\", dec_copy)\nent_bin.bind(\"\", bin_copy)\nent_hex.bind(\"\", hex_copy)\n#ent_ascii.bind(\"\", ascopy)\n\n#Load Window Elements\nentertext.pack()\ncopytext.pack()\nlbl_dec.pack(anchor=\"w\")\nent_dec.pack(fill=tk.X)\nlbl_bin.pack(anchor=\"w\")\nent_bin.pack(fill=tk.X)\nlbl_hex.pack(anchor=\"w\")\nent_hex.pack(fill=tk.X)\n#lbl_ascii.pack(anchor=\"w\")\n#ent_ascii.pack(fill=tk.X)\n\n#Start Event Loop\ntk.mainloop()\n","repo_name":"aoxhwjfoavdlhsvfpzha/BaseConverter","sub_path":"ConversionTool.py","file_name":"ConversionTool.py","file_ext":"py","file_size_in_byte":6899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"27096304384","text":"import csv\r\nimport pymysql\r\nimport configparser\r\n\r\n\r\nconfig = configparser.ConfigParser()\r\nconfig.read_file(open('credentials.py'))\r\ndbhost = config['csc']['dbhost']\r\ndbuser = config['csc']['dbuser']\r\ndbpw = config['csc']['dbpw']\r\n\r\ndbschema = 'dryan16'\r\n\r\ndbconn = pymysql.connect(host=dbhost,\r\n user=dbuser,\r\n passwd=dbpw,\r\n db=dbschema,\r\n use_unicode=True,\r\n charset='utf8mb4',\r\n autocommit=True)\r\ncursor = dbconn.cursor()\r\n\r\n\r\nfilename = 'peopleData.csv'\r\nmyRows = []\r\ntry:\r\n with open(filename, 'r') as myCSV:\r\n data = csv.reader(myCSV)\r\n next(myCSV)\r\n for row in data:\r\n myRows.append(row)\r\n myCSV.close()\r\nexcept FileNotFoundError:\r\n print('no file!')\r\n\r\ninsertQuery = 'INSERT INTO peopleData 
(first_name, last_name, company_name, adress, city, \\\r\n county, state, zip, phone1, phone2, email, web) \\\r\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'\r\n \r\nfor item in myRows:\r\n zero = item[0]\r\n one = item[1]\r\n two = item[2]\r\n three = item[3]\r\n four = item[4]\r\n five = item[5]\r\n six = item[6]\r\n seven = item[7]\r\n eight = item[8]\r\n nine = item[9]\r\n ten = item[10]\r\n eleven = item[11]\r\n cursor.execute(insertQuery, (zero, one, two, three, four, five, six, seven,eight,\\\r\n nine, ten, eleven))\r\nprint(\"_______________\")\r\ndbconn.close()\r\n","repo_name":"dryan9/Database-Management","sub_path":"week13__1.py","file_name":"week13__1.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"10410847543","text":"# class Car:\n# def __init__(self,price,speed,fuel,mileage):\n# self.price = price\n# self.speed = speed\n# self.fuel = fuel\n# self.mileage = mileage\n\n# def display_all(self):\n# print(\"price:\",self.price,\"speed:\",self.speed,\"fuel:\",self.fuel,\"mileage:\",self.mileage)\n\n\n# car1 = Car(300,30,2,10)\n\n\n# car1.display_all()\n\nclass car:\n def __init__(self,price,speed,fuel,mileage):\n self.price=price\n if(self.price>10000):\n tax=0.15\n else:\n tax=0.12\n self.speed=speed\n self.fuel=fuel\n self.mileage=mileage\n self.tax=tax\n def display_all(self):\n # print(\"Price:{}\".format(self.price))\n # print(\"Speed:{}\".format(self.speed))\n # print(\"fuel:{}\".format(self.fuel))\n # print(\"mileage:{}\".format(self.mileage)\n # print(\"tax:{}\".format(self.tax))\n print(\"Price: {}\".format(self.price))\n print(\"Speed: {}mph\".format(self.speed))\n print(\"Fuel: {}\".format(self.fuel))\n print(\"Mileage: {}mpg\".format(self.mileage))\n print(\"Tax: {}\".format(self.tax))\n\ntwo=car(300,31,'small',2)\none=car(2000,35,'Full',15)\none.display_all()\ntwo.display_all()\n\n\n # def __init__(self,price,name,weight,brand,status):\n # self.price = price\n # self.name = name\n # self.weight = weight\n # self.brand = brand\n # self.status = \"for sale\"\n\n # def return_item(self,reason_for_return):\n # if (self.reason_for_return = \"defective\"):\n # self.status = \"defective\"\n # self.price = 0\n # elif(self.reason_for_return = \"like new\"):\n # self.status = \"for sale\"\n # elif(self.reason_for_return = \"opened\"):\n # self.status = \"used\"\n # return self","repo_name":"michaelcan2/CodingDojo","sub_path":"Python/car_j.py","file_name":"car_j.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"29038317447","text":"from flask import Blueprint, render_template, request\nfrom twit_app.models import db, User, Tweet\n\ndelete_routes = Blueprint('delete_routes', __name__)\n\n@delete_routes.route('/', methods=[\"GET\", \"POST\"])\ndef index():\n if request.method == \"POST\":\n print(dict(request.form))\n\n twit_user = request.form\n input_name = twit_user['delete_user']\n\n user_info = User.query.filter_by(username=input_name).one()\n user_id = user_info.__dict__['id']\n\n Tweet.query.filter_by(user_id=user_id).delete()\n User.query.filter_by(username=input_name).delete()\n\n db.session.commit()\n\n data = User.query.all()\n return render_template(\"delete.html\", 
data=data)","repo_name":"rmsgn100/Section3-Solo-Project","sub_path":"twit_app/routes/delete_routes.py","file_name":"delete_routes.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"11390026538","text":"import inspect\n\n\ndef expand(func):\n    \"\"\"Decorator that expands an object's attributes to fill a function's\n    parameters\n\n    The decorated function should be passed an object whose attributes share\n    the names of the function's parameters. The object's attributes will be\n    expanded into both positional and keyword parameters.\n    Supports functions and class methods.\n\n    Decorated function parameters:\n        expand: keyword arg. True to activate object expansion,\n                else the decorated function behaves normally\n\n    Usage:\n        >>> @expand\n        ... def f(a, b, c):\n        ...     print \"a: %s, b: %s, c: %s\" % (a, b, c)\n        >>> f(3, 2, 1)\n        a: 3, b: 2, c: 1\n        >>> class Args():  # similar to the object returned by argparse\n        ...     a = 3\n        ...     b = 2\n        ...     c = 1\n        >>> f(Args(), expand=True)\n        a: 3, b: 2, c: 1\n    \"\"\"\n\n    def _inner(*args, **kwargs):\n        func_varnames = inspect.getargspec(func).args\n\n        if kwargs.get(\"expand\") is True and func_varnames:\n            func_args = []\n\n            # deal with methods\n            if func_varnames[0] == \"self\":\n                func_args.append(args[0])\n                args = args[1:]\n                func_varnames = func_varnames[1:]\n\n            func_args.extend(\n                [getattr(args[0], varname) for varname in func_varnames]\n            )\n\n            return func(*func_args)\n\n        else:\n            return func(*args, **kwargs)\n\n    return _inner\n","repo_name":"richlanc/argspander","sub_path":"argspander/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"30952257828","text":"\nfrom django.urls import path, include\nfrom . import views\nfrom . 
import api\napp_name='job'\nurlpatterns = [\n    path('', views.job_list,name='job_list' ),\n    #path('/', views.job_detail,name = 'job_detail' ),\n    path('add_job/', views.add_job,name = 'add_job' ),\n\n    path('/', views.job_detail,name = 'job_detail' ),\n    #apis\n    path('api/all_jobs_api',api.api_job_list,name='all_jobs_api'),\n    path('api/job_detail_api/',api.api_job_detail,name='job_detail'),\n    #generic api views\n    path('api/v2/all_jobs_api',api.JobListApi.as_view(),name='job_list_veiw'),\n    path('api/v2/all_jobs_api/job_detail/',api.JobDetailApi.as_view(),name='job_detail_view'),\n]\n","repo_name":"abdobassel/django_job_board","sub_path":"job/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"39664134935","text":"from setuptools import find_packages, setup\n\nINSTALL_REQUIRES = [\"django==2.2\"]\n\nsetup(\n    name=\"django-test\",\n    packages=find_packages(),\n    test_suite=\"tests\",\n    python_requires=\">=3.9\",\n    url=\"https://github.com/LaurenceWarne/django-test\",\n    version=\"0.1\",\n    author=\"Laurence Warne\",\n    license=\"MIT\",\n    install_requires=INSTALL_REQUIRES\n)\n","repo_name":"LaurenceWarne/django-test","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"73248351231","text":"import requests\nimport socket\nfrom utilities import get_machine_id\n\nclass HTTPReporter :\n    def __init__(self, logger, config):\n        self.logger = logger\n        self.config = config\n        self.id = None\n\n    def report_results(self, results):\n        if results[\"passCount\"] == 1:\n            self.register(results[\"video_name\"])\n\n        self.send_heart_rate(results)\n\n    def register(self, video_name):\n        registration_info = {\"device\":get_machine_id(), \"video\":video_name}\n        try:\n            if \"computer_name\" in self.config:\n                registration_info.update({\"name\":socket.gethostname()})\n            if \"computer_description\" in self.config:\n                registration_info.update({\"description\": self.config[\"computer_description\"]})\n            response = requests.post(\"{}{}\".format(self.config[\"server_url\"], \"register\"), registration_info)\n            if response.status_code == 200:\n                result = response.json()\n                self.id = result[\"id\"]\n                return True\n            else:\n                self.logger.error(\"Registration failure, HTTP status: {}\".format(response.status_code))\n                return False\n        except requests.exceptions.RequestException as err:\n            self.logger.error(\"Exception: {}\".format(err))\n\n    def send_heart_rate(self, results):\n        try:\n            http_data = results.copy()\n            http_data.update({\"DeviceId\":self.id})\n            for key, value in results[\"trackers\"].items():\n                http_data.update({key: value})\n            response = requests.post(\"{}{}\".format(self.config[\"server_url\"], \"heartrate\"), http_data)\n            if response.status_code == 200:\n                return True\n            else:\n                self.logger.error(\"POST failure, HTTP status: {}\".format(response.status_code))\n                return False\n\n        except requests.exceptions.RequestException as err:\n            self.logger.error(\"Exception: {}\".format(err))\n","repo_name":"FredOleary/VideoBiometrics","sub_path":"collector/reporters/http_reporter.py","file_name":"http_reporter.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"}
{"seq_id":"23231441414","text":"# given 2 arrays (of strings), return a bool for whether they contain the same items\n# case sensitive with no white 
spaces\n\nlist1 = [\"kobe\", \"mj\", \"lebron\", \"shaq\", \"dude\",]\nlist2 = [\"kobe\", \"shaq\", \"mj\", \"lebron\", \"other dude\"]\n\n#solution 1\ndef two_list_is_equal_a(list1, list2):\n    cloned_list1 = list(list1)  # actually copy, so the caller's list isn't mutated\n    for i in range(len(list2)):\n        if list2[i] in cloned_list1:\n            cloned_list1.remove(list2[i])\n        else:\n            return False\n    if len(cloned_list1) != 0:\n        return False\n    return True\n\nprint(\"1A)\",two_list_is_equal_a(list1, list2),\"\\n\")\n\ndef two_list_is_equal_b(ar_1, ar_2):\n    if len(ar_1) != len(ar_2):\n        return False\n    seen_items = list(ar_1)  # a list (not a set) so duplicate items are counted correctly\n    for item in ar_2:\n        if item not in seen_items:\n            return False\n        seen_items.remove(item)\n    if len(seen_items) > 0:\n        return False\n    return True\n\nprint(\"1B)\",two_list_is_equal_b(list1, list2),\"\\n\")","repo_name":"SamuelFolledo/SPD1.41-Communication-and-Interviewing","sub_path":"classwork/is_two_list_equal.py","file_name":"is_two_list_equal.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"38972737615","text":"from pyclientlib import SkinnyClient\n\n\ndef main():\n    c = SkinnyClient()\n    while True:\n        print(\"> \", end=\"\")\n        s = input()\n        if not s:\n            continue\n        split = s.split()\n        if split[0] == \"open\":\n            print(c.Open(split[1]))\n        elif split[0] == \"write\":\n            print(c.SetContent(int(split[1]), split[2]))\n        elif split[0] == \"read\":\n            print(c.GetContent(int(split[1])))\n        elif split[0] == \"lock\":\n            print(c.Acquire(int(split[1]), True))\n        elif split[0] == \"unlock\":\n            print(c.Release(int(split[1])))\n        elif split[0] == \"trylock\":\n            print(c.TryAcquire(int(split[1]), True))\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"c5h11oh/DistributedSystems-Chubby","sub_path":"demo/demo1.py","file_name":"demo1.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"}
{"seq_id":"71079501310","text":"import argparse\nimport copy\nimport json\nimport os\nfrom pathlib import Path\nfrom tdw.librarian import ModelLibrarian, ModelRecord\n\ndef str2table(v):\n    return v.split(',')\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--directory\", type=str, default=\"../datasets/toys4K_obj_all/\")\nparser.add_argument(\"--dest\", type=str, default=\"library2/\")\nparser.add_argument(\"--vhacd\", type=int, default=10000) #precision of vhacd mesh decomposition, 800 000 the original default\nparser.add_argument(\"--name\", type=str2table, default=\"\")\nargs = parser.parse_args()\n\nsrc = Path().resolve().joinpath(args.directory).resolve()\ndest = Path().resolve().joinpath(args.dest)\nfile_type=\"obj\"\nlibrary_path = dest.joinpath(\"toys.json\")\nif not os.path.exists(dest):\n    os.mkdir(args.dest)\n\ndef create_library(library_path, src) :\n    # ModelLibrarian.create_library(description=\"Toys model librarian\", path=str(Path().home().joinpath(\"postdoc/datasets/toys4k_lib/toys.json\")))\n    ModelLibrarian.create_library(description=\"Toys model librarian\", path=str(library_path))\n    lib = ModelLibrarian(str(library_path.resolve()))\n    for f in src.rglob(\"*.\"+file_type):\n        # if f.name < \"chair_144\" and f.name > \"cupcake_026\":\n        #     continue\n\n        record = ModelRecord()\n        record.name = \"\".join(list(str(f.name))[:-4])\n        if record.name == \"\":\n            pass\n        record.wcategory = \"\".join(list(str(f.name))[:-8])\n        record.wnid = record.wcategory\n        record.scale_factor = 0.1 if file_type == \"fbx\" else 0.2\n        for platform in record.urls:\n            dest_url = 
dest.joinpath(record.wnid + \"/\" + record.name + \"/\" + platform)\n url=\"file:///\" + str(dest_url.resolve()).replace(\"\\\\\", \"/\")\n record.urls[platform] = url\n lib.add_or_update_record(record, overwrite=True, write=False)\n # Write to disk.\n lib.write(pretty=False)\n\ndef fix_json(library_path):\n with open(str(library_path.resolve()), \"r\") as f:\n text = list(f.read())\n for i in range(len(text)):\n if text[i] == \",\":\n if text[i + 1] != \" \" and text[i + 1] != \"\\\"\":\n text[i] = \".\"\n fixed_text = \"\".join(text)\n with open(str(library_path.resolve()), 'w') as f:\n f.write(fixed_text)\n\n\ncreate_library(library_path,src)\nfix_json(library_path)\n\njs = json.load(open(library_path))\njs_copy = copy.deepcopy(js)\nto_remove = []\nwith open(\"disabled\") as f:\n rl = f.readlines()\n for l in rl:\n cat = \"\".join(list(l)[:-5])\n name = \"\".join(list(l)[:-1])\n if cat == \"\":\n continue\n to_remove.append(name)\nfor name in to_remove:\n if name in js_copy[\"records\"]:\n del js_copy[\"records\"][name]\n\nwith open(library_path, 'w') as f:\n json.dump(js_copy, f)\n","repo_name":"Aubret/models_tdw","sub_path":"create_library.py","file_name":"create_library.py","file_ext":"py","file_size_in_byte":2779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"24452701505","text":"import cv2\nfrom Database_Operations import database\nimport pyzbar.pyzbar as pyzbar\n\ndef Qr_scan():\n img=cv2.VideoCapture(0)\n font = cv2.FONT_HERSHEY_SIMPLEX\n\n\n while True:\n success, frame= img.read()\n decoded=pyzbar.decode(frame)\n\n for object in decoded:\n (x,y,w,h)=object.rect\n cv2.rectangle(frame,(x,y),(x+w,y+h),(0,0,255),3)\n a2=database()\n rollno=str(object[0], 'utf-8')\n rollno_l=rollno.split('/')\n print(rollno_l)\n check=a2.verify(rollno_l[0],rollno_l[1])\n\n if(check==True):\n\n cv2.putText(frame,\"DETECTED\",(x,y),font,1,(0,255,0),2)\n\n else:\n cv2.putText(frame, \"INVALID\", (x, y), font, 1, (0, 255, 0), 2)\n\n\n cv2.imshow(\"Frame\",frame)\n if cv2.waitKey(1) & 0xFF==ord('q'):\n break\n\nQr_scan()","repo_name":"rishabhgupta03/Smart-Surveillance-System","sub_path":"Scanner.py","file_name":"Scanner.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"35225941683","text":"k,n=map(int,input().split())\n\ncable=[]\n\nfor _ in range(k):\n length=int(input())\n cable.append(length)\n\nstart,end=1,max(cable)\nans=0\n\nwhile start<=end:\n mid=(start+end)//2\n sum=0\n for c in cable:\n sum+=c//mid\n if sum>=n:\n start=mid+1\n ans=mid\n else:\n end=mid-1\n\nprint(ans)","repo_name":"SarahParkSehyun/Baekjoon_PnP","sub_path":"silver/1654.py","file_name":"1654.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"35675432524","text":"from CryoCore import API\nfrom CryoCloud.Common.DockerProcess import DockerProcess\n\n\nccmodule = {\n \"description\": \"Run stuff in docker environments\",\n \"depends\": [\"\"],\n \"provides\": [\"\"],\n \"inputs\": {\n \"gpu\": \"Run on GPU, default False\",\n \"target\": \"Target docker\",\n \"env\": \"Environment variables\",\n \"dirs\": \"Directories to map as volumes\",\n \"arguments\": \"Arguments for docker process\",\n \"log_all\": \"Log all output as debug, default False\",\n \"debug\": \"Debug - write docker commands to /tmp/ default False\"\n },\n \"outputs\": {\n },\n \"defaults\": {\n 
\"runOn\": \"success\"\n }\n}\n\n\ndef process_task(worker, task, cancel_event=None):\n \"\"\"\n worker.status and worker.log are ready here.\n\n Move files from one place to another\n Needs task[\"args\"][\"src\"] and \"dst\"\n\n \"\"\"\n\n gpu = False\n env = {}\n dirs = []\n args = []\n\n a = task[\"args\"]\n gpu = a.get(\"gpu\", False)\n\n if \"target\" not in task[\"args\"]:\n raise Exception(\"Missing docker target\")\n target = a[\"target\"]\n if not isinstance(target, list):\n target = [target]\n\n if len(target) == 0:\n raise Exception(\"Require parameter 'target'\")\n\n env = a.get(\"env\", {})\n dirs = a.get(\"dirs\", [])\n args = a.get(\"arguments\", [])\n log_all = a.get(\"log_all\", False)\n debug = a.get(\"debug\", False)\n\n dp = DockerProcess(target, worker.status, worker.log, API.api_stop_event,\n dirs=dirs, env=env, gpu=gpu, args=args, log_all=log_all,\n cancel_event=cancel_event, debug=debug)\n # cancel_event=cancel_event) # Doesn't work\n retval = dp.run()\n\n worker.log.debug(\"Docker completed\")\n return worker.status[\"progress\"].get_value(), retval\n","repo_name":"Snarkdoof/cryocloud","sub_path":"CryoCloud/Modules/docker.py","file_name":"docker.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"19700020104","text":"import numpy as np\n\ndef read(filename):\n\t#function that reads the x and y coordinate\n\tinfile = open(filename, 'r')\n\tTimestep = []\n\tTime =[]\n\tTemperature = []\n\tDiffusionConstant = []\n\trelevant_lines = infile.readlines()[1:] #skips the irrelevant lines\n\tfor line in relevant_lines: \n\t\tdata = line.split()\n\t\tTimestep.append(float(data[0]))\n\t\tTime.append(float(data[1]))\n\t\tTemperature.append(float(data[2]))\n\t\tDiffusionConstant.append(float(data[3]))\n\tinfile.close()\n\tTimestep = np.array(Timestep)\n\tTime = np.array(Time)\n\tTemperature = np.array(Temperature)\n\tDiffusionConstant = np.array(DiffusionConstant)\n\n\treturn Timestep, Time, Temperature, DiffusionConstant\n\n#Timestep, Time, Temperature, DiffusionConstant = read('statistics.txt')\n\nT_init = []\nfor i in range(1, 1001, 50):\n\tT_init.append(i)\nT_init = np.array(T_init)\n\nT_ratio = np.array([0.669704,0.741992,0.678682,0.690175,0.693458,0.719683,0.759031,0.779948,0.709018,0.740829,0.765328,0.744585,0.761976,0.755197,0.720936,0.738801,0.766893,0.729957,0.729681,0.751274])\n\nT = T_ratio*T_init #temperature at equilibrium\n\nimport matplotlib.pyplot as plt\n\nplt.plot(T, T_ratio, 'go')\nplt.rcParams.update({'font.size': 14})\nplt.ylabel('$T/T_i [m^2/s]$')\nplt.xlabel('$T$ [K]')\nplt.show()\n\n\n","repo_name":"livewj/Project5","sub_path":"build-molecular-dynamics-fys3150-Desktop_Qt_5_7_0_clang_64bit-Debug/readfile_Tloop.py","file_name":"readfile_Tloop.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"17937335810","text":"###########################################################################################################\n# 稼働設定:解像度 1920*1080 表示スケール125%\n###########################################################################################################\n# モジュールインポート\nimport pyautogui as pg\nimport time\nimport MJSOpen\n\n# pandasインポート\nimport pandas as pd\n\n# 配列計算関数numpyインポート\nimport numpy as np\n\n# osインポート\nimport os\n\n# datetimeインポート\n\n# 例外処理判定の為のtracebackインポート\nimport traceback\n\n# pandas(pd)で関与先データCSVを取得\nimport pyautogui\nimport 
pyperclip # クリップボードへのコピーで使用\nimport Function.ExcelFileAction as EFA\nimport Function.CSVOut as FCO\nimport Function.MJSSPOPDFMarge as PDFM\nimport datetime\nimport openpyxl\nfrom openpyxl.formatting.rule import Rule\nfrom ctypes import windll\n\nimport os\nimport sys\nimport PyPDF2\nfrom pdfminer.high_level import extract_text\nfrom pdfminer.pdfparser import PDFParser\nfrom pdfminer.pdfdocument import PDFDocument\nfrom pdfminer.pdfpage import PDFPage\nimport WarekiHenkan as WK\n\n# logger設定------------------------------------------------------------------------------------------------------------\nimport logging.config\n\nlogging.config.fileConfig(r\"LogConf\\loggingMJSSysUp.conf\")\nlogger = logging.getLogger(__name__)\n# ----------------------------------------------------------------------------------------------------------------------\n\n\ndef DriverUIWaitXPATH(UIPATH, driver): # XPATH要素を取得するまで待機\n    for x in range(10):\n        try:\n            driver.find_element_by_xpath(UIPATH)\n            Flag = 1\n            return True\n        except:\n            Flag = 0\n    if Flag == 0:\n        return False\n\n\n# ----------------------------------------------------------------------------------------------------------------------\ndef DriverUIWaitAutomationId(UIPATH, driver): # AutomationId要素を取得するまで待機\n    for x in range(10):\n        try:\n            driver.find_element_by_accessibility_id(UIPATH)\n            Flag = 1\n            return True\n        except:\n            Flag = 0\n    if Flag == 0:\n        return False\n\n\n# ----------------------------------------------------------------------------------------------------------------------\ndef DriverUIWaitName(UIPATH, driver): # Name要素を取得するまで待機\n    for x in range(10):\n        try:\n            driver.find_element_by_name(UIPATH)\n            Flag = 1\n            return True\n        except:\n            Flag = 0\n    if Flag == 0:\n        return False\n\n\n# ----------------------------------------------------------------------------------------------------------------------\ndef DriverUIWaitclassname(UIPATH, driver): # class名要素を取得するまで待機\n    for x in range(10):\n        try:\n            driver.find_element_by_class_name(UIPATH)\n            Flag = 1\n            return True\n        except:\n            Flag = 0\n    if Flag == 0:\n        return False\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# ----------------------------------------------------------------------------------------------------------------------\ndef DriverFindClass(UIPATH, driver): # class名要素を全て取得するまで待機\n    for x in range(10):\n        try:\n            elList = driver.find_elements_by_class_name(UIPATH)\n            Flag = 1\n            return True, elList\n        except:\n            Flag = 0\n    if Flag == 0:\n        return False, None\n\n\n# ----------------------------------------------------------------------------------------------------------------------\ndef DriverCheck(Hub, ObjName, driver): # 指定した方法で要素を取得できるか確認\n    for x in range(10):\n        if Hub == \"AutomationID\":\n            if (\n                DriverUIWaitAutomationId(ObjName, driver) is True\n            ): # OMSメニューの年調起動ボタンを判定して初期処理分け\n                # 正常待機後処理\n                driver.find_element_by_accessibility_id(ObjName) # 一括電子申告送信ボタン\n                return True\n            else:\n                # 異常待機後処理\n                print(\"要素取得に失敗しました。\")\n        elif Hub == \"XPATH\":\n            if DriverUIWaitXPATH(ObjName, driver) is True: # OMSメニューの年調起動ボタンを判定して初期処理分け\n                # 正常待機後処理\n                driver.find_element_by_xpath(ObjName) # 一括電子申告送信ボタン\n                return True\n            else:\n                # 異常待機後処理\n                print(\"要素取得に失敗しました。\")\n        elif Hub == \"Name\":\n            if DriverUIWaitName(ObjName, driver) is True: # OMSメニューの年調起動ボタンを判定して初期処理分け\n                # 正常待機後処理\n                driver.find_element_by_name(ObjName) # 一括電子申告送信ボタン\n                return True\n            else:\n                # 異常待機後処理\n                print(\"要素取得に失敗しました。\")\n\n\n# 
----------------------------------------------------------------------------------------------------------------------\ndef DriverClick(Hub, ObjName, driver):\n    if Hub == \"AutomationID\":\n        if (\n            DriverUIWaitAutomationId(ObjName, driver) is True\n        ): # OMSメニューの年調起動ボタンを判定して初期処理分け\n            # 正常待機後処理\n            OMSObj = driver.find_element_by_accessibility_id(ObjName) # 一括電子申告送信ボタン\n            OMSObj.click()\n            return OMSObj\n        else:\n            # 異常待機後処理\n            print(\"要素取得に失敗しました。\")\n    elif Hub == \"XPATH\":\n        if DriverUIWaitXPATH(ObjName, driver) is True: # OMSメニューの年調起動ボタンを判定して初期処理分け\n            # 正常待機後処理\n            OMSObj = driver.find_element_by_xpath(ObjName) # 一括電子申告送信ボタン\n            OMSObj.click()\n            return OMSObj\n        else:\n            # 異常待機後処理\n            print(\"要素取得に失敗しました。\")\n    elif Hub == \"Name\":\n        if DriverUIWaitName(ObjName, driver) is True: # OMSメニューの年調起動ボタンを判定して初期処理分け\n            # 正常待機後処理\n            OMSObj = driver.find_element_by_name(ObjName) # 一括電子申告送信ボタン\n            OMSObj.click()\n            return OMSObj\n        else:\n            # 異常待機後処理\n            print(\"要素取得に失敗しました。\")\n    elif Hub == \"class_name\":\n        if DriverUIWaitclassname(ObjName, driver) is True: # OMSメニューの年調起動ボタンを判定して初期処理分け\n            # 正常待機後処理\n            OMSObj = driver.find_element_by_class_name(ObjName) # 一括電子申告送信ボタン\n            OMSObj.click()\n            return OMSObj\n        else:\n            # 異常待機後処理\n            print(\"要素取得に失敗しました。\")\n\n\n# ----------------------------------------------------------------------------------------------------------------------\ndef ImgCheck(FolURL2, FileName, conf, LoopVal): # 画像があればTrueを返す関数\n    ImgURL = FolURL2 + \"/\" + FileName\n    for x in range(LoopVal):\n        try:\n            p = pyautogui.locateOnScreen(ImgURL, confidence=conf)\n            x, y = pyautogui.center(p)\n            return True, x, y\n        except:\n            Flag = 0\n    if Flag == 0:\n        return False, \"\", \"\"\n\n\n# ----------------------------------------------------------------------------------------------------------------------\ndef ImgNothingCheck(FolURL2, FileName, conf, LoopVal): # 画像がなければTrueを返す\n    ImgURL = FolURL2 + \"/\" + FileName\n    for x in range(LoopVal):\n        try:\n            p = pyautogui.locateOnScreen(ImgURL, confidence=conf)\n            x, y = pyautogui.center(p)\n            return False\n        except:\n            Flag = 0\n    if Flag == 0:\n        return True\n\n\n# ----------------------------------------------------------------------------------------------------------------------\ndef ImgCheckForList(FolURL2, List, conf, LoopVal): # リスト内の画像があればTrueと画像名を返す\n    for x in range(LoopVal):\n        for ListItem in List:\n            ImgURL = FolURL2 + \"/\" + ListItem\n            try:\n                p = pyautogui.locateOnScreen(ImgURL, confidence=conf)\n                x, y = pyautogui.center(p)\n                return True, ListItem, x, y\n            except:\n                Flag = 0\n    if Flag == 0:\n        return False, \"\", \"\", \"\"\n\n\n# ----------------------------------------------------------------------------------------------------------------------\ndef ImgClick(FolURL2, FileName, conf, LoopVal): # 画像があればクリックしてx,y軸を返す\n    ImgURL = FolURL2 + \"/\" + FileName\n    for x in range(10):\n        if (\n            ImgCheck(FolURL2, FileName, conf, LoopVal)[0] is True\n        ): # OMSメニューの年調起動ボタンを判定して初期処理分け\n            # 正常待機後処理\n            for y in range(10):\n                try:\n                    p = pyautogui.locateOnScreen(ImgURL, confidence=conf)\n                    x, y = pyautogui.center(p)\n                    pyautogui.click(x, y)\n                    time.sleep(1)\n                    return x, y\n                except:\n                    print(\"失敗\")\n        else:\n            # 異常待機後処理\n            print(\"要素取得に失敗しました。\")\n\n\n# ------------------------------------------------------------------------------------------------------------------\n# RPA用画像フォルダの作成---------------------------------------------------------\nFolURL = os.getcwd().replace(\"\\\\\", \"/\") # 先\nTFolURL = FolURL + r\"\\RPAPhoto\\MJSKomonsakiUpDate\" # 先\nLURL = TFolURL + r\"\\顧問先-名称.csv\" # 処理状況CSVのURL\n# 
--------------------------------------------------------------------------------\nCSVs = FCO.CsvRead(LURL)[1]\nCSVLen = len(CSVs)\nfor CV in range(CSVLen):\n if CV > 2222:\n CSVsRow = CSVs.iloc[CV]\n if not CSVsRow[\"顧問先\"] == CSVsRow[\"顧問先\"]:\n print(\"nan\")\n else:\n print(CSVsRow[\"コード\"])\n KCB = ImgCheck(TFolURL, r\"\\K_CodeBox.png\", 0.9, 10)\n if KCB[0] is True:\n pyautogui.click(KCB[1] + 100, KCB[2]) # 横軸,縦軸\n pyperclip.copy(str(CSVsRow[\"コード\"]))\n pg.hotkey(\"ctrl\", \"v\")\n pg.press(\"return\")\n time.sleep(1)\n MB = ImgCheckForList(\n TFolURL,\n [\n r\"\\M_Bar.png\",\n r\"\\M_Bar2.png\",\n r\"\\M_Bar3.png\",\n r\"\\Name.png\",\n r\"\\Name2.png\",\n r\"\\Name3.png\",\n ],\n 0.9,\n 10,\n )\n if MB[0] is True:\n pyautogui.click(MB[2], MB[3]) # 横軸,縦軸\n RS = ImgCheckForList(\n TFolURL, [r\"\\R_Sou.png\", r\"\\R_Sou2.png\"], 0.9, 10\n )\n if RS[0] is True:\n pyautogui.click(RS[2] + 150, RS[3]) # 横軸,縦軸\n pg.press(\n [\n \"backspace\",\n \"backspace\",\n \"backspace\",\n \"backspace\",\n \"backspace\",\n \"backspace\",\n \"backspace\",\n \"backspace\",\n \"backspace\",\n \"backspace\",\n \"backspace\",\n \"backspace\",\n \"backspace\",\n \"backspace\",\n ]\n )\n pg.press(\n [\n \"delete\",\n \"delete\",\n \"delete\",\n \"delete\",\n \"delete\",\n \"delete\",\n \"delete\",\n \"delete\",\n \"delete\",\n \"delete\",\n \"delete\",\n \"delete\",\n \"delete\",\n \"delete\",\n ]\n )\n pyperclip.copy(str(CSVsRow[\"コード\"]))\n pg.hotkey(\"ctrl\", \"v\")\n ImgClick(TFolURL, r\"\\U_Btn.png\", 0.9, 10)\n time.sleep(1)\n while (\n pg.locateOnScreen(\n TFolURL + r\"\\\\\" + \"UpDateFlag.png\", confidence=0.9\n )\n is not None\n ):\n time.sleep(1)\n time.sleep(1)\n","repo_name":"hasegawakaikeirpa/RPAScript","sub_path":"MJSKomonsakiUpDate.py","file_name":"MJSKomonsakiUpDate.py","file_ext":"py","file_size_in_byte":13678,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"38007371934","text":"import pygame\ndef share_diagonal(x0, y0, x1, y1):\n \"\"\" Is (x0, y0) on a shared diagonal with (x1, y1)? 
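True exactly when the horizontal and vertical distances are equal. 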
\"\"\"\n dy = abs(y1 - y0) # Calc the absolute y distance\n dx = abs(x1 - x0) # CXalc the absolute x distance\n return dx == dy \n\ndef col_clashes(bs, c):\n \"\"\" Return True if the queen at column c clashes\n with any queen to its left.\n \"\"\"\n for i in range(c): # Look at all columns to the left of c\n if share_diagonal(i, bs[i], c, bs[c]):\n return True\n\n return False \n\ndef has_clashes(the_board):\n \"\"\" Determine whether we have any queens clashing on the diagonals.\n We're assuming here that the_board is a permutation of column\n numbers, so we're not explicitly checking row or column clashes.\n \"\"\"\n for col in range(1,len(the_board)):\n if col_clashes(the_board, col):\n return True\n return False\n\ngravity = 0.1\n\nclass QueenSprite:\n\n def __init__(self, img, target_posn):\n self.image = img\n self.target_posn = target_posn\n (x, y) = target_posn\n self.posn = (x, 0) # Start ball at top of its column\n self.y_velocity = 0 # with zero initial velocity\n\n def update(self):\n self.y_velocity += gravity\n (x, y) = self.posn\n new_y_pos = y + self.y_velocity\n (target_x, target_y) = self.target_posn # Unpack the position\n dist_to_go = target_y - new_y_pos # How far to our floor?\n \n if dist_to_go < 0: # Are we under floor?\n self.y_velocity = -0.65 * self.y_velocity # Bounce\n new_y_pos = target_y + dist_to_go # Move back above floor\n \n self.posn = (x, new_y_pos) # Set our new position.\n\n def draw(self, target_surface): # Same as before.\n target_surface.blit(self.image, self.posn)\n \n def contains_point(self, pt):\n \"\"\" Return True if my sprite rectangle contains point pt \"\"\"\n (my_x, my_y) = self.posn\n my_width = self.image.get_width()\n my_height = self.image.get_height()\n (x, y) = pt\n return ( x >= my_x and x < my_x + my_width and\n y >= my_y and y < my_y + my_height)\n \n def handle_click(self):\n self.y_velocity += -8 # Kick it up \n\n\n\"TWO\" \nclass DukeSprite:\n\n def __init__(self, img, target_posn):\n self.image = img\n self.posn = target_posn\n self.anim_frame_count = 0\n self.curr_patch_num = 0\n\n def update(self):\n if self.anim_frame_count > 0:\n self.anim_frame_count = (self.anim_frame_count + 1 ) % 60\n self.curr_patch_num = self.anim_frame_count // 6\n\n def draw(self, target_surface):\n patch_rect = (self.curr_patch_num * 50, 0,\n 50, self.image.get_height())\n target_surface.blit(self.image, self.posn, patch_rect)\n\n def contains_point(self, pt):\n (my_x, my_y) = self.posn\n my_width = self.image.get_width()/10\n #The reason that clicking on the squares to the right of Duke still triggered the animation was because the whole spritesheet was still displayed as the image, but only the first part, where he is shown as idle, can be seen. Thus, the solution is to split the width by however many different sprite states Duke's sheet has, in this case 10. 
\n        my_height = self.image.get_height()\n        (x, y) = pt\n        return ( x >= my_x and x < my_x + my_width and\n                 y >= my_y and y < my_y + my_height)\n\n    def handle_click(self):\n        if self.anim_frame_count == 0:\n            self.anim_frame_count = 5\n\ndef draw_board(the_board):\n    pygame.init()\n    my_clock = pygame.time.Clock()\n    colors = [(255,0,0), (0,0,0)] # Set up colors [red, black]\n    \n    n = len(the_board) # This is an NxN chess board.\n    surface_sz = 540 # Proposed physical surface size.\n    sq_sz = surface_sz // n # sq_sz is length of a square.\n    surface_sz = n * sq_sz # Adjust to exactly fit n squares.\n\n    # Create the surface of (width, height), and its window.\n    surface = pygame.display.set_mode((surface_sz, surface_sz))\n\n    ball = pygame.image.load(\"ball.png\")\n    ball_offset = (sq_sz-ball.get_width()) // 2\n    all_sprites=[]\n\n    \n    for (col, row) in enumerate(the_board):\n        a_queen = QueenSprite(ball,\n                 (col*sq_sz+ball_offset, row*sq_sz+ball_offset))\n        all_sprites.append(a_queen)\n    # Load the sprite sheet\n    duke_sprite_sheet = pygame.image.load(\"duke_spritesheet.png\")\n    \n    # Instantiate two duke instances, put them on the chessboard\n    duke1 = DukeSprite(duke_sprite_sheet,(sq_sz*2, 0))\n    duke2 = DukeSprite(duke_sprite_sheet,(sq_sz*5, sq_sz))\n    \n    # Add them to the list of sprites which our game loop manages\n    all_sprites.append(duke1)\n    all_sprites.append(duke2)\n\n    while True:\n        # Look for an event from keyboard, mouse, etc.\n        ev = pygame.event.poll()\n        if ev.type == pygame.QUIT:\n            break\n        if ev.type == pygame.KEYDOWN:\n            key = ev.dict[\"key\"]\n            if key == 27: # On Escape key ...\n                break # leave the game loop.\n            if key == ord(\"r\"):\n                colors[0] = (255, 0, 0) # Change to red + black.\n            elif key == ord(\"g\"):\n                colors[0] = (0, 255, 0) # Change to green + black.\n            elif key == ord(\"b\"):\n                colors[0] = (0, 0, 255) # Change to blue + black.\n        \n        if ev.type == pygame.MOUSEBUTTONDOWN:\n            posn_of_click = ev.dict[\"pos\"]\n            for sprite in all_sprites:\n                if sprite.contains_point(posn_of_click):\n                    sprite.handle_click()\n                    break \n\n        # Ask every sprite to update itself.\n        for sprite in all_sprites:\n            sprite.update()\n\n        # Draw a fresh background (a blank chess board)\n        
for row in range(n): # Draw each row of the board.\n            c_indx = row % 2 # Alternate starting color\n            for col in range(n): # Run through cols drawing squares\n                the_square = (col*sq_sz, row*sq_sz, sq_sz, sq_sz)\n                surface.fill(colors[c_indx], the_square)\n                # Now flip the color index for the next square\n                c_indx = (c_indx + 1) % 2\n        \n        # Ask every sprite to draw itself.\n        for sprite in all_sprites:\n            sprite.draw(surface)\n\n        pygame.display.flip()\n        my_clock.tick(60)\n        \n    pygame.quit() \n    \n#draw_board([6, 4, 2, 0, 5, 7, 1, 3])\n    \n\"THREE\"\ndef pokerhand():\n    import random\n    rng=random.Random()\n    cards=list(range(1, 53))\n    rng.shuffle(cards)\n    cards=cards[:5]\n    return cards\n\ndef displayhand():\n    pygame.init()\n    pygame.display.set_caption('Poker Hand')\n    scr_width=400\n    scr_height=400\n    surface=pygame.display.set_mode((scr_width, scr_height))\n    surface.fill((0, 100, 0)) \n    cards=pygame.image.load(\"cards.jpg\")\n    width=cards.get_width()\n    height=cards.get_height() \n    hand=pokerhand()\n    print(hand)\n    x=(scr_width-width*5/13)/2\n    y=(scr_height-height/4)/2\n    pos=(x,y) \n    for i, num in enumerate(hand):\n        card_width=width/13*((num-1)%13)\n        card_height=height/4*((num-1) // 13)\n        rect=(card_width, card_height, width/13, height/4)\n        surface.blit(cards, pos, rect)\n        x=x+width/13\n        pos=(x,y) \n    while True:\n        ev=pygame.event.poll()\n        if ev.type==pygame.QUIT:\n            break\n        \n        pygame.display.flip()\n    \n    pygame.quit()\n    \ndisplayhand()","repo_name":"JTTheAxis/thinklikeacompscientist","sub_path":"Chapter 17 Practice.py","file_name":"Chapter 17 Practice.py","file_ext":"py","file_size_in_byte":7997,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"34562028164","text":"import urllib.request\nfrom bs4 import BeautifulSoup\n\napi = 'https://www.kma.go.kr/weather/forecast/mid-term-rss3.jsp'\nurls = urllib.request.urlopen(api).read()\nsoup = BeautifulSoup(urls, 'html.parser')\n\ncities = soup.find_all(\"city\")\ndata = soup.find_all(\"data\")\ndates = soup.find(\"tmef\")\n\nprint(dates.string)\nfor i in range(len(cities)):\n    print(f'{cities[i].string}의 날씨는 {data[i*13].find(\"wf\").string}입니다.')\n\nprint(len(cities), len(data))","repo_name":"voidgogo/BigData","sub_path":"week07_web02.py","file_name":"week07_web02.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"74380517631","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 17 12:09:42 2021\n\n@author: lowkg\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom scipy.optimize import curve_fit\n\nTe = ( 38.58, 95.09,211.81,420.01,855.64)\nFc= ( 3.42,14.70,20.28,25.21,34.5)\n'''\nTe = ( 43.18, 100.69,217.11,423.74,855.64)\nFc= ( 13.98,19.23,38.54,45.58,52.05)\n'''\nEq_Option = [0,1]\nlegend =[]\n\ndef Func_ScandinavianCode(Te, Fc_u, tau, a):\n    # ScandinavianCode Equation to calculate Compressive Strength\n    return Fc_u * (np.exp(-np.power((tau / Te), a)))\n\ndef Func_AmericanCode(Te, Fc_u, k, Te_0):\n    # AmericanCode Equation to calculate Compressive Strength\n    return Fc_u*((k*(Te-Te_0))/(1+k*(Te-Te_0)))\n\n# array with 3 elements in order of Fc_u,tau,a\ndef Fc_ParameterGraph(pars,option): \n    df = pd.DataFrame()\n    Te_graph = []\n    Fc_graph = []\n    \n    Fc_u = round(pars[0],4)\n    par1 = 
round(pars[1],4)\n par2 = round(pars[2],4)\n \n iLim = max(Te)\n i = 0.5\n while i < iLim:\n Te_plot = i\n if option == 0:\n Fc_cal = Fc_u * (np.exp(-np.power((par1 / Te_plot), par2)))\n PlotLegend = 'ScandinavianEq'\n elif option == 1:\n Fc_cal = Fc_u*((par1*(Te_plot-par2))/(1+par1*(Te_plot-par2)))\n PlotLegend = 'AmericanEq'\n Te_graph.append(i)\n Fc_graph.append(Fc_cal)\n i+=0.5\n\n df['Te'] = Te_graph\n df['Fc'] = Fc_graph\n \n plt.scatter(Te, Fc)\n legend.append(PlotLegend)\n plt.plot(df['Te'], df['Fc'],linestyle='dashdot')\n plt.legend(legend)\n\ndef DeviationCalculation(pars,Te,Fc,option):\n Fc_u = round(pars[0],4)\n par1 =round(pars[1],4)\n par2 = round(pars[2],4)\n \n for i in range(len(Te)):\n Te_input = Te[i]\n if option == 0:\n Fc_cal = Fc_u * (np.exp(-np.power((par1 /Te_input), par2)))\n elif option == 1:\n Fc_cal = Fc_u*((par1*(Te_input-par2))/(1+par1*(Te_input-par2)))\n \n Dev = round((Fc_cal - Fc[i])/Fc[i]*100,2)\n print(\"Fc: \" + str(Fc[i]) + \"\\t Deviation: \" + str(Dev))\n print(\"\")\n\ndef getStrengthParameter(Te,Fc):\n F_strength =[Func_ScandinavianCode,Func_AmericanCode]\n Fc_0 = 25\n for option in Eq_Option:\n # Guestimate initial parameter\n if option == 0:\n par1 = 10 ; par2 = 0.6;\n param = ['Fc_u','tau','a']\n \n elif option == 1:\n par1 = 0 ; par2 = 0\n param = ['Fc_u','k','Te_0']\n \n pars, cov = curve_fit(f=F_strength[option], xdata=Te, ydata=Fc,\n p0=[Fc_0,par1,par2])\n \n for i in range(len(param)):\n print(param[i] + ':' + str(round(pars[i],2)))\n \n Fc_ParameterGraph(pars,option)\n DeviationCalculation(pars, Te, Fc,option)\n \n return pars\n\nplt.figure(figsize=(6, 4))\npars = getStrengthParameter(Te,Fc)\n\n\n\n","repo_name":"uen6ueakKG/ParameterCurveFit","sub_path":"CalibrationParameter.py","file_name":"CalibrationParameter.py","file_ext":"py","file_size_in_byte":2886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"38952180636","text":"\n\nfrom AbstractSender import AbstractSender\nimport tempfile\nimport subprocess\nimport os\n\nclass PierfSender(AbstractSender):\n '''\n classdocs\n '''\n pierfXML_template =\\\n'''\n\n \n \n \n \n {1}\n \n \n \n \n\n'''\n def __init__(self, context):\n '''\n Constructor\n '''\n self.__defaults = {'interface':'eth0',\n 'pierf_app_full_file_name' : '/opt/pierf/pierf'}\n \n context.update(self.__defaults)\n self.__context = context\n \n def cutCRC32(self, packet ):\n return packet[0:-4]\n \n def convert2Hex(self, packet ):\n result = \"\"\n for byte in packet:\n result += \":\" + \"{0:2x}\".format(ord(byte)).replace(' ', '0').upper()\n \n return result\n \n def generatePierfXml(self, hexPacket):\n pierfXML = self.pierfXML_template.format(self.__context['interface'], \n hexPacket)\n return pierfXML\n \n def runPierf(self, configFileName):\n subprocess.call([self.__context['pierf_app_full_file_name'],configFileName])\n \n def sendPacket(self, packet): \n try:\n tmpFileName = None\n with tempfile.NamedTemporaryFile(mode='w+', delete=False) as tmpFile:\n tmpFile.write( \n self.generatePierfXml(\n self.convert2Hex(\n self.cutCRC32(packet))))\n \n tmpFile.flush()\n tmpFileName = tmpFile.name\n \n self.runPierf(tmpFileName)\n finally:\n if tmpFileName:\n os.unlink(tmpFileName)\n\n \n return None","repo_name":"olegh/packet_synthesier","sub_path":"src/PierfSender.py","file_name":"PierfSender.py","file_ext":"py","file_size_in_byte":1895,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} 
+{"seq_id":"4108102230","text":"from .models import MainTable\nfrom .models import MainTable, CafedraClasses\n\n\nclass Rasp:\n\n # Функция устанавливает в главную таблицу преподователей\n def setTeacher(Table, PrepodArray, priority):\n print(priority)\n for x in range(len(Table)):\n for PrepodTable in PrepodArray:\n day = PrepodTable.filter(Date=Table[x].vremya)\n if day and Table[x].Prepod == \"\":\n mainTableday = Table.filter(vremya=Table[x].vremya)\n for lesson in day:\n for mainDay in mainTableday:\n if lesson.LessonNumber == mainDay.NLecii:\n if priority == 1:\n obj = Table.get(pk=mainDay.id)\n obj.Prepod = lesson.teacher.last_name + \" \" + lesson.teacher.first_name # надо откуда то брать имя препода\n obj.teacherId = lesson.teacher\n obj.save()\n\n if priority == 2:\n isBusy = MainTable.objects.filter(NLecii=mainDay.NLecii, vremya=mainDay.vremya)\n if isBusy:\n obj = Table.get(pk=mainDay.id + 1)\n obj.Prepod = lesson.teacher.last_name + \" \" + lesson.teacher.first_name # надо откуда то брать имя препода\n obj.teacherId = lesson.teacher\n obj.save()\n\n # функция устанавливает предмет в главную таблицу\n def setSubject(Table, studyPlan, hours):\n exTable = Table.exclude(Prepod=\"\")\n print(studyPlan)\n for x in range(len(exTable)):\n for y in range(len(studyPlan)):\n if exTable[x].teacherId == studyPlan[y].teacher and hours > 0:\n obj = Table.get(pk=exTable[x].id)\n hoursType = studyPlan[y].hours\n\n if studyPlan[y].typeSubject == \"Лекция\" and obj.Auditoriya == \"\" and studyPlan[\n y].remaningLectures > 0:\n clases = CafedraClasses.objects.filter(AllowedLections=\"True\")\n obj.Predmet = studyPlan[y].subject\n obj.Auditoriya = clases[0].ClassName\n obj.Podgruppa = studyPlan[y].typeSubject\n obj.save()\n studyPlan[y].remaningLectures -= 1\n studyPlan[y].save()\n hours -= 1\n continue\n\n if studyPlan[y].typeSubject == \"Практика\" and obj.Auditoriya == \"\" and studyPlan[\n y].remaningLectures > 0:\n clases = CafedraClasses.objects.filter(AllowedPractice=\"True\")\n obj.Predmet = studyPlan[y].subject\n obj.Auditoriya = clases[0].ClassName\n obj.Podgruppa = studyPlan[y].typeSubject\n obj.save()\n studyPlan[y].remaningLectures -= 1\n studyPlan[y].save()\n hours -= 1\n continue\n\n if studyPlan[y].typeSubject == \"ЛабРабота\" and obj.Auditoriya == \"\" and studyPlan[\n y].remaningLectures > 0:\n clases = CafedraClasses.objects.filter(AllowedLabs=\"True\")\n obj.Predmet = studyPlan[y].subject\n obj.Auditoriya = clases[0].ClassName\n obj.Podgruppa = studyPlan[y].typeSubject\n obj.save()\n\n studyPlan[y].remaningLectures -= 1\n studyPlan[y].save()\n hours -= 1\n continue\n\n def lastIterate(Table,studyPlan):\n Table = Table.filter(Prepod__exact='')\n teacherId = studyPlan[0].teacher\n for x in Table:\n x.Prepod = teacherId.last_name + \" \" + teacherId.first_name\n lections = studyPlan.filter(typeSubject=\"Лекция\")[0].remaningLectures\n practice = studyPlan.filter(typeSubject=\"Практика\")[0].remaningLectures\n for type in studyPlan:\n if type.typeSubject == \"Лекция\" and type.remaningLectures > 0:\n clases = CafedraClasses.objects.filter(AllowedLections=\"True\")\n x.Predmet = type.subject\n x.Auditoriya = clases[0].ClassName\n x.Podgruppa = type.typeSubject\n x.save()\n type.remaningLectures -= 1\n type.save()\n continue\n if type.typeSubject == \"Практика\" and type.remaningLectures > 0 and lections == 0:\n clases = CafedraClasses.objects.filter(AllowedPractice=\"True\")\n x.Predmet = type.subject\n x.Auditoriya = clases[0].ClassName\n x.Podgruppa = type.typeSubject\n x.save()\n 
type.remaningLectures -= 1\n                    type.save()\n\n                    continue\n                if type.typeSubject == \"ЛабРабота\" and type.remaningLectures > 0 and lections == 0 and practice == 0:\n                    clases = CafedraClasses.objects.filter(AllowedLabs=\"True\")\n                    x.Predmet = type.subject\n                    x.Auditoriya = clases[0].ClassName\n                    x.Podgruppa = type.typeSubject\n                    x.save()\n                    type.remaningLectures -= 1\n                    type.save()\n\n                    continue\n","repo_name":"NavigatorZero/DiplomTimetable","sub_path":"Timetable/MainApp/Rasp.py","file_name":"Rasp.py","file_ext":"py","file_size_in_byte":5943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"26162330849","text":"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n\nimport sys\nimport socket\n\nCOMMON_DOMAINS = [\n    'api',\n    'callbacks',\n    'apt',\n    'rpm',\n    'repo',\n    'www',\n    'www1',\n    'www2',\n    'www3',\n    'www4',\n    'www5',\n    'www6',\n    'www7',\n    'www8',\n    'www9',\n    'ws',\n    'beta',\n    'web',\n    'ftp',\n    'irc',\n    'pptp',\n    'old',\n    'deprecated',\n    'new',\n    'dns',\n    'dev',\n    'test',\n    'test2',\n    'stage',\n    'staging',\n    'admin',\n    'auth',\n    'database',\n    'db',\n    'master',\n    'slave',\n    'mysql',\n    'mariadb',\n    'mongodb',\n    'postgres',\n    'webmail',\n    'pop',\n    'smtp',\n    'mail',\n    'intranet',\n    'vpn',\n    'vps',\n    'jump',\n    'jumpbox',\n    'tunnel',\n    'demo',\n    'temp',\n    'backend',\n    'cms',\n    'crm',\n    'support',\n    'blog',\n    'help',\n    'stats',\n    'statistics',\n    'health',\n    'status',\n    'owncloud',\n    'nextcloud',\n    'redmine',\n    'source',\n    'git',\n    'gitlab',\n    'svn',\n    'hg',\n    'mercurial',\n    'bitbucket',\n    'wiki',\n    'mediawiki',\n    'office',\n    'backup',\n    'virtual',\n    'docker',\n    'k8s',\n    'kubernetes',\n    'visma',\n    'logs',\n    'media',\n    'static',\n    'images',\n    'img',\n    'imgs',\n    'assets',\n    'cache',\n    'episerver',\n    'epi',\n    'monitor',\n    'monitoring',\n    'sip',\n    'a','b','c','d','e','f','g','h','i','j',\n    'k','l','m','n','o','p','q','r','s','t',\n    'u','v','w','x','y','z',\n    'www.m',\n    'shop',\n    \"cart\",\n    'store',\n    'buy',\n    'app',\n    'cpanel',\n    'home',\n    'forum',\n    'cdn',\n    'secure',\n    'whm',\n    'files',\n    'filetransfer',\n    'portal',\n    'member',\n    'members',\n    'community',\n    'subscriber',\n    'subscribers',\n    'payment',\n    'payments',\n    'mobile',\n    'phpmyadmin',\n    'cloud',\n    'fake',\n    'login',\n    'account',\n    'accounts',\n    'people',\n    'live',\n    'apps',\n    'jira',\n    'internal',\n    'secret',\n    'feed',\n    'go',\n    'redirect',\n    'partners',\n    'labs',\n    'en',\n    'us',\n    'fr',\n    'se',\n    'uk',\n    'fi',\n    'dk',\n    'de',\n    'no',\n    'au',\n    'be',\n    'ru',\n    'ch',\n    'it',\n    'nl',\n    'com',\n    'next',\n    'one',\n    'two',\n    'three',\n    'four',\n    'five',\n    'six',\n    'seven',\n    'eight',\n    'nine',\n    'ten',\n    'open',\n    'info',\n    'developer',\n    'enterprise',\n    'console',\n    'try',\n    'discuss',\n    'docs',\n    'newsletter',\n    'premium',\n    'feedback',\n    'updates',\n    'update',\n    'download',\n    'downloads',\n    'projects',\n    'project',\n    'survey',\n    'my',\n    'server',\n    'controller',\n    'stream',\n    'id',\n    'marketing',\n    'tracking',\n    'campaign',\n    'investors',\n    'investor',\n    'signup',\n    'transition',\n    'link',\n    'links',\n    'group',\n    'ups',\n    'dhl',\n    'fedex',\n    'distribution',\n    'observer',\n    'shopify',\n    'wordpress',\n    'aws',\n    'heroku',\n    'google',\n    'squarespace',\n    'resources',\n    'preview',\n]\n\ndef getIP(domain):\n    try:\n        data = socket.gethostbyname_ex(domain)\n        ipx = data[2]\n        return ipx\n    except:\n        return []\n\ndef main():\n\n    result = []\n\n    if len(sys.argv) != 2:\n        print(\"Requires a domain as argument\")\n        return 0\n\n    for i in COMMON_DOMAINS:\n        domain = i+\".\"+sys.argv[1]\n        ips = getIP(domain)\n        if 
len(ips) != 0:\n sys.stdout.write(\"%s:\\n\" % domain)\n for ip in ips:\n sys.stdout.write(\"\\t\\t\\t%s\\n\" % ip)\n sys.stdout.write('\\n\\n')\n sys.stdout.flush()\n\n\n\nif __name__ == '__main__':\n sys.exit(main())\n\n\n","repo_name":"samiberndtson/dns.py","sub_path":"dns.py","file_name":"dns.py","file_ext":"py","file_size_in_byte":3602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"907354614","text":"import json\nimport fasttext\nimport pandas as pd\n\n# import os\n# os.chdir(\"/Users/a.orzikulov/Desktop/GitHub/Integro\") ################\n\nimport util ################\n\n\n# util.main() ################\n# file_path = \"output-25-8-2021.json\" ################\n\n\ndef main(file_path):\n with open(file_path, \"r\", encoding=\"UTF-8\") as file: ################\n data = json.load(file)\n\n LANGUAGE_MODEL_PATH = '../lid.176.bin'\n model = fasttext.load_model(LANGUAGE_MODEL_PATH)\n russian_texts = []\n uzbek_texts = []\n for channel, posts in data.items():\n for post_id, (date, views, post_text) in posts.items(): ## data[\"https://t.me/Buka_tumani\"].values()\n if not any([post_text == \"None\", post_text == \"\", post_text is None]):\n post_text = util.multiple_replace(util.REPLACEMENTS, post_text)\n prediction, score = model.predict(post_text)\n if prediction[0] == \"__label__ru\" and score[0] >= 0.8:\n russian_texts.append([channel, post_id, date, views, post_text])\n else:\n fixed_message = \"\"\n for idx, character in enumerate(post_text):\n if character == \"Е\" or character == \"е\":\n fixed_message += util.change_e(\n idx, post_text, util.TRANSLATOR, util.STOP_SYMBOLS, util.VOWELS)\n elif character == \"Ц\" or character == \"ц\":\n fixed_message += util.change_ts(\n idx, post_text, util.TRANSLATOR, util.STOP_SYMBOLS, util.VOWELS)\n elif character in util.TRANSLATOR:\n fixed_message += util.TRANSLATOR[character]\n else:\n fixed_message += character\n\n uzbek_texts.append([channel, post_id, date, views, fixed_message])\n\n util.log('info', f\"The number of Uzbek posts is {len(uzbek_texts)}\")\n util.log('info', f\"The number of Russian posts is {len(russian_texts)}\")\n header = [\"channel\", \"post_id\", \"date\", \"views\", \"post\"] # \"label\"\n if uzbek_texts:\n data_frame = pd.DataFrame(uzbek_texts)\n data_frame.to_excel('uzbek.xlsx', header=header, index=False)\n if russian_texts:\n data_frame = pd.DataFrame(russian_texts)\n data_frame.to_excel('russian.xlsx', header=header, index=False)\n\n util.log('success', \"Two Excel files have been created.\")\n\n# import numpy as np\n\n# with open(\"../uzbek_texts.txt\", \"w\", encoding=\"UTF-8\") as file:\n# for text in uzbek_texts:\n# file.writelines(text + \"\\n\")\n\n# with open(\"../russian_texts.txt\", \"w\", encoding=\"UTF-8\") as file:\n# for text in russian_texts:\n# file.writelines(text + \"\\n\")\n\n# russian_texts = []\n# uzbek_texts = []\n# for posts in data.values():\n# for post_text in posts.values(): ## data[\"https://t.me/Buka_tumani\"].values()\n# if not any([post_text == \"None\", post_text == \"\", post_text is None]):\n# if np.random.random() <= 0.03693444136657433:\n# post_text = util.multiple_replace(REPLACEMENTS, post_text)\n# prediction, score = model.predict(post_text)\n# if prediction[0] == \"__label__ru\" and score[0] >= 0.8:\n# russian_texts.append(post_text)\n# else:\n# uzbek_texts.append(post_text)\n\n# print(len(uzbek_texts))\n# 
print(len(russian_texts))\n","repo_name":"Asrorbek-Orzikulov/sentiment_analysis","sub_path":"action/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":3506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"12343359757","text":"#!/usr/bin/env python3\nimport jsonschema\nimport sys\nimport yaml\n\nif len(sys.argv) < 2:\n sys.exit(\"Usage: {} MANUAL_FILE ...\".format(sys.argv[0]))\n\nwith open(\"manual_schema.yml\", \"r\") as schema_file:\n manual_schema = yaml.safe_load(schema_file)\n\nfor path in sys.argv[1:]:\n with open(path, \"r\") as manual_file:\n manual_data = yaml.safe_load(manual_file)\n try:\n jsonschema.validate(instance=manual_data, schema=manual_schema)\n except jsonschema.exceptions.ValidationError as e:\n print(\"Failed to validate:\", path, file=sys.stderr)\n sys.exit(e)\n","repo_name":"jqlang/jq","sub_path":"docs/validate_manual_schema.py","file_name":"validate_manual_schema.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":27144,"dataset":"github-code","pt":"60"} +{"seq_id":"22611509401","text":"from __future__ import print_function\nfrom django.template import Context, loader, RequestContext\nfrom app.models import Contacts\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import authenticate, login, logout\n\n#ModelForms\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django import forms\nfrom django.utils import timezone\nfrom django.views.decorators import csrf\nfrom django.urls import reverse , resolve\nfrom django.core.mail import send_mail\n\nfrom .forms import ContactForm, LookupForm, CancelForm\n\n#Recycling logic\nfrom .recycle import parse_address, confirm_subscription, cancel_subscription, get_initial_message, insert_contact, get_zones, insert_initial_message, select_initial_message\n\n#Hash value for obfuscating primary key. stackoverflow.com/questions/10559935/django-how-do-i-hash-a-url-from-the-database-objects-primary-key\nfrom mysite.passwords import OBFUSCATE\n\n#Logo image\nlogo_image=\"recyclobuddy_logo.jpg\"\n\n# Create your views here.\n\n#See pydanny.com/core-concepts-django-modelforms.html, but note several errors in the example code.\ndef index(request):\n message = \"\"\n if request.method == \"POST\":\n form = LookupForm(request.POST)\n\n if form.is_valid():\n\n #Capture fields from the form\n municipality=form.cleaned_data['municipality']\n address=form.cleaned_data['address']\n zip=form.cleaned_data['zip']\n\n #Parse address to put into standard form. Check for error\n error_code, parsed_address = parse_address(address, municipality)\n\n\n if error_code == 1:\n #If error_code==1, failed to find street identifier\n message = \"Didn't work. 
Please check municipality and omit apartment or suite from address.\"\n                subscribe_URL=\"\"\n\n            else:\n                #Looks good, go ahead with the process\n\n                #Look up zone information and return a zone dictionary giving zone and day for recycling, trash and yard waste\n                try: \n                    zone_dict = get_zones(municipality, parsed_address, zip)\n                    server_failed=False\n                except Exception:\n                    zone_dict = False\n                    server_failed=True\n                    #don't re-raise here: the else-branch below reports the server problem to the user\n\n                if zone_dict:\n                    #Do lookup from schedules table and get message\n                    messages=get_initial_message(municipality, zone_dict)\n\n                    #Copy result into contacts table\n                    primary_key=insert_contact(municipality, parsed_address, zip, zone_dict)\n\n                    #Copy message into initial_messages table\n                    insert_initial_message(primary_key, messages)\n\n                    #Obfuscate the primary key\n                    masked_key = primary_key ^ OBFUSCATE\n\n                    #Create URL for subscription with primary key\n                    subscribe_URL=\"subscribe_\" + str(masked_key)\n\n                    #Redirect to subscription page\n                    return HttpResponseRedirect(subscribe_URL)\n\n                else:\n                    #Failed. Could be the address wasn't good (server_failed is False), or that the server can't be reached. \n                    if server_failed==False:\n                        message = \"Didn't work. City couldn't locate that address. Missing N-S-E-W? \"\n                    else:\n                        message = \"Can't reach city server. Could be down or heavily loaded.\"\n                    \n                    subscribe_URL=\"\"\n                    form=LookupForm(request.POST)\n\n            #Failed to get address in usable form or failed in zone look up\n\n            c = {\n                'app_template': 'app/basic_template.html',\n                'logo_image' : logo_image,\n                'message': message,\n                'subscribe_URL': subscribe_URL,\n                'form' : form\n            }\n\n            return render (request, \"app/index.html\", c )\n\n        else:\n            c = {\n                'app_template': 'app/basic_template.html',\n                'logo_image' : logo_image,\n                'message': 'Hit a snag. Omit any apt or suite info, use 5-digit zip',\n                'form' : form\n            }\n\n            return render (request, \"app/index.html\", c ) \n    else:\n        form = LookupForm(initial={'municipality': 'LOWER_MERION'})\n\n        c = {\n            'app_template': 'app/basic_template.html',\n            'logo_image' : logo_image,\n            'form': form,\n        }\n\n        return render (request, \"app/index.html\", c ) \n\n#@login_required\ndef subscribe(request, masked_key):\n    #undo obfuscation\n    primary_key = int(masked_key) ^ OBFUSCATE\n\n    cat = get_object_or_404(Contacts, index_key=primary_key)\n\n    #If this request has already been submitted, show the acknowledge page to avoid exposing private data.\n    if cat.request==True:\n        c = {\n            'app_template': 'app/basic_template.html',\n            'logo_image' : logo_image,\n        }\n        #Send URL acknowledging request\n        return render (request, \"app/acknowledge.html\", c )\n\n    if request.method == \"POST\":\n        #Using the instance here allows an update. 
docs.djangoproject.com/en/1.1/topics/forms/modelforms/#the-dave-method\n        form = ContactForm(request.POST, instance=cat)\n\n        if form.is_valid():\n            model_instance = form.save()\n            primary_key = model_instance.pk\n\n            #Create message for confirmation\n            confirmation_message=confirm_subscription(\n                masked_key, \n                model_instance.first_name,\n                model_instance.last_name,\n                model_instance.alert_day,\n                model_instance.alert_time,\n                model_instance.email_alert,\n                model_instance.sms_alert,\n            )\n            \n            #Update Contacts to reflect confirmation request\n            c=Contacts.objects.get(pk=primary_key)\n            c.request=True\n            c.save()\n\n            #Send mail message; failures are caught and logged\n            try:\n                send_mail('Confirmation request', confirmation_message, 'recyclobuddy@recyclobuddy.com', [model_instance.email], fail_silently=False)\n            except Exception:\n                print (\"Failed to send confirmation email\\n\")\n\n\n            c = {\n                'app_template': 'app/basic_template.html',\n                'logo_image' : logo_image,\n            }\n            #Send URL acknowledging request\n            return render (request, \"app/acknowledge.html\", c )\n\n    else:\n        \n        form=ContactForm(instance = cat)\n        \n    #You are here either because it's presenting the form before data is added, or the data isn't valid.\n    \n    #Get message information\n    messages=select_initial_message(primary_key)\n\n\n    c = {\n        'app_template': 'app/basic_template.html',\n        'logo_image' : logo_image,\n        'message_1': messages[0],\n        'message_2': messages[1],\n        'message_3': messages[2],\n        'form': form,\n    }\n\n    return render(request, \"app/subscription.html\", c ) \n\n#@login_required\ndef confirm(request, masked_key):\n    #undo obfuscation\n    primary_key = int(masked_key) ^ OBFUSCATE\n\n    #Check if there is a valid object for this primary key\n    cat = get_object_or_404(Contacts, index_key=primary_key)\n\n    #Do validation: Does object exist and is request outstanding?\n    c=Contacts.objects.get(pk=primary_key)\n    if c and c.request==True:\n        valid=True\n    else:\n        valid=False\n\n    #If validation passed, then send confirmation message\n    if valid == True:\n        #Update Contacts to reflect subscription\n        c.subscribe=True\n        c.save()\n\n\n        #Send okay message\n\n        c = {\n            'app_template': 'app/basic_template.html',\n            'logo_image' : logo_image,\n        }\n        return render(request, \"app/confirm.html\", c )\n\n    else:\n        #Failed, so send back to beginning.\n        form=LookupForm()\n\n        c = {\n            'app_template': 'app/basic_template.html',\n            'logo_image' : logo_image,\n            'form': form,\n        }\n\n        return render(request, \"app/try_again.html\", c ) \n\n#@login_required\ndef cancel(request):\n    if request.method == \"POST\":\n        form = CancelForm(request.POST)\n        if form.is_valid():\n\n            #Capture fields from the form\n            email=form.cleaned_data['email']\n            mobile=form.cleaned_data['mobile']\n\n            #Check if email and mobile combination in database\n            success=cancel_subscription(email, mobile)\n\n            if success==True:\n                #Cancellation worked.\n                c = {\n                    'app_template': 'app/basic_template.html',\n                    'logo_image' : logo_image,\n                }\n                return render(request, \"app/gone.html\", c )\n\n            else:\n                #Cancellation failed\n                form = CancelForm()\n                message = \"We couldn't find that combination of email address and mobile number. Please try again.\"\n\n                c = {\n                    'app_template': 'app/basic_template.html',\n                    'logo_image' : logo_image,\n                    'message': message,\n                    'form': form,\n                }\n\n                return render(request, \"app/cancel.html\", c )\n\n    else:\n        form = CancelForm()\n        message = \"Sorry to see you go! 
Please enter email and mobile number to discontinue alerts.\"\n \n c = {\n 'app_template': 'app/basic_template.html',\n 'logo_image' : logo_image,\n 'message': message,\n 'form': form,\n }\n\n return render(request, \"app/cancel.html\", c )\n\ndef about(request):\n c = {\n 'app_template': 'app/basic_template.html',\n 'logo_image' : logo_image,\n }\n\n return render(request, \"app/about.html\", c )\n\ndef faq(request):\n c = {\n 'app_template': 'app/basic_template.html',\n 'logo_image' : logo_image,\n }\n\n return render(request, \"app/faq.html\", c )\n\ndef terms(request):\n c = {\n 'app_template': 'app/basic_template.html',\n 'logo_image' : logo_image,\n }\n\n return render(request, \"app/terms.html\", c )\n\ndef trash_talk(request):\n c = {\n 'app_template': 'app/basic_template.html',\n 'logo_image' : logo_image,\n }\n\n return render(request, \"app/trash-talk.html\", c )\n\ndef share(request):\n c = {\n 'app_template': 'app/basic_template.html',\n 'logo_image' : logo_image,\n }\n\n return render(request, \"app/share.html\", c )\n\n#@login_required\ndef test(request):\n c = {\n 'app_template': 'app/basic_template.html',\n 'logo_image' : logo_image,\n }\n\n return render(request, \"app/repairs.html\", c )\n\n#@login_required\ndef root_index(request):\n return HttpResponseRedirect('./app/')\n\n@login_required\ndef success(request):\n return HttpResponseRedirect('../app/index')\n\n#@login_required\ndef logout_view(request):\n logout(request)\n return HttpResponseRedirect('../')\n","repo_name":"hhummel/recyclobuddy","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"18233679661","text":"import sys\nfrom os.path import splitext\nfrom PIL import Image,ImageOps\n\nif len(sys.argv) == 3:\n extensions = [\".jpg\", \".jpeg\", \".png\"]\n extension1 = splitext(sys.argv[1])\n extension2 = splitext(sys.argv[2])\n if extension1[1] == extension2[1] and extension1[1] in extensions:\n try:\n BeforeImage = Image.open(sys.argv[1])\n except FileNotFoundError:\n sys.exit(\"File not found\")\n #Image.open\n shirt = Image.open(\"shirt.png\")\n size = shirt.size\n #ImageOps.fit\n muppet = ImageOps.fit(BeforeImage,size)\n #Image.paste\n muppet.paste(shirt, shirt)\n #Image.save\n muppet.save(sys.argv[2])\n\n elif extension1[1].lower() != extension2[1].lower():\n sys.exit(\"Input and output have different extensions\")\n else:\n sys.exit(\"Wrong Extension\")\n\nelif len(sys.argv) < 3:\n sys.exit(\"Too few command-line arguments\")\nelif len(sys.argv) > 3:\n sys.exit(\"Too many command-line arguments\")\nelse:\n sys.exit(\"Invalid input\")","repo_name":"Sowmika-Pulagam/Harvard_CS50","sub_path":"shirt/shirt.py","file_name":"shirt.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"36775368149","text":"import mysql.connector\nfrom dotenv import dotenv_values\n\nsecret = dotenv_values('.env')\n\ndef connectToDB():\n dbconfig = {\n \"host\": \"127.0.0.1\",\n \"port\": 3306,\n \"user\": secret[\"mysql_user\"], \n \"password\": secret[\"mysql_pwd\"],\n \"database\": \"taipei_trip\",\n }\n try:\n connectionPool = mysql.connector.pooling.MySQLConnectionPool(pool_name=\"taipei_trip\", pool_size=5, **dbconfig)\n print(\"建立connectionPool成功\")\n return connectionPool\n except Exception as ex:\n print(\"建立connectionPool失敗...\\n錯誤訊息:\",ex)\n return False \n\nconnectionPool = 
connectToDB()\n\ndef connectDB():\n    if(connectionPool):\n        return connectionPool.get_connection()\n    else:\n        return False\n\ndef insert(execute_str:str, execute_args: tuple):\n    connection = connectDB()\n    if(not connection):\n        return False\n    \n    cursor = connection.cursor() \n    try:\n        print(\"開始執行insertData\")\n        cursor.execute(execute_str, execute_args)\n        connection.commit()\n    except Exception as ex:\n        print(f\"insert error msg = ${ex}\")\n        return False\n    finally:\n        cursor.close()\n        connection.close()\n    print(\"insert Data 成功!!\")\n    return True\n\ndef find(query_str, query_args=None):\n    connection = connectDB()\n    if(not connection):\n        return False\n    cursor = connection.cursor(dictionary=True)\n    try:\n        cursor.execute(query_str, query_args)\n        result = cursor.fetchall()\n    except Exception as ex:\n        print(f\"memberInfo查詢失敗,錯誤訊息為\\n${ex}\")\n        return False\n    finally:\n        cursor.close()\n        connection.close()\n    print(f\"dbConnector result = ${result}\")\n    if(result):\n        return result[0]\n    else:\n        return False\n    \ndef delete(execute_str: str, execute_args: tuple):\n    connection = connectDB()\n    if(not connection):\n        return False\n    cursor = connection.cursor()\n    try:\n        cursor.execute(execute_str, execute_args)\n        connection.commit()\n        print(\"刪除成功!\")\n    except Exception as e:\n        print(\"刪除失敗,錯誤訊息\\n\",e)\n        return False\n    finally:\n        cursor.close()\n        connection.close()\n    return True\n    \ndef update(execute_str: str, execute_args: tuple):\n    connection = connectDB()\n    if(not connection):\n        return False\n    cursor = connection.cursor()\n    try:\n        cursor.execute(execute_str, execute_args)\n        change_row = cursor.rowcount\n        if(change_row == 0):\n            print(\"更新失敗,尚未找到符合條件的目標。\")\n            return False\n        connection.commit()\n        print(\"更新成功!\")\n        return True\n    except Exception as e:\n        print(\"更新失敗,錯誤訊息\\n\",e)\n        return False\n    finally:\n        # commit/rowcount moved into try: a return inside finally would swallow the except-path result\n        cursor.close()\n        connection.close()\n    \nif __name__ == '__main__':\n    pass\n\n\n","repo_name":"joeyliao127/Taipei_day_trip","sub_path":"packages/dbConnector.py","file_name":"dbConnector.py","file_ext":"py","file_size_in_byte":2954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"40058425","text":"import collections\nfrom collections.abc import Iterable\n\nfrom django.utils.module_loading import import_string\n\nfrom .models import EventType, EventSideEffectLog\n\n\nclass EventTypeRegister(collections.UserDict):\n    \"\"\"A dictionary mapping fully qualified event type values with the event type.\"\"\"\n\n    def __init__(self, event_type_classes):\n        super().__init__()\n        for event_type_class in event_type_classes:\n            event_type_enum = import_string(event_type_class)\n            for event_type in event_type_enum:\n                self.data[event_type.fully_qualified_value] = event_type\n\n\nRegisteredSideEffect = collections.namedtuple(\n    \"RegisteredSideEffect\", field_names=(\"callable\", \"condition\")\n)\n\n\nclass EventHandlerRegister:\n    \"\"\"Stores event handlers.\"\"\"\n\n    def __init__(self):\n        self.handlers = collections.defaultdict(lambda: [])\n        self.side_effects = collections.defaultdict(lambda: [])\n\n    def register(self, *, event_type):\n        def decorator(f):\n            if isinstance(event_type, EventType):\n                self.handlers[event_type].append(f)\n            elif isinstance(event_type, Iterable):\n                for type in event_type:\n                    self.handlers[type].append(f)\n            else:\n                raise TypeError(f\"Unknown event type: {event_type}\")\n\n            return f\n\n        return decorator\n\n    def register_side_effect(self, callable, *, condition=None):\n        def decorator(f):\n            
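# Pair the side-effect callable with its optional condition and remember it for handler f.\n            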
self.side_effects[f].append(RegisteredSideEffect(callable, condition))\n return f\n\n return decorator\n\n def _run_event_function(self, log, function, *args, **kwargs):\n result = None\n try:\n result = function(*args, **kwargs)\n log.status = log.Status.SUCCESS\n log.message = str(result)\n except Exception as error:\n log.status = log.Status.FAILED\n log.message = repr(error)\n finally:\n log.save()\n\n return result\n\n def handle(self, event, skip_side_effects=False):\n for handler in self.handlers[event.type]:\n handler_log = event.handler_logs.create_from_function(function=handler)\n result = self._run_event_function(handler_log, handler, event)\n\n if skip_side_effects or handler_log.failed:\n continue\n\n for registered_side_effect in self.side_effects[handler]:\n condition_class = registered_side_effect.condition\n should_run = (\n condition_class().has_condition(event) if condition_class else True\n )\n status = (\n EventSideEffectLog.Status.PROCESSING\n if should_run\n else EventSideEffectLog.Status.SKIPPED\n )\n side_effect_log = handler_log.side_effect_logs.create_from_function(\n function=registered_side_effect.callable, status=status\n )\n\n if should_run:\n self._run_event_function(\n side_effect_log, registered_side_effect.callable, result\n )\n","repo_name":"vikashtank/django-event-sourcing","sub_path":"django_event_sourcing/registers.py","file_name":"registers.py","file_ext":"py","file_size_in_byte":3185,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"36603342642","text":"#!/usr/bin/env python3\n# pylint: disable=missing-docstring,import-error,invalid-name\n\nimport ldap\nimport ldap3\nimport ssl\n\n\ndef python_ldap():\n print(\"python-ldap\")\n l = ldap.initialize(\"ldaps://ldap.su.se\") # noqa: E741\n\n l.sasl_gssapi_bind_s()\n print(\"whoami: {}\".format(l.whoami_s()))\n\n result = l.search_s(base=\"\",\n scope=ldap.SCOPE_SUBTREE,\n filterstr=\"(uid=simlu)\",\n attrlist=[\"eduPersonAffiliation\", \"displayName\"])\n\n for _dn, entry in result:\n for attribute in entry:\n print(\"attribute: {} is: {!r}\".format(\n attribute,\n # We need to decode the bytes to UTF-8 apparently\n [a.decode(\"utf-8\") for a in entry[attribute]]))\n\n\ndef ldaptre():\n print(\"ldap3\")\n conn = ldap3.Connection(\n server=ldap3.Server('ldap.su.se',\n use_ssl=True,\n tls=ldap3.Tls(\n validate=ssl.CERT_REQUIRED,\n )\n ),\n auto_bind=True,\n authentication=ldap3.SASL,\n sasl_mechanism=ldap3.GSSAPI,\n )\n print(\"whoami: {}\".format(conn.extend.standard.who_am_i()))\n conn.search(search_base='',\n search_scope=ldap3.SUBTREE,\n search_filter='(uid=simlu)',\n attributes=['eduPersonAffiliation', 'displayName'])\n for entry in conn.entries:\n for attribute in entry.entry_attributes:\n print(\"attribute: {} is: {!r}\".format(attribute,\n entry[attribute].values))\n\n\ndef main():\n python_ldap()\n print(\"\")\n ldaptre()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"simmel/affilaffe","sub_path":"affilaffe/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"39845013158","text":"class Node:\r\n def __init__(self, symbol, frequency, left=None, right=None) -> None:\r\n self.symbol = symbol\r\n self.frequency = frequency\r\n self.left = left\r\n self.right = right\r\n self.code = \"\"\r\n\r\nclass Haff:\r\n def create_nodes(self, frequency: dict) -> list[Node]:\r\n nodes = []\r\n for symbol 
in frequency:\r\n nodes.append(Node(symbol, frequency[symbol]))\r\n return nodes\r\n\r\n def calculate_frequency(self, message: str) -> dict:\r\n frequency = {}\r\n for i in list(set(message)):\r\n frequency[i] = message.count(i)\r\n return frequency\r\n\r\n def calculate_code(self, node: Node, code=\"\", codes=None) -> dict:\r\n if codes is None:\r\n codes = {}\r\n\r\n code = code + str(node.code)\r\n\r\n if node.left:\r\n self.calculate_code(node.left, code, codes)\r\n if node.right:\r\n self.calculate_code(node.right, code, codes)\r\n\r\n if not node.left and not node.right:\r\n codes[node.symbol] = code\r\n\r\n return codes\r\n\r\n def replace_encoded(self, message: str, code_map: dict) -> str:\r\n for i in code_map:\r\n message = message.replace(i, code_map[i])\r\n return message\r\n\r\n def decode(self, message: str, huffman_tree: Node):\r\n tree_head = huffman_tree\r\n decoded = \"\"\r\n\r\n for i in message:\r\n if i == '1':\r\n huffman_tree = huffman_tree.right \r\n if i == '0':\r\n huffman_tree = huffman_tree.left\r\n try:\r\n if huffman_tree.left.symbol is None and huffman_tree.right.symbol is None:\r\n pass\r\n except AttributeError:\r\n decoded += huffman_tree.symbol\r\n huffman_tree = tree_head\r\n \r\n return decoded\r\n\r\n def encode(self, message: str) -> str:\r\n nodes = self.create_nodes(self.calculate_frequency(message))\r\n\r\n while len(nodes) > 1:\r\n nodes = sorted(nodes, key=lambda x: x.frequency)\r\n\r\n right_node: Node = nodes[0]\r\n left_node: Node = nodes[1]\r\n\r\n right_node.code = 1\r\n left_node.code = 0\r\n\r\n new_node = Node(left_node.symbol + right_node.symbol, left_node.frequency + right_node.frequency, left_node, right_node)\r\n\r\n nodes.remove(left_node)\r\n nodes.remove(right_node)\r\n nodes.append(new_node)\r\n \r\n self.haffman_tree = nodes[0]\r\n\r\n return self.replace_encoded(message, self.calculate_code(self.haffman_tree))\r\n\r\n def get_haffman_tree(self) -> Node:\r\n return self.haffman_tree\r\n\r\nhaff = Haff()\r\n\r\nwith open(\"sixthMessage.txt\", \"r\", encoding=\"utf-8\") as file:\r\n variable = file.read()\r\n\r\nprint(haff.encode(variable))\r\nprint(haff.decode(haff.encode(variable), haff.get_haffman_tree()))","repo_name":"THATISMYSTUDYACCOUNTIAMNOTSTREWDRAGON/university","sub_path":"3_theoretical_lab/sixthTask.py","file_name":"sixthTask.py","file_ext":"py","file_size_in_byte":2831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"14377844506","text":"#!/usr/bin/python3\n\nfrom networkx import write_edgelist, write_weighted_edgelist\nimport os\nfrom utils import *\nfrom optparse import OptionParser\nfrom multiprocessing import Process\nfrom time import time\nfrom datetime import datetime\n\ndef main(n_start, n_count=1, n_inc=1, c_in_start=10, c_in_count=1, c_in_inc=1, c_out_start=5, c_out_count=1, c_out_inc=1, comm_count = 2, DC=False, i=0):\n bp_uncertain = 'src/bp'\n\n edge_frac = 1.\n nonedge_mult = 5.\n b = 2\n trials = 2\n\n os.makedirs('out', exist_ok=True)\n os.makedirs('data', exist_ok=True)\n\n for n in custom_range(n_start, n_count, n_inc):\n for c_in in custom_range(c_in_start, c_in_count, c_in_inc):\n for c_out in custom_range(c_out_start, c_out_count, c_out_inc):\n original_net = 'data/original_net-%d-%f-%f-%f-%f-%f-%d.edges'%(n,c_in,c_out,b,edge_frac,nonedge_mult, i)\n uncertain_net = 'data/noisy_net-%d-%f-%f-%f-%f-%f-%d.edges'%(n,c_in,c_out,b,edge_frac,nonedge_mult, i)\n uncertain_comms = 'out/uncertain_comms-%d-%f-%f-%f-%f-%f-%d.out'%(n,c_in,c_out,b,edge_frac,nonedge_mult, i)\n \n 
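# note: custom_range, make_net, fuzz_network and evaluate are assumed to come from the wildcard utils import above; each innermost iteration builds one synthetic benchmark graph, perturbs it, and times belief propagation on the noisy copy\n 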
print(\"making and fuzzing network\")\n G_orig = make_net(c_in, c_out, n)\n write_edgelist(G_orig, original_net)\n G, _ = fuzz_network(G_orig, 1, b, edge_frac, nonedge_mult)\n write_weighted_edgelist(G, uncertain_net)\n \n start1 = time()\n print(\"running belief propagation\")\n os.system('%s -i %s -o %s -c %d -l %d -n %d' % (bp_uncertain, uncertain_net, uncertain_comms, comm_count, 3, trials))\n end1 = time()\n\n with open('out/results.txt', 'a+') as out_file:\n out_file.write(\"%d %f %f\\t%f %f %f\\t %f %f\\t %s %d\\n\" %(n,\n c_in, c_out,\n b,edge_frac,nonedge_mult,\n evaluate(uncertain_comms, n), end1-start1,\n str(datetime.now()), i))\n\n\nif __name__ == '__main__':\n # parse command line options\n parser = OptionParser()\n parser.add_option('-n', type=int, dest = 'n', help='number of nodes in network', default=2000)\n parser.add_option('--c_in', type=float, dest = 'c_in', help='average within-community degree', default=40)\n parser.add_option('--c_out', type=float, dest = 'c_out', help='average between-community degree', default=10)\n parser.add_option('-k', type=int, dest = 'comm_count', help='number of communities', default=2)\n parser.add_option('--iters', type=int, help='number of instances to run, multithreaded', default = 1)\n\n (options, _) = parser.parse_args()\n\n ps = []\n # run multiple threads\n for i in range(options.iters):\n p = Process(target=main, args = (options.n, 1, 1, options.c_in, 1, 1, options.c_out, 1, 1, options.comm_count, False, i))\n ps.append(p)\n p.start()\n for p in ps:\n p.join()\n","repo_name":"nitramsivart/uncertain-networks","sub_path":"run_synthetic.py","file_name":"run_synthetic.py","file_ext":"py","file_size_in_byte":2995,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"60"} +{"seq_id":"1224960390","text":"import sys\r\n\r\nsys.setrecursionlimit(10**6)\r\ntable = dict()\r\n\r\n\r\ndef choose(n, r, mod=None): # no mod, or mod ≠ prime\r\n if r > n or r < 0:\r\n return 0\r\n if r == 0:\r\n return 1\r\n if (n, r) in table:\r\n return table[(n, r)]\r\n table[(n, r)] = choose(n - 1, r) + choose(n - 1, r - 1)\r\n return table[(n, r)]\r\n\r\n\r\nn, *a = map(int, sys.stdin.read().split())\r\na += [0]\r\n\r\n\r\ndef main():\r\n prev = 1001001001\r\n cnt = 0\r\n res = 0\r\n for x in a:\r\n if x <= prev:\r\n res += choose(cnt, 2) + cnt\r\n cnt = 1\r\n else:\r\n cnt += 1\r\n prev = x\r\n print(res)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"kagemeka/atcoder-submissions","sub_path":"jp.atcoder/abc038/abc038_c/11764080.py","file_name":"11764080.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"8541421352","text":"# -*- coding: utf-8 -*-\n\n# This sample demonstrates handling intents from an Alexa skill using the Alexa Skills Kit SDK for Python.\n# Please visit https://alexa.design/cookbook for additional examples on implementing slots, dialog management,\n# session persistence, api calls, and more.\n# This sample is built using the handler classes approach in skill builder.\nimport logging\nimport ask_sdk_core.utils as ask_utils\n\nfrom ask_sdk_core.skill_builder import SkillBuilder\nfrom ask_sdk_core.dispatch_components import AbstractRequestHandler\nfrom ask_sdk_core.dispatch_components import AbstractExceptionHandler\nfrom ask_sdk_core.handler_input import HandlerInput\nfrom ask_sdk_model.intent import Intent\nfrom ask_sdk_model.dialog import (ElicitSlotDirective, 
DelegateDirective)\nfrom ask_sdk_model.dialog_state import DialogState\nfrom ask_sdk_model.slu.entityresolution.status_code import StatusCode\n\nfrom ask_sdk_model import Response\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n### api ###\nimport tmdbv3api\nfrom tmdbv3api import TMDb\n\nfrom tmdbv3api import Movie\nfrom tmdbv3api import Discover\nfrom tmdbv3api import Person\nfrom tmdbv3api import Search\nfrom tmdbv3api import Genre\n\ntmdb = TMDb()\ntmdb.api_key = '93a8c2d922b414294a124ef8dc9c2428'\n\n### firebase ###\nimport firebase_admin \nfrom firebase_admin import credentials, firestore\n\ncred = credentials.Certificate(\"firebase.json\")\nfirebase_admin.initialize_app(cred)\n\nfirestore_db = firestore.client()\n\n### canonical slot value constants ###\nFEEDBACK_POSITIVE = 'good'\nFEEDBACK_NEGATIVE = 'bad'\n\nRECOMMENDATION_ACCEPTED = \"Okay\"\nRECOMMENDATION_REJECTED = \"Something else\"\n\n### response templates ###\nSENTENCE_FEEDBACK_BEFORE_RECOMMENDATION = \"I'm happy to take a new recommendation request, but please give me feedback on my last recommendation first, which was the film {}. \"\nSENTENCE_NO_RECOMMENDATION_FOUND = \"I'm sorry. I was not able to find a {} recommendation available on your platforms based on your input. \"\nSENTENCE_MOVIE_NOT_FOUND = \"Hmm, I don't know the film {}. \"\nSENTENCE_ACTOR_NOT_FOUND = \"Sorry, I don't know the actor or actress {}. \"\nSENTENCE_GENRE_NOT_FOUND = \"Hmm, I don't recognize {} as a genre. \"\nSENTENCE_DID_NOT_UNDERSTAND = \"Sorry, I didn't get that. \"\nSENTENCE_RECOMMENDATION_ACCEPTED = \"Great! Enjoy the film! \"\nSENTENCE_ANOTHER_RECOMMENDATION = \"Okay. \"\nSENTENCE_CANCELED = \"Okay, maybe another time. \"\nSENTENCE_FEEDBACK_ACCEPTED = \"Alright, I will try to work your feedback into my next recommendations. \"\nSENTENCE_BACK_TO_RECOMMENDATION = \"Back to your recommendation request... \"\nSENTENCE_FEEDBACK_WAS_POSITIVE = \"Great to hear! \"\nSENTENCE_FEEDBACK_WAS_NEGATIVE = \"I'm sorry to hear that. \"\n\nPROMPT_TRY_AGAIN = \"Why don't you try again with a different request? If you need help, just say: Help. \"\nPROMPT_MOVIE_NOT_FOUND = \"Please tell me the movie's official English title. \"\nPROMPT_ACTOR_NOT_FOUND = \"Please use their commonly known name. \"\nPROMPT_GENRE_NOT_FOUND = \"Try a more common synonym or a more widely known genre. \"\nPROMPT_RECOMMENDATION_CONFIRMATION = \"You can say Okay to accept this recommendation or request another one by saying: Another one. To cancel, just say Stop. \"\nPROMPT_ANOTHER_RECOMMENDATION = \"How about this one? \"\nPROMPT_FEEDBACK_ASPECTS = \"What specifically {} you like about {}? That could be the acting, the story, or the setting of the movie. 
\"\n\n### other templates ###\nDELDIR_FEEDBACK = DelegateDirective(Intent(\n name=\"feedbackIntent\",\n slots={\n \"feedbackGeneral\": {\n \"name\": \"feedbackGeneral\"\n },\n \"feedbackAspects\": {\n \"name\": \"feedbackAspects\"\n }\n }\n))\n\ndef makeESDir(slotname, intent=None):\n if intent:\n return (ElicitSlotDirective(\n slot_to_elicit = slotname,\n updated_intent = intent\n ))\n else:\n return (ElicitSlotDirective(\n slot_to_elicit = slotname\n ))\n\n\n### own functions ###\nfrom sentimentAnalysis import *\nfrom recommender_functions import *\nfrom user_firebase_functions import *\nfrom movie_api_functions import *\n\nclass LaunchRequestHandler(AbstractRequestHandler):\n \"\"\"Handler for Skill Launch.\"\"\"\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return ask_utils.is_request_type(\"LaunchRequest\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n\n responseBuilder = handler_input.response_builder\n userID = str(handler_input.request_envelope.context.system.user.user_id)\n speak_output = \"Welcome to Movie Tips! \"\n \n if check_user_exists_by_id(userID):\n speak_output = \"Welcome back {}! \".format(get_username_by_id(userID))\n if is_last_watched_movie_rated_by_id(userID):\n speak_output += \"Do you want me to recommend a movie? If so, just say: Recommend something.\"\n responseBuilder.speak(speak_output).ask(speak_output)\n else:\n speak_output += \"I see you haven't rated my last recommendation yet, which was the film {}. Let's do that now! \".format(get_last_watched_movie_by_id(userID))\n responseBuilder.speak(speak_output).add_directive(DELDIR_FEEDBACK)\n else:\n speak_output += \"I see you haven't set up a profile yet. Let's do that now! \"\n responseBuilder.speak(speak_output).add_directive(DelegateDirective(Intent(\n name=\"setupIntent\" #weirdly enough, we don't have to pass the slots here and it works anyway... as opposed to the feedbackIntent which has Alexa completely checking out after the first prompt unless the slots are given. 
What even is this madness\n )))\n \n return responseBuilder.response\n\n\n\nclass RecommendationByMovieIntentHandler(AbstractRequestHandler):\n \"\"\"Handler for Recommendation by Movie Intent.\"\"\"\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return ask_utils.is_intent_name(\"recommendationIntent_byMovie\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n request = handler_input.request_envelope.request\n intent = request.intent\n responseBuilder = handler_input.response_builder\n sessionAttribs = handler_input.attributes_manager.session_attributes\n \n userID = handler_input.request_envelope.context.system.user.user_id\n slot_movie = intent.slots[\"movieTitle\"]\n slot_confirmation = intent.slots[\"recommendationOK\"]\n \n if request.dialog_state == DialogState.STARTED or slot_movie.value is None:\n if is_last_watched_movie_rated_by_id(userID):\n \n #resolve \"my favorite movie\" if necessary\n resolution = slot_movie.resolutions.resolutions_per_authority[0]\n if resolution.status.code == StatusCode.ER_SUCCESS_MATCH:\n resolvedSlotID = resolution.values[0].value.id\n if resolvedSlotID == \"FAV\":\n slot_movie.value = get_favourite_movie_by_id(userID)\n \n movieID = getMovieID(slot_movie.value)\n movieName = getMovieName(movieID)\n \n if movieID == 0:\n speak_output = SENTENCE_MOVIE_NOT_FOUND.format(slot_movie.value)\n prompt_output = PROMPT_MOVIE_NOT_FOUND\n responseBuilder.speak(speak_output+prompt_output).ask(prompt_output).add_directive(makeESDir(\"movieTitle\"))\n else:\n slot_movie.value = movieName\n \n rec = recommendationSentenceFromMovieInput(userID, movieName)\n if rec['success']:\n speak_output = rec['sentence']\n responseBuilder.speak(speak_output).add_directive(DelegateDirective(intent))\n else:\n speak_output = SENTENCE_NO_RECOMMENDATION_FOUND.format(\"new\")\n prompt_output = PROMPT_TRY_AGAIN\n responseBuilder.speak(speak_output+prompt_output).ask(prompt_output)\n \n else:\n speak_output = SENTENCE_FEEDBACK_BEFORE_RECOMMENDATION.format(get_last_watched_movie_by_id(userID))\n responseBuilder.speak(speak_output).add_directive(DELDIR_FEEDBACK)\n \n \n elif request.dialog_state == DialogState.IN_PROGRESS: #confirmation given for recommendation\n resolution = slot_confirmation.resolutions.resolutions_per_authority[0]\n \n if resolution.status.code == StatusCode.ER_SUCCESS_MATCH:\n resolvedSlotName = resolution.values[0].value.name\n inputVal = resolvedSlotName\n slot_confirmation.value = inputVal\n if inputVal == RECOMMENDATION_ACCEPTED:\n responseBuilder.add_directive(DelegateDirective(intent))\n elif inputVal == RECOMMENDATION_REJECTED:\n rec = recommendationSentenceFromMovieInput(userID, slot_movie.value)\n if rec['success']:\n speak_output = SENTENCE_ANOTHER_RECOMMENDATION\n prompt_output = PROMPT_ANOTHER_RECOMMENDATION+rec['sentence']+\" \"+PROMPT_RECOMMENDATION_CONFIRMATION\n responseBuilder.speak(speak_output+prompt_output).ask(prompt_output).add_directive(makeESDir(\"recommendationOK\"))\n else:\n speak_output = SENTENCE_NO_RECOMMENDATION_FOUND.format(\"new\")\n prompt_output = PROMPT_TRY_AGAIN\n responseBuilder.speak(speak_output+prompt_output).ask(prompt_output)\n \n else: #We're told to cancel/stop\n speak_output = SENTENCE_CANCELED\n responseBuilder.speak(speak_output).add_directive(DelegateDirective(Intent(\n name=\"AMAZON.CancelIntent\"\n )))\n else:\n speak_output = SENTENCE_DID_NOT_UNDERSTAND\n prompt_output = PROMPT_RECOMMENDATION_CONFIRMATION\n 
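# the confirmation answer did not resolve to a known value: repeat the options and re-elicit the recommendationOK slot so the session stays open\n 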
responseBuilder.speak(speak_output+prompt_output).ask(prompt_output).add_directive(makeESDir(\"recommendationOK\"))\n \n else: #request.dialog_state == DialogState.COMPLETED; we have a green light on the recommendation at this point\n acceptRecommendation(userID)\n speak_output = SENTENCE_RECOMMENDATION_ACCEPTED\n responseBuilder.speak(speak_output)\n \n return responseBuilder.response\n\n\nclass RecommendationByActorIntentHandler(AbstractRequestHandler):\n \"\"\"Handler for Recommendation by Actor Intent.\"\"\"\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return ask_utils.is_intent_name(\"recommendationIntent_byActor\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n request = handler_input.request_envelope.request\n intent = request.intent\n responseBuilder = handler_input.response_builder\n sessionAttribs = handler_input.attributes_manager.session_attributes\n \n userID = handler_input.request_envelope.context.system.user.user_id\n slot_actor = intent.slots[\"actor\"]\n slot_confirmation = intent.slots[\"recommendationOK\"]\n \n if request.dialog_state == DialogState.STARTED or slot_actor.value is None:\n if is_last_watched_movie_rated_by_id(userID):\n \n #resolve \"my favorite actor\" if necessary\n resolution = slot_actor.resolutions.resolutions_per_authority[0]\n if resolution.status.code == StatusCode.ER_SUCCESS_MATCH:\n resolvedSlotID = resolution.values[0].value.id\n if resolvedSlotID == \"FAV\":\n slot_actor.value = get_favourite_actress_by_id(userID)\n \n actorID = getActressId(slot_actor.value)\n actorName = getActressName(actorID)\n \n if actorID == 0:\n speak_output = SENTENCE_ACTOR_NOT_FOUND.format(slot_actor.value)\n prompt_output = PROMPT_ACTOR_NOT_FOUND\n responseBuilder.speak(speak_output+prompt_output).ask(prompt_output).add_directive(makeESDir(\"actor\"))\n else:\n slot_actor.value = actorName\n\n rec = recommendationSentenceFromActressInput(userID, actorName)\n if rec['success']:\n speak_output = rec['sentence']\n responseBuilder.speak(speak_output).add_directive(DelegateDirective(intent))\n else:\n speak_output = SENTENCE_NO_RECOMMENDATION_FOUND.format(\"new\")\n prompt_output = PROMPT_TRY_AGAIN\n responseBuilder.speak(speak_output+prompt_output).ask(prompt_output)\n \n else:\n speak_output = SENTENCE_FEEDBACK_BEFORE_RECOMMENDATION.format(get_last_watched_movie_by_id(userID))\n responseBuilder.speak(speak_output).add_directive(DELDIR_FEEDBACK)\n \n \n elif request.dialog_state == DialogState.IN_PROGRESS: #confirmation given for recommendation\n resolution = slot_confirmation.resolutions.resolutions_per_authority[0]\n \n if resolution.status.code == StatusCode.ER_SUCCESS_MATCH:\n resolvedSlotName = resolution.values[0].value.name\n inputVal = resolvedSlotName\n slot_confirmation.value = inputVal\n if inputVal == RECOMMENDATION_ACCEPTED:\n responseBuilder.add_directive(DelegateDirective(intent))\n elif inputVal == RECOMMENDATION_REJECTED:\n rec = recommendationSentenceFromActressInput(userID, slot_actor.value)\n if rec['success']:\n speak_output = SENTENCE_ANOTHER_RECOMMENDATION\n prompt_output = PROMPT_ANOTHER_RECOMMENDATION+rec['sentence']+\" \"+PROMPT_RECOMMENDATION_CONFIRMATION\n responseBuilder.speak(speak_output+prompt_output).ask(prompt_output).add_directive(makeESDir(\"recommendationOK\"))\n else:\n speak_output = SENTENCE_NO_RECOMMENDATION_FOUND.format(\"new\")\n prompt_output = PROMPT_TRY_AGAIN\n responseBuilder.speak(speak_output+prompt_output).ask(prompt_output)\n \n else: #We're told 
to cancel/stop\n speak_output = SENTENCE_CANCELED\n responseBuilder.speak(speak_output).add_directive(DelegateDirective(Intent(\n name=\"AMAZON.CancelIntent\"\n )))\n else:\n speak_output = SENTENCE_DID_NOT_UNDERSTAND\n prompt_output = PROMPT_RECOMMENDATION_CONFIRMATION\n responseBuilder.speak(speak_output+prompt_output).ask(prompt_output).add_directive(makeESDir(\"recommendationOK\"))\n \n else: #request.dialog_state == DialogState.COMPLETED; we have a green light on the recommendation at this point\n acceptRecommendation(userID)\n speak_output = SENTENCE_RECOMMENDATION_ACCEPTED\n responseBuilder.speak(speak_output)\n \n return responseBuilder.response\n\n\nclass RecommendationByGenreIntentHandler(AbstractRequestHandler):\n \"\"\"Handler for Recommendation by Genre Intent.\"\"\"\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return ask_utils.is_intent_name(\"recommendationIntent_byGenre\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n request = handler_input.request_envelope.request\n intent = request.intent\n responseBuilder = handler_input.response_builder\n sessionAttribs = handler_input.attributes_manager.session_attributes\n \n userID = handler_input.request_envelope.context.system.user.user_id\n slot_genre = intent.slots[\"genre\"]\n slot_confirmation = intent.slots[\"recommendationOK\"]\n \n if request.dialog_state == DialogState.STARTED or slot_genre.value is None:\n if is_last_watched_movie_rated_by_id(userID):\n \n #resolve \"my favorite genre\" if necessary\n resolution = slot_genre.resolutions.resolutions_per_authority[0]\n if resolution.status.code == StatusCode.ER_SUCCESS_MATCH:\n resolvedSlotID = resolution.values[0].value.id\n resolvedSlotName = resolution.values[0].value.name\n if resolvedSlotID == \"FAV\":\n slot_genre.value = get_liked_genre_by_id(userID)\n else:\n slot_genre.value = resolvedSlotName\n \n rec = recommendationSentenceFromGenreInput(userID, slot_genre.value)\n if rec['success']:\n speak_output = rec['sentence']\n responseBuilder.speak(speak_output).add_directive(DelegateDirective(intent))\n else:\n speak_output = SENTENCE_NO_RECOMMENDATION_FOUND.format(\"new\")\n prompt_output = PROMPT_TRY_AGAIN\n responseBuilder.speak(speak_output+prompt_output).ask(prompt_output)\n \n else:\n speak_output = SENTENCE_GENRE_NOT_FOUND.format(slot_genre.value)\n prompt_output = PROMPT_GENRE_NOT_FOUND\n responseBuilder.speak(speak_output+prompt_output).ask(prompt_output).add_directive(makeESDir(\"genre\"))\n \n else:\n speak_output = SENTENCE_FEEDBACK_BEFORE_RECOMMENDATION.format(get_last_watched_movie_by_id(userID))\n responseBuilder.speak(speak_output).add_directive(DELDIR_FEEDBACK)\n \n \n elif request.dialog_state == DialogState.IN_PROGRESS: #confirmation given for recommendation\n resolution = slot_confirmation.resolutions.resolutions_per_authority[0]\n \n if resolution.status.code == StatusCode.ER_SUCCESS_MATCH:\n resolvedSlotName = resolution.values[0].value.name\n inputVal = resolvedSlotName\n slot_confirmation.value = inputVal\n if inputVal == RECOMMENDATION_ACCEPTED:\n responseBuilder.add_directive(DelegateDirective(intent))\n elif inputVal == RECOMMENDATION_REJECTED:\n rec = recommendationSentenceFromGenreInput(userID, slot_genre.value)\n if rec['success']:\n speak_output = SENTENCE_ANOTHER_RECOMMENDATION\n prompt_output = PROMPT_ANOTHER_RECOMMENDATION+rec['sentence']+\" \"+PROMPT_RECOMMENDATION_CONFIRMATION\n 
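# a fresh pick for the same actor was found: present it and re-open the Okay / Something else confirmation loop on the recommendationOK slot\n 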
responseBuilder.speak(speak_output+prompt_output).ask(prompt_output).add_directive(makeESDir(\"recommendationOK\"))\n else:\n speak_output = SENTENCE_NO_RECOMMENDATION_FOUND.format(\"new\")\n prompt_output = PROMPT_TRY_AGAIN\n responseBuilder.speak(speak_output+prompt_output).ask(prompt_output)\n \n else: #We're told to cancel/stop\n speak_output = SENTENCE_CANCELED\n responseBuilder.speak(speak_output).add_directive(DelegateDirective(Intent(\n name=\"AMAZON.CancelIntent\"\n )))\n else:\n speak_output = SENTENCE_DID_NOT_UNDERSTAND\n prompt_output = PROMPT_RECOMMENDATION_CONFIRMATION\n responseBuilder.speak(speak_output+prompt_output).ask(prompt_output).add_directive(makeESDir(\"recommendationOK\"))\n \n else: #request.dialog_state == DialogState.COMPLETED; we have a green light on the recommendation at this point\n acceptRecommendation(userID)\n speak_output = SENTENCE_RECOMMENDATION_ACCEPTED\n responseBuilder.speak(speak_output)\n \n return responseBuilder.response\n\n\n\nclass RecommendationRewatchIntentHandler(AbstractRequestHandler):\n \"\"\"Handler for Recommendation Rewatch Intent.\"\"\"\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return ask_utils.is_intent_name(\"recommendationIntent_rewatch\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n request = handler_input.request_envelope.request\n intent = request.intent\n responseBuilder = handler_input.response_builder\n sessionAttribs = handler_input.attributes_manager.session_attributes\n \n userID = handler_input.request_envelope.context.system.user.user_id\n slot_confirmation = intent.slots[\"recommendationOK\"]\n \n if request.dialog_state == DialogState.STARTED: #\"recommend a rewatch\"/...\n if is_last_watched_movie_rated_by_id(userID):\n prevRecommendations = get_recommended_movies_by_id(userID)\n if prevRecommendations and len(prevRecommendations[0]): #check if we have recommendations at all\n rec = recommendationSentenceFromAgain(userID)\n if rec['success']:\n speak_output = rec['sentence']\n responseBuilder.speak(speak_output).add_directive(DelegateDirective(intent))\n else:\n speak_output = SENTENCE_NO_RECOMMENDATION_FOUND.format(\"rewatch\")\n prompt_output = PROMPT_TRY_AGAIN\n responseBuilder.speak(speak_output+prompt_output).ask(prompt_output)\n else:\n speak_output = \"Sorry, I don't have any previously accepted recommendations for you in my database that I could recommend again. Try using this feature again once you have accepted some new recommendations of mine. \"\n prompt_output = \"In the meantime... 
\"+PROMPT_TRY_AGAIN\n responseBuilder.speak(speak_output+prompt_output).ask(prompt_output)\n \n else:\n speak_output = SENTENCE_FEEDBACK_BEFORE_RECOMMENDATION.format(get_last_watched_movie_by_id(userID))\n responseBuilder.speak(speak_output).add_directive(DELDIR_FEEDBACK)\n \n elif request.dialog_state == DialogState.IN_PROGRESS: #confirmation given for recommendation\n resolution = slot_confirmation.resolutions.resolutions_per_authority[0]\n \n if resolution.status.code == StatusCode.ER_SUCCESS_MATCH:\n resolvedSlotName = resolution.values[0].value.name\n inputVal = resolvedSlotName\n slot_confirmation.value = inputVal\n if inputVal == RECOMMENDATION_ACCEPTED:\n responseBuilder.add_directive(DelegateDirective(intent))\n elif inputVal == RECOMMENDATION_REJECTED:\n rec = recommendationSentenceFromAgain(userID)\n if rec['success']:\n speak_output = SENTENCE_ANOTHER_RECOMMENDATION\n prompt_output = PROMPT_ANOTHER_RECOMMENDATION+rec['sentence']+\" \"+PROMPT_RECOMMENDATION_CONFIRMATION\n responseBuilder.speak(speak_output+prompt_output).ask(prompt_output).add_directive(makeESDir(\"recommendationOK\"))\n else:\n speak_output = SENTENCE_NO_RECOMMENDATION_FOUND.format(\"new rewatch\")\n prompt_output = PROMPT_TRY_AGAIN\n responseBuilder.speak(speak_output+prompt_output).ask(prompt_output)\n \n else: #We're told to cancel/stop\n speak_output = SENTENCE_CANCELED\n responseBuilder.speak(speak_output).add_directive(DelegateDirective(Intent(\n name=\"AMAZON.CancelIntent\"\n )))\n else:\n speak_output = SENTENCE_DID_NOT_UNDERSTAND\n prompt_output = PROMPT_RECOMMENDATION_CONFIRMATION\n responseBuilder.speak(speak_output+prompt_output).ask(prompt_output).add_directive(makeESDir(\"recommendationOK\"))\n \n else: #request.dialog_state == DialogState.COMPLETED; we have a green light on the recommendation at this point\n acceptRecommendation(userID)\n speak_output = SENTENCE_RECOMMENDATION_ACCEPTED\n responseBuilder.speak(speak_output)\n \n \n return responseBuilder.response\n\nclass RecommendationIntentHandler(AbstractRequestHandler):\n \"\"\"Handler for Recommendation Intent.\"\"\"\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return ask_utils.is_intent_name(\"recommendationIntent\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n request = handler_input.request_envelope.request\n intent = request.intent\n responseBuilder = handler_input.response_builder\n sessionAttribs = handler_input.attributes_manager.session_attributes\n \n userID = handler_input.request_envelope.context.system.user.user_id\n slot_confirmation = intent.slots[\"recommendationOK\"]\n \n if request.dialog_state == DialogState.STARTED: #\"i want to watch something\"/...\n if is_last_watched_movie_rated_by_id(userID):\n #####\n rec = generalRecommendation(userID)\n if rec['success']:\n speak_output = rec['sentence']\n responseBuilder.speak(speak_output).add_directive(DelegateDirective(intent))\n else:\n speak_output = SENTENCE_NO_RECOMMENDATION_FOUND.format(\"new\")\n prompt_output = PROMPT_TRY_AGAIN\n responseBuilder.speak(speak_output+prompt_output).ask(prompt_output)\n \n else:\n speak_output = SENTENCE_FEEDBACK_BEFORE_RECOMMENDATION.format(get_last_watched_movie_by_id(userID))\n responseBuilder.speak(speak_output).add_directive(DELDIR_FEEDBACK)\n \n elif request.dialog_state == DialogState.IN_PROGRESS: #confirmation given for recommendation\n resolution = slot_confirmation.resolutions.resolutions_per_authority[0]\n \n if resolution.status.code == 
StatusCode.ER_SUCCESS_MATCH:\n resolvedSlotName = resolution.values[0].value.name\n inputVal = resolvedSlotName\n slot_confirmation.value = inputVal\n if inputVal == RECOMMENDATION_ACCEPTED:\n responseBuilder.add_directive(DelegateDirective(intent))\n elif inputVal == RECOMMENDATION_REJECTED:\n rec = generalRecommendation(userID)\n if rec['success']:\n speak_output = SENTENCE_ANOTHER_RECOMMENDATION\n prompt_output = PROMPT_ANOTHER_RECOMMENDATION+rec['sentence']+\" \"+PROMPT_RECOMMENDATION_CONFIRMATION\n responseBuilder.speak(speak_output+prompt_output).ask(prompt_output).add_directive(makeESDir(\"recommendationOK\"))\n else:\n speak_output = SENTENCE_NO_RECOMMENDATION_FOUND.format(\"new\")\n prompt_output = PROMPT_TRY_AGAIN\n responseBuilder.speak(speak_output+prompt_output).ask(prompt_output)\n \n else: #We're told to cancel/stop\n speak_output = SENTENCE_CANCELED\n responseBuilder.speak(speak_output).add_directive(DelegateDirective(Intent(\n name=\"AMAZON.CancelIntent\"\n )))\n else:\n speak_output = SENTENCE_DID_NOT_UNDERSTAND\n prompt_output = PROMPT_RECOMMENDATION_CONFIRMATION\n responseBuilder.speak(speak_output+prompt_output).ask(prompt_output).add_directive(makeESDir(\"recommendationOK\"))\n \n else: #request.dialog_state == DialogState.COMPLETED; we have a green light on the recommendation at this point\n acceptRecommendation(userID)\n speak_output = SENTENCE_RECOMMENDATION_ACCEPTED\n responseBuilder.speak(speak_output)\n \n \n return responseBuilder.response\n\n\nclass SetupIntentHandler(AbstractRequestHandler):\n \"\"\"Handler for Setup Profile Intent.\"\"\"\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return ask_utils.is_intent_name(\"setupIntent\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n request = handler_input.request_envelope.request\n intent = request.intent\n responseBuilder = handler_input.response_builder\n sessionAttribs = handler_input.attributes_manager.session_attributes\n \n #at least for now, we only consider streaming options in Germany and ignore the language version of the films on a given streamer\n \n slot_name = intent.slots[\"name\"]\n slot_streamer = intent.slots[\"streamer\"]\n slot_favMovie = intent.slots[\"favMovie\"]\n slot_favActor = intent.slots[\"favActor\"]\n slot_favGenre = intent.slots[\"favGenre\"]\n slot_dislikedGenre = intent.slots[\"dislikedGenre\"]\n slot_finalConfirmation = intent.slots[\"finalConfirmation\"]\n \n #check where we are in the dialog:\n # 0 = no info given yet, need to get the name first\n # 1 = name is given and to be processed, nothing else yet. 
Get the streamers.\n # 2 = name (processed) and streamers (to be processed) are given, get the fav movie\n # 3 = name, streamers (processed) and fav movie (to be processed) are given, get the fav actors\n # 4 = name, streamers, fav movie (processed) and fav actors (to be processed) are given, get the liked genres\n # 5 = name, streamers, fav movie, fav actors (processed) and liked genres (to be processed) are given, get the disliked genres\n # 6 = name, streamers, fav movie, fav actors, liked genres (processed) and disliked genres (to be processed) are given, process the disliked genres\n # 7 = we have all we need, so we can give some confirmation and move on.\n \n setupPhase = 0\n if request.dialog_state == DialogState.STARTED:\n if slot_name.value is None:\n setupPhase = 0\n else:\n setupPhase = 1\n elif request.dialog_state == DialogState.IN_PROGRESS:\n if slot_streamer.value is None and slot_streamer.slot_value is None:\n setupPhase = 1\n elif slot_favMovie.value is None:\n setupPhase = 2\n elif slot_favActor.value is None:\n setupPhase = 3\n elif slot_favGenre.value is None:\n setupPhase = 4\n elif slot_dislikedGenre.value is None:\n setupPhase = 5\n elif slot_finalConfirmation.value is None:\n setupPhase = 6\n else:\n setupPhase = 7\n else: #request.dialog_state == DialogState.COMPLETED\n setupPhase = 8\n \n #respond per phase:\n if setupPhase == 0:\n responseBuilder.add_directive(DelegateDirective())\n \n elif setupPhase == 1:\n inputVal = slot_name.value\n speak_output = \"Hi {}! \".format(inputVal)\n responseBuilder.speak(speak_output).add_directive(DelegateDirective(intent))\n \n elif setupPhase == 2:\n #try to resolve slot input\n allInputs = []\n resolvedInputs = []\n if \"resolvedStreamers\" in sessionAttribs and sessionAttribs[\"resolvedStreamers\"] is not None:\n resolvedInputs = list(sessionAttribs[\"resolvedStreamers\"])\n sessionAttribs[\"resolvedStreamers\"] = None\n currentUnresolved = {}\n \n if slot_streamer.slot_value.object_type == \"List\":\n for v in slot_streamer.slot_value.values:\n allInputs.append(v)\n else:\n allInputs.append(slot_streamer.slot_value)\n \n for i in allInputs:\n resolution = i.resolutions.resolutions_per_authority[0]\n if resolution.status.code == StatusCode.ER_SUCCESS_MATCH:\n resolvedSlotName = resolution.values[0].value.name\n resolvedSlotID = resolution.values[0].value.id\n if resolvedSlotID in (\"ATV-X\", \"AMZ-X\", \"SKY-X\"):\n currentUnresolved = {\"id\": resolvedSlotID, \"value\": i.value}\n break\n else:\n resolvedInputs.append(resolvedSlotName)\n else:# if i.value not in (\"the\", \"and\", \"also\"): #don't ask for stuff that we don't need but Alexa can't (always) filter out by itself\n currentUnresolved = {\"id\": \"\", \"value\": i.value}\n break\n \n stringifiedResolved = resolvedInputs and ((\", \".join(resolvedInputs[:-1])+\" and \"+resolvedInputs[-1], resolvedInputs[0])[len(resolvedInputs) == 1])\n \n if currentUnresolved:\n #response part 1\n if resolvedInputs:\n speak_output = \"I understood {}. But \".format(stringifiedResolved)\n else:\n speak_output = \"Sorry, \"\n \n #response part 2\n if currentUnresolved[\"id\"] == \"ATV-X\":\n speak_output += \"I'm not sure about \"+currentUnresolved[\"value\"]+\". \"\n prompt_output = \"Do you mean Apple TV Plus or Apple iTunes, also known as Apple TV? \"\n elif currentUnresolved[\"id\"] == \"AMZ-X\":\n speak_output += \"I'm not sure about \"+currentUnresolved[\"value\"]+\". \"\n prompt_output = \"Do you mean Amazon Prime Video or Amazon Video? 
\" #Alexa cuts \"Amazon\" as the first word out of slot inputs, because why the hell not. Should be avoidable e.g. if we preface it with one of the defined carrier phrases like \"I have ...\". To be safe we defined synonyms for the option without the leading \"Amazon\".\n elif currentUnresolved[\"id\"] == \"SKY-X\":\n speak_output += \"I'm not sure about \"+currentUnresolved[\"value\"]+\". \"\n prompt_output = \"Do you mean Sky Ticket, Sky Go or the Sky Store? \"\n else:\n speak_output += \"I don't recognize {} as a streaming service. \".format(currentUnresolved[\"value\"])\n prompt_output = \"Please try a platform that is available in Germany. \"\n \n #give the response\n responseBuilder.speak(speak_output+prompt_output).ask(prompt_output).add_directive(makeESDir(\"streamer\"))\n #store the resolved values to pick up in the next go\n sessionAttribs[\"resolvedStreamers\"] = resolvedInputs\n else: #implies resolvedInputs is not empty\n inputVal = stringifiedResolved\n slot_streamer.value = \"|\".join(resolvedInputs) ###\n speak_output = \"Great! you shall only receive films available on {} as recommendations. \".format(inputVal)\n responseBuilder.speak(speak_output).add_directive(DelegateDirective(intent))\n \n \n #resolution = slot_streamer.resolutions.resolutions_per_authority[0]\n #\n #if resolution.status.code == StatusCode.ER_SUCCESS_MATCH:\n # resolvedSlotName = resolution.values[0].value.name\n # resolvedSlotID = resolution.values[0].value.id\n # if resolvedSlotID == \"ATV-X\":\n # prompt_output = \"Do you mean Apple TV Plus or Apple iTunes, also known as Apple TV? \"\n # responseBuilder.speak(prompt_output).ask(prompt_output).add_directive(esDir)\n # elif resolvedSlotID == \"AMZ-X\":\n # prompt_output = \"Do you mean Amazon Prime Video or Amazon Video? \" #Alexa cuts \"Amazon\" as the first word out of slot inputs, because why the hell not. Should be avoidable e.g. if we preface it with one of the defined carrier phrases like \"I have ...\". To be safe we defined synonyms for the option without the leading \"Amazon\".\n # responseBuilder.speak(prompt_output).ask(prompt_output).add_directive(esDir)\n # elif resolvedSlotID == \"SKY-X\":\n # prompt_output = \"Do you mean Sky Ticket, Sky Go or the Sky Store? \"\n # responseBuilder.speak(prompt_output).ask(prompt_output).add_directive(esDir)\n # else:\n # inputVal = resolvedSlotName\n # slot_streamer.value = inputVal\n # speak_output = \"Great! you shall only receive films available on {} as recommendations. \".format(inputVal)\n # responseBuilder.speak(speak_output).add_directive(DelegateDirective(intent))\n #else:\n # speak_output = \"Sorry, I don't recognize {} as a streaming service. \".format(slot_streamer.value)\n # prompt_output = \"Please try a platform that is available in Germany. \"\n # responseBuilder.speak(speak_output+prompt_output).ask(prompt_output).add_directive(esDir)\n \n elif setupPhase == 3:\n movieID = getMovieID(slot_favMovie.value)\n movieName = getMovieName(movieID)\n \n if movieID == 0:\n speak_output = SENTENCE_MOVIE_NOT_FOUND.format(slot_favMovie.value)\n prompt_output = PROMPT_MOVIE_NOT_FOUND\n responseBuilder.speak(speak_output+prompt_output).ask(prompt_output).add_directive(makeESDir(\"favMovie\"))\n else:\n inputVal = movieName\n slot_favMovie.value = inputVal\n speak_output = \"Alright! I will try to recommend films similar to {}. 
\".format(inputVal)\n responseBuilder.speak(speak_output).add_directive(DelegateDirective(intent))\n \n elif setupPhase == 4:\n actorID = getActressId(slot_favActor.value)\n actorName = getActressName(actorID)\n if actorID == 0:\n speak_output = SENTENCE_ACTOR_NOT_FOUND.format(slot_favActor.value)\n prompt_output = PROMPT_ACTOR_NOT_FOUND\n responseBuilder.speak(speak_output+prompt_output).ask(prompt_output).add_directive(makeESDir(\"favActor\"))\n else:\n inputVal = actorName\n slot_favActor.value = inputVal\n speak_output = \"Okay, I will try to recommend films with {}. \".format(inputVal)\n responseBuilder.speak(speak_output).add_directive(DelegateDirective(intent))\n \n elif setupPhase == 5:\n resolution = slot_favGenre.resolutions.resolutions_per_authority[0]\n \n if resolution.status.code == StatusCode.ER_SUCCESS_MATCH:\n resolvedSlotName = resolution.values[0].value.name\n resolvedSlotID = resolution.values[0].value.id #this is already the API-ready genre ID\n inputVal = resolvedSlotName\n slot_favGenre.value = inputVal\n if inputVal == \"TV Movie\": #edge case: We don't want to say \"more TV Movie movies\". Yes, this is unnecessary overengineering. No, I don't care.\n speak_output = \"Cool! I will try to recommend more TV Movies to you. \"\n else:\n speak_output = \"Cool! I will try to recommend more {} movies to you. \".format(inputVal)\n responseBuilder.speak(speak_output).add_directive(DelegateDirective(intent))\n else:\n speak_output = SENTENCE_GENRE_NOT_FOUND.format(slot_favGenre.value)\n prompt_output = PROMPT_GENRE_NOT_FOUND\n responseBuilder.speak(speak_output+prompt_output).ask(prompt_output).add_directive(makeESDir(\"favGenre\"))\n \n elif setupPhase == 6:\n resolution = slot_dislikedGenre.resolutions.resolutions_per_authority[0]\n \n if resolution.status.code == StatusCode.ER_SUCCESS_MATCH:\n resolvedSlotName = resolution.values[0].value.name\n resolvedSlotID = resolution.values[0].value.id #this is already the API-ready genre ID\n inputVal = resolvedSlotName\n slot_dislikedGenre.value = inputVal\n if inputVal == \"TV Movie\": #same edge case handling as above\n speak_output = \"Good to know, I will try to keep TV Movies out of your recommendations. \"\n else:\n speak_output = \"Good to know, I will try to keep {} films out of your recommendations. 
\".format(inputVal)\n responseBuilder.speak(speak_output).add_directive(DelegateDirective(intent))\n else:\n speak_output = SENTENCE_GENRE_NOT_FOUND.format(slot_dislikedGenre.value)\n prompt_output = PROMPT_GENRE_NOT_FOUND\n responseBuilder.speak(speak_output+prompt_output).ask(prompt_output).add_directive(makeESDir(\"dislikedGenre\"))\n \n elif setupPhase == 7:\n resolution = slot_finalConfirmation.resolutions.resolutions_per_authority[0]\n \n if resolution.status.code == StatusCode.ER_SUCCESS_MATCH:\n resolvedSlotName = resolution.values[0].value.name\n inputVal = resolvedSlotName\n slot_finalConfirmation.value = inputVal\n responseBuilder.add_directive(DelegateDirective(intent))\n else:\n speak_output = SENTENCE_DID_NOT_UNDERSTAND\n prompt_output = \"Please say Okay to save your profile or Stop to cancel.\"\n responseBuilder.speak(speak_output+prompt_output).ask(prompt_output).add_directive(makeESDir(\"finalConfirmation\"))\n \n else: #setupPhase == 8\n #send everything to Firebase\n #session attribs don't work because somehow the last one will only be available one intent call too late, so we wouldn't have the dislikedGenre here if stored via session attribs.\n if slot_finalConfirmation.value == \"Okay\":\n userID = handler_input.request_envelope.context.system.user.user_id\n knownUser = check_user_exists_by_id(userID)\n streamers = ([slot_streamer.value], slot_streamer.value.split(\"|\"))[\"|\" in slot_streamer.value]\n if knownUser:\n u = User(slot_dislikedGenre.value, slot_favActor.value, slot_favMovie.value, userID, \"DE\", [get_last_watched_movie_by_id(userID), is_last_watched_movie_rated_by_id(userID)], slot_favGenre.value, slot_name.value, get_recommended_movies_by_id(userID), streamers, get_likings_by_id(userID), [\"\"])\n else:\n u = User(slot_dislikedGenre.value, slot_favActor.value, slot_favMovie.value, userID, \"DE\", [\"\", True], slot_favGenre.value, slot_name.value, [\"\"], streamers, {'movie': 0, 'genre': 0, 'acting': 0}, [\"\"])\n add_user(u)\n speak_output = \"You're all set up! \"\n if not knownUser:\n speak_output += \"To get an overview of what you can ask me, just say: Help.\"\n else:\n speak_output = \"Okay, I will not save your new data to your profile.\"\n responseBuilder.speak(speak_output)\n \n return responseBuilder.response\n\n\nclass FeedbackIntentHandler(AbstractRequestHandler):\n \"\"\"Handler for Feedback Intent.\"\"\"\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return ask_utils.is_intent_name(\"feedbackIntent\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n request = handler_input.request_envelope.request\n intent = request.intent\n responseBuilder = handler_input.response_builder\n sessionAttribs = handler_input.attributes_manager.session_attributes\n \n #for debugging the mess that is the Python ASK:\n sessionAttribs[\"stringSlotPythonInput_feedbackAspects\"] = str(intent.slots[\"feedbackAspects\"]) # stringified version gives python-style attributes (slot_value instead of slotValue) and sets unset ones to None (those don't show up at all in the JSON version). 
hasattr seems to best map the stringified version.\n sessionAttribs[\"stringSlotPythonInput_feedbackGeneral\"] = str(intent.slots[\"feedbackGeneral\"])\n attribs = {\n 'slotValue': hasattr(intent.slots[\"feedbackAspects\"], 'slotValue'),\n 'slot_value': hasattr(intent.slots[\"feedbackAspects\"], 'slot_value')\n }\n if hasattr(intent.slots[\"feedbackAspects\"], 'slot_value') and intent.slots[\"feedbackAspects\"].slot_value is not None:\n attribs['.slot_value.resolutions'] = hasattr(intent.slots[\"feedbackAspects\"].slot_value, 'resolutions')\n if hasattr(intent.slots[\"feedbackAspects\"].slot_value, 'resolutions') and intent.slots[\"feedbackAspects\"].slot_value.resolutions is not None:\n attribs['.slot_value.resolutions.resolutionsPerAuthority'] = hasattr(intent.slots[\"feedbackAspects\"].slot_value.resolutions, 'resolutionsPerAuthority')\n attribs['.slot_value.resolutions.resolutions_per_authority'] = hasattr(intent.slots[\"feedbackAspects\"].slot_value.resolutions, 'resolutions_per_authority')\n if hasattr(intent.slots[\"feedbackAspects\"].slot_value.resolutions, 'resolutions_per_authority'):\n attribs['.slot_value.resolutions.resolutions_per_authority[0]'] = str(intent.slots[\"feedbackAspects\"].slot_value.resolutions.resolutions_per_authority[0])\n else:\n attribs['.slot_value.resolutions.resolutionsPerAuthority'] = \"failed, no .slot_value.resolutions\"\n attribs['.slot_value.resolutions.resolutions_per_authority'] = \"failed, no .slot_value.resolutions\"\n else:\n attribs['.slot_value.resolutions'] = \"failed, no .slot_value\"\n sessionAttribs[\"feedbackAspectsAttribs\"] = attribs\n \n feedbackGeneralRaw = intent.slots[\"feedbackGeneral\"].value\n \n #if no aspect is given yet, .slotValue isn't there and .value is None... SOMETIMES. Other random times, .slot_value is there. OR .slotValue. Depending on Alexa's mood of the day. 
I want to speak to the manager, Sir!\n #if one aspect is given, .value exists and .slotValue is:\n #{'type': 'Simple','value': 'writing', 'resolutions': {\n # 'resolutionsPerAuthority': [{'authority': '...', 'status': {'code': 'ER_SUCCESS_MATCH'}, 'values': [{'value': {'name': 'story', 'id': '...'}}]}]\n #}}\n #if multi are given, .value is None and .slotValue is:\n #{'type': 'List', 'values': [\n # {'type': 'Simple', 'value': 'story', 'resolutions': {\n # 'resolutionsPerAuthority': [{'authority': '...', 'status': {'code': 'ER_SUCCESS_MATCH'}, 'values': [{'value': {'name': 'story', 'id': '...'}}]}]\n # }},\n # {'type': 'Simple', 'value': 'score', 'resolutions': {\n # 'resolutionsPerAuthority': [{'authority': '...', 'status': {'code': 'ER_SUCCESS_MATCH'}, 'values': [{'value': {'name': 'music', 'id': '...'}}]}]\n # }}\n #]}\n \n if hasattr(intent.slots[\"feedbackAspects\"], 'slotValue'):\n feedbackAspectsSyntaxIsMessedUpAgain = True #JS-style syntax\n feedbackAspectsRaw = intent.slots[\"feedbackAspects\"].slotValue\n if feedbackAspectsRaw is None:\n feedbackAspectsIsGiven = False\n else:\n feedbackAspectsIsGiven = True\n if feedbackAspectsRaw['type'] == 'List' or feedbackAspectsRaw['object_type'] == 'List':\n feedbackAspectsIsMulti = True\n else: #feedbackAspectsRaw['type'] == 'Simple'\n feedbackAspectsIsMulti = False\n elif hasattr(intent.slots[\"feedbackAspects\"], 'slot_value'):\n feedbackAspectsSyntaxIsMessedUpAgain = False #python-style syntax\n feedbackAspectsRaw = intent.slots[\"feedbackAspects\"].slot_value\n if feedbackAspectsRaw is None:\n feedbackAspectsIsGiven = False\n else:\n feedbackAspectsIsGiven = True\n #feedbackAspectsRaw.type == 'List' does not work, because that would be too easy I guess.\n if feedbackAspectsRaw.object_type == 'List':\n feedbackAspectsIsMulti = True\n else: #feedbackAspectsRaw.object_type == 'Simple'\n feedbackAspectsIsMulti = False\n else:\n feedbackAspectsSyntaxIsMessedUpAgain = True\n feedbackAspectsRaw = intent.slots[\"feedbackAspects\"].value\n feedbackAspectsIsMulti = False\n if feedbackAspectsRaw is None:\n feedbackAspectsIsGiven = False\n else:\n feedbackAspectsIsGiven = True\n \n #check where we are in the feedback dialog:\n # 0 = no info given yet, need to get feedbackGeneral first\n # 1 = feedbackGeneral is given and to be processed, but we have yet to get feedbackAspects\n # 2 = feedbackGeneral and feedbackAspects are given, feedbackAspects has yet to be processed\n # 3 = we have all we need, so we can give some confirmation and move on.\n feedbackPhase = 0\n if request.dialog_state == DialogState.STARTED:\n if feedbackGeneralRaw is None:\n feedbackPhase = 0\n else:\n feedbackPhase = 1\n elif request.dialog_state == DialogState.IN_PROGRESS:\n if feedbackGeneralRaw is None: ##\n feedbackPhase = 0 ##\n elif not feedbackAspectsIsGiven:\n feedbackPhase = 1\n else:\n feedbackPhase = 2\n else: #request.dialog_state == DialogState.COMPLETED\n feedbackPhase = 3\n \n \n \n if feedbackPhase == 0: #prompt feedbackGeneral slot automatically\n responseBuilder.add_directive(DelegateDirective())\n \n elif feedbackPhase == 1: #we have feedbackGeneral, let's parse it and prompt feedbackAspects\n #process feedbackGeneralRaw\n isFeedbackPositive = isSentimentPositive(str(feedbackGeneralRaw))\n #store the canonical, simplified version of the feedback in the intent for later use\n intent.slots[\"feedbackGeneral\"].value = (FEEDBACK_NEGATIVE, FEEDBACK_POSITIVE)[isFeedbackPositive]\n \n speak_output = 
(SENTENCE_FEEDBACK_WAS_NEGATIVE,SENTENCE_FEEDBACK_WAS_POSITIVE)[isFeedbackPositive]\n prompt_output = PROMPT_FEEDBACK_ASPECTS.format((\"didn't\", \"did\")[isFeedbackPositive], \"it\")\n responseBuilder.speak(speak_output+prompt_output).ask(prompt_output).add_directive(makeESDir(\"feedbackAspects\", intent))\n \n elif feedbackPhase == 2:\n #resolve slots: try to match feedbackAspectsRaw to a canonical slot name\n slotResolutions = []\n if feedbackAspectsIsMulti == True:\n if feedbackAspectsSyntaxIsMessedUpAgain:\n allvalues = feedbackAspectsRaw['values']\n for val in allvalues:\n slotResolutions.append({\n \"orig\": val['value'],\n \"resolution\": val['resolutions']['resolutionsPerAuthority'][0]\n })\n else:\n allvalues = feedbackAspectsRaw.values\n for val in allvalues:\n slotResolutions.append({\n \"orig\": val.value,\n \"resolution\": val.resolutions.resolutions_per_authority[0]\n })\n else:\n if feedbackAspectsSyntaxIsMessedUpAgain:\n slotResolutions.append({\n \"orig\": feedbackAspectsRaw['value'],\n \"resolution\": feedbackAspectsRaw['resolutions']['resolutionsPerAuthority'][0]\n })\n else:\n slotResolutions.append({\n \"orig\": feedbackAspectsRaw.value, #['value'] tested, does not work. for reasons.\n \"resolution\": feedbackAspectsRaw.resolutions.resolutions_per_authority[0]\n })\n \n resolvedToSave = []\n unresolvedToSave = []\n if feedbackAspectsSyntaxIsMessedUpAgain:\n for val in slotResolutions:\n if val['resolution']['status']['code'] == 'ER_SUCCESS_MATCH':\n resolvedToSave.append(val['resolution']['values'][0]['value']['name']) #resolved to canonical slot value\n else:\n if(val['orig'] != 'the'): #sometimes, e.g. when saying \"the writing and the cast\", Alexa will fill the slots with \"writing\", \"the\" and \"cast\". It can't resolve \"the\", so throw all the \"the\"s out here.\n unresolvedToSave.append(val['orig']) #unresolved\n else:\n for val in slotResolutions:\n if val['resolution'].status.code == StatusCode.ER_SUCCESS_MATCH:\n resolvedToSave.append(val['resolution'].values[0].value.name) #resolved to canonical slot value\n else:\n if(val['orig'] != 'the'): #sometimes, e.g. when saying \"the writing and the cast\", Alexa will fill the slots with \"writing\", \"the\" and \"cast\". 
It can't resolve \"the\", so throw all the \"the\"s out here.\n unresolvedToSave.append(val['orig']) #unresolved\n \n handler_input.attributes_manager.session_attributes[\"resolvedMovieAspects\"] = resolvedToSave\n handler_input.attributes_manager.session_attributes[\"unresolvedMovieAspects\"] = unresolvedToSave\n \n if len(resolvedToSave)>0:\n #intent.slots[\"feedbackAspects\"].value = str(resolvedToSave)\n userID = handler_input.request_envelope.context.system.user.user_id\n update_likings_by_id(userID, {\n \"acting\": (\"acting\" in resolvedToSave),\n \"genre\": (\"genre\" in resolvedToSave),\n \"movie\": (\"movie\" in resolvedToSave)\n })\n responseBuilder.add_directive(DelegateDirective(intent))\n else:\n speak_output = SENTENCE_DID_NOT_UNDERSTAND\n prompt_output = PROMPT_FEEDBACK_ASPECTS.format((\"didn't\", \"did\")[intent.slots[\"feedbackGeneral\"].value == FEEDBACK_POSITIVE], \"the recommendation\")\n responseBuilder.speak(speak_output+prompt_output).ask(prompt_output).add_directive(makeESDir(\"feedbackAspects\"))\n \n #responseBuilder.add_directive(DelegateDirective(intent))\n \n else: # feedbackPhase == 3; we have all feedback values and can move on.\n userID = handler_input.request_envelope.context.system.user.user_id\n rate_last_watched_movie_by_id(userID, (feedbackGeneralRaw==FEEDBACK_POSITIVE))\n #aspects = intent.slots[\"feedbackAspects\"].value\n #update_likings_by_id(userID, {\n # \"acting\": (\"acting\" in aspects),\n # \"genre\": (\"genre\" in aspects),\n # \"movie\": (\"movie\" in aspects)\n #})\n \n speak_output = SENTENCE_FEEDBACK_ACCEPTED\n responseBuilder.speak(speak_output)\n \n \n return responseBuilder.response\n\n\nclass HelpIntentHandler(AbstractRequestHandler):\n \"\"\"Handler for Help Intent.\"\"\"\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return ask_utils.is_intent_name(\"AMAZON.HelpIntent\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n speak_output = \"To get a recommendation, you can just say: Recommend something. You can also request films similar to a given film, from a specific genre or with an actor or actress of your choice. To let me pick a good rewatch, just say: Recommend a rewatch. To change your profile, just say: Setup.\"\n\n return (\n handler_input.response_builder\n .speak(speak_output)\n .ask(speak_output)\n .response\n )\n\n\nclass CancelOrStopIntentHandler(AbstractRequestHandler):\n \"\"\"Single handler for Cancel and Stop Intent.\"\"\"\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return (ask_utils.is_intent_name(\"AMAZON.CancelIntent\")(handler_input) or\n ask_utils.is_intent_name(\"AMAZON.StopIntent\")(handler_input))\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n speak_output = \"Cancelling.\"\n\n return (\n handler_input.response_builder\n .speak(speak_output)\n .response\n )\n\n\nclass SessionEndedRequestHandler(AbstractRequestHandler):\n \"\"\"Handler for Session End.\"\"\"\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return ask_utils.is_request_type(\"SessionEndedRequest\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n\n # Any cleanup logic goes here.\n\n return handler_input.response_builder.response\n\n\nclass IntentReflectorHandler(AbstractRequestHandler):\n \"\"\"The intent reflector is used for interaction model testing and debugging.\n It will simply repeat the intent the user said. 
You can create custom handlers\n for your intents by defining them above, then also adding them to the request\n handler chain below.\n \"\"\"\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return ask_utils.is_request_type(\"IntentRequest\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n intent_name = ask_utils.get_intent_name(handler_input)\n speak_output = \"You just triggered \" + intent_name + \".\"\n\n return (\n handler_input.response_builder\n .speak(speak_output)\n # .ask(\"add a reprompt if you want to keep the session open for the user to respond\")\n .response\n )\n\n\nclass CatchAllExceptionHandler(AbstractExceptionHandler):\n \"\"\"Generic error handling to capture any syntax or routing errors. If you receive an error\n stating the request handler chain is not found, you have not implemented a handler for\n the intent being invoked or included it in the skill builder below.\n \"\"\"\n def can_handle(self, handler_input, exception):\n # type: (HandlerInput, Exception) -> bool\n return True\n\n def handle(self, handler_input, exception):\n # type: (HandlerInput, Exception) -> Response\n logger.error(exception, exc_info=True)\n\n speak_output = \"Sorry, I had trouble doing what you asked. Please try again.\"\n\n return (\n handler_input.response_builder\n .speak(speak_output)\n .ask(speak_output)\n .response\n )\n\n# The SkillBuilder object acts as the entry point for your skill, routing all request and response\n# payloads to the handlers above. Make sure any new handlers or interceptors you've\n# defined are included below. The order matters - they're processed top to bottom.\n\n\nsb = SkillBuilder()\n\nsb.add_request_handler(LaunchRequestHandler())\nsb.add_request_handler(RecommendationIntentHandler())\nsb.add_request_handler(RecommendationByMovieIntentHandler())\nsb.add_request_handler(RecommendationByActorIntentHandler())\nsb.add_request_handler(RecommendationByGenreIntentHandler())\nsb.add_request_handler(RecommendationRewatchIntentHandler())\nsb.add_request_handler(SetupIntentHandler())\nsb.add_request_handler(FeedbackIntentHandler())\n\nsb.add_request_handler(HelpIntentHandler())\nsb.add_request_handler(CancelOrStopIntentHandler())\nsb.add_request_handler(SessionEndedRequestHandler())\nsb.add_request_handler(IntentReflectorHandler()) # make sure IntentReflectorHandler is last so it doesn't override your custom intent handlers\n\nsb.add_exception_handler(CatchAllExceptionHandler())\n\nlambda_handler = sb.lambda_handler()","repo_name":"onumtiger/ultron","sub_path":"lambda/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":61283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"31076673013","text":"import re\n\n\ndef pretty_print_state(state):\n '''Pretty print the text of a state in the interactive fiction game'''\n state = str(state)\n state = state.replace('\\\\\\'', '\\'')\n pattern = re.compile(r'\\\\n|b\\'|b\"')\n state = re.sub(pattern, ' ', state)\n state = state.strip('\\'').strip('\\\"').strip()\n return state\n\n\ndef demo_n_games(agent, n_games=1000, mode='agent'):\n '''Run n game simulations and compute the average performance'''\n scores = []\n for _ in range(n_games):\n score = agent.demo_game(mode=mode, verbose=False)\n scores.append(score)\n return 
sum(scores)/n_games\n","repo_name":"bingwang32/RL_InteractiveFiction","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"72039902271","text":"from PySide2 import QtCore, QtGui, QtWidgets\nfrom PySide2.QtCore import (QCoreApplication, QPropertyAnimation, QDate, QDateTime, QMetaObject, QObject, QPoint, QRect,\n QSize, QTime, QUrl, Qt, QEvent)\nfrom PySide2.QtGui import (QBrush, QColor, QConicalGradient, QCursor, QFont, QFontDatabase, QIcon, QKeySequence,\n QLinearGradient, QPalette, QPainter, QPixmap, QRadialGradient)\nfrom PySide2.QtWidgets import *\n\n\nclass Ui_MainWindow(object):\n \"\"\"Clase autogenerada por pyuic que al lanzarla genera la ventana principal de la aplicación.\"\"\"\n\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(1000, 500)\n MainWindow.setMinimumSize(QSize(1000, 500))\n MainWindow.setStyleSheet(\"background-color: rgb(45, 45, 45);\")\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)\n self.verticalLayout.setContentsMargins(0, 0, 0, 0)\n self.verticalLayout.setSpacing(0)\n self.verticalLayout.setObjectName(\"verticalLayout\")\n self.Top_Bar = QtWidgets.QFrame(self.centralwidget)\n self.Top_Bar.setMaximumSize(QtCore.QSize(50000, 40))\n self.Top_Bar.setStyleSheet(\"background-color: rgb(35, 35, 35);\")\n self.Top_Bar.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.Top_Bar.setFrameShadow(QtWidgets.QFrame.Raised)\n self.Top_Bar.setObjectName(\"Top_Bar\")\n self.horizontalLayout = QtWidgets.QHBoxLayout(self.Top_Bar)\n self.horizontalLayout.setContentsMargins(0, 0, 0, 0)\n self.horizontalLayout.setSpacing(0)\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\n self.frame_toggle = QtWidgets.QFrame(self.Top_Bar)\n self.frame_toggle.setMaximumSize(QtCore.QSize(70, 40))\n self.frame_toggle.setStyleSheet(\"\")\n self.frame_toggle.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.frame_toggle.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame_toggle.setObjectName(\"frame_toggle\")\n self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.frame_toggle)\n self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)\n self.verticalLayout_2.setSpacing(0)\n self.verticalLayout_2.setObjectName(\"verticalLayout_2\")\n self.btn_toggle = QtWidgets.QPushButton(self.frame_toggle)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.btn_toggle.sizePolicy().hasHeightForWidth())\n self.btn_toggle.setSizePolicy(sizePolicy)\n self.btn_toggle.setStyleSheet(\"background-image: url(:/newPrefix/menu.png);\\n\"\n \"background-position: center;\\n\"\n \"background-repeat: no-reperat;\\n\"\n \"border: none;\\n\"\n \"background-color: rgb(27, 29, 35);\")\n self.btn_toggle.setText(\"\")\n self.btn_toggle.setObjectName(\"btn_toggle\")\n self.verticalLayout_2.addWidget(self.btn_toggle)\n self.horizontalLayout.addWidget(self.frame_toggle)\n self.frame_top = QtWidgets.QFrame(self.Top_Bar)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.frame_top.sizePolicy().hasHeightForWidth())\n 
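# Generated boilerplate: carry over the widget's heightForWidth flag before applying the shared size policy.\n        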
self.frame_top.setSizePolicy(sizePolicy)\n self.frame_top.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.frame_top.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame_top.setObjectName(\"frame_top\")\n self.horizontalLayout.addWidget(self.frame_top)\n self.verticalLayout.addWidget(self.Top_Bar)\n self.Content = QtWidgets.QFrame(self.centralwidget)\n self.Content.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.Content.setFrameShadow(QtWidgets.QFrame.Raised)\n self.Content.setObjectName(\"Content\")\n self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.Content)\n self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)\n self.horizontalLayout_2.setSpacing(0)\n self.horizontalLayout_2.setObjectName(\"horizontalLayout_2\")\n self.frame_left_menu = QtWidgets.QFrame(self.Content)\n self.frame_left_menu.setMinimumSize(QtCore.QSize(70, 0))\n self.frame_left_menu.setMaximumSize(QtCore.QSize(70, 16777215))\n self.frame_left_menu.setStyleSheet(\"background-color: rgb(35, 35, 35);\")\n self.frame_left_menu.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame_left_menu.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame_left_menu.setObjectName(\"frame_left_menu\")\n self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.frame_left_menu)\n self.verticalLayout_3.setContentsMargins(0, 0, 0, 0)\n self.verticalLayout_3.setSpacing(6)\n self.verticalLayout_3.setObjectName(\"verticalLayout_3\")\n self.frame_top_menus = QtWidgets.QFrame(self.frame_left_menu)\n self.frame_top_menus.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.frame_top_menus.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame_top_menus.setObjectName(\"frame_top_menus\")\n self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.frame_top_menus)\n self.verticalLayout_4.setContentsMargins(0, 0, 0, 0)\n self.verticalLayout_4.setSpacing(0)\n self.verticalLayout_4.setObjectName(\"verticalLayout_4\")\n self.btn_menu_home = QtWidgets.QPushButton(self.frame_top_menus)\n self.btn_menu_home.setMinimumSize(QtCore.QSize(0, 40))\n self.btn_menu_home.setStyleSheet(\"QPushButton{\\n\"\n \" color:rgb(255, 255, 255);\\n\"\n \" background-color: rgb(35, 35, 35);\\n\"\n \" border: 0px solid;\\n\"\n \" background-image: url(:/newPrefix/home.png);\\n\"\n \" background-repeat: no-reperat;\\n\"\n \" border: none;\\n\"\n \" background-position: center;\\n\"\n \"}\\n\"\n \"\\n\"\n \"QPushButton:hover{\\n\"\n \" background-color: rgb(255, 204, 0);\\n\"\n \"}\")\n self.btn_menu_home.setText(\"\")\n self.btn_menu_home.setObjectName(\"btn_menu_home\")\n self.verticalLayout_4.addWidget(self.btn_menu_home)\n self.btn_menu_home_2 = QtWidgets.QPushButton(self.frame_top_menus)\n self.btn_menu_home_2.setMinimumSize(QtCore.QSize(0, 40))\n self.btn_menu_home_2.setStyleSheet(\"QPushButton{\\n\"\n \" color:rgb(255, 255, 255);\\n\"\n \" background-color: rgb(35, 35, 35);\\n\"\n \" border: 0px solid;\\n\"\n \" background-image: url(:/newPrefix/create.png);\\n\"\n \" background-repeat: no-reperat;\\n\"\n \" border: none;\\n\"\n \" background-position: center;\\n\"\n \"}\\n\"\n \"\\n\"\n \"QPushButton:hover{\\n\"\n \" \\n\"\n \" background-color: rgb(255, 204, 0);\\n\"\n \"}\")\n self.btn_menu_home_2.setText(\"\")\n self.btn_menu_home_2.setObjectName(\"btn_menu_home_2\")\n self.verticalLayout_4.addWidget(self.btn_menu_home_2)\n self.btn_menu_home_3 = QtWidgets.QPushButton(self.frame_top_menus)\n self.btn_menu_home_3.setMinimumSize(QtCore.QSize(0, 40))\n self.btn_menu_home_3.setStyleSheet(\"QPushButton{\\n\"\n \" color:rgb(255, 255, 255);\\n\"\n \" background-color: rgb(35, 35, 35);\\n\"\n 
\" \\n\"\n \" background-image: url(:/newPrefix/testList.png);\\n\"\n \" background-repeat: no-reperat;\\n\"\n \" border: none;\\n\"\n \" background-position: center;\\n\"\n \"}\\n\"\n \"\\n\"\n \"QPushButton:hover{\\n\"\n \" \\n\"\n \" background-color: rgb(255, 204, 0);\\n\"\n \"}\")\n self.btn_menu_home_3.setText(\"\")\n self.btn_menu_home_3.setObjectName(\"btn_menu_home_3\")\n self.verticalLayout_4.addWidget(self.btn_menu_home_3)\n self.verticalLayout_3.addWidget(self.frame_top_menus, 0, QtCore.Qt.AlignTop)\n self.horizontalLayout_2.addWidget(self.frame_left_menu)\n self.frame_pages = QtWidgets.QFrame(self.Content)\n self.frame_pages.setMaximumSize(QtCore.QSize(16777215, 16777215))\n self.frame_pages.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame_pages.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame_pages.setObjectName(\"frame_pages\")\n self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.frame_pages)\n self.verticalLayout_5.setContentsMargins(0, 0, 0, 0)\n self.verticalLayout_5.setSpacing(0)\n self.verticalLayout_5.setObjectName(\"verticalLayout_5\")\n self.pages_widget = QtWidgets.QStackedWidget(self.frame_pages)\n self.pages_widget.setObjectName(\"pages_widget\")\n self.pg_home = QtWidgets.QWidget()\n self.pg_home.setObjectName(\"pg_home\")\n self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.pg_home)\n self.horizontalLayout_3.setObjectName(\"horizontalLayout_3\")\n self.frame = QtWidgets.QFrame(self.pg_home)\n self.frame.setMaximumSize(QtCore.QSize(700, 350))\n self.frame.setStyleSheet(\"QFrame{\\n\"\n \" background-color: rgb(56, 58, 89 );\\n\"\n \" color:rgb(220, 220, 220);\\n\"\n \" border-radius:10px;\\n\"\n \"}\\n\"\n \"\\n\"\n \"\")\n self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame.setObjectName(\"frame\")\n self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.frame)\n self.verticalLayout_6.setContentsMargins(0, 0, 0, 0)\n self.verticalLayout_6.setSpacing(0)\n self.verticalLayout_6.setObjectName(\"verticalLayout_6\")\n self.label = QtWidgets.QLabel(self.frame)\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI\")\n font.setPointSize(90)\n font.setBold(False)\n font.setWeight(50)\n self.label.setFont(font)\n self.label.setStyleSheet(\"color:rgb(255, 204, 0);\")\n self.label.setScaledContents(False)\n self.label.setAlignment(QtCore.Qt.AlignCenter)\n self.label.setObjectName(\"label\")\n self.verticalLayout_6.addWidget(self.label)\n self.label_2 = QtWidgets.QLabel(self.frame)\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI\")\n font.setPointSize(23)\n self.label_2.setFont(font)\n self.label_2.setStyleSheet(\"color:rgb(98,114,164);\")\n self.label_2.setAlignment(QtCore.Qt.AlignCenter)\n self.label_2.setObjectName(\"label_2\")\n self.verticalLayout_6.addWidget(self.label_2)\n self.horizontalLayout_3.addWidget(self.frame)\n self.pages_widget.addWidget(self.pg_home)\n self.pg_create = QtWidgets.QWidget()\n self.pg_create.setObjectName(\"pg_create\")\n self.verticalLayout_8 = QtWidgets.QVBoxLayout(self.pg_create)\n self.verticalLayout_8.setContentsMargins(6, 10, 0, 0)\n self.verticalLayout_8.setSpacing(0)\n self.verticalLayout_8.setObjectName(\"verticalLayout_8\")\n self.pages_test_create = QtWidgets.QStackedWidget(self.pg_create)\n self.pages_test_create.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.pages_test_create.setObjectName(\"pages_test_create\")\n self.pg_testname = QtWidgets.QWidget()\n self.pg_testname.setObjectName(\"pg_testname\")\n self.horizontalLayout_4 = 
QtWidgets.QHBoxLayout(self.pg_testname)\n self.horizontalLayout_4.setContentsMargins(0, 0, 0, 0)\n self.horizontalLayout_4.setSpacing(0)\n self.horizontalLayout_4.setObjectName(\"horizontalLayout_4\")\n self.frame_2 = QtWidgets.QFrame(self.pg_testname)\n self.frame_2.setMaximumSize(QtCore.QSize(300, 300))\n self.frame_2.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame_2.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame_2.setLineWidth(1)\n self.frame_2.setObjectName(\"frame_2\")\n self.verticalLayout_9 = QtWidgets.QVBoxLayout(self.frame_2)\n self.verticalLayout_9.setContentsMargins(0, 0, 0, 0)\n self.verticalLayout_9.setSpacing(0)\n self.verticalLayout_9.setObjectName(\"verticalLayout_9\")\n self.label_TituloTest = QtWidgets.QLabel(self.frame_2)\n self.label_TituloTest.setMaximumSize(QtCore.QSize(16777215, 30))\n font = QtGui.QFont()\n font.setPointSize(20)\n font.setBold(False)\n font.setWeight(50)\n self.label_TituloTest.setFont(font)\n self.label_TituloTest.setStyleSheet(\"color: rgb(255, 255, 255);\")\n self.label_TituloTest.setAlignment(QtCore.Qt.AlignCenter)\n self.label_TituloTest.setObjectName(\"label_TituloTest\")\n self.verticalLayout_9.addWidget(self.label_TituloTest)\n spacerItem = QtWidgets.QSpacerItem(20, 33, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)\n self.verticalLayout_9.addItem(spacerItem)\n self.lineEdit_nombreTest = QtWidgets.QLineEdit(self.frame_2)\n self.lineEdit_nombreTest.setMinimumSize(QtCore.QSize(0, 30))\n self.lineEdit_nombreTest.setStyleSheet(\"QLineEdit {\\n\"\n \" \\n\"\n \" color: rgb(255, 255, 255);\\n\"\n \" background-color: rgb(33, 37, 43);\\n\"\n \" border-radius: 5px;\\n\"\n \" border: 2px solid rgb(33, 37, 43);\\n\"\n \" padding-left: 10px;\\n\"\n \" selection-color: rgb(255, 255, 255);\\n\"\n \" selection-background-color: rgb(255, 121, 198);\\n\"\n \"}\\n\"\n \"QLineEdit:hover {\\n\"\n \" border: 2px solid rgb(64, 71, 88);\\n\"\n \"}\\n\"\n \"QLineEdit:focus {\\n\"\n \" border: 2px solid rgb(91, 101, 124);\\n\"\n \"}\")\n self.lineEdit_nombreTest.setText(\"\")\n self.lineEdit_nombreTest.setObjectName(\"lineEdit_nombreTest\")\n self.verticalLayout_9.addWidget(self.lineEdit_nombreTest)\n self.label_tituloTestError = QtWidgets.QLabel(self.frame_2)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.label_tituloTestError.sizePolicy().hasHeightForWidth())\n self.label_tituloTestError.setSizePolicy(sizePolicy)\n self.label_tituloTestError.setMaximumSize(QtCore.QSize(5000, 25))\n self.label_tituloTestError.setLayoutDirection(QtCore.Qt.LeftToRight)\n self.label_tituloTestError.setStyleSheet(\"color: rgb(255, 255, 255);\")\n self.label_tituloTestError.setLineWidth(1)\n self.label_tituloTestError.setText(\"\")\n self.label_tituloTestError.setScaledContents(False)\n self.label_tituloTestError.setAlignment(QtCore.Qt.AlignCenter)\n self.label_tituloTestError.setObjectName(\"label_tituloTestError\")\n self.verticalLayout_9.addWidget(self.label_tituloTestError)\n spacerItem1 = QtWidgets.QSpacerItem(20, 21, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)\n self.verticalLayout_9.addItem(spacerItem1)\n self.frame_3 = QtWidgets.QFrame(self.frame_2)\n self.frame_3.setMaximumSize(QtCore.QSize(16777215, 40))\n self.frame_3.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame_3.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame_3.setObjectName(\"frame_3\")\n 
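# frame_3 is the button row beneath the title field; it only hosts btn_crearTest.\n        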
self.horizontalLayout_5 = QtWidgets.QHBoxLayout(self.frame_3)\n self.horizontalLayout_5.setContentsMargins(0, 0, 0, 0)\n self.horizontalLayout_5.setSpacing(0)\n self.horizontalLayout_5.setObjectName(\"horizontalLayout_5\")\n self.btn_crearTest = QtWidgets.QPushButton(self.frame_3)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.btn_crearTest.sizePolicy().hasHeightForWidth())\n self.btn_crearTest.setSizePolicy(sizePolicy)\n self.btn_crearTest.setMaximumSize(QtCore.QSize(150, 30))\n self.btn_crearTest.setStyleSheet(\"QPushButton {\\n\"\n \" \\n\"\n \" color: rgb(255, 255, 255);\\n\"\n \" border: 2px solid rgb(52, 59, 72);\\n\"\n \" border-radius: 5px; \\n\"\n \" background-color: rgb(52, 59, 72);\\n\"\n \"}\\n\"\n \"QPushButton:hover {\\n\"\n \" background-color: rgb(57, 65, 80);\\n\"\n \" border: 2px solid rgb(61, 70, 86);\\n\"\n \"}\\n\"\n \"QPushButton:pressed { \\n\"\n \" background-color: rgb(35, 40, 49);\\n\"\n \" border: 2px solid rgb(43, 50, 61);\\n\"\n \"}\")\n self.btn_crearTest.setObjectName(\"btn_crearTest\")\n self.horizontalLayout_5.addWidget(self.btn_crearTest)\n self.verticalLayout_9.addWidget(self.frame_3)\n self.horizontalLayout_4.addWidget(self.frame_2)\n self.pages_test_create.addWidget(self.pg_testname)\n self.preguntas_test = QtWidgets.QWidget()\n self.preguntas_test.setObjectName(\"preguntas_test\")\n self.horizontalLayout_6 = QtWidgets.QHBoxLayout(self.preguntas_test)\n self.horizontalLayout_6.setContentsMargins(0, 0, 0, 0)\n self.horizontalLayout_6.setSpacing(0)\n self.horizontalLayout_6.setObjectName(\"horizontalLayout_6\")\n self.frame_4 = QtWidgets.QFrame(self.preguntas_test)\n self.frame_4.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame_4.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame_4.setObjectName(\"frame_4\")\n self.label_numeroPregunta = QtWidgets.QLabel(self.frame_4)\n self.label_numeroPregunta.setGeometry(QtCore.QRect(380, 10, 101, 51))\n self.label_numeroPregunta.setStyleSheet(\"color: rgb(255, 255, 255);\")\n self.label_numeroPregunta.setObjectName(\"label_numeroPregunta\")\n self.textEdit_Enunciado = QtWidgets.QTextEdit(self.frame_4)\n self.textEdit_Enunciado.setGeometry(QtCore.QRect(180, 90, 501, 31))\n self.textEdit_Enunciado.setStyleSheet(\"color: rgb(255, 255, 255);\")\n self.textEdit_Enunciado.setObjectName(\"textEdit_Enunciado\")\n self.label_Enunciado = QtWidgets.QLabel(self.frame_4)\n self.label_Enunciado.setGeometry(QtCore.QRect(90, 90, 71, 31))\n self.label_Enunciado.setStyleSheet(\"color: rgb(255, 255, 255);\")\n self.label_Enunciado.setObjectName(\"label_Enunciado\")\n self.frame_5 = QtWidgets.QFrame(self.frame_4)\n self.frame_5.setGeometry(QtCore.QRect(170, 130, 511, 231))\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.frame_5.sizePolicy().hasHeightForWidth())\n self.frame_5.setSizePolicy(sizePolicy)\n self.frame_5.setMaximumSize(QtCore.QSize(600, 300))\n self.frame_5.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame_5.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame_5.setObjectName(\"frame_5\")\n self.verticalLayout_10 = QtWidgets.QVBoxLayout(self.frame_5)\n self.verticalLayout_10.setObjectName(\"verticalLayout_10\")\n self.frame_6 = QtWidgets.QFrame(self.frame_5)\n 
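# frame_6 and frame_7 are the two answer rows (label, text edit and a correct-answer checkbox).\n        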
self.frame_6.setMaximumSize(QtCore.QSize(16777215, 54))\n self.frame_6.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame_6.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame_6.setObjectName(\"frame_6\")\n self.horizontalLayout_7 = QtWidgets.QHBoxLayout(self.frame_6)\n self.horizontalLayout_7.setObjectName(\"horizontalLayout_7\")\n self.label_resA = QtWidgets.QLabel(self.frame_6)\n self.label_resA.setStyleSheet(\"color: rgb(255, 255, 255);\")\n self.label_resA.setObjectName(\"label_resA\")\n self.horizontalLayout_7.addWidget(self.label_resA)\n self.textEdit_resA = QtWidgets.QTextEdit(self.frame_6)\n self.textEdit_resA.setStyleSheet(\"color: rgb(255, 255, 255);\")\n self.textEdit_resA.setObjectName(\"textEdit_resA\")\n self.horizontalLayout_7.addWidget(self.textEdit_resA)\n self.checkBox_resA = QtWidgets.QCheckBox(self.frame_6)\n self.checkBox_resA.setStyleSheet(\"color: rgb(255, 255, 255);\")\n self.checkBox_resA.setObjectName(\"checkBox_resA\")\n self.horizontalLayout_7.addWidget(self.checkBox_resA)\n self.verticalLayout_10.addWidget(self.frame_6)\n self.frame_7 = QtWidgets.QFrame(self.frame_5)\n self.frame_7.setMaximumSize(QtCore.QSize(16777215, 54))\n self.frame_7.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame_7.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame_7.setObjectName(\"frame_7\")\n self.horizontalLayout_8 = QtWidgets.QHBoxLayout(self.frame_7)\n self.horizontalLayout_8.setObjectName(\"horizontalLayout_8\")\n self.label_resB = QtWidgets.QLabel(self.frame_7)\n self.label_resB.setStyleSheet(\"color: rgb(255, 255, 255);\")\n self.label_resB.setObjectName(\"label_resB\")\n self.horizontalLayout_8.addWidget(self.label_resB)\n self.textEdit_resB = QtWidgets.QTextEdit(self.frame_7)\n self.textEdit_resB.setStyleSheet(\"color: rgb(255, 255, 255);\")\n self.textEdit_resB.setObjectName(\"textEdit_resB\")\n self.horizontalLayout_8.addWidget(self.textEdit_resB)\n self.checkBox_resB = QtWidgets.QCheckBox(self.frame_7)\n self.checkBox_resB.setStyleSheet(\"color: rgb(255, 255, 255);\")\n self.checkBox_resB.setObjectName(\"checkBox_resB\")\n self.horizontalLayout_8.addWidget(self.checkBox_resB)\n self.verticalLayout_10.addWidget(self.frame_7)\n self.label_preguntaError = QtWidgets.QLabel(self.frame_5)\n self.label_preguntaError.setStyleSheet(\"color: rgb(255, 255, 255);\")\n self.label_preguntaError.setText(\"\")\n self.label_preguntaError.setAlignment(QtCore.Qt.AlignCenter)\n self.label_preguntaError.setObjectName(\"label_preguntaError\")\n self.verticalLayout_10.addWidget(self.label_preguntaError)\n self.frame_8 = QtWidgets.QFrame(self.frame_5)\n self.frame_8.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame_8.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame_8.setObjectName(\"frame_8\")\n self.horizontalLayout_9 = QtWidgets.QHBoxLayout(self.frame_8)\n self.horizontalLayout_9.setObjectName(\"horizontalLayout_9\")\n self.btn_siguientePregunta = QtWidgets.QPushButton(self.frame_8)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.btn_siguientePregunta.sizePolicy().hasHeightForWidth())\n self.btn_siguientePregunta.setSizePolicy(sizePolicy)\n self.btn_siguientePregunta.setMaximumSize(QtCore.QSize(150, 30))\n self.btn_siguientePregunta.setStyleSheet(\"QPushButton {\\n\"\n \" \\n\"\n \" color: rgb(255, 255, 255);\\n\"\n \" border: 2px solid rgb(52, 59, 72);\\n\"\n \" border-radius: 5px; \\n\"\n \" background-color: 
rgb(52, 59, 72);\\n\"\n \"}\\n\"\n \"QPushButton:hover {\\n\"\n \" background-color: rgb(57, 65, 80);\\n\"\n \" border: 2px solid rgb(61, 70, 86);\\n\"\n \"}\\n\"\n \"QPushButton:pressed { \\n\"\n \" background-color: rgb(35, 40, 49);\\n\"\n \" border: 2px solid rgb(43, 50, 61);\\n\"\n \"}\")\n self.btn_siguientePregunta.setObjectName(\"btn_siguientePregunta\")\n self.horizontalLayout_9.addWidget(self.btn_siguientePregunta)\n self.btn_finalizarTest = QtWidgets.QPushButton(self.frame_8)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.btn_finalizarTest.sizePolicy().hasHeightForWidth())\n self.btn_finalizarTest.setSizePolicy(sizePolicy)\n self.btn_finalizarTest.setMaximumSize(QtCore.QSize(150, 30))\n self.btn_finalizarTest.setStyleSheet(\"QPushButton {\\n\"\n \" \\n\"\n \" color: rgb(255, 255, 255);\\n\"\n \" border: 2px solid rgb(52, 59, 72);\\n\"\n \" border-radius: 5px; \\n\"\n \" background-color: rgb(52, 59, 72);\\n\"\n \"}\\n\"\n \"QPushButton:hover {\\n\"\n \" background-color: rgb(57, 65, 80);\\n\"\n \" border: 2px solid rgb(61, 70, 86);\\n\"\n \"}\\n\"\n \"QPushButton:pressed { \\n\"\n \" background-color: rgb(35, 40, 49);\\n\"\n \" border: 2px solid rgb(43, 50, 61);\\n\"\n \"}\")\n self.btn_finalizarTest.setObjectName(\"btn_finalizarTest\")\n self.horizontalLayout_9.addWidget(self.btn_finalizarTest)\n self.verticalLayout_10.addWidget(self.frame_8)\n self.horizontalLayout_6.addWidget(self.frame_4)\n self.pages_test_create.addWidget(self.preguntas_test)\n self.verticalLayout_8.addWidget(self.pages_test_create)\n self.pages_widget.addWidget(self.pg_create)\n self.pg_list = QtWidgets.QWidget()\n self.pg_list.setObjectName(\"pg_list\")\n self.verticalLayout_7 = QtWidgets.QVBoxLayout(self.pg_list)\n self.verticalLayout_7.setContentsMargins(0, 0, 0, 0)\n self.verticalLayout_7.setSpacing(1)\n self.verticalLayout_7.setObjectName(\"verticalLayout_7\")\n self.label_TituloTest_2 = QtWidgets.QLabel(self.pg_list)\n self.label_TituloTest_2.setMaximumSize(QtCore.QSize(16777215, 30))\n font = QtGui.QFont()\n font.setPointSize(20)\n font.setBold(False)\n font.setWeight(50)\n self.label_TituloTest_2.setFont(font)\n self.label_TituloTest_2.setStyleSheet(\"color: rgb(255, 255, 255);\")\n self.label_TituloTest_2.setAlignment(QtCore.Qt.AlignCenter)\n self.label_TituloTest_2.setObjectName(\"label_TituloTest_2\")\n self.verticalLayout_7.addWidget(self.label_TituloTest_2)\n self.listWidgetTests = QtWidgets.QListWidget(self.pg_list)\n self.listWidgetTests.setStyleSheet(\"color: rgb(255, 255, 255);\")\n self.listWidgetTests.setObjectName(\"listWidgetTests\")\n self.verticalLayout_7.addWidget(self.listWidgetTests)\n self.pages_widget.addWidget(self.pg_list)\n self.verticalLayout_5.addWidget(self.pages_widget)\n self.horizontalLayout_2.addWidget(self.frame_pages)\n self.verticalLayout.addWidget(self.Content)\n MainWindow.setCentralWidget(self.centralwidget)\n\n self.retranslateUi(MainWindow)\n self.pages_widget.setCurrentIndex(2)\n self.pages_test_create.setCurrentIndex(0)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))\n self.label.setText(_translate(\"MainWindow\", \"EzTests\"))\n self.label_2.setText(_translate(\"MainWindow\", \"Genera Test de manera sencilla\"))\n 
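# The remaining strings localize the test-creation form and the test list page.\n        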
self.label_TituloTest.setText(_translate(\"MainWindow\", \"Título\"))\n        self.lineEdit_nombreTest.setPlaceholderText(_translate(\"MainWindow\", \"Nombre del Test\"))\n        self.btn_crearTest.setText(_translate(\"MainWindow\", \"Crear Test\"))\n        self.label_numeroPregunta.setText(_translate(\"MainWindow\", \"Pregunta número 1\"))\n        self.textEdit_Enunciado.setPlaceholderText(_translate(\"MainWindow\", \"Enunciado de la pregunta\"))\n        self.label_Enunciado.setText(_translate(\"MainWindow\", \"Pregunta:\"))\n        self.label_resA.setText(_translate(\"MainWindow\", \"Respuesta A:\"))\n        self.textEdit_resA.setPlaceholderText(_translate(\"MainWindow\", \"Texto de la respuesta\"))\n        self.checkBox_resA.setText(_translate(\"MainWindow\", \"Correcta?\"))\n        self.label_resB.setText(_translate(\"MainWindow\", \"Respuesta B:\"))\n        self.textEdit_resB.setPlaceholderText(_translate(\"MainWindow\", \"Texto de la respuesta\"))\n        self.checkBox_resB.setText(_translate(\"MainWindow\", \"Correcta?\"))\n        self.btn_siguientePregunta.setText(_translate(\"MainWindow\", \"Siguiente pregunta\"))\n        self.btn_finalizarTest.setText(_translate(\"MainWindow\", \"Finalizar Test\"))\n        self.label_TituloTest_2.setText(_translate(\"MainWindow\", \"Lista de Test\"))\n\n\nfrom images import create_rc\nfrom images import home_rc\nfrom images import menu_rc\nfrom images import testList_rc\n","repo_name":"Jowy43/EzTestGenerator","sub_path":"UI/mainWindow.py","file_name":"mainWindow.py","file_ext":"py","file_size_in_byte":32188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"42205006641","text":"from board_env import SnapyEnv\n\nenv = SnapyEnv()\n\nepisodes = 100\n\nfor episode in range(episodes):\n    done = False\n    obs = env.reset()\n    # Step until the episode terminates; looping on True here would never advance to the next episode.\n    while not done:\n        random_action = env.action_space.sample()\n        print('action',random_action)\n        obs, reward, done, info = env.step(random_action)\n        print('reward', reward)\n","repo_name":"here-and-now/SnaPy","sub_path":"tests/doublecheckenv.py","file_name":"doublecheckenv.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"73814059710","text":"import asyncio\nimport json\nfrom channels.generic.websocket import AsyncWebsocketConsumer\nfrom groupchat.models import Message\nfrom rest_framework_simplejwt.tokens import AccessToken\nfrom channels.db import database_sync_to_async\n\n\nclass GroupChatConsumer(AsyncWebsocketConsumer):\n\n    @database_sync_to_async\n    def save_message(self, group_id, sender_id, data):\n        message = Message.objects.create(\n            group_id=group_id,\n            sender_id=sender_id,\n            content=data\n        )\n        sender_username=message.sender.username\n        return sender_username\n\n    async def connect(self):\n        path = self.scope['path']\n        parts = path.split('/')\n        self.group_id = parts[1] if len(parts) > 1 else None\n\n        self.group_name = f'group_{self.group_id}'\n        \n        try:\n            # Authenticate the user\n            await self.authenticate()\n\n            # If authentication is successful, accept the WebSocket connection\n            await self.accept()\n            # Join the group\n            await self.channel_layer.group_add(self.group_name, self.channel_name)\n\n            \n        except Exception as e:\n            # If authentication fails, reject the WebSocket connection\n            await self.close()\n            new_message = \"Error message: \" + str(e)\n            raise Exception(new_message) from e\n        \n    async def authenticate(self):\n        try: \n            # Retrieve the JWT token from the WebSocket headers\n            access_token=self.scope['headers'][0][1].split(' ')[1]\n\n            # Decode and validate 
the JWT token\n access_token = AccessToken(access_token)\n\n # Check if the token is valid and not expired\n access_token.verify()\n\n decoded_token = access_token.payload\n is_admin = decoded_token['is_admin']\n self.user_id = decoded_token['user_id']\n\n\n if is_admin:\n raise Exception(\"You are not authorized to perform this action\")\n except Exception as e:\n new_message = \"Error message: \" + str(e)\n raise Exception(new_message) from e\n\n async def disconnect(self, close_code):\n # Leave the group\n await self.channel_layer.group_discard(self.group_name, self.channel_name)\n\n async def receive(self, text_data):\n try:\n data = json.loads(text_data)\n message_content = data['content']\n group_id= self.group_id\n\n try: \n sender_username=await self.save_message(group_id,self.user_id,message_content)\n \n # Broadcast the message to the group\n await self.channel_layer.group_send(self.group_name, {\n \"type\":\"group_message\",\n \"message\":message_content,\n \"sender_username\":sender_username\n })\n \n except asyncio.TimeoutError as e:\n return {\"results\": f\"timeout error on {e}\"}\n except Exception as e:\n return {\"results\": f\"error on {e}\"}\n \n except Exception as e:\n print(e)\n\n async def group_message(self, event):\n message = event['message']\n sender_username = event['sender_username']\n\n # Send the message to the WebSocket\n await self.send(text_data=json.dumps({\n 'message': message,\n 'sender_username': sender_username\n }))\n","repo_name":"shraddhaL/RealtimeChatConnect","sub_path":"ChatConnect/consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":3525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"25231149686","text":"#!/usr/bin/python2\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mlp import MLP\n\ndef esincos(x):\n return np.exp(x) - x * np.sin(x) * np.cos(x)\n\nif __name__ == \"__main__\":\n x = np.random.rand(1, 100) * 10\n #x = np.linspace(0,10,100)\n y = (np.sin(x) + 1) / 2\n\n plt.plot(x[0], y[0], \"*r\")\n plt.show()\n\n # print x, y\n\n net = MLP([1, 10, 1])\n \n for iter in range(10000):\n t1 = [net.forward(x.take([i],axis=1))[0][0] for i in range(100)]\n t = [(net.forward(x.take([i],axis=1)) - y.take([i],axis=1))[0][0] for i in range(100)]\n # print(t)\n #print(x[0])\n #print(t)\n if(iter % 2000 == 0):\n plt.plot(x[0], t1, \"g.\")\n plt.show()\n error = np.sum(np.abs(t))\n print(error)\n for i in range(100):\n #print \"x.take([i], axis=1)\"\n #print x.take([i], axis=1)\n net.calcdiff(x.take([i], axis=1), y.take([i], axis=1) ,1)\n net.update(0.1, 0.9)\n","repo_name":"daikankan/neural-network","sub_path":"python/test_mlp.py","file_name":"test_mlp.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"71058837312","text":"import numpy as np\nimport cv2\n\ncapture = cv2.VideoCapture(0)\ncapture.set(cv2.CAP_PROP_FRAME_WIDTH, 320)\ncapture.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)\n\nwhile True:\n if cv2.waitKey(10) > 0: \n break\n\n ret, frame = capture.read()\n\n cv2.putText(frame,'test',(0,25), cv2.FONT_HERSHEY_PLAIN,1,(255,255,255))\n cv2.imshow(\"camera test\", frame)\n\n\n\nimport tensorflow.keras\nimport numpy as np\nimport cv2\n\nmodel_filename ='C:\\\\AISpace\\\\keras_model.h5'\nmodel = tensorflow.keras.models.load_model(model_filename)\n\ncapture = cv2.VideoCapture(0)\ncapture.set(cv2.CAP_PROP_FRAME_WIDTH, 320)\ncapture.set(cv2.CAP_PROP_FRAME_HEIGHT, 
240)\n\n\ndef preprocessing(frame):\n size = (224, 224)\n frame_resized = cv2.resize(frame, size, interpolation=cv2.INTER_AREA)\n frame_normalized = (frame_resized.astype(np.float32) / 127.0) - 1\n frame_reshaped = frame_normalized.reshape((1, 224, 224, 3))\n\n return frame_reshaped\n\n\ndef predict(frame):\n prediction = model.predict(frame)\n return prediction\n\nwhile True:\n ret, frame = capture.read()\n\n if cv2.waitKey(100) > 0: \n break\n\n preprocessed = preprocessing(frame)\n prediction = predict(preprocessed)\n\n if (prediction[0,0] < prediction[0,1]):\n print('case1')\n cv2.putText(frame, 'case1', (0, 25), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 0))\n else:\n cv2.putText(frame, 'case2', (0, 25), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 0))\n print('case2')\n\n cv2.imshow(\"VideoFrame\", frame)\n \n","repo_name":"lcm412/Open_source_lab1","sub_path":"src/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"33205770426","text":"from Dataclasses.layer import Layer\nimport json\nimport torch\nfrom modelparser import ModelParser\nfrom Dataclasses.hyperpar import Hyperparameters\n\nclass MnistModel(torch.nn.Module):\n def __init__(self,layer_list):\n super().__init__()\n self.layer_list = layer_list\n\n def _activation_mapper(self, act_string):\n\n if act_string == \"ReLU\":\n return torch.nn.ReLU()\n \n elif act_string == 'Sigmoid':\n return torch.nn.Sigmoid()\n \n else:\n pass\n\n def build_model(self):\n \n module_list = list()\n\n for layer_ix, layer in enumerate(self.layer_list):\n \n if \"Linear\" in layer.Layer_name:\n module_list.append(torch.nn.Flatten())\n intilayer = torch.nn.Linear(layer.Num_inputs, layer.Num_outputs, bias = layer.Bias)\n if \"Conv\" in layer.Layer_name:\n intilayer = torch.nn.Conv2d(in_channels = layer.Num_inputs, out_channels = layer.Num_outputs, kernel_size = layer.kernel_size,\n stride = layer.stride, padding = layer.padding, bias = False)\n if \"MaxPool2d\" in layer.Layer_name:\n intilayer = torch.nn.MaxPool2d(kernel_size = layer.kernel_size)\n \n module_list.append(intilayer)\n\n act = self._activation_mapper(layer.Activation)\n if act != None:\n module_list.append(act)\n\n dpt = torch.nn.Dropout2d(layer.dropout) if layer.Dropout else False\n if dpt:\n module_list.append(dpt)\n\n print(module_list)\n self.pred = torch.nn.Sequential(*module_list)\n\n return self.pred\n\n\ndef inimodel():\n try:\n parser = ModelParser(\"../base_config.json\")\n except:\n print('Couldn\\'t import the configartion module')\n\n layers = parser.get_list()\n kwargs = parser.get_hp()[0]\n model = MnistModel(layers)\n \n return model.build_model()\n\nif __name__ == '__main__':\n model = inimodel()\n print(model[0].weight)","repo_name":"Ruturajrmane/Projects","sub_path":"ML Project Architecture/src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"27195342113","text":"# -*- coding: utf-8 -*-\n\"\"\"\n斗地主拆牌模块\n@author 江胤佐\n\"\"\"\nfrom __future__ import annotations\n\nimport math\nfrom abc import ABCMeta\nfrom collections import defaultdict\nfrom copy import deepcopy\nfrom functools import cmp_to_key\nfrom typing import Optional\n\nfrom duguai.card.cards import *\nfrom duguai.card.cards import card_lt2, card_split\nfrom duguai.card.combo import Combo\n\n\"\"\"顺子/连对/最小的长度\"\"\"\nKIND_TO_MIN_LEN = {1: 5, 2: 3}\nMAX_Q = 
10000\n\n\ndef _most_value(x):\n return np.argmax(np.bincount(x))\n\n\nMAX_VALUE_CMP = cmp_to_key(lambda x, y: max(x) - max(y))\nMOST_VALUE_CMP = cmp_to_key(lambda x, y: _most_value(x) - _most_value(y))\n\n\nclass AbstractDecomposer(metaclass=ABCMeta):\n \"\"\"\n 拆牌类。该类只负责拆出较好的牌组,不考虑其它玩家手牌的情况。\n 状态(state)表示目前的手牌。\n 动作(action)表示待出的牌。\n Q值:状态-动作的价值,Q(s, a)值越大,则越要在状态s下采取行动a.\n q = d(next_state) + len(a)\n \"\"\"\n\n @classmethod\n def decompose_value(cls, card_after: np.ndarray) -> int:\n \"\"\"\n 获取一副牌的分解值\n \"\"\"\n if len(card_after) == 0:\n return 0\n card_after = card_lt2(card_after)\n di, d_value, _ = card_to_suffix_di(card_after)\n\n # 顺子/连对\n for t in range(1, 3):\n max_len = max(len(i) for i in card_split(di[t]))\n if max_len >= KIND_TO_MIN_LEN[t]:\n d_value = max(d_value, t * max_len)\n\n return d_value\n\n @classmethod\n def _delta_q(cls, _max_q, _q):\n return (_max_q - _q) if _max_q - _q < 1000 else (_max_q - MAX_Q + 1 - _q)\n\n def _calc_q(self, lt2_state: np.ndarray, actions: np.ndarray[np.ndarray]) -> np.ndarray:\n \"\"\"对每一种状态-动作计算其Q\"\"\"\n result = []\n for a in actions:\n\n reward: int = 0\n # 拆炸弹的惩罚值,保证在 5 5 5 5 6的情况下拆出炸弹而非三带一\n for card in a:\n if np.sum(lt2_state == card) == 4 and len(a) < 4:\n reward = -1\n break\n\n next_state: np.ndarray = get_next_state(lt2_state, a)\n if next_state.size > 0:\n d_value = self.decompose_value(next_state)\n result.append(d_value + reward + len(a))\n else:\n # 该动作打完就没牌了,故d值为最大值\n result.append(MAX_Q + reward + len(a))\n return np.array(result)\n\n def _eval_actions(self,\n func,\n lt2_state: np.ndarray,\n **kwargs) -> Tuple[np.ndarray, np.ndarray]:\n actions = np.array(\n func(lt2_state, kwargs['length']) if 'card_list' not in kwargs.keys() else func(kwargs['card_list'],\n kwargs['kind'],\n kwargs['length']))\n # q = d(next state) + len(a)\n # 计算lt2_state下每一个action的q值\n q_list: np.ndarray = self._calc_q(lt2_state, actions)\n if len(q_list) == 0:\n return np.array([], dtype=int), np.array([], dtype=int)\n\n return actions, q_list\n\n def _process_card(self, card: np.ndarray):\n\n # 将手牌分解成不连续的部分\n self._lt2_cards, eq2_cards, self._ghosts = card_lt2_two_g(card)\n self._lt2_states: List[np.ndarray] = card_split(self._lt2_cards)\n self.card2_count: int = len(eq2_cards)\n\n def _get_all_actions_and_q_lists(self, lt2_state: np.ndarray) -> int:\n \"\"\"获取一个lt2_state下所有的actions及其对应的q_lists\"\"\"\n\n di, max_count, max_card_value = card_to_suffix_di(lt2_state)\n\n # solo pair trio bomb plane other\n self._actions = [[], [], [], [], [], []]\n self._q_lists = [np.array([], dtype=int), np.array([], dtype=int), np.array([], dtype=int),\n np.array([], dtype=int), np.array([], dtype=int), np.array([], dtype=int)]\n\n # solo pair trio bomb\n for i in range(1, 5):\n self._actions[i - 1], self._q_lists[i - 1] = self._eval_actions(_get_single_actions, lt2_state, length=i)\n\n # plane\n for length in range(3, len(di[3]) + 1):\n seq_actions, seq_q_list = self._eval_actions(_get_seq_actions,\n lt2_state,\n card_list=di[3],\n kind=3,\n length=length)\n self._actions[4].extend(seq_actions)\n self._q_lists[4] = np.concatenate([self._q_lists[4], seq_q_list])\n\n # 拆出顺子、连对\n for k, min_len in KIND_TO_MIN_LEN.items():\n card_list = di[k]\n for length in range(min_len, len(card_list) + 1):\n seq_actions, seq_q_list = self._eval_actions(_get_seq_actions,\n lt2_state,\n card_list=card_list,\n kind=k,\n length=length)\n self._actions[5].extend(seq_actions)\n self._q_lists[5] = np.concatenate([self._q_lists[5], seq_q_list])\n\n max_q = 0\n for q_list in self._q_lists:\n 
if q_list.size:\n max_q = max(np.max(q_list), max_q)\n return max_q\n\n\nclass FollowDecomposer(AbstractDecomposer):\n \"\"\"\n 跟牌拆牌器\n \"\"\"\n\n def __init__(self):\n self._output: Optional[List[np.ndarray]] = None\n\n # 存放带牌的列表\n self._take_lists: Optional[Dict[int, List[np.ndarray]]] = None\n\n # 存放主牌的列表\n self._main_lists: Optional[Dict[int, List[np.ndarray]]] = None\n\n # 存放主牌+带牌的列表\n self._main_take_lists: Optional[Dict[int, List[np.ndarray]]] = None\n\n # 炸弹列表\n self._bomb_list: Optional[List[np.ndarray]] = None\n\n # 仅维护主牌大小\n self._max_combo: Optional[Combo] = None\n\n self._last_combo: Optional[Combo] = None\n\n self._main_kind: Optional[int] = None\n self._take_kind: Optional[int] = None\n\n def _add_bomb(self, bomb_list: list) -> None:\n \"\"\"添加炸弹\"\"\"\n\n self._bomb_list: List[np.ndarray] = []\n\n # 添加王炸\n if len(self._ghosts) == 2:\n self._bomb_list.append(self._ghosts)\n\n # 添加4个2炸弹\n if self.card2_count == 4:\n self._bomb_list.append(np.array([CARD_2] * 4))\n\n if self._last_combo.is_bomb():\n for card in bomb_list:\n if card > self._last_combo.value:\n self._bomb_list.append(np.array([card, card, card, card]))\n else:\n for card in bomb_list:\n self._bomb_list.append(np.array([card, card, card, card]))\n\n def _add_valid_ghost(self):\n \"\"\"加入单只王。在此之前先加入2\"\"\"\n\n if self._ghosts.size:\n if self._last_combo.is_solo() \\\n and self._last_combo.main_kind == 1 and self._last_combo.value < self._ghosts[-1]:\n self._main_lists[2].append(self._ghosts[-1:])\n self._max_combo.cards = self._ghosts[-1:]\n elif self._max_combo.take_kind == 1:\n self._take_lists[2].append(self._ghosts[-1:])\n\n def _add_valid_card2(self):\n \"\"\"加入合法的2,之后再加入王\"\"\"\n if self.card2_count:\n if self._last_combo.is_single() \\\n and self._last_combo.main_kind <= self.card2_count and self._last_combo.value < CARD_2:\n self._main_lists[self._max_combo.main_kind].append(np.array([CARD_2] * self._max_combo.main_kind))\n self._max_combo.cards = [CARD_2] * self._max_combo.main_kind\n if self._last_combo.take_kind <= self.card2_count:\n # 2的价值比正常牌+1\n self._take_lists[self._last_combo.take_kind + 1].append(np.array([CARD_2] * self._last_combo.take_kind))\n\n def __merge_takes_to_main_seq(self, main_q: int, main_seq: np.ndarray, take_count: int) -> Tuple[int, np.ndarray]:\n tk = 0\n main_takes: np.ndarray = np.array(main_seq)\n total_delta_q = main_q\n\n # 从小到大遍历_take_lists,保证先合并最佳takes\n for delta_q, take_list in sorted(self._take_lists.items()):\n for take in take_list:\n if take[0] not in main_seq:\n tk += 1\n total_delta_q += delta_q\n main_takes = np.concatenate((main_takes, take))\n if tk == take_count:\n return total_delta_q, main_takes\n return 0, np.array([])\n\n def _merge_valid_main_takes(self) -> None:\n \"\"\"将合法的主牌和带牌拼接起来\"\"\"\n\n # 非炸弹是3带1单/1对,炸弹是4带2\n take_count = self._last_combo.seq_len\n if self._last_combo.main_kind == 4:\n take_count *= 2\n\n self._main_take_lists = defaultdict(list)\n\n for take_list in self._take_lists.values():\n take_list.sort(key=MAX_VALUE_CMP)\n\n if self._main_lists:\n\n # 挑选最佳的main_list,并排序\n main_q = min(self._main_lists.keys())\n self._main_lists[main_q].sort(key=MAX_VALUE_CMP)\n\n for main_seq in self._main_lists[main_q]:\n total_delta_q, main_takes = self.__merge_takes_to_main_seq(main_q, main_seq, take_count)\n if main_takes.size > 0:\n # 将得到的main_takes根据价值好坏加入相应的列表中\n self._main_take_lists[total_delta_q].append(main_takes)\n\n # 得到最大的main_takes\n self._max_main_takes = self.__merge_takes_to_main_seq(0, self._max_combo.cards, take_count)[1]\n\n def 
_update_main_lists_and_find_max(self, a: np.ndarray, q: int, max_q: int) -> None:\n \"\"\"在action有效的情况下加入到主列表,并更新最大值\"\"\"\n main_kind = self._last_combo.main_kind\n seq_len = self._last_combo.seq_len\n value = self._last_combo.value\n\n combo = Combo()\n combo.cards = a\n # 筛选符合规则的主牌\n if combo.value > value and combo.main_kind == main_kind and combo.seq_len == seq_len:\n self._main_lists[self._delta_q(max_q, q)].append(a)\n # 仅对比主牌大小,不关心是否带了牌\n if combo.value > self._max_combo.value:\n self._max_combo = deepcopy(combo)\n\n def _best_main_takes(self):\n if not self._main_take_lists:\n return 0, []\n min_delta_q = min(self._main_take_lists.keys())\n self._main_take_lists[min_delta_q].sort(key=MOST_VALUE_CMP)\n return min_delta_q, self._main_take_lists[min_delta_q]\n\n def _append_takes(self, length: int, kind: int, max_q):\n for a, q in zip(self._actions[kind - 1], self._q_lists[kind - 1]):\n self._take_lists[self._delta_q(max_q, q)].append(a[:length])\n\n def _add_valid_lt2_actions(self):\n for lt2_state in self._lt2_states:\n if lt2_state.size > 0:\n max_q: int = super(FollowDecomposer, self)._get_all_actions_and_q_lists(lt2_state)\n\n # 把单或者对加入_take_lists,对子可以视为2个单加入take列表\n if self._take_kind == 1:\n self._append_takes(1, 1, max_q)\n self._append_takes(1, 2, max_q)\n elif self._take_kind == 2:\n self._append_takes(2, 2, max_q)\n\n for actions, q_list in zip(self._actions, self._q_lists):\n for a, q in zip(actions, q_list):\n # 将合法的action加入到_main_lists,同时更新最大的main_kind\n self._update_main_lists_and_find_max(a, q, max_q)\n\n def _thieve_valid_actions(self) -> Tuple[int, List[np.ndarray]]:\n \"\"\"根据last combo的限制,筛选出有效且较好的动作\"\"\"\n\n self._add_valid_card2()\n self._add_valid_ghost()\n self._add_valid_lt2_actions()\n\n if not self._main_lists:\n return 0, []\n\n if self._take_kind:\n self._merge_valid_main_takes()\n return self._best_main_takes()\n else:\n self._max_main_takes = self._max_combo.cards\n min_delta_q = min(self._main_lists.keys())\n\n self._main_lists[min_delta_q].sort(key=MAX_VALUE_CMP)\n return min_delta_q, self._main_lists[min_delta_q]\n\n def _init(self, last_combo: Combo):\n # 初始化,key代表max_q - q,key越小拆得越好,越要优先选择\n self._take_lists: Dict[int, List[np.ndarray]] = defaultdict(list)\n self._main_lists: Dict[int, List[np.ndarray]] = defaultdict(list)\n self._output = []\n\n # max_combo仅保留主要部分,忽略带的部分\n self._max_combo = deepcopy(last_combo)\n self._max_main_takes = self._max_combo.cards\n self._last_combo = last_combo\n\n self._main_kind = self._max_combo.main_kind\n self._take_kind = self._max_combo.take_kind\n\n def get_good_follows(self, state: np.ndarray, last_combo: Combo) \\\n -> Tuple[List[np.ndarray], int, List[np.ndarray], np.ndarray]:\n \"\"\"\n 尽量给出较好的跟牌行动。\n @param state: 当前手牌。\n @param last_combo: 上一次出牌\n @return: 四元组:炸弹, 最好的组合 - 最好的跟牌(数字越大越不应该这样拆牌), 好的出牌的数组, 最大的出牌\n \"\"\"\n if last_combo.is_rocket():\n return [], 0, [], np.array([], dtype=int)\n\n self._process_card(state)\n self._init(last_combo)\n\n min_delta_q, self._output = self._thieve_valid_actions()\n\n self._add_bomb(card_to_di(self._lt2_cards)[0][4])\n\n self._max_combo.cards = self._max_main_takes\n\n return self._bomb_list, min_delta_q, self._output, (\n self._max_main_takes if self._max_combo > last_combo else np.array([], dtype=int))\n\n\nclass PlayHand:\n \"\"\"\n 出牌时,根据d_actions对手牌进行进一步分类\n \"\"\"\n\n def __init__(self, min_solo: int, max_solo: int):\n \"\"\"\n 初始化Hand类\n @see PlayDecomposer\n \"\"\"\n # solo pair trio bomb\n self._singles: List[List[np.ndarray]] = [[], [], [], []]\n\n 
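# _planes holds plain airplanes; the *_take lists below hold main parts already merged with their kicker cards.\n        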
self._planes: List[np.ndarray] = []\n\n self._trios_take: List[np.ndarray] = []\n self._planes_take: List[np.ndarray] = []\n self._bombs_take: List[np.ndarray] = []\n\n self._seq_solo5: List[np.ndarray] = []\n self._other_seq: List[np.ndarray] = []\n self._has_rocket: bool = False\n\n self._min_solo: int = min_solo\n self._max_solo: int = max_solo\n\n def add_to_hand(self, card_lists: List[Dict[int, List[np.ndarray]]]):\n \"\"\"将各种类型牌加入到PlayHand中\"\"\"\n for i in range(4):\n if card_lists[i].keys():\n min_delta_q = min(card_lists[i].keys())\n self._singles[i] = card_lists[i][min_delta_q]\n self._singles[i].sort(key=MAX_VALUE_CMP)\n\n # plane\n if card_lists[4].keys():\n min_delta_q = min(card_lists[4].keys())\n self._planes = card_lists[4][min_delta_q]\n self._planes.sort(key=MAX_VALUE_CMP)\n\n if card_lists[5].keys():\n min_delta_q = min(card_lists[5].keys())\n for action in card_lists[5][min_delta_q]:\n if action.size == 5:\n self._seq_solo5.append(action)\n else:\n self._other_seq.append(action)\n self._seq_solo5.sort(key=MAX_VALUE_CMP)\n\n self._merge_main_takes(self._planes, self._planes_take)\n self._merge_main_takes(self._singles[2], self._trios_take)\n self._merge_main_takes(self._singles[3], self._bombs_take)\n\n i = 0\n while i < len(self._bombs_take):\n if self._bombs_take[i].size <= 4:\n del self._bombs_take[i]\n else:\n i += 1\n\n @staticmethod\n def _choose_takes(take_list: List[np.ndarray], main_part: np.ndarray, take_count: int, split_pair: bool = False):\n\n main_part = np.concatenate([main_part] + take_list[:take_count])\n if split_pair:\n main_part = np.concatenate([main_part, take_list[take_count][:1]])\n\n return main_part\n\n def _merge_main_takes(self, main_list: List[np.ndarray], extended_target: List[np.ndarray]):\n \"\"\"\n 合并主要部分与带的牌\n \"\"\"\n main_take_list: List[np.ndarray] = []\n for main_part in main_list:\n\n # 防止main part带上自己的部分,例如 7 7 7不能带7\n temp_pairs: List[np.ndarray] = [i for i in self._singles[1] if i[0] not in np.unique(main_part)]\n temp_solos: List[np.ndarray] = [i for i in self._singles[0] if\n i[0] not in np.unique(main_part) and i[0] not in np.unique(temp_pairs)]\n\n take_count: int = math.ceil(main_part.size / 3)\n if len(temp_solos) >= take_count and len(temp_pairs) >= take_count:\n if np.mean(temp_solos) > np.mean(temp_pairs):\n main_take_list.append(self._choose_takes(temp_solos, main_part, take_count))\n else:\n main_take_list.append(self._choose_takes(temp_pairs, main_part, take_count))\n elif len(temp_pairs) >= take_count:\n main_take_list.append(self._choose_takes(temp_pairs, main_part, take_count))\n elif len(temp_solos) >= take_count:\n main_take_list.append(self._choose_takes(temp_solos, main_part, take_count))\n elif len(temp_solos) + 2 * len(temp_pairs) >= take_count:\n len_solos = len(temp_solos)\n main_part = self._choose_takes(temp_solos, main_part, len_solos)\n main_take_list.append(\n self._choose_takes(\n temp_pairs, main_part, (take_count - len_solos) // 2, (take_count - len_solos) % 2 == 1\n )\n )\n else:\n main_take_list.append(main_part)\n extended_target.extend(main_take_list)\n extended_target.sort(key=MOST_VALUE_CMP)\n\n @property\n def solos(self) -> List[np.ndarray]:\n \"\"\"单\"\"\"\n return self._singles[0]\n\n @property\n def pairs(self) -> List[np.ndarray]:\n \"\"\"对\"\"\"\n return self._singles[1]\n\n @property\n def trios(self) -> List[np.ndarray]:\n \"\"\"三\"\"\"\n return self._singles[2]\n\n @property\n def trios_take(self):\n \"\"\"三带M\"\"\"\n return self._trios_take\n\n @property\n def bombs(self) -> 
List[np.ndarray]:\n \"\"\"炸弹\"\"\"\n return self._singles[3]\n\n @property\n def bombs_take(self) -> List[np.ndarray]:\n \"\"\"四带2\"\"\"\n return self._bombs_take\n\n @property\n def planes(self) -> List[np.ndarray]:\n \"\"\"飞机(不带M)\"\"\"\n return self._planes\n\n @property\n def planes_take(self):\n \"\"\"飞机(带M)\"\"\"\n return self._planes_take\n\n @property\n def other_seq(self) -> List[np.ndarray]:\n \"\"\"其它各种序列\"\"\"\n return self._other_seq\n\n @property\n def seq_solo5(self) -> List[np.ndarray]:\n \"\"\"长度为5的单顺\"\"\"\n return self._seq_solo5\n\n @property\n def has_rocket(self) -> bool:\n \"\"\"是否有王炸\"\"\"\n return self._has_rocket\n\n @property\n def min_solo(self) -> int:\n \"\"\"强拆的最小单牌\"\"\"\n return self._min_solo\n\n @property\n def max_solo(self) -> int:\n \"\"\"强拆的最大单牌\"\"\"\n return self._max_solo\n\n def __repr__(self):\n return 'PlayHand: ' + repr(self.__dict__)\n\n\nclass PlayDecomposer(AbstractDecomposer):\n \"\"\"\n 基于贪心法的斗地主出牌时的拆牌算法。\n 出牌时仅考虑强行拆最大和最小的单牌。其余牌型均按照最佳拆牌给出。\n\n 定义c为一张牌,由斗地主规则可知,c ∈ [1, 15] ∩ Z+。\n 定义s表示当前玩家拥有的所有牌的序列,s = (c1, c2, ..., ci)。\n 定义a为一次符合斗地主规则的出牌的序列,a = (c1, c2, ..., ci)。\n\n 记s下满足规则的所有拆牌动作的集合为A_s,a∈A_s。\n\n 用函数D(a)来计算a拆牌的好坏。D(a)定义如下:\n D(a) = len(a) + max( max(len(a')) , 1) - 拆炸弹的数量\n\n 其中定义域a∈A,值域D(a)∈Z+, a' ∈ s - a\n D(a)越大,拆牌越合理。\n\n 算法如下:\n 1. 将s分成连续的若干段、二和大小王,例如(1,1,2,2,5,5,7,10,13,14)分成(1,1,2,2) (5,5,) (7) (10) (13) (14)\n 2. 将大小王和二加入到最佳拆牌序列A‘中\n 3. 对每一段序列si,计算不带牌的动作a的 D(a)\n 4. 合并主牌和带牌的D(a)\n 5. 输出argmax(D(a))\n \"\"\"\n\n def __init__(self):\n self.cards_q_maps_list: Optional[List[Dict[int, List[np.ndarray]]]] = None\n\n def _map_actions(self, actions, q_list, max_q: int, idx: int):\n for a, q in zip(actions, q_list):\n if max_q == q:\n self.cards_q_maps_list[idx][max_q - q].append(a)\n\n def get_good_plays(self, cards: np.ndarray) -> PlayHand:\n \"\"\"\n 获取较好的出牌行动。\n @param cards: 当前手牌。\n @return: 包含所有好的出牌类型的数组\n \"\"\"\n self._process_card(cards)\n self.cards_q_maps_list = [defaultdict(list), defaultdict(list),\n defaultdict(list), defaultdict(list),\n defaultdict(list), defaultdict(list)]\n\n play_hand = PlayHand(np.min(cards), np.max(cards))\n\n for lt2_state in self._lt2_states:\n if lt2_state.size > 0:\n max_q = self._get_all_actions_and_q_lists(lt2_state)\n i = 0\n for actions, q_list in zip(self._actions, self._q_lists):\n self._map_actions(actions, q_list, max_q, i)\n i += 1\n\n if self.cards_q_maps_list[0].keys() and self.cards_q_maps_list[1].keys():\n min_key = min(self.cards_q_maps_list[0].keys())\n min_key2 = min(self.cards_q_maps_list[1].keys())\n self.cards_q_maps_list[0][min_key] = [i for i in self.cards_q_maps_list[0][min_key] if\n i[0] not in np.unique(self.cards_q_maps_list[1][min_key2])]\n if self.card2_count:\n self.cards_q_maps_list[self.card2_count - 1][0].append(np.array([CARD_2] * self.card2_count))\n\n if self._ghosts.size == 2:\n play_hand._has_rocket = True\n elif self._ghosts.size == 1:\n self.cards_q_maps_list[0][0].append(self._ghosts)\n\n play_hand.add_to_hand(self.cards_q_maps_list)\n return play_hand\n\n\ndef get_next_state(state: np.ndarray, action: np.ndarray) -> np.ndarray:\n \"\"\"\n 获取状态做出动作后的的下一个状态\n @param state: 状态\n @param action: 动作\n @return: 下一个状态\n \"\"\"\n next_state = list(state)\n for card in action:\n next_state.remove(card)\n return np.array(next_state)\n\n\ndef _get_single_actions(state: np.ndarray, length: int) -> List[List[int]]:\n \"\"\"\n 获取所有单种牌面的动作(单,对,三,炸弹)\n @param state: 状态\n @param length: 动作长度\n \"\"\"\n result = []\n last_card = -1\n state = list(state)\n for i in 
range(length, len(state) + 1):\n if state[i - 1] == state[i - length] and state[i - 1] != last_card and (\n state.count(state[i - 1]) < 4 or length % 2 == 0):\n last_card = state[i - 1]\n result.append([last_card] * length)\n return result\n\n\ndef _get_seq_actions(card_list: list, kind: int, length: int) -> List[List[int]]:\n \"\"\"\n 获取顺子/连对/飞机/炸弹的动作(单,对,三,炸弹)\n \"\"\"\n result = []\n for i in range(length - 1, len(card_list)):\n if card_list[i] == card_list[i - length + 1] + length - 1:\n result.append(sorted(card_list[i - length + 1: i + 1] * kind))\n return result\n","repo_name":"jiangyinzuo/du-guai","sub_path":"duguai/ai/decompose.py","file_name":"decompose.py","file_ext":"py","file_size_in_byte":24705,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"60"} +{"seq_id":"41278982284","text":"import requests\n\nclass BomberFriends:\n\tdef __init__(self) -> None:\n\t\tself.api = \"https://e1e6.playfabapi.com\"\n\t\tself.headers = {\n\t\t\t\"user-agent\": \"Dalvik/2.1.0 (Linux; U; Android 7.1.2; ASUS_Z01QD Build/QKQ1.190825.002)\",\n\t\t\t\"x-playfabsdk\": \"Cocos2d-xSDK-0.40.180529\",\n\t\t\t\"content-type\": \"application/json\",\n\t\t\t\"connection\": \"keep_alive\"\n\t\t}\n\t\tself.user_id = None\n\t\tself.title_id = \"E1E6\"\n\t\tself.entity_token = None\n\t\tself.session_ticket = None\n\t\t\n\tdef login_with_custom_id(\n\t\t\tself,\n\t\t\tcustom_id: str,\n\t\t\tcreate_account: bool = True) -> dict:\n\t\tdata = {\n\t\t\t\"CreateAccount\": create_account,\n\t\t\t\"CustomId\": custom_id,\n\t\t\t\"TitleId\": self.title_id\n\t\t}\n\t\tresponse = requests.post(\n\t\t\tf\"{self.api}/Client/LoginWithCustomID\",\n\t\t\tjson=data,\n\t\t\theaders=self.headers).json()\n\t\tif \"SessionTicket\" in response[\"data\"]:\n\t\t\tself.custom_id = custom_id\n\t\t\tself.user_id = response[\"data\"][\"PlayFabId\"]\n\t\t\tself.session_ticket = response[\"data\"][\"SessionTicket\"]\n\t\t\tself.entity_token = response[\"data\"][\"EntityToken\"][\"EntityToken\"]\n\t\t\tself.headers[\"x-authorization\"] = self.session_ticket\n\t\t\tself.headers[\"x-entitytoken\"] = self.entity_token\n\t\treturn response\n\n\tdef get_entity_token(self) -> dict:\n\t\treturn requests.post(\n\t\t\tf\"{self.api}/Authentication/GetEntityToken\",\n\t\t\theaders=self.headers).json()\n\n\tdef get_title_data(\n\t\t\tself,\n\t\t\tkeys: list = [\"ScriptVersionAndroid\"]) -> dict:\n\t\tdata = {\n\t\t\t\"Keys\": keys\n\t\t}\n\t\treturn requests.post(\n\t\t\tf\"{self.api}/Client/GetTitleData\",\n\t\t\tjson=data,\n\t\t\theaders=self.headers).json()\n\n\tdef get_server_status(\n\t\t\tself,\n\t\t\tplatform: str = \"android\",\n\t\t\tgenerate_play_stream_event: bool = False,\n\t\t\trevision_selection: str = \"Live\") -> dict:\n\t\tdata = {\n\t\t\t\"FunctionName\": \"getServerStatus\",\n\t\t\t\"FunctionParameter\": {\n\t\t\t\t\"platform\": platform\n\t\t\t},\n\t\t\t\"GeneratePlayStreamEvent\": generate_play_stream_event,\n\t\t\t\"RevisionSelection\": revision_selection\n\t\t}\n\t\treturn requests.post(\n\t\t\tf\"{self.api}/Client/ExecuteCloudScript\",\n\t\t\tjson=data,\n\t\t\theaders=self.headers).json()\n\n\tdef update_display_name(self, display_name: str) -> dict:\n\t\tdata = {\n\t\t\t\"DisplayName\": display_name\n\t\t}\n\t\treturn requests.post(\n\t\t\tf\"{self.api}/Client/UpdateUserTitleDisplayName\",\n\t\t\tjson=data,\n\t\t\theaders=self.headers).json()\n\n\tdef get_leaderboard(\n\t\t\tself,\n\t\t\tmax_results_count: int = 100,\n\t\t\tstart_position: int = 0,\n\t\t\tstatistic_name: str = 
\"Trophies\",\n\t\t\tshow_avatar_url: bool = False,\n\t\t\tshow_banned_until: bool = False,\n\t\t\tshow_campaign_attributions: bool = False,\n\t\t\tshow_contact_email_addresses: bool = False,\n\t\t\tshow_created: bool = False,\n\t\t\tshow_display_name: bool = True,\n\t\t\tshow_last_login: bool = False,\n\t\t\tshow_linked_accounts: bool = False,\n\t\t\tshow_locations: bool = False,\n\t\t\tshow_memberships: bool = False,\n\t\t\tshow_origiation: bool = False,\n\t\t\tshow_push_notification_registrations: bool = False,\n\t\t\tshow_statistics: bool = False,\n\t\t\tshow_tags: bool = True,\n\t\t\tshow_total_value_to_data_in_usd: bool = False,\n\t\t\tshow_values_to_date: bool = False) -> dict:\n\t\tdata = {\n\t\t\t\"MaxResultsCount\": max_results_count,\n\t\t\t\"ProfileConstraints\": {\n\t\t\t\t\"ShowAvatarUrl\": show_avatar_url,\n\t\t\t\t\"ShowBannedUntil\": show_banned_until,\n\t\t\t\t\"ShowCampaignAttributions\": show_campaign_attributions,\n\t\t\t\t\"ShowContactEmailAddresses\": show_contact_email_addresses,\n\t\t\t\t\"ShowCreated\": show_created,\n\t\t\t\t\"ShowDisplayName\": show_display_name,\n \t\t\t\"ShowLastLogin\": show_last_login,\n \t\t\t\"ShowLinkedAccounts\": show_linked_accounts,\n \t\t\t\"ShowLocations\": show_locations,\n \t\t\t\"ShowMemberships\": show_memberships,\n \t\t\t\"ShowOrigination\": show_origiation,\n \t\t\t\"ShowPushNotificationRegistrations\": show_push_notification_registrations,\n \t\t\t\"ShowStatistics\": show_statistics,\n \t\t\t\"ShowTags\": show_tags,\n \t\t\t\"ShowTotalValueToDateInUsd\": show_total_value_to_data_in_usd,\n \t\t\t\"ShowValuesToDate\": show_values_to_date\n\t\t\t},\n\t\t\t\"StartPosition\": start_position,\n\t\t\t\"StatisticName\": statistic_name\n\t\t}\n\t\treturn requests.post(\n\t\t\tf\"{self.api}/Client/GetLeaderboard\",\n\t\t\tjson=data,\n\t\t\theaders=self.headers).json()\n\n\tdef claim_season_prize(\n\t\t\tself,\n\t\t\tname: str,\n\t\t\tid: int = 0,\n\t\t\tis_free: bool = True,\n\t\t\tgenerate_play_stream_event: bool = False,\n\t\t\trevision_selection: str = \"Live\") -> dict:\n\t\tdata = {\n\t\t\t\"FunctionName\": \"claimSeasonPrize\",\n\t\t\t\"FunctionParameter\": {\n\t\t\t\t\"id\": id,\n\t\t\t\t\"isFree\": is_free,\n\t\t\t\t\"name\": name\n\t\t\t},\n\t\t\t\"GeneratePlayStreamEvent\": generate_play_stream_event,\n\t\t\t\"RevisionSelection\": revision_selection\n\t\t}\n\t\treturn requests.post(\n\t\t\tf\"{self.api}/Client/ExecuteCloudScript\",\n\t\t\tjson=data,\n\t\t\theaders=self.headers).json()\n\n\tdef get_initial_data(\n\t\t\tself,\n\t\t\tversion: int = 756,\n\t\t\tplatform: str = \"android\",\n\t\t\tpd_datas: list = [\"deckslots\", \"dungeonrundata\", \"testidata\"],\n\t\t\trod_datas: list = [\"_clandata\"],\n\t\t\tgenerate_play_stream_event: bool = False,\n\t\t\trevision_selection: str = \"Live\") -> dict:\n\t\tdata = {\n\t\t\t\"FunctionName\": \"getInitialData\",\n\t\t\t\"FunctionParameter\": {\n\t\t\t\t\"version\": version,\n\t\t\t\t\"platform\": platform,\n\t\t\t\t\"pddatas\": pd_datas,\n\t\t\t\t\"roddatas\": rod_datas\n\t\t\t},\n\t\t\t\"GeneratePlayStreamEvent\": generate_play_stream_event,\n\t\t\t\"RevisionSelection\": revision_selection\n\t\t}\n\t\treturn requests.post(\n\t\t\tf\"{self.api}/Client/ExecuteCloudScript\",\n\t\t\tjson=data,\n\t\t\theaders=self.headers).json()\n\n\tdef claim_reward(\n\t\t\tself,\n\t\t\tid: int = 0,\n\t\t\treward_spin: bool = False,\n\t\t\tname: str = \"wheel\"\t,\n\t\t\tgenerate_play_stream_event: bool = False,\n\t\t\trevision_selection: str = \"Live\") -> dict:\n\t\tdata = 
{\n\t\t\t\"FunctionName\": \"claimReward\",\n\t\t\t\"FunctionParameter\": {\n\t\t\t\t\"id\": id,\n\t\t\t\t\"reward_spin\": reward_spin,\n\t\t\t\t\"name\": name\n\t\t\t},\n\t\t\t\"GeneratePlayStreamEvent\": generate_play_stream_event,\n\t\t\t\"RevisionSelection\": revision_selection\n\t\t}\n\t\treturn requests.post(\n\t\t\tf\"{self.api}/Client/ExecuteCloudScript\",\n\t\t\tjson=data,\n\t\t\theaders=self.headers).json()\n\n\tdef get_single_player_reward(\n\t\t\tself,\n\t\t\tlevel: int,\n\t\t\ttype: int = 0,\n\t\t\tgenerate_play_stream_event: bool = False,\n\t\t\trevision_selection: str = \"Live\") -> dict:\n\t\tdata = {\n\t\t\t\"FunctionName\": \"getSinglePlayerReward\",\n\t\t\t\"FunctionParameter\": {\n\t\t\t\t\"level\": level,\n\t\t\t\t\"type\": type\n\t\t\t},\n\t\t\t\"GeneratePlayStreamEvent\": generate_play_stream_event,\n\t\t\t\"RevisionSelection\": revision_selection\n\t\t}\n\t\treturn requests.post(\n\t\t\tf\"{self.api}/Client/ExecuteCloudScript\",\n\t\t\tjson=data,\n\t\t\theaders=self.headers).json()\n\n\tdef save_bomber_user_data(\n\t\t\tself,\n\t\t\tdata: str,\n\t\t\tgenerate_play_stream_event: bool = False,\n\t\t\trevision_selection: str = \"Live\") -> dict:\n\t\tdata = {\n\t\t\t\"FunctionName\": \"saveBomberUserData\",\n\t\t\t\"FunctionParameter\": {\n\t\t\t\t\"data\": data,\n\t\t\t},\n\t\t\t\"GeneratePlayStreamEvent\": generate_play_stream_event,\n\t\t\t\"RevisionSelection\": revision_selection\n\t\t}\n\t\treturn requests.post(\n\t\t\tf\"{self.api}/Client/ExecuteCloudScript\",\n\t\t\tjson=data,\n\t\t\theaders=self.headers).json()\n\n\tdef start_opening_slot(\n\t\t\tself,\n\t\t\tslot: int,\n\t\t\tgenerate_play_stream_event: bool = False,\n\t\t\trevision_selection: str = \"Live\") -> dict:\n\t\tdata = {\n\t\t\t\"FunctionName\": \"tryStartOpeningSlotChest\",\n\t\t\t\"FunctionParameter\": {\n\t\t\t\t\"Slot\": slot\n\t\t\t},\n\t\t\t\"GeneratePlayStreamEvent\": generate_play_stream_event,\n\t\t\t\"RevisionSelection\": revision_selection\n\t\t}\n\t\treturn requests.post(\n\t\t\tf\"{self.api}/Client/ExecuteCloudScript\",\n\t\t\tjson=data,\n\t\t\theaders=self.headers).json()\n\n\tdef open_slot(\n\t\t\tself,\n\t\t\tslot: int,\n\t\t\tcost: int = 0,\n\t\t\tgenerate_play_stream_event: bool = False,\n\t\t\trevision_selection: str = \"Live\") -> dict:\n\t\tdata = {\n\t\t\t\"FunctionName\": \"tryStartOpeningSlotChest\",\n\t\t\t\"FunctionParameter\": {\n\t\t\t\t\"Slot\": slot,\n\t\t\t\t\"cost\": cost\n\t\t\t},\n\t\t\t\"GeneratePlayStreamEvent\": generate_play_stream_event,\n\t\t\t\"RevisionSelection\": revision_selection\n\t\t}\n\t\treturn requests.post(\n\t\t\tf\"{self.api}/Client/ExecuteCloudScript\",\n\t\t\tjson=data,\n\t\t\theaders=self.headers).json()\n\n\tdef update_user_data(\n\t\t\tself,\n\t\t\tdata: dict,\n\t\t\tpermission: str = \"Private\") -> dict:\n\t\tdata = {\n\t\t\t\"Data\": data,\n\t\t\t\"Permission\": permission\n\t\t}\n\t\treturn requests.post(\n\t\t\tf\"{self.api}/Client/UpdateUserData\",\n\t\t\tjson=data,\n\t\t\theaders=self.headers).json()\n\n\tdef tutorial_won(\n\t\t\tself,\n\t\t\tstanding: int = 0,\n\t\t\tgenerate_play_stream_event: bool = False,\n\t\t\trevision_selection: str = \"Live\") -> dict:\n\t\tdata = {\n\t\t\t\"FunctionName\": \"tutorialWon\",\n\t\t\t\"FunctionParameter\": {\n\t\t\t\t\"standing\": standing\n\t\t\t},\n\t\t\t\"GeneratePlayStreamEvent\": generate_play_stream_event,\n\t\t\t\"RevisionSelection\": revision_selection\n\t\t}\n\t\treturn 
requests.post(\n\t\t\tf\"{self.api}/Client/ExecuteCloudScript\",\n\t\t\tjson=data,\n\t\t\theaders=self.headers).json()\n\n\tdef add_fashion_points(\n\t\t\tself,\n\t\t\tpoints: int,\n\t\t\tad: bool = False,\n\t\t\tgenerate_play_stream_event: bool = False,\n\t\t\trevision_selection: str = \"Live\") -> dict:\n\t\tdata = {\n\t\t\t\"FunctionName\": \"addFashionPoints\",\n\t\t\t\"FunctionParameter\": {\n\t\t\t\t\"points\": points,\n\t\t\t\t\"ad\": ad\n\t\t\t},\n\t\t\t\"GeneratePlayStreamEvent\": generate_play_stream_event,\n\t\t\t\"RevisionSelection\": revision_selection\n\t\t}\n\t\treturn requests.post(\n\t\t\tf\"{self.api}/Client/ExecuteCloudScript\",\n\t\t\tjson=data,\n\t\t\theaders=self.headers).json()\n\t\t\n\tdef get_user_data(self, user_id: str) -> dict:\n\t\tdata = {\n\t\t\t\"PlayFabId\": user_id\n\t\t}\n\t\treturn requests.post(\n\t\t\tf\"{self.api}/Client/GetUserData\",\n\t\t\tjson=data,\n\t\t\theaders=self.headers).json()\n\t\n\tdef get_player_statistics(self) -> dict:\n\t\treturn requests.post(\n\t\t\tf\"{self.api}/Client/GetPlayerStatistics\",\n\t\t\theaders=self.headers).json()\n\t\n\tdef get_inventory(self) -> dict:\n\t\treturn requests.post(\n\t\t\tf\"{self.api}/Client/GetUserInventory\",\n\t\t\theaders=self.headers).json()\n\t\n\n\tdef get_catalog_items(self) -> dict:\n\t\treturn requests.post(\n\t\t\tf\"{self.api}/Client/GetCatalogItems\",\n\t\t\theaders=self.headers).json()\n","repo_name":"zeviel/bomber_friends.py","sub_path":"src/bomber_friends.py","file_name":"bomber_friends.py","file_ext":"py","file_size_in_byte":9525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"32081428529","text":"import uuid\nfrom pathlib import Path\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import LabelEncoder\n\nfrom commons import CONFIGS_DIR, STATES_DIR\nfrom configuration import *\nfrom dataset_transforms import TransformsComposer, ToTensor, Rescale\nfrom classifier import Classifier\nfrom data_loader import DataLoader\nfrom m5 import M5\nfrom siamese import Siamese\nfrom similarity_classifier import SimilarityClassifier\nfrom similarity_dataset import SimilarityDataset\nfrom utils import create_results_directories\nfrom sample_logger import SampleLogger\n\nCONFIG_FILENAME = 'config.json'\nRESULTS_DIR = 'siamese_results/'\nSAMPLE_LOGGER_FILE = 'samples.json'\n\n\ndef main():\n config_filename = Path.cwd().joinpath(CONFIGS_DIR).joinpath(CONFIG_FILENAME)\n config = Configuration(config_filename)\n\n batch_size = 4\n epochs = 4\n\n results_dir_path = Path.cwd().joinpath(RESULTS_DIR)\n current_run_path = create_results_directories(results_dir_path)\n\n sample_logger_path = Path.cwd().joinpath(current_run_path).joinpath(SAMPLE_LOGGER_FILE)\n sample_logger = SampleLogger(sample_logger_path)\n\n transforms = TransformsComposer([Rescale(output_size=10000), ToTensor()])\n\n encoder = LabelEncoder()\n\n data_loader = DataLoader(config)\n x_train, y_train = data_loader.get_train_set()\n encoder.fit(y_train)\n\n classes = encoder.classes_\n classes_map = {}\n for i, category in enumerate(classes):\n classes_map[i] = category\n print(classes_map)\n\n y_train = encoder.transform(y_train)\n train_dataset = SimilarityDataset(x_train, y_train, classes_map, sample_logger, transforms)\n\n x_test, y_test = data_loader.get_test_set()\n y_test = encoder.transform(y_test)\n test_dataset = SimilarityDataset(x_test, y_test, classes_map, sample_logger, transforms)\n\n model = Siamese()\n\n 
states_dir = Path.cwd().joinpath(STATES_DIR)\n state_filename = f'{uuid.uuid1()}_state_{epochs}_epochs.pth'\n state_path = current_run_path.joinpath('best_snapshot').joinpath(state_filename)\n\n classifier = SimilarityClassifier(model=model, state_path=state_path)\n\n # Fit model on data\n train_loss_history, val_loss_history = classifier.fit(train_dataset, batch_size=batch_size, epochs=epochs,\n validation_data=test_dataset)\n\n sample_logger.save()\n\n # plt.figure()\n # plt.title(f'Model Loss for {epochs} epochs')\n # plt.xlabel('epoch')\n # plt.ylabel('loss')\n # plt.plot(train_loss_history, label='train')\n # plt.plot(val_loss_history, label='test')\n # plt.legend()\n # plt.show()\n\n predictions_path = Path.cwd().joinpath('./predicted.csv')\n validation_dataset = SimilarityDataset(x_test, y_test, classes_map, sample_logger, transforms)\n validation_model = Siamese(num_classes=len(classes_map))\n validation_classifier = SimilarityClassifier(validation_model, state_path=state_path)\n validation_classifier.predict(validation_dataset, batch_size=batch_size, output_filepath=predictions_path)\n\n\nif __name__ == '__main__':\n main()","repo_name":"Amnonop/voice-corruption-classifier","sub_path":"run_similarity.py","file_name":"run_similarity.py","file_ext":"py","file_size_in_byte":3109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"44607233294","text":"soma = cont = cont_mil = menor = 0\nbarato = ''\nprint('-' * 40)\nprint('\\tLOJA DO JENA')\nprint('-' * 40)\nwhile True:\n nome = str(input('Nome do Produto: '))\n preco = int(input('Preço: R$ '))\n cont += 1\n soma += preco\n if preco >= 1000:\n cont_mil += 1\n if cont == 1 or preco < menor: # maneira simplificada\n menor = preco\n barato = nome\n continuar = ' '\n while continuar not in 'SN':\n continuar = str(input('Quer continuar? [S/N] ')).strip().upper()[0]\n if continuar == 'N':\n break\nprint('{:-^40}'.format(' FIM DO PROGRAMA '))\nprint(f'O total da compra foi R${soma:.2f}')\nprint(f'Temos {cont_mil} produtos custando mais de R$1000.00')\nprint(f'O produto mais barato foi {barato} que custa R${menor:.2f}')\nprint()\n\nprodutos = [] # type: list\nprecos = [] # type: list\nwhile True:\n produto = str(input('Nome do produto: '))\n preco = int(input('Preço: R$ '))\n produtos.append(produto)\n precos.append(preco)\n continuar = ' '\n while continuar not in 'SN':\n continuar = str(input('Quer continuar? 
[S/N] ')).strip().upper()[0]\n    if continuar == 'N':\n        break\nprint('{:-^40}'.format(' FIM DO PROGRAMA '))\nprint(f'O total da compra foi R${sum(precos):.2f}')\nprint(f'Temos {len([i for i in precos if i > 1000])} produto(s) custando mais que R$1.000,00')  # noqa\nprint(f'O produto mais barato foi {produtos[precos.index(min(precos))]} e custou R${min(precos)}')  # noqa\n","repo_name":"JenaCarry/Curso-Python","sub_path":"desafios/desafio070.py","file_name":"desafio070.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"13913181820","text":"#!/usr/bin/env python3\n\n'''\n    Python script to convert YouTube XML subtitles to SRT format\n    It might be neater to use an XML library but I want to avoid \n    external dependencies, and pretending that it's not XML, it\n    resembles a fairly simple text file.\n    \n    Owain Kenway\n'''\n\n# Convert a numeric time in ms to format used in SRT\n# timestamp - the time in ms since start\n# delta - extra time for calculating durations.\ndef timeconvert(timestamp=0, delta=0):\n    wn = timestamp + delta\n    hours = int(wn/(3600 * 1000))\n    wn = wn - (hours * 3600 * 1000)\n    minutes = int(wn/(60 * 1000))\n    wn = wn - (minutes*60*1000)\n    seconds = int(wn/1000)\n    ms = wn-(seconds * 1000)\n\n    return (str(hours) + \":\" + str(minutes) + \":\" + str(seconds) + \",\" + str(ms))\n\n# Generate a title timestamp\n# SRT format for time is:\n# HH:mm:ss,ms --> HH:mm:ss,ms\ndef gents(timestamp=0, duration=0):\n\n    b = timeconvert(timestamp, 0)\n    e = timeconvert(timestamp, duration)\n\n    return(b + \" --> \" + e)\n\n# Generate a title from an XML line\n# SRT format is:\n# counter\n# HH:mm:ss,ms --> HH:mm:ss,ms\n# text\n# \\n\ndef processline(line=\"\", counter=0):\n\n# ignore empty lines\n    if line.strip() == \"\" :\n        return \"\"\n\n    t = 0\n    d = 0\n    text=\"\"\n\n# Tidy line - assumes title lines look like <p t=\"...\" d=\"...\">text</p>\n# (the tag handling below was lost to markup stripping and is reconstructed)\n    line = line.strip()\n    if (line[0:2] == \"<p\"):\n        line = line[3:]\n    if (line[-4:] == \"</p>\"):\n        line = line[:-4]\n    line = line.strip()\n\n# Split into block about timing, and text block.\n    tokens = line.split(\">\", 1)\n    preamble = tokens.pop(0)\n    \n# Work out times from the preamble\n    times = preamble.split(\" \")\n    t = int(times[0].strip(\"t=\").strip('\"').strip())\n    d = int(times[1].strip(\"d=\").strip('\"').strip())\n\n# The rest of the line is text.\n    text = tokens[0]\n\n# Replace some HTML entities.\n    text = text.replace(\"&#39;\", \"'\")\n    text = text.replace(\"&quot;\", '\"')\n\n    return (str(counter) + \"\\n\" + gents(t,d) + \"\\n\" + text + \"\\n\")\n\n# Process a file.\ndef processfile(filename):\n    data = \"\"\n    xf = open(filename, 'r')\n    fail=0\n\n# read in the file and skip lines that aren't titles\n# (header tag names assumed from YouTube's XML layout - the originals were lost to markup stripping)\n    for line in xf:\n        k = line.strip()\n\n        # ditch bad lines\n        if k.startswith(\"<?xml\"):\n            fail = fail + 1\n        elif k.startswith(\"<timedtext\"):\n            fail = fail + 1\n        elif k.startswith(\"<head\"):\n            fail = fail + 1\n        elif k.startswith(\"<p\"):\n            data = data + line\n\n    titles = data.split(\"</p>\")\n\n    output = \"\"\n\n# Process the titles one at a time\n    counter = 1\n    for a in titles:\n        output = output + \"\\n\" + processline(a, counter)\n        counter = counter + 1 \n\n    return output\n\n# Our normal main. 
\n# If we have an argument, convert, otherwise print basic usage info.\nif __name__ == '__main__':\n    import sys\n    \n    if len(sys.argv) > 1:\n        file=sys.argv[1]\n        print(processfile(file))\n    else:\n        print(\"Usage:\\n    \" + sys.argv[0] + \" <XML subtitle file>\")\n\n    \n","repo_name":"owainkenwayucl/utils","sub_path":"src/ytsrt.py","file_name":"ytsrt.py","file_ext":"py","file_size_in_byte":3227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"72283474431","text":"# \n# This source file is part of the FabSim software toolkit, which is distributed under the BSD 3-Clause license. \n# Please refer to LICENSE for detailed information regarding the licensing.\n#\n# This file contains common routines used in data analysis.\n\nimport numpy as np\nfrom scipy.optimize import leastsq\n\ndef derivatives(x, y):\n    num_x = len(x)\n    deriv = np.zeros(len(x))\n    \n    # If there are only two input points, use a straight line as the derivative.\n    if num_x == 2:\n        deriv[0] = (y[1] - y[0]) / (x[1] - x[0])\n        deriv[1] = deriv[0]\n        return deriv\n\n    # Calculate the derivatives for the interior points. This loop uses\n    # a total of 6 points to calculate the derivative at any one\n    # point. And when the loop moves along in increasing array\n    # position, the same data point is used three times. So instead of\n    # reading the correct value from the array three times, just shift\n    # the values down by copying them from one variable to the next.\n    xi = 2*x[0]-x[1] # 0.0\n    xj = x[0]\n    xk = x[1]\n    yi = 2*y[0]-y[1] # 0.0\n    yj = y[0]\n    yk = y[1]\n\n    for i in xrange(1, num_x-1): \n        xi = xj\n        xj = xk\n        xk = x[i+1]\n        yi = yj\n        yj = yk\n        yk = y[i+1]\n        r1 = (xk - xj)*(xk - xj) + (yk - yj)*(yk - yj)\n        r2 = (xj - xi)*(xj - xi) + (yj - yi)*(yj - yi)\n        deriv[i] = ( (yj - yi)*r1 + (yk - yj)*r2 ) / ( (xj - xi)*r1 + (xk - xj)*r2 )\n\n    # Calculate the derivative at the first point, (x(0),y(0)).\n    slope = (y[1] - y[0]) / (x[1] - x[0])\n    if ((slope >= 0) and (slope >= deriv[1])) or ((slope <= 0) and (slope <= deriv[1])):\n        deriv[0] = 2 * slope - deriv[1]\n    else:\n        deriv[0] = slope + (abs(slope) * (slope - deriv[1])) / (abs(slope) + abs(slope - deriv[1]))\n\n    # Calculate the derivative at the last point.\n    slope = (y[num_x-1] - y[num_x-2]) / (x[num_x-1] - x[num_x-2])\n    if ((slope >= 0) and (slope >= deriv[num_x-2])) or ((slope <= 0) and (slope <= deriv[num_x-2])):\n        deriv[num_x-1] = 2 * slope - deriv[num_x-2]\n    else:\n        deriv[num_x-1] = slope + (abs(slope) * (slope - deriv[num_x-2])) / (abs(slope) + abs(slope - deriv[num_x-2]) )\n\n    return deriv  \n\n\ndef get_centre_of_mass(molecule_particles, bounds):\n# calculate centre of mass of a sheet in a periodic box. \n# Becomes incorrect if any structure extends beyond 0.5 of the box size.\n    cm_rel = np.array(([0.0, 0.0, 0.0 ]))\n    rp = molecule_particles[0] #reference particle\n    \n    for p in molecule_particles:\n        \n        for i in xrange(0,3):\n            a = p[i] - rp[i]\n            if a > 0.5 * bounds[i]:\n                a = p[i] - rp[i] - bounds[i]\n            elif a < -0.5 * bounds[i]:\n                a = p[i] - rp[i] + bounds[i]\n            cm_rel[i] += a\n    \n    cm_rel = cm_rel / len(molecule_particles)  \n    cm = rp + cm_rel\n    \n    cm[0] = cm[0] %bounds[0]\n    cm[1] = cm[1] %bounds[1]\n    cm[2] = cm[2] %bounds[2]\n    \n    #print cm\n    #import sys\n    #sys.exit()\n    \n    return cm\n    \n\ndef f_min(X,p):\n    plane_xyz = p[0:3]\n    distance = (plane_xyz*X.T).sum(axis=1) + p[3]\n    return distance / np.linalg.norm(plane_xyz)  \n\ndef residuals(params, signal, X):\n    return f_min(X, params)\n    \ndef get_fitting_plane(points):\n# returns a,b,c,d in ax+by+cz+d=0. 
a,b,c are also the normal.\n\n    pointsT = points.transpose()\n    # Initial guess of the plane\n    diff = points[0] - points[-1]\n    \n    p0 = np.array(([diff[0], diff[1], diff[2], 1.]))\n\n    sol = leastsq(residuals, p0, args=(None, pointsT))[0]\n\n    #print \"Solution: \", sol\n    #print \"Old Error: \", (f_min(pointsT, p0)**2).sum()\n    #print \"New Error: \", (f_min(pointsT, sol)**2).sum()\n    \n    return sol\n    \n    \ndef unit_vector(vector):\n    \"\"\" Returns the unit vector of the vector. \"\"\"\n    return vector / np.linalg.norm(vector)\n\ndef angle_between(v1, v2):\n# Returns the angle in radians between vectors 'v1' and 'v2'.\n    v1_u = unit_vector(v1)\n    v2_u = unit_vector(v2)\n    angle = np.arccos(np.dot(v1_u, v2_u))\n    if np.isnan(angle):\n        if (v1_u == v2_u).all():\n            return 0.0\n        else:\n            return np.pi\n    return angle  \n","repo_name":"UCL-CCS/FabSim","sub_path":"python/lib/DataAnalysis.py","file_name":"DataAnalysis.py","file_ext":"py","file_size_in_byte":3935,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"60"}
{"seq_id":"22812272092","text":"# import the required PySide6 classes\r\nfrom unicodedata import category\r\nfrom PySide6.QtWidgets import QWidget\r\nfrom PySide6.QtCore import Qt\r\nfrom PySide6.QtGui import QPixmap\r\n# import the GeneralCustomUi and Detail_widget classes from the general_custom_ui and ui_maquina_detail_window scripts\r\nfrom view.general_custom_ui import GeneralCustomUi\r\nfrom view.ui_maquina_detail_window import Detail_widget\r\n# import the maquina module from the database package\r\nfrom database import maquina\r\n\r\n# the DetailWindowForm class holds some extra data about the machine\r\nclass DetailWindowForm(QWidget, Detail_widget):\r\n    def __init__(self, parent=None, maquina_id=None):\r\n        super().__init__(parent)\r\n\r\n        self.maquina_id = maquina_id\r\n        print(\"en maquinas\", maquina_id)\r\n        self.setupUi(self)\r\n        self.ui = GeneralCustomUi(self)\r\n        self.setWindowFlag(Qt.Window)\r\n        self.fill_widgets()\r\n    # handler for the mouse click event\r\n    def mousePressEvent(self, event):\r\n        self.ui.mouse_press_event(event)\r\n    # fill the empty widgets with data for the given ID\r\n    def fill_widgets(self):\r\n        data = maquina.select_by_id(self.maquina_id)\r\n        title = data[1]\r\n        url = data[5]\r\n        img_path = data[4]\r\n        print(\"este si\", data)\r\n        # set the data on the detail labels\r\n        self.maquine_title_label.setText(title)\r\n        self.set_manual_url(url)\r\n        self.set_maquina_image(img_path)\r\n    \r\n    # make the URL label open the manual's web page\r\n    def set_manual_url(self, url):\r\n        url = f\"{url}\"\r\n        self.maquina_cat_label.setOpenExternalLinks(True)\r\n        self.maquina_cat_label.setText(url)\r\n    # set the machine image\r\n    def set_maquina_image(self, img_path):\r\n        pix_map = QPixmap(img_path)\r\n        self.maquina_pic_label.setPixmap(pix_map)\r\n        self.maquina_pic_label.setScaledContents(True)","repo_name":"Codigo-Mto/codigo_mto","sub_path":"controllers/maquina_details_window.py","file_name":"maquina_details_window.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"37058388520","text":"#############################################################################################\n# SCRIPT INFORMATION\n#############################################################################################\n\n# LICENSE INFORMATION:\n# ---------------------\n# graphml_to_json.py\n# Converts a file in 
GraphML format to a json format compatible with the design dependency visualisation tool \n# Authors: Jérémy Bonvoisin\n# License: Apache License, Version 2.0, January 2004, http://www.apache.org/licenses/\n\n# PREREQUISITES: \n# ---------------\n# - read and write access in the current directory\n# - non standard libraries (install with \"pip install <library>\"):\n#   . NetworkX (https://networkx.github.io/documentation/stable/reference/index.html)\n\n# ARGUMENTS:\n# -----------\n# Mandatory: \"-i\" followed by the path of the file to be processed\n\nimport getopt\nimport networkx as nx\nimport sys\nimport pdb\nimport os\n\n\n# read command arguments\nfilename = \"\"\ntry:\n\toptions, remainder = getopt.getopt(sys.argv[1:], 'i:')\n\tfor opt, arg in options:\n\t\tif opt == \"-i\":\n\t\t\tfilename = arg\nexcept getopt.GetoptError as err:\n\tprint(str(err))\n\tsys.exit(2)\nif filename == \"\":\n\tprint (\"error: no reference of the file to be converted has been given. please use option '-i <path>'\")\n\t\n# open graphml file\nnxGraph = nx.read_graphml(filename)\n\n# parse nodes\nindex = 0\nnodeListGraphml = nxGraph.nodes(data=True)\nnumberOfNodes = len(nodeListGraphml)\nnodeRefDict = {}\nnodeListJson = []\nprint (str(numberOfNodes) + \" nodes found\")\nfor node in nodeListGraphml:\n\tnodeRefDict[node[0]] = index\n\ttry:\n\t\tlabel = node[1]['label']\n\texcept KeyError as err:\n\t\tprint (\"error : node \" +str(index)+ \" has no label: \" + str(node))\n\t\tprint(str(err))\n\t\tsys.exit(2)\n\tif 'description' in node[1]:\n\t\tdescription = node[1]['description']\n\telse:\n\t\tdescription = \"\"\n\tnodeDict = {}\n\tnodeDict['id'] = index\n\tnodeDict['label'] = label.replace('\"', '*')\n\tnodeDict['description'] = description.replace('\"', '*')\n\tnodeDict['shape'] = \"ellipse\"\n\tnodeDict['color'] = \"rgba(80,80,80,0.6)\"\n\tnodeDict['tags'] = []\n\tnodeListJson.append(nodeDict)\n\tindex += 1\n\n# parse edges\nindex = 0\nedgeListJson = []\nedgeListGraphML = nxGraph.edges(data=True)\nnumberOfEdges = len(edgeListGraphML)\nprint (str(numberOfEdges) + \" edges found\")\nfor edge in edgeListGraphML:\n\tsource = edge[0]\n\ttarget = edge[1]\n\tedgeDict = {}\n\tedgeDict['id'] = index\n\tedgeDict['from'] = nodeRefDict[source]\n\tedgeDict['to'] = nodeRefDict[target]\n\tedgeDict['label'] = \"+\"\n\tedgeListJson.append(edgeDict)\n\tindex += 1\n\t\n# export JSON\njsonDict = {}\njsonDict['nodes'] = nodeListJson\njsonDict['edges'] = edgeListJson\n\noutputFileName = os.path.splitext(filename)[0]+'.json'\nwith open(outputFileName, 'w') as output:\n\toutput.write(str(jsonDict).replace(\"'\", '\"'))\n\nprint (outputFileName+\" extracted successfully\")","repo_name":"jbon/design-dependencies","sub_path":"util/graphml_to_json.py","file_name":"graphml_to_json.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"21465472487","text":"'''\nCreated on Apr 16, 2021\n\n@author: yann\n\nModule that correctly sets up the environment's variables before really going into it.\n'''\n\nimport os\nimport sys\nimport nltk\n\nMODULES_PATHS = [r\"/controller\",\n                 r\"/model\",\n                 r\"/model/analysis\",\n                 r\"/model/local_io\",\n                 r\"/model/data_extraction\",\n                 r\"/view\",\n                 r\"/setup\"]\n\nif getattr(sys, 'frozen', False):\n    # If the application is run as a bundle, the PyInstaller bootloader\n    # extends the sys module by a flag frozen=True and sets the app\n    # path into variable _MEIPASS'.\n    APPLICATION_PATH = sys._MEIPASS\nelse:\n    APPLICATION_PATH = 
os.path.dirname(os.path.abspath(__file__))\n\nif APPLICATION_PATH:\n    print(\"cwd: \" + APPLICATION_PATH)\n    os.chdir(APPLICATION_PATH)\n\nfor rel_path in MODULES_PATHS:  # correctly adds our modules to the PYTHONPATH\n    abs_path = APPLICATION_PATH + rel_path\n    if abs_path not in sys.path:\n        sys.path.append(abs_path)\n\nnltk_files = [(r\"tokenizers/punkt\", 'punkt'),\n              (r\"corpora/stopwords\", 'stopwords'),\n              (r\"corpora/movie_reviews\", 'movie_reviews'),\n              (r\"corpora/twitter_samples\", 'twitter_samples')]\n\nfor nltk_file in nltk_files:  # if files are missing because the user has not downloaded them yet\n    location, name = nltk_file\n    try:\n        nltk.data.find(location)\n    except LookupError:\n        nltk.download(name)\n","repo_name":"yanntrividic/sentiment-analysis-twitter-imdb-csv","sub_path":"src/_init.py","file_name":"_init.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"18768193345","text":"def arithmetic_arranger(problems, showResult=False):\n\n    formatted_operations = \"\"\n\n    first_line = \"\"\n    second_line = \"\"\n    third_line = \"\"\n    fourth_line = \"\"\n\n    if(len(problems) > 5):\n        return \"Error: Too many problems.\"\n\n    for index, problem in enumerate(problems):\n        splited_problem = problem.split()\n        first_number, second_number = splited_problem[0], splited_problem[2]\n        operator = splited_problem[1]\n\n        try:\n            int(first_number)\n            int(second_number)\n        except:\n            return \"Error: Numbers must only contain digits.\"\n\n        if operator != \"+\" and operator != \"-\":\n            return(f\"Error: Operator must be '+' or '-'.\")\n\n        if len(first_number) > 4 or len(second_number) > 4:\n            return \"Error: Numbers cannot be more than four digits.\"\n\n        separator_number = max([len(first_number), len(second_number)]) + 2\n        if index + 1 < len(problems):\n            third_line += (\"-\" * separator_number) + \" \"\n            first_line += f\"{' '* (separator_number - len(first_number))}{first_number} \"\n            second_line += f\"{operator}{' ' * (((separator_number - len(second_number)))-1)}{second_number} \"\n        else:\n            third_line += (\"-\" * separator_number)\n            first_line += f\"{' '* (separator_number - len(first_number))}{first_number}\"\n            second_line += f\"{operator}{' ' * (((separator_number - len(second_number)))-1)}{second_number}\"\n\n        formatted_operations = f\"{first_line}\\n{second_line}\\n{third_line}\"\n\n        if showResult:\n            mayor = max([len(first_number), len(second_number)])\n            result = 0\n            if operator == \"+\":\n                result = int(first_number) + int(second_number)\n            else:\n                result = int(first_number) - int(second_number)\n\n            if index + 1 < len(problems):\n                if result < 0:\n                    fourth_line += f\"{' ' * (((separator_number - mayor))-1)}{result} \"\n                else:\n                    fourth_line += f\"{' ' * ((separator_number - mayor))}{result} \"\n            else:\n                fourth_line += f\"{' ' * ((separator_number - mayor)-1)}{result}\"\n\n            formatted_operations += f\"\\n{fourth_line}\"\n\n    return formatted_operations\n\n\n# print(arithmetic_arranger([\"32 + 698\", \"3801 - 2\", \"45 + 43\", \"123 + 49\"]))\n# print(arithmetic_arranger(\n#     [\"32 + 8\", \"1 - 3801\", \"9999 + 9999\", \"523 - 49\"], True))\n\n# print(arithmetic_arranger([\"3801 - 2\", \"123 + 49\"]))\n\n# print(arithmetic_arranger(['3 + 855', '988 + 40'], True))\n\n# print(arithmetic_arranger(['32 - 698', '1 - 3801',\n#                           '45 + 43', '123 + 49', '988 + 40'], True))\n","repo_name":"Robertron624/FCC-python-computing","sub_path":"Arithmetic 
Formatter/arithmetic_calculator.py","file_name":"arithmetic_calculator.py","file_ext":"py","file_size_in_byte":2702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"33063170926","text":"from abc import abstractmethod\n\nfrom quantum.api.v2 import attributes as attr\nfrom quantum.api.v2 import base\nfrom quantum.common import exceptions as qexception\nfrom quantum.extensions import extensions\nfrom quantum import manager\nfrom quantum.openstack.common import cfg\nfrom quantum import quota\n\n\n# Security group Exceptions\nclass SecurityGroupAlreadyExists(qexception.InUse):\n    # This can only happen if the external_id database is cleared\n    message = _(\"Security group %(name)s id %(external_id)s already exists\")\n\n\nclass SecurityGroupInvalidProtocolType(qexception.InvalidInput):\n    message = _(\"Invalid protocol type %(value)s\")\n\n\nclass SecurityGroupInvalidEtherType(qexception.InvalidInput):\n    message = _(\"Invalid/Unsupported ethertype %(value)s\")\n\n\nclass SecurityGroupInvalidPortRange(qexception.InvalidInput):\n    message = _(\"For TCP/UDP protocols, port_range_min must be \"\n                \"<= port_range_max\")\n\n\nclass SecurityGroupInvalidPortValue(qexception.InvalidInput):\n    message = _(\"Invalid value for port %(port)s\")\n\n\nclass SecurityGroupInUse(qexception.InUse):\n    message = _(\"Security Group %(id)s in use.\")\n\n\nclass SecurityGroupCannotRemoveDefault(qexception.InUse):\n    message = _(\"Removing default security group not allowed.\")\n\n\nclass SecurityGroupDefaultAlreadyExists(qexception.InUse):\n    message = _(\"Default security group already exists.\")\n\n\nclass SecurityGroupRuleInvalidProtocol(qexception.InUse):\n    message = _(\"Security group rule protocol %(protocol)s not supported \"\n                \"only protocol values %(values)s supported.\")\n\n\nclass SecurityGroupRulesNotSingleTenant(qexception.InvalidInput):\n    message = _(\"Multiple tenant_ids in bulk security group rule create\"\n                \" not allowed\")\n\n\nclass SecurityGroupSourceGroupAndIpPrefix(qexception.InvalidInput):\n    message = _(\"Only source_ip_prefix or source_group_id may \"\n                \"be provided.\")\n\n\nclass SecurityGroupProtocolRequiredWithPorts(qexception.InvalidInput):\n    message = _(\"Must also specify protocol if port range is given.\")\n\n\nclass SecurityGroupNotSingleGroupRules(qexception.InvalidInput):\n    message = _(\"Only allowed to update rules for \"\n                \"one security profile at a time\")\n\n\nclass SecurityGroupSourceGroupNotFound(qexception.NotFound):\n    message = _(\"source group id %(id)s does not exist\")\n\n\nclass SecurityGroupNotFound(qexception.NotFound):\n    message = _(\"Security group %(id)s does not exist\")\n\n\nclass SecurityGroupRuleNotFound(qexception.NotFound):\n    message = _(\"Security group rule %(id)s does not exist\")\n\n\nclass DuplicateSecurityGroupRuleInPost(qexception.InUse):\n    message = _(\"Duplicate Security Group Rule in POST.\")\n\n\nclass SecurityGroupRuleExists(qexception.InUse):\n    message = _(\"Security group rule exists %(rule)s\")\n\n\nclass SecurityGroupProxyMode(qexception.InUse):\n    message = _(\"Did not receive external id and in proxy mode\")\n\n\nclass SecurityGroupNotProxyMode(qexception.InUse):\n    message = _(\"Received external id and not in proxy mode\")\n\n\nclass SecurityGroupProxyModeNotAdmin(qexception.InvalidExtenstionEnv):\n    message = _(\"In Proxy Mode and not from admin\")\n\n\nclass SecurityGroupInvalidExternalID(qexception.InvalidInput):\n    message = _(\"external_id wrong type %(data)s\")\n\n\ndef 
convert_validate_port_value(port):\n if port is None:\n return port\n try:\n val = int(port)\n except (ValueError, TypeError):\n raise SecurityGroupInvalidPortValue(port=port)\n\n if val >= 0 and val <= 65535:\n return val\n else:\n raise SecurityGroupInvalidPortValue(port=port)\n\n\ndef _validate_name_not_default(data, valid_values=None):\n if not cfg.CONF.SECURITYGROUP.proxy_mode and data == \"default\":\n raise SecurityGroupDefaultAlreadyExists()\n\n\ndef _validate_external_id_and_mode(external_id, valid_values=None):\n if not cfg.CONF.SECURITYGROUP.proxy_mode and not external_id:\n return\n elif not cfg.CONF.SECURITYGROUP.proxy_mode and external_id:\n raise SecurityGroupNotProxyMode()\n try:\n int(external_id)\n except (ValueError, TypeError):\n raise SecurityGroupInvalidExternalID(data=external_id)\n if cfg.CONF.SECURITYGROUP.proxy_mode and not external_id:\n raise SecurityGroupProxyMode()\n\nattr.validators['type:name_not_default'] = _validate_name_not_default\nattr.validators['type:external_id_and_mode'] = _validate_external_id_and_mode\n\n# Attribute Map\nRESOURCE_ATTRIBUTE_MAP = {\n 'security_groups': {\n 'id': {'allow_post': False, 'allow_put': False,\n 'validate': {'type:regex': attr.UUID_PATTERN},\n 'is_visible': True},\n 'name': {'allow_post': True, 'allow_put': False,\n 'is_visible': True, 'default': '',\n 'validate': {'type:name_not_default': None}},\n 'description': {'allow_post': True, 'allow_put': False,\n 'is_visible': True, 'default': ''},\n 'external_id': {'allow_post': True, 'allow_put': False,\n 'is_visible': True, 'default': None,\n 'validate': {'type:external_id_and_mode': None}},\n 'tenant_id': {'allow_post': True, 'allow_put': False,\n 'required_by_policy': True,\n 'is_visible': True},\n },\n 'security_group_rules': {\n 'id': {'allow_post': False, 'allow_put': False,\n 'validate': {'type:regex': attr.UUID_PATTERN},\n 'is_visible': True},\n # external_id can be used to be backwards compatible with nova\n 'external_id': {'allow_post': True, 'allow_put': False,\n 'is_visible': True, 'default': None,\n 'validate': {'type:external_id_and_mode': None}},\n 'security_group_id': {'allow_post': True, 'allow_put': False,\n 'is_visible': True, 'required_by_policy': True},\n 'source_group_id': {'allow_post': True, 'allow_put': False,\n 'default': None, 'is_visible': True},\n 'direction': {'allow_post': True, 'allow_put': True,\n 'is_visible': True,\n 'validate': {'type:values': ['ingress', 'egress']}},\n 'protocol': {'allow_post': True, 'allow_put': False,\n 'is_visible': True, 'default': None},\n 'port_range_min': {'allow_post': True, 'allow_put': False,\n 'convert_to': convert_validate_port_value,\n 'default': None, 'is_visible': True},\n 'port_range_max': {'allow_post': True, 'allow_put': False,\n 'convert_to': convert_validate_port_value,\n 'default': None, 'is_visible': True},\n 'ethertype': {'allow_post': True, 'allow_put': False,\n 'is_visible': True, 'default': 'IPv4'},\n 'source_ip_prefix': {'allow_post': True, 'allow_put': False,\n 'default': None, 'is_visible': True},\n 'tenant_id': {'allow_post': True, 'allow_put': False,\n 'required_by_policy': True,\n 'is_visible': True},\n }\n}\n\n\nSECURITYGROUP = 'security_groups'\nEXTENDED_ATTRIBUTES_2_0 = {\n 'ports': {SECURITYGROUP: {'allow_post': True,\n 'allow_put': True,\n 'is_visible': True,\n 'default': None}}}\nsecurity_group_quota_opts = [\n cfg.IntOpt('quota_security_group',\n default=10,\n help='number of security groups allowed per tenant,'\n '-1 for unlimited'),\n cfg.IntOpt('quota_security_group_rule',\n 
default=100,\n help='number of security rules allowed per tenant, '\n '-1 for unlimited'),\n]\ncfg.CONF.register_opts(security_group_quota_opts, 'QUOTAS')\n\nsecurity_group_opts = [\n cfg.StrOpt('proxy_mode', default=False)\n]\ncfg.CONF.register_opts(security_group_opts, 'SECURITYGROUP')\n\n\nclass Securitygroup(object):\n \"\"\" Security group extension\"\"\"\n\n @classmethod\n def get_name(cls):\n return \"security-group\"\n\n @classmethod\n def get_alias(cls):\n return \"security-group\"\n\n @classmethod\n def get_description(cls):\n return \"The security groups extension.\"\n\n @classmethod\n def get_namespace(cls):\n # todo\n return \"http://docs.openstack.org/ext/securitygroups/api/v2.0\"\n\n @classmethod\n def get_updated(cls):\n return \"2012-10-05T10:00:00-00:00\"\n\n @classmethod\n def get_resources(cls):\n \"\"\" Returns Ext Resources \"\"\"\n exts = []\n plugin = manager.QuantumManager.get_plugin()\n for resource_name in ['security_group', 'security_group_rule']:\n collection_name = resource_name.replace('_', '-') + \"s\"\n params = RESOURCE_ATTRIBUTE_MAP.get(resource_name + \"s\", dict())\n quota.QUOTAS.register_resource_by_name(resource_name)\n controller = base.create_resource(collection_name,\n resource_name,\n plugin, params, allow_bulk=True)\n\n ex = extensions.ResourceExtension(collection_name,\n controller)\n exts.append(ex)\n\n return exts\n\n def get_extended_resources(self, version):\n if version == \"2.0\":\n return EXTENDED_ATTRIBUTES_2_0\n else:\n return {}\n\n\nclass SecurityGroupPluginBase(object):\n @abstractmethod\n def create_security_group(self, context, security_group):\n pass\n\n @abstractmethod\n def delete_security_group(self, context, security_group):\n pass\n\n @abstractmethod\n def update_security_group(self, context, security_group):\n pass\n\n @abstractmethod\n def get_security_groups(self, context, filters=None, fields=None):\n pass\n\n @abstractmethod\n def get_security_group(self, context, id, fields=None):\n pass\n\n @abstractmethod\n def create_security_group_rule(self, context, security_group_rule):\n pass\n\n @abstractmethod\n def delete_security_group_rule(self, context, sgrid):\n pass\n\n @abstractmethod\n def get_security_group_rules(self, context, filters=None, fields=None):\n pass\n\n @abstractmethod\n def get_security_group_rule(self, context, id, fields=None):\n pass\n","repo_name":"ruijie/quantum","sub_path":"quantum/extensions/securitygroup.py","file_name":"securitygroup.py","file_ext":"py","file_size_in_byte":10429,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"26776369146","text":"import cv2\r\nimport numpy as np\r\nimport win32gui\r\nimport win32ui\r\nimport win32con\r\nimport win32api\r\n\r\ndef get_fullscreen_size():\r\n width = win32api.GetSystemMetrics(win32con.SM_CXVIRTUALSCREEN)\r\n height = win32api.GetSystemMetrics(win32con.SM_CYVIRTUALSCREEN)\r\n left = win32api.GetSystemMetrics(win32con.SM_XVIRTUALSCREEN)\r\n top = win32api.GetSystemMetrics(win32con.SM_YVIRTUALSCREEN)\r\n return left, top, width, height\r\n\r\n\r\ndef get_hwnd(target=None, class_=None):\r\n return win32gui.FindWindow(class_, target)\r\n\r\n\r\ndef get_window_size(hWnd):\r\n if hWnd is 0:\r\n return None, None, None, None\r\n left, top, right, bot = win32gui.GetWindowRect(hWnd)\r\n width = right - left\r\n height = bot - top\r\n return left, top, width, height\r\n\r\n\r\ndef capture_app_win32(hWnd):\r\n if hWnd == 0:\r\n return None, None, None, None, None\r\n left, top, width, height = 
get_window_size(hWnd)\r\n    hWndDC = win32gui.GetWindowDC(hWnd)\r\n    mfcDC = win32ui.CreateDCFromHandle(hWndDC)\r\n    saveDC = mfcDC.CreateCompatibleDC()\r\n    saveBitMap = win32ui.CreateBitmap()\r\n    saveBitMap.CreateCompatibleBitmap(mfcDC, width, height)\r\n    saveDC.SelectObject(saveBitMap)\r\n    saveDC.BitBlt((0, 0), (width, height), mfcDC, (0, 0), win32con.SRCCOPY)\r\n    signedIntsArray = saveBitMap.GetBitmapBits(True)\r\n    img = np.frombuffer(signedIntsArray, dtype='uint8')\r\n    img.shape = (height, width, 4)\r\n    scs_img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)\r\n    win32gui.DeleteObject(saveBitMap.GetHandle())\r\n    saveDC.DeleteDC()\r\n    mfcDC.DeleteDC()\r\n    win32gui.ReleaseDC(hWnd, hWndDC)\r\n    return scs_img, left, top, width, height","repo_name":"wqy224491/MSR-GAMING-ASSISTANT","sub_path":"capture.py","file_name":"capture.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"39031204768","text":"import axelrod as axl\n\ntrainer = axl.DQN()\n\ndef play_with(opp, num):\n    players = [opp, trainer]\n    match = axl.Match(players, 200)\n    trainer.receive_match_attributes()\n    for i in range(num):\n        match.play()\n    print (trainer.replay_memory)\n\ntemp = axl.CyclerDC()\nplay_with(temp, 1)\ntemp = axl.TitForTats()\nplay_with(temp, 30)\n\n\n#players = [axl.AntiTitForTat(),\n#           axl.DQN_tester(), \n#           axl.Alternator(), \n#           axl.TitForTat(), \n#           axl.Bully(), \n#           axl.Cooperator(), \n#           axl.CyclerDC(), \n#           axl.Defector(), \n#           axl.SuspiciousTitForTat()] \n\n#turns = 200\n#tournament = axl.Tournament(players, turns=turns, repetitions=1, seed=75)\n#results = tournament.play()\n#for average_score_per_turn in results.payoff_matrix[-2]:\n#    print(round(average_score_per_turn, 3))","repo_name":"wogjs3503/axelrod_DQN","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"2934858724","text":"import argparse\nfrom pip._internal.commands import create_command\n\n# Depending on the version of pip/pip-tools this has a different name.\ntry:\n    from pip._internal.utils.misc import get_installed_distributions\n    get_installed_dists = get_installed_distributions\nexcept ImportError:\n    from piptools.scripts.sync import _get_installed_distributions\n    get_installed_dists = _get_installed_distributions\n\nfrom piptools import sync\nfrom piptools._compat.pip_compat import parse_requirements\n\ndef main(args):\n    \"\"\" Get installed pip packages, compare them to the passed `requirements` file,\n    install missing packages, uninstall packages not needed anymore\n    \"\"\" \n    install_command = create_command(\"install\")\n    options, _ = install_command.parse_args([])\n    session = install_command._build_session(options)\n    finder = install_command._build_package_finder(options=options, session=session)\n\n    requirements = parse_requirements(args.requirement_file, finder=finder, session=session)\n    \n    installed_dists = get_installed_dists(paths=[args.site_path])\n    to_install, to_uninstall = sync.diff(requirements, installed_dists)\n    sync.sync(to_install, to_uninstall)\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('requirement_file', help='requirements.txt file')\n    # Don't pick up the system packages, only the ones in *our* site packages\n    parser.add_argument('site_path', help='path to the isolated site-packages')\n    args = parser.parse_args()\n\n    
main(args)\n\n","repo_name":"WeakKnight/GDC23_PracticalMobileRendering","sub_path":"PracticalMobileRendering/LocalPackages/com.unity.scripting.python/Editor/PipPackages/update_packages.py","file_name":"update_packages.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","stars":272,"dataset":"github-code","pt":"60"}
{"seq_id":"3343410704","text":"# compute the series with n terms: S = 37*38/1 - 36*37/2 + 35*36/3 - 34*35/4 + ...\n\ndef serie (n, num1 = 37, num2 = 38, den = 1):\n    \n    termo = (num1*num2)/den\n\n    if den % 2 == 0:\n        termo = - termo \n    \n    if n > 1:\n        termo = termo + serie(n-1,num1-1,num2-1,den+1)\n\n\n\n\n    return termo\n\n\n\n\nn = int(input('Digite a quantidade de termos da série: '))\nwhile n <= 0 or n >= 37:\n    n = int(input('ERRO. Digite a quantidade de termos positivo menor que 37: '))\n\nsoma = serie(n)\n\nprint(f'A soma da série com {n} termos é igual a {soma}')\n\n    \n\n","repo_name":"luizppbarbosa/UFPE-Introducao-a-Programacao","sub_path":"Arquivos/slide 11 exemplo 1.py","file_name":"slide 11 exemplo 1.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"8922430194","text":"'''\n    @author Sourabh Gome\n    Time complexity : O(n log(n) + m log(m)) \n    Space complexity : O(1)\n'''\n\ndef smallestDifference(a,b):\n    a.sort()\n    b.sort()\n    smallest_dif=abs(a[0]-b[0])\n    result=[a[0], b[0]]\n    i=1\n    j=1\n    while i < len(a) and j < len(b):\n        if a[i] < b[j]:\n            if smallest_dif > abs(a[i]-b[j]):\n                smallest_dif=abs(a[i]-b[j])\n                result=[a[i], b[j]]\n            i+=1\n        else:\n            if smallest_dif > abs(a[i]-b[j]):\n                smallest_dif=abs(a[i]-b[j])\n                result=[a[i], b[j]]\n            j+=1\n    return result\n\n#Driver code\na=[-1, 5, 10, 20, 28, 3]\nb=[26, 134, 135, 15, 17]\nprint(smallestDifference(a, b))","repo_name":"sourabhgome/DataStructures","sub_path":"AEQuestions/0018 - Smallest_difference/sorting.py","file_name":"sorting.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"9161224252","text":"from django import forms\nfrom .models import Organization\nfrom .widgets import RORSelect2Widget\nimport json\nimport jsonschema\nimport os\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom crispy_forms.helper import FormHelper\nfrom crispy_bootstrap5.bootstrap5 import FloatingField, BS5Accordion\nfrom crispy_forms.layout import Layout, Field, ButtonHolder, Row, Column, Button, Div\nfrom crispy_forms.bootstrap import AccordionGroup\nimport requests\nfrom django.utils.translation import gettext as _\n\n\nclass ModelGetOrCreateField(forms.ModelMultipleChoiceField):\n\n    def prepare_value(self, value):\n        if value is None:\n            return value\n        value = super().prepare_value(value)\n        key = self.to_field_name or \"pk\"\n        for v in value:\n            self.queryset.get_or_create(**{key: v})\n        return super().prepare_value(value)\n\n\nclass ResearchOrganisationForm(forms.ModelForm):\n    # the field with which to query the ROR database\n    institution = forms.CharField(widget=RORSelect2Widget)\n\n    # a hidden field for storing the results returned from ROR\n    info = forms.JSONField(widget=forms.HiddenInput)\n\n    class Meta:\n        model = Organization\n        exclude = ['users', 'slug', 'geom', 'domains']\n\n    def full_clean(self):\n        if self.data:\n            # self.data is a querydict and is therefore immutable\n            # convert to a regular dict so we can add stuff\n            self.data = dict(self.data)\n\n            # for whatever reason this is returned as a 1 length array\n            # which 
throws an error, retrieve the first item which is\n            # a json string\n            ROR = json.loads(self.data['info'][0])\n\n            for field in ['_resultId', 'disabled',\n                          'element', 'selected', 'text']:\n                # these fields are added by select2 but will not be accepted when\n                # validating against the jsonschema from ROR\n                del ROR[field]\n\n            self.data['info'] = json.dumps(ROR)\n\n            # update the original data dict with result from the json array\n            # which contains data from ROR\n            self.data.update(ROR)\n\n        return super().full_clean()\n\n    def clean_name(self):\n        \"\"\"Validate the incoming json structure against the official ROR\n        json schema (https://github.com/ror-community/ror-schema).\n\n        .. note::\n\n            We are validating in the 'name' field because the 'json' field is\n            hidden and therefore feedback would not be visible to the user.\n\n        Raises:\n            ValidationError\n        \"\"\"\n        data = json.loads(self.data['info'])\n\n        # retrieve the current ROR schema which is stored locally in the\n        # static folder\n        with open(os.path.join(settings.STATIC_ROOT, 'ror/schema/ror_schema.json')) as f:\n            schema = json.load(f)\n\n        # validate against current ROR schema\n        try:\n            jsonschema.validate(data, schema)\n        except Exception as e:\n            raise ValidationError(\n                'Validation against the ROR json schema failed with the following message:\\n\\n{}'.format(\n                    e.message))\n\n        return self.cleaned_data['name']\n\n\nclass ResearchOrganizationHTMX(forms.Form):\n    id = forms.CharField(\n        label=_('Organisation'),\n        widget=RORSelect2Widget(\n            attrs={\n                'data-theme': 'bootstrap-5', }))\n\n    class Meta:\n        fields = ['id',]\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.helper = FormHelper(self)\n        self.helper.include_media = False\n        self.helper.attrs = {\n            'hx-post': '',\n            'hx-trigger': 'change',\n            'hx-target': '#affiliationList',\n            'hx-swap': \"beforeend\",\n        }\n\n    def fetch(self, id):\n        response = requests.get(f\"https://api.ror.org/organizations/{id}\")\n        return response.json()\n\n    def clean_id(self):\n        id = self.cleaned_data['id']\n        data = self.fetch(id)\n\n        with open(os.path.join(settings.STATIC_ROOT, 'ror/schema/ror_schema.json')) as f:\n            schema = json.load(f)\n\n        # validate against current ROR schema\n        try:\n            jsonschema.validate(data, schema)\n        except Exception as e:\n            raise ValidationError(\n                'Validation against the ROR json schema failed with the following message:\\n\\n{}'.format(\n                    e.message))\n        self.cleaned_data['info'] = data\n        return self.cleaned_data['id']\n","repo_name":"SSJenny90/django-ror","sub_path":"ror/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":4574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"34026726074","text":"# Import the Maya commands library\r\nfrom maya import cmds\r\n\r\nappendString = 'null'\r\n\r\ndef showWindow():\r\n    append_window = cmds.window( title=\"Name Append Tool\", iconName='NAT', widthHeight=(275, 150) )\r\n\r\n    # As we add contents to the window, align them vertically\r\n    cmds.columnLayout( adjustableColumn=True )\r\n\r\n    cmds.rowLayout(numberOfColumns=2)\r\n\r\n    cmds.text( 'Append String: ' )\r\n    appendField = cmds.textField()\r\n    cmds.textField( appendField, edit=True, aie=True, cc=lambda x: set_append_string(appendField) )\r\n\r\n    cmds.setParent('..')\r\n\r\n\r\n    cmds.rowLayout(numberOfColumns=2)\r\n\r\n\r\n    cmds.button( label='Append String to Hierarchy', command=append_string )\r\n\r\n    cmds.button( label='Close', command=('cmds.deleteUI(\\\"' + append_window + '\\\", window=True)') 
)\r\n\r\n cmds.setParent( '..' )\r\n\r\n cmds.showWindow( append_window )\r\n\r\ndef set_append_string(*args):\r\n global appendString\r\n appendString = cmds.textField(args[0], q=1, text=1)\r\n\r\n\r\ndef append_string(*args):\r\n children_nodes = cmds.listRelatives(allDescendents=True, s=False)\r\n cmds.select(children_nodes, add=True )\r\n nodes = cmds.ls(s=False, sl=True)\r\n for obj in nodes:\r\n if 'Shape' in obj:\r\n pass\r\n else:\r\n newName = obj + appendString\r\n cmds.rename(obj, newName)\r\n\r\n cmds.deleteUI('Name Append Tool') ","repo_name":"jhowell702/QuickCommands-MayaPython","sub_path":"appendName.py","file_name":"appendName.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"14313736424","text":"# -*- coding: utf-8 -*-\n#\n# Licensed under the terms of the BSD 3-Clause\n# (see cdl/LICENSE for details)\n\n\"\"\"\nImage Processor\n---------------\n\n\"\"\"\n\n# pylint: disable=invalid-name # Allows short reference names like x, y, ...\n\nfrom __future__ import annotations\n\nfrom collections.abc import Callable\n\nimport numpy as np\nfrom guidata.qthelpers import exec_dialog\nfrom numpy import ma\nfrom plotpy.widgets.resizedialog import ResizeDialog\nfrom qtpy import QtWidgets as QW\n\nimport cdl.core.computation.base as cpb\nimport cdl.core.computation.image as cpi\nimport cdl.core.computation.image.detection as cpi_det\nimport cdl.core.computation.image.edges as cpi_edg\nimport cdl.core.computation.image.exposure as cpi_exp\nimport cdl.core.computation.image.morphology as cpi_mor\nimport cdl.core.computation.image.restoration as cpi_res\nimport cdl.param\nfrom cdl.algorithms.image import distance_matrix\nfrom cdl.config import APP_NAME, Conf, _\nfrom cdl.core.gui.processor.base import BaseProcessor\nfrom cdl.core.model.base import ShapeTypes\nfrom cdl.core.model.image import ImageObj\nfrom cdl.utils.qthelpers import create_progress_bar, qt_try_except\n\n\nclass ImageProcessor(BaseProcessor):\n \"\"\"Object handling image processing: operations, processing, computing\"\"\"\n\n # pylint: disable=duplicate-code\n\n EDIT_ROI_PARAMS = True\n\n @qt_try_except()\n def compute_sum(self) -> None:\n \"\"\"Compute sum\"\"\"\n self.compute_n1(\"Σ\", cpi.compute_add, title=_(\"Sum\"))\n\n @qt_try_except()\n def compute_average(self) -> None:\n \"\"\"Compute average\"\"\"\n\n def func_objs(new_obj: ImageObj, old_objs: list[ImageObj]) -> None:\n \"\"\"Finalize average computation\"\"\"\n new_obj.data = new_obj.data / float(len(old_objs))\n\n self.compute_n1(\"μ\", cpi.compute_add, func_objs=func_objs, title=_(\"Average\"))\n\n @qt_try_except()\n def compute_product(self) -> None:\n \"\"\"Compute product\"\"\"\n self.compute_n1(\"Π\", cpi.compute_product, title=_(\"Product\"))\n\n @qt_try_except()\n def compute_logp1(self, param: cdl.param.LogP1Param | None = None) -> None:\n \"\"\"Compute base 10 logarithm\"\"\"\n self.compute_11(cpi.compute_logp1, param, cpi.LogP1Param, title=\"Log10\")\n\n @qt_try_except()\n def compute_rotate(self, param: cdl.param.RotateParam | None = None) -> None:\n \"\"\"Rotate data arbitrarily\"\"\"\n self.compute_11(cpi.compute_rotate, param, cpi.RotateParam, title=\"Rotate\")\n\n @qt_try_except()\n def compute_rotate90(self) -> None:\n \"\"\"Rotate data 90°\"\"\"\n self.compute_11(cpi.compute_rotate90, title=\"Rotate90\")\n\n @qt_try_except()\n def compute_rotate270(self) -> None:\n \"\"\"Rotate data 270°\"\"\"\n self.compute_11(cpi.compute_rotate270, 
title=\"Rotate270\")\n\n @qt_try_except()\n def compute_fliph(self) -> None:\n \"\"\"Flip data horizontally\"\"\"\n self.compute_11(cpi.compute_fliph, title=\"HFlip\")\n\n @qt_try_except()\n def compute_flipv(self) -> None:\n \"\"\"Flip data vertically\"\"\"\n self.compute_11(cpi.compute_flipv, title=\"VFlip\")\n\n @qt_try_except()\n def distribute_on_grid(self, param: cdl.param.GridParam | None = None) -> None:\n \"\"\"Distribute images on a grid\"\"\"\n title = _(\"Distribute on grid\")\n edit, param = self.init_param(param, cpi.GridParam, title)\n if edit and not param.edit(parent=self.panel.parent()):\n return\n objs = self.panel.objview.get_sel_objects(include_groups=True)\n g_row, g_col, x0, y0, x0_0, y0_0 = 0, 0, 0.0, 0.0, 0.0, 0.0\n delta_x0, delta_y0 = 0.0, 0.0\n with create_progress_bar(self.panel, title, max_=len(objs)) as progress:\n for i_row, obj in enumerate(objs):\n progress.setValue(i_row + 1)\n QW.QApplication.processEvents()\n if progress.wasCanceled():\n break\n if i_row == 0:\n x0_0, y0_0 = x0, y0 = obj.x0, obj.y0\n else:\n delta_x0, delta_y0 = x0 - obj.x0, y0 - obj.y0\n obj.x0 += delta_x0\n obj.y0 += delta_y0\n\n # pylint: disable=unused-argument\n def translate_coords(obj, orig, coords):\n \"\"\"Apply translation to coords\"\"\"\n coords[:, ::2] += delta_x0\n coords[:, 1::2] += delta_y0\n\n obj.transform_shapes(None, translate_coords)\n if param.direction == \"row\":\n # Distributing images over rows\n sign = np.sign(param.rows)\n g_row = (g_row + sign) % param.rows\n y0 += (obj.dy * obj.data.shape[0] + param.rowspac) * sign\n if g_row == 0:\n g_col += 1\n x0 += obj.dx * obj.data.shape[1] + param.colspac\n y0 = y0_0\n else:\n # Distributing images over columns\n sign = np.sign(param.cols)\n g_col = (g_col + sign) % param.cols\n x0 += (obj.dx * obj.data.shape[1] + param.colspac) * sign\n if g_col == 0:\n g_row += 1\n x0 = x0_0\n y0 += obj.dy * obj.data.shape[0] + param.rowspac\n self.panel.SIG_REFRESH_PLOT.emit(\"selected\", True)\n\n @qt_try_except()\n def reset_positions(self) -> None:\n \"\"\"Reset image positions\"\"\"\n x0_0, y0_0 = 0.0, 0.0\n delta_x0, delta_y0 = 0.0, 0.0\n objs = self.panel.objview.get_sel_objects(include_groups=True)\n for i_row, obj in enumerate(objs):\n if i_row == 0:\n x0_0, y0_0 = obj.x0, obj.y0\n else:\n delta_x0, delta_y0 = x0_0 - obj.x0, y0_0 - obj.y0\n obj.x0 += delta_x0\n obj.y0 += delta_y0\n\n # pylint: disable=unused-argument\n def translate_coords(obj, orig, coords):\n \"\"\"Apply translation to coords\"\"\"\n coords[:, ::2] += delta_x0\n coords[:, 1::2] += delta_y0\n\n obj.transform_shapes(None, translate_coords)\n self.panel.SIG_REFRESH_PLOT.emit(\"selected\", True)\n\n @qt_try_except()\n def compute_resize(self, param: cdl.param.ResizeParam | None = None) -> None:\n \"\"\"Resize image\"\"\"\n obj0 = self.panel.objview.get_sel_objects()[0]\n for obj in self.panel.objview.get_sel_objects():\n if obj.size != obj0.size:\n QW.QMessageBox.warning(\n self.panel.parent(),\n APP_NAME,\n _(\"Warning:\")\n + \"\\n\"\n + _(\"Selected images do not have the same size\"),\n )\n edit, param = self.init_param(param, cpi.ResizeParam, _(\"Resize\"))\n if edit:\n original_size = obj0.size\n dlg = ResizeDialog(\n self.plotwidget,\n new_size=original_size,\n old_size=original_size,\n text=_(\"Destination size:\"),\n )\n if not exec_dialog(dlg):\n return\n param.zoom = dlg.get_zoom()\n self.compute_11(cpi.compute_resize, param, title=_(\"Resize\"), edit=edit)\n\n @qt_try_except()\n def compute_binning(self, param: cdl.param.BinningParam | None = 
None) -> None:\n \"\"\"Binning image\"\"\"\n edit = param is None\n obj0 = self.panel.objview.get_sel_objects(include_groups=True)[0]\n input_dtype_str = str(obj0.data.dtype)\n title = _(\"Binning\")\n edit, param = self.init_param(param, cpi.BinningParam, title)\n if edit:\n param.dtype_str = input_dtype_str\n if param.dtype_str is None:\n param.dtype_str = input_dtype_str\n self.compute_11(cpi.compute_binning, param, title=title, edit=edit)\n\n @qt_try_except()\n def compute_roi_extraction(\n self, param: cdl.param.ROIDataParam | None = None\n ) -> None:\n \"\"\"Extract Region Of Interest (ROI) from data\"\"\"\n param = self._get_roidataparam(param)\n if param is None or param.is_empty:\n return\n obj = self.panel.objview.get_sel_objects()[0]\n group = obj.roidata_to_params(param.roidata)\n if param.singleobj:\n self.compute_11(cpi.extract_multiple_roi, group, title=_(\"Extract ROI\"))\n else:\n self.compute_1n(cpi.extract_single_roi, group.datasets, \"ROI\", edit=False)\n\n @qt_try_except()\n def compute_profile(self, param: cdl.param.ProfileParam | None = None) -> None:\n \"\"\"Compute profile\"\"\"\n self.compute_11(\n cpi.compute_profile, param, cdl.param.ProfileParam, title=_(\"Profile\")\n )\n\n @qt_try_except()\n def compute_average_profile(\n self, param: cdl.param.AverageProfileParam | None = None\n ) -> None:\n \"\"\"Compute average profile\"\"\"\n self.compute_11(\n cpi.compute_average_profile,\n param,\n cdl.param.AverageProfileParam,\n title=_(\"Average profile\"),\n )\n\n @qt_try_except()\n def compute_swap_axes(self) -> None:\n \"\"\"Swap data axes\"\"\"\n self.compute_11(cpi.compute_swap_axes, title=_(\"Swap axes\"))\n\n @qt_try_except()\n def compute_abs(self) -> None:\n \"\"\"Compute absolute value\"\"\"\n self.compute_11(cpi.compute_abs, title=_(\"Absolute value\"))\n\n @qt_try_except()\n def compute_re(self) -> None:\n \"\"\"Compute real part\"\"\"\n self.compute_11(cpi.compute_re, title=_(\"Real part\"))\n\n @qt_try_except()\n def compute_im(self) -> None:\n \"\"\"Compute imaginary part\"\"\"\n self.compute_11(cpi.compute_im, title=_(\"Imaginary part\"))\n\n @qt_try_except()\n def compute_astype(self, param: cdl.param.DataTypeIParam | None = None) -> None:\n \"\"\"Convert data type\"\"\"\n self.compute_11(\n cpi.compute_astype, param, cpi.DataTypeIParam, title=_(\"Convert data type\")\n )\n\n @qt_try_except()\n def compute_log10(self) -> None:\n \"\"\"Compute Log10\"\"\"\n self.compute_11(cpi.compute_log10, title=\"Log10\")\n\n @qt_try_except()\n def compute_difference(self, obj2: ImageObj | None = None) -> None:\n \"\"\"Compute difference between two images\"\"\"\n self.compute_n1n(\n obj2,\n _(\"image to subtract\"),\n cpi.compute_difference,\n title=_(\"Difference\"),\n )\n\n @qt_try_except()\n def compute_quadratic_difference(self, obj2: ImageObj | None = None) -> None:\n \"\"\"Compute quadratic difference between two images\"\"\"\n self.compute_n1n(\n obj2,\n _(\"image to subtract\"),\n cpi.compute_quadratic_difference,\n title=_(\"Quadratic difference\"),\n )\n\n @qt_try_except()\n def compute_division(self, obj2: ImageObj | None = None) -> None:\n \"\"\"Compute division between two images\"\"\"\n self.compute_n1n(\n obj2,\n _(\"divider\"),\n cpi.compute_division,\n title=_(\"Division\"),\n )\n\n @qt_try_except()\n def compute_flatfield(\n self,\n obj2: ImageObj | None = None,\n param: cdl.param.FlatFieldParam | None = None,\n ) -> None:\n \"\"\"Compute flat field correction\"\"\"\n edit, param = self.init_param(param, cpi.FlatFieldParam, _(\"Flat field\"))\n 
if edit:\n            obj = self.panel.objview.get_sel_objects()[0]\n            param.set_from_datatype(obj.data.dtype)\n        self.compute_n1n(\n            obj2,\n            _(\"flat field image\"),\n            cpi.compute_flatfield,\n            param=param,\n            title=_(\"Flat field correction\"),\n            edit=edit,\n        )\n\n    # ------Image Processing\n    @qt_try_except()\n    def compute_calibration(\n        self, param: cdl.param.ZCalibrateParam | None = None\n    ) -> None:\n        \"\"\"Compute data linear calibration\"\"\"\n        self.compute_11(\n            cpi.compute_calibration,\n            param,\n            cpi.ZCalibrateParam,\n            _(\"Linear calibration\"),\n            \"y = a.x + b\",\n        )\n\n    @qt_try_except()\n    def compute_threshold(self, param: cpb.ThresholdParam | None = None) -> None:\n        \"\"\"Compute threshold clipping\"\"\"\n        self.compute_11(\n            cpi.compute_threshold,\n            param,\n            cpb.ThresholdParam,\n            _(\"Thresholding\"),\n        )\n\n    @qt_try_except()\n    def compute_clip(self, param: cpb.ClipParam | None = None) -> None:\n        \"\"\"Compute maximum data clipping\"\"\"\n        self.compute_11(\n            cpi.compute_clip,\n            param,\n            cpb.ClipParam,\n            _(\"Clipping\"),\n        )\n\n    @qt_try_except()\n    def compute_gaussian_filter(self, param: cpb.GaussianParam | None = None) -> None:\n        \"\"\"Compute gaussian filter\"\"\"\n        self.compute_11(\n            cpi.compute_gaussian_filter, param, cpb.GaussianParam, _(\"Gaussian filter\")\n        )\n\n    @qt_try_except()\n    def compute_moving_average(\n        self, param: cpb.MovingAverageParam | None = None\n    ) -> None:\n        \"\"\"Compute moving average\"\"\"\n        self.compute_11(\n            cpi.compute_moving_average,\n            param,\n            cpb.MovingAverageParam,\n            _(\"Moving average\"),\n        )\n\n    @qt_try_except()\n    def compute_moving_median(self, param: cpb.MovingMedianParam | None = None) -> None:\n        \"\"\"Compute moving median\"\"\"\n        self.compute_11(\n            cpi.compute_moving_median,\n            param,\n            cpb.MovingMedianParam,\n            _(\"Moving median\"),\n        )\n\n    @qt_try_except()\n    def compute_wiener(self) -> None:\n        \"\"\"Compute Wiener filter\"\"\"\n        self.compute_11(cpi.compute_wiener, title=_(\"Wiener filter\"))\n\n    @qt_try_except()\n    def compute_fft(self, param: cdl.param.FFTParam | None = None) -> None:\n        \"\"\"Compute FFT\"\"\"\n        if param is None:\n            param = cpb.FFTParam.create(shift=Conf.proc.fft_shift_enabled.get())\n        self.compute_11(cpi.compute_fft, param, title=\"FFT\", edit=False)\n\n    @qt_try_except()\n    def compute_ifft(self, param: cdl.param.FFTParam | None = None) -> None:\n        \"\"\"Compute iFFT\"\"\"\n        if param is None:\n            param = cpb.FFTParam.create(shift=Conf.proc.fft_shift_enabled.get())\n        self.compute_11(cpi.compute_ifft, param, title=\"iFFT\", edit=False)\n\n    @qt_try_except()\n    def compute_butterworth(\n        self, param: cdl.param.ButterworthParam | None = None\n    ) -> None:\n        \"\"\"Compute Butterworth filter\"\"\"\n        self.compute_11(\n            cpi.compute_butterworth,\n            param,\n            cpi.ButterworthParam,\n            _(\"Butterworth filter\"),\n        )\n\n    @qt_try_except()\n    def compute_adjust_gamma(\n        self, param: cdl.param.AdjustGammaParam | None = None\n    ) -> None:\n        \"\"\"Compute gamma correction\"\"\"\n        self.compute_11(\n            cpi_exp.compute_adjust_gamma,\n            param,\n            cpi_exp.AdjustGammaParam,\n            _(\"Gamma correction\"),\n        )\n\n    @qt_try_except()\n    def compute_adjust_log(self, param: cdl.param.AdjustLogParam | None = None) -> None:\n        \"\"\"Compute log correction\"\"\"\n        self.compute_11(\n            cpi_exp.compute_adjust_log,\n            param,\n            cpi_exp.AdjustLogParam,\n            _(\"Log correction\"),\n        )\n\n    @qt_try_except()\n    def compute_adjust_sigmoid(\n        self,\n        param: cdl.param.AdjustSigmoidParam | None = None,\n    ) -> None:\n        \"\"\"Compute sigmoid correction\"\"\"\n        self.compute_11(\n            cpi_exp.compute_adjust_sigmoid,\n            
param,\n cpi_exp.AdjustSigmoidParam,\n _(\"Sigmoid correction\"),\n )\n\n @qt_try_except()\n def compute_rescale_intensity(\n self,\n param: cdl.param.RescaleIntensityParam | None = None,\n ) -> None:\n \"\"\"Rescale image intensity levels\"\"\"\n self.compute_11(\n cpi_exp.compute_rescale_intensity,\n param,\n cpi_exp.RescaleIntensityParam,\n _(\"Rescale intensity\"),\n )\n\n @qt_try_except()\n def compute_equalize_hist(\n self, param: cdl.param.EqualizeHistParam | None = None\n ) -> None:\n \"\"\"Histogram equalization\"\"\"\n self.compute_11(\n cpi_exp.compute_equalize_hist,\n param,\n cpi_exp.EqualizeHistParam,\n _(\"Histogram equalization\"),\n )\n\n @qt_try_except()\n def compute_equalize_adapthist(\n self,\n param: cdl.param.EqualizeAdaptHistParam | None = None,\n ) -> None:\n \"\"\"Adaptive histogram equalization\"\"\"\n self.compute_11(\n cpi_exp.compute_equalize_adapthist,\n param,\n cpi_exp.EqualizeAdaptHistParam,\n _(\"Adaptive histogram equalization\"),\n )\n\n @qt_try_except()\n def compute_denoise_tv(self, param: cdl.param.DenoiseTVParam | None = None) -> None:\n \"\"\"Compute Total Variation denoising\"\"\"\n self.compute_11(\n cpi_res.compute_denoise_tv,\n param,\n cpi_res.DenoiseTVParam,\n _(\"Total variation denoising\"),\n )\n\n @qt_try_except()\n def compute_denoise_bilateral(\n self,\n param: cdl.param.DenoiseBilateralParam | None = None,\n ) -> None:\n \"\"\"Compute bilateral filter denoising\"\"\"\n self.compute_11(\n cpi_res.compute_denoise_bilateral,\n param,\n cpi_res.DenoiseBilateralParam,\n _(\"Bilateral filter denoising\"),\n )\n\n @qt_try_except()\n def compute_denoise_wavelet(\n self,\n param: cdl.param.DenoiseWaveletParam | None = None,\n ) -> None:\n \"\"\"Compute Wavelet denoising\"\"\"\n self.compute_11(\n cpi_res.compute_denoise_wavelet,\n param,\n cpi_res.DenoiseWaveletParam,\n _(\"Wavelet denoising\"),\n )\n\n @qt_try_except()\n def compute_denoise_tophat(\n self, param: cdl.param.MorphologyParam | None = None\n ) -> None:\n \"\"\"Denoise using White Top-Hat\"\"\"\n self.compute_11(\n cpi_res.compute_denoise_tophat,\n param,\n cpi_mor.MorphologyParam,\n _(\"Denoise / Top-Hat\"),\n )\n\n @qt_try_except()\n def compute_all_denoise(self, params: list | None = None) -> None:\n \"\"\"Compute all denoising filters\"\"\"\n if params is not None:\n assert len(params) == 4, \"Wrong number of parameters (4 expected)\"\n funcs = [\n cpi_res.compute_denoise_tv,\n cpi_res.compute_denoise_bilateral,\n cpi_res.compute_denoise_wavelet,\n cpi_res.compute_denoise_tophat,\n ]\n edit = params is None\n if edit:\n params = []\n for paramclass, title in (\n (cpi_res.DenoiseTVParam, _(\"Total variation denoising\")),\n (cpi_res.DenoiseBilateralParam, _(\"Bilateral filter denoising\")),\n (cpi_res.DenoiseWaveletParam, _(\"Wavelet denoising\")),\n (cpi_mor.MorphologyParam, _(\"Denoise / Top-Hat\")),\n ):\n param = paramclass(title)\n self.update_param_defaults(param)\n params.append(param)\n self.compute_1n(funcs, params, \"Denoise\", edit=edit)\n\n @qt_try_except()\n def compute_white_tophat(\n self, param: cdl.param.MorphologyParam | None = None\n ) -> None:\n \"\"\"Compute White Top-Hat\"\"\"\n self.compute_11(\n cpi_mor.compute_white_tophat,\n param,\n cpi_mor.MorphologyParam,\n _(\"White Top-Hat\"),\n )\n\n @qt_try_except()\n def compute_black_tophat(\n self, param: cdl.param.MorphologyParam | None = None\n ) -> None:\n \"\"\"Compute Black Top-Hat\"\"\"\n self.compute_11(\n cpi_mor.compute_black_tophat,\n param,\n cpi_mor.MorphologyParam,\n _(\"Black Top-Hat\"),\n )\n\n 
@qt_try_except()\n def compute_erosion(self, param: cdl.param.MorphologyParam | None = None) -> None:\n \"\"\"Compute Erosion\"\"\"\n self.compute_11(\n cpi_mor.compute_erosion,\n param,\n cpi_mor.MorphologyParam,\n _(\"Erosion\"),\n )\n\n @qt_try_except()\n def compute_dilation(self, param: cdl.param.MorphologyParam | None = None) -> None:\n \"\"\"Compute Dilation\"\"\"\n self.compute_11(\n cpi_mor.compute_dilation,\n param,\n cpi_mor.MorphologyParam,\n _(\"Dilation\"),\n )\n\n @qt_try_except()\n def compute_opening(self, param: cdl.param.MorphologyParam | None = None) -> None:\n \"\"\"Compute morphological opening\"\"\"\n self.compute_11(\n cpi_mor.compute_opening,\n param,\n cpi_mor.MorphologyParam,\n _(\"Opening\"),\n )\n\n @qt_try_except()\n def compute_closing(self, param: cdl.param.MorphologyParam | None = None) -> None:\n \"\"\"Compute morphological closing\"\"\"\n self.compute_11(\n cpi_mor.compute_closing,\n param,\n cpi_mor.MorphologyParam,\n _(\"Closing\"),\n )\n\n @qt_try_except()\n def compute_all_morphology(\n self, param: cdl.param.MorphologyParam | None = None\n ) -> None:\n \"\"\"Compute all morphology filters\"\"\"\n if param is None:\n param = cpi_mor.MorphologyParam()\n if not param.edit(parent=self.panel.parent()):\n return\n funcs = [\n cpi_mor.compute_white_tophat,\n cpi_mor.compute_black_tophat,\n cpi_mor.compute_erosion,\n cpi_mor.compute_dilation,\n cpi_mor.compute_opening,\n cpi_mor.compute_closing,\n ]\n self.compute_1n(funcs, [param] * len(funcs), \"Morph\", edit=False)\n\n @qt_try_except()\n def compute_canny(self, param: cdl.param.CannyParam | None = None) -> None:\n \"\"\"Compute Canny filter\"\"\"\n self.compute_11(\n cpi_edg.compute_canny,\n param,\n cpi_edg.CannyParam,\n _(\"Canny filter\"),\n )\n\n @qt_try_except()\n def compute_roberts(self) -> None:\n \"\"\"Compute Roberts filter\"\"\"\n self.compute_11(cpi_edg.compute_roberts, title=_(\"Roberts filter\"))\n\n @qt_try_except()\n def compute_prewitt(self) -> None:\n \"\"\"Compute Prewitt filter\"\"\"\n self.compute_11(cpi_edg.compute_prewitt, title=_(\"Prewitt filter\"))\n\n @qt_try_except()\n def compute_prewitt_h(self) -> None:\n \"\"\"Compute Prewitt filter (horizontal)\"\"\"\n self.compute_11(\n cpi_edg.compute_prewitt_h,\n title=_(\"Prewitt filter (horizontal)\"),\n )\n\n @qt_try_except()\n def compute_prewitt_v(self) -> None:\n \"\"\"Compute Prewitt filter (vertical)\"\"\"\n self.compute_11(\n cpi_edg.compute_prewitt_v,\n title=_(\"Prewitt filter (vertical)\"),\n )\n\n @qt_try_except()\n def compute_sobel(self) -> None:\n \"\"\"Compute Sobel filter\"\"\"\n self.compute_11(cpi_edg.compute_sobel, title=_(\"Sobel filter\"))\n\n @qt_try_except()\n def compute_sobel_h(self) -> None:\n \"\"\"Compute Sobel filter (horizontal)\"\"\"\n self.compute_11(\n cpi_edg.compute_sobel_h,\n title=_(\"Sobel filter (horizontal)\"),\n )\n\n @qt_try_except()\n def compute_sobel_v(self) -> None:\n \"\"\"Compute Sobel filter (vertical)\"\"\"\n self.compute_11(\n cpi_edg.compute_sobel_v,\n title=_(\"Sobel filter (vertical)\"),\n )\n\n @qt_try_except()\n def compute_scharr(self) -> None:\n \"\"\"Compute Scharr filter\"\"\"\n self.compute_11(cpi_edg.compute_scharr, title=_(\"Scharr filter\"))\n\n @qt_try_except()\n def compute_scharr_h(self) -> None:\n \"\"\"Compute Scharr filter (horizontal)\"\"\"\n self.compute_11(\n cpi_edg.compute_scharr_h,\n title=_(\"Scharr filter (horizontal)\"),\n )\n\n @qt_try_except()\n def compute_scharr_v(self) -> None:\n \"\"\"Compute Scharr filter (vertical)\"\"\"\n self.compute_11(\n 
cpi_edg.compute_scharr_v,\n title=_(\"Scharr filter (vertical)\"),\n )\n\n @qt_try_except()\n def compute_farid(self) -> None:\n \"\"\"Compute Farid filter\"\"\"\n self.compute_11(cpi_edg.compute_farid, title=_(\"Farid filter\"))\n\n @qt_try_except()\n def compute_farid_h(self) -> None:\n \"\"\"Compute Farid filter (horizontal)\"\"\"\n self.compute_11(\n cpi_edg.compute_farid_h,\n title=_(\"Farid filter (horizontal)\"),\n )\n\n @qt_try_except()\n def compute_farid_v(self) -> None:\n \"\"\"Compute Farid filter (vertical)\"\"\"\n self.compute_11(\n cpi_edg.compute_farid_v,\n title=_(\"Farid filter (vertical)\"),\n )\n\n @qt_try_except()\n def compute_laplace(self) -> None:\n \"\"\"Compute Laplace filter\"\"\"\n self.compute_11(cpi_edg.compute_laplace, title=_(\"Laplace filter\"))\n\n @qt_try_except()\n def compute_all_edges(self) -> None:\n \"\"\"Compute all edges\"\"\"\n funcs = [\n cpi_edg.compute_roberts,\n cpi_edg.compute_prewitt,\n cpi_edg.compute_prewitt_h,\n cpi_edg.compute_prewitt_v,\n cpi_edg.compute_sobel,\n cpi_edg.compute_sobel_h,\n cpi_edg.compute_sobel_v,\n cpi_edg.compute_scharr,\n cpi_edg.compute_scharr_h,\n cpi_edg.compute_scharr_v,\n cpi_edg.compute_farid,\n cpi_edg.compute_farid_h,\n cpi_edg.compute_farid_v,\n cpi_edg.compute_laplace,\n ]\n self.compute_1n(funcs, None, \"Edges\")\n\n # ------Image Computing\n @qt_try_except()\n def compute_centroid(self) -> None:\n \"\"\"Compute image centroid\"\"\"\n self.compute_10(cpi.compute_centroid, ShapeTypes.MARKER, title=_(\"Centroid\"))\n\n @qt_try_except()\n def compute_enclosing_circle(self) -> None:\n \"\"\"Compute minimum enclosing circle\"\"\"\n # TODO: [P2] Find a way to add the circle to the computing results\n # as in \"enclosingcircle_test.py\"\n self.compute_10(\n cpi.compute_enclosing_circle, ShapeTypes.CIRCLE, title=_(\"Enclosing circle\")\n )\n\n @qt_try_except()\n def compute_peak_detection(\n self, param: cdl.param.Peak2DDetectionParam | None = None\n ) -> None:\n \"\"\"Compute 2D peak detection\"\"\"\n edit, param = self.init_param(\n param, cpi_det.Peak2DDetectionParam, _(\"Peak detection\")\n )\n if edit:\n data = self.panel.objview.get_sel_objects()[0].data\n param.size = max(min(data.shape) // 40, 50)\n\n results = self.compute_10(\n cpi_det.compute_peak_detection,\n ShapeTypes.POINT,\n param,\n edit=edit,\n title=_(\"Peak detection\"),\n )\n if results is not None and param.create_rois and len(results.items()) > 1:\n with create_progress_bar(\n self.panel, _(\"Create regions of interest\"), max_=len(results)\n ) as progress:\n for idx, (oid, result) in enumerate(results.items()):\n progress.setValue(idx + 1)\n QW.QApplication.processEvents()\n if progress.wasCanceled():\n break\n obj = self.panel.objmodel[oid]\n dist = distance_matrix(result.data)\n dist_min = dist[dist != 0].min()\n assert dist_min > 0\n radius = int(0.5 * dist_min / np.sqrt(2) - 1)\n assert radius >= 1\n roicoords = []\n ymax, xmax = obj.data.shape\n for x, y in result.data:\n coords = [\n max(x - radius, 0),\n max(y - radius, 0),\n min(x + radius, xmax),\n min(y + radius, ymax),\n ]\n roicoords.append(coords)\n obj.roi = np.array(roicoords, int)\n self.SIG_ADD_SHAPE.emit(obj.uuid)\n self.panel.SIG_REFRESH_PLOT.emit(obj.uuid, True)\n\n @qt_try_except()\n def compute_contour_shape(\n self, param: cdl.param.ContourShapeParam | None = None\n ) -> None:\n \"\"\"Compute contour shape fit\"\"\"\n edit, param = self.init_param(param, cpi_det.ContourShapeParam, _(\"Contour\"))\n self.compute_10(\n cpi_det.compute_contour_shape,\n shapetype=None, # 
Shape is defined by the dataset ContourShapeParam\n param=param,\n title=_(\"Contour\"),\n edit=edit,\n )\n\n @qt_try_except()\n def compute_hough_circle_peaks(\n self, param: cdl.param.HoughCircleParam | None = None\n ) -> None:\n \"\"\"Compute peak detection based on a circle Hough transform\"\"\"\n self.compute_10(\n cpi.compute_hough_circle_peaks,\n ShapeTypes.CIRCLE,\n param,\n cpi.HoughCircleParam,\n title=_(\"Hough circles\"),\n )\n\n @qt_try_except()\n def compute_blob_dog(self, param: cdl.param.BlobDOGParam | None = None) -> None:\n \"\"\"Compute blob detection using Difference of Gaussian method\"\"\"\n self.compute_10(\n cpi_det.compute_blob_dog,\n ShapeTypes.CIRCLE,\n param,\n cpi_det.BlobDOGParam,\n title=_(\"Blob detection (DOG)\"),\n )\n\n @qt_try_except()\n def compute_blob_doh(self, param: cdl.param.BlobDOHParam | None = None) -> None:\n \"\"\"Compute blob detection using Determinant of Hessian method\"\"\"\n self.compute_10(\n cpi_det.compute_blob_doh,\n ShapeTypes.CIRCLE,\n param,\n cpi_det.BlobDOHParam,\n title=_(\"Blob detection (DOH)\"),\n )\n\n @qt_try_except()\n def compute_blob_log(self, param: cdl.param.BlobLOGParam | None = None) -> None:\n \"\"\"Compute blob detection using Laplacian of Gaussian method\"\"\"\n self.compute_10(\n cpi_det.compute_blob_log,\n ShapeTypes.CIRCLE,\n param,\n cpi_det.BlobLOGParam,\n title=_(\"Blob detection (LOG)\"),\n )\n\n @qt_try_except()\n def compute_blob_opencv(\n self,\n param: cdl.param.BlobOpenCVParam | None = None,\n ) -> None:\n \"\"\"Compute blob detection using OpenCV\"\"\"\n self.compute_10(\n cpi_det.compute_blob_opencv,\n ShapeTypes.CIRCLE,\n param,\n cpi_det.BlobOpenCVParam,\n title=_(\"Blob detection (OpenCV)\"),\n )\n\n def _get_stat_funcs(self) -> list[tuple[str, Callable[[np.ndarray], float]]]:\n \"\"\"Return statistics functions list\"\"\"\n # Be careful to use systematically functions adapted to masked arrays\n # (e.g. 
numpy.ma median, and *not* numpy.median)\n        return [\n            (\"min(z)\", lambda z: z.min()),\n            (\"max(z)\", lambda z: z.max()),\n            (\"<z>\", lambda z: z.mean()),\n            (\"Median(z)\", ma.median),\n            (\"σ(z)\", lambda z: z.std()),\n            (\"Σ(z)\", lambda z: z.sum()),\n            (\"<z>/σ(z)\", lambda z: z.mean() / z.std()),\n        ]\n","repo_name":"Codra-Ingenierie-Informatique/DataLab","sub_path":"cdl/core/gui/processor/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":31399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"6937998436","text":"from pickle import TRUE\nimport sys\nimport os\n# allows us to import flaskapp module\nsys.path.insert(1, os.path.join(sys.path[0], '..'))\n\nfrom flaskapp.dataPipeline.budgetReader import PDFImporter\nfrom flaskapp import create_app\n\nfrom fileinput import filename\nimport os\nimport unittest\nfrom io import BytesIO\nimport re\n\nTEST_DB = 'test.db'\napp = create_app()\n\n\nclass BasicTests(unittest.TestCase):\n    ############################\n    #### setup and teardown ####\n    ############################\n    # executed prior to each test\n    def setUp(self):\n        app.config['TESTING'] = True\n        app.config['WTF_CSRF_ENABLED'] = False\n        app.config['DEBUG'] = False\n        self.app = app.test_client()\n\n    # executed after each test\n    def tearDown(self):\n        pass\n\n    def getFiles(self):\n        flist = []\n        for file in os.listdir(\"tests/pdf_input\"):\n            if file.endswith(\".pdf\"):\n                f = os.path.join(\"tests/pdf_input\", file)\n                flist.append(f)\n        return flist\n\n    ############################\n    #### setup and teardown ####\n    ############################\n\n    def test_parsePDFWithCorrectEnding(self):\n        '''\n        test_parsePDFWithCorrectEnding() will check if the DF of the pdf will generate ONE count of the 'ending balance' string data\n        '''\n        fList = self.getFiles()\n        print(\"Input File: %s\" % fList)\n\n        # open pdf file and save content to variable file\n        for f in fList:\n            fileName = os.path.basename(f)\n            endOfBalanceCount = 0\n            with open(f, 'rb') as openedFile:\n                pdfContent = openedFile.read()\n                file = BytesIO(pdfContent)\n                pdf = PDFImporter(pdfName=fileName, pdf=file, user_id=1)\n                cleanedDF = pdf.cleanupPDF()\n                transactionDetailList = cleanedDF['transactionDetail'].tolist()\n\n                # check if there is exactly one 'Ending Balance' string in the transactionDetailList\n                for data in transactionDetailList:\n                    if data == 'Ending Balance':\n                        endOfBalanceCount += 1\n\n                print('*'*30)\n                print('Testing file %s, file contains %s \"Ending Balance\" count' %\n                      (fileName, endOfBalanceCount))\n                print('*'*30)\n                if endOfBalanceCount != 1:\n                    break\n        self.assertEqual(endOfBalanceCount, 1)\n\n    def test_dateFieldPDF(self):\n        '''\n        test_dateFieldPDF() will check if the date field in the DF is in the correct format, dd-mm\n        '''\n        fList = self.getFiles()\n        print(\"Input File: %s\"%fList)\n\n        # open pdf file and save content to variable file\n        for f in fList:\n            fileName = os.path.basename(f)\n\n            print('*'*30)\n            print('Testing date field for file: %s'%fileName)\n            print('*'*30)\n\n            with open(f, 'rb') as openedFile:\n                pdfContent = openedFile.read()\n                file = BytesIO(pdfContent)\n                pdf = PDFImporter(pdfName=fileName,pdf=file, user_id=1)\n                cleanedDF = pdf.cleanupPDF()\n                dateList = cleanedDF['date'].tolist()\n\n                # check if the date field is in date format\n                for index in range(0,len(dateList)):\n                    try:\n                        if re.fullmatch(r'[0-9]{2}[-][0-9]{2}',dateList[index]) is None:\n                            print('Date field not valid')\n                            print('File: %s'%fileName)\n                            print('Row %s in Dataframe'%index)\n                            self.assertEqual(True, False)\n                    except ValueError as e:\n                        print('Date field not valid')\n                        print('File: %s'%fileName)\n                        print('Row %s in Dataframe'%index)\n                        self.assertEqual(True, False)\n\n        self.assertEqual(True, True)\n\n    def test_balanceCount(self):\n        '''\n        test_balanceCount() will check if the correct balance is displayed based on the amount added or removed\n        '''\n        fList = self.getFiles()\n        print(\"Input File: %s\"%fList)\n\n        # open pdf file and save content to variable file\n        for f in fList:\n            fileName = os.path.basename(f)\n            print('*'*30)\n            print('Testing accountBalance count: %s'%fileName)\n            print('*'*30)\n\n            with open(f, 'rb') as openedFile:\n                pdfContent = openedFile.read()\n                file = BytesIO(pdfContent)\n                pdf = PDFImporter(pdfName=fileName,pdf=file, user_id=1)\n                cleanedDF = pdf.cleanupPDF()\n                amountList = cleanedDF['amount'].tolist()\n                balanceList = cleanedDF['accountBalance'].tolist()\n\n                for index in range(0, len(balanceList)-2):\n                    if amountList[index+1][-1] == '-':\n                        amountList[index+1] = -abs(float(amountList[index+1][:-1].replace(',','')))\n                    else:\n                        amountList[index+1] = abs(float(amountList[index+1].replace(',','')))\n\n                    if round(float(balanceList[index].replace(',','')) + float(amountList[index+1]), 2) != float(balanceList[index+1].replace(',','')):\n\n                        print(round(float(balanceList[index].replace(',','')) + float(amountList[index+1]), 2))\n                        print(float(balanceList[index+1].replace(',','')))\n                        print(\"The values above DON'T match\")\n                        self.assertEqual(round(float(balanceList[index].replace(',','')) + float(amountList[index+1]), 2), float(balanceList[index+1].replace(',','')))\n                    print(\"-\"*30)\n    \n    \n\n\nif __name__ == '__main__':\n    # app.run(debug=True)\n    unittest.main()","repo_name":"Dimas713/budget_web_app","sub_path":"tests/test_pdfParsing.py","file_name":"test_pdfParsing.py","file_ext":"py","file_size_in_byte":5879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"36943347883","text":"import plotly.graph_objects as go\nimport plotly.io as pio\nimport pandas as pd \nimport chart_studio.tools as tls\nimport chart_studio.plotly as py\n\n#hide the plotly logo\nconfig = {'displaylogo': False}\n\n#Create Dataframe for the visuals\ndf = pd.read_excel(\"/Users/niqtoliver/Desktop/ACES/aces_household_challenges_visualization/data.xlsx\", sheet_name=\"Sheet1\")\n\n\nfig = go.Figure()\nfig.add_trace(go.Bar(\n    x=df.data_values,\n    y=df.breakdown[0:3],\n    orientation=\"h\",\n    ))\n\nfig.update_traces( selector=dict(type='bar'))\n\n\ncols =[\"data_values\"]\n\nstatewide = [list(df[item][0:4]) for item in cols]\nasian = [list(df[item][4:8]) for item in cols]\nblack = [list(df[item][8:12]) for item in cols]\nhispa = [list(df[item][12:16]) for item in cols]\nwhite = [list(df[item][16:20]) for item in cols]\nfemale = [list(df[item][20:24]) for item in cols]\nmale = [list(df[item][24:28]) for item in cols]\nlgbt = [list(df[item][28:32]) for item in cols]\nunsure = [list(df[item][32:36]) for item in cols]\nhetero = [list(df[item][36:40]) for item in cols]\n\n# Chart should display only the chart for the specified button, and can toggle between other charts, going back to the original \ndropdown1 = dict(method = \"update\", \n                args = [{'x': statewide}],\n                label = \"Statewide\")\n\ndropdown2 = dict(method = \"update\",\n                args = [{'x': asian}],\n                label = \"Asian\")\n\ndropdown3 = dict(method = \"update\",\n                args = [{'x': black}],\n                label = \"Black\")\n\ndropdown4 = dict(method = \"update\",\n                args = [{'x': hispa}],\n                label = 
\"Hispanic\")\n\ndropdown5 = dict(method = \"update\",\n args = [{'x': white}],\n label = \"White\")\n\ndropdown6 = dict(method = \"update\",\n args = [{'x': female}],\n label = \"Female\")\ndropdown7 = dict(method = \"update\",\n args = [{'x': male}],\n label = \"Male\")\n\ndropdown8 = dict(method = \"update\",\n args = [{'x': lgbt}],\n label = \"Gay/Lesbian/Bisexual\")\n\ndropdown9 = dict(method = \"update\",\n args = [{'x': unsure}],\n label = \"Unsure\")\n\ndropdown10 = dict(method = \"update\",\n args = [{'x': hetero}],\n label = \"Heterosexual\")\n\nfig.update_layout(height=450,bargap=0.2, title=\"Child Abuse\", title_x=0.5,\n updatemenus=[dict(active=0,\n buttons=[dropdown1, dropdown2, dropdown3, dropdown4,\n dropdown5, dropdown6, dropdown7 ,dropdown8 ,\n dropdown9, dropdown10])\n \n ])\n\n# fig.show(config=config)\n\n# fig.write_html(\"index.html\", auto_open=True)\n\npio.write_html(fig, config=config, file='index.html', auto_open=True)","repo_name":"CT-Data-Collaborative/aces_household_challenges_vis","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"31315461628","text":"import ble # This is the file in the same folder, should be uploaded to Pico.\r\nimport uasyncio as asyncio\r\nimport time\r\n\r\n\r\nclass BLEBirdy: # \"Bidirectional\" to \"Bidir\" to \"Birdy\" lmao i hate myself\r\n def __init__(self, operation, line=\"DefaultLine\"):\r\n self.line = line\r\n self.operation = operation\t# operation can either be True or False.\r\n # True => Yell operator, False => Listen operator.\r\n # The operation type does not directly affect their\r\n # functionality.\r\n self.op = None # op stands for operator, either in Yell or Listen operations\r\n self.success = False\r\n \r\n def connect(self): # Connects one Pico to another Pico\r\n try:\r\n # Attempts to connect via bluetooth!\r\n if self.operation is True:\r\n self.op = ble.Yell(self.line) # establishes connection using the message in self.line\r\n else:\r\n self.op = ble.Listen(self.line) # establishes connection using the message in self.line\r\n if self.op.connect_up(): # Checks connection, connect_up() is currently blocking, should get around?\r\n self.success = True\r\n return self.success\r\n except Exception as e: # if any error occurs, disconnect if possible\r\n self.disconnect()\r\n print(e)\r\n \r\n def disconnect(self): # Disconnect the established connection, if possible\r\n if self.op is not None and self.success is True:\r\n self.op.disconnect()\r\n \r\n async def read(self):\r\n while True:\r\n print(\"entered read function\")\r\n if self.op.is_any(): # Read any received BLE comm.\r\n print(self.op.read())\r\n# self.op = self.op.decode()\r\n# print(f\" >> {self.op}\", end='')\r\n await asyncio.sleep_ms(10)\r\n \r\n async def send(self):\r\n while True:\r\n print(\"entered send function\")\r\n if self.operation is True:\r\n time.sleep(1)\r\n self.op.send(\"OI MATE\")\r\n else:\r\n time.sleep(1)\r\n self.op.write(\"NAHHH FACK YOU\")\r\n await asyncio.sleep_ms(10)\r\n \r\n async def main(self, duration):\r\n asyncio.create_task(self.read())\t\t\t# read task\r\n asyncio.create_task(self.send())\t# send task\r\n await asyncio.sleep(duration)\t\t# sleeps for duration secs\r\n \r\n def run(self, runtime):\r\n try:\r\n asyncio.run(self.main(runtime)) # runs everything for __ seconds\r\n except KeyboardInterrupt:\r\n print('Interrupted')\r\n finally:\r\n asyncio.new_event_loop() \r\n 
print('All set! There will be a clear state now.')\r\n\r\n \r\n","repo_name":"remren/me35hw6","sub_path":"workspaces/rren/gaaaarbage/ble/blebirdy_proto.py","file_name":"blebirdy_proto.py","file_ext":"py","file_size_in_byte":2861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"8737481932","text":"from datetime import datetime\nfrom pip._vendor import requests\nfrom Crypto.PublicKey import RSA\nfrom urllib.parse import urlparse\nfrom blockchain.modules.Escrow import Escrow\nfrom blockchain.modules.Lender import Lender\nfrom blockchain.modules.MusharakSmartContract import MusharakSmartContract\nfrom blockchain.modules.ScTransaction import ScTransaction\nfrom .Block import Block\nfrom .Transaction import Transaction\nfrom .Property import Property\n\n\nclass Blockchain:\n\n def __init__(self):\n self.chain = [self.create_genesis_block()]\n self.difficulty = 2\n self.pending_transactions = []\n self.mining_reward = 10\n self.url = \"\"\n self.nodes = set() # New\n\n def create_genesis_block(self):\n return Block(\"01/01/2022 8:90:23\", \"Genesis block\")\n\n def get_last_block(self):\n return self.chain[-1]\n\n def mine_pending_txs(self, miner_address):\n\n # First reward the miner, could be done last\n self.pending_transactions.append(Transaction(\n None, miner_address, self.mining_reward))\n\n # If I successfully mine send reward to miningRewardAddress\n # In a real blockchain there are too many transactions to be put\n # in the same block so miners choose which transactions to include\n\n block = Block(\n datetime.now().strftime(\n \"%m/%d/%Y, %H:%M:%S\"),\n self.pending_transactions,\n self.get_last_block().hash\n )\n\n block.hash = block.calculate_hash()\n\n block.mine_block(self.difficulty)\n\n print(\"Block Successfully Mined\")\n\n # rewarding the miner in the next block\n # If the miner adds extra rewards other nodes in the network will ignore it\n self.pending_transactions = []\n\n # broadcast to all other nodes\n self.broadcast_block(block)\n\n self.chain.append(block)\n\n return block\n\n def add_transaction(self, transaction): # New\n if (not transaction.sender):\n raise ValueError('Transacton must include from address')\n\n is_valid = transaction.is_valid_transaction()\n\n if (not is_valid):\n raise ValueError('Cannot add invalid transaction to chain')\n\n self.pending_transactions.append(transaction)\n\n return \"Transaction Added\"\n\n def broadcast_block(self, block):\n\n count = 0\n block_json = self.object_tojson(block)\n data_json = block_json['data']\n print('data_json')\n print(data_json)\n\n payload = {\n # ['timestamp', 'data', 'previous_hash', 'hash']\n \"timestamp\": block.timestamp,\n \"data\": data_json,\n \"previous_hash\": block.previous_hash,\n \"hash\": block.hash\n }\n\n for node in self.nodes:\n if (node != self.url):\n url = \"http://\"+node+\"/block_broadcast\"\n print(\"THE URL\" + url)\n response = requests.post(\n url, json=payload)\n # print(\"POST RESPONSE\")\n received_json = response.json()\n block_added = received_json.get('block_added')\n print(\"noded accepted block\")\n print(block_added)\n\n # response = requests.post(url)\n # response.json()\n\n def broadcast_pendingtx(self, transaction):\n # broadcast transaction to\n payload = self.object_tojson(transaction)\n for node in self.nodes:\n if (node != self.url):\n url = \"http://\"+node+\"/pendingtx_broadcast\"\n print(\"THE URL\" + url)\n response = requests.post(\n url, json=payload)\n # print(\"POST RESPONSE\")\n received_json = 
response.json()\n                transaction_added = received_json.get('transaction_added')\n                print(\"node accepted block\")\n                print(transaction_added)\n\n    def is_chain_valid(self, chain):\n        # Check if the Genesis block hasn't been tampered with by comparing\n        # the output of createGenesisBlock with the first block on our chain\n        real_genesis = self.create_genesis_block().json_encode()\n\n        if (real_genesis != self.chain[0].json_encode()):\n            print(\"GENESIS IS NOT VALID\")\n            return False\n\n        for i in range(1, len(self.chain)):\n            current_block = self.chain[i]\n            previous_block = self.chain[i-1]\n\n            print(\"############################\")\n            print(current_block.data)\n            print(current_block.hash)\n            print(\"############################\")\n            print(previous_block.data)\n\n            if (current_block.hash != current_block.calculate_hash()):\n                print('current is invalid')\n                return False\n\n            if (current_block.previous_hash != previous_block.hash):\n                print(current_block.previous_hash)\n                print(previous_block.hash)\n                print('previous is invalid')\n                return False\n\n            # Check if all transactions in the current block are valid\n            if (not current_block.has_valid_transactions()):\n                print('has invalid transactions')\n                return False\n\n        return True\n\n    def get_balance(self, wallet_address):\n        balance = 1000\n        for i in range(1, len(self.chain)):\n            block = self.chain[i]\n            try:\n                for j in range(0, len(block.data)):\n                    transaction = block.data[j]\n                    if hasattr(transaction, \"property\"):\n                        if (transaction.sender == wallet_address):\n                            balance -= int(transaction.downpayment)\n                    elif hasattr(transaction, \"func\"):\n                        if (transaction.sender == wallet_address):\n                            balance -= int(transaction.amount)\n                    else:\n                        if (transaction.sender == wallet_address):\n                            balance -= int(transaction.amount)\n                        if (transaction.receiver == wallet_address):\n                            balance += int(transaction.amount)\n            except AttributeError:\n                print(\"no transaction\")\n        return balance\n\n    # lenders are all those who sent money to the SC wallet address\n    def get_lenders(self, sc_wallet_address):\n        lenders = []\n        for i in range(1, len(self.chain)):\n            block = self.chain[i]\n            try:\n                for j in range(0, len(block.data)):\n                    transaction = block.data[j]\n                    if (transaction.receiver == sc_wallet_address):\n                        lenders.append(transaction.sender)\n            except AttributeError:\n                print(\"no transaction\")\n        return lenders\n\n    # def rentPaid():\n    # TODO\n    # distribute received rent over lenders\n\n    # def rentNotPaid():\n    # TODO\n    # // distribute rent from borrowers position to lenders\n    # // if borrower has no percentage from which rent can be\n    # // taken or give the lender the option to sell\n\n    # def sell():\n    # TODO\n    # // submit the property for sale\n\n    # /*\n    # we need pending transactions because we can only make a certain number of\n    # transactions on a particular interval. Proof of Work makes sure only one block\n    # is created every ten minutes. 
All other transactions are temporarily stored in this\n # pending transactions array\n #\n\n def add_node(self, address): # New\n self.nodes.add(address)\n # parsed_url = urlparse(address)\n # self.nodes.add(parsed_url.netloc)\n\n def replace_chain(self): # New\n longest_chain = None\n max_length = len(self.chain)\n for node in self.nodes:\n if node != self.url:\n print(\"NODE IS BCHAIN\" + str(self.nodes))\n response = requests.get(f'http://{node}/get_chain')\n if response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n if length > max_length and self.is_chain_valid(chain):\n max_length = length\n longest_chain = chain\n if longest_chain:\n # TODO convert longest_chain to Blockchian class from json\n self.chain = self.chainJSONdecode(longest_chain)\n return longest_chain\n return {}\n\n # def execute_sc_transactions(self):\n # for block in self.chain:\n # for tx in block.data:\n # block = self.chain[tx.bl_idx]\n # smartcontract = block.data[tx.sctx_idx]\n # if hasattr(tx, \"func\"):\n # if tx.func == \"smartcontract.update_balance\":\n # smartcontract.update_balance(tx.amount)\n # elif tx.func == \"smartcontract.add_lender\":\n # smartcontract.add_lender(\n # Lender(tx.sender, tx.amount))\n\n def chainJSONdecode(self, chainJSON):\n chain = []\n for blockJSON in chainJSON:\n\n # check if block is the Genesis block\n if blockJSON['data'] == \"Genesis block\" or len(blockJSON['data']) == 0:\n block = Block(blockJSON['timestamp'], \"Genesis block\")\n block.hash = blockJSON['hash']\n block.previous_hash = blockJSON['previous_hash']\n block.nonce = blockJSON['nonce']\n chain.append(block)\n else:\n tArr = self.json_to_transactions(blockJSON['data'])\n\n block = Block(blockJSON['timestamp'], tArr)\n print(\"HASH#####################\")\n print(blockJSON['hash'])\n block.hash = blockJSON['hash']\n block.previous_hash = blockJSON['previous_hash']\n block.nonce = blockJSON['nonce']\n\n chain.append(block)\n\n return chain\n\n def json_to_transactions(self, tArrayJSON):\n\n tArr = []\n for tJSON in tArrayJSON:\n transaction = None\n # Check if it is a smartcontract\n if \"property\" in tJSON:\n lenders = []\n if (len(tJSON['lenders']) != 0):\n for lend in tJSON['lenders']:\n l = Lender(lend['wallet_address'],\n int(lend['loaned_amount']))\n lenders.append(l)\n\n esc = tJSON['escrow']\n escrow = esc\n if tJSON['escrow'] is not None:\n escrow = Escrow(\n esc['name'], esc['wallet_address'], esc['property_id'])\n\n prop = tJSON['property']\n property = Property(prop['address'],\n prop['price'], prop['seller'], prop['rent'])\n property.property_id = prop['property_id']\n\n transaction = MusharakSmartContract(\n int(tJSON['bl_idx']),\n int(tJSON['sctx_idx']),\n tJSON['wallet_address'],\n tJSON['sender'], property,\n int(tJSON['downpayment']),\n int(tJSON['loan_granted']),\n lenders, escrow, tJSON['timestamp'])\n\n transaction.signature = tJSON['signature']\n transaction.hash = tJSON['hash']\n # Check if it is smartcontract transaction\n elif \"func\" in tJSON:\n block = self.chain[int(tJSON['bl_idx'])]\n smartcontract = block.data[int(tJSON['sctx_idx'])]\n # getting the function using the funcname passed from get_chain\n func = getattr(smartcontract, tJSON['func'])\n\n transaction = ScTransaction(\n tJSON['bl_idx'], tJSON['sctx_idx'], tJSON['sender'],\n tJSON['receiver'], func, int(tJSON['amount']), tJSON['timestamp'])\n transaction.signature = tJSON['signature']\n transaction.hash = tJSON['hash']\n # If none of the above, it is a normal transaction\n else:\n 
# sender, receiver, amount, timestamp\n                transaction = Transaction(tJSON['sender'], tJSON['receiver'], int(\n                    tJSON['amount']), tJSON['timestamp'])\n                transaction.signature = tJSON['signature']\n                transaction.hash = tJSON['hash']\n\n            tArr.append(transaction)\n\n        return tArr\n\n    def object_tojson(self, ob):\n        jsonOb = {}\n        func = None\n        if hasattr(ob, \"func\"):\n            # print(ob.func.__name__)\n            jsonOb[\"func\"] = ob.func.__name__\n        for field in filter(lambda a: not a.startswith('__'), dir(ob)):\n            attribute = getattr(ob, field)\n            if (not callable(attribute)):\n                if type(attribute) is Property:\n                    jsonOb[str(field)] = self.object_tojson(attribute)\n                    # print(\"PROPERTY CHANGED\")\n                elif type(attribute) is list:\n                    # print(\"LENDER CHANGED\")\n                    lenders = []\n                    for lender in attribute:\n                        lnd = self.object_tojson(lender)\n                        lenders.append(lnd)\n                    jsonOb[str(field)] = lenders\n                else:\n                    jsonOb[str(field)] = attribute\n        return jsonOb\n","repo_name":"alkalaminstitute/bunyaan_poc","sub_path":"blockchain/modules/Blockchain.py","file_name":"Blockchain.py","file_ext":"py","file_size_in_byte":13554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"45510193663","text":"import os\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import cross_val_score\n\ndata_dir = 'data/'\n\n\ndef process_data():\n    gene_file = 'gene_expression.csv'\n    clinical_file = 'clinical.csv'\n\n    gene_file_path = os.path.join(data_dir, gene_file)\n    clinical_file_path = os.path.join(data_dir, clinical_file)\n\n    gene_df = pd.read_csv(gene_file_path, header=0, index_col=0)\n    gene_df = gene_df.transpose()\n    clinical_df = pd.read_csv(clinical_file_path, header=0, index_col=0)\n\n    clinical_df = clinical_df.loc[:, ['cohort', 'type_cancer_3']]\n\n    df = gene_df.join(clinical_df)\n\n    df['type_cancer_3'] = (df['type_cancer_3'] == 'CO').astype(int)\n\n    X_df = df.loc[:, 'cg00009553': 'cohort']\n    cohort = df.loc[:, 'cohort']\n    Y_df = df.loc[:, 'type_cancer_3']\n\n    X_train, X_test, y_train, y_test = train_test_split(X_df, Y_df, test_size=0.2, random_state=77, stratify=cohort)\n    x_train_cohort = X_train.loc[:, 'cohort']\n    X_train = X_train.loc[:, 'cg00009553': 'cg27666046']\n    X_test = X_test.loc[:, 'cg00009553': 'cg27666046']\n\n    return X_train, y_train, X_test, y_test\n\n\ndef gbm_hyperparameter_search(x_train, y_train):\n    # Run for hyperparameter search takes ~30min to converge\n    # results\n    # {'loss': 'deviance', 'learning_rate': 0.05, 'min_samples_leaf': 3, 'n_estimators': 40, 'min_samples_split': 3,\n    # 'max_features': 'sqrt', 'max_depth': 8}\n    parameters = {\n        \"loss\": [\"deviance\"],\n        \"learning_rate\": [0.01, 0.025, 0.05, 0.075, 0.1, 0.15, 0.2],\n        \"min_samples_split\": [3, 6],\n        \"min_samples_leaf\": [1, 2, 3],\n        \"max_depth\": [3, 5, 8],\n        \"max_features\": [\"log2\", \"sqrt\"],\n        \"subsample\": [0.6, 0.7, 0.75, 0.8, 0.85, 0.9],\n        \"n_estimators\": range(10, 81, 10)\n    }\n\n    clf = GridSearchCV(GradientBoostingClassifier(), parameters, cv=10, n_jobs=-1)\n    clf.fit(x_train, y_train)\n    clf.score(x_train, y_train)\n\n    print('Accuracy: ' + str(clf.best_score_))\n    print('Best parameters: ' + str(clf.best_params_))\n    return clf.best_params_\n\n\ndef rf_hyperparameter_search(x_train, y_train):\n    parameters = {\n        
'bootstrap': [True, False],\n 'max_depth': [30, 40, 50, 60, 70, 80, 90, 100, None],\n 'max_features': ['auto', 'sqrt'],\n 'min_samples_leaf': [1, 2, 3],\n 'min_samples_split': [2, 3, 6],\n 'n_estimators': [10, 500, 1000, 1500, 2000]\n }\n\n clf = GridSearchCV(RandomForestClassifier(), parameters, cv=10, n_jobs=-1)\n clf.fit(x_train, y_train)\n clf.score(x_train, y_train)\n\n print('Accuracy: ' + str(clf.best_score_))\n print('Best parameters: ' + str(clf.best_params_))\n return clf.best_params_\n\n\ndef linear_hyperparameter_Search(x_train, y_train):\n parameters = {\n 'C': [0.001, 0.01, 0.1, 1, 10, 100],\n 'penalty': ['l1', 'l2']\n }\n\n clf = GridSearchCV(LogisticRegression(), parameters, cv=10, n_jobs=-1)\n clf.fit(x_train, y_train)\n clf.score(x_train, y_train)\n\n print('Accuracy: ' + str(clf.best_score_))\n print('Best parameters: ' + str(clf.best_params_))\n return clf.best_params_\n\n\ndef main():\n # split into train and test data\n x_train, y_train, x_test, y_test = process_data()\n union_path = data_dir + 'unionall'\n atleast2_path = data_dir + 'atleast2'\n union_all = set(np.loadtxt(union_path, dtype= str))\n intersection_2 = set(np.loadtxt(atleast2_path, dtype= str))\n x_train_union = x_train.loc[:, union_all]\n x_train_intersection = x_train.loc[:, intersection_2]\n\n # GBM Model selection\n # print('GBM Union Set')\n # gbm_hyperparameter_search(x_train_union, y_train)\n # print('GBM Intersection Set')\n # gbm_hyperparameter_search(x_train_intersection, y_train)\n print(cross_val_score(GradientBoostingClassifier(loss='deviance', subsample=0.6, learning_rate=0.15,\n min_samples_leaf=3, n_estimators=50, min_samples_split=3,\n max_features='log2', max_depth=3), x_train_union, y_train, cv=10).mean())\n\n print(cross_val_score(GradientBoostingClassifier(loss='deviance', subsample=0.7, learning_rate=0.05,\n min_samples_leaf=1, n_estimators=60, min_samples_split=3,\n max_features='sqrt', max_depth=5), x_train_intersection, y_train, cv=10).mean())\n\n # rf Model selection\n print('RF Union Set')\n rf_hyperparameter_search(x_train_union, y_train)\n print('RF Intersection Set')\n rf_hyperparameter_search(x_train_intersection, y_train)\n\n # GBM Model selection\n print('Linear Union Set')\n linear_hyperparameter_Search(x_train_union, y_train)\n print('Linear Intersection Set')\n linear_hyperparameter_Search(x_train_intersection, y_train)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"neutron101/cs229Project","sub_path":"src/model_selection_ksamuelg.py","file_name":"model_selection_ksamuelg.py","file_ext":"py","file_size_in_byte":5132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"6151816274","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: jiankaiwang\n@usage:\n python set_training_configuration.py \\\n --you_own True \\\n --label_map /notebooks/object_detection/data/label_map.pbtxt \\\n --pipeline /notebooks/object_detection/ssd_mobilenet_v1_coco_2018_01_28_docker/pipeline.config \\\n --pipelineoutput /notebooks/object_detection/data/pipeline.config \\\n\"\"\"\n\n# In[]\n\nimport argparse\nimport os\nimport sys\nsys.path.append(\"/notebooks/models/research\")\n\nimport tensorflow as tf\n\nfrom google.protobuf import text_format\nfrom object_detection.utils import config_util\nimport parser\n\n# In[]\n\ndef set_num_class(LMAP_PATH, PIPE_PATH, OUTPUT_PATH):\n number, err = parser.getClassNumber(PIPE_PATH)\n \n if not err:\n try:\n data = parser.getNumberClassInLabelMap(LMAP_PATH)\n except 
Exception as e:\n      return \"Parsing the label file failed: \" + str(e), True\n  else:\n    return \"Parsing the config file failed.\", True\n  \n  content, err = parser.getConfigFromPipeline(PIPE_PATH)\n  if number != len(data):\n    if err: return \"Parsing the configuration file failed.\", True\n    content[\"model\"].ssd.num_classes = len(data)\n  \n  return content, False\n\n# In[]\n\ndef write_new_config(config, OUTPUT_PATH):\n  \"\"\"\n  description: \n  input:\n    config: a dict-based config\n  \"\"\"\n  try:\n    pipeline_config_msg = config_util.create_pipeline_proto_from_configs(config) \n    pipeline_config_final = text_format.MessageToString(pipeline_config_msg)\n    with tf.gfile.Open(OUTPUT_PATH, \"wb\") as f:\n      f.write(pipeline_config_final)\n    return None, False\n  except Exception as e:\n    return str(e), True\n\n# In[]\n\nif __name__ == \"__main__\":\n  \n  pas = argparse.ArgumentParser()\n\n  pas.add_argument('--you_own', type=str, default=\"False\", help='use your own data')\n  pas.add_argument('--label_map', type=str, default=\"\", help='label map path')\n  pas.add_argument('--pipeline', type=str, default=\"\", help='pipe template path')\n  pas.add_argument('--pipelineoutput', type=str, default=\"\", help='pipe output path')\n\n  args = pas.parse_args()\n  \n  if str(args.you_own) == \"True\":\n    LMAP_PATH = args.label_map\n    PIPE_PATH = args.pipeline\n    OUTPUT_PATH = args.pipelineoutput\n  else:\n    model_path = \"/notebooks/object_detection\"\n    LMAP_PATH = os.path.join(model_path, \"data\", \"label_map.pbtxt\")\n    PIPE_PATH = os.path.join(model_path, \"ssd_mobilenet_v1_coco_2018_01_28_docker\", \"pipeline.config\")\n    OUTPUT_PATH = os.path.join(model_path, \"data\", \"pipeline.config\")\n  \n  assert os.path.exists(LMAP_PATH), \"Label map file is not found.\"\n  assert os.path.exists(PIPE_PATH), \"Pipeline template file is not found.\"\n\n  content, err = set_num_class(LMAP_PATH, PIPE_PATH, OUTPUT_PATH)\n  if err: print(content)\n  msg, err = write_new_config(content, OUTPUT_PATH)\n  if err: print(msg)\n  sys.exit(err)\n  \n\n","repo_name":"qpjkw/tfod_ces2019","sub_path":"object_detection/helpers/set_training_configuration.py","file_name":"set_training_configuration.py","file_ext":"py","file_size_in_byte":3025,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"5897366489","text":"import colorsys\nimport json\n\nimport requests\n\nfrom SentimentCore.core.observer import Observer\nfrom SentimentCore.util.color_space_utils import red_white_green\nfrom SentimentCore.util.rgb_xy_converter import Converter\n\n\nclass LightConsumer(Observer):\n\n    def __init__(self):\n        self._token = \"U5n6uDzCWE0dyMGOU3auhvRKS7WaxCYiWHeMIAxx\"\n        self._light_ids = [(1, 30), (2, 100)]\n        self._converter = Converter()\n\n    def update(self, tuple):\n        sentence, emotional_score, _, start, end = tuple\n        r, g, b = red_white_green(emotional_score)\n        xy = self._converter.rgb_to_xy(r, g, b)\n        state = True\n\n        for light_id, brightness in self._light_ids:\n            URL = \"http://192.168.1.2/api/{}/lights/{}/state\".format(self._token, light_id)\n            BODY = {\"on\": state, \"xy\": xy, \"bri\": brightness, \"transitiontime\": 15}\n            BODY = json.dumps(BODY)\n\n            requests.put(url=URL, data=BODY)\n\n","repo_name":"infiniteam-lh19/SentimentCore","sub_path":"SentimentCore/consumer/light_consumer.py","file_name":"light_consumer.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"30816332123","text":"import torch.utils.data as data\nimport numpy as np\nimport pickle\nimport os\nimport dgl\nimport torch\n\n\nclass BaseDataset(data.Dataset):\n\n def __init__(self, opt):\n self.opt = opt\n self.mean = 0\n self.std = 1\n super(BaseDataset, self).__init__()\n\n def get_canoncial_etypes(self):\n for _, _, file_names in sorted(os.walk(self.dir)):\n for i in range(1):\n g_sample_name = file_names[i]\n g_sample_path = os.path.join(self.dir, g_sample_name)\n g_sample, _ = dgl.load_graphs(g_sample_path)\n g_sample = g_sample[0]\n return g_sample.canonical_etypes\n\n def get_mean_std(self):\n \"\"\" Computes Mean and Standard Deviation from Training Data\n If mean/std file doesn't exist, will compute one\n :returns\n mean: N-dimensional mean\n std: N-dimensional standard deviation\n \"\"\"\n\n mean_std_cache = os.path.join(self.root, 'mean_std_cache.p')\n if not os.path.isfile(mean_std_cache):\n print('computing mean std from train data...')\n\n # read the first graph to get the dimensions\n # _, _, g_samples = sorted(os.walk(self.dir))\n for _, _, file_names in sorted(os.walk(self.dir)):\n for i in range(1):\n g_sample_name = file_names[i]\n g_sample_path = os.path.join(self.dir, g_sample_name)\n g_sample, _ = dgl.load_graphs(g_sample_path)\n g_sample = g_sample[0]\n node_feat_len = g_sample.ndata['geometric_feat']['node'].shape[1]\n edge_feat_len = g_sample.ndata['geometric_feat']['edge'].shape[1]\n face_feat_len = g_sample.ndata['geometric_feat']['face'].shape[1]\n mean_node_feat, std_node_feat = torch.zeros((node_feat_len)), torch.zeros((node_feat_len))\n mean_edge_feat, std_edge_feat = torch.zeros((edge_feat_len)), torch.zeros((edge_feat_len))\n mean_face_feat, std_face_feat = torch.zeros((face_feat_len)), torch.zeros((face_feat_len))\n count = 0\n for root, _, fnames in sorted(os.walk(self.dir)):\n for fname in fnames:\n # if int(fname.split('_')[-3]) == 0:\n fpath = os.path.join(self.dir, fname)\n g_list, label_dict = dgl.load_graphs(fpath)\n g = g_list[0]\n\n # node features\n node_feat_mean_single_graph = g.ndata['geometric_feat']['node'].mean(axis=0)\n node_feat_std_single_graph = g.ndata['geometric_feat']['node'].std(axis=0)\n mean_node_feat = mean_node_feat + node_feat_mean_single_graph\n std_node_feat = std_node_feat + node_feat_std_single_graph\n\n # edge features\n edge_feat_mean_single_graph = g.ndata['geometric_feat']['edge'].mean(axis=0)\n edge_feat_std_single_graph = g.ndata['geometric_feat']['edge'].std(axis=0)\n mean_edge_feat = mean_edge_feat + edge_feat_mean_single_graph\n std_edge_feat = std_edge_feat + edge_feat_std_single_graph\n\n # face_features\n face_feat_mean_single_graph = g.ndata['geometric_feat']['face'].mean(axis=0)\n face_feat_std_single_graph = g.ndata['geometric_feat']['face'].std(axis=0)\n mean_face_feat = mean_face_feat + face_feat_mean_single_graph\n std_face_feat = std_face_feat + face_feat_std_single_graph\n count = count + 1\n\n mean_node_feat = mean_node_feat / count\n std_node_feat = std_node_feat / count\n mean_edge_feat = mean_edge_feat / count\n std_edge_feat = std_edge_feat / count\n mean_face_feat = mean_face_feat / count\n std_face_feat = std_face_feat / count\n transform_dict = {\n 'mean_node_feat': mean_node_feat,\n 'std_node_feat': std_node_feat,\n 'node_feat_len': node_feat_len,\n 'mean_edge_feat': mean_edge_feat,\n 'std_edge_feat': std_edge_feat,\n 'edge_feat_len': edge_feat_len,\n 'mean_face_feat': mean_face_feat,\n 'std_face_feat': std_face_feat,\n 'face_feat_len': face_feat_len\n }\n with 
open(mean_std_cache, 'wb') as f:\n pickle.dump(transform_dict, f)\n print('saved: ', mean_std_cache)\n\n # open mean/std from file\n with open(mean_std_cache, 'rb') as f:\n transform_dict = pickle.load(f)\n print('loaded mean / std from cache')\n self.mean_node_feat = transform_dict['mean_node_feat']\n self.std_node_feat = transform_dict['std_node_feat']\n self.node_feat_len = transform_dict['node_feat_len']\n self.mean_edge_feat = transform_dict['mean_edge_feat']\n self.std_edge_feat = transform_dict['std_edge_feat']\n self.edge_feat_len = transform_dict['edge_feat_len']\n self.mean_face_feat = transform_dict['mean_face_feat']\n self.std_face_feat = transform_dict['std_face_feat']\n self.face_feat_len = transform_dict['face_feat_len']\n\n\ndef collate_fn(samples):\n # paths, graphs, labels = map(list, zip(*samples))\n graphs, labels = map(list, zip(*samples))\n meta_labels = {}\n keys = labels[0].keys()\n batched_graph = dgl.batch(graphs)\n for key in keys:\n meta_labels.update({key: torch.cat([label[key] for label in labels])})\n return batched_graph, meta_labels\n\n\ndef collate_fn_2(samples):\n paths, graphs, labels = map(list, zip(*samples))\n meta_labels = {}\n keys = labels[0].keys()\n batched_graph = dgl.batch(graphs)\n for key in keys:\n meta_labels.update({key: torch.cat([label[key] for label in labels])})\n return paths, batched_graph, meta_labels","repo_name":"BSResearch/hynet","sub_path":"data/base_dataset.py","file_name":"base_dataset.py","file_ext":"py","file_size_in_byte":5942,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"16818051018","text":"import re\nimport sys\nfrom pathlib import Path\nfrom hashlib import md5 as hashlib_md5\n\nsys.path.append(str(Path(__file__).resolve().parents[1]))\nfrom utils.utils import *\n\nFILE = \"input/day14.txt\"\n\n\ndef main():\n pt1()\n pt2()\n\n\ndef pt1():\n # Test\n test(task1(\"abc\"), 22728)\n\n # Solution\n input = read_file(FILE)[0]\n print(f\"Task 1 solution: {task1(input)}\")\n\n\ndef pt2():\n # Test\n test(task2(\"abc\"), 22551)\n\n # Solution\n input = read_file(FILE)[0]\n print(f\"Task 2 solution: {task2(input)}\")\n\n\n#########################################################\n\n\ndef task1(input):\n idx = 0\n answers = []\n cache = {}\n while len(answers) < 64:\n hash = cache[idx] if cache.get(idx) else md5(f\"{input}{idx}\")\n has_triplet = contains_triplet(hash)\n\n if has_triplet != None:\n for x in range(1, 1000):\n next = cache[idx + x] if cache.get(idx + x) else md5(f\"{input}{idx+x}\")\n cache[idx + x] = next\n if contains_fifthlet_with(next, has_triplet):\n answers.append(idx)\n break\n idx += 1\n return answers[63]\n\n\ndef task2(input):\n idx = 0\n answers = []\n cache = {}\n while len(answers) < 64:\n hash = strech(f\"{input}{idx}\", cache)\n has_triplet = contains_triplet(hash)\n\n if has_triplet != None:\n for x in range(1, 1001):\n next = strech(f\"{input}{idx+x}\", cache)\n if contains_fifthlet_with(next, has_triplet):\n answers.append(idx)\n break\n idx += 1\n return answers[63]\n\n\ndef md5(input):\n md5_hash = hashlib_md5()\n md5_hash.update(input.encode(\"utf-8\"))\n return md5_hash.hexdigest()\n\n\nregex = re.compile(r\"(.)\\1{2}\")\n\n\ndef contains_triplet(input):\n found = regex.search(input)\n if not found:\n return None\n return found.group(0)[0]\n\n\ndef contains_fifthlet_with(input: str, withh):\n return (withh * 5) in input\n\n\ndef strech(input, cache={}):\n if cache.get(input):\n return cache[input]\n hash = input\n for _ in 
range(2017):\n hash = md5(hash)\n\n cache[input] = hash\n return hash\n\n\nif __name__ == \"__main__\":\n # benchmark(main)\n main()\n","repo_name":"GitDevla/AoC","sub_path":"2016/day14/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"70865520190","text":"import os\r\nimport re\r\nimport shutil\r\n\r\nfrom utils.helpers import use_dotenv, await_char, output_msg\r\nimport utils.prompts as pr\r\n\r\nuse_dotenv()\r\n\r\n\r\ndef archive_reports(server=False):\r\n output = \"\"\r\n\r\n report_directory = os.environ[\"EDM_DRV\"]\r\n\r\n data_directory = os.path.join(report_directory, \"rtd_data\")\r\n archive_directory = os.path.join(report_directory, \"ARCHIVE-PLANNING_PLAUSE\")\r\n\r\n # print(data_directory)\r\n # print(archive_directory)\r\n\r\n # archive data folder with rtd reports\r\n if os.path.isdir(data_directory):\r\n output += output_msg(f\"{pr.info}Archiving...\")\r\n for filename in os.listdir(data_directory):\r\n f = os.path.join(data_directory, filename)\r\n if os.path.isfile(f):\r\n shutil.move(f, archive_directory)\r\n\r\n # remove tmp dirs: data, input\r\n if os.path.isdir(report_directory):\r\n shutil.rmtree(data_directory)\r\n\r\n # find and remove output dir\r\n for filename in os.listdir(report_directory):\r\n match = re.search(r\"^\\d{2}\\-\\d{2}\\-\\d{4}\", filename)\r\n if match:\r\n if os.path.isdir(os.path.join(report_directory, filename)):\r\n shutil.rmtree(os.path.join(report_directory, filename))\r\n output += output_msg(f\"{pr.done}Complete\")\r\n\r\n if server:\r\n return output, \"Archive RTD reports\"\r\n else:\r\n await_char(\"y\", \"Completed. Press Y to continue.\")\r\n","repo_name":"zakrzaq/RA-LT-Sync","sub_path":"scripts/rtd_reports/archive_reports.py","file_name":"archive_reports.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"7849281594","text":"#!/usr/bin/env python3\n\"\"\"\nProblem:\nA palindromic number reads the same both ways. The largest\npalindrome made from the product of two 2-digit numbers is 9009 = 91 × 99.\n\nFind the largest palindrome made from the product of two 3-digit numbers.\n\"\"\"\n\n\"\"\"\nSolution Reasoning:\n\n998001 is 999*999, so the largest possible number that this palindrome could\ntheoritically be. 
Then we check each number to see first if it is a palindrome.\nOnce we are done with that, we can check if there are two 3 digit factors.\nSince we count down from the upper bound, the first number passing both\nchecks is automatically the largest.\n\"\"\"\n\nimport math\n\n\ndef find_largest_palindrome():\n    current_check = 998001\n    while True:\n        if is_palindrome(current_check):\n            if is_double_three_digit_multiple(current_check):\n                return current_check\n        current_check -= 1\n\ndef is_palindrome(number):\n    digits = '%s' % number\n    number_length = len(digits)\n    for i in range(0, math.floor(number_length/2)):\n        if digits[i] != digits[len(digits)-1-i]:\n            return False\n    return True\n\n\ndef is_double_three_digit_multiple(number):\n    # include the square root itself, so squares of 3-digit numbers count too\n    for current in range(100, math.floor(math.sqrt(number)) + 1):\n        if number%current==0:\n            if len('%s' % (number//current))==3:\n                print(number, current, number//current)\n                return True\n    return False\n\nfind_largest_palindrome()\n","repo_name":"cmediratta/project-euler","sub_path":"solutions/problem_004/solution4.py","file_name":"solution4.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"22552732823","text":"import csv\nimport os\n\nfrom django.core.management.base import BaseCommand\n\nfrom academica.models import Plan\nfrom curso.models import Periodo\nfrom persona.models import Persona, Estudiante, EstadoEstudiante\nfrom matricula.models import (\n    ProcesoMatricula, GratuidadEstudiante, InhabilitantesMatricula,\n)\n\n\nclass Command(BaseCommand):\n    help = '''Imports the legacy (returning) student files for the enrollment process'''\n\n    def add_arguments(self, parser):\n        parser.add_argument('carpeta_archivos', type=str)\n        parser.add_argument('-log-level', type=int, choices=[0, 1, 2], default=1)\n\n    def handle(self, *args, **options):\n        importar_estudiantes_antiguos(options['carpeta_archivos'], log_level=options['log_level'])\n        return\n\n\ndef importar_estudiantes_antiguos(carpeta: str, log_level=1):\n    # locate the input files\n    for archivo in os.listdir(carpeta):\n        nombre = archivo.lower()\n        if 'planes_alums' in nombre:\n            path_archivo_estudiantes = os.path.join(carpeta, archivo)\n        if 'finanzas' in nombre:\n            path_archivo_finanzas = os.path.join(carpeta, archivo)\n        if 'biblioteca' in nombre:\n            path_archivo_biblioteca = os.path.join(carpeta, archivo)\n        if 'gratuidad' in nombre:\n            path_archivo_gratuidad = os.path.join(carpeta, archivo)\n\n    personas_no_encontradas = 0\n    registros_no_encontrados = 0\n    modificados = 0\n    with open(path_archivo_estudiantes, 'r', encoding='utf-8') as archivo_estudiantes:\n        estudiantes = csv.DictReader(archivo_estudiantes, delimiter=';')\n\n        for estudiante in estudiantes:\n            try:\n                rut = estudiante['P_AL_RUT_ALUM']\n                persona = Persona.objects.get(numero_documento=rut)\n                planes = Plan.objects.filter(carrera__id_ucampus=estudiante['P_AL_C_CARRERA'])\n\n                if not planes.exists():\n                    if estudiante['P_AL_LICENCIATURA'] != '0' and log_level >= 2:\n                        print(f\"carrera con id {estudiante['P_AL_C_CARRERA']} no existe\")\n                    continue\n\n                estud = Estudiante.objects.get(persona=persona, plan__in=planes)\n                estado = EstadoEstudiante.objects.get(id_ucampus=estudiante['P_AL_E_PLAN_ALUM'])\n\n                if estud.estado_estudiante != estado:\n                    if log_level >= 1:\n                        print(\n                            f'cambio de estado {persona.numero_documento}: '\n                            f'{estud.estado_estudiante} -> {estado}'\n                        )\n\n                    estud.estado_estudiante = estado\n                    estud.save()\n                    modificados += 1\n\n                if estudiante['P_AL_SEME_FIN'] != 'NULL' and not estud.periodo_egreso:\n                    periodo = Periodo.objects.get(\n                        ano=estudiante['P_AL_SEME_FIN'][:-1],\n                        
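# note: P_AL_SEME_FIN appears to encode the period as the year followed\n                        # by a single semester digit (e.g. '20221'), so the two slices here\n                        # split it into year and semester number\n                        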
numero=estudiante['P_AL_SEME_FIN'][-1],\n                    )\n                    estud.periodo_egreso = periodo\n                    estud.save()\n\n                    if log_level >= 1:\n                        print(f\"agrega semestre fin: {estudiante['P_AL_SEME_FIN']}\")\n\n            except Persona.DoesNotExist:\n                personas_no_encontradas += 1\n                if log_level >= 2:\n                    print(f\"persona con rut {rut} no existe\")\n\n            except Estudiante.DoesNotExist:\n                registros_no_encontrados += 1\n                if log_level >= 2:\n                    print(f\"estudiante {persona}-{planes.first().carrera} no existe\")\n\n            except EstadoEstudiante.DoesNotExist:\n                if log_level >= 1:\n                    print(f\"estado con id {estudiante['P_AL_C_CARRERA']} no existe\")\n\n    print('\\narchivo estudiantes listo')\n    print(f'{modificados} estudiantes actualizados')\n    print(f'{personas_no_encontradas} personas no encontradas')\n    print(f'{registros_no_encontrados} estudiantes no encontrados')\n\n    # create the GratuidadEstudiante and InhabilitantesMatricula records for returning students\n    proceso_matricula = ProcesoMatricula.objects.get(activo=True)\n    estudiantes_antiguos = Estudiante.objects.exclude(\n        periodo_ingreso=proceso_matricula.periodo_ingreso,\n    ).filter(\n        estado_estudiante_id__in=[4, 1],  # regular or frozen (on leave)\n    )\n    modificados = 0\n\n    for estudiante_antiguo in estudiantes_antiguos:\n        InhabilitantesMatricula.objects.update_or_create(\n            estudiante=estudiante_antiguo,\n            proceso_matricula=proceso_matricula,\n            defaults={\n                'tiene_deuda_finanzas': False,\n                'comentario_finanzas': '',\n                'tiene_deuda_biblioteca': False,\n                'comentario_biblioteca': '',\n            }\n        )\n        GratuidadEstudiante.objects.update_or_create(\n            estudiante=estudiante_antiguo,\n            proceso_matricula=proceso_matricula,\n            defaults={'tiene_gratuidad': False}\n        )\n        modificados += 1\n\n    print(f'\\ninhabilitantes y gratuidad creados para {modificados} estudiantes antiguos\\n')\n\n    # load library debts\n    modificados = 0\n    personas_no_encontradas = 0\n    with open(path_archivo_biblioteca, 'r', encoding='utf-8') as archivo_biblioteca:\n        deudas_biblioteca = csv.DictReader(archivo_biblioteca, delimiter=',')\n\n        for fila in deudas_biblioteca:\n            try:\n                # fetch the person's enrollment blockers\n                rut = fila['Rut'][:-1]\n                persona = Persona.objects.get(numero_documento=rut)\n                inhabilitantes = InhabilitantesMatricula.objects.filter(\n                    proceso_matricula=proceso_matricula,\n                    estudiante__persona=persona,\n                )\n                modificados += 1\n\n                # update the comment\n                for inhabilitante in inhabilitantes:\n\n                    inhabilitante.tiene_deuda_biblioteca = True\n                    inhabilitante.comentario_biblioteca += (\n                        f'Debe {fila[\"Titulo\"]}. '\n                    )\n                    inhabilitante.save()\n\n            except Persona.DoesNotExist:\n                personas_no_encontradas += 1\n                if log_level >= 1:\n                    print(f'persona con rut {rut} no existe')\n\n    print('archivo biblioteca listo')\n    print(f'{modificados} deudas registradas')\n    print(f'{personas_no_encontradas} personas no encontradas\\n')\n\n    # load finance debts\n    modificados = 0\n    personas_no_encontradas = 0\n    with open(path_archivo_finanzas, 'r', encoding='utf-8') as archivo_finanzas:\n        deudas_finanzas = csv.DictReader(archivo_finanzas, delimiter=',')\n\n        for fila in deudas_finanzas:\n            try:\n                # fetch the person's enrollment blockers\n                rut = fila['DOCUMENTO']\n                persona = Persona.objects.get(numero_documento=rut)\n                inhabilitantes = InhabilitantesMatricula.objects.filter(\n                    proceso_matricula=proceso_matricula,\n                    estudiante__persona=persona,\n                )\n                modificados += 1\n\n                # update the comment\n                for inhabilitante in inhabilitantes:\n\n                    inhabilitante.tiene_deuda_finanzas = True\n                    # inhabilitante.comentario_finanzas += (\n                    #     f'Debe {fila[\"Saldo 2022\"]} por la carrera {fila[\"PLAN\"]}. 
'\n                    # )\n                    inhabilitante.save()\n\n            except Persona.DoesNotExist:\n                personas_no_encontradas += 1\n                if log_level >= 1:\n                    print(f'persona con rut {rut} no existe')\n\n    print('archivo finanzas listo')\n    print(f'{modificados} deudas registradas')\n    print(f'{personas_no_encontradas} personas no encontradas\\n')\n\n    # load gratuidad (tuition-free benefit) data\n    modificados = 0\n    personas_no_encontradas = 0\n    registros_no_encontrados = 0\n    with open(path_archivo_gratuidad, 'r', encoding='utf-8') as archivo_biblioteca:\n        gratuidad_estudiantes = csv.DictReader(archivo_biblioteca, delimiter=';')\n\n        for fila in gratuidad_estudiantes:\n            try:\n                # fetch the person's gratuidad records\n                rut = fila['numero_documento']\n                persona = Persona.objects.get(numero_documento=rut)\n                gratuidades = GratuidadEstudiante.objects.filter(\n                    proceso_matricula=proceso_matricula,\n                    estudiante__persona=persona,\n                )\n\n                if not gratuidades.exists():\n                    estado = Estudiante.objects.filter(persona=persona).first().estado_estudiante\n\n                    if estado.id in [1, 4]:  # regular or deferred student without a record\n                        registros_no_encontrados += 1\n                        if log_level >= 1:\n                            print(f'no se ha encontrado un registro para el rut {rut}')\n\n                # update the gratuidad flag\n                if fila['GRATUIDAD'] == '1':\n                    for gratuidad in gratuidades:\n                        gratuidad.tiene_gratuidad = True\n                        gratuidad.save()\n                        modificados += 1\n\n            except Persona.DoesNotExist:\n                personas_no_encontradas += 1\n                if log_level >= 1:\n                    print(f'persona con rut {rut} no existe')\n\n    print('archivo gratuidad listo')\n    print(f'{modificados} gratuidades actualizadas')\n    print(f'{personas_no_encontradas} personas no encontradas')\n    print(f'{registros_no_encontrados} registros no encontrados')\n    return\n","repo_name":"cvargas91/sga","sub_path":"matricula/management/commands/importar_estudiantes_antiguos.py","file_name":"importar_estudiantes_antiguos.py","file_ext":"py","file_size_in_byte":9644,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"74459974590","text":"import boto3\nimport json\n\nclient = boto3.client('dynamodb')\n\ndef lambda_handler(event, context):\n    # Ensure that the lambda function can be called using a GET method from an API gateway, as well as directly:\n    if 'customer_id' not in event:\n        event = event['queryStringParameters']\n    \n    customer_id = event['customer_id']\n\n    # Find out what the existing basket content is:\n    response = client.get_item(\n        TableName='Basket',\n        Key={'customer_id':{'S':customer_id}}\n    )\n\n    if 'Item' in response: # There is already a basket for the user:\n        item = response['Item']\n        content = item['content']['M']\n    else: # Otherwise, display an empty basket:\n        content = {}\n\n    # Compress the DynamoDB entries from \"N\":val to just val:\n    for k, v in content.items():\n        content[k] = v['N']\n\n    response = {\n        \"statusCode\": 200,\n        \"body\": json.dumps(content)\n    }\n    \n    return response\n","repo_name":"LasseD/SampleBasketService","sub_path":"python/ListBasket.py","file_name":"ListBasket.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"24090237295","text":"import unittest \nfrom LinkedList import LinkedList, Node\n\ndef kthFromlast(linkedList, k):\n    sizeList = linkedList.size()\n    count = 0\n    stop = sizeList - k\n    start = linkedList.head\n    while count != stop: \n        start = start.next_node\n        count += 1\n    return start.data\n\nclass TestKthFromlast(unittest.TestCase): \n    test_cases = [(LinkedList(Node(1, Node(2, Node(3, Node(4))))), 2, 3)]\n    def 
test_kth_from_last(self): \n        for linkList, k, expected in self.test_cases: \n            result = kthFromlast(linkList, k)\n            self.assertEqual(result, expected, f\"Error at {linkList} got {result} instead of {expected}\")\nunittest.main()","repo_name":"nikkivbenz/Interview-Prep","sub_path":"CTCI/LinkedListCh2/kthFromLast2.py","file_name":"kthFromLast2.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"2521626377","text":"# Problem Id: 692\n# Problem Name: Top K Frequent Words, 前K个高频单词\n# Problem Url: https://leetcode-cn.com/problems/top-k-frequent-words/\n# Problem Level: Medium\n# Language: Python3\n \nfrom typing import List\n\nclass Solution:\n    def topKFrequent(self, words: List[str], k: int) -> List[str]:\n        d = {}\n        for i in words:\n            d[i] = d.get(i,0) + 1\n        res = list(d.items())\n        res.sort(key=lambda x:(-x[1],x[0]),reverse=False)\n        return [i[0] for i in res[:k]]\n    ","repo_name":"siru-xiong/leetcode-solutions","sub_path":"solutions/0692-前K个高频单词.py","file_name":"0692-前K个高频单词.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"14516158923","text":"#ursina\n#perlin_noise\n\nfrom ursina import *\nfrom ursina.prefabs.first_person_controller import FirstPersonController\n\njogo = Ursina()\nplayer = FirstPersonController(mouse_sensitivity=Vec2(100,100), position=(5, 8, 0))\n\"\"\"chao = Entity(\n    model=\"plane\",\n    scale=(100,1,100),\n    texture=\"grass\",\n    texture_scale=(10,10),\n    collider=\"box\"\n)\"\"\"\nbloquinhoDaMao = Entity(\n    parent=camera,\n    model=\"cube\",\n    color=color.white,\n    scale=0.2,\n    position=(0.35, -0.25, 0.5),\n    texture_scale=(10,10),\n    rotation=(-15, -30, -5)\n)\n\nboxes = []\nfor i in range(20):\n    for z in range(2):\n        for j in range(20):\n            box = Button(color=color.white, model=\"cube\", position=(j,z,i), texture=\"grass\", parent=scene, origin_y=0.5)\n            boxes.append(box)\n\ndef input(key):\n    for box in boxes:\n        if box.hovered:\n            if key == \"right mouse down\":\n                new = Button(color=color.white, model=\"cube\", position=box.position + mouse.normal, parent=scene, origin_y=0.5)\n                boxes.append(new)\n            if key == \"left mouse down\":\n                boxes.remove(box)\n                destroy(box)\n\njogo.run()\n","repo_name":"dr4e/Jogo-em-Python","sub_path":"mine.py","file_name":"mine.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"13139580922","text":"import asyncio\nimport aiohttp\nfrom pathlib import Path\nfrom yarl import URL\n\nfrom dotenv import load_dotenv\n\nload_dotenv()\nimport os\n\n# The following is an attempt to download MERRA data asynchronously, using aiohttp.\n# The authentication() and login() functions seem to work, successfully storing 3 cookies.\n# But when I try to use them in the actual download() GET request, I get redirected back to oauth.\n# So I can't download anything.\n\n# The multi-threaded requests-based downloader, which works, seems to have the exact same cookies at that point.\n# But then it picks up a session cookie when the first download is successful, which aiohttp does not.\n# This matches what I see in Chrome's network log when I download manually.\n\n# The download() GET request is identical to the authentication() GET request, except for the cookie.\n\nTEST_URL = 
\"https://goldsmr4.gesdisc.eosdis.nasa.gov/opendap/MERRA2/M2T1NXSLV.5.12.4/2020/03/MERRA2_400.tavg1_2d_slv_Nx.20200331.nc4.nc4?PS[0:23][232:254][117:139],T10M[0:23][232:254][117:139],U50M[0:23][232:254][117:139],V50M[0:23][232:254][117:139],time,lat[232:254],lon[117:139]\"\nauth = aiohttp.BasicAuth(os.getenv(\"MERRA2_USER\"), os.getenv(\"MERRA2_PASS\"))\ncookie_jar = aiohttp.CookieJar()\n\nCHROME_HEADERS = {\n \"Host\": \"goldsmr4.gesdisc.eosdis.nasa.gov\",\n \"Connection\": \"keep-alive\",\n \"Cache-Control\": \"max-age=0\",\n \"DNT\": \"1\",\n \"Upgrade-Insecure-Requests\": \"1\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36\",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\",\n \"Sec-Fetch-Site\": \"none\",\n \"Sec-Fetch-Mode\": \"navigate\",\n \"Sec-Fetch-User\": \"?1\",\n \"Sec-Fetch-Dest\": \"document\",\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"Accept-Language\": \"en-US,en;q=0.9\",\n}\n\n\ndef set_cookies(response, cookie_jar, url):\n \"\"\"aiohttp can't recieve cookies because NASA uses an obsolete date format in its cookies (timezone as -0000 instead of GMT). Failure point is python's http.cookies.BaseCookie.load() fails to read the old timezones. Unfortunately this means I can't follow redirects\"\"\"\n for hdr in response.headers.getall(aiohttp.hdrs.SET_COOKIE, ()):\n response.cookies.load(hdr.replace(\" -0000\", \" GMT\"))\n cookie_jar.update_cookies(response.cookies, url)\n\n\nasync def login(session, url):\n r = await session.get(url, auth=auth, allow_redirects=False)\n set_cookies(r, cookie_jar, url)\n redirect_url = URL(\n r.headers.get(aiohttp.hdrs.LOCATION), encoded=not session._requote_redirect_url\n )\n r = await session.get(redirect_url, allow_redirects=False)\n set_cookies(r, cookie_jar, redirect_url)\n return URL(\n r.headers.get(aiohttp.hdrs.LOCATION), encoded=not session._requote_redirect_url\n )\n\n\ndef filename_from_url(url):\n return str(url).split(\"/\")[-1].split(\".nc4?\")[0]\n\n\nasync def download(\n session, url, directory: Path, write_async=False, chunk_size=512 * 1024\n):\n fname = directory / filename_from_url(url)\n async with session.get(url, allow_redirects=False) as resp:\n set_cookies(resp, cookie_jar, url)\n with open(fname, \"wb\") as fd:\n while True:\n chunk = await resp.content.read(chunk_size)\n if not chunk:\n break\n fd.write(chunk)\n print(\"downloaded\")\n\n\nasync def authentication(session, url):\n async with session.get(url) as resp:\n if resp.status == 401:\n new_url = await login(session, resp.url)\n old_url = URL(url, encoded=not session._requote_redirect_url)\n if old_url == new_url:\n return new_url\n else:\n raise Exception(\n f\"Login process has not returned original URL.\\nOriginal: {str(old_url)}\\nReturned: {str(new_url)}\"\n )\n\n else:\n return URL(url, encoded=not session._requote_redirect_url)\n\n\nasync def main():\n async with aiohttp.ClientSession(cookie_jar=cookie_jar) as client:\n repeat_url = await authentication(client, URL(TEST_URL))\n await download(client, repeat_url, Path(\"./\"))\n\n\nasync def on_request_end(session, trace_config_ctx, params):\n print(\n \"\\nEnding %s request for %s. 
I sent: %s\"\n % (params.method, params.url, params.headers)\n )\n print(\"\\nSent headers: %s\" % params.response.request_info.headers)\n\n\nasync def dl(url):\n # trace_config = aiohttp.TraceConfig()\n # trace_config.on_request_end.append(on_request_end)\n # async with aiohttp.ClientSession(cookie_jar=cookie_jar, trace_configs=[trace_config]) as client:\n async with aiohttp.ClientSession(cookie_jar=cookie_jar) as client:\n await download(client, first_url, Path(r\"./\"))\n\n\nasyncio.run(main())\n","repo_name":"TrentonBush/merra2_subsetter","sub_path":"scratch_work/aiohttp_attempt.py","file_name":"aiohttp_attempt.py","file_ext":"py","file_size_in_byte":4877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"71968490432","text":"import numpy as np\nimport math\nimport time\nimport matplotlib.pyplot as plt\n\nfrom Quadrotor import Quadrotor\nfrom Plotting import Plotting\nfrom MPCController import AltitudeMPC, AttitudeMPC, PositionMPC\n\nclass Trajectory:\n def __init__(self, sim_time=10.0, dt = 0.02):\n self.sim_time = sim_time\n self.dt = dt\n self.ref = self.desiredTrajectory()\n\n self.x_ref = np.array(self.ref)[:,0]\n self.y_ref = np.array(self.ref)[:,1]\n self.z_ref = np.array(self.ref)[:,2]\n self.psi_ref = np.array(self.ref)[:,3]\n \n def desiredTrajectory(self):\n ref = []\n for i in range(int(self.sim_time/self.dt)):\n t = i*self.dt\n x = 5*math.sin(2*math.pi*t/10)\n y = 5*math.cos(2*math.pi*t/10)\n z = -0.5*t\n yaw = 2*math.pi*t/10\n ref.append([x,y,z,yaw])\n return ref\n \n def desired_altitude(self, quad, idx, N_):\n # initial state / last state\n x_ = np.zeros((N_+1, 2))\n u_ = np.zeros((N_, 1))\n\n z_ref_ = self.z_ref[idx:(idx+N_)]\n length = len(z_ref_)\n if length < N_:\n z_ex = np.ones(N_ - length)*z_ref_[-1]\n z_ref_ = np.concatenate((z_ref_, z_ex), axis=None)\n \n dz_ref_ = np.diff(z_ref_)\n dz_ref_ = np.concatenate((quad.dpos[2], dz_ref_), axis=None)\n\n ddz_ref_ = np.diff(dz_ref_)\n ddz_ref_ = np.concatenate((ddz_ref_[0], ddz_ref_), axis=None)\n\n thrust_ref_ = (quad.g - ddz_ref_)*quad.mq\n \n x_ = np.array([z_ref_, dz_ref_]).T\n x_ = np.concatenate((np.array([[quad.pos[2], quad.dpos[2]]]), x_), axis=0)\n u_ = np.array([thrust_ref_]).T\n # print(x_)\n # print(u_)\n return x_, u_\n\n def desired_position(self, quad, idx, N_, thrust):\n # initial state / last state\n x_ = np.zeros((N_+1, 4))\n u_ = np.zeros((N_, 2))\n\n x_ref_ = self.x_ref[idx:(idx+N_)]\n y_ref_ = self.y_ref[idx:(idx+N_)]\n length = len(x_ref_)\n if length < N_:\n x_ex = np.ones(N_ - length)*x_ref_[-1]\n x_ref_ = np.concatenate((x_ref_, x_ex), axis=None)\n\n y_ex = np.ones(N_ - length)*y_ref_[-1]\n y_ref_ = np.concatenate((y_ref_, y_ex), axis=None)\n\n dx_ref_ = np.diff(x_ref_)\n dx_ref_ = np.concatenate((quad.dpos[0], dx_ref_), axis=None)\n dy_ref_ = np.diff(y_ref_)\n dy_ref_ = np.concatenate((quad.dpos[1], dy_ref_), axis=None)\n\n ddx_ref_ = np.diff(dx_ref_)\n ddx_ref_ = np.concatenate((ddx_ref_[0], ddx_ref_), axis=None)\n ddy_ref_ = np.diff(dy_ref_)\n ddy_ref_ = np.concatenate((ddy_ref_[0], ddy_ref_), axis=None)\n\n the_ref_ = np.arcsin(ddx_ref_*quad.mq/thrust)\n phi_ref_ = -np.arcsin(ddy_ref_*quad.mq/thrust)\n\n x_ = np.array([x_ref_, y_ref_, dx_ref_, dy_ref_]).T\n x_ = np.concatenate((np.array([[quad.pos[0], quad.pos[1], quad.dpos[0], quad.dpos[1]]]), x_), axis=0)\n u_ = np.array([phi_ref_, the_ref_]).T\n \n # print(x_)\n # print(u_)\n return x_, u_\n\n def desired_attitude(self, quad, idx, N_, phid, thed):\n # initial state / last state\n 
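# note: the attitude references below appear to be generated by finite\n        # differences: np.diff of the reference angles gives the rates, a second\n        # np.diff gives the accelerations, and the rigid-body inverse dynamics\n        # (with inertias Ix, Iy, Iz and the gyroscopic coupling terms) then\n        # recovers the feedforward torque references\n        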
x_ = np.zeros((N_+1, 6))\n u_ = np.zeros((N_, 3))\n\n phi_ref_ = phid\n the_ref_ = thed\n\n psi_ref_ = self.psi_ref[idx:(idx+N_)]\n length = len(psi_ref_)\n if length < N_:\n psi_ex = np.ones(N_ - length)*psi_ref_[-1]\n psi_ref_ = np.concatenate((psi_ref_, psi_ex), axis=None)\n\n dphi_ref_ = np.diff(phi_ref_)\n dphi_ref_ = np.concatenate((quad.dori[0], dphi_ref_), axis=None)\n dthe_ref_ = np.diff(the_ref_)\n dthe_ref_ = np.concatenate((quad.dori[1], dthe_ref_), axis=None)\n dpsi_ref_ = np.diff(psi_ref_)\n dpsi_ref_ = np.concatenate((quad.dori[2], dpsi_ref_), axis=None)\n\n ddphi_ref_ = np.diff(dphi_ref_)\n ddphi_ref_ = np.concatenate((ddphi_ref_[0], ddphi_ref_), axis=None)\n ddthe_ref_ = np.diff(dthe_ref_)\n ddthe_ref_ = np.concatenate((ddthe_ref_[0], ddthe_ref_), axis=None)\n ddpsi_ref_ = np.diff(dpsi_ref_)\n ddpsi_ref_ = np.concatenate((ddpsi_ref_[0], ddpsi_ref_), axis=None)\n\n tau_phi_ref_ = (quad.Ix*ddphi_ref_ - dthe_ref_*dpsi_ref_*(quad.Iy-quad.Iz))/quad.la\n tau_the_ref_ = (quad.Iy*ddthe_ref_ - dphi_ref_*dpsi_ref_*(quad.Iz-quad.Ix))/quad.la\n tau_psi_ref_ = quad.Iz*ddpsi_ref_ - dphi_ref_*dthe_ref_*(quad.Ix-quad.Iy)\n\n x_ = np.array([phi_ref_, the_ref_, psi_ref_, dphi_ref_, dthe_ref_, dpsi_ref_]).T\n x_ = np.concatenate((np.array([[quad.ori[0], quad.ori[1], quad.ori[2], quad.dori[0], quad.dori[1], quad.dori[2]]]), x_), axis=0)\n u_ = np.array([tau_phi_ref_, tau_the_ref_, tau_psi_ref_]).T\n\n # print(x_)\n # print(u_)\n return x_, u_\n\n# quad = Quadrotor()\n# traj = Trajectory()\n# traj.desired_altitude(quad, 495, np.array([1,2]), 30)\n\n# exit()\n\nif __name__ == \"__main__\":\n quad = Quadrotor()\n\n dt = 0.02\n N = 50\n sim_time = 10.0\n iner = 0\n\n traj = Trajectory(sim_time, dt)\n\n al = AltitudeMPC(quad, T=dt, N=N)\n po = PositionMPC(quad, T=dt, N=N)\n at = AttitudeMPC(quad, T=dt, N=N)\n\n his_thrust = []; his_tau_phi = []; his_tau_the = []; his_tau_psi = []\n his_time = []\n\n while iner - sim_time/dt < 0.0:\n # print(iner)\n # Solve altitude -> thrust\n next_al_trajectories, next_al_controls = traj.desired_altitude(quad, iner, N)\n thrusts = al.solve(next_al_trajectories, next_al_controls)\n\n # Solve position -> phid, thed\n next_po_trajectories, next_po_controls = traj.desired_position(quad, iner, N, thrusts)\n phids, theds = po.solve(next_po_trajectories, next_po_controls, thrusts)\n\n # Solve attitude -> tau_phi, tau_the, tau_psi\n next_at_trajectories, next_at_controls = traj.desired_attitude(quad, iner, N, phids, theds)\n tau_phis, tau_thes, tau_psis = at.solve(next_at_trajectories, next_at_controls)\n\n quad.updateConfiguration(thrusts[0], tau_phis[0], tau_thes[0], tau_psis[0], dt)\n \n # Store values\n his_thrust.append(thrusts[0])\n his_tau_phi.append(tau_phis[0])\n his_tau_the.append(tau_thes[0])\n his_tau_psi.append(tau_psis[0])\n his_time.append(iner*dt)\n\n iner += 1\n \n print(np.array(quad.path))\n\n # Plot Drone\n plot = Plotting(\"Quadrotor\")\n plot.plot_path(quad.path)\n plot.plot_path(traj.ref)\n\n # Plot control\n plt.figure()\n plt.subplot(221)\n plt.plot(his_time, his_thrust)\n plt.title(\"The total thrust\")\n plt.xlabel(\"Time [s]\")\n plt.ylabel(\"Value [N]\")\n\n plt.subplot(222)\n plt.plot(his_time, his_tau_phi)\n plt.title(\"The tau phi\")\n plt.xlabel(\"Time [s]\")\n plt.ylabel(\"Value [N.m]\")\n\n plt.subplot(223)\n plt.plot(his_time, his_tau_the)\n plt.title(\"The tau theta\")\n plt.xlabel(\"Time [s]\")\n plt.ylabel(\"Value [N.m]\")\n\n plt.subplot(224)\n plt.plot(his_time, his_tau_psi)\n plt.title(\"The tau psi\")\n plt.xlabel(\"Time 
[s]\")\n    plt.ylabel(\"Value [N.m]\")\n\n    plt.show()","repo_name":"duynamrcv/quadrotor_mpc","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7121,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"60"} +{"seq_id":"73917463871","text":"\"\"\"\nThis problem was asked by Twitter.\n\nGiven a binary tree, find the lowest common ancestor (LCA) of two given nodes in the tree. Assume that each node in the tree also has a pointer to its parent.\n\nAccording to the definition of LCA on Wikipedia: “The lowest common ancestor is defined between two nodes v and w as the lowest node in T that has both v and w as descendants (where we allow a node to be a descendant of itself).”\n\"\"\"\n\n\nclass Node:\n    def __init__(self, val):\n        self.val = val\n        self.parent = None\n\n\ndef get_ancestors(node):\n    # walk up the parent pointers, collecting values from the node itself\n    # (a node counts as its own ancestor) up to the root\n    chain = []\n    while node is not None:\n        chain.append(node.val)\n        node = node.parent\n    return chain\n\n\ndef get_lowest_common_ancestor(root_val, node1, node2):\n    # the first ancestor of node1 (walking upward) that is also an ancestor\n    # of node2 is the lowest common ancestor\n    ancestors2 = set(get_ancestors(node2))\n    for val in get_ancestors(node1):\n        if val in ancestors2:\n            return val\n    return root_val\n\n\na1 = Node(40)\na2 = Node(20)\na3 = Node(60)\na4 = Node(10)\na5 = Node(30)\na6 = Node(50)\na7 = Node(70)\n\na2.parent = a1\na3.parent = a1\na4.parent = a2\na5.parent = a2\na6.parent = a3\na7.parent = a3\n\nassert get_lowest_common_ancestor(a1.val, a6, a1) == 40\n","repo_name":"dombroks/Daily-Coding-Problems","sub_path":"Twitter_Problems/Twitter_Problem_2.py","file_name":"Twitter_Problem_2.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"60"} +{"seq_id":"1239583230","text":"import heapq\r\nimport sys\r\nimport typing\r\n\r\nimport numba as nb\r\nimport numpy as np\r\n\r\n\r\n@nb.njit((nb.i8[:, :], ), cache=True)\r\ndef solve(ab: np.ndarray) -> typing.NoReturn:\r\n    n = len(ab)\r\n    a, b = ab[:, 0], ab[:, 1]\r\n    for i in range(n):\r\n        b[i] += a[i]\r\n    c = np.unique(np.hstack((a, b)))\r\n    a = np.searchsorted(c, a)\r\n    b = np.searchsorted(c, b)\r\n    s = np.zeros(1 << 19, np.int64)\r\n    for i in range(n):\r\n        s[a[i]] += 1\r\n        s[b[i]] -= 1\r\n    s = s.cumsum()\r\n\r\n\r\n    res = np.zeros(n + 1, np.int64)\r\n    for i in range(1 << 19):\r\n        x = s[i]\r\n        if not x: continue\r\n        l, r = c[i], c[i + 1]\r\n        res[x] += r - l\r\n\r\n    for x in res[1:]:\r\n        print(x)\r\n\r\n\r\n\r\ndef main() -> typing.NoReturn:\r\n    n = int(input())\r\n    ab = np.array(\r\n        sys.stdin.read().split(),\r\n        dtype=np.int64,\r\n    ).reshape(n, 2)\r\n    solve(ab)\r\n\r\n\r\nmain()\n","repo_name":"kagemeka/atcoder-submissions","sub_path":"jp.atcoder/abc221/abc221_d/26294833.py","file_name":"26294833.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"4443956791","text":"from watertap.tools.parameter_sweep import PredeterminedFixedSample, parameter_sweep\nimport watertap.examples.flowsheets.RO_with_energy_recovery.RO_with_energy_recovery as RO\nfrom watertap.examples.flowsheets.RO_with_energy_recovery.RO_with_energy_recovery import (\n    ERDtype,\n)\n\n\ndef set_up_sensitivity():\n    outputs = {}\n\n    m = RO.build(erd_type=ERDtype.pump_as_turbine)\n    RO.set_operating_conditions(m)\n    RO.initialize_system(m)\n    RO.solve(m)\n    m.fs.feed.properties[0].flow_mass_phase_comp.unfix()\n    
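# note: the feed specification appears to be switched here from component\n    # mass flows (unfixed above) to volumetric flow plus NaCl concentration\n    # (fixed below), which keeps the number of degrees of freedom unchanged\n    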
m.fs.feed.properties[0].flow_vol_phase[\"Liq\"].fix()\n    m.fs.feed.properties[0].conc_mass_phase_comp[\"Liq\", \"NaCl\"].fix()\n    RO.optimize_set_up(m)\n    m.fs.eq_minimum_water_flux.deactivate()\n    RO.solve(m)\n    RO.display_system(m)\n    RO.display_design(m)\n\n    # uncomment any of the entries below to record additional outputs\n\n    outputs[\"LCOW\"] = m.fs.costing.LCOW\n    # outputs[\"RO Water Flux\"] = m.fs.RO.flux_mass_phase_comp[0, 1, \"Liq\", \"H2O\"]\n    # outputs[\"RO Membrane Area\"] = m.fs.RO.area\n    # outputs[\"RO Energy Consumption\"] = m.fs.costing.specific_energy_consumption\n    # outputs[\"System Capital Cost\"] = m.fs.costing.aggregate_capital_cost\n    # outputs[\"RO Capital Cost\"] = m.fs.RO.costing.capital_cost\n    # outputs[\"Pump Capital Cost\"] = m.fs.P1.costing.capital_cost\n    # outputs[\"ERD Capital Cost\"] = m.fs.ERD.costing.capital_cost\n    # outputs[\"RO Operating Cost\"] = m.fs.RO.costing.fixed_operating_cost\n    # outputs[\n    #     \"MLC Operating Cost\"\n    # ] = m.fs.costing.maintenance_labor_chemical_operating_cost\n    # outputs[\"Feed Flow Rate\"] = m.fs.feed.properties[0].flow_vol_phase[\"Liq\"]\n    # outputs[\"Permeate Flow Rate\"] = m.fs.product.properties[0].flow_vol_phase[\"Liq\"]\n    # outputs[\"Retentate Flow Rate\"] = m.fs.disposal.properties[0].flow_vol_phase[\"Liq\"]\n    # outputs[\"RO Operating Pressure\"] = m.fs.RO.inlet.pressure[0]\n    # outputs[\"RO Permeate H2O Mass Flow\"] = m.fs.RO.permeate.flow_mass_phase_comp[\n    #     0, \"Liq\", \"H2O\"\n    # ]\n    # outputs[\"RO Permeate Salt Mass Flow\"] = m.fs.RO.permeate.flow_mass_phase_comp[\n    #     0, \"Liq\", \"NaCl\"\n    # ]\n    # outputs[\"RO Retentate H2O Mass Flow\"] = m.fs.RO.retentate.flow_mass_phase_comp[\n    #     0, \"Liq\", \"H2O\"\n    # ]\n    # outputs[\"RO Retentate Salt Mass Flow\"] = m.fs.RO.retentate.flow_mass_phase_comp[\n    #     0, \"Liq\", \"NaCl\"\n    # ]\n\n    return outputs, m\n\n\ndef run_analysis(case_num=1, nx=5, interpolate_nan_outputs=True, output_filename=None):\n\n    if output_filename is None:\n        output_filename = \"sensitivity_\" + str(case_num) + \".csv\"\n\n    # when called from the command line\n    case_num = int(case_num)\n    interpolate_nan_outputs = bool(interpolate_nan_outputs)\n\n    outputs, m = set_up_sensitivity()\n\n    # choose parameter sweep from case structure\n    sweep_params = {}\n\n    if case_num == 1:\n        # Need to unfix mass recovery of water (or simply sweep across it instead of recovery_vol)\n        m.fs.RO.recovery_mass_phase_comp.unfix()\n\n        sweep_params[\"mass_concentration\"] = PredeterminedFixedSample(\n            m.fs.feed.properties[0].conc_mass_phase_comp[\"Liq\", \"NaCl\"],\n            [0.963, 1.927, 4.816],\n        )\n        sweep_params[\"volumetric_recovery\"] = PredeterminedFixedSample(\n            m.fs.RO.recovery_vol_phase[0, \"Liq\"], [0.7, 0.8, 0.9]\n        )\n    else:\n        raise ValueError(f\"{case_num} is not yet implemented\")\n\n    global_results = parameter_sweep(\n        m,\n        sweep_params,\n        outputs,\n        csv_results_file_name=output_filename,\n        optimize_function=RO.solve,\n        interpolate_nan_outputs=interpolate_nan_outputs,\n    )\n\n    return global_results, sweep_params, m\n\n\nif __name__ == \"__main__\":\n    results, sweep_params, m = run_analysis()\n    print(results)\n","repo_name":"MichaelPesce/watertap","sub_path":"watertap/examples/flowsheets/RO_with_energy_recovery/multi_sweep.py","file_name":"multi_sweep.py","file_ext":"py","file_size_in_byte":3725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"60"} +{"seq_id":"19692356294","text":"\"\"\"\nfind_elements_by_xxx()\nPurpose:\n    1). Finds and locates all elements that match the given condition\n    2). 
the located elements are returned in array (list) format;\nNote: reading list-format data requires specifying an index (indices start from 0), and the list can be iterated\n4.2 Example\nRequirement: open the page 注册A.html and complete the following steps\n    1). Use tag_name to locate the password input box (the second input tag) and type: 123456\n    2). Close the browser window after 3 seconds\n\"\"\"\n# imports\nfrom selenium import webdriver\nfrom time import sleep\n\n# get the browser driver object\ndriver = webdriver.Firefox()\n\n# open the url\nurl = r\"D:\\python_heima_shipk\\软件测试\\二\\8天web自动化全套测试—资料\\web自动化_day01_课件+笔记+资料+代码\\02_其他资料\\注册A.html\"\ndriver.get(url)\n\n# use tag_name to locate the password box and type 123456\n# note: if the page contains several elements with the same tag name, the default is to return the first one\n# find_elements finds the element at a given position; the result is a list, so it must be accessed by index or iterated over (indices start from 0)\ndriver.find_elements_by_tag_name(\"input\")[1].send_keys(\"123456\")\n\n# pause for three seconds\nsleep(3)\n\n# quit the browser driver\ndriver.quit()\n\n","repo_name":"liuyuhao817/pythonwork","sub_path":"web自动化/day02/day02_04_元素定位find_elements_by_xxx.py","file_name":"day02_04_元素定位find_elements_by_xxx.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"32021729191","text":"from django.db import models\nfrom general.models import BaseModel\n\n\nCOMPANY_PROFILE_JOB_DESIGNATION = (\n    ('ceo', 'CEO'),\n    ('managing_director', 'Managing Director'),\n    ('language_trainer', 'Language Trainer'),\n    ('admission_counsellor', 'Admission Counsellors'),\n    ('ielts_trainer', 'IELTS Trainer'),\n    ('video_presenter', 'Video Presenter'),\n    ('content_writers', 'Content Writer'),\n    ('telecaller', 'Telecaller')\n)\n\nCOMPANY_PROFILE_JOB_TYPE = (\n    ('full_time', 'Full Time'),\n    ('part_time', 'Part Time')\n)\n\nCOMPANY_PROFILE_GALLERY_TYPE = (\n    ('image', 'Image'),\n    ('video', 'Video'),\n    ('link', 'Link')\n)\n\nCOMPANY_PROFILE_GALLERY_SLOT = (\n    ('slot_1', 'Slot 1'),\n    ('slot_2', 'Slot 2'),\n    ('slot_3', 'Slot 3'),\n    ('slot_4', 'Slot 4'),\n    ('slot_5', 'Slot 5'),\n    ('slot_6', 'Slot 6'),\n    ('slot_7', 'Slot 7'),\n    ('slot_8', 'Slot 8'),\n)\n\nclass Achievements(BaseModel):\n    title = models.CharField(max_length=255)\n    image = models.ImageField(upload_to=\"company-profile/images/\", blank=True, null=True)\n    alt = models.CharField(max_length=255,null=True, blank=True)\n    description = models.TextField(blank=True, null=True)\n\n    class Meta:\n        db_table = 'company_profile_achievements'\n        verbose_name = ('Achievement')\n        verbose_name_plural = ('Achievements')\n        ordering = ('id',)\n\n    def __str__(self):\n        return self.title\n    \n\nclass Testimonials(BaseModel):\n    name = models.CharField(max_length=255)\n    quote = models.TextField()\n    rating_count = models.PositiveIntegerField()\n    image = models.ImageField(upload_to=\"company_profile/testimonials/images\", null=True, blank=True)\n    alt = models.CharField(max_length=255,null=True, blank=True)\n    video = models.FileField(upload_to=\"company_profile/testimonials/video\")\n\n    class Meta:\n        db_table = 'company_profile_testimonials'\n        verbose_name = ('Testimonial')\n        verbose_name_plural = ('Testimonials')\n        ordering = ('id',)\n\n    def __str__(self):\n        return self.name\n\n\nclass Department(BaseModel):\n    name = models.CharField(max_length=255, null=True, blank=True)\n\n    class Meta:\n        db_table = 'company_profile_deparment'\n        verbose_name = ('Department')\n        verbose_name_plural = ('Department')\n        ordering = ('id',)\n\n    def __str__(self):\n        return self.name\n\n\nclass OurTeam(BaseModel):\n    name = models.CharField(max_length=255)\n    photo = models.FileField(upload_to=\"company_profile/our_team/photo\", null=True, blank=True)\n    designation = models.CharField(max_length=255)\n    alt = models.CharField(max_length=255,null=True, blank=True)\n    head = models.BooleanField(default=False)\n    department = 
models.ForeignKey(\"company_profile.Department\", on_delete=models.CASCADE, null=True, blank=True)\n\n class Meta:\n db_table = 'company_profile_our_team'\n verbose_name = ('Our Team')\n verbose_name_plural = ('Our Teams')\n ordering = ('id',)\n\n def __str__(self):\n return self.name\n\n\nclass Career(BaseModel):\n designation = models.CharField(max_length=255)\n job_description = models.TextField()\n job_type = models.CharField(choices=COMPANY_PROFILE_JOB_TYPE, max_length=255)\n\n class Meta:\n db_table = 'company_profile_career'\n verbose_name = ('Career')\n verbose_name_plural = ('Careers')\n ordering = ('id',)\n\n def __str__(self):\n return self.designation\n \n\nclass CareerEnquiry(BaseModel):\n job = models.ForeignKey('company_profile.Career', on_delete=models.CASCADE)\n name = models.CharField(max_length=100)\n phone = models.CharField(max_length=110)\n email = models.CharField(max_length=110)\n cv = models.FileField(upload_to=\"company_profile/career_enquiry/cv/\")\n\n class Meta:\n db_table = 'company_profile_career_enquiry'\n verbose_name = ('Career Enquiry')\n verbose_name_plural = ('Careers Enquiry')\n ordering = ('id',)\n\n def __str__(self):\n return self.name\n \n\nclass Enquiry(BaseModel):\n name = models.CharField(max_length=255, null=True, blank=True)\n phone = models.CharField(max_length=255, null=True, blank=True)\n email = models.EmailField(null=True, blank=True)\n message = models.TextField(max_length=255, null=True, blank=True)\n\n class Meta:\n db_table = 'company_profile__enquiry'\n verbose_name = (' Enquiry')\n verbose_name_plural = ('Enquiries')\n ordering = ('id',)\n\n def __str__(self):\n return self.name\n \n\nclass CompanyCount(BaseModel):\n successfull_students = models.IntegerField( null=True, blank=True)\n languages_trainee = models.IntegerField( null=True, blank=True)\n awards_won = models.IntegerField( null=True, blank=True)\n courses = models.IntegerField( null=True, blank=True)\n\n class Meta:\n db_table = 'courses_company_count'\n verbose_name = ('Company Count')\n verbose_name_plural = ('Company Count')\n ordering = ('id',)\n \n def __str__(self):\n return str(self.successfull_students)\n \n\nclass Gallery(BaseModel):\n type = models.CharField(choices=COMPANY_PROFILE_GALLERY_TYPE, max_length=255)\n file = models.FileField(upload_to='company_profile/gallery/', null=True, blank=True)\n alt = models.CharField(max_length=255,null=True, blank=True)\n file_link = models.CharField(max_length=255, null=True, blank=True)\n slot = models.CharField(choices=COMPANY_PROFILE_GALLERY_SLOT, max_length=122, null=True, blank=True)\n thumbnail = models.ImageField(upload_to='company_profile/gallery/thumbnail/', null=True, blank=True)\n thumbnail_alt = models.CharField(max_length=255,null=True, blank=True)\n class Meta:\n db_table = 'courses_gallery'\n verbose_name = ('Gallery')\n verbose_name_plural = ('Galleries')\n ordering = ('id',)\n\n def __str__(self):\n return self.type\n\n\n","repo_name":"devaccolades/english-cafe","sub_path":"company_profile/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"3163358089","text":"from fastapi import FastAPI\n\nfrom . 
import db_database, models\nfrom .routers import txt_upload, user\n\napp = FastAPI()\n\n\n@app.get(\"/\")\nasync def read_main():\n    return {\"msg\": \"Hello World\"}\n\n\nmodels.Base.metadata.create_all(bind=db_database.engine)\n\napp.include_router(user.router)\napp.include_router(txt_upload.router)\n\n\n# current_user: schemas.User = Depends(get_current_user)\n","repo_name":"rohitk523/docker-communication","sub_path":"FastAPI/app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"5045333196","text":"# -*- coding: utf-8 -*-\n\n# input values\ncounter = int(input())\ninput_list = [input() for _ in range(counter)]\n\n# when rounding down\ndef roundDownTime(time_str):\n\t# 0~4 become 0\n\tif 0 <= int(time_str[3:4]) <= 4:\n\t\treturn time_str[0:3] + \"0\"\n\t# 5~9 become 5\n\tif 5 <= int(time_str[3:4]) <= 9:\n\t\treturn time_str[0:3] + \"5\"\n\n# when rounding up\ndef roundUpTime(time_str):\n\t# 0 stays as is\n\tif int(time_str[3:4]) == 0:\n\t\treturn time_str\n\t# 1~5 become 5\n\tif 1 <= int(time_str[3:4]) <= 5:\n\t\treturn time_str[0:3] + \"5\"\n\t# 6~9 become 0\n\tif 6 <= int(time_str[3:4]) <= 9:\n\t\t# only in the 50-minute range does the hour digit change\n\t\tif time_str[2:3] == \"5\":\n\t\t\treturn str(int(time_str[0:2]) + 1) + \"00\"\n\t\t# normally the tens-of-minutes digit changes\n\t\telse:\n\t\t\treturn time_str[0:2] + str(int(time_str[2:3]) + 1) + \"0\"\n\n# lists of start times and end times\nround_from_list = []\nround_to_list = []\nfor input_str in input_list:\n\t# apply the rounding\n\tround_from_list.append(roundDownTime(input_str[0:4]))\n\tround_to_list.append(roundUpTime(input_str[5:9]))\n\n# build a 2D list of start/end time pairs\ntime_list = [[0 for col in range(2)] for row in range(counter)]\nfor i in range(counter):\n\ttime_list[i][0] = round_from_list[i]\n\ttime_list[i][1] = round_to_list[i]\n\n# list storing every time in 5-minute steps\ntime_table = []\nfor i in range(0, 2401, 5):\n\t# these are times, so last-two-digit values of 60 or more are not needed\n\tif 0 <= int(str(i)[-2:]) < 60:\n\t\ttime_table.append([i, False])\n\n# loop over the start/end pairs\nfor i in range(counter):\n\tstart_time = time_list[i][0]\n\tend_time = time_list[i][1]\n\t# set the table entries from the start time to the end time to True\n\tfor j in range(int(start_time), int(end_time), 5):\n\t\t# these are times, so last-two-digit values of 60 or more are not needed\n\t\tif 0 <= int(str(j)[-2:]) < 60:\n\t\t\t# map the HHMM value to its index in time_table (12 slots per hour)\n\t\t\ttime_table[(j // 100) * 12 + (j % 100) // 5][1] = True\n\n# use a flag to detect the start/end boundaries\nisRain = False\nfor key, value in time_table:\n\tif value is True and isRain is False:\n\t\tisRain = True\n\t\t# zero-pad and convert back to a string\n\t\tstart_time = str(\"%04d\" % key)\n\tif value is False and isRain is True:\n\t\tisRain = False\n\t\tend_time = str(\"%04d\" % key)\n\t\t# print the start/end pairs that matched the condition\n\t\tprint(start_time + \"-\" + end_time)","repo_name":"Coki628/kyopro_submissions","sub_path":"AtCoder/ABC001d3.py","file_name":"ABC001d3.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"12417127719","text":"from dataclasses import dataclass\nfrom typing import Any, TypeAlias, Iterator, ClassVar\nfrom xml.dom.minidom import Entity\n\nfrom src.engine.acting.action import Action\nfrom src.lib.query import Q\nfrom src.lib.vector.vector import sub2, floordiv2\nfrom src.library.ai_modules.spacial_memory import SpacialMemory\nfrom src.library.ais.dummy_ai import DummyAi\nfrom src.library.ais.io import Notification\nfrom src.library.physical.player import Player\nfrom src.components import Genesis\nfrom src.library.special.level import Level\n\nScript: TypeAlias = Iterator[dict[Entity, Action | None] | None]\n\n\n@dataclass\nclass RailsApi:\n    level: Level\n    genesis: Genesis\n\n    _player: 
Player | None = None\n\n _ai_storage: ClassVar[dict[Entity, Any]] = {}\n _ai_locks: ClassVar[dict[Entity, list[object]]] = {}\n\n _death_storage: ClassVar[dict[Entity, Any]] = {}\n _death_locks: ClassVar[dict[Entity, list[object]]] = {}\n\n def get_player(self) -> Player:\n if self._player is None:\n self._player = next(self.level.find(Player), None)\n\n return self._player\n\n def options(self, options: dict[str, Action]) -> Script:\n assert all(options.values()), \"Only actions are allowed; for no action use NoAction\"\n\n yield # TODO should this be needed? Investigate.\n self.get_player().ai.memory.options = options\n yield\n return self.get_player().ai.memory.last_selected_option\n\n def start_cutscene(self) -> Script:\n self.get_player().ai.memory.in_cutscene = True\n yield\n\n def end_cutscene(self) -> Script:\n yield\n self.get_player().ai.memory.in_cutscene = False\n\n def center_camera(self) -> Script:\n player = self.get_player()\n h, w = player.ai.output.game.curses_window.getmaxyx()\n player.ai.output.game.virtual_p = sub2(player.p, floordiv2((w, h), 2))\n yield\n\n def plane_shift(self, level, p) -> Script:\n yield # to display the last railed action before the shift\n Level.move(self.get_player(), p, level=level) # TODO is it needed? maybe use yield {entity: Teleport}?\n\n def create_entity(self, entity) -> Script: # TODO not needed, replace with genesis.push\n self.genesis.push(entity)\n yield\n\n def lock_complex_ai(self, entity, lock) -> Any:\n if entity not in self._ai_storage:\n self._ai_storage[entity] = entity.ai\n self._ai_locks[entity] = []\n\n entity.ai = DummyAi()\n entity.ai.composite[SpacialMemory].knows(self.level)\n\n assert lock not in self._ai_locks[entity]\n\n entity.ai.clear() # TODO LONG it interrupts previous scene. 
Is it appropriate?\n\n self._ai_locks[entity].append(lock)\n return lock\n\n def unlock_complex_ai(self, entity, lock):\n self._ai_locks[entity].remove(lock)\n\n if len(self._ai_locks[entity]) == 0:\n entity.ai = self._ai_storage[entity]\n del self._ai_storage[entity]\n del self._ai_locks[entity]\n\n def lock_dying(self, entity, lock) -> Any:\n if entity not in self._death_storage:\n self._death_storage[entity] = ~Q(entity).on_destruction or (lambda *_, **__: None)\n self._death_locks[entity] = []\n\n entity.on_destruction = (lambda *_, **__: True)\n\n assert lock not in self._death_locks[entity]\n\n self._death_locks[entity].append(lock)\n return lock\n\n def unlock_dying(self, entity, lock):\n self._death_locks[entity].remove(lock)\n\n if len(self._death_locks[entity]) == 0:\n entity.on_destruction = self._death_storage[entity]\n del self._death_storage[entity]\n del self._death_locks[entity]\n\n def notify(self, notification: Notification):\n self.get_player().ai.memory.notification_queue.append(notification)\n yield\n\n\n@dataclass(eq=False)\nclass Lock:\n info: Any = None\n\n def __eq__(self, other):\n return self is other\n","repo_name":"girvel/fallen","sub_path":"src/engine/rails/rails_api.py","file_name":"rails_api.py","file_ext":"py","file_size_in_byte":3959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"3349850570","text":"def next_position(command, row, col, steps):\n if command == \"up\":\n return row - steps, col\n if command == \"down\":\n return row + steps, col\n if command == \"left\":\n return row, col - steps\n if command == \"right\":\n return row, col + steps\n\n\ndef is_outside(row, col, size):\n return row < 0 or col < 0 or row >= size or col >= size\n\n\nsize = 5\n\nmatrix = []\nplayer_row = 0\nplayer_col = 0\ntargets = 0\n\nfor row in range(size):\n row_elements = input().split()\n for col in range(size):\n if row_elements[col] == \"A\":\n player_row = row\n player_col = col\n elif row_elements[col] == \"x\":\n targets += 1\n\n matrix.append(row_elements)\n\nnumber_of_commands = int(input())\nshot_targets = []\n\nfor _ in range(number_of_commands):\n command = input().split()\n direction = command[1]\n if command[0] == \"shoot\":\n steps = 1\n next_row, next_col = next_position(direction, player_row, player_col, steps)\n for _ in range(size):\n if is_outside(next_row, next_col, size):\n break\n elif matrix[next_row][next_col] == \"x\":\n targets -= 1\n matrix[next_row][next_col] = \".\"\n shot_targets.append([next_row, next_col])\n break\n next_row, next_col = next_position(direction, next_row, next_col, steps)\n\n elif command[0] == \"move\":\n steps = int(command[2])\n next_row, next_col = next_position(direction, player_row, player_col, steps)\n if is_outside(next_row, next_col, size) or matrix[next_row][next_col] == \"x\":\n continue\n matrix[player_row][player_col] = \".\"\n player_row, player_col = next_row, next_col\n matrix[player_row][player_col] = \"A\"\n\n if targets == 0:\n break\n\nif targets != 0:\n print(f\"Training not completed! {targets} targets left.\")\nelse:\n print(f\"Training completed! 
All {len(shot_targets)} targets hit.\")\n\nprint(*shot_targets, sep=\"\\n\")\n","repo_name":"Lubecruz-Moris/Softuni","sub_path":"Python Advanced/Multidimensional lists/range_day.py","file_name":"range_day.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"43038618889","text":"\"\"\" This module implements Theta*'s path planning algorithm.\n\nA grid-based variant is included.\n\n\"\"\"\n\n__author__ = \"Sergio de la Mata Moratilla; Javier Pastor Moreno\"\n__authors__ = [\"Sergio de la Mata Moratilla; Javier Pastor Moreno\"]\n__contact__ = \"sergio.matam@edu.uah.es; javier.pastor@edu.uah.es\"\n__copyright__ = \"Copyright 2020, UAH\"\n__credits__ = [\"Mario Cobos Maestre\"]\n__date__ = \"2020/05/22\"\n__deprecated__ = False\n__email__ = \"sergio.matam@edu.uah.es; javier.pastor@edu.uah.es\"\n__license__ = \"GPLv3\"\n__maintainer__ = \"Sergio de la Mata Moratilla; Javier Pastor Moreno\"\n__status__ = \"Development\"\n__version__ = \"0.0.1\"\n\n\"\"\"\n    Code modified from https://github.com/ISG-UAH/R2P2\n\"\"\"\n\nimport path_planning as pp\n\ndef children(point,grid):\n    \"\"\"\n    Calculates the children of a given node over a grid.\n    Inputs:\n        - point: node for which to calculate children.\n        - grid: grid over which to calculate children.\n    Outputs:\n        - list of children for the given node.\n    \"\"\"\n    x,y = point.grid_point\n    if x > 0 and x < len(grid) - 1:\n        if y > 0 and y < len(grid[0]) - 1:\n            links = [grid[d[0]][d[1]] for d in\\\n                     [(x-1, y),(x,y - 1),(x,y + 1),(x+1,y),\\\n                      (x-1, y-1), (x-1, y+1), (x+1, y-1),\\\n                      (x+1, y+1)]]\n        elif y > 0:\n            links = [grid[d[0]][d[1]] for d in\\\n                     [(x-1, y),(x,y - 1),(x+1,y),\\\n                      (x-1, y-1), (x+1, y-1)]]\n        else:\n            links = [grid[d[0]][d[1]] for d in\\\n                     [(x-1, y),(x,y + 1),(x+1,y),\\\n                      (x-1, y+1), (x+1, y+1)]]\n    elif x > 0:\n        if y > 0 and y < len(grid[0]) - 1:\n            links = [grid[d[0]][d[1]] for d in\\\n                     [(x-1, y),(x,y - 1),(x,y + 1),\\\n                      (x-1, y-1), (x-1, y+1)]]\n        elif y > 0:\n            links = [grid[d[0]][d[1]] for d in\\\n                     [(x-1, y),(x,y - 1),(x-1, y-1)]]\n        else:\n            links = [grid[d[0]][d[1]] for d in\\\n                     [(x-1, y), (x,y + 1), (x-1, y+1)]]\n    else:\n        if y > 0 and y < len(grid[0]) - 1:\n            links = [grid[d[0]][d[1]] for d in\\\n                     [(x+1, y),(x,y - 1),(x,y + 1),\\\n                      (x+1, y-1), (x+1, y+1)]]\n        elif y > 0:\n            links = [grid[d[0]][d[1]] for d in\\\n                     [(x+1, y),(x,y - 1),(x+1, y-1)]]\n        else:\n            links = [grid[d[0]][d[1]] for d in\\\n                     [(x+1, y), (x,y + 1), (x+1, y+1)]]\n    return [link for link in links if link.value != 9]\n\ndef checkObst(x, y, grid):\n    \"\"\"\n    Verify if there is any obstacle at the position studied.\n    Inputs:\n        - x: X coordinate of the position studied.\n        - y: Y coordinate of the position studied.\n        - grid: grid over which to execute the algorithm.\n    Outputs:\n        - boolean indicating if there is an obstacle at the given position of the grid.\n    \"\"\"\n    return (grid[int(x)][int(y)].value >= 5)\n\ndef lineOfSight(current, node, grid):\n    \"\"\"\n    Checks whether there is a clear line of sight between the current point studied and another.\n    Inputs:\n        - current: point studied.\n        - node: node that needs to be reached.\n        - grid: grid over which to check for obstacles.\n    Outputs:\n        - boolean indicating whether a clear line of sight was found.\n    \"\"\"\n    # Separate the coordinates of each of the nodes\n    x0, y0 = current.grid_point\n    x1, y1 = node.grid_point\n    # Difference between the x coordinates and y coordinates\n    difference_posx = x1 - x0\n    
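# note: what follows appears to be the Bresenham-style grid traversal used\n    # for Theta*'s line-of-sight test: f accumulates the minor-axis error\n    # while stepping along the major axis, and every cell the segment touches\n    # is tested with checkObst()\n    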
difference_posy = y1 - y0\n    f = 0\n    # Verify if the difference of the y coordinates is lower than 0\n    if difference_posy < 0:\n        difference_posy = - difference_posy\n        s_posy = -1\n    else:\n        s_posy = 1\n    # Verify if the difference of the x coordinates is lower than 0\n    if difference_posx < 0:\n        difference_posx = - difference_posx\n        s_posx = -1\n    else:\n        s_posx = 1\n    # Compare the x and y differences and traverse along the greater one\n    if difference_posx >= difference_posy:\n        # Iterate until the x values are equal\n        while x0 != x1:\n            # Add the difference of the y coordinates to the f variable\n            f += difference_posy\n            # When the accumulated error reaches the x difference, step in y\n            if f >= difference_posx:\n                if checkObst(x0 + ((s_posx - 1)/2), y0 + ((s_posy - 1)/2), grid):\n                    return False\n                y0 = y0 + s_posy\n                f -= difference_posx\n            # Verify if the f variable is different from 0 and there is any obstacle near the position studied\n            if (f != 0) and (checkObst(x0 + ((s_posx - 1)/2), y0 + ((s_posy - 1)/2), grid)):\n                return False\n            # Verify if the difference of the y coordinates is 0 and check if there are obstacles in relation to the initial y coordinate\n            if (difference_posy == 0) and (checkObst(x0 + ((s_posx - 1)/2), y0,grid)) and (checkObst(x0 + ((s_posx - 1)/2), y0 - 1, grid)):\n                return False\n            x0 += s_posx\n    else:\n        # Iterate until the y values are equal\n        while y0 != y1:\n            # Add the difference of the x coordinates to the f variable\n            f += difference_posx\n            # When the accumulated error reaches the y difference, step in x\n            if f >= difference_posy:\n                if checkObst(x0 + ((s_posx - 1)/2), y0 + ((s_posy - 1)/2), grid):\n                    return False\n                x0 = x0 + s_posx\n                f -= difference_posy\n            # Verify if the f variable is different from 0 and there is any obstacle near the position studied\n            if (f != 0) and (checkObst(x0 + ((s_posx - 1)/2), y0 + ((s_posy - 1)/2), grid)):\n                return False\n            # Verify if the difference of the x coordinates is 0 and check if there are obstacles in relation to the initial x coordinate\n            if (difference_posx == 0) and (checkObst(x0, y0 + ((s_posy - 1)/2), grid)) and (checkObst(x0 - 1, y0 + ((s_posy - 1)/2), grid)):\n                return False\n            y0 += s_posy\n    return True\n\ndef thetaStar(start, goal, grid, heur='naive'):\n    \"\"\"\n    Executes the Theta* path planning algorithm over a given grid.\n    Inputs:\n        - start: node at which to start.\n        - goal: node that needs to be reached.\n        - grid: grid over which to execute the algorithm.\n        - heur: name of the heuristic to apply, looked up in pp.heuristic.\n    Outputs:\n        - ordered list of nodes representing the path found from\n            start to goal.\n    \"\"\"\n    # Open and closed sets\n    opened_set = set()\n    closed_set = set()\n    # Current position at the starting point\n    current = start\n    # Insert the starting point into the open set\n    opened_set.add(current)\n    # While the open set has nodes to be studied\n    while opened_set: \n        # Pick the item with the lowest G + H value\n        current = min(opened_set, key = lambda o:o.G + o.H)\n        pp.expanded_nodes += 1\n        # Current position is the same as the goal\n        if current == goal: \n            path = []\n            # Follow the parents back until the current position has no parent\n            while current.parent: \n                path.append(current)\n                current = current.parent\n            path.append(current)\n            return path[::-1]\n        # Remove the expanded item from the open set and add it to the closed set\n        opened_set.remove(current)\n        closed_set.add(current)\n        \n        # Go through the node's children/siblings\n        for node in children(current, 
grid):\n            # Skip the node if it is in the closed set\n            if node in closed_set:\n                continue\n            # Check whether the node is already in the open set\n            if node in opened_set:\n                # Compute the G value obtained by reaching the node through current\n                g_aux = current.G + current.move_cost(node)\n                # The new g value is lower than the node's current one\n                if node.G > g_aux: \n                    # Update the node's cost and parent\n                    node.G = g_aux\n                    node.parent = current\n            else:\n                if (current.parent is not None) and (lineOfSight(current.parent, node,grid)):\n                    # The current point has a parent and there is a clear line of\n                    # sight between that parent and the node, so link them directly\n                    node.G = current.parent.G + current.parent.move_cost(node)\n                    node.H = pp.heuristic[heur](node, goal)\n                    node.parent = current.parent\n                    opened_set.add(node)\n                else:\n                    # Compute the G and H values for the node studied\n                    node.G = current.G + current.move_cost(node)\n                    node.H = pp.heuristic[heur](node,goal)\n                    # Set the parent to the current position and add the node to the open set\n                    node.parent = current\n                    opened_set.add(node)\n    # Raise an exception if there is no path\n    raise ValueError('No Path Found')\n\npp.register_search_method('Theta*', thetaStar)\n","repo_name":"SergiodelaMata/Integrate_path-planning_-_task-planning","sub_path":"r2p2/r2p2/thetaStar.py","file_name":"thetaStar.py","file_ext":"py","file_size_in_byte":9910,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"31691426852","text":"import os\nimport random\nimport shutil\nimport time\nimport warnings\nfrom tqdm import tqdm, trange\nimport sys\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.distributed as dist\nimport torch.optim\nimport torch.multiprocessing as mp\nimport torch.utils.data\nimport torch.utils.data.distributed\n\nfrom utils.logging import logger\nfrom utils.statistics import Statistics\nfrom models.Tree2Seq import *\n# from models.Mem2Seq_update import *\n\nimport utils.utils_kvr_tree as utils_tree\n# from utils.general_utils import to_device\n\nrandom.seed(1234)\ntorch.manual_seed(1234)\n\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\n\n\ndef main_worker(args, gpu):\n\n    model, train, dev, test = build_model(args, gpu)\n\n    # Data loading code\n    if args.distributed:\n        train_sampler = torch.utils.data.distributed.DistributedSampler(train)\n        dev_sampler = torch.utils.data.distributed.DistributedSampler(dev)\n        test_sampler = torch.utils.data.distributed.DistributedSampler(test)\n\n    else:\n        train_sampler = None\n        dev_sampler = None\n        test_sampler = None\n\n    # multiple workers are not allowed in multiprocessing !\n    train_loader = torch.utils.data.DataLoader(\n        train, batch_size=args.batch, shuffle=(train_sampler is None),\n        pin_memory=False, sampler=train_sampler, collate_fn=utils_tree.collate_fn_new)\n\n\n    val_loader = torch.utils.data.DataLoader(dev,\n        batch_size=args.batch, shuffle=False, sampler=dev_sampler,\n        pin_memory=False, collate_fn=utils_tree.collate_fn_new)\n\n    test_loader = torch.utils.data.DataLoader(test,\n        batch_size=args.batch, shuffle=False,sampler=test_sampler,\n        pin_memory=False, collate_fn=utils_tree.collate_fn_new)\n\n    best_bleu = 0.0\n    best_f1 = 0.0\n    trainer = Tree2SeqTrainer(model, lr=float(args.learn), args=args)\n    scheduler = lr_scheduler.ReduceLROnPlateau(trainer.optimizer, mode='max', factor=0.8, patience=5,\n                                               min_lr=0.0001, verbose=True)\n\n    # optionally resume 
","repo_name":"SergiodelaMata/Integrate_path-planning_-_task-planning","sub_path":"r2p2/r2p2/thetaStar.py","file_name":"thetaStar.py","file_ext":"py","file_size_in_byte":9910,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"}
{"seq_id":"31691426852","text":"import os\nimport random\nimport shutil\nimport time\nimport warnings\nimport json\nfrom tqdm import tqdm, trange\nimport sys\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.distributed as dist\nimport torch.optim\nimport torch.multiprocessing as mp\nimport torch.utils.data\nimport torch.utils.data.distributed\n\nfrom utils.logging import logger\nfrom utils.statistics import Statistics\nfrom models.Tree2Seq import *\n# from models.Mem2Seq_update import *\n\nimport utils.utils_kvr_tree as utils_tree\n# from utils.general_utils import to_device\n\nrandom.seed(1234)\ntorch.manual_seed(1234)\n\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\n\n\ndef main_worker(args, gpu):\n\n    model, train, dev, test = build_model(args, gpu)\n\n    # Data loading code\n    if args.distributed:\n        train_sampler = torch.utils.data.distributed.DistributedSampler(train)\n        dev_sampler = torch.utils.data.distributed.DistributedSampler(dev)\n        test_sampler = torch.utils.data.distributed.DistributedSampler(test)\n\n    else:\n        train_sampler = None\n        dev_sampler = None\n        test_sampler = None\n\n    # multiple workers are not allowed in multiprocessing !\n    train_loader = torch.utils.data.DataLoader(\n        train, batch_size=args.batch, shuffle=(train_sampler is None),\n        pin_memory=False, sampler=train_sampler, collate_fn=utils_tree.collate_fn_new)\n\n\n    val_loader = torch.utils.data.DataLoader(dev,\n        batch_size=args.batch, shuffle=False, sampler=dev_sampler,\n        pin_memory=False, collate_fn=utils_tree.collate_fn_new)\n\n    test_loader = torch.utils.data.DataLoader(test,\n        batch_size=args.batch, shuffle=False,sampler=test_sampler,\n        pin_memory=False, collate_fn=utils_tree.collate_fn_new)\n\n    best_bleu = 0.0\n    best_f1 = 0.0\n    trainer = Tree2SeqTrainer(model, lr=float(args.learn), args=args)\n    scheduler = lr_scheduler.ReduceLROnPlateau(trainer.optimizer, mode='max', factor=0.8, patience=5,\n                                               min_lr=0.0001, verbose=True)\n\n    # optionally resume from a checkpoint\n    if args.resume:\n        if os.path.isfile(args.resume):\n            logger.info(\"=> loading checkpoint '{}'\".format(args.resume))\n            checkpoint = torch.load(args.resume)\n            model.load_state_dict(checkpoint['state_dict'])\n            # trainer.optimizer.load_state_dict(checkpoint['optimizer'])\n            logger.info(\"=> loaded checkpoint '{}' (epoch {})\"\n                        .format(args.resume, checkpoint['epoch']))\n        else:\n            logger.info(\"=> no checkpoint found at '{}'\".format(args.resume))\n\n    # check for mode\n    if args.mode == 'eval':\n        logger.info('In eval mode.')\n        validate_one_epoch(val_loader, model, trainer, args)\n        return\n\n    if args.mode == 'test':\n        logger.info('In test mode.')\n        # logger.info('Eval.')\n        # validate_one_epoch(val_loader, model, trainer, args)\n        logger.info('Test.')\n        validate_one_epoch(test_loader, model, trainer, args)\n        return\n\n    best_f1s = []\n\n    logger.info('In the training process now.')\n    for epoch in range(0, args.max_epoch):\n        if args.distributed:\n            train_sampler.set_epoch(epoch)\n\n        # adjust_learning_rate(optimizer, epoch, args)\n\n        # train for one epoch\n        train_one_epoch(train_loader, model, trainer, epoch, args)\n\n        # evaluate on validation set\n        bleu, f1s = validate_one_epoch(val_loader, model, trainer, args)\n\n        # remember the best F1 and save a checkpoint\n        is_best = f1s[0] > best_f1\n        best_bleu = max(bleu, best_bleu)\n        best_f1 = max(f1s[0], best_f1)\n        if is_best:\n            best_f1s = f1s\n            best_bleu = bleu\n\n        scheduler.step(f1s[0])\n\n        if not args.distributed or (args.distributed and args.rank % args.world_size == 0):\n            save_checkpoint({\n                'epoch': epoch + 1,\n                'arch': args.decoder,\n                'state_dict': model.state_dict(),\n                'best_bleu': best_bleu,\n                # 'optimizer': optimizer.state_dict(),\n            }, is_best, args.experiment)\n\n\n    logger.info(\"BEST F1 SCORE:\\t{}\".format(str(best_f1s[0])))\n    logger.info(\"\\tBEST CAL F1:\\t{}\".format(str(best_f1s[1])))\n    logger.info(\"\\tBEST WET F1:\\t{}\".format(str(best_f1s[2])))\n    logger.info(\"\\tBEST NAV F1:\\t{}\".format(str(best_f1s[3])))\n    logger.info(\"\\tBEST BLEU:\\t{}\".format(str(best_bleu)))\n\n    logger.info('Test')\n    validate_one_epoch(test_loader, model, trainer, args)\n    return\n\ndef build_model(args, gpu):\n    # global best_acc1\n    if gpu == -1:\n        args.gpu = 0\n    else:\n        args.gpu = gpu\n\n    if args.gpu is not None:\n        logger.info(\"Use GPU: {} for training\".format(args.gpu))\n\n    if args.distributed:\n        # For multiprocessing distributed training, rank needs to be the\n        # global rank among all the processes\n        args.rank = gpu\n\n    if args.distributed:\n        # For multiprocessing distributed, DistributedDataParallel constructor\n        # should always set the single device scope, otherwise,\n        # DistributedDataParallel will use all available devices.\n        if args.gpu is not None:\n            # When using a single GPU per process and per\n            # DistributedDataParallel, we need to divide the batch size\n            # ourselves based on the total number of GPUs we have\n            args.batch = int( args.batch / args.world_size)\n            args.workers = int( args.workers / args.world_size)\n\n    # read in the dataset\n    # todo : clean this logic\n    prepare_data_seq = utils_tree.prepare_data_seq\n    # print(args.batch)\n    logger.info('Batch-size per gpu: {}'.format(args.batch))\n    train, dev, test, testOOV, lang, max_len, max_r = prepare_data_seq(vars(args),batch_size=int(args.batch),shuffle=True)\n    # create model\n    model = globals()[args.decoder](int(args.hidden),\n                                    max_len,max_r,lang,args.path,args.task,\n                                    lr=float(args.learn),\n                                    n_layers=int(args.layer),\n                                    dropout=float(args.drop),\n                                    unk_mask=bool(int(args.unk_mask)),\n
args=args\n )\n\n if args.distributed:\n if args.gpu is not None:\n torch.cuda.set_device(args.gpu)\n model.cuda(args.gpu)\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])\n else:\n model.cuda()\n # DistributedDataParallel will divide and allocate batch_size to all\n # available GPUs if device_ids are not set\n model = torch.nn.parallel.DistributedDataParallel(model)\n else:\n model.cuda()\n\n return model, train, dev, test\n\n\ndef train_one_epoch(train_loader, model, trainer, epoch, args):\n batch_time = AverageMeter('Time', ':6.3f')\n data_time = AverageMeter('Data', ':6.3f')\n Losses = AverageMeter('Loss', ':6.2f')\n V_Loss = AverageMeter('VL', ':6.2f')\n P_Loss = AverageMeter('PL', ':6.2f')\n progress = ProgressMeter(len(train_loader), batch_time, data_time, Losses, V_Loss,\n P_Loss, prefix=\"Epoch: [{}]\".format(epoch))\n\n # switch to train mode\n model.train()\n\n end = time.time()\n for i, data in enumerate(train_loader):\n # measure data loading time\n data_time.update(time.time() - end)\n\n # data = to_device(data, torch.device('cuda'))\n # compute output\n loss = trainer.train_batch(model, data, len(data['src_seqs']), 1.0, 0.5, i == 0)\n\n # for debug\n # for name, param in model.named_parameters():\n # print(name, param, True if param.grad is not None else False)\n # pdb.set_trace()\n\n # measure accuracy and record loss\n # acc1, acc5 = accuracy(output, target, topk=(1, 5))\n Losses.update(loss[0], data['src_seqs'].size(0))\n P_Loss.update(loss[1], data['src_seqs'].size(0))\n V_Loss.update(loss[2], data['src_seqs'].size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n progress.print(i)\n\n\ndef validate_one_epoch(val_loader, model, trainer, args):\n # switch to evaluate mode\n model.eval()\n\n val_stats = [Statistics() for i in range(5)]\n\n # read-in global entity list\n if args.dataset == 'kvr':\n with open('data/KVR/kvret_entities.json') as f:\n global_entity = json.load(f)\n global_entity_list = []\n for key in global_entity.keys():\n if key != 'poi':\n global_entity_list += [item.lower().replace(' ', '_') for item in global_entity[key]]\n else:\n for item in global_entity['poi']:\n global_entity_list += [item[k].lower().replace(' ', '_') for k in item.keys()]\n global_entity_list = list(set(global_entity_list))\n else:\n raise NotImplementedError('Not implemented this val for datasets other than kvr yet.')\n\n with torch.no_grad():\n end = time.time()\n # for i, data in tqdm(enumerate(val_loader)):\n cnt = 0\n for data in tqdm(val_loader):\n # data = to_device()\n # end = time.time()\n cnt += 1\n decoded_words = trainer.evaluate_batch(model, data)\n # logger.info(\"Decode Time cost: {}\".format(str(time.time() - end)))\n # end = time.time()\n # update val states for each batch.\n val_stats = compute_val_stat(data, decoded_words, global_entity_list, val_stats, args)\n # logger.info(\"Val Compute Time cost: {}\".format(str(time.time() - end)))\n\n if args.distributed:\n all_val_stats = Statistics.all_gather_stats_list(val_stats)\n else:\n all_val_stats = val_stats\n f1 = all_val_stats[0].accuracy()\n cal_f1 = all_val_stats[1].accuracy()\n wet_f1 = all_val_stats[2].accuracy()\n nav_f1 = all_val_stats[3].accuracy()\n\n logger.info(\"F1 SCORE:\\t{}\".format(str(f1)))\n logger.info(\"\\tCAL F1:\\t{}\".format(str(cal_f1)))\n logger.info(\"\\tWET F1:\\t{}\".format(str(wet_f1)))\n logger.info(\"\\tNAV F1:\\t{}\".format(str(nav_f1)))\n\n bleu_score = 
all_val_stats[4].accuracy() / 100.0\n    # not validated yet.\n    # bleu_score = 0.0\n    logger.info(\"\\tBleu Score:\\t{}\".format(str(bleu_score)))\n\n    return bleu_score, [f1, cal_f1, wet_f1, nav_f1]\n\n\ndef compute_val_stat(data_dev, words, global_entity_list, stats, args):\n    w = 0\n    temp_gen = []\n\n    ref = []\n    hyp = []\n    src = []\n    ref_s = \"\"\n    hyp_s = \"\"\n    src_s = \"\"\n\n    microF1_PRED, microF1_PRED_cal, microF1_PRED_nav, microF1_PRED_wet = 0, 0, 0, 0\n    microF1_TRUE, microF1_TRUE_cal, microF1_TRUE_nav, microF1_TRUE_wet = 0, 0, 0, 0\n\n    for i, row in enumerate(np.transpose(words)):\n        st = ''\n        for e in row:\n            if e == '<EOS>':\n                break\n            else:\n                st += e + ' '\n        temp_gen.append(st)\n        correct = data_dev['trg_plain'][i]\n        ### compute F1 SCORE\n        st = st.lstrip().rstrip()\n        correct = correct.lstrip().rstrip()\n        if args.dataset == 'kvr':\n            f1_true, count = Tree2Seq.compute_prf(data_dev['entity'][i], st.split(), global_entity_list,\n                                                  data_dev['kb_plain'][i])\n            microF1_TRUE += f1_true\n            microF1_PRED += count\n            f1_true, count = Tree2Seq.compute_prf(data_dev['entity_cal'][i], st.split(), global_entity_list,\n                                                  data_dev['kb_plain'][i])\n            microF1_TRUE_cal += f1_true\n            microF1_PRED_cal += count\n            f1_true, count = Tree2Seq.compute_prf(data_dev['entity_nav'][i], st.split(), global_entity_list,\n                                                  data_dev['kb_plain'][i])\n            microF1_TRUE_nav += f1_true\n            microF1_PRED_nav += count\n            f1_true, count = Tree2Seq.compute_prf(data_dev['entity_wet'][i], st.split(), global_entity_list,\n                                                  data_dev['kb_plain'][i])\n            microF1_TRUE_wet += f1_true\n            microF1_PRED_wet += count\n\n\n        conv_src = [item[0] for item in data_dev['src_plain'][i] if '$' in item[1]]\n        conv_src_s = \" \".join(conv_src)\n        src_s += conv_src_s + '\\n'\n        src.append(src_s)\n\n        # w += wer(correct, st)\n        ref.append(str(correct))\n        hyp.append(str(st))\n\n        ref_s += str(correct) + \"\\n\"\n        hyp_s += str(st) + \"\\n\"\n\n    with open('./tmp/{}_ref_s.txt'.format(args.experiment), 'a+') as f:\n        f.write(ref_s)\n    with open('./tmp/{}_hyp_s.txt'.format(args.experiment), 'a+') as f:\n        f.write(hyp_s)\n    with open('./tmp/{}_src_s.txt'.format(args.experiment), 'a+') as f:\n        f.write(src_s)\n\n\n    # compute the bleu score\n    bleu_score = moses_multi_bleu(np.array(hyp), np.array(ref), lowercase=True)\n\n    bleu_stat = Statistics(n_correct=bleu_score, n_words=1)\n\n    entity_stat = Statistics(n_correct=microF1_TRUE, n_words=microF1_PRED)\n    entity_cal_stat = Statistics(n_correct=microF1_TRUE_cal, n_words=microF1_PRED_cal)\n    entity_nav_stat = Statistics(n_correct=microF1_TRUE_nav, n_words=microF1_PRED_nav)\n    entity_wet_stat = Statistics(n_correct=microF1_TRUE_wet, n_words=microF1_PRED_wet)\n    new_stats = [entity_stat, entity_cal_stat, entity_nav_stat, entity_wet_stat, bleu_stat]\n\n    for i in range(5):\n        stats[i].update(new_stats[i])\n\n    return stats\n\ndef save_checkpoint(state, is_best, experiment='Tree2Seq'):\n    filename = './model/{}.pt'.format(experiment)\n    torch.save(state, filename)\n    if is_best:\n        best_filename = './model/{}_best.pt'.format(experiment)\n        shutil.copyfile(filename, best_filename)\n\n\nclass AverageMeter(object):\n    \"\"\"Computes and stores the average and current value\"\"\"\n\n    def __init__(self, name, fmt=':f'):\n        self.name = name\n        self.fmt = fmt\n        self.reset()\n\n    def reset(self):\n        self.val = 0\n        self.avg = 0\n        self.sum = 0\n        self.count = 0\n\n    def update(self, val, n=1):\n        self.val = val\n        self.sum += val * n\n        self.count += n\n        self.avg = self.sum / self.count\n\n    def __str__(self):\n        fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'\n        return
 fmtstr.format(**self.__dict__)\n\n\nclass ProgressMeter(object):\n    def __init__(self, num_batches, *meters, prefix=\"\"):\n        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)\n        self.meters = meters\n        self.prefix = prefix\n\n    def print(self, batch):\n        entries = [self.prefix + self.batch_fmtstr.format(batch)]\n        entries += [str(meter) for meter in self.meters]\n        logger.info('\\t'.join(entries))\n\n    def _get_batch_fmtstr(self, num_batches):\n        num_digits = len(str(num_batches // 1))\n        fmt = '{:' + str(num_digits) + 'd}'\n        return '[' + fmt + '/' + fmt.format(num_batches) + ']'\n\n\ndef adjust_learning_rate(optimizer, epoch, args):\n    \"\"\"Sets the learning rate to the initial LR decayed by 10 every 30 epochs\"\"\"\n    lr = args.lr * (0.1 ** (epoch // 30))\n    for param_group in optimizer.param_groups:\n        param_group['lr'] = lr\n\n\ndef accuracy(output, target, topk=(1,)):\n    \"\"\"Computes the accuracy over the k top predictions for the specified values of k\"\"\"\n    with torch.no_grad():\n        maxk = max(topk)\n        batch_size = target.size(0)\n\n        _, pred = output.topk(maxk, 1, True, True)\n        pred = pred.t()\n        correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n        res = []\n        for k in topk:\n            correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n            res.append(correct_k.mul_(100.0 / batch_size))\n        return res\n\n","repo_name":"ElliottYan/KB-Chat","sub_path":"single_train.py","file_name":"single_train.py","file_ext":"py","file_size_in_byte":16056,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"}
{"seq_id":"37824441339","text":"from collections import deque\n\n\ndef bfs(g, s):\n    que = deque()\n    colors[s] = 0\n    que.append(s)\n\n    while que:\n        v = que.popleft()\n\n        for next_v in g[v]:\n            if colors[next_v] != -1:\n                if colors[next_v] == colors[v]:\n                    return False\n                continue\n            colors[next_v] = 1 - colors[v]\n            que.append(next_v)\n\n    return True\n\n\nn, m = map(int, input().split())\ng = [[] for _ in range(n)]\nfor i in range(m):\n    a, b = map(int, input().split())\n    g[a].append(b)\n    g[b].append(a)\n\ncolors = [-1 for _ in range(n)]\nis_bipartite = True\n\nfor v in range(n):\n    if colors[v] != -1:\n        continue\n    if not bfs(g, v):\n        is_bipartite = False\n\nif is_bipartite:\n    print(\"Yes\")\nelse:\n    print(\"No\")\n","repo_name":"yuyagishita/book_algorithm_solution","sub_path":"chap13/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"74850774910","text":"#!/usr/bin/python3\n\n__author__ = \"yang.dd\"\n\n\"\"\"\n    example 066\n\"\"\"\n\nif __name__ == '__main__':\n    nums = []\n    for i in range(3):\n        nums.append(int(input(\"Please enter a number: \")))\n    nums.sort()\n    print(\"In ascending order:\", nums)\n    nums.reverse()\n    print(\"In descending order:\", nums)\n","repo_name":"yangdd1205/python3-100-examples","sub_path":"example66.py","file_name":"example66.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"60"}
{"seq_id":"39647174485","text":"# encoding: utf-8\nfrom com.util.dataAbout import dataLoad\nfrom sklearn.cluster import DBSCAN\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n\n'''\n0. Load the data\n'''\ndataSet = dataLoad('../../data/t8_6566.dat')\noutliers_fraction = 0.25  # expected proportion of outlier samples (currently unused)\n'''\n1. Cluster with DBSCAN\n'''\ndb = DBSCAN(eps=20, min_samples=6).fit(dataSet)\nlabels = db.labels_\nnum_cluster = len(set(labels)) - (1 if -1 in labels else 0)\n# plot the global outliers (noise points labelled -1 by DBSCAN)\nglobal_outlier = dataSet[labels == -1]\nif len(global_outlier) > 
0:\n    plt.scatter(global_outlier[:, 0], global_outlier[:, 1], s=5, c='r', marker='*')\n    plt.show()\n'''\n2. Plot each cluster separately\n'''\nfor t in range(num_cluster):\n    block = dataSet[labels == t]\n    print(len(block))\n    plt.scatter(block[:, 0], block[:, 1], s=5, c='b', marker='*')\n    plt.show()","repo_name":"iiZhangJun/MyProject","sub_path":"com/main/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"12900655219","text":"\"\"\"\n  environment.py\n\"\"\"\n\nimport math, random\nfrom functools import reduce\n#from PIL import Image\n#import numpy as np\nfrom numpy import array, dot\nfrom numpy.random import rand\nimport enum\n\nfrom organisms import Organism, Health\n\n\n\nclass Environment:\n\n    def __init__(self, size, restriction=0.25):\n        self.width = size[0]\n        self.height = size[1]\n        self.organisms = []\n        self.colour = (255,255,255)\n        self.elasticity = 1.\n        self.colliding = True\n        self.time_elapsed = 0\n        self.restriction = restriction\n\n    def add_organisms(self,n=1,**kwargs):\n        \"\"\"Add n organisms at random, non-overlapping positions.\"\"\"\n        #kwargs.get('x', ...)\n        for i in range(n):\n            space_occupied = True\n            while space_occupied:\n                o = Organism(random.uniform(0, self.width), random.uniform(0, self.height))\n                space_occupied = False\n                for j, o2 in enumerate(self.organisms):\n                    if abs(o.x - o2.x) < 1.5*o.size and abs(o.y - o2.y) < 1.5*o.size:\n                        #print(f\"{i} is occupied at ({o.x}, {o.y} by particle {j}\")\n                        space_occupied = True\n                #print(i, space_occupied)\n            \n            if i >= math.floor(n*(1.-self.restriction)):\n                o.speed = 0\n            #print(f\"Created organism at ({o.x},{o.y})\")\n            self.organisms.append(o)\n        self.organisms[0].infect(self.time_elapsed)\n\n\n    def update(self):\n        \"\"\" update all organisms' health and position \"\"\"\n        for i, o in enumerate(self.organisms):\n            o.update_health(self.time_elapsed)\n            o.update_position()\n            self.bounce_off_wall(o)\n            for j, o2 in enumerate(self.organisms[i+1:]):\n                self.collide(o, o2)\n        \n\n    def remove_organism(self, organism):\n        \"\"\" \"\"\"\n        pass\n\n\n    def collide(self, organism1, organism2):\n        \"\"\" check whether two organisms collide; on contact, spread infection and update angles \"\"\"\n        distance = (organism1.x - organism2.x, organism1.y - organism2.y)\n        r = reduce(lambda x,y: math.sqrt(x**2+y**2), distance)\n        if r < organism1.size + organism2.size:\n            # infect if either is sick\n            if organism1.is_contageous(): organism2.infect(self.time_elapsed)\n            if organism2.is_contageous(): organism1.infect(self.time_elapsed)\n\n            if organism2.speed == 0:\n                organism1.angle = 2 * math.pi - organism1.angle  # angles are kept in radians\n                #organism1.colour = (0,218,255)\n            else:\n                # swap angles due to collision\n                organism2.angle, organism1.angle = (organism1.angle, organism2.angle)\n\n\n    def bounce_off_wall(self,organism):\n        \"\"\" check if (x,y) is off the screen, bounce off limits\"\"\"\n\n        if organism.x > self.width - organism.size:\n            organism.x = 2*(self.width - organism.size) - organism.x\n            organism.angle = - organism.angle\n            organism.speed *= self.elasticity\n        \n        elif organism.x < organism.size:\n            organism.x = 2*organism.size - organism.x\n            organism.angle = - organism.angle\n            organism.speed *= self.elasticity\n        \n        if organism.y > self.height - organism.size:\n            organism.y = 2*(self.height - organism.size) - organism.y\n            organism.angle = math.pi - organism.angle\n            organism.speed *= self.elasticity\n        \n        elif organism.y < organism.size:\n
            organism.y = 2*organism.size - organism.y\n            organism.angle = math.pi - organism.angle\n            organism.speed *= self.elasticity\n\n    def health_overview(self):\n        \"\"\"Count the organisms in each health state.\"\"\"\n        healthy, infected, contageous, sick, healed = 0,0,0,0,0\n        for i, o in enumerate(self.organisms):\n            if o.health == Health.healthy: healthy += 1\n            elif o.health == Health.infected: infected += 1\n            elif o.health == Health.contageous: contageous += 1\n            elif o.health == Health.sick: sick += 1\n            elif o.health == Health.healed: healed += 1\n        return {\\\n            Health.healthy: healthy,\n            Health.infected: infected,\n            Health.contageous: contageous,\n            Health.sick: sick,\n            Health.healed: healed}\n","repo_name":"markusdoppler/Pandemic","sub_path":"environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":4244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"34960266530","text":"#!/usr/bin/env python\n\n# # Using zip to create a data structure for the donor data\n# donor_names = [\"Bill\", \"Fred\"]\n# donations = [[200, 500], [2000, 3000]]\n# donors = list(zip(donor_names, donations))\n\n# However, in this case, unless that data is coming from elsewhere, you\n# might as well simply hard code the data directly:\n\ndonors = [(\"William Gates, III\", [653772.32, 12.17]),\n          (\"Jeff Bezos\", [877.33]),\n          (\"Paul Allen\", [663.23, 43.87, 1.32]),\n          (\"Mark Zuckerberg\", [1663.23, 4300.87, 10432.0]),\n          ]\n\n# Either way, you end up with a list of tuples -- each tuple is one \"record\",\n# and has the name as the zeroth element, and a list of donations as the other\n# element.\n# It's important that the list of donations is mutable -- you need to be\n# able to append new donations to it.\n\n\ndef thank_you():\n    print(\"this is the thank you function\")\n\ndef print_report():\n    print(\"this is the print report function\")\n\n\ndef mainloop():\n    \"\"\"\n    (“Send a Thank You”, “Create a Report” or “quit”)\n    \"\"\"\n    #result = input(\"type something > \")\n    #print(\"you typed: \", result)\n\n    while True:\n        answer = int(input(\"Select from one of these options:\\n\"\n                           \"(1) Send a Thank You\\n\"\n                           \"(2) Create a Report\\n\"\n                           \"(3) quit\\n\"\n                           \"> \"))\n        if answer == 3:\n            break\n        elif answer == 1:\n            thank_you()\n        elif answer == 2:\n            print_report()\n        else:\n            print(\"Please type 1, 2, or 3\")\n\n\n\n\n\nif __name__ == \"__main__\":\n    print('starting...')\n    mainloop()\n\n\n\n","repo_name":"UWPCE-PythonCert/IntroPython-2017","sub_path":"students/Chris/session03/mailroom.py","file_name":"mailroom.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"60"}
{"seq_id":"2378405327","text":"from django.http import HttpResponse\nfrom django.shortcuts import render\nfrom .forms import Profile, TextCapForm,Mask\nfrom .face import compare_faces\nfrom .textcap import detect_text\nfrom .mask import detect_labels\nfrom .models import ProfileFace, ppe, textdetaction\n# Create your views here.\n\ndef home(request):\n    return render(request,'home.html')\ndef faceCom(request):\n    if request.method == 'POST':\n        form = Profile(request.POST, request.FILES) \n        if form.is_valid(): \n        \n            picture1=form.cleaned_data['picture1']\n            picture2=form.cleaned_data['picture2']\n            source_file = picture1\n            # target_file='aws.jpg'\n            target2_file = picture2\n\n            face_matches = compare_faces(source_file, target2_file)\n            print(\"Face matches: \" + 
str(face_matches))\n print(\"here is picture 1 input :\" , picture1)\n output=int(face_matches)\n obj=ProfileFace(picture1=picture1,picture2=picture2)\n obj.save()\n picture1=obj.picture1\n picture2=obj.picture2\n return render(request, 'facecompare/result.html',{'res':output,'pic1':picture1,'pic2':picture2})\n \n else: \n form = Profile() \n \n return render(request, 'facecompare/facecom.html', {'form': form})\n\ndef text_capture(request):\n if request.method==\"POST\":\n form=TextCapForm(request.POST, request.FILES)\n if form.is_valid():\n pic1=form.cleaned_data['pic1']\n bucket=''\n photo=pic1\n detect_text(photo,bucket)\n \n \n list1=detect_text.list1\n \n obj=textdetaction(picture1=pic1)\n obj.save()\n picture1=obj.picture1\n \n return render(request,'textcapture/textresult.html',{'list1':list1,'pic':picture1})\n \n else:\n form=TextCapForm()\n return render(request,'textcapture/textcap.html',{'form':form})\n\ndef maskdetect(request):\n if request.method==\"POST\":\n form=Mask(request.POST, request.FILES)\n if form.is_valid():\n pic1=form.cleaned_data['pic1']\n \n photo=pic1\n bucket=''\n person_count=detect_labels(photo)\n detect=ppe(picture1=photo)\n \n list1=detect_labels.informations\n detect.save()\n pic=detect.picture1\n return render(request,'maskdetect/result.html',{'list1':list1,'pic':pic})\n \n else:\n form=Mask()\n return render(request,'maskdetect/mask_home.html',{'form':form})","repo_name":"Sahilgupta9426/railways2s","sub_path":"pro/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"71136415552","text":"import unittest\nfrom random import random\nfrom functools import reduce\n\nfrom data_ingestion.dataset import BinaryDataset\nfrom data_ingestion.dataloader import BinaryDataLoader, MAX_DATASET_SIZE\n\n\nclass TestDataloader(unittest.TestCase):\n\n def setUp(self):\n self.dataset = BinaryDataset(kind='train')\n self.weights = [random() for _ in range(len(self.dataset))]\n self.loader = BinaryDataLoader(dataset=self.dataset,\n weights=self.weights)\n\n def test_split_dataset(self):\n # Check that the length of the original dataset is bigger than the\n # allowed by PyTorch 1.3.0\n self.assertTrue(len(self.dataset) > MAX_DATASET_SIZE)\n\n # The sum of the length of each dataset must be equal to the the length\n # of the original dataset\n cum_dataset_length = sum([len(d) for d in self.loader._datasets])\n self.assertEqual(len(self.dataset), cum_dataset_length)\n\n # Check that the length of each dataset is smaller than what PyTorch\n # allows\n for d in self.loader._datasets:\n self.assertTrue(len(d) <= MAX_DATASET_SIZE)\n\n # Check that indices of all datasets are different\n indices = [d.indices for d in self.loader._datasets]\n n = len(self.dataset) - 1\n self.assertEqual(sum([sum(i) for i in indices]), ((n * (n + 1)) / 2))\n\n # Check that no index is bigger than the original dataset length\n max_index = max([max(d.indices) for d in self.loader._datasets])\n self.assertTrue(max_index == (len(self.dataset) - 1))\n\n def test_split_weights(self):\n # The concatenation of the splitted weights must be equal to the\n # original weights\n concat_weights = reduce(lambda acc, v: acc + v,\n self.loader._weights,\n [])\n self.assertEqual(concat_weights, 
self.weights)\n","repo_name":"JHorcasitas/cnn_document_binarization","sub_path":"test/test_dataloader.py","file_name":"test_dataloader.py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"60"} +{"seq_id":"30336234448","text":"# This is a lightweight ML agent trained by self-play.\n# After sharing this notebook,\n# we will add Hungry Geese environment in our HandyRL library.\n# https://github.com/DeNA/HandyRL\n# We hope you enjoy reinforcement learning!\n\n\nimport pickle\nimport bz2\nimport base64\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n \n\n# Input for Neural Network\n\ndef make_input(obses):\n b = np.zeros((17, 7 * 11), dtype=np.float32)\n obs = obses[-1]\n\n for p, pos_list in enumerate(obs['geese']):\n # head position\n for pos in pos_list[:1]:\n b[0 + (p - obs['index']) % 4, pos] = 1\n # tip position\n for pos in pos_list[-1:]:\n b[4 + (p - obs['index']) % 4, pos] = 1\n # whole position\n for pos in pos_list:\n b[8 + (p - obs['index']) % 4, pos] = 1\n \n # previous head position\n if len(obses) > 1:\n obs_prev = obses[-2]\n for p, pos_list in enumerate(obs_prev['geese']):\n for pos in pos_list[:1]:\n b[12 + (p - obs['index']) % 4, pos] = 1\n\n # food\n for pos in obs['food']:\n b[16, pos] = 1\n\n return b.reshape(-1, 7, 11)\n\n# Main Function of Agent\nfrom kaggle_environments.envs.hungry_geese.hungry_geese import Observation, Configuration, Action, GreedyAgent \na = GreedyAgent(Configuration({'rows': 7, 'columns': 11}))\ndef agent(obs, _):\n # print(\"obs\",obs)\n return a (Observation(obs))","repo_name":"zhoushiyang12/Hungry-goose-try","sub_path":"submission_rule.py","file_name":"submission_rule.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"12521712063","text":"import math\n\n\ndef SUM06(n):\n s = 0\n i = 1\n while i <= n:\n s += math.sqrt(2 + s)\n i += 1\n s = \"{:.5f}\".format(s)\n return s\n\n\ndef main():\n case = int(input())\n for i in range(0, case):\n n = int(input())\n print(SUM06(n))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"trihuynhnhut0107/Thor-LuyenCode","sub_path":"TuyetLoan/SUM06.py","file_name":"SUM06.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"3916940134","text":"#!/usr/bin/env python3\nimport sys\n\n# get args by `cat token` and encode it.\n# 'surrogateescape' for surrogate error\nvalue = sys.argv[1].encode(errors='surrogateescape')\n\n# parse the values get in token\nnew_value = ''.join([chr(v - i) for i, v in enumerate(value)])\n\n# print new values\nprint(new_value)\n","repo_name":"kev-ye/42_SnowCrash","sub_path":"level09/Ressources/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"19986357155","text":"import tkinter\nimport tkinter.messagebox\n\nclass MyGui:\n def __init__(self):\n self.main_window = tkinter.Tk()\n \n self.top_frame = tkinter.Frame(self.main_window)\n self.bottom_frame = tkinter.Frame(self.main_window)\n \n self.cb_var1 = tkinter.IntVar()\n self.cb_var2 = tkinter.IntVar()\n self.cb_var3 = tkinter.IntVar()\n self.cb_var4 = tkinter.IntVar()\n self.cb_var5 = tkinter.IntVar()\n self.cb_var6 = tkinter.IntVar()\n self.cb_var7 = tkinter.IntVar()\n \n 
self.cb_var1.set(0)\n        self.cb_var2.set(0)\n        self.cb_var3.set(0)\n        self.cb_var4.set(0)\n        self.cb_var5.set(0)\n        self.cb_var6.set(0)\n        self.cb_var7.set(0)\n        \n        self.cb1 = tkinter.Checkbutton(self.top_frame,\n            text='Oil change - $30.00',variable=self.cb_var1)\n        self.cb2 = tkinter.Checkbutton(self.top_frame,\n            text='Lube job - $20.00', variable=self.cb_var2)\n        self.cb3 = tkinter.Checkbutton(self.top_frame,\n            text='Radiator flush - $40.00', variable=self.cb_var3)\n        self.cb4 = tkinter.Checkbutton(self.top_frame,\n            text='Transmission fluid change - $100.00', variable=self.cb_var4)\n        self.cb5 = tkinter.Checkbutton(self.top_frame,\n            text='Inspection - $35.00', variable=self.cb_var5)\n        self.cb6 = tkinter.Checkbutton(self.top_frame,\n            text='Muffler replacement - $200.00', variable=self.cb_var6)\n        self.cb7 = tkinter.Checkbutton(self.top_frame,\n            text='Tire rotation - $20.00', variable=self.cb_var7)\n        \n        self.calc_button = tkinter.Button(self.bottom_frame,\n            text = 'Show charges', command=self.summary)\n        self.quit_button = tkinter.Button(self.bottom_frame,\n            text = 'Quit', command=self.main_window.destroy)\n\n        self.cb1.pack()\n        self.cb2.pack()\n        self.cb3.pack()\n        self.cb4.pack()\n        self.cb5.pack()\n        self.cb6.pack()\n        self.cb7.pack()\n        \n        self.calc_button.pack(side = \"left\")\n        self.quit_button.pack()\n        \n        self.top_frame.pack()\n        self.bottom_frame.pack()\n        \n        tkinter.mainloop()\n    \n    def summary(self):\n        summ = 0\n        self.message = 'Your charges = $'\n\n        if self.cb_var1.get() == 1:\n            summ += 30\n        if self.cb_var2.get() == 1:\n            summ += 20\n        if self.cb_var3.get() == 1:\n            summ += 40\n        if self.cb_var4.get() == 1:\n            summ += 100\n        if self.cb_var5.get() == 1:\n            summ += 35\n        if self.cb_var6.get() == 1:\n            summ += 200\n        if self.cb_var7.get() == 1:\n            summ += 20\n        self.message += str(summ) + '.00'\n        \n        tkinter.messagebox.showinfo('Total charges', self.message)\n\n    \na = MyGui()\n","repo_name":"AEsmur/my_python","sub_path":"3rd semester HWs/HW2/task4.py","file_name":"task4.py","file_ext":"py","file_size_in_byte":3141,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"}
{"seq_id":"36693075591","text":"from collections import defaultdict\n\ndef make_graph(tickets):\n\tgraph = defaultdict(list)\n\tfor s, t in tickets:\n\t\tgraph[s].append(t)\n\tfor name in graph:\n\t\tgraph[name].sort(reverse=True)\n\treturn graph\n\ndef solution(tickets):\n\tgraph, answer = make_graph(tickets), []\n\n\tdef dfs(cur):\n\t\twhile graph[cur]:\n\t\t\tdfs(graph[cur].pop())\n\t\telse:\n\t\t\tnonlocal answer\n\t\t\tanswer.append(cur)\n\tdfs('ICN')\n\treturn answer[::-1]\n\ndef solution2(tickets):\n    routes = {}\n    for t in tickets:\n        routes[t[0]] = routes.get(t[0], []) + [t[1]]\n    for r in routes:\n        routes[r].sort(reverse=True)\n    stack = ['ICN']\n    path = []\n    while stack:\n        top = stack[-1]\n        if top in routes and routes[top]:\n            stack.append(routes[top].pop())\n        else:\n            path.append(stack.pop())\n    return path[::-1]\n\nif __name__ == '__main__':\n\t# print(solution(\n\t# \t[[\"ICN\", \"JFK\"], [\"HND\", \"IAD\"], [\"JFK\", \"HND\"]]))\n\t# print(solution(\n\t# \t[[\"ICN\", \"SFO\"], [\"ICN\", \"ATL\"], [\"SFO\", \"ATL\"], [\"ATL\", \"ICN\"], [\"ATL\",\"SFO\"]]))\n\t# ['ICN', 'B', 'ICN', 'A', 'D', 
'A']\n\tprint(solution(\n\t\t[['ICN','B'],['B','ICN'],['ICN','A'],['A','D'],['D','A']]))\n\tprint(solution2(\n\t\t[['ICN','B'],['B','ICN'],['ICN','A'],['A','D'],['D','A']]))\n\t","repo_name":"iml1111/algorithm-study","sub_path":"src/programmers/level3/여행경로.py","file_name":"여행경로.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"7343171710","text":"'''\r\nThe UI module\r\n'''\r\nimport Logic\r\n\r\nstoryes = [1,2,3,4,5,6,7]\r\nliftA = 1\r\nliftB = 7\r\n\r\nmatrix = [['Storyes', 'A','B'], # the default matrix\r\n ['7', '-','*'],\r\n ['6', '-','-'],\r\n ['5', '-','-'],\r\n ['4', '-','-'],\r\n ['3', '-','-'],\r\n ['2', '-','-'],\r\n ['1', '*','-']]\r\n\r\ndef showMenu():\r\n \"\"\"Prints the menu\r\n Input: no input\r\n Output: the menu\"\"\"\r\n\r\n print (\"PLEASE CHOOSE AN OPTION: \",'\\n')\r\n print (\"1.Show the elevator states \")\r\n print (\"2.Choose your floor and call the elevator \")\r\n print (\"0.Exit\")\r\n \r\ndef showElevatorStates(liftA,liftB):\r\n \"\"\"Prints the states of the elevators\r\n Input: no input\r\n Output: the states\"\"\"\r\n\r\n print (\"Elevator A is at floor \",liftA)\r\n print (\"Elevator B is at floor \",liftB,'\\n')\r\n\r\n\r\ndef secondOption(liftA,liftB):\r\n \"\"\"This is the second option that you can choose. You can choose a floor, the correct elevator comes to that floor,\r\n then you can choose the destination, and the elevator goes there;\r\n Input: the elevators\r\n Output: the states and for each floor, displays which elevator is going up/down\"\"\"\r\n\r\n Logic.remakeMatrix(0,0,liftA,liftB,matrix) #resets the matrix\r\n Logic.printTable(matrix)\r\n showElevatorStates(liftA,liftB)\r\n \r\n stop = False\r\n while stop == False: #a loop so you can choose multiple times before closing the program\r\n print(\"Call the elevator from floor: \")\r\n floor = int(input())\r\n if floor > 0 and floor < 8:\r\n if Logic.bestLift(liftA,liftB,floor) == liftB:\r\n print(\"The B elevator will come, from floor: \", liftB)\r\n prevB = liftB\r\n liftB = floor\r\n Logic.remakeMatrix(0,prevB,liftA,liftB,matrix) #prevA=0 means that the matrix is changing only for elevator B\r\n Logic.printTable(matrix)\r\n stop = True\r\n else:\r\n print(\"The A elevator will come, from floor: \", liftA)\r\n prevA = liftA\r\n liftA = floor\r\n Logic.remakeMatrix(prevA,0,liftA,liftB,matrix)\r\n Logic.printTable(matrix)\r\n stop = True\r\n showElevatorStates(liftA,liftB)\r\n else:\r\n print(\"Invalid floor\")\r\n \r\n stop = False\r\n while stop == False: #same here\r\n print(\"Choose the destination: \")\r\n if floor > 0 and floor < 8: #without this line, if the floor is incorrect, previousfloor will become an incorrect floor\r\n previousfloor = floor\r\n floor = int(input())\r\n if floor > 0 and floor < 8:\r\n if liftA == previousfloor:\r\n liftA = floor\r\n Logic.remakeMatrix(previousfloor,0,liftA,liftB,matrix)\r\n Logic.printTable(matrix)\r\n stop = True\r\n showElevatorStates(liftA,liftB)\r\n if liftB == previousfloor:\r\n liftB = floor\r\n Logic.remakeMatrix(0,previousfloor,liftA,liftB,matrix)\r\n Logic.printTable(matrix)\r\n stop = True\r\n showElevatorStates(liftA,liftB)\r\n else:\r\n print(\"Invalid floor\")\r\n\r\n\r\ndef start(liftA,liftB):\r\n \"\"\"This is the start function, and the controller for the menu\r\n input: the lifts\r\n output: \"\"\"\r\n\r\n stop = False\r\n while stop == False: # and here\r\n showMenu()\r\n option=int(input())\r\n if option == 1:\r\n 
showElevatorStates(liftA,liftB)\r\n        elif option == 2:\r\n            secondOption(liftA,liftB)\r\n        elif option == 0:\r\n            stop = True\r\n        else:\r\n            print(\"Invalid option\")\r\n\r\n\r\nstart(liftA,liftB)","repo_name":"TitusPop/FunStuff","sub_path":"Elevator/UI.py","file_name":"UI.py","file_ext":"py","file_size_in_byte":3735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"22953879142","text":"import subprocess\n\ndef run_dalfox(target_url, options, output_file):\n    command = f'dalfox scan {target_url} {options}' # the DalFox command to run\n\n    try:\n        result = subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT)\n        result = result.decode('utf-8') # decode the output into text\n        \n        if output_file:\n            with open(output_file, 'w') as file:\n                file.write(result) # write the result to the text file\n            print(f'Results were written to {output_file}')\n        else:\n            print(result) # print DalFox's output\n    except subprocess.CalledProcessError as e:\n        print(f'Error while running DalFox: {e.output.decode(\"utf-8\")}')\n\n# Collect input from the user\ntarget_url = input('Enter the URL to scan: ')\noptions = input('Enter DalFox options (if any): ')\noutput_file = input('Enter an output file name (or press Enter to skip writing a file): ')\n\nrun_dalfox(target_url, options, output_file)\n\n","repo_name":"akaisaiza/ToolBurp","sub_path":"dalfox.py","file_name":"dalfox.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"17844756854","text":"import tensorflow as tf\nfrom tensorflow import keras\nimport numpy as np\ntry:\n    from utils import filters\nexcept ModuleNotFoundError:\n    import filters\n\ndef focalSoftMSE(y_true, y_pred, alpha=0.1, beta=10.0):\n\n    squareError = tf.math.squared_difference(y_true, y_pred)\n    softplus = tf.math.log(1+alpha*tf.math.exp(squareError * beta + 3))\n    loss = squareError * softplus\n\n    return tf.reduce_mean(loss)\n\ndef focalImageLoss(y_true, y_pred, threshold):\n    return\n\ndef focalMSE(y_true, y_pred, alpha=0.1, gamma=2.0):\n    \"\"\"\n    Description: focal MSE loss\n    \"\"\"\n    mse = keras.losses.mse(y_true, y_pred)\n    loss = alpha * tf.pow(1-y_true, gamma) * mse[:,:,:,:,tf.newaxis]\n\n    return tf.reduce_sum(loss)\n\ndef meanGradientError(y_true, y_pred):\n    \"\"\"\n    Description: mean gradient error\n    \"\"\"\n    mge = keras.losses.mse(filters.sobelFilter3D(y_true), filters.sobelFilter3D(y_pred))\n\n    return tf.reduce_mean(mge)\n\n\ndef mixedGradeintError(y_true, y_pred, alpha=0.001):\n    \"\"\"\n    Description: mixed gradient error\n    \"\"\"\n    mge = keras.losses.mse(filters.sobelFilter3D(y_true), filters.laplacianFilter3D(y_pred))\n    mse = keras.losses.mse(y_true, y_pred)\n\n    return tf.reduce_mean(alpha * mge + (1-alpha)*mse)\n\nclass mixedMSE():\n    \"\"\"MSE blended with an extra MSE term computed on filtered tensors.\"\"\"\n    def __init__(self, filter, mode=\"add\", alpha=0.001, **kwargs):\n        self.filter = filter\n        self.alpha = alpha\n        self.kwargs = kwargs\n        self.mode = mode\n\n    def __call__(self, y_true, y_pred):\n        tobemix = keras.losses.mse(self.filter(y_true, **self.kwargs), self.filter(y_pred, **self.kwargs))\n        mse = keras.losses.mse(y_true, y_pred)\n        if self.mode==\"add\":\n            loss = tf.reduce_mean(self.alpha * tobemix + mse)\n        elif self.mode==\"blend\":\n            loss = tf.reduce_mean(self.alpha * tobemix + (1-self.alpha)*mse)\n        \n        \n        return loss\n\ndef cDice(y_true, y_pred):\n    by = tf.cast(tf.where(y_true>0, 1, 0), tf.float32)\n    intersect = tf.reduce_sum(by*y_pred)\n    if intersect >0:\n        c =
 tf.reduce_sum(by*y_pred)/tf.reduce_sum(tf.cast(tf.where(y_pred>0,1,0), tf.float32)*by)\n    else:\n        c = 1\n    cdice = 2*intersect / (c*tf.reduce_sum(y_pred)+tf.reduce_sum(by))\n    return cdice\n\nclass mixedDiceMSE():\n    \"\"\"MSE mixed with a (1 - continuous Dice) penalty.\"\"\"\n    def __init__(self, filter, mode=\"add\", alpha=0.001, **kwargs):\n        self.filter = filter\n        self.alpha = alpha\n        self.kwargs = kwargs\n        self.mode = mode\n    \n    def cDice(self, y_true, y_pred):\n        # by = tf.cast(tf.where(y_true>0, 1, 0), tf.float32)\n        by = y_true\n        intersect = tf.reduce_sum(by*y_pred)\n        # if intersect >0:\n        #     c = tf.reduce_sum(by*y_pred)/tf.reduce_sum(tf.cast(tf.where(y_pred>0,1,0), tf.float32)*by)\n        # else:\n        #     c = 1\n        cdice = 2*intersect / (tf.reduce_sum(y_pred**2)+tf.reduce_sum(by**2))\n        return cdice\n    \n    def __call__(self, y_true, y_pred):\n        tobemix = 1 - self.cDice(y_true, y_pred)\n        mse = keras.losses.mse(y_true, y_pred)\n        if self.mode==\"add\":\n            loss = tf.reduce_mean(self.alpha * tobemix + mse)\n        elif self.mode==\"blend\":\n            loss = tf.reduce_mean(self.alpha * tobemix + (1-self.alpha)*mse)\n        return loss\n\nclass focalSoftDice():\n    def __init__(self, filter, mode=\"add\", alpha=0.001, **kwargs):\n        self.filter = filter\n        self.alpha = alpha\n        self.kwargs = kwargs\n        self.mode = mode\n\n    def __call__(self, y_true, y_pred):\n        y_true_filtered = self.filter(y_true, **self.kwargs)\n        dice = 2 * tf.reduce_sum(y_true_filtered*y_pred) / tf.reduce_sum(y_true_filtered+y_pred)\n\n        tobemix = keras.losses.mse(self.filter(y_true, **self.kwargs), self.filter(y_pred, **self.kwargs))\n        mse = keras.losses.mse(y_true, y_pred)\n        if self.mode==\"add\":\n            loss = tf.reduce_mean(self.alpha * tobemix + mse)\n        elif self.mode==\"blend\":\n            loss = tf.reduce_mean(self.alpha * tobemix + (1-self.alpha)*mse)\n        \n        \n        return loss\n
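\n# A minimal usage sketch (hypothetical: mixedMSE forwards its **kwargs to the\n# filter, so any keyword names must match your filter's signature in\n# utils.filters; shapes are assumed to be (batch, D, H, W, channels)):\n#\n#   loss_fn = mixedMSE(filters.sobelFilter3D, mode=\"blend\", alpha=0.1)\n#   loss = loss_fn(y_true, y_pred)\n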
print(\"Epoch {:03d}: Loss: {:.3f}, Accuracy: {:.3%}\".format(epoch,\n epoch_loss_avg.result(),\n epoch_accuracy.result()))\n \n print(tf.reduce_mean(\n keras.losses.mse(\n filters.gaussianFilter3D(tfimage, 0.5,3), minimodel(tfimage)\n )\n ))","repo_name":"wangshubo90/GAN_3D_Autoencoder","sub_path":"utils/losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":6291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"1167386230","text":"\"\"\"Quick Sort is a Divide and Conquer algorithm. It picks an element as \npivot and partitions the given array around the picked pivot.\nGiven an array arr[], its starting position low and its ending position high.\n\nImplement the partition() and quickSort() functions to sort the array.\n\n\"\"\"\n\n\nclass Solution:\n #Function to sort a list using quick sort algorithm.\n def quickSort(self,arr,l,h):\n # code here\n while lpivot):\n j-=1\n if i>=j:\n return j\n \n arr[i], arr[j] = arr[j], arr[i]\n \n \n\n#{ \n# Driver Code Starts\n#Initial Template for Python 3\n\nif __name__ == \"__main__\":\n t=int(input())\n for i in range(t):\n n=int(input())\n arr=list(map(int,input().split()))\n Solution().quickSort(arr,0,n-1)\n for i in range(n):\n print(arr[i],end=\" \")\n print()\n\n# } Driver Code Ends","repo_name":"deepzsenu/python-dsa","sub_path":"Amazon_Practice/03.sorting/02.Quick Sort.py","file_name":"02.Quick Sort.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"6130481884","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom appdata.DbAccess import * \nfrom appdata.model import *\nfrom appdata.option import *\nimport pg8000\nimport datetime\n\nclass vehicle():\n __vehicleList = []\n def __init__(self, pModel, pInsertionDate, pDbId = None):\n self.__dbId = pDbId\n self.__model = pModel\n self.__optionList = option.FindByVehicle(pDbId)\n self.__insertionDate = pInsertionDate\n \n #get\n def DbId(self):\n return self.__dbId\n \n #get/set\n def Model(self, pModel = None):\n if(pModel == None):\n return self.__model\n else:\n self.__model = pModel\n\n #get\n def OptionList(self):\n return self.__optionList\n \n #get\n def InsertionDate(self):\n return self.__insertionDate\n\n #chargement de tout les véhicules\n @classmethod\n def FindAll(cls):\n provVehicleList = []\n cursor = DbAccess.Querry(\"SELECT * FROM vehicule;\")\n results = None\n if(cursor != None):\n results = cursor.fetchall()\n for row in results:\n id, modelId, insertionDate = row\n aModel = model.FindById(modelId)\n aVehicle = vehicle(aModel, insertionDate, id)\n vehicleList.append(aVehicle)\n cls.__vehicleList = provVehicleList\n return cls.__vehicleList\n\n #chargement de l'orrurence correspondant à un id passé en paramètre\n @classmethod\n def FindById(cls, pId):\n vehicleToReturn = None\n for aVehicle in cls.__vehicleList:\n if (aVehicle.DbId()) == pId:\n vehicleToReturn = aVehicle\n if (vehicleToReturn == None):\n cursor = DbAccess.Querry(\"SELECT * FROM vehicule WHERE vehicule_id = \" + str(pId) + \";\")\n results = None\n if(cursor != None):\n results = cursor.fetchall()\n for row in results:\n id, modelId, insertionDate = row\n aModel = model.FindById(modelId)\n aVehicle = vehicle(aModel, insertionDate, id)\n cls.__vehicleList.append(aVehicle)\n vehicleToReturn = aVehicle\n return 
","repo_name":"paccalin/projets3","sub_path":"raspberry/appdata/vehicle.py","file_name":"vehicle.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"}
{"seq_id":"15391890959","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom selenium import webdriver\nfrom time import sleep\nfrom selenium.common.exceptions import ElementClickInterceptedException\n\n\n# In[2]:\n\n\ndriver = webdriver.Chrome()\nurl = 'https://jeux.loro.ch/horses/races'\ndriver.get(url)\n\n### select the finished-races tab\ndriver.find_element_by_xpath(\"\"\"\n//*[@id=\"root\"]/main/section/article/div/div/nav[2]/ul/li[2]\"\"\").click()\nnbCourse = driver.find_element_by_xpath(\"\"\"\n//*[@id=\"root\"]/main/section/article/div/div/nav[2]/ul/li[2]/p[2]\"\"\")\nprint(\"number of finished races: {0}\".format(nbCourse.text[:2]))\nnombredecourse = int(nbCourse.text[:2])\n\n### open the finished-races section\ndriver.find_element_by_xpath(\"\"\"\n//*[@id=\"root\"]/main/section/article/div/div/section/ul/li[2]/a/article/section/div[1]\"\"\").click()\n\n### counter for the while loop\ncounter = 1 \n### wait time between clicks \ntempsAttente = 1\n## rows of interest on a horse's info sheet\nlisteInfos = [1,2,3,4,8,9,10]\n### now write the data to a csv file \n\n### name of the csv file\nname = driver.find_element_by_xpath(\"\"\"//*[@id=\"root\"]/main/div/article[1]/section[2]/article/h3/span[1]\"\"\")\ndate = driver.find_element_by_xpath(\"\"\"//*[@id=\"root\"]/main/div/article[1]/div/p/span\"\"\")\nnameCsv = date.text.replace(' ','_') + name.text.replace(' ','_') \nprint(\"csv file name: \" + nameCsv)\n\n### write the data to the csv file\ndataFile = 'data_{0}.csv'.format(nameCsv)\nwith open(dataFile, mode='w') as horseRacing:\n    horseRacing.write('NumeroCheval,NomCheval,entraineur,proprietaire,sex,age,race,poid,corde,distanceCourse,cote,nomdeMaman,nomdePapa,courseCourue,victoire,place,gainCarriere,positionArrive\\n')\n    while counter < nombredecourse : \n        checkCateg = driver.find_element_by_xpath(\"\"\"\n        //*[@id=\"root\"]/main/div/article[3]/section/div/div[1]/div[2]/p\"\"\")\n        if checkCateg.text == 'PLAT':\n            print(\"flat race\")\n            ## click the results (arrivals) tab\n            driver.find_element_by_xpath(\"\"\"//*[@id=\"root\"]/main/section/div/div/section/nav/ul/li[2]\"\"\").click()\n\n            listePosition = {}\n            ### collect the finishing positions for the race\n            position = driver.find_elements_by_class_name(\"arrivals-tab-content__position-number\")\n\n            numberX = driver.find_elements_by_class_name(\"arrivals-tab-content__separator\")\n            for p,n in zip(position,numberX):\n                # print(p.text)\n                # ignore non-starting horses\n                if p.text != 'NP':\n                    if p.text == \"N/C\":\n                        valeur = '0'\n                    else:\n                        valeur = p.text\n                    listePosition[int(n.text)] = valeur\n            # print(listePosition)\n\n            ## sort the keys in ascending order\n            listePoX ={}\n            for k in sorted(listePosition.keys()):\n                # print(\"%s: %s\" % (k, listePosition[k]))\n                listePoX[k] = listePosition[k]\n            print(\"---\"* 30 )\n            print(\"Finishing position and the corresponding horse number: \")\n            print(listePoX)\n\n            ## the list of starting horses\n            listePartante = []\n            [listePartante.append(k) for k in listePoX.keys()]\n            print(\"---\"* 30 )\n            print(\"list of starting horses: \")\n            print(listePartante)\n            print(\"---\"* 30 )\n            ### now scrape each horse's data\n
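            # NOTE: the absolute XPaths used below are tied to the page layout at\n            # the time of scraping and will need updating if jeux.loro.ch changes\n            # its markup.\n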
            driver.find_element_by_xpath(\"\"\"//*[@id=\"root\"]/main/section/div/div/section/nav/ul/li[3]/p\"\"\").click()\n            ### strip special characters\n            def delSpeLetter(text):\n                \"\"\"\n                Remove special characters from a line of text\n                \"\"\"\n                for x in text:\n                    if x not in 'abcdefghijklmnopqrstuvwyzABCDEFGHIJKLMNOPQRSTUVWYZ ':\n                        text = text.replace(x,'')\n                return text\n\n            ### look over the list of horses running this race\n\n            #sleep(tempsAttente)\n\n            ### fetch the information for each horse in the race\n            links = driver.find_elements_by_class_name('collapsible__section')\n            counterLinks = 0\n            ### main program\n            for link in links:\n                if counterLinks < len(listePartante):\n                    ### the number of each horse\n                    print('--'*20)\n                    print(listePartante[counterLinks])\n                    number = listePartante[counterLinks]\n                    print('--'*20)\n\n                    ### horse names\n                    horseName =driver.find_element_by_xpath(\"\"\"\n                    //*[@id=\"root\"]/main/section/div/div/section/section/div[1]\n                    /div/section[{0}]/header/div/div[1]/div/div/div/h2/span[2]\n                    \"\"\".format(listePartante[counterLinks]))\n                    print('Horse name: ' + horseName.text)\n                    horse_name = delSpeLetter(horseName.text)\n\n                    ### each horse's odds\n                    odd = driver.find_element_by_xpath(\"\"\"\n                    //*[@id=\"root\"]/main/section/div/div/section/section/div[1]\n                    /div/section[{0}]/header/div/div[2]/span\n                    \"\"\".format(listePartante[counterLinks]))\n                    print('odds: ' + odd.text)\n                    cote = odd.text\n\n                    ### click to open each horse's details\n                    link =driver.find_element_by_xpath(\"\"\"\n                    //*[@id=\"root\"]/main/section/div/div/section/section/div[1]/div/section[{0}]\n                    \"\"\".format(listePartante[counterLinks]))\n                    link.click()\n                    sleep(tempsAttente)\n\n                    ### the horse's info sheet\n                    for x in range(1,11):\n                        linke = driver.find_element_by_xpath(\n                            \"\"\"\n                            //*[@id=\"root\"]/main/section/div/div/section/section/div[1]\n                            /div/section[{0}]/article/section/div/div/div[2]/div/div[2]\n                            /div/table/tbody/tr[{1}]/td\n                            \"\"\".format(listePartante[counterLinks],x))\n\n                        if linke != None:\n                            if x == 1 : \n                                print('Trainer: ' + linke.text)\n                                entraineur = delSpeLetter(linke.text)\n                            elif x == 2 :\n                                print('Owner: ' + linke.text)\n                                proprietaire = delSpeLetter(linke.text)\n                            elif x == 3 :\n                                print('Sex: ' + linke.text)\n                                sex = linke.text\n                                if sex == 'MÂLE':\n                                    sex = 'MALE'\n                            elif x == 4 :\n                                print('Age: ' + linke.text)\n                                age = linke.text\n                            elif x ==8 : \n                                print('Breed: ' + linke.text)\n                                race = delSpeLetter(linke.text)\n                            elif x == 9 :\n                                print('Weight(kg): ' + linke.text.replace('Kg',''))\n                                poid = linke.text.replace('Kg','')\n                            elif x == 10:\n                                print('Draw: ' + linke.text)\n                                corde = linke.text\n\n                        else:\n                            print('unexpected row!')\n                        #sleep(tempsAttente)\n\n                    ### switch to the other info tab \n                    sleep(tempsAttente)\n                    changeFiche = driver.find_element_by_xpath(\"\"\"\n                    //*[@id=\"root\"]/main/section/div/div/section/section/div[1]\n                    /div/section[{0}]/article/section/div/div/div[3]\n                    \"\"\".format(listePartante[counterLinks]))\n                    changeFiche.click()\n                    sleep(tempsAttente)\n\n                    ### ancestry\n                    for z in range (1,3):\n                        linkAsc = driver.find_element_by_xpath(\n                            \"\"\"\n                            //*[@id=\"root\"]/main/section/div/div/section/section/div[1]\n                            /div/section[{0}]/article/section/div/div/div[2]/div/div[3]/\n                            div/table[1]/tbody/tr[{1}]/td\n                            \"\"\".format(listePartante[counterLinks],z))\n                        if z == 1 : \n                            print(\"Dam's name: \" + linkAsc.text)\n                            nomdeMaman = delSpeLetter(linkAsc.text)\n                        else:\n                            print(\"Sire's name: \"+ linkAsc.text )\n                            nomdePapa = delSpeLetter(linkAsc.text)\n\n                    ### performance\n
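                    # rows 1-4 of the performance table hold: races run, wins,\n                    # places and career earnings (printed below)\n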
                    for y in range(1,5):\n                        linkPerf = driver.find_element_by_xpath(\"\"\"\n                        //*[@id=\"root\"]/main/section/div/div/section/section/div[1]\n                        /div/section[{0}]/article/section/div/div/div[2]/div/div[3]\n                        /div/table[2]/tbody/tr[{1}]/td\n                        \"\"\".format(listePartante[counterLinks],y))\n                        if y == 1:\n                            print('Races run: ' + linkPerf.text)\n                            courseCourue = linkPerf.text\n                        elif y ==2: \n                            print('wins: ' + linkPerf.text)\n                            victoire = linkPerf.text\n                        elif y == 3 :\n                            print( 'Places: ' + linkPerf.text)\n                            place = linkPerf.text\n                        elif y == 4: \n                            print('Career earnings (frs): ' + linkPerf.text.replace(\"'\",\"\"))\n                            gainCarriere = linkPerf.text.replace(\"'\",\"\")\n\n                    #sleep(tempsAttente)\n\n                    ### race distance \n                    distance = driver.find_element_by_xpath(\"\"\"\n                    //*[@id=\"root\"]/main/div/article[3]/section/div/div[3]/div[1]/p\n                    \"\"\")\n                    print('Distance(m): ' + distance.text.replace('m',''))\n                    distanceCourse = distance.text.replace('m','')\n\n                    ### the horses' finishing positions\n                    for key in listePoX.keys():\n                        if listePartante[counterLinks] == key:\n                            positionArrive = listePoX[key]\n                            print(\"Finishing position: \" + positionArrive)\n\n\n                    counterLinks += 1\n                    sleep(tempsAttente)\n\n                else:\n                    break\n\n            sleep(tempsAttente)\n            horseRacing.write(str(number) + ','+ \n                              horse_name + ','+ \n                              entraineur + ','+\n                              proprietaire + ','+ \n                              sex + ','+\n                              age + ','+\n                              race + ','+\n                              poid + ','+\n                              corde + ','+ \n                              distanceCourse + ','+\n                              cote + ','+ \n                              nomdeMaman + ','+ \n                              nomdePapa + ','+ \n                              courseCourue + ','+\n                              victoire + ','+\n                              place + ','+\n                              gainCarriere + ','+ \n                              positionArrive + '\\n')\n            sleep(1)\n            target = driver.find_element_by_xpath(\"\"\"\n            //*[@id=\"root\"]/main/div/section/nav/div/ul/li[2]\"\"\")\n            target.location_once_scrolled_into_view\n\n\n        else:\n            print(\"harness trot race\")\n            ### move on to the next race\n            clickCourse = driver.find_element_by_xpath(\"\"\"\n            //*[@id=\"root\"]/main/div/article[1]/section[2]/div[1]\"\"\")\n            clickCourse.click()\n            counter += 1","repo_name":"douchuis/forfunclock","sub_path":"data/scraperdata.py","file_name":"scraperdata.py","file_ext":"py","file_size_in_byte":12386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"25284105094","text":"from django.forms import ModelForm\nfrom django.utils.translation import ugettext as _\n\nfrom .models import News\n\n\nclass NewsForm(ModelForm):\n    class Meta():\n        model = News\n        exclude = ('author',)\n        fields = [\n            \"title\",\n            \"header_image\",\n            \"content\",\n        ]\n        labels = {\n            'title': _('Title'),\n            'header_image': _('Header image'),\n            'content': _('Content'),\n        }\n","repo_name":"piotrantosz/Cyfrowa-szkola","sub_path":"portfolios/news/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"60"}
{"seq_id":"36780500599","text":"__docformat__ = \"reStructuredText\"\nimport zope.i18nmessageid\nimport zope.interface\nimport zope.schema\nfrom zope.interface.common import mapping\nfrom zope.location.interfaces import ILocation\n\n\nMessageFactory = _ = zope.i18nmessageid.MessageFactory('z3c.form')\n\nINPUT_MODE = 'input'\nDISPLAY_MODE = 'display'\nHIDDEN_MODE = 'hidden'\n\n\nclass NOT_CHANGED:\n    def __repr__(self):\n        return '<NOT_CHANGED>'\n\n\nNOT_CHANGED = NOT_CHANGED()\n\n\nclass NO_VALUE:\n    def __repr__(self):\n        return '<NO_VALUE>'\n\n\nNO_VALUE = NO_VALUE()\n# BBB: the object was renamed to follow common naming style\nNOVALUE = NO_VALUE\n
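\n# Example (added note, hypothetical widget): NO_VALUE lets widget code tell\n# 'nothing was submitted' apart from an empty-but-present value, e.g.\n#\n#   if widget.extract() is NO_VALUE:\n#       ...  # fall back to the field's default\n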
\n# ----[ Layer Declaration ]--------------------------------------------------\n\n\nclass IFormLayer(zope.interface.Interface):\n    \"\"\"A layer that contains all registrations of this package.\n\n    It is intended that someone can just use this layer as a base layer when\n    using this package.\n\n    Since version 2.4.2, this layer doesn't provide IBrowserRequest anymore.\n    This makes it possible to use the IFormLayer within z3c.jsonrpc without\n    applying IBrowserRequest to the jsonrpc request.\n    \"\"\"\n\n\n# ----[ Generic Manager Interfaces ]-----------------------------------------\n\nclass IManager(mapping.IEnumerableMapping):\n    \"\"\"A manager of some kind of items.\n\n    *Important*: While managers are mappings, the order of the items is\n    assumed to be important! Effectively a manager is an ordered mapping.\n\n    In general, managers do not have to support a manipulation\n    API. Oftentimes, managers are populated during initialization or while\n    updating.\n    \"\"\"\n\n\nclass ISelectionManager(IManager):\n    \"\"\"Managers that support item selection and management.\n\n    This manager allows one to more carefully specify the contained items.\n\n    *Important*: The API is chosen in such a way that the manager stays\n    immutable. All methods in this interface must return *new* instances of\n    the manager.\n    \"\"\"\n\n    def __add__(other):\n        \"\"\"Used to merge two managers.\"\"\"\n\n    def select(*names):\n        \"\"\"Return a modified instance with an ordered subset of items.\"\"\"\n\n    def omit(*names):\n        \"\"\"Return a modified instance omitting given items.\"\"\"\n\n    def copy():\n        \"\"\"Copy all items to a new instance and return it.\"\"\"\n\n\n# ----[ Validators ]---------------------------------------------------------\n\nclass IData(zope.interface.Interface):\n    \"\"\"A proxy object for form data.\n\n    The object will make all keys within its data attribute available as\n    attributes. The schema that is represented by the data will be directly\n    provided by instances.\n    \"\"\"\n\n    def __init__(schema, data, context):\n        \"\"\"The data proxy is instantiated using the schema it represents, the\n        data fulfilling the schema and the context in which the data are\n        validated.\n        \"\"\"\n\n    __context__ = zope.schema.Field(\n        title=_('Context'),\n        description=_('The context in which the data are validated.'),\n        required=True)\n\n\nclass IValidator(zope.interface.Interface):\n    \"\"\"A validator for a particular value.\"\"\"\n\n    def validate(value, force=False):\n        \"\"\"Validate the value.\n\n        If successful, return ``None``. Otherwise raise an ``Invalid`` error.\n        \"\"\"\n\n\nclass IManagerValidator(zope.interface.Interface):\n    \"\"\"A validator that validates a set of data.\"\"\"\n\n    def validate(data):\n        \"\"\"Validate a dictionary of data.\n\n        This method is only responsible for validating relationships between\n        the values in the data.
It can be assumed that all values have been\n        validated in isolation before.\n\n        The return value of this method is a tuple of errors that occurred\n        during the validation process.\n        \"\"\"\n\n    def validateObject(obj):\n        \"\"\"Validate an object.\n\n        The same semantics as in ``validate()`` apply, except that the values\n        are retrieved from the object and not the data dictionary.\n        \"\"\"\n\n\n# ----[ Errors ]--------------------------------------------------------------\n\nclass IErrorViewSnippet(zope.interface.Interface):\n    \"\"\"A view that provides a rendering for an error\"\"\"\n\n    widget = zope.schema.Field(\n        title=_(\"Widget\"),\n        description=_(\"The widget that the view is on\"),\n        required=True)\n\n    error = zope.schema.Field(\n        title=_('Error'),\n        description=_('Error the view is for.'),\n        required=True)\n\n    def update():\n        \"\"\"Update view.\"\"\"\n\n    def render():\n        \"\"\"Render view.\"\"\"\n\n\nclass IMultipleErrors(zope.interface.Interface):\n    \"\"\"An error that contains many errors\"\"\"\n\n    errors = zope.interface.Attribute(\"List of errors\")\n\n# ----[ Fields ]--------------------------------------------------------------\n\n\nclass IField(zope.interface.Interface):\n    \"\"\"Field wrapping a schema field used in the form.\"\"\"\n\n    __name__ = zope.schema.TextLine(\n        title=_('Title'),\n        description=_('The name of the field within the form.'),\n        required=True)\n\n    field = zope.schema.Field(\n        title=_('Schema Field'),\n        description=_('The schema field that is to be rendered.'),\n        required=True)\n\n    prefix = zope.schema.Field(\n        title=_('Prefix'),\n        description=_('The prefix of the field used to avoid name clashes.'),\n        required=True)\n\n    mode = zope.schema.Field(\n        title=_('Mode'),\n        description=_('The mode in which to render the widget for the field.'),\n        required=True)\n\n    interface = zope.schema.Field(\n        title=_('Interface'),\n        description=_('The interface from which the field is coming.'),\n        required=True)\n\n    ignoreContext = zope.schema.Bool(\n        title=_('Ignore Context'),\n        description=_('A flag, when set, forces the widget not to look at '\n                      'the context for a value.'),\n        required=False)\n\n    widgetFactory = zope.schema.Field(\n        title=_('Widget Factory'),\n        description=_('The widget factory.'),\n        required=False,\n        default=None,\n        missing_value=None)\n\n    showDefault = zope.schema.Bool(\n        title=_('Show default value'),\n        description=_('A flag, when set, makes the widget display '\n                      'field|adapter provided default values.'),\n        default=True,\n        required=False)\n\n\nclass IFields(ISelectionManager):\n    \"\"\"IField manager.\"\"\"\n\n    def select(prefix=None, interface=None, *names):\n        \"\"\"Return a modified instance with an ordered subset of items.\n\n        This extension to the ``ISelectionManager`` allows for handling cases\n        with name-conflicts better by separating field selection and prefix\n        specification.\n        \"\"\"\n\n    def omit(prefix=None, interface=None, *names):\n        \"\"\"Return a modified instance omitting given items.\n\n        This extension to the ``ISelectionManager`` allows for handling cases\n        with name-conflicts better by separating field selection and prefix\n        specification.\n        \"\"\"\n\n\nclass IContentProviders(IManager):\n    \"\"\"\n    A content provider manager\n    \"\"\"\n\n# ----[ Data Managers ]------------------------------------------------------\n\n\nclass IDataManager(zope.interface.Interface):\n    \"\"\"Data manager.\"\"\"\n\n    def get():\n        \"\"\"Get the value.\n\n        If no value can be found, raise an error\n        \"\"\"\n\n    def query(default=NO_VALUE):\n        \"\"\"Get the value.\n\n        If no 
value can be found, return the default value.\n        If access is forbidden, raise an error.\n        \"\"\"\n\n    def set(value):\n        \"\"\"Set the value\"\"\"\n\n    def canAccess():\n        \"\"\"Can the value be accessed.\"\"\"\n\n    def canWrite():\n        \"\"\"Can the data manager write a value.\"\"\"\n\n\n# ----[ Data Converters ]----------------------------------------------------\n\nclass IDataConverter(zope.interface.Interface):\n    \"\"\"A data converter from field to widget values and vice versa.\"\"\"\n\n    def toWidgetValue(value):\n        \"\"\"Convert the field value to a widget output value.\n\n        If conversion fails or is not possible, a ``ValueError`` *must* be\n        raised. However, this method should effectively never fail, because\n        incoming value is well-defined.\n        \"\"\"\n\n    def toFieldValue(value):\n        \"\"\"Convert an input value to a field/system internal value.\n\n        This method *must* also validate the converted value against the\n        field.\n\n        If the conversion fails, a ``ValueError`` *must* be raised. If\n        the validation fails, a ``ValidationError`` *must* be raised.\n        \"\"\"\n\n\n# value interfaces\nclass IValue(zope.interface.Interface):\n    \"\"\"A value.\"\"\"\n\n    def get():\n        \"\"\"Returns the value.\"\"\"\n\n\n# term interfaces\nclass ITerms(zope.interface.Interface):\n    \"\"\" \"\"\"\n\n    context = zope.schema.Field()\n    request = zope.schema.Field()\n    form = zope.schema.Field()\n    field = zope.schema.Field()\n    widget = zope.schema.Field()\n\n    def getTerm(value):\n        \"\"\"Return an ITitledTokenizedTerm object for the given value\n\n        LookupError is raised if the value isn't in the source\n        \"\"\"\n\n    def getTermByToken(token):\n        \"\"\"Return an ITokenizedTerm for the passed-in token.\n\n        If `token` is not represented in the vocabulary, `LookupError`\n        is raised.\n        \"\"\"\n\n    def getValue(token):\n        \"\"\"Return a value for a given identifier token\n\n        LookupError is raised if there isn't a value in the source.\n        \"\"\"\n\n    def __iter__():\n        \"\"\"Iterate over terms.\"\"\"\n\n    def __len__():\n        \"\"\"Return number of terms.\"\"\"\n\n    def __contains__(value):\n        \"\"\"Check whether terms contains the ``value``.\"\"\"\n\n\nclass IBoolTerms(ITerms):\n    \"\"\"A specialization that handles boolean choices.\"\"\"\n\n    trueLabel = zope.schema.TextLine(\n        title=_('True-value Label'),\n        description=_('The label for a true value of the Bool field.'),\n        required=True)\n\n    falseLabel = zope.schema.TextLine(\n        title=_('False-value Label'),\n        description=_('The label for a false value of the Bool field.'),\n        required=False)\n\n\n# ----[ Object factory ]-----------------------------------------------------\n\nclass IObjectFactory(zope.interface.Interface):\n    \"\"\"Factory that will instantiate our objects for ObjectWidget.\n    It could also pre-populate properties as it gets the values extracted\n    from the form passed in ``value``.\n    \"\"\"\n\n    def __call__(value):\n        \"\"\"Return a default object created to be populated.\n        \"\"\"\n\n\n# ----[ Widget layout template ]----------------------------------------------\n\nclass IWidgetLayoutTemplate(zope.interface.Interface):\n    \"\"\"Widget layout template marker used to render the widget layout.\n\n    It is important that we don't inherit this template from IPageTemplate,\n    otherwise we will get into trouble since we lookup an IPageTemplate\n    in the widget/render method.\n\n    \"\"\"\n\n# ----[ Widgets ]------------------------------------------------------------\n\n\nclass IWidget(ILocation):\n    \"\"\"A widget within a form\"\"\"\n\n    name = zope.schema.ASCIILine(\n        title=_('Name'),\n        
description=_('The name the widget is known under.'),\n        required=True)\n\n    label = zope.schema.TextLine(\n        title=_('Label'),\n        description=_('''\n        The widget label.\n\n        Label may be translated for the request.\n\n        The attribute may be implemented as either a read-write or read-only\n        property, depending on the requirements for a specific implementation.\n        '''),\n        required=True)\n\n    mode = zope.schema.ASCIILine(\n        title=_('Mode'),\n        description=_('A widget mode.'),\n        default=INPUT_MODE,\n        required=True)\n\n    required = zope.schema.Bool(\n        title=_('Required'),\n        description=_('If true the widget should be displayed as required '\n                      'input.'),\n        default=False,\n        required=True)\n\n    error = zope.schema.Field(\n        title=_('Error'),\n        description=_('If an error occurred during any step, the error view is '\n                      'stored here.'),\n        required=False)\n\n    value = zope.schema.Field(\n        title=_('Value'),\n        description=_('The value that the widget represents.'),\n        required=False)\n\n    template = zope.interface.Attribute('''The widget template''')\n    layout = zope.interface.Attribute('''The widget layout template''')\n\n    ignoreRequest = zope.schema.Bool(\n        title=_('Ignore Request'),\n        description=_('A flag, when set, forces the widget not to look at '\n                      'the request for a value.'),\n        default=False,\n        required=False)\n\n    # ugly thing to remove setErrors parameter from extract\n    setErrors = zope.schema.Bool(\n        title=_('Set errors'),\n        description=_('A flag, when set, the widget sets error messages '\n                      'on calling extract().'),\n        default=True,\n        required=False)\n\n    # a bit different from ignoreRequiredOnExtract, because we record\n    # here the fact, but for IValidator, because the check happens there\n    ignoreRequiredOnValidation = zope.schema.Bool(\n        title=_('Ignore Required validation'),\n        description=_(\"If set then required fields will pass validation \"\n                      \"regardless whether they're filled in or not\"),\n        default=False,\n        required=True)\n\n    showDefault = zope.schema.Bool(\n        title=_('Show default value'),\n        description=_('A flag, when set, makes the widget display '\n                      'field|adapter provided default values.'),\n        default=True,\n        required=False)\n\n    def extract(default=NO_VALUE):\n        \"\"\"Extract the string value(s) of the widget from the form.\n\n        The return value may be any Python construct, but is typically a\n        simple string, sequence of strings or a dictionary.\n\n        The value *must not* be converted into a native format.\n\n        If an error occurs during the extraction, the default value should be\n        returned. Since this should never happen, if the widget is properly\n        designed and used, it is okay to NOT raise an error here, since we do\n        not want to crash the system during an improper request.\n\n        If there is no value to extract, the default is to be returned.\n        \"\"\"\n\n    def update():\n        \"\"\"Setup all of the widget information used for displaying.\"\"\"\n\n    def render():\n        \"\"\"Render the plain widget without additional layout\"\"\"\n\n    def json_data():\n        \"\"\"Returns a dictionary for the widget\"\"\"\n\n    def __call__():\n        \"\"\"Render a layout template which is calling widget/render\"\"\"\n\n\nclass ISequenceWidget(IWidget):\n    \"\"\"Term based sequence widget base.\n\n    The sequence widget is used for selecting items from a sequence. Don't get\n    confused, this widget does support choosing one or more values from a\n    sequence. 
The word sequence is not used for the schema field, it's used\n    for the values from which this widget can choose.\n\n    This widget base class is used to build single or sequence values based\n    on a sequence which is in most use cases a collection, e.g.\n    IList of IChoice for sequence values or IChoice for single values.\n\n    See also the MultiWidget for building sequence values based on\n    non-collection values, e.g. IList of ITextLine\n    \"\"\"\n\n    noValueToken = zope.schema.ASCIILine(\n        title=_('NO_VALUE Token'),\n        description=_('The token to be used, if no value has been selected.'))\n\n    terms = zope.schema.Object(\n        title=_('Terms'),\n        description=_('A component that provides the options for selection.'),\n        schema=ITerms)\n\n    def updateTerms():\n        \"\"\"Update the widget's ``terms`` attribute and return the terms.\n\n        This method can be used by external components to get the terms\n        without having to worry whether they are already created or not.\n        \"\"\"\n\n\nclass IMultiWidget(IWidget):\n    \"\"\"Non term based sequence widget base.\n\n    The multi widget is used for ITuple, IList or IDict if no other widget is\n    defined.\n\n    Some IList or ITuple fields use another specialized widget if they can\n    choose from a collection, e.g. an IList of IChoice. The base class of such\n    a widget is the ISequenceWidget.\n\n    This widget can handle non-collection sequences and offers adding or\n    removing values to or from the sequence. Each sequence value gets rendered\n    by its own relevant widget, e.g. IList of ITextLine or ITuple of IInt\n    \"\"\"\n\n\nclass ISelectWidget(ISequenceWidget):\n    \"\"\"Select widget with ITerms option.\"\"\"\n\n    prompt = zope.schema.Bool(\n        title=_('Prompt'),\n        description=_('A flag, when set, enables a choice explicitly '\n                      'requesting the user to choose a value.'),\n        default=False)\n\n    items = zope.schema.Tuple(\n        title=_('Items'),\n        description=_('A collection of dictionaries containing all pieces of '\n                      'information for rendering. 
The following keys must '\n                      'be in each dictionary: id, value, content, selected'))\n\n    noValueMessage = zope.schema.Text(\n        title=_('No-Value Message'),\n        description=_('A human-readable text that is displayed to refer to the '\n                      'missing value.'))\n\n    promptMessage = zope.schema.Text(\n        title=_('Prompt Message'),\n        description=_('A human-readable text that is displayed to refer to the '\n                      'missing value.'))\n\n\nclass IOrderedSelectWidget(ISequenceWidget):\n    \"\"\"Ordered Select widget with ITerms option.\"\"\"\n\n\nclass ICheckBoxWidget(ISequenceWidget):\n    \"\"\"Checkbox widget.\"\"\"\n\n\nclass ISingleCheckBoxWidget(ICheckBoxWidget):\n    \"\"\"Single Checkbox widget.\"\"\"\n\n\nclass IRadioWidget(ISequenceWidget):\n    \"\"\"Radio widget.\"\"\"\n\n    def renderForValue(value):\n        \"\"\"Render a single radio button element for a given value.\n\n        Here the word ``value`` is used in the HTML sense, in other\n        words it is a term token.\n        \"\"\"\n\n\nclass ISubmitWidget(IWidget):\n    \"\"\"Submit widget.\"\"\"\n\n\nclass IImageWidget(IWidget):\n    \"\"\"Image widget.\"\"\"\n\n\nclass IButtonWidget(IWidget):\n    \"\"\"Button widget.\"\"\"\n\n\nclass ITextAreaWidget(IWidget):\n    \"\"\"Text area widget.\"\"\"\n\n\nclass ITextLinesWidget(IWidget):\n    \"\"\"Text lines widget.\"\"\"\n\n\nclass ITextWidget(IWidget):\n    \"\"\"Text widget.\"\"\"\n\n\nclass IFileWidget(ITextWidget):\n    \"\"\"File widget.\"\"\"\n\n\nclass IPasswordWidget(ITextWidget):\n    \"\"\"Password widget.\"\"\"\n\n\nclass IObjectWidget(IWidget):\n    \"\"\"Object widget.\"\"\"\n\n    def setupFields():\n        \"\"\"setup fields on the widget, by default taking the fields of\n        self.schema\"\"\"\n\n\nclass IWidgets(IManager):\n    \"\"\"A widget manager\"\"\"\n\n    prefix = zope.schema.ASCIILine(\n        title=_('Prefix'),\n        description=_('The prefix of the widgets.'),\n        default='widgets.',\n        required=True)\n\n    mode = zope.schema.ASCIILine(\n        title=_('Mode'),\n        description=_('The mode of the widgets.'),\n        default=INPUT_MODE,\n        required=True)\n\n    errors = zope.schema.Field(\n        title=_('Errors'),\n        description=_('The collection of errors that occurred during '\n                      'validation.'),\n        default=(),\n        required=True)\n\n    ignoreContext = zope.schema.Bool(\n        title=_('Ignore Context'),\n        description=_('If set the context is ignored to retrieve a value.'),\n        default=False,\n        required=True)\n\n    ignoreRequest = zope.schema.Bool(\n        title=_('Ignore Request'),\n        description=_('If set the request is ignored to retrieve a value.'),\n        default=False,\n        required=True)\n\n    ignoreReadonly = zope.schema.Bool(\n        title=_('Ignore Readonly'),\n        description=_('If set then readonly fields will also be shown.'),\n        default=False,\n        required=True)\n\n    ignoreRequiredOnExtract = zope.schema.Bool(\n        title=_('Ignore Required validation on extract'),\n        description=_(\n            \"If set then required fields will pass validation \"\n            \"on extract regardless whether they're filled in or not\"),\n        default=False,\n        required=True)\n\n    hasRequiredFields = zope.schema.Bool(\n        title=_('Has required fields'),\n        description=_('A flag set when at least one field is marked as '\n                      'required'),\n        default=False,\n        required=False)\n\n    # ugly thing to remove setErrors parameter from extract\n    setErrors = zope.schema.Bool(\n        title=_('Set errors'),\n        description=_('A flag, when set, the contained widgets set error '\n                      'messages on calling extract().'),\n        default=True,\n        required=False)\n\n    def update():\n        \"\"\"Setup widgets.\"\"\"\n\n    def extract():\n        \"\"\"Extract the values from the widgets and validate them.\n        \"\"\"\n\n    def extractRaw():\n        
\"\"\"Extract the RAW/string values from the widgets and validate them.\n        \"\"\"\n\n\nclass IFieldWidget(zope.interface.Interface):\n    \"\"\"Offers a field attribute.\n\n    For advanced uses the widget will make decisions based on the field\n    it is rendered for.\n    \"\"\"\n\n    field = zope.schema.Field(\n        title=_('Field'),\n        description=_('The schema field which the widget is representing.'),\n        required=True)\n\n# ----[ Actions ]------------------------------------------------------------\n\n\nclass ActionExecutionError(Exception):\n    \"\"\"An error that occurs during the execution of an action handler.\"\"\"\n\n    def __init__(self, error):\n        self.error = error\n\n    def __repr__(self):\n        return '<{} wrapping {!r}>'.format(self.__class__.__name__, self.error)\n\n\nclass WidgetActionExecutionError(ActionExecutionError):\n    \"\"\"An action execution error that occurred due to a widget value being\n    incorrect.\"\"\"\n\n    def __init__(self, widgetName, error):\n        ActionExecutionError.__init__(self, error)\n        self.widgetName = widgetName\n\n\nclass IAction(zope.interface.Interface):\n    \"\"\"Action\"\"\"\n\n    __name__ = zope.schema.TextLine(\n        title=_('Name'),\n        description=_('The object name.'),\n        required=False,\n        default=None)\n\n    title = zope.schema.TextLine(\n        title=_('Title'),\n        description=_('The action title.'),\n        required=True)\n\n    def isExecuted():\n        \"\"\"Determine whether the action has been executed.\"\"\"\n\n\nclass IActionHandler(zope.interface.Interface):\n    \"\"\"Action handler.\"\"\"\n\n\nclass IActionEvent(zope.interface.Interface):\n    \"\"\"An event specific for an action.\"\"\"\n\n    action = zope.schema.Object(\n        title=_('Action'),\n        description=_('The action for which the event is created.'),\n        schema=IAction,\n        required=True)\n\n\nclass IActionErrorEvent(IActionEvent):\n    \"\"\"An action event that is created when an error occurred.\"\"\"\n\n    error = zope.schema.Field(\n        title=_('Error'),\n        description=_('The error that occurred during the action.'),\n        required=True)\n\n\nclass IActions(IManager):\n    \"\"\"An action manager\"\"\"\n\n    executedActions = zope.interface.Attribute(\n        '''An iterable of all executed actions (usually just one).''')\n\n    def update():\n        \"\"\"Setup actions.\"\"\"\n\n    def execute():\n        \"\"\"Execute actions.\n\n        If an action execution error is raised, the system is notified using\n        the action error occurred event; on the other hand, if successful, the\n        action successful event is sent to the system.\n        \"\"\"\n\n\nclass IButton(zope.schema.interfaces.IField):\n    \"\"\"A button in a form.\"\"\"\n\n    accessKey = zope.schema.TextLine(\n        title=_('Access Key'),\n        description=_('The key that, when pressed, causes the button to be pressed.'),\n        min_length=1,\n        max_length=1,\n        required=False)\n\n    actionFactory = zope.schema.Field(\n        title=_('Action Factory'),\n        description=_('The action factory.'),\n        required=False,\n        default=None,\n        missing_value=None)\n\n\nclass IImageButton(IButton):\n    \"\"\"An image button in a form.\"\"\"\n\n    image = zope.schema.TextLine(\n        title=_('Image Path'),\n        description=_('A relative image path to the root of the resources.'),\n        required=True)\n\n\nclass IButtons(ISelectionManager):\n    \"\"\"Button manager.\"\"\"\n\n\nclass IButtonAction(IAction, IWidget, IFieldWidget):\n    \"\"\"Button action.\"\"\"\n\n\nclass IButtonHandlers(zope.interface.Interface):\n    \"\"\"A collection of handlers for buttons.\"\"\"\n\n    def addHandler(button, handler):\n        \"\"\"Add a new handler for a button.\"\"\"\n\n    def getHandler(button):\n        \"\"\"Get the handler for the button.\"\"\"\n\n    def 
copy():\n \"\"\"Copy this object and return the copy.\"\"\"\n\n def __add__(other):\n \"\"\"Add another handlers object.\n\n During the process a copy of the current handlers object should be\n created and the other one is added to the copy. The return value is\n the copy.\n \"\"\"\n\n\nclass IButtonHandler(zope.interface.Interface):\n \"\"\"A handler managed by the button handlers.\"\"\"\n\n def __call__(form, action):\n \"\"\"Execute the handler.\"\"\"\n\n\n# ----[ Forms ]--------------------------------------------------------------\n\nclass IHandlerForm(zope.interface.Interface):\n \"\"\"A form that stores the handlers locally.\"\"\"\n\n handlers = zope.schema.Object(\n title=_('Handlers'),\n description=_('A list of action handlers defined on the form.'),\n schema=IButtonHandlers,\n required=True)\n\n\nclass IActionForm(zope.interface.Interface):\n \"\"\"A form that stores executable actions\"\"\"\n\n actions = zope.schema.Object(\n title=_('Actions'),\n description=_('A list of actions defined on the form'),\n schema=IActions,\n required=True)\n\n refreshActions = zope.schema.Bool(\n title=_('Refresh actions'),\n description=_('A flag, when set, causes form actions to be '\n 'updated again after their execution.'),\n default=False,\n required=True)\n\n\nclass IContextAware(zope.interface.Interface):\n \"\"\"Offers a context attribute.\n\n For advanced uses, the widget will make decisions based on the context\n it is rendered in.\n \"\"\"\n\n context = zope.schema.Field(\n title=_('Context'),\n description=_('The context in which the widget is displayed.'),\n required=True)\n\n ignoreContext = zope.schema.Bool(\n title=_('Ignore Context'),\n description=_('A flag, when set, forces the widget not to look at '\n 'the context for a value.'),\n default=False,\n required=False)\n\n\nclass IFormAware(zope.interface.Interface):\n \"\"\"Offers a form attribute.\n\n For advanced uses the widget will make decisions based on the form\n it is rendered in.\n \"\"\"\n\n form = zope.schema.Field()\n\n\nclass IForm(zope.interface.Interface):\n \"\"\"Form\"\"\"\n\n mode = zope.schema.Field(\n title=_('Mode'),\n description=_('The mode in which to render the widgets.'),\n required=True)\n\n ignoreContext = zope.schema.Bool(\n title=_('Ignore Context'),\n description=_('If set the context is ignored to retrieve a value.'),\n default=False,\n required=True)\n\n ignoreRequest = zope.schema.Bool(\n title=_('Ignore Request'),\n description=_('If set the request is ignored to retrieve a value.'),\n default=False,\n required=True)\n\n ignoreReadonly = zope.schema.Bool(\n title=_('Ignore Readonly'),\n description=_('If set then readonly fields will also be shown.'),\n default=False,\n required=True)\n\n ignoreRequiredOnExtract = zope.schema.Bool(\n title=_('Ignore Required validation on extract'),\n description=_(\n \"If set then required fields will pass validation \"\n \"on extract regardless whether they're filled in or not\"),\n default=False,\n required=True)\n\n widgets = zope.schema.Object(\n title=_('Widgets'),\n description=_('A widget manager containing the widgets to be used in '\n 'the form.'),\n schema=IWidgets)\n\n label = zope.schema.TextLine(\n title=_('Label'),\n description=_('A human readable text describing the form that can be '\n 'used in the UI.'),\n required=False)\n\n labelRequired = zope.schema.TextLine(\n title=_('Label required'),\n description=_('A human readable text describing the form that can be '\n 'used in the UI for rendering a required info legend.'),\n required=False)\n\n 
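# Illustrative note (assumption, based on the prefix defaults defined in this\n    # module): a widget's full name is typically composed of the form prefix, the\n    # widget manager prefix and the field name, e.g. 'form.widgets.title'.\n    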
prefix = zope.schema.ASCIILine(\n        title=_('Prefix'),\n        description=_('The prefix of the form used to uniquely identify it.'),\n        default='form.')\n\n    status = zope.schema.Text(\n        title=_('Status'),\n        description=_('The status message of the form.'),\n        default=None,\n        required=False)\n\n    def getContent():\n        '''Return the content to be displayed and/or edited.'''\n\n    def updateWidgets(prefix=None):\n        '''Update the widgets for the form.\n\n        This method is commonly called from the ``update()`` method and is\n        mainly meant to be a hook for subclasses.\n\n        Note that you can pass an argument for ``prefix`` to override\n        the default value of ``\"widgets.\"``.\n        '''\n\n    def extractData(setErrors=True):\n        '''Extract the data of the form.\n\n        setErrors: needs to be passed to extract() and to sub-widgets'''\n\n    def update():\n        '''Update the form.'''\n\n    def render():\n        '''Render the form.'''\n\n    def json():\n        '''Returns the form in json format'''\n\n\nclass ISubForm(IForm):\n    \"\"\"A subform.\"\"\"\n\n\nclass IDisplayForm(IForm):\n    \"\"\"Mark a form as display form, used for templates.\"\"\"\n\n\nclass IInputForm(zope.interface.Interface):\n    \"\"\"A form that is meant to process the input of the form controls.\"\"\"\n\n    action = zope.schema.URI(\n        title=_('Action'),\n        description=_('The action defines the URI to which the form data are '\n                      'sent.'),\n        required=True)\n\n    name = zope.schema.TextLine(\n        title=_('Name'),\n        description=_('The name of the form used to identify it.'),\n        required=False)\n\n    id = zope.schema.TextLine(\n        title=_('Id'),\n        description=_('The id of the form used to identify it.'),\n        required=False)\n\n    method = zope.schema.Choice(\n        title=_('Method'),\n        description=_('The HTTP method used to submit the form.'),\n        values=('get', 'post'),\n        default='post',\n        required=False)\n\n    enctype = zope.schema.ASCIILine(\n        title=_('Encoding Type'),\n        description=_('The data encoding used to submit the data safely.'),\n        default='multipart/form-data',\n        required=False)\n\n    acceptCharset = zope.schema.ASCIILine(\n        title=_('Accepted Character Sets'),\n        description=_('This is a list of character sets the server accepts. '\n                      'By default this is unknown.'),\n        required=False)\n\n    accept = zope.schema.ASCIILine(\n        title=_('Accepted Content Types'),\n        description=_('This is a list of content types the server can '\n                      'safely handle.'),\n        required=False)\n\n\nclass IAddForm(IForm):\n    \"\"\"A form to create and add a new component.\"\"\"\n\n    def create(data):\n        \"\"\"Create the new object using the given data.\n\n        Returns the newly created object.\n        \"\"\"\n\n    def add(object):\n        \"\"\"Add the object somewhere.\"\"\"\n\n    def createAndAdd(data):\n        \"\"\"Call create and add.\n\n        This method can be used to keep all attributes internal during create\n        and add calls. On success we return the newly created and added object.\n        If something fails, we return None. The default handleAdd method will\n        only set the _finishedAdd marker on success.\n        \"\"\"\n\n\nclass IEditForm(IForm):\n    \"\"\"A form to edit data of a component.\"\"\"\n\n    def applyChanges(data):\n        \"\"\"Apply the changes to the content component.\"\"\"\n\n\nclass IFieldsForm(IForm):\n    \"\"\"A form that is based upon defined fields.\"\"\"\n\n    fields = zope.schema.Object(\n        title=_('Fields'),\n        description=_('A field manager describing the fields to be used for '\n                      'the form.'),\n        schema=IFields)\n\n\nclass IFieldsAndContentProvidersForm(IForm):\n    \"\"\"A form that is based upon defined fields and content providers\"\"\"\n\n    contentProviders = zope.schema.Object(\n        title=_('Content providers'), description=_(\n            'A manager describing the content providers to be used for '\n            'the form.'), schema=IContentProviders)\n\n\nclass IButtonForm(IForm):\n    \"\"\"A form that is based upon defined buttons.\"\"\"\n\n    buttons = zope.schema.Object(\n        title=_('Buttons'),\n        description=_('A button manager describing the buttons to be used for '\n                      'the form.'),\n        schema=IButtons)\n\n\nclass IGroup(IForm):\n    \"\"\"A group of fields/widgets within a form.\"\"\"\n\n\nclass IGroupForm(IForm):\n    \"\"\"A form that supports groups.\"\"\"\n\n    groups = zope.schema.Tuple(\n        title='Groups',\n        description=('Initially a collection of group classes, which are '\n                     'converted to group instances when the form is '\n                     'updated.'))\n\n\n# ----[ Events ]--------------------------------------------------------------\n\n\nclass IWidgetEvent(zope.interface.Interface):\n    \"\"\"A simple widget event.\"\"\"\n\n    widget = zope.schema.Object(\n        title=_('Widget'),\n        description=_('The widget for which the event was created.'),\n        schema=IWidget)\n\n\nclass IAfterWidgetUpdateEvent(IWidgetEvent):\n    \"\"\"An event sent out after the widget was updated.\"\"\"\n\n\nclass IDataExtractedEvent(zope.interface.Interface):\n    \"\"\"Event sent after data and errors are extracted from widgets.\n    \"\"\"\n    data = zope.interface.Attribute(\n        \"Extracted form data. 
Usually, the widgets extract field names from \"\n        \"the request and return a dictionary of field names and field values.\"\n    )\n    errors = zope.interface.Attribute(\n        \"Tuple of errors providing IErrorViewSnippet.\"\n    )\n    form = zope.interface.Attribute(\"Form instance.\")\n","repo_name":"zopefoundation/z3c.form","sub_path":"src/z3c/form/interfaces.py","file_name":"interfaces.py","file_ext":"py","file_size_in_byte":34188,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"60"} +{"seq_id":"18297583301","text":"#Pokemon Showdown Voice Controller\n#Training Data Generation Script\n#Aditya Pandey, Nitish Mallick, Savya Sachi Pandey, Vivek Kumar\n#Note: Check the README and report for a full understanding of how this works\n\nfrom poke_env import PlayerConfiguration\nfrom poke_env.player import Player, RandomPlayer\nfrom difflib import SequenceMatcher\nimport asyncio\nimport time\nimport speech_recognition as sr\nimport keyboard\nimport pandas as pd\nimport random\nimport requests\nimport openai\nimport wave\nimport pyaudio\n\n\n\n########################\ndef MicMove():\n\t'''This Mic Move is identical to the one used in the main game, with the caveat that this\n\t only takes in the shift key press to activate '''\n\n\n\t# Initialize the recognizer\n\tr = sr.Recognizer()\n\tstart=time.time()\n\n\t# Start listening for audio input from the microphone\n\twith sr.Microphone() as source:\n\n\t\tdelay=0\n\n\t\twhile True:\n\n\t\t    if keyboard.is_pressed('shift') or time.time()-delay<5: #Essentially, we wanted the mic to keep listening 5 seconds after key released.\n\n\t\t        print(\"Listening...\")\n\n\t\t        #Ambient Noise Adjustment\n\t\t        r.adjust_for_ambient_noise(source)\n\t\t        audio = r.listen(source)\n\n\t\t        #Time Checks for System Status\n\t\t        stop=time.time()\n\t\t        if (stop-start)>30:\n\t\t        \tprint(\"Time Passed:\",round(stop-start),\" s\")\n\n\t\t        try:\n\t\t            #Google SR\n\t\t            text = r.recognize_google(audio)\n\t\t            print(\"Text detected:\",text)\n\t\t            return text\n\t\t            \n\t\t        except sr.UnknownValueError:\n\t\t            print(\"Unknown Value Error\")\n\t\t        except sr.RequestError as e:\n\t\t            print(\"API Request Error {0}\".format(e))\n\n\t\t        #Starts the Delay Check\n\t\t        if keyboard.is_pressed('shift')==False:\n\t\t        \t#Delay timer\n\t\t        \tdelay=time.time()\n\n\ndef test1(n=40):\n\t'''This function generates n random attack prompts for users to create data from'''\n\n\tpokemons=pd.read_csv('data/pokemon.csv')['Name'].tolist()\n\tmoves=pd.read_csv('data/Pokemon-Moves.csv')['Name '].tolist()\n\trandom.shuffle(pokemons)\n\trandom.shuffle(moves)\n\n\tInputs=[]\n\tCorrect=[]\n\n\tfor i in range(0,n):\n\t\tprint('Test ',i)\n\t\tmove_string=pokemons[i]+' use '+ moves[i]\n\t\tprint(move_string)\n\t\tthis_move=MicMove()\n\t\tprint('Text Parsed: ',this_move)\n\t\tInputs.append(this_move)\n\t\tCorrect.append(move_string)\n\t\tprint()\n\n\tOutput_DF=pd.DataFrame()\n\tOutput_DF['Voice_Text']=Inputs\n\tOutput_DF['Actual_Text']=Correct\n\tOutput_DF.to_csv('data/test1.csv',index=False)\n\n\treturn\n\ndef test2(n=40):\n\t'''This function generates n random switch prompts for users to create data from'''\n\tpokemons=pd.read_csv('data/pokemon.csv')['Name'].tolist()\n\tpokemons2=pd.read_csv('data/pokemon.csv')['Name'].tolist()\n\trandom.shuffle(pokemons)\n\trandom.shuffle(pokemons2)\n\n\tInputs=[]\n\tCorrect=[]\n\n\tstatements=[' come back. 
Go ',' switch for ',' swap for ']\n\n\tfor i in range(0,n):\n\t\tprint('Test ',i)\n\t\tmove_string=pokemons[i]+statements[i%3]+ pokemons2[i]\n\t\tprint(move_string)\n\t\tthis_move=MicMove()\n\t\tprint('Text Parsed: ',this_move)\n\t\tInputs.append(this_move)\n\t\tCorrect.append(move_string)\n\t\tprint()\n\n\tOutput_DF=pd.DataFrame()\n\tOutput_DF['Voice_Text']=Inputs\n\tOutput_DF['Actual_Text']=Correct\n\tOutput_DF.to_csv('data/test2.csv',index=False)\n\n\treturn","repo_name":"AdityaPandey0901/PokemonShowdownVoice","sub_path":"Train_Data_Generator.py","file_name":"Train_Data_Generator.py","file_ext":"py","file_size_in_byte":3192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"29635833858","text":"# -*- coding: utf-8; -*-\nfrom __future__ import unicode_literals\nimport re\nimport os\nimport json\nfrom collections import OrderedDict\nfrom difflib import get_close_matches\n\nimport six\nfrom yaml import SafeLoader, YAMLError\nfrom yaml.constructor import SafeConstructor\n\nfrom .utils import logger, normalize_keys, normalize_value\n\n\nACTIONS_SCHEME = {\n    'deploy': {'inherit': '$action'},\n    'offload': {'inherit': '$action'},\n    'test': {'inherit': '$action'},\n    'export': {'inherit': '$action'},\n    'quality_control': {'inherit': '$action'}\n}\n\nENVIRONMENTS_SCHEME = {\n    'is': {\n        'type': dict,\n        'one_of': (\n            'ci',\n            'dev',\n            'development',\n            'test',\n            'testing',\n            'perf',\n            'performance',\n            'stage',\n            'staging',\n            'beta',\n            'integration',\n            'prod',\n            'production'\n        )\n    },\n    'dev': {'inherit': '$environment'},\n    'development': {'inherit': '$environment'},\n    'ci': {'inherit': '$environment'},\n    'test': {'inherit': '$environment'},\n    'testing': {'inherit': '$environment'},\n    'perf': {'inherit': '$environment'},\n    'performance': {'inherit': '$environment'},\n    'stage': {'inherit': '$environment'},\n    'staging': {'inherit': '$environment'},\n    'beta': {'inherit': '$environment'},\n    'integration': {'inherit': '$environment'},\n    'prod': {'inherit': '$environment'},\n    'production': {'inherit': '$environment'},\n    '@services': {},\n\n    '*': {\n        'ref': 'hosts',\n        'is': {\n            'one_of': [\n                {\n                    'is': {\n                        'type': list,\n                        'items': {\n                            'inherit': '$host'\n                        }\n                    }\n                },\n                {'inherit': '$host'}\n            ]\n\n        }\n    }\n}\nREGISTRIES_SCHEME = {\n    'is': {\n        'type': dict,\n    },\n    '*': {\n        'ref': 'registries',\n        'inherit': '$host',\n        'auth': {\n            'inherit': '$host',\n            'is': {\n                'type': dict,\n                'required': ['type']\n            },\n            'type': {\n                'is': {\n                    'one_of': ('basic', 'registry_rubber'),\n                    'type': six.string_types,\n                }\n            }\n        },\n        'verify': {\n            'is': {\n                'type': bool\n            }\n        }\n    }\n}\n\nROOT_SCHEME = {\n    'is': {\n        'required': [\n            'environments',\n            'project',\n            'team',\n            'repository'\n        ]\n    },\n    'environments': ENVIRONMENTS_SCHEME,\n    'project': {\n        'is': {\n            'type': six.string_types,\n        }\n    },\n    'team': {\n        'is': {\n            'type': six.string_types,\n        }\n    },\n    'registries': REGISTRIES_SCHEME,\n    'repository': {\n        'is': {\n            'type': six.string_types,\n        }\n    },\n    '*': {\n        'ref': 'services',\n        'inherit': '$service',\n        'is': {\n            'type': dict,\n            'one_of': ('image', 'build')\n            # 'min': 1\n        }\n    },\n    '_': {\n        'action': {\n            'ref': 'actions',\n            'is': {\n                'type': dict,\n                'one_of': '~services'\n            },\n            '@services': {}\n        },\n        'service': {\n            'is': {\n                'type': dict,\n                'one_of': ('image', 'build')\n            },\n            'cascading': True,\n            'attach_stderr': {\n                'is': {\n                    'type': bool\n                }\n            },\n            'attach_stdin': {\n                'is': {\n                    'type': bool\n                }\n            },\n            'attach_stdout': {\n                'is': {\n                    'type': bool\n                }\n            },\n            'build': {\n                'is': {\n                    'type': 
six.string_types\n }\n },\n 'binds': {\n 'is': {\n 'type': list\n }\n },\n 'cap_add': {\n 'is': {\n 'type': list,\n 'items': {\n 'is': {\n 'type': six.string_types\n }\n }\n }\n },\n 'cap_drop': {\n 'is': {\n 'type': list,\n 'items': {\n 'is': {\n 'type': six.string_types\n }\n }\n }\n },\n 'cgroup_parent': {\n 'is': {\n 'type': six.string_types\n }\n },\n 'command': {\n 'is': {\n 'one_of': [\n {\n 'is': {\n 'type': six.string_types,\n }\n },\n {\n 'is': {\n 'type': list,\n 'items': {\n 'is': {\n 'type': six.string_types\n }\n }\n },\n }\n\n ]\n }\n },\n 'cmd': {\n 'is': {\n 'one_of': [\n {\n 'is': {\n 'type': six.string_types,\n }\n },\n {\n 'is': {\n 'type': list,\n 'items': {\n 'is': {\n 'type': six.string_types\n }\n }\n },\n }\n\n ]\n }\n },\n 'cpu_shares': {\n 'is': {\n 'type': int\n }\n },\n 'devices': {\n 'is': {\n 'type': list\n }\n },\n 'domain_name': {\n 'is': {\n 'type': six.string_types\n }\n },\n 'domainname': {\n 'is': {\n 'type': six.string_types\n }\n },\n 'detach': {\n 'is': {\n 'type': bool\n }\n },\n 'dns': {\n 'is': {\n 'one_of': [\n {\n 'is': {\n 'type': six.string_types,\n }\n },\n {\n 'is': {\n 'type': list,\n 'items': {\n 'is': {\n 'type': six.string_types\n }\n }\n },\n }\n\n ]\n }\n },\n 'dns_search': {\n 'is': {\n 'one_of': [\n {\n 'is': {\n 'type': six.string_types,\n }\n },\n {\n 'is': {\n 'type': list,\n 'items': {\n 'is': {\n 'type': six.string_types\n }\n }\n },\n }\n\n ]\n }\n },\n 'entrypoint': {\n # TODO: add one_of\n 'is': {\n 'type': (list, six.string_types)\n }\n },\n 'entry_point': {\n # TODO: add one of\n 'is': {\n 'type': (list, six.string_types)\n }\n },\n 'env': {\n 'is': {\n 'type': list,\n 'items': {\n 'is': {\n 'type': six.string_types\n }\n }\n }\n },\n 'env_vars': {\n 'is': {\n 'type': list,\n 'items': {\n 'is': {\n 'type': six.string_types\n }\n }\n }\n },\n 'export_to': {\n 'is': {\n 'one_of': '~registries',\n 'type': six.string_types\n }\n },\n 'exposed_ports': {\n 'is': {\n 'one_of': [\n {\n 'is': {\n 'type': dict,\n },\n '*': {\n 'is': {\n 'type': six.string_types\n }\n }\n },\n {\n 'is': {\n 'type': list,\n 'items': {\n 'is': {\n 'type': six.string_types\n }\n }\n },\n }\n\n ]\n },\n '*': {\n 'is': {\n 'type': six.string_types\n }\n }\n },\n 'extra_hosts': {\n 'is': {\n 'one_of': [\n {\n 'is': {\n 'type': dict,\n },\n '*': {\n 'is': {\n 'type': six.string_types\n }\n }\n },\n {\n 'is': {\n 'type': list,\n 'items': {\n 'is': {\n 'type': six.string_types\n }\n }\n },\n }\n\n ]\n },\n '*': {\n 'is': {\n 'type': six.string_types\n }\n }\n },\n 'hostname': {\n 'is': {\n 'type': six.string_types\n }\n },\n 'image': {\n 'is': {\n 'type': six.string_types,\n }\n },\n 'links': {\n 'is': {\n 'one_of': [\n {\n 'is': {\n 'type': dict,\n },\n '*': {\n 'is': {\n 'type': six.string_types\n }\n }\n },\n {\n 'is': {\n 'type': list,\n 'items': {\n 'is': {\n 'type': six.string_types\n }\n }\n },\n }\n\n ]\n },\n '*': {\n 'is': {\n 'type': six.string_types\n }\n }\n },\n 'labels': {\n 'is': {\n 'type': dict\n }\n },\n 'log_config': {\n 'is': {\n 'type': dict,\n 'required': ['type', 'config']\n },\n 'type': {\n 'is': {\n 'type': six.string_types\n }\n },\n 'config': {\n 'is': {\n 'required': ['syslog-facility'],\n 'type': dict\n },\n 'syslog-facility': {\n 'is': {\n 'type': six.string_types\n }\n },\n 'syslog-tag': {\n 'is': {\n 'type': six.string_types\n }\n }\n }\n },\n 'lxc_conf': {\n 'is': {\n 'one_of': [\n {\n 'is': {\n 'type': dict,\n },\n '*': {\n 'is': {\n 'type': six.string_types\n }\n }\n },\n {\n 'is': {\n 'type': list,\n 'items': {\n 'is': {\n 
'type': six.string_types\n }\n }\n },\n }\n\n ]\n },\n '*': {\n 'is': {\n 'type': six.string_types\n }\n }\n },\n 'memory': {\n 'is': {\n 'type': int\n }\n },\n 'memory_swap': {\n 'is': {\n 'type': int\n }\n },\n 'network_disabled': {\n 'is': {\n 'type': bool\n }\n },\n 'network_mode': {\n 'is': {\n 'type': six.string_types,\n }\n },\n 'open_stdin': {\n 'is': {\n 'type': bool\n }\n },\n 'ports': {\n 'is': {\n 'one_of': [\n {\n 'is': {\n 'type': dict,\n },\n '*': {\n 'is': {\n 'type': list\n }\n }\n },\n {\n 'is': {\n 'type': list,\n 'items': {\n 'is': {\n 'type': six.string_types\n }\n }\n },\n }\n\n ]\n },\n '*': {\n 'is': {\n 'type': list\n }\n }\n },\n 'ports_bindings': {\n 'is': {\n 'one_of': [\n {\n 'is': {\n 'type': dict,\n },\n '*': {\n 'is': {\n 'type': list\n }\n }\n },\n {\n 'is': {\n 'type': list,\n 'items': {\n 'is': {\n 'type': six.string_types\n }\n }\n },\n }\n\n ]\n },\n '*': {\n 'is': {\n 'type': list\n }\n }\n },\n 'privileged': {\n 'is': {\n 'type': bool\n }\n },\n 'publish_all_ports': {\n 'is': {\n 'type': bool\n }\n },\n 'readonly_root_fs': {\n 'is': {\n 'type': bool\n }\n },\n 'readonly_rootfs': {\n 'is': {\n 'type': bool\n }\n },\n 'restart': {\n 'is': {\n 'type': dict\n }\n },\n 'security_opt': {\n 'is': {\n 'type': list\n }\n },\n 'stdin_once': {\n 'is': {\n 'type': bool\n }\n },\n 'tags': {\n 'is': {\n 'type': list,\n 'items': {\n 'is': {\n 'type': six.string_types\n }\n }\n }\n },\n 'test': {\n 'is': {\n 'type': six.string_types,\n }\n },\n 'tty': {\n 'is': {\n 'type': bool\n }\n },\n 'ulimits': {\n 'is': {\n 'type': list\n }\n\n },\n 'user': {\n 'is': {\n 'type': six.string_types\n }\n },\n 'volumes': {\n 'is': {\n 'one_of': [\n {\n 'is': {\n 'type': dict,\n },\n '*': {\n 'is': {\n 'type': six.string_types\n }\n }\n },\n {\n 'is': {\n 'type': list,\n 'items': {\n 'is': {\n 'type': six.string_types\n }\n }\n },\n }\n\n ]\n },\n '*': {\n 'is': {\n 'type': six.string_types\n }\n }\n },\n 'volumes_from': {\n 'is': {\n 'type': (list, six.string_types)\n }\n },\n 'working_dir': {\n 'is': {\n 'type': six.string_types\n }\n }\n },\n 'environment': {\n 'ref': 'environments',\n 'is': {\n 'type': dict,\n },\n '@services': {},\n 'hosts': {\n 'is': {\n 'type': dict,\n },\n 'default': {\n 'is': {\n 'type': list,\n 'items': {\n 'inherit': '$host'\n }\n }\n },\n 'export': {\n 'is': {\n 'type': list,\n 'items': {\n 'inherit': '$host'\n },\n 'max': 1\n }\n },\n '~services': {\n 'is': {\n 'type': list,\n 'items': {\n 'inherit': '$host'\n }\n }\n }\n },\n '*': {\n 'ref': 'data_centers',\n 'inherit': '$data_center'\n }\n },\n\n # TODO: make object more bullet proof\n 'data_center': {\n 'inherit': '$actions',\n 'is': {\n 'type': dict,\n },\n '@services': {},\n 'hosts': {\n 'cascading': True,\n 'is': {\n 'type': dict\n },\n 'default': {\n 'is': {\n 'type': list,\n 'items': {\n 'inherit': '$host'\n }\n }\n },\n 'export': {\n 'is': {\n 'type': list,\n 'items': {\n 'inherit': '$host'\n },\n 'max': 1\n }\n },\n '~services': {\n 'is': {\n 'type': list,\n 'items': {\n 'inherit': '$host'\n }\n }\n }\n }\n },\n\n 'actions': ACTIONS_SCHEME,\n\n 'host': {\n 'is': {\n 'type': dict,\n 'required': ['address']\n },\n 'address': {\n 'is': {\n 'type': six.string_types\n }\n },\n 'ssl_cert_path': {\n 'is': {\n 'type': six.string_types\n }\n },\n 'verify': {\n 'is': {\n 'type': bool\n }\n }\n\n }\n }\n}\n\nVALIDATORS = (\n 'required',\n 'must_include',\n 'type',\n 'one_of',\n 'if',\n 'any_of',\n 'items',\n 'not',\n 'max',\n 'min',\n)\n\nRESERVED_SCHEME_KEYS = (\n 'is',\n 'ref',\n 'inherit',\n 
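# Annotation (inferred from _update_scheme/_reference_keys below, not a formal\n    # spec): 'is' holds validations, 'ref' registers reference keys, 'inherit' and\n    # '$' pull shared definitions from '_', '*' matches arbitrary user-defined\n    # keys, and '~'/'@' expand previously registered references.\n    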
'cascading',\n    '_',\n    '*',\n    '~',\n    '$',\n    '@',\n)\n\nextract_doc_string = re.compile(\n    r'''^ (@|)              # explicit module name\n          (\\w+):            # thing name\n          \\s?(.*)$          # rest should be comma separated values.\n          ''', re.VERBOSE)\n\n\nclass Config(object):\n    \"\"\" A representation of a freight-forwarder configuration file.\n\n    :param verbose: A :boolean:, will provide verbose validation output. defaults to True\n      * not yet implemented.\n    :param path_override: A :string:, a path to a configuration to override. defaults to cwd\n    \"\"\"\n    def __init__(self, path_override=None, verbose=True):\n        self._scheme_references = {}\n        self._data = None\n\n        if path_override and not isinstance(path_override, six.string_types):\n            raise TypeError(logger.error(\"path_override must be a string.\"))\n\n        self._path = path_override if path_override else os.getcwd()\n        self._verbose = verbose\n\n        self._load()\n\n    def get(self, attr_name, *args):\n        \"\"\" Get the most specific match for an attribute in the configuration file. This method\n        will recursively look through the configuration file for the attribute specified\n        and return the last found value or None. The values can be referenced by\n        the key name provided in the configuration file or that value normalized with\n        snake_casing.\n\n        Usage::\n            >>> from freight_forwarder.config import Config\n            >>>\n            >>> config = Config()\n            >>> thing = config.get('thing', 'grandparent', 'parent')\n\n        :param attr_name: A :string: The configuration property name to get.\n        :param *args: A :tuple: of :strings:, parent objects in which to look for attr. This is optional.\n        :return attr value:\n        \"\"\"\n        if not isinstance(attr_name, six.string_types):\n            raise TypeError('attr_name must be a str.')\n\n        # allow retrieval of data with alias or normalized name\n        if '-' in attr_name:\n            attr_name = attr_name.replace('-', '_')\n\n        parent_attr = self\n        attr = getattr(parent_attr, attr_name, None)\n\n        for arg in args:\n            if not isinstance(arg, six.string_types):\n                raise TypeError(\n                    'each additional argument must be a string. {0} was not a string'.format(arg)\n                )\n\n            if hasattr(parent_attr, arg):\n                parent_attr = getattr(parent_attr, arg)\n\n            if hasattr(parent_attr, attr_name):\n                attr = getattr(parent_attr, attr_name)\n            else:\n                pass\n\n        return attr\n\n    # TODO: revisit these reference functions to validate their usefulness.\n    @property\n    def service_references(self):\n        \"\"\" returns a list of service names\n        \"\"\"\n        services_blue_print = self._scheme_references.get('services')\n        if services_blue_print is None:\n            raise LookupError('unable to find any services in the config.')\n\n        # TODO: this needs to be cleaned up and made solid. maybe when creating the blueprint ref normalize the damn keys\n        return {key.replace('-', '_'): key for key in services_blue_print['keys']}\n\n    def environment_references(self):\n        services_blue_print = self._scheme_references.get('environments')\n        if services_blue_print is None:\n            raise LookupError('unable to find any environments in the config.')\n\n        # TODO: this needs to be cleaned up and made solid. 
maybe when creating the blueprint ref normalize the damn keys\n        return {key.replace('-', '_'): key for key in services_blue_print['keys']}\n\n    def data_center_references(self, environment):\n        \"\"\"\n\n        :param string environment:\n        :return dict:\n        \"\"\"\n        services_blue_print = self._scheme_references.get('data_centers')\n        if services_blue_print is None:\n            raise LookupError('unable to find any data_centers in the config.')\n\n        if environment not in self.environments:\n            raise LookupError('unable to find {0} in the following environments: {1}'.format(\n                environment, ', '.join(self.environment_references().keys())\n            ))\n\n        environment = self.environments[environment]\n        # TODO: this needs to be cleaned up and made solid. maybe when creating the blueprint ref normalize the damn keys\n        return {key.replace('-', '_'): key for key in services_blue_print['keys'] if key.replace('-', '_') in environment}\n\n    def validate(self):\n        \"\"\" Validate the contents of the configuration file. Will return None if validation is successful or\n        raise an error if not.\n        \"\"\"\n        if not isinstance(self._data, dict):\n            raise TypeError('freight forwarder configuration file must be a dict.')\n\n        current_log_level = logger.get_level()\n\n        if self._verbose:\n            logger.set_level('DEBUG')\n        else:\n            logger.set_level('ERROR')\n\n        logger.info('Starting configuration validation', extra={\"formatter\": 'config-start'})\n\n        # copy config dict to allow config data to stay in its original state.\n        config_data = self._data.copy()\n\n        try:\n            self._walk_tree(config_data, ROOT_SCHEME)\n        except ConfigValidationException as e:\n            e.log_error()\n            raise\n\n        logger.info(\"Config validation passed.\", extra={'formatter': 'config-success'})\n\n        logger.set_level(current_log_level)\n\n    ##\n    # private methods\n    ##\n    def _load(self):\n        \"\"\" Load a configuration file. This method will be called when the Config class is instantiated. The\n        configuration file can be json or yaml.\n        \"\"\"\n        if os.path.isdir(self._path):\n            for file_ext in ('yml', 'yaml', 'json'):\n                test_path = os.path.join(self._path, 'freight-forwarder.{0}'.format(file_ext))\n\n                if os.path.isfile(test_path):\n                    self._path = test_path\n                    break\n\n        if os.path.isfile(self._path):\n            file_name, file_extension = os.path.splitext(self._path)\n\n            with open(self._path, 'r') as config_file:\n                if file_extension in ('.yaml', '.yml'):\n                    self._load_yml_config(config_file.read())\n                elif file_extension == '.json':\n                    try:\n                        config_data = json.loads(config_file.read())\n                        self._data = normalize_keys(config_data)\n                    except Exception:\n                        raise SyntaxError(\"There is a syntax error in your freight-forwarder config.\")\n                else:\n                    raise TypeError(\"Configuration file must be yaml or json.\")\n        else:\n            raise LookupError(\"Was unable to find a freight-forwarder configuration file.\")\n\n    def _load_yml_config(self, config_file):\n        \"\"\" loads a yaml str, registers a few custom constructors for PyYAML, serializes and normalizes the config data. 
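The nested constructors wrap parsed values in the Config* node types\n        (ConfigInt, ConfigUnicode, ConfigDict, ...) so that YAML start/end marks\n        remain available, e.g. for validation error messages. 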
Then\n        assigns the config data to self._data.\n\n        :param config_file: A :string: loaded from a yaml file.\n        \"\"\"\n        if not isinstance(config_file, six.string_types):\n            raise TypeError('config_file must be a str.')\n\n        try:\n            def construct_yaml_int(self, node):\n                obj = SafeConstructor.construct_yaml_int(self, node)\n                data = ConfigInt(\n                    obj,\n                    node.start_mark,\n                    node.end_mark\n                )\n\n                return data\n\n            def construct_yaml_float(self, node):\n                obj = SafeConstructor.construct_yaml_float(self, node)\n                data = ConfigFloat(\n                    obj,\n                    node.start_mark,\n                    node.end_mark\n                )\n\n                return data\n\n            def construct_yaml_str(self, node):\n                # Override the default string handling function\n                # to always return unicode objects\n                obj = SafeConstructor.construct_scalar(self, node)\n\n                assert isinstance(obj, six.string_types)\n                data = ConfigUnicode(\n                    obj,\n                    node.start_mark,\n                    node.end_mark\n                )\n\n                return data\n\n            def construct_yaml_mapping(self, node):\n                obj, = SafeConstructor.construct_yaml_map(self, node)\n                data = ConfigDict(\n                    obj,\n                    node.start_mark,\n                    node.end_mark\n                )\n\n                return data\n\n            def construct_yaml_seq(self, node):\n                obj, = SafeConstructor.construct_yaml_seq(self, node)\n                data = ConfigSeq(\n                    obj,\n                    node.start_mark,\n                    node.end_mark\n                )\n\n                return data\n\n            # SafeConstructor.add_constructor(u'tag:yaml.org,2002:bool', construct_yaml_bool)\n            SafeConstructor.add_constructor(u'tag:yaml.org,2002:float', construct_yaml_float)\n            SafeConstructor.add_constructor(u'tag:yaml.org,2002:int', construct_yaml_int)\n            SafeConstructor.add_constructor(u'tag:yaml.org,2002:map', construct_yaml_mapping)\n            SafeConstructor.add_constructor(u'tag:yaml.org,2002:seq', construct_yaml_seq)\n            SafeConstructor.add_constructor(u'tag:yaml.org,2002:str', construct_yaml_str)\n\n            data = SafeLoader(config_file).get_data()\n            if data is None:\n                raise AttributeError('The configuration file needs to have data in it.')\n\n            self._data = normalize_keys(data, snake_case=False)\n        except YAMLError as e:\n            if hasattr(e, 'problem_mark'):\n                mark = e.problem_mark\n                raise SyntaxError(\n                    \"There is a syntax error in your freight-forwarder config file line: {0} column: {1}\".format(\n                        mark.line + 1,\n                        mark.column + 1\n                    )\n                )\n            else:\n                raise SyntaxError(\"There is a syntax error in your freight-forwarder config.\")\n\n    def _merge_config(self, config_override):\n        \"\"\" overrides and/or adds data to the current configuration file.\n\n        ** This has not been implemented yet.\n\n        :param config_override: A :dict:, config data to add to or override the current config.\n        \"\"\"\n        if not isinstance(config_override, dict):\n            raise TypeError(\"config override must be a dict\")\n\n        def recursion(config_value, override_value):\n            for key, value in override_value.items():\n                if key in config_value:\n                    if isinstance(value, dict) and isinstance(config_value[key], dict):\n                        recursion(config_value[key], value)\n                    else:\n                        config_value[key] = value\n                else:\n                    config_value[key] = value\n\n        for key, value in config_override.items():\n            if key in self._data:\n                recursion(self._data[key], value)\n            else:\n                self._data[key] = value\n\n    def _create_attr(self, property_key, data, ancestors):\n        \"\"\" Dynamically creates attributes on a Config. 
Also adds name and alias to each Config object.\n\n        :param property_key: A :string: configuration property name.\n        :param data: The data the user supplied for this specific property.\n        :param ancestors: A :OrderedDict: that provides a history of its ancestors.\n        \"\"\"\n        if not isinstance(property_key, six.string_types):\n            raise TypeError(\"property_key must be a string. type: {0} was passed.\".format(type(property_key)))\n\n        if not isinstance(ancestors, OrderedDict):\n            raise TypeError(\"ancestors must be an OrderedDict. type: {0} was passed.\".format(type(ancestors)))\n\n        previous_element = self\n        normalized_key = normalize_value(property_key).replace('-', '_')\n        normalized_ancestor_key = None\n\n        # TODO: clean up and add validation\n        if ancestors:\n            for ancestor_key, ancestors_value in six.iteritems(ancestors):\n                normalized_ancestor_key = normalize_value(ancestor_key).replace('-', '_')\n\n                if normalized_ancestor_key.lower() == 'root':\n                    continue\n\n                if not hasattr(previous_element, normalized_ancestor_key):\n                    config_attr = ConfigDict({}, ancestors_value.start_mark, ancestors_value.end_mark)\n                    config_attr.name = normalized_ancestor_key\n                    config_attr.alias = ancestor_key\n\n                    setattr(\n                        previous_element,\n                        normalized_ancestor_key,\n                        config_attr\n                    )\n\n                previous_element = getattr(previous_element, normalized_ancestor_key)\n\n        if normalized_key == normalized_ancestor_key:\n            pass\n        else:\n            if isinstance(data, ConfigNode):\n                data.name = normalized_key\n                data.alias = property_key\n\n            setattr(previous_element, normalized_key, data)\n\n    def _collect_unrecognized_values(self, scheme, data, ancestors):\n        \"\"\" Looks for values that aren't defined in the scheme and returns a dict with any unrecognized values found.\n\n        :param scheme: A :dict:, The scheme defining the validations.\n        :param data: A :dict: the user supplied for this specific property.\n        :param ancestors: A :OrderedDict: that provides a history of its ancestors.\n        :rtype: A :dict: of unrecognized configuration properties.\n        \"\"\"\n        if not isinstance(ancestors, OrderedDict):\n            raise TypeError(\"ancestors must be an OrderedDict. type: {0} was passed.\".format(type(ancestors)))\n\n        if not isinstance(scheme, dict):\n            raise TypeError('scheme must be a dict. 
type: {0} was passed'.format(type(scheme)))\n\n        unrecognized_values = {}\n        if isinstance(data, dict):\n            pruned_scheme = [key for key in scheme.keys() if key not in RESERVED_SCHEME_KEYS and key[0] not in RESERVED_SCHEME_KEYS]\n\n            for key, value in six.iteritems(data):\n                if key in pruned_scheme:\n                    continue\n\n                unrecognized_values[key] = value\n\n            validations = scheme.get('is')\n            if validations and 'one_of' in validations:\n                for nested_scheme in validations['one_of']:\n                    if isinstance(nested_scheme, dict):\n\n                        updated_scheme = self._update_scheme(nested_scheme, ancestors)\n                        pruned_scheme = [key for key in updated_scheme.keys() if key not in RESERVED_SCHEME_KEYS and key[0] not in RESERVED_SCHEME_KEYS]\n                        for key in pruned_scheme:\n                            if key in unrecognized_values:\n                                del unrecognized_values[key]\n                    else:\n                        # TODO: maybe return an error?\n                        pass\n\n        return unrecognized_values\n\n    def _update_scheme(self, scheme, ancestors):\n        \"\"\" Updates the current scheme based on special pre-defined keys and returns a new updated scheme.\n\n        :param scheme: A :dict:, The scheme defining the validations.\n        :param ancestors: A :OrderedDict: that provides a history of its ancestors.\n        :rtype: A new :dict: with updated scheme values.\n        \"\"\"\n        if not isinstance(ancestors, OrderedDict):\n            raise TypeError(\"ancestors must be an OrderedDict. type: {0} was passed.\".format(type(ancestors)))\n\n        if not isinstance(scheme, dict):\n            raise TypeError('scheme must be a dict. type: {0} was passed'.format(type(scheme)))\n\n        # TODO: what if we have more than one scheme :P need to fix this.\n        definitions = ROOT_SCHEME.get('_')\n        if 'inherit' in scheme:\n            scheme = self._scheme_propagation(scheme, definitions)\n\n        updated_scheme = {}\n        for scheme_key in six.iterkeys(scheme):\n            if not isinstance(scheme_key, six.string_types):\n                raise TypeError('scheme keys are required to be strings. type: {0} was passed.'.format(scheme_key))\n\n            if '@' in scheme_key:\n                ref = scheme_key[1:]\n\n                scheme_reference = self._scheme_references.get(ref)\n                if not scheme_reference:\n                    raise ConfigValidationException(ancestors, ref, scheme_reference, 'required', scheme)\n\n                for reference_key in scheme_reference['keys']:\n                    scheme_reference['scheme'].update(scheme[scheme_key])\n                    updated_scheme[reference_key] = scheme_reference['scheme']\n\n            elif '~' in scheme_key:\n                ref = scheme_key[1:]\n\n                scheme_reference = self._scheme_references.get(ref)\n                if not scheme_reference:\n                    raise LookupError(\"was unable to find {0} in scheme reference.\".format(ref))\n\n                for reference_key in scheme_reference['keys']:\n                    updated_scheme[reference_key] = scheme[scheme_key]\n\n        scheme.update(updated_scheme)\n        return scheme\n\n    def _get_cascading_attr(self, attr_name, *args):\n        \"\"\" Will traverse the configuration data looking for attr_name provided. It will know where to look for the\n        attribute based on the *args that are passed to the method. It treats the args as ancestors starting with the\n        oldest `root -> grandparent -> parent -> attr_name`. The findings will be updated with the last found object.\n        The properties in the last found object will overwrite those in the previous. If the attribute isn't found\n        will return None\n\n        Usage::\n            >>> attr = self._get_cascading_attr('attribute_name', 'root', 'grandparent', 'parent')\n            >>> if attr is not None:\n            >>>     do_thing(attr)\n\n        :param attr_name: A string, the configuration attribute name\n        :param *args: A list of strings that represent the attribute's ancestry. 
(how to find the obj)\n :rtype: Any defined configuration value :dict:, :string:, :int:, :list:, :float: `attr`\n \"\"\"\n attr = self._data.get(attr_name, None)\n if isinstance(attr, ConfigDict):\n attr = ConfigDict(attr, attr.start_mark, attr.end_mark)\n elif isinstance(attr, dict):\n attr = attr.copy()\n\n cascading = self._data\n if args:\n\n for key in args:\n if not isinstance(key, six.string_types):\n raise TypeError('Every key in args must be a string.')\n\n if key in cascading:\n config_object = cascading.get(key)\n\n if config_object:\n if attr_name in config_object:\n if isinstance(attr, dict):\n value = config_object[attr_name]\n\n if value is None:\n attr = value\n elif isinstance(value, dict):\n attr.update(config_object[attr_name])\n else:\n raise LookupError(\n \"Unable to find '{0}'. Obj structure: {1}\".format(\n attr_name, \" -> \".join(args)\n )\n )\n else:\n attr = config_object[attr_name]\n\n cascading = config_object\n continue\n else:\n break\n\n return attr\n\n def _walk_tree(self, data, scheme, ancestors=None, property_name=None, prefix=None):\n \"\"\" This function takes configuration data and a validation scheme\n then walk the configuration tree validating the configuraton data agenst\n the scheme provided. Will raise error on failure otherwise return None.\n\n Usage::\n >>> self._walk_tree(\n >>> OrderedDict([('root', config_data)]),\n >>> registries,\n >>> REGISTRIES_SCHEME\n >>> )\n\n\n :param ancestors: A :OrderedDict:, The first element of the dict must be 'root'.\n :param data: The data that needs to be validated agents the scheme.\n :param scheme: A :dict:, The scheme defining the validations.\n :param property_name: A :string:, This is the name of the data getting validated.\n :param prefix:\n :rtype: :None: will raise error if a validation fails.\n \"\"\"\n if property_name is None:\n property_name = 'root'\n # hack until i add this to references\n # reorder validates putting required first. If the data doesn't exist there is no need to continue.\n order = ['registries'] + [key for key in scheme.keys() if key not in ('registries',)]\n scheme = OrderedDict(sorted(scheme.items(), key=lambda x: order.index(x[0])))\n\n if data is None:\n return\n\n elif not isinstance(property_name, six.string_types):\n raise TypeError('property_name must be a string.')\n\n ancestors = self._update_ancestors(data, property_name, ancestors)\n if isinstance(ancestors, OrderedDict):\n if list(ancestors)[0] != 'root':\n raise LookupError('root must be the first item in ancestors.')\n else:\n raise TypeError('ancestors must be an OrderedDict. {0} was passed'.format(type(ancestors)))\n\n if not isinstance(scheme, dict):\n raise TypeError('scheme must be a dict. 
{0} was passed.'.format(type(scheme)))\n scheme = self._update_scheme(scheme, ancestors)\n\n if property_name is not None and data:\n data = self._get_cascading_attr(\n property_name, *list(ancestors)[1:]\n ) if scheme.get('cascading', False) else data\n\n for err in self.__execute_validations(scheme.get('is', {}), data, property_name, ancestors, prefix=prefix):\n if err:\n raise err\n else:\n self._create_attr(property_name, data, ancestors)\n\n self.__validate_unrecognized_values(scheme, data, ancestors, prefix)\n\n self.__populate_scheme_references(scheme, property_name)\n\n self.__validate_config_properties(scheme, data, ancestors, prefix)\n\n def _update_ancestors(self, config_data, property_name, ancestors=None):\n \"\"\" Update ancestors for a specific property.\n\n :param ancestors: A :OrderedDict:, representing the ancestors of a property.\n :param config_data: The data that needs to be validated agents the scheme.\n :param property_name: A :string: of the properties name.\n :rtype: A :OrderDict: that has been updated with new parents.\n \"\"\"\n if not isinstance(property_name, six.string_types):\n raise TypeError(\"property_key must be a string. type: {0} was passed.\".format(type(property_name)))\n\n if ancestors is None:\n ancestors = OrderedDict([('root', config_data)])\n\n elif not isinstance(ancestors, OrderedDict):\n raise TypeError(\"ancestors must be an OrderedDict. type: {0} was passed.\".format(type(ancestors)))\n\n elif 'root' not in ancestors:\n raise LookupError(\n 'root must be in ancestors. currently in the ancestors chain {0}'.format(', '.join(ancestors.keys()))\n )\n\n ancestors = ancestors.copy()\n\n for previous_key in list(ancestors)[::-1]:\n previous_item = ancestors[previous_key]\n\n if isinstance(config_data, dict):\n if property_name in previous_item:\n ancestors[property_name] = config_data\n break\n\n return ancestors\n\n def _reference_keys(self, reference):\n \"\"\" Returns a list of all of keys for a given reference.\n\n :param reference: a :string:\n :rtype: A :list: of reference keys.\n \"\"\"\n if not isinstance(reference, six.string_types):\n raise TypeError(\n 'When using ~ to reference dynamic attributes ref must be a str. a {0} was provided.'.format(type(reference).__name__)\n )\n\n if '~' in reference:\n reference = reference[1:]\n\n scheme = self._scheme_references.get(reference)\n if not scheme:\n # TODO: need to create nice error here as well and print pretty message.\n raise LookupError(\n \"Was unable to find {0} in the scheme references. \"\n \"available references {1}\".format(reference, ', '.join(self._scheme_references.keys()))\n )\n\n return scheme['keys']\n else:\n raise AttributeError('references must start with ~. 
Please update {0} and retry.'.format(reference))\n\n def __populate_scheme_references(self, scheme, property_name):\n reference_scheme = scheme.get('ref')\n if reference_scheme:\n if reference_scheme in self._scheme_references:\n if property_name not in self._scheme_references[reference_scheme]['keys']:\n self._scheme_references[reference_scheme]['keys'].append(property_name)\n else:\n self._scheme_references[reference_scheme] = {'keys': [property_name], 'scheme': scheme}\n\n def __validate_config_properties(self, scheme, data, ancestors, prefix=None):\n\n for scheme_key, child_scheme in six.iteritems(scheme):\n if scheme_key in RESERVED_SCHEME_KEYS:\n continue\n\n if isinstance(data, dict):\n if scheme_key in data or child_scheme.get('is', {}).get('required', False):\n self._walk_tree(data.get(scheme_key), child_scheme.copy(), ancestors, scheme_key, prefix=prefix)\n else:\n # TODO: update error\n raise AttributeError(\n '{0} is an unrecognized scheme key. replace with one of the following: {1}'.format(\n scheme_key, ', '.join(RESERVED_SCHEME_KEYS)\n )\n )\n\n def __validate_unrecognized_values(self, scheme, data, ancestors, prefix=None):\n unrecognized_values = self._collect_unrecognized_values(scheme, data, ancestors)\n default_scheme = scheme.get('*')\n\n if unrecognized_values and default_scheme:\n for key in list(unrecognized_values):\n\n value = unrecognized_values.pop(key)\n try:\n self._walk_tree(value, default_scheme.copy(), ancestors, key, prefix=prefix)\n except ConfigValidationException:\n error = ConfigValidationException(ancestors, key, value, 'unrecognized', scheme)\n\n if error.is_potential_fix_valid():\n raise error\n else:\n raise\n\n if unrecognized_values:\n for key, value in six.iteritems(unrecognized_values):\n logger.info(\n self.__build_validation_message(ancestors, key, 'unrecognized', key),\n extra={'formatter': 'config-failure', 'prefix': prefix}\n )\n\n raise ConfigValidationException(ancestors, key, unrecognized_values, 'unrecognized', scheme)\n\n def __execute_validations(self, validations, data, property_name, ancestors, negation=False, prefix=None):\n \"\"\" Validate the data for a specific configuration value. This method will look up all of the validations provided\n and dynamically call any validation methods. If a validation fails an error will be thrown. If no errors are found\n attributes will be dynamically created on the Config object for the configuration value.\n\n :param validations: A :dict: with any required validations and expected values.\n :param data: the data to validate.\n :param property_name: A :string:, the properties name.\n :param ancestors: A :OrderedDict:, representing the ancestors of a property.\n \"\"\"\n if not isinstance(ancestors, OrderedDict):\n raise TypeError(\"ancestors must be an OrderedDict. type: {0} was passed.\".format(type(ancestors)))\n\n if not isinstance(validations, dict):\n raise TypeError('validations is required to be a dict. type: {0} was passed.'.format(type(validations)))\n\n if not isinstance(property_name, six.string_types):\n raise TypeError(\"property_key must be a string. type: {0} was passed.\".format(type(property_name)))\n\n # reorder validations putting required first. 
If the data doesn't exist there is no need to continue.\n order = ['type', 'required'] + [key for key in validations.keys() if key not in ('required', 'type')]\n ordered_validations = OrderedDict(sorted(validations.items(), key=lambda x: order.index(x[0])))\n\n for validation, value in six.iteritems(ordered_validations):\n if validation in VALIDATORS:\n\n if validation == 'not':\n # TODO: need to test to make sure this works\n for err in self.__execute_validations(value, data, property_name, ancestors, negation, prefix):\n yield err\n\n continue\n\n for err in getattr(self, '_{0}'.format(validation))(value, data, property_name, ancestors, negation, prefix):\n yield err\n\n else:\n raise LookupError(\"{0} isn't a validator or reserved scheme key.\".format(validation))\n\n def _type(self, expected, value, key, ancestors, negation, prefix):\n error = None\n formatter = 'config-success'\n if isinstance(expected, tuple):\n data_type = []\n for dt in expected:\n if isinstance(dt, tuple):\n data_type.extend([element for element in dt])\n else:\n data_type.append(dt)\n\n data_type = ' or '.join([dt.__name__ for dt in data_type])\n else:\n data_type = expected.__name__\n\n # TODO: update not logic.\n if negation:\n if isinstance(value, expected):\n error = ConfigValidationException(ancestors, key, value, 'type', data_type)\n formatter = 'config-failure'\n\n elif not isinstance(value, expected):\n error = ConfigValidationException(ancestors, key, value, 'type', data_type)\n formatter = 'config-failure'\n\n logger.info(\n self.__build_validation_message(ancestors, key, 'type', data_type),\n extra={'formatter': formatter, 'prefix': prefix}\n )\n\n yield error\n\n def _required(self, expected, value, key, ancestors, negation, prefix):\n error = None\n if not isinstance(expected, (list, tuple)):\n raise TypeError('included is required to be a list or tuple. 
{0} was passed'.format(type(expected).__name__))\n\n matches = []\n\n for expected_key in expected:\n if not isinstance(expected_key, six.string_types):\n raise TypeError('each value in the included list must be a string. type: {0} was passed.'.format(type(expected_key).__name__))\n\n if expected_key in value and value.get(expected_key) is not None:\n logger.info(\n self.__build_validation_message(ancestors, key, 'required', expected_key),\n extra={'formatter': 'config-success', 'prefix': prefix}\n )\n else:\n logger.info(\n self.__build_validation_message(ancestors, key, 'required', expected_key),\n extra={'formatter': 'config-failure', 'prefix': prefix}\n )\n matches.append(expected_key)\n\n if matches:\n error = ConfigValidationException(ancestors, key, value, 'required', matches)\n\n yield error\n\n def _items(self, expected, values, key, ancestors, negation, prefix):\n error = None\n items_prefix = ' \\u21B3'\n item_identifier = ' \\u2605'\n\n if prefix:\n items_prefix = items_prefix.rjust(4)\n item_identifier = item_identifier.rjust(4)\n\n if isinstance(values, (list, tuple)):\n # validate each value.\n for i, value in enumerate(values):\n\n logger.info(\n self.__build_validation_message(ancestors, key, 'item[{0}]'.format(i), value),\n extra={'formatter': 'config-message', 'prefix': item_identifier}\n )\n\n try:\n self._walk_tree(value, expected, ancestors, key, prefix=items_prefix)\n except ConfigValidationException as e:\n error = e\n else:\n raise TypeError('Can\'t validate items if a list or tuple isn\'t passed.')\n\n yield error\n\n def _max(self, expected, value, key, ancestors, negation, prefix):\n \"\"\"\n\n :param expected:\n :param value:\n :param key:\n :param ancestors:\n :param negation:\n :return:\n \"\"\"\n error = None\n formatter = 'config-success'\n length = len(value) if value is not None else expected\n\n if length > expected:\n formatter = 'config-failure'\n error = ConfigValidationException(ancestors, key, value, 'max', expected)\n\n logger.info(\n self.__build_validation_message(ancestors, key, 'max', expected),\n extra={'formatter': formatter, 'prefix': prefix}\n )\n\n yield error\n\n def _one_of(self, expected, values, key, ancestors, negation, prefix):\n error = None\n valid = 0\n one_of_prefix = ' \\u21B3'\n\n if prefix:\n one_of_prefix = one_of_prefix.rjust(4)\n\n if '~' in expected:\n expected = self._reference_keys(expected)\n\n logger.info(\" \\u2605 {0}\".format(self.__build_validation_message(ancestors, key, 'one_of', '\\u2605')))\n for i, expected_value in enumerate(expected):\n logger.info(\" \\u2605 {0}\".format(\n self.__build_validation_message(ancestors, key, 'one_of', 'item[{0}]'.format(i)))\n )\n\n if isinstance(expected_value, dict):\n try:\n self._walk_tree(values, expected_value, ancestors.copy(), key, prefix=one_of_prefix)\n\n valid += 1\n error = None\n\n except ConfigValidationException as e:\n if not valid and error is None:\n error = ConfigOneOfException(ancestors, key, values, 'one_of', None)\n error.additional_messages.append(e.message)\n\n pass\n except Exception:\n pass\n else:\n if self.__one_of_validation(values, expected_value):\n valid += 1\n error = None\n formatter = 'config-success'\n else:\n formatter = 'config-failure'\n if not valid:\n error = ConfigValidationException(ancestors, key, values, 'one_of', expected)\n\n logger.info(\n self.__build_validation_message(ancestors, key, 'one_of', expected_value),\n extra={'formatter': formatter, 'prefix': one_of_prefix}\n )\n\n yield error\n\n def __one_of_validation(self, values, 
current_expected_value):\n if isinstance(values, (dict, list, tuple)):\n return values and current_expected_value in values\n else:\n return values and current_expected_value == values\n\n def _scheme_propagation(self, scheme, definitions):\n \"\"\" Will updated a scheme based on inheritance. This is defined in a scheme objects with ``'inherit': '$definition'``.\n Will also updated parent objects for nested inheritance.\n\n Usage::\n >>> SCHEME = {\n >>> 'thing1': {\n >>> 'inherit': '$thing2'\n >>> },\n >>> '_': {\n >>> 'thing2': {\n >>> 'this_is': 'thing2 is a definition'\n >>> }\n >>> }\n >>> }\n >>> scheme = SCHEME.get('thing1')\n >>> if 'inherit' in scheme:\n >>> scheme = self._scheme_propagation(scheme, SCHEME.get('_'))\n >>>\n >>> scheme.get('some_data')\n\n :param scheme: A dict, should be a scheme defining validation.\n :param definitions: A dict, should be defined in the scheme using '_'.\n :rtype: A :dict: will return a updated copy of the scheme.\n \"\"\"\n if not isinstance(scheme, dict):\n raise TypeError('scheme must be a dict to propagate.')\n\n inherit_from = scheme.get('inherit')\n\n if isinstance(inherit_from, six.string_types):\n if not inherit_from.startswith('$'):\n raise AttributeError('When inheriting from an object it must start with a $.')\n\n if inherit_from.count('$') > 1:\n raise AttributeError('When inheriting an object it can only have one $.')\n\n if not isinstance(definitions, dict):\n raise AttributeError(\"Must define definitions in the root of the SCHEME. \"\n \"It is done so with '_': { objs }.\")\n name = inherit_from[1:]\n definition = definitions.copy().get(name)\n\n if not definition:\n raise LookupError(\n 'Was unable to find {0} in definitions. The follow are available: {1}.'.format(name, definitions)\n )\n\n else:\n raise AttributeError('inherit must be defined in your scheme and be a string value. 
format: $variable.')\n\n updated_scheme = {key: value for key, value in six.iteritems(scheme) if key not in definition}\n nested_scheme = None\n for key, value in six.iteritems(definition):\n if key in scheme:\n updated_scheme[key] = scheme[key]\n else:\n updated_scheme[key] = value\n\n if key == 'inherit':\n nested_scheme = self._scheme_propagation(definition, definitions)\n\n # remove inherit key\n if 'inherit' in updated_scheme:\n del updated_scheme['inherit']\n\n if nested_scheme is not None:\n updated_scheme.update(nested_scheme)\n\n return updated_scheme\n\n def __build_validation_message(self, ancestors, property_name, validation, expected_value):\n msg = \" \\u27A4 \".join(ancestors.keys())\n\n if isinstance(expected_value, dict):\n expected_value = expected_value.keys()\n\n if property_name == list(ancestors)[-1]:\n msg = \"{0} \\u27A4 {1}: {2}\".format(msg, validation, expected_value)\n else:\n msg = '{0} \\u27A4 {1} \\u27A4 {2}: {3}'.format(msg, property_name, validation, expected_value)\n\n return msg\n\n\n##\n# Config Data Type Classes\n##\nclass ConfigNode(object):\n def __init__(self, start_mark, end_mark):\n self._alias = None\n self._name = None\n self._start_mark = start_mark\n self._end_mark = end_mark\n\n @property\n def alias(self):\n return self._alias\n\n @alias.setter\n def alias(self, value):\n if value is not None and not isinstance(value, six.string_types):\n raise TypeError('name must be a string.')\n\n self._alias = value\n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, value):\n if value is not None and not isinstance(value, six.string_types):\n raise TypeError('name must be a string.')\n self._name = value\n\n @property\n def start_mark(self):\n return self._start_mark\n\n @property\n def end_mark(self):\n return self._end_mark\n\n\nclass ConfigDict(dict, ConfigNode):\n def __init__(self, node_data, start_mark=None, end_mark=None):\n super(ConfigDict, self).__init__(node_data)\n ConfigNode.__init__(self, start_mark, end_mark)\n\n def __contains__(self, item):\n try:\n return super(ConfigDict, self).__contains__(item) or hasattr(self, item)\n except:\n return False\n\n def __delattr__(self, item):\n try:\n object.__getattribute__(self, item)\n except AttributeError:\n try:\n del self[item]\n except KeyError:\n raise AttributeError(item)\n else:\n object.__delattr__(self, item)\n\n def __getattr__(self, item):\n \"\"\"\n \"\"\"\n try:\n return object.__getattribute__(self, item)\n except:\n try:\n return self[item]\n except:\n raise AttributeError(item)\n\n def __setattr__(self, key, value):\n try:\n object.__getattribute__(self, key)\n except AttributeError:\n try:\n # allow for specific properties to be set on the base class and not be part of the dict.\n if key.startswith('_') and key[1:] in dir(self):\n object.__setattr__(self, key, value)\n else:\n self[key] = value\n except:\n raise AttributeError(key)\n else:\n object.__setattr__(self, key, value)\n\n\nclass ConfigSeq(list, ConfigNode):\n def __init__(self, node_data, start_mark=None, end_mark=None):\n list.__init__(self, node_data)\n ConfigNode.__init__(self, start_mark, end_mark)\n\n\nclass ConfigUnicode(str, ConfigNode):\n def __init__(self, node_data, start_mark=None, end_mark=None):\n ConfigNode.__init__(self, start_mark, end_mark)\n\n def __new__(cls, node_data, start_mark=None, end_mark=None):\n obj = super(ConfigUnicode, cls).__new__(cls, node_data)\n\n return obj\n\n\nclass ConfigInt(int, ConfigNode):\n def __init__(self, node_data, start_mark=None, 
end_mark=None):\n ConfigNode.__init__(self, start_mark, end_mark)\n\n def __new__(cls, node_data, start_mark=None, end_mark=None):\n return super(ConfigInt, cls).__new__(cls, node_data)\n\n\nclass ConfigFloat(float, ConfigNode):\n def __init__(self, node_data, start_mark=None, end_mark=None):\n ConfigNode.__init__(self, start_mark, end_mark)\n\n def __new__(cls, node_data, start_mark=None, end_mark=None):\n try:\n return float.__new__(cls, node_data)\n except TypeError:\n raise TypeError(node_data)\n except ValueError:\n raise ValueError(node_data)\n\n\n##\n# exception classes\n##\nclass ConfigValidationException(Exception):\n def __init__(self, ancestors, property_name, data, validation_type, validation_value):\n self.ancestors = ancestors\n self.data = data\n self.property_name = property_name\n self.validation_type = validation_type\n self.validation_value = validation_value\n self.parent_key = list(self.ancestors)[-1]\n self.parent_value = ancestors.get(self.parent_key)\n self._property_location = None\n self._potential_fixes = []\n\n if isinstance(data, ConfigNode):\n self._property_location = 'line: {0} column: {1}.'.format(data.start_mark.line, data.start_mark.column)\n\n super(ConfigValidationException, self).__init__(self.message)\n\n @property\n def message(self):\n msg = '{0} failed validation: {1}.'.format(self.property_name, self.validation_type)\n\n if self._property_location:\n msg = '{0} {1}'.format(msg, self._property_location)\n\n fixes = self.potential_fixes()\n if fixes:\n msg = '{0} {1}'.format(msg, fixes)\n\n return msg\n\n def log_error(self):\n logger.error(self.message)\n\n def potential_fixes(self):\n # TODO: need to make this solid and provide better searching.\n potential_parent_fixes = None\n msg = ''\n\n if self.validation_type == 'unrecognized':\n potential_parent_fixes = self.__search_ancestors()\n\n if isinstance(self.validation_value, dict):\n self._potential_fixes = get_close_matches(self.property_name, self.validation_value.keys(), cutoff=0.5)\n\n if not self._potential_fixes:\n if potential_parent_fixes:\n self._potential_fixes = potential_parent_fixes\n else:\n self._potential_fixes = self.validation_value.keys()\n\n elif isinstance(self.validation_value, (list, tuple)):\n self._potential_fixes = get_close_matches(self.property_name, self.validation_value, cutoff=0.5)\n\n if not self._potential_fixes:\n if potential_parent_fixes:\n self._potential_fixes = potential_parent_fixes\n else:\n self._potential_fixes = self.validation_value\n else:\n self._potential_fixes.append(six.text_type(self.validation_value))\n\n if not msg:\n msg = 'Potential fixes in {0} add/delete/update {1}.'.format(self.property_name, ', '.join(self._potential_fixes))\n\n return msg if self._potential_fixes else None\n\n def is_potential_fix_valid(self):\n return bool(get_close_matches(self.property_name, self._potential_fixes, cutoff=0.6))\n\n @property\n def property_location(self):\n return self._property_location\n\n def __str__(self):\n return super(ConfigValidationException, self).__str__()\n\n def __search_ancestors(self):\n # TODO: fix messaging here.\n potential_fixes = []\n\n for ancestor_key, ancestor_item in six.iteritems(self.ancestors):\n if ancestor_key == self.parent_key:\n break\n\n for match in get_close_matches(self.parent_key, ancestor_item, cutoff=0.4):\n if match == self.parent_key:\n continue\n\n potential_fixes.append(match)\n\n return potential_fixes\n\n\nclass ConfigOneOfException(ConfigValidationException):\n def __init__(self, ancestors, property_name, 
data, validation_type, validation_value):\n self.additional_messages = []\n self.property_name = property_name\n super(ConfigOneOfException, self).__init__(ancestors, property_name, data, validation_type, validation_value)\n\n @property\n def message(self):\n msg = '{0} failed validation: {1}.'.format(self.property_name, self.validation_type)\n\n if self._property_location:\n msg = '{0} {1}'.format(msg, self._property_location)\n\n if self.additional_messages:\n msg = ' {0}\\nPotential fixes:'.format(msg)\n\n for message in self.additional_messages:\n msg = '{0}\\n - {1}'.format(msg, message)\n\n return msg\n","repo_name":"TUNE-Archive/freight_forwarder","sub_path":"freight_forwarder/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":70723,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"60"} +{"seq_id":"2368417518","text":"import requests\r\n\r\n# Base Url for geocoding\r\nurl = \"https://us1.locationiq.com/v1/search.php\"\r\n\r\naddress = input(\"Input the address: \")\r\n\r\n\r\nprivate_token = \"pk.7519b33c3300b85c7f98b663c9c46596\"\r\n\r\ndata = {\r\n 'key': private_token,\r\n 'q': address,\r\n 'format': 'json'\r\n}\r\n\r\nresponse = requests.get(url, params=data)\r\n\r\nlatitude = response.json()[0]['lat']\r\nlongitude = response.json()[0]['lon']\r\n\r\nprint(f\"The latitude of the given address is: {latitude}\")\r\nprint(f\"The longitude of the given address is: {longitude}\")\r\n","repo_name":"PatheticScum/geolocation-projects","sub_path":"gecoding.py","file_name":"gecoding.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"11358938042","text":"import time\n\ndef fib(x):\n\tif x ==0:\n\t\treturn 0\n\t\n\telif x == 1:\n\t\treturn 1\n\telse:\n\t\treturn fib(x-1) + fib(x-2)\n\nstartTime = time.time()\n\nprint(\"%-14s:%d\" % (\"Result:\" , fib(32)))\nprint(\"%-14s:%.4f seconds\" % (\"Elapsed time: \", time.time() - startTime))\n\n","repo_name":"manankshastri/self-d","sub_path":"extra/dynamic1.py","file_name":"dynamic1.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"35971259829","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\nN = int(input().rstrip())\r\n\r\nresult = 0\r\n\r\nfor i in range(1, N+1) :\r\n list_ = list(map(int, str(i)))\r\n\r\n result = i + sum(list_)\r\n\r\n if result == N :\r\n print(i)\r\n break\r\n\r\n if i == N:\r\n print(0)","repo_name":"sinji2102/boj-python","sub_path":"백준/Bronze/2231. 분해합/분해합.py","file_name":"분해합.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"71317602750","text":"from enum import unique\nimport sched\nimport requests\nfrom datetime import datetime, timedelta\nfrom bs4 import BeautifulSoup\nimport calendar\nimport pandas as pd\nimport time\n\n\ndef parse_month(url):\n time.sleep(5) # Adding a sleep, otherwise, we get 429 error\n # Create a BeautifulSoup object from the response content\n response = requests.get(url)\n if response.status_code == 429:\n retry_after = response.headers.get('Retry-After')\n print(\"You received a 429 status code. 
Retry after:\", retry_after)\n else:\n print(\"Request was successful.\")\n\n soup = BeautifulSoup(response.content, 'html.parser')\n\n # Find the table containing the schedule\n table = soup.find('table', id='schedule')\n\n # Extract the schedule data from the table\n for row in table.find_all('tr'):\n try:\n cells = row.find_all('td')\n if len(cells) >= 5:\n ad = row.find_all('a')\n date = f'{ad[0].text} {cells[0].text}'\n visitor_team = cells[1].text\n visitor_score = cells[2].text\n home_team = cells[3].text\n home_score = cells[4].text\n datetimeobj = datetime.strptime(\n f'{date}m', \"%a, %b %d, %Y %I:%M%p\")\n\n end_of_regular_season = datetime(2023, 4, 10)\n # print(date, visitor_team, visitor_score, home_team, home_score)\n if datetimeobj >= end_of_regular_season:\n continue\n\n yield (date, datetimeobj, visitor_team, int(visitor_score), home_team, int(home_score))\n except ValueError as e:\n # Exception handling\n print(\"Caught an exception:\", e)\n\n\ndef get_url(year, month):\n return f'https://www.basketball-reference.com/leagues/NBA_{year}_games-{month}.html'\n\n\nunique_teams = {'Orlando Magic', 'Boston Celtics', 'Philadelphia 76ers', 'Atlanta Hawks', 'Washington Wizards', 'New York Knicks', 'Minnesota Timberwolves', 'San Antonio Spurs', 'Brooklyn Nets', 'Houston Rockets', 'Golden State Warriors', 'Miami Heat', 'Indiana Pacers', 'New Orleans Pelicans', 'Oklahoma City Thunder',\n 'Denver Nuggets', 'Los Angeles Clippers', 'Los Angeles Lakers', 'Dallas Mavericks', 'Chicago Bulls', 'Toronto Raptors', 'Memphis Grizzlies', 'Detroit Pistons', 'Portland Trail Blazers', 'Sacramento Kings', 'Cleveland Cavaliers', 'Phoenix Suns', 'Milwaukee Bucks', 'Utah Jazz', 'Charlotte Hornets'}\n\n\ndef get_team_schedule_analysis(year, team):\n # Get a list of month names\n month_strings = list(calendar.month_name)\n\n # Remove the empty first element\n month_strings = [month.lower() for month in month_strings[1:]]\n season_months = month_strings[9:] + month_strings[:4]\n\n record = (0, 0)\n b2b_games = 0\n road_b2b_games = 0\n\n for month in season_months:\n url = get_url(year, month)\n prev_data = None\n for date, datetimeobj, visitor, vscore, home, hscore in parse_month(url):\n\n w, l = record\n if visitor == team:\n if vscore > hscore:\n record = (w + 1, l)\n else:\n record = (w, l + 1)\n\n if prev_data != None and (datetimeobj - prev_data[1]) < timedelta(hours=40):\n b2b_games += 1\n\n if prev_data[2] == team:\n road_b2b_games += 1\n prev_data = date, datetimeobj, visitor, vscore, home, hscore\n elif home == team:\n if hscore > vscore:\n record = (w + 1, l)\n\n else:\n record = (w, l + 1)\n\n if prev_data != None and (datetimeobj - prev_data[1]) < timedelta(hours=40):\n b2b_games += 1\n\n prev_data = (date, datetimeobj, visitor, vscore, home, hscore)\n\n return record, b2b_games, road_b2b_games\n\n\ndef add_data_frame(data):\n data = (('John', 25, 'USA'), ('Emily', 30, 'Canada'), ('Michael', 35, 'UK'))\n\n df = pd.DataFrame(data, columns=[\n 'Team', 'Record', 'Back-To-Back Games', 'Road Back-To-Back Games'])\n\n return df\n\n\nif __name__ == \"__main__\":\n year = 2023 # Applies to 2022-2023 season\n team = 'Golden State Warriors'\n\n record, b2b_games, road_b2b_games = get_team_schedule_analysis(\n year, team)\n\n print(f'{team} Record {record}, Back-To-Back Games {b2b_games}, Road Back-To-Back Games 
{road_b2b_games}')\n","repo_name":"deanagan/NBA-Analysis","sub_path":"src/bkref-scraper.py","file_name":"bkref-scraper.py","file_ext":"py","file_size_in_byte":4478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"19712232865","text":"def new_game():\r\n \r\n guesses = []\r\n correct_guesses = 0\r\n question_num = 0\r\n\r\n for key in questions:\r\n print(\"---------------------\")\r\n print(key)\r\n\r\n for i in options[question_num]:\r\n print(i)\r\n guess = input(\"Enter (A , B, C, or D: )\")\r\n guess = guess.upper()\r\n guesses.append(guess)\r\n\r\n correct_guesses += chech_answer(questions.get(key),guess)\r\n question_num += 1\r\n display_score(correct_guesses, guesses)\r\n\r\n\r\ndef chech_answer(answer, guess):\r\n \r\n if answer == guess:\r\n print(\"CORRECT!\")\r\n return 1\r\n else:\r\n print(\"WRONG!\")\r\n return 0 \r\n\r\ndef display_score(correct_guesses, guesses):\r\n print(\"----------------------\")\r\n print(\"RESULT\")\r\n print(\"----------------------\")\r\n\r\n print(\"Answers :\", end=\"\")\r\n for i in questions:\r\n print(questions.get(i), end=\" \")\r\n print()\r\n\r\n print(\"Guesses :\", end=\"\")\r\n for i in guesses:\r\n print(i, end=\" \")\r\n print()\r\n\r\n score = int((correct_guesses/len(questions))*100)\r\n print(\"Your score is: \" + str(score) + \"%\")\r\ndef play_again():\r\n response = input(\" Do you want play again?(yes/no): \")\r\n response = response.upper()\r\n\r\n if response == \"YES\":\r\n return True\r\n else:\r\n return False\r\n\r\n\r\n\r\nquestions = {\r\n \"1 - 2 = ?\": \"A\",\r\n \"are you dumb? \": \"B\",\r\n \"are you in college? \": \"C\",\r\n \"what's your age? \": \"A\"\r\n} \r\n\r\noptions = [[\"A. -1\", \"B. 1\",\"C. 10\",\"D. 2\"],\r\n [\"A. yes\", \"B. ofcourse\", \"C. no, i am chutiya\", \"D. WTF!\"],\r\n [\"A. No\", \"B. Yes\", \"C. i at home\", \"D. Yes, it is bad\"],\r\n [\"A. 1\", \"B. 18\", \"C. 16\", \"D. 
bachanpan se ka 18 hun\"]]\r\n\r\n\r\nnew_game()\r\n\r\nwhile play_again():\r\n new_game()\r\n\r\nprint(\"byeee\")","repo_name":"Nikhil690/Having-fun","sub_path":"game2.py","file_name":"game2.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"43352461274","text":"import os\nfrom typing import Any\n\nimport torch\nimport torch.multiprocessing\nfrom cyy_naive_lib.data_structure.task_queue import BatchPolicy, TaskQueue\nfrom cyy_torch_toolbox.device import get_device_memory_info, get_devices\n\n\nclass CUDABatchPolicy(BatchPolicy):\n def adjust_batch_size(self, batch_size: int, **kwargs: Any) -> int:\n device = kwargs[\"device\"]\n if (\n batch_size + 1 not in self._processing_times\n or self._processing_times[batch_size + 1]\n < self._processing_times[batch_size]\n ):\n memory_info = get_device_memory_info(device=device, consider_cache=True)\n if memory_info[device].free / memory_info[device].total > 0.2:\n return batch_size + 1\n return batch_size\n\n\nclass TorchTaskQueue(TaskQueue):\n def __init__(self, worker_num: int | None = None, **kwargs: Any) -> None:\n self._devices: list = get_devices()\n if worker_num is None:\n worker_num = len(self._devices)\n if \"cpu\" in self._devices[0].type.lower():\n worker_num = os.cpu_count()\n super().__init__(worker_num=worker_num, **kwargs)\n\n def _get_task_kwargs(self, worker_id: int) -> dict:\n kwargs = super()._get_task_kwargs(worker_id) | {\n \"device\": self._devices[worker_id % len(self._devices)]\n }\n if self._batch_process and torch.cuda.is_available():\n kwargs[\"batch_policy\"] = CUDABatchPolicy()\n return kwargs\n","repo_name":"cyyever/torch_toolbox","sub_path":"cyy_torch_toolbox/data_structure/torch_task_queue.py","file_name":"torch_task_queue.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"60"} +{"seq_id":"14746199499","text":"from threading import Thread\nfrom threading import Lock\nfrom multiprocessing.dummy import Pool as ThreadPool\n\nclass Counter:\n def __init__(self):\n self.count = 0\n\n @property\n def increment(self):\n self.count += 1 \n \n\nclass Worker:\n def __call__(self,counter,how_many):\n for _ in range(how_many):\n counter.increment\n\ndef run_threads():\n how_many = 10**5\n counter = Counter()\n threads = []\n worker = Worker()\n for i in range(5):\n thread = Thread(target = worker, args = (counter,how_many))\n threads.append(thread)\n thread.start()\n for thread in threads:\n thread.join()\n print(\"true output is: 500000, but the real output is: {}\".format(counter.count)) \n\n\n\n#-------------lock--------------------\n\n\nclass LockCounter:\n def __init__(self):\n self.lock = Lock() \n self.count = 0\n\n @property\n def increment(self):\n with self.lock:\n self.count += 1\n\n\ndef run_threads_withlock():\n how_many = 10**5\n counter = LockCounter()\n threads = []\n pool = ThreadPool(5)\n worker = Worker()\n for i in range(10):\n pool.apply_async(func = worker,args = (counter,how_many))\n \n pool.close()\n pool.join()\n print(\"true output is: 1000000, but the real output is: {}\".format(counter.count)) \n\n\n\n\nif __name__ == '__main__':\n run_threads() \n run_threads_withlock() \n\n\n","repo_name":"zhangruochi/PythonLittleSkills","sub_path":"lock.py","file_name":"lock.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"338747798","text":"def maxDepth(s: str) -> int:\n stack = []\n count = 0\n m = 0\n for i in s:\n if m < count:\n m = count\n if i == \"(\":\n count += 1\n stack.append(i)\n if i == \")\":\n count -= 1\n stack.pop(-1)\n return m\n\n\nif __name__ == '__main__':\n s = \"(1+(2*3)+((8)/4))+1\"\n res = maxDepth(s)\n print(res)\n","repo_name":"cj495840252/leetcode","sub_path":"牛客/括号序列.py","file_name":"括号序列.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"32596980476","text":"# -*- coding: utf-8 -*-\nimport pymongo\nimport csv\n\n\nclass ShopAnalysis(object):\n\n def __init__(self):\n self.client = pymongo.MongoClient(host=\"localhost\")\n self.db = self.client[\"food_gg\"]\n self.tb = self.db[\"shops\"]\n\n def shops(self, db, table, out):\n self.db = self.client[db]\n self.tb = self.db[table]\n\n with open(\"./shop/{}\".format(out), \"w\", newline='') as f:\n title = [\"店铺ID\",\"名称\", \"地址\", \"坐标\", \"月售\"]\n\n writer = csv.writer(f)\n\n writer.writerow(title)\n\n result = self.tb.find().sort(\"sales\", -1)\n\n for record in result:\n result_row = [record.get(\"id\"),\n record.get(\"name\"),\n record.get(\"address\"),\n record.get(\"position\"),\n record.get(\"sales\"), ]\n\n try:\n writer.writerow(result_row)\n except Exception as ex:\n print(ex)\n\n print(self.tb.count())","repo_name":"sondeer/spider_analysis","sub_path":"ele_analysis/shops.py","file_name":"shops.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"1729797974","text":"# -*- coding: utf-8 -*-\n\"\"\"\nTasks for br_ibge_pnadc\n\"\"\"\nimport os\n\n# pylint: disable=invalid-name,unnecessary-dunder-call\nimport zipfile\nfrom glob import glob\n\nimport numpy as np\nimport pandas as pd\nimport requests\nfrom prefect import task\nfrom tqdm import tqdm\n\nfrom pipelines.datasets.br_ibge_pnadc.constants import constants as pnad_constants\nfrom pipelines.utils.utils import log\n\n\n@task\ndef get_year_quarter(year, quarter):\n return f\"{year}-{quarter}\"\n\n\n@task\ndef get_url_from_template(year: int, quarter: int) -> str:\n \"\"\"Return the url for the PNAD microdata file for a given year and month.\n Args:\n year (int): Year of the microdata file.\n quarter (int): Quarter of the microdata file.\n Returns:\n str: url\n \"\"\"\n download_page = f\"https://ftp.ibge.gov.br/Trabalho_e_Rendimento/Pesquisa_Nacional_por_Amostra_de_Domicilios_continua/Trimestral/Microdados/{year}/\"\n response = requests.get(download_page, timeout=5)\n\n if response.status_code >= 400 and response.status_code <= 599:\n raise Exception(f\"Erro de requisição: status code {response.status_code}\")\n\n else:\n hrefs = [k for k in response.text.split('href=\"')[1:] if \"zip\" in k]\n hrefs = [k.split('\"')[0] for k in hrefs]\n filename = None\n for href in hrefs:\n if f\"0{quarter}{year}\" in href:\n filename = href\n if not filename:\n raise Exception(\"Erro: o atributo href não existe.\")\n\n url = pnad_constants.URL_PREFIX.value + \"/{year}/{filename}\"\n return url.format(year=year, filename=filename)\n\n\n@task\ndef download_txt(url, chunk_size=128, mkdir=False) -> str:\n \"\"\"\n Gets all csv files from a url and saves them to a directory.\n \"\"\"\n if mkdir:\n os.system(\"mkdir -p /tmp/data/input/\")\n\n request_headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) 
Chrome/80.0.3987.149 Safari/537.36\",\n }\n r = requests.get(url, headers=request_headers, stream=True, timeout=10)\n save_path = \"/tmp/data/\"\n save_path = save_path + url.split(\"/\")[-1]\n with open(save_path, \"wb\") as fd:\n for chunk in tqdm(r.iter_content(chunk_size=chunk_size)):\n fd.write(chunk)\n\n with zipfile.ZipFile(save_path) as z:\n z.extractall(\"/tmp/data/input\")\n os.system('cd /tmp/data/input; find . -type f ! -iname \"*.txt\" -delete')\n filepath = glob(\"/tmp/data/input/*.txt\")[0]\n\n log(f\"Using file {filepath}\")\n\n return filepath\n\n\n@task\ndef build_parquet_files(filepath: str) -> str:\n \"\"\"\n Build parquets from txt original file.\n \"\"\"\n\n os.system(\"mkdir -p /tmp/data/staging/\")\n # read file\n chunks = pd.read_fwf(\n filepath,\n widths=pnad_constants.COLUMNS_WIDTHS.value,\n names=pnad_constants.COLUMNS_NAMES.value,\n header=None,\n encoding=\"utf-8\",\n dtype=str,\n chunksize=10000,\n )\n\n for i, chunk in enumerate(chunks):\n # partition by year, quarter and region\n chunk.rename(\n columns={\n \"UF\": \"id_uf\",\n \"Estrato\": \"id_estrato\",\n \"UPA\": \"id_upa\",\n \"Capital\": \"capital\",\n \"RM_RIDE\": \"rm_ride\",\n \"Trimestre\": \"trimestre\",\n \"Ano\": \"ano\",\n },\n inplace=True,\n )\n chunk[\"sigla_uf\"] = chunk[\"id_uf\"].map(pnad_constants.map_codigo_sigla_uf.value)\n chunk[\"id_domicilio\"] = chunk[\"id_estrato\"] + chunk[\"V1008\"] + chunk[\"V1014\"]\n\n chunk[\"habitual\"] = [np.nan] * len(chunk)\n chunk[\"efetivo\"] = [np.nan] * len(chunk)\n ordered_columns = pnad_constants.COLUMNS_ORDER.value\n chunk = chunk[ordered_columns]\n\n # save to parquet\n chunk.to_parquet(\n f\"/tmp/data/staging/microdados_{i}.parquet\",\n index=False,\n )\n\n # print number of parquet files\n total_files = len(glob(\"/tmp/data/staging/*.parquet\"))\n log(f\"Total of {total_files} parquet files created.\")\n\n return \"/tmp/data/staging/\"\n\n\n@task\ndef save_partitions(filepath: str) -> str:\n \"\"\"\n Save partitions to disk.\n\n Args:\n filepath (str): Path to the file used to build the partitions.\n\n Returns:\n str: Path to the saved file.\n\n \"\"\"\n os.system(\"mkdir -p /tmp/data/output/\")\n\n # get all parquet files\n parquet_files = glob(f\"{filepath}*.parquet\")\n # read all parquet files\n df = pd.concat([pd.read_parquet(f) for f in parquet_files])\n\n trimestre = df[\"trimestre\"].unique()[0]\n ano = df[\"ano\"].unique()[0]\n df.drop(columns=[\"trimestre\", \"ano\"], inplace=True)\n ufs = df[\"sigla_uf\"].unique()\n\n for uf in ufs:\n df_uf = df[df[\"sigla_uf\"] == uf]\n df_uf.drop(columns=[\"sigla_uf\"], inplace=True)\n os.system(\n \"mkdir -p /tmp/data/output/ano={ano}/trimestre={trimestre}/sigla_uf={uf}\".format(\n ano=ano, trimestre=trimestre, uf=uf\n )\n )\n df_uf.to_csv(\n f\"/tmp/data/output/ano={ano}/trimestre={trimestre}/sigla_uf={uf}/microdados.csv\",\n index=False,\n )\n\n return \"/tmp/data/output/\"\n","repo_name":"basedosdados/pipelines","sub_path":"pipelines/datasets/br_ibge_pnadc/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":5183,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"60"} +{"seq_id":"71238514750","text":"# parsing texts\n\n# -------------------------------------------------------\n# regex in Python\n# * vs +\n# () for grouping\n\nimport re\nif re.match(\"brr+um\", \"brrrrrum!!!!\"):\n print(\"match\")\n\n# match vs search\n# match - prefix of string\n# search - anywhere in the string\n\n# returns match object or None\n\n# compile regex if 
you use it a lot\n# refer to string-matching algorithms (and how they build somewhat finite automata for given pattern)\nautomata = re.compile('brr+um')\nres = automata.search('test brrrum')\nprint(res)\nprint(res.group())\nprint(res.start())\n\n# example: find all links to other sites\n# we assume some stuff for simplification\n\naddress = '([a-zA-Z]+.)*[a-zA-Z]+'\nregex_cmpl = re.compile('http://' + address)\n\nimport urllib.request\n\nhost = \"http://www.ii.uni.wroc.pl\"\n\nwith urllib.request.urlopen(host) as f:\n text = f.read().decode('utf-8')\n\nprint([url.group() for url in regex_cmpl.finditer(text)])\n\n# more symbols\n# {m, n} -> at least m, at most n\n# ? -> 0/1 appearance\n# w1|w2 -> alternative\n# . -> any symbol\n# \\d, \\w, \\Z -> digit, alphanumeric, end of text\n\n# escaping for special char is easy\n# but how to find '\\['\nre.match(\"\\\\\\\\\\[\", \"[\")\n# how does it happen?\n# first python converts it using escapes to `\\\\\\[`\n# then according to regex rules `\\\\` -> `\\`, `\\[` -> `[`\n# we can use raw strings to cut down on processing steps\n\n# grouping fragments of regex\nres = re.match(\"a(b*)a.*(a)\", \"abbabbba\")\nprint(res.groups())\n\n# grouping expression\n# (?P<name>regexp)\n\n# example: get day/month/year from a date\npattern = \"(?P<year>\\d{4})(?P<month>\\d{2})(?P<day>\\d{2})\"\nres = re.search(pattern, \"in the date 20211116 there is sth\")\nprint(res.group(\"year\"), \"-\", res.group(\"month\"), \"-\", res.group(\"day\"))\n\n# re.sub() example on slides (converting date format)\n# re.sub(pattern, func, text) where func takes match object and returns string\n\n# -------------------------------------------------------\n# processing HTML\n# HTML is a sequence of tags\n# we have opening <tag> and closing </tag> tags\n\n# ---\n# html.parser.HTMLParser\n# has handlers for start/end tag and data, attributes\n# class MyHTMLParser(html.parser.HTMLParser)\n# myparser.feed(page)\n\n# example: print all \"href\" links\nimport html.parser\n\n\nclass MyHTMLParser(html.parser.HTMLParser):\n def handle_starttag(self, tag, attrs):\n if tag == 'a':\n for (atr, val) in attrs:\n if atr == 'href':\n print(val)\n\n\nmy_parser = MyHTMLParser()\n\n# with open('python.html') as data:\n# my_parser.feed(data)\n\n# ---\n# BeautifulSoup -> (not in standard :( )\n# import bs4\n# and we can access by '.' 
methods after parsing the page\n# for example\n# data.title.string\n# data.title.parent.name\n\n# example: list of links\n# [link.get('href') for link in data.find_all('a')]\n\n# example: all links to thumbnails\n# find all images where there is attribute that matches to the regex\n# data.find_all('img', { 'src': re.compile('.*thumbnail.*') })\n\n# -------------------------------------------------------\n# XML parsing\n\n# ---\n# 1st strategy -> similar to above html.parser.HTMLParser\n# xml.sax -> read-only, fast, and memory-light (streaming)\n# class handler.ContentHandler\n\n# example: spreadsheet structure (both for .ods, and .xlsx) is .xml (content.xml)\n# tip: read it using a browser (like firefox)\n# implementing .ods parser for the spreadsheet\n# zipfile lib usage -> unzipping \"temporary in memory\" & just using zf.open() for files inside\n\n# 2nd strategy -> creating DOM (Document Object Model) tree representing XML\n# Node object -> name, value, attributes, childNodes\n# we can then parse node by node\n# appending/removing/replacing children\n\n# BeautifulSoup can also be used for xml parsing\n","repo_name":"arturJan4/University","sub_path":"5_Semester/KJP/Wyklady/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":3670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"8009428217","text":"a=int(input('Number1:'))\r\nb=int(input('Number2:'))\r\nc=int(input('Number3:'))\r\n\r\nif(a>=b) and (a>=c):\r\n buyuk=a\r\nelif(b>=a) and (b>=c):\r\n buyuk=b\r\nelse:\r\n buyuk=c\r\nif (a <= b) and (a <= c):\r\n kucuk = a\r\nelif (b <= a) and (b <= c):\r\n kucuk = b\r\nelse:\r\n kucuk = c\r\nprint(a, b, \"and\", c, \"- the smallest of these numbers is:\", kucuk)\r\nprint(a, b, \"and\", c, \"- the largest of these numbers is:\", buyuk)","repo_name":"Ugur361/pythonAssignments","sub_path":"python ödev1.py","file_name":"python ödev1.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"4699332020","text":"import numpy as np\nimport pandas as pd\nimport pytest\n\nfrom etna.analysis.decomposition.utils import _get_labels_names\nfrom etna.analysis.decomposition.utils import _resample\nfrom etna.analysis.decomposition.utils import _seasonal_split\nfrom etna.datasets import TSDataset\nfrom etna.transforms import LinearTrendTransform\nfrom etna.transforms import TheilSenTrendTransform\n\n\n@pytest.mark.parametrize(\n \"poly_degree, expect_values, trend_class\",\n (\n [1, True, LinearTrendTransform],\n [2, False, LinearTrendTransform],\n [1, True, TheilSenTrendTransform],\n [2, False, TheilSenTrendTransform],\n ),\n)\ndef test_get_labels_names_linear_coeffs(example_tsdf, poly_degree, expect_values, trend_class):\n ln_tr = trend_class(in_column=\"target\", poly_degree=poly_degree)\n ln_tr.fit_transform(example_tsdf)\n segments = example_tsdf.segments\n _, linear_coeffs = _get_labels_names([ln_tr], segments)\n if expect_values:\n assert list(linear_coeffs.values()) != [\"\", \"\"]\n else:\n assert list(linear_coeffs.values()) == [\"\", \"\"]\n\n\n@pytest.mark.parametrize(\n \"timestamp, cycle, expected_cycle_names, expected_in_cycle_nums, expected_in_cycle_names\",\n [\n (\n pd.date_range(start=\"2020-01-01\", periods=5, freq=\"D\"),\n 3,\n [\"1\", \"1\", \"1\", \"2\", \"2\"],\n [0, 1, 2, 0, 1],\n [\"0\", \"1\", \"2\", \"0\", \"1\"],\n ),\n (\n pd.date_range(start=\"2020-01-01\", periods=6, freq=\"15T\"),\n \"hour\",\n [\"2020-01-01 00\"] * 4 + 
[\"2020-01-01 01\"] * 2,\n [0, 1, 2, 3, 0, 1],\n [\"0\", \"1\", \"2\", \"3\", \"0\", \"1\"],\n ),\n (\n pd.date_range(start=\"2020-01-01\", periods=26, freq=\"H\"),\n \"day\",\n [\"2020-01-01\"] * 24 + [\"2020-01-02\"] * 2,\n [i % 24 for i in range(26)],\n [str(i % 24) for i in range(26)],\n ),\n (\n pd.date_range(start=\"2020-01-01\", periods=10, freq=\"D\"),\n \"week\",\n [\"2020-00\"] * 5 + [\"2020-01\"] * 5,\n [2, 3, 4, 5, 6, 0, 1, 2, 3, 4],\n [\"Wed\", \"Thu\", \"Fri\", \"Sat\", \"Sun\", \"Mon\", \"Tue\", \"Wed\", \"Thu\", \"Fri\"],\n ),\n (\n pd.date_range(start=\"2020-01-03\", periods=40, freq=\"D\"),\n \"month\",\n [\"2020-Jan\"] * 29 + [\"2020-Feb\"] * 11,\n list(range(3, 32)) + list(range(1, 12)),\n [str(i) for i in range(3, 32)] + [str(i) for i in range(1, 12)],\n ),\n (\n pd.date_range(start=\"2020-01-01\", periods=14, freq=\"M\"),\n \"quarter\",\n [\"2020-1\"] * 3 + [\"2020-2\"] * 3 + [\"2020-3\"] * 3 + [\"2020-4\"] * 3 + [\"2021-1\"] * 2,\n [i % 3 for i in range(14)],\n [str(i % 3) for i in range(14)],\n ),\n (\n pd.date_range(start=\"2020-01-01\", periods=14, freq=\"M\"),\n \"year\",\n [\"2020\"] * 12 + [\"2021\"] * 2,\n [i % 12 + 1 for i in range(14)],\n [\"Jan\", \"Feb\", \"Mar\", \"Apr\", \"May\", \"Jun\", \"Jul\", \"Aug\", \"Sep\", \"Oct\", \"Nov\", \"Dec\", \"Jan\", \"Feb\"],\n ),\n ],\n)\ndef test_seasonal_split(timestamp, cycle, expected_cycle_names, expected_in_cycle_nums, expected_in_cycle_names):\n cycle_df = _seasonal_split(timestamp=timestamp.to_series(), freq=timestamp.freq.freqstr, cycle=cycle)\n assert cycle_df[\"cycle_name\"].tolist() == expected_cycle_names\n assert cycle_df[\"in_cycle_num\"].tolist() == expected_in_cycle_nums\n assert cycle_df[\"in_cycle_name\"].tolist() == expected_in_cycle_names\n\n\n@pytest.mark.parametrize(\n \"timestamp, values, resample_freq, aggregation, expected_timestamp, expected_values\",\n [\n (\n pd.date_range(start=\"2020-01-01\", periods=14, freq=\"Q\"),\n [np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, 10, 16, 10, 5, 7, 5, 7, 3, 3],\n \"Y\",\n \"sum\",\n pd.date_range(start=\"2020-01-01\", periods=4, freq=\"Y\"),\n [np.NaN, 36.0, 24.0, 6.0],\n ),\n (\n pd.date_range(start=\"2020-01-01\", periods=14, freq=\"Q\"),\n [np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, 10, 16, 10, 5, 7, 5, 7, 3, 3],\n \"Y\",\n \"mean\",\n pd.date_range(start=\"2020-01-01\", periods=4, freq=\"Y\"),\n [np.NaN, 12.0, 6.0, 3.0],\n ),\n ],\n)\ndef test_resample(timestamp, values, resample_freq, aggregation, expected_timestamp, expected_values):\n df = pd.DataFrame({\"timestamp\": timestamp.tolist(), \"target\": values, \"segment\": len(timestamp) * [\"segment_0\"]})\n df_wide = TSDataset.to_dataset(df)\n df_resampled = _resample(df=df_wide, freq=resample_freq, aggregation=aggregation)\n assert df_resampled.index.tolist() == expected_timestamp.tolist()\n assert (\n df_resampled.loc[:, pd.IndexSlice[\"segment_0\", \"target\"]]\n .reset_index(drop=True)\n .equals(pd.Series(expected_values))\n )\n","repo_name":"tinkoff-ai/etna","sub_path":"tests/test_analysis/test_decomposition/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":4876,"program_lang":"python","lang":"en","doc_type":"code","stars":796,"dataset":"github-code","pt":"60"} +{"seq_id":"71279606272","text":"import requests\nfrom bs4 import BeautifulSoup\n\nfrom flask import Flask, render_template\n\napp =Flask(__name__)\n\n@app.route('/')\ndef home():\n return render_template('index.html')\n@app.route('/detikpopuler')\ndef detik_populer():\n html_doc = 
requests.get('https://www.detik.com/terpopuler')\n\n soup = BeautifulSoup(html_doc.text, 'html.parser')\n\n populer_area = soup.find(attrs={'class': 'nhl indeks mgb-24'})\n\n titles = populer_area.findAll(attrs={'class': 'media__title'})\n\n image = populer_area.findAll(attrs={'class': 'media__image'})\n\n return render_template('index.html', images = image)\n\n\n@app.route('/idr rates')\ndef idr_rates():\n source = requests.get('http://www.floatrates.com/daily/idr.json')\n jason_data = source.json()\n return render_template('idr-rates.html', datas=jason_data)\n\n\n\nif __name__=='__main__':\n app.run(debug=True)","repo_name":"Ardianwi/Python-Mastery","sub_path":"Scrape/Scrapecurrency/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"40196418683","text":"def is_ok(n, a, b):\n if a > b:\n print(0)\n return False\n elif a == b:\n print(1)\n return False\n elif n == 1 and a != b:\n print(0)\n return False\n else:\n return True\n \ndef main():\n N, A, B = map(int, input().split())\n if not is_ok(N, A, B):\n return 0\n print((N-2) * (B-A) + 1)\n return 0\n\nmain()","repo_name":"kyug3/my-atcoder","sub_path":"AGC/AGC015/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"13797952973","text":"class Solution:\n def findNumbers(self, nums: List[int]) -> int:\n \n # # -----x-----x-----\n # # Solution 1 - One pass\n # # n=len(nums), k=worst-case number in nums\n # # Time complexity = O(n*log10(k))\n # # Space complexity = O(1)\n \n # count = 0\n \n # for n in nums:\n # digits = 0\n # while n:\n # n //= 10\n # digits +=1\n \n # if digits%2 == 0:\n # count += 1\n \n # return count\n # # -----x-----x-----\n\n # # -----x-----x-----\n # # Solution 2 - One pass\n # # n=len(nums), k=worst-case number in nums\n # # Time complexity = O(n*log10(k))\n # # Space complexity = O(1)\n \n count = 0\n \n for n in nums:\n if len(str(n))%2 == 0:\n count += 1\n \n return count\n # # -----x-----x-----\n ","repo_name":"adivc21/Leetcode","sub_path":"1295_Find_Numbers_with_Even_Number_of_Digits.py","file_name":"1295_Find_Numbers_with_Even_Number_of_Digits.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"28297373243","text":"#prepare a list of any items in it, print only numbers gr8er than 6\nlist = [];\nwhile(1):\n print(\"Enter value into list n press c to stop adding\");\n try:\n x = input();\n list.append(int(x));\n except:\n if (x=='c'):\n break;\nprint(\"The items in List are\",list);\nprint(\"The Items gr8er than 6 are\");\nfor y in list:\n if(y > 6):\n print(y);\n else:\n continue;\n\n","repo_name":"hrithikm007/Python_H007","sub_path":"Older/list_print_gr8er_den_6.py","file_name":"list_print_gr8er_den_6.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"12750932341","text":"#!/usr/bin/env python3\n\nimport sys\nimport threading\nfrom MAVLinkDriver import uav_connect, get_mission_item, heartbeat_handler, set_px4_mission\n\nDEFAULT_MISSION = \"mission.txt\"\n# DEFAULT_CONNECTION = \"udp:127.0.0.1:14540\"\nDEFAULT_CONNECTION = \"serial:/dev/ttyACM0\"\nDEFAULT_BAUDRATE = 57600\n\nMASTER = None\nSTATUS = [0]\nEVENT_KILL_HANDLER = threading.Event()\n\n\ndef 
read_mission_from_file(filename):\n items = []\n\n file = open(filename, 'r')\n text = file.read()\n for i, line in enumerate(text.split(\"\\n\")):\n if i == 0:\n continue\n elif line == \"\":\n break\n\n tokens = line.split(\"\\t\", 12)\n system = None\n component = None\n if MASTER is not None:\n system = MASTER.target_system\n component = MASTER.target_component\n\n items.append(get_mission_item(system, component, tokens))\n\n file.close()\n return items\n\n\ndef do_send(*args):\n data = args[0]\n if not data:\n mission_file = DEFAULT_MISSION\n else:\n mission_file = data[0]\n\n if MASTER is None:\n print(\"[Error] Establish connection first.\", file=sys.stderr)\n return\n items = read_mission_from_file(mission_file)\n result = set_px4_mission(MASTER, items)\n if result == 0:\n print(\"Mission sent correctly.\")\n else:\n print(\"[Error] Mission sending failed.\", file=sys.stderr)\n\n\ndef do_show(*args):\n data = args[0]\n if not data:\n mission_file = DEFAULT_MISSION\n else:\n mission_file = data[0]\n\n items = read_mission_from_file(mission_file)\n for item in items:\n print(item)\n\n\ndef do_connect(*args):\n data = args[0]\n if not data:\n connection = DEFAULT_CONNECTION\n baudrate = DEFAULT_BAUDRATE\n elif len(data) == 1:\n connection = data[0]\n baudrate = DEFAULT_BAUDRATE\n elif len(data) > 1:\n connection = data[0]\n baudrate = data[1]\n\n type_con = connection.split(\":\")[0]\n if type_con == \"serial\":\n connection = connection.split(\":\")[-1]\n\n master = uav_connect(connection, baudrate)\n if master is not None:\n handler = threading.Thread(target=heartbeat_handler, args=(\n master, STATUS, EVENT_KILL_HANDLER), name='heartbeat_handler')\n handler.start()\n\n global MASTER\n MASTER = master\n\n\ndef do_help(*args):\n data = args[0]\n if not data:\n print(\"Command Ground Control, version 1.0.0\")\n print(\"Commands are defined internally. Type 'help' to see this list.\")\n print(\"Type 'help cmd' to know more about the command 'cmd'.\")\n print(\"Type 'help cgc' to know more about Command Ground Control in general.\")\n print(\"\")\n print(\"connect [connection] [baudrate]\")\n print(\"help [cmd]\")\n print(\"send [filename]\")\n print(\"show [filename]\")\n elif data[0] == \"cgc\":\n print(\"Command Ground Control, version 1.0.0\")\n print(\"Author: Pedro Arias Perez\")\n print(\"\")\n print(\"Description: WORK IN PROGRESS\")\n elif data[0] == \"connect\":\n print(\"connect: connect [connection] [baudrate]\")\n print(\"WORK IN PROGRESS\")\n elif data[0] == \"help\":\n print(\"help: help [cmd]\")\n print(\"WORK IN PROGRESS\")\n elif data[0] == \"send\":\n print(\"send: send [filename]\")\n print(\"WORK IN PROGRESS\")\n elif data[0] == \"show\":\n print(\"show: show [filename]\")\n print(\"WORK IN PROGRESS\")\n else:\n print(\"[Error] Invalid command. Please type 'help' to see available commands.\", file=sys.stderr)\n\n\ndef process_line(s):\n tokens = s.split(\" \")\n if tokens[0] == \"help\":\n do_help(tokens[1:])\n elif tokens[0] == \"connect\":\n do_connect(tokens[1:])\n elif tokens[0] == \"show\":\n do_show(tokens[1:])\n elif tokens[0] == \"send\":\n do_send(tokens[1:])\n elif tokens[0] == \"\":\n pass\n else:\n print(\"[Error] Invalid command. 
Please type 'help' to see available commands.\", file=sys.stderr)\n\n\ndef main():\n while True:\n try:\n s = input(\"> \")\n except EOFError:\n break\n process_line(s)\n\n EVENT_KILL_HANDLER.set()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"pariaspe/cgc","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"74317143551","text":"import numpy as np\nimport sys\nimport json\nimport pandas as pd\nfrom collections import defaultdict\n\n\ndef depth(kmer):\n \"\"\"\n Compute depth of kmer.\n Depth of kmer is len(kmer1)/2 + 1.\n For example:\n Depth (A) = 1\n Depth (TAT) = 3/2 + 1 = 2\n Depth (GTATG) = 5/2 + 1 = 3...\n \"\"\"\n return int(len(kmer)/2 + 1)\n\n\ndef compute_distance_between_kmers(kmer1, kmer2, mc_tree):\n \"\"\"\n Naive Algorithm to find the Least Common Ancestor:\n Algorithm 1:\n Data: 1. Depth of kmer1 and kmer2\n 2. Parent of each kmer.\n This information is already stored in the state dictionary.\n\n Algorithm:\n while depth(kmer1) != depth(kmer2):\n if depth(kmer1) > depth(kmer2):\n kmer2 <- parent(kmer2)\n elif depth(kmer2) > depth(kmer1):\n kmer1 <- parent(kmer1)\n # at this point, the depths of \"kmer1\" and \"kmer2\" should be the same.\n\n while kmer1 != kmer2:\n kmer1 <- parent(kmer1)\n kmer2 <- parent(kmer2)\n LCA <- kmer1\n return LCA\n\n Algorithm 2:\n Data: kmer1, kmer2 and LCA\n Distance(kmer1, kmer2) = Distance (kmer1, LCA) + Distance(kmer2, LCA)\n\n Parameters:\n kmer1 (str): kmer1\n kmer2 (str): kmer2\n mc_tree (dict) : mc_tree\n\n Returns:\n Distance(kmer1, kmer2)\n \"\"\"\n node1 = kmer1\n node2 = kmer2\n\n while depth(node1) != depth(node2):\n if depth(node1) > depth(node2):\n node1 = mc_tree[node1]['parent']\n elif depth(node2) > depth(node1):\n node2 = mc_tree[node2]['parent']\n\n assert depth(node1) == depth(node2)\n\n while node1 != node2:\n node1 = mc_tree[node1]['parent']\n node2 = mc_tree[node2]['parent']\n\n assert node1 == node2\n lca = node1\n distance_between_nodes = (depth(kmer1) - depth(lca)) + (depth(kmer2) - depth(lca))\n return distance_between_nodes\n\n\ndef compute_distance_matrix():\n pass\n\n\ndef main():\n tree_dictionary_fp = sys.argv[1]\n top_kmers_outfile = sys.argv[2]\n\n top_kmers = pd.read_csv(top_kmers_outfile, sep='\\t',\n header=0)\n\n with open(tree_dictionary_fp) as json_file:\n data_dictionary = json.load(json_file)\n\n # print(top_kmers)\n # top_kmers is a pandas DataFrame:\n # Kmer Score\n # 0 ACAAATGTAAA 0.75\n # 1 ATGTGTCATTA 0.75\n # 2 ATTGTTTACAA 0.73\n # 3 ATTCATTTGTG 0.73\n # print(data_dictionary)\n\n compute_distance_between_kmers('CATGAG', 'ACAAATGAGTC',\n mc_tree=data_dictionary)\n\n # To do:\n # compute the tree-based distance matrix & perform hierarchical clustering.\n # use the levenstein distance to perform hierarchical clustering.\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"DivyanshiSrivastava/MonteCarlo-TF","sub_path":"find_subtree.py","file_name":"find_subtree.py","file_ext":"py","file_size_in_byte":2758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"4040007640","text":"import numpy\n\n\nfrom syned.beamline.optical_elements.absorbers.filter import Filter\nfrom wofrysrw.beamline.optical_elements.absorbers.srw_transmission import SRWTransmission\nfrom wofrysrw.util.srw_absorption import get_transmission_amplitudes, get_transmission_optical_path_difference, 
add_thickness_error_to_thickness_profile\n\nclass SRWFilter(Filter, SRWTransmission):\n def __init__(self,\n name=\"Undefined\",\n material=\"Be\",\n thickness=1e-3,\n attenuation_length=1.0,\n delta=1e-6,\n x_range=[-1e-3, 1e-3],\n y_range=[-1e-3, 1e-3],\n n_points_x=100,\n n_points_y=100,\n energy=15000,\n thickness_error_profile_file=None,\n scaling_factor=1.0):\n Filter.__init__(self, name=name, material=material, thickness=thickness)\n\n thickness_profile = numpy.ones((n_points_x, n_points_y))*self.get_thickness()\n\n if not thickness_error_profile_file is None: add_thickness_error_to_thickness_profile(thickness_profile,\n thickness_error_profile_file,\n scaling_factor,\n x_range,\n y_range,\n n_points_x,\n n_points_y)\n\n SRWTransmission.__init__(self,\n x_range=x_range,\n y_range=y_range,\n transmission_amplitudes=get_transmission_amplitudes(thickness_profile, attenuation_length),\n transmission_optical_path_difference=get_transmission_optical_path_difference(thickness_profile, delta),\n energy=energy)\n\n\n\n","repo_name":"oasys-kit/wofrysrw","sub_path":"wofrysrw/beamline/optical_elements/absorbers/srw_filter.py","file_name":"srw_filter.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"3980190200","text":"from imutils.video import VideoStream\nimport numpy as np\nimport imutils\nimport time\nimport json\nimport cv2\nimport os\n\n# from score import init_predictor, predict\nimport requests\n\nprint(\"[INFO] starting video stream...\")\nvs = VideoStream().start()\ntime.sleep(2.0)\n# init_predictor()\nwhile True:\n\tframe = vs.read()\n\tframe = imutils.resize(frame, width=400)\n\n\tcols, rows, _ = frame.shape\n\tbrightness = np.sum(frame) / (255 * cols * rows)\n\tratio = brightness / 1.0\n\tif ratio < 1.0:\n\t\tframe = cv2.convertScaleAbs(frame, alpha = 1 / ratio, beta = 50)\n\t\n\tframe_data = frame\n\tframe_data = np.array(frame)\n\tframe_data = json.dumps({'data': frame_data.tolist()})\n\t# mask_results = json.loads(predict(frame_data))\n\tmask_results = json.loads(requests.post('http://localhost:5000/predict',json = frame_data).text)\n\tlocs = mask_results[\"locs\"]\n\tpreds = mask_results[\"predictions\"]\n\n\t# (locs, preds) = predict(frame_data)\n\t\n\tfor (box, pred) in zip(locs, preds):\n\t\t(startX, startY, endX, endY) = box\n\t\t(mask, withoutMask) = pred\n\n\t\tlabel = \"Mask\" if mask > withoutMask else \"No Mask\"\n\t\tcolor = (0, 255, 0) if label == \"Mask\" else (0, 0, 255)\n\t\tlabel = \"{}: {:.2f}%\".format(label, max(mask, withoutMask) * 100)\n\t\tcv2.putText(frame, label, (startX, startY - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.50, color, 2)\n\t\tcv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)\n\n\tcv2.imshow(\"Frame\", frame)\n\tkey = cv2.waitKey(1) & 0xFF\n\tif key == ord(\"q\"):\n\t\tbreak\n\ncv2.destroyAllWindows()\nvs.stop()","repo_name":"saahil-jain/Microsoft_Vision","sub_path":"face_mask_detection/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"70133977793","text":"\"\"\" TCP/IP PORT LİSTESİ:\nFTP:21\nSSH:22\nTELNET:23\nSMTP:25\nHTTP:80\nNETBIOS:139\nHTTPS:443\nSMB:445\nRDB:3389\n\"\"\"\n\nfrom tkinter import*\nimport socket\n\n\ndef tarama():\n s1=str(enturl.get())\n liste=[21, 22, 23, 25, 53, 80, 110, 135, 139, 143, 443, \n 445, 587, 993, 1433, 3306, 3389, 5900, 8080]\n try:\n for port in liste:\n 
sock=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n            result=sock.connect_ex((s1,port))\n            if result==0:\n                listsonuc.insert(1,\"Port {} open\".format(port))\n            else:\n                listsonuc.insert(1,\"Port {} closed\".format(port))\n            sock.close()\n    except socket.error:\n        print(\"Could not reach the host\")\n\npen=Tk()\npen.geometry(\"330x500\")\npen.title(\"Open port scanner\")\npen.resizable(FALSE,FALSE)\n\nlblurl=Label(pen,text=\"URL or IP ADDRESS\",font=\"Verdana 12 bold\",fg=\"white\",bg=\"black\")\nlblurl.place(x=60,y=20)\nlistsonuc=Listbox(pen,font=\"Verdana 12 bold\",width=25,height=17,fg=\"white\",bg=\"black\")\nlistsonuc.place(x=27,y=140)\nenturl=Entry(pen,font=\"Verdana 12 bold\",fg=\"blue\")\nenturl.place(x=50,y=50)\nbtntara=Button(pen,text=\"Start Scan\",font=\"Verdana 12 bold\",fg=\"white\",bg=\"black\",command=tarama)\nbtntara.place(x=80,y=90)\n \n \n \n\n\npen.mainloop()","repo_name":"1omerozturk/ipandportscanning","sub_path":"portscaning.py","file_name":"portscaning.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"19103152148","text":"import abc\r\nimport math\r\n\r\nimport numpy as np\r\nimport pygame\r\nimport random\r\n\r\nimport sprites\r\nfrom . import items\r\nfrom .base import BaseObject\r\nfrom .items import Inventory, BlackMonster, WhiteMonster, DiamondSword, Consumable\r\n\r\nfrom pathlib import Path\r\n\r\ngame_folder = Path(__file__).parent.parent\r\nimg_folder = game_folder / 'img'\r\n\r\nDir = {\r\n    sprites.CharacterState.IDLE: np.array([0, 0]),\r\n    sprites.CharacterState.UP: np.array([0, -1]),\r\n    sprites.CharacterState.LEFT: np.array([-1, 0]),\r\n    sprites.CharacterState.DOWN: np.array([0, 1]),\r\n    sprites.CharacterState.RIGHT: np.array([1, 0]),\r\n}\r\n\r\n\r\nclass AP(pygame.sprite.Sprite):\r\n    def __init__(self, value, max, owner, transparent=True):\r\n        super().__init__()\r\n        self.max = max\r\n        self.value = value\r\n        self.owner = owner\r\n        self.transparent = transparent\r\n        self.draw_ap()\r\n\r\n    def reset_ap(self, new_ap):\r\n        if new_ap <= self.max:\r\n            self.value = new_ap\r\n            self.draw_ap()\r\n\r\n    def draw_ap(self):\r\n        if not self.transparent:\r\n            digit1 = self.value % 10 if self.value < 10 else self.value // 10\r\n            digit2 = None if self.value < 10 else self.value % 10\r\n            pic1 = sprites.img_folder / (f\"number{digit1}.png\" if digit1 is not None else \"player1.png\")\r\n            pic2 = sprites.img_folder / (f\"number{digit2}.png\" if digit2 is not None else \"player1.png\")\r\n            self.image1 = pygame.image.load(pic1).convert()\r\n            self.image2 = pygame.image.load(pic2).convert()\r\n            self.image1.set_colorkey((0, 0, 0))\r\n            self.image2.set_colorkey((0, 0, 0))\r\n            if digit2 is None:\r\n                self.image2.set_alpha(0)\r\n            else:\r\n                self.image2.set_alpha(255)\r\n            self.rect1 = self.image1.get_rect()\r\n            self.rect1.topleft = (25, 25)\r\n            self.rect2 = self.image2.get_rect()\r\n            self.rect2.topleft = (75, 25)\r\n            self.surf1 = pygame.Surface((50, 50))\r\n            self.surf2 = pygame.Surface((50, 50))\r\n            self.owner.game.screen.blit(self.image1, self.rect1)\r\n            self.owner.game.screen.blit(self.image2, self.rect2)\r\n\r\n\r\nclass HP(pygame.sprite.Sprite):\r\n    def __init__(self, value, max, owner, transparent=True):\r\n        super().__init__()\r\n        self.max = max\r\n        self.value = value\r\n        self.owner = owner\r\n        self.transparent = transparent\r\n        self.draw_hp()\r\n\r\n    def draw_hp(self):\r\n        if not self.transparent:\r\n            digit1 = self.value % 10 if self.value < 10 else self.value // 10\r\n            digit2 = 
None if self.value < 10 else self.value % 10\r\n pic1 = sprites.img_folder / (f\"hp{digit1}.png\" if digit1 is not None else \"player1.png\")\r\n pic2 = sprites.img_folder / (f\"hp{digit2}.png\" if digit2 is not None else \"player1.png\")\r\n self.image1 = pygame.image.load(pic1).convert()\r\n self.image2 = pygame.image.load(pic2).convert()\r\n self.image1.set_colorkey((0, 0, 0))\r\n self.image2.set_colorkey((0, 0, 0))\r\n if digit2 is None:\r\n self.image2.set_alpha(0)\r\n else:\r\n self.image2.set_alpha(255)\r\n self.rect1 = self.image1.get_rect()\r\n self.rect1.topleft = (25 + 17 * 50, 25)\r\n self.rect2 = self.image2.get_rect()\r\n self.rect2.topleft = (25 + 18 * 50, 25)\r\n self.surf1 = pygame.Surface((50, 50))\r\n self.surf2 = pygame.Surface((50, 50))\r\n self.owner.game.screen.blit(self.image1, self.rect1)\r\n self.owner.game.screen.blit(self.image2, self.rect2)\r\n\r\n def reset_hp(self, new_ap):\r\n if new_ap <= self.max:\r\n self.value = new_ap\r\n self.draw_hp()\r\n\r\n\r\nclass DamageSprite(pygame.sprite.Sprite):\r\n def __init__(self, value, ttl, owner):\r\n super().__init__()\r\n self.value = value\r\n self.owner = owner\r\n self.max_ttl = ttl\r\n self.ttl = 0\r\n\r\n def reset_ttl(self):\r\n self.ttl = self.max_ttl\r\n\r\n def show_once(self):\r\n digit1 = self.value % 10 if self.value < 10 else self.value // 10\r\n digit2 = None if self.value < 10 else self.value % 10\r\n pic1 = sprites.img_folder / (f\"enemyhp{digit1}.png\" if digit1 is not None else \"enemyhp0.png\")\r\n pic2 = sprites.img_folder / (f\"enemyhp{digit2}.png\" if digit2 is not None else \"enemyhp0.png\")\r\n self.image1 = pygame.image.load(pic1).convert()\r\n self.image2 = pygame.image.load(pic2).convert()\r\n self.image1.set_colorkey((0, 0, 0))\r\n self.image2.set_colorkey((0, 0, 0))\r\n if digit2 is None:\r\n self.image2.set_alpha(0)\r\n else:\r\n self.image2.set_alpha(255)\r\n self.rect1 = self.image1.get_rect()\r\n self.rect1.topleft = (self.owner.rect.x, self.owner.rect.y - 30)\r\n self.rect2 = self.image2.get_rect()\r\n self.rect2.topleft = (self.owner.rect.x + 25, self.owner.rect.y - 30)\r\n self.surf1 = pygame.Surface((50, 50))\r\n self.surf2 = pygame.Surface((50, 50))\r\n\r\n if self.ttl > 0:\r\n self.ttl -= 1\r\n if self.ttl == 0:\r\n self.image1.set_alpha(0)\r\n self.image2.set_alpha(0)\r\n\r\n self.owner.game.screen.blit(self.image1, self.rect1)\r\n self.owner.game.screen.blit(self.image2, self.rect2)\r\n\r\n\r\nclass Character(BaseObject):\r\n ANIMATION = 10\r\n\r\n def __init__(self, x, y, game, ap_transparency=True):\r\n self.is_active = False\r\n self.pos = np.array([x, y])\r\n self.game = game\r\n\r\n self.region = self.game.getregion(self.pos)\r\n self.next_region = self.game.getregion(self.pos)\r\n\r\n self.ap = AP(10, 10, self, ap_transparency)\r\n self.hp = HP(50, 50, self, ap_transparency)\r\n self.damage = DamageSprite(0, 5, self)\r\n\r\n self.state = sprites.CharacterState.IDLE\r\n self.moves_count = 0\r\n\r\n self.inventory = Inventory(max_slots=5)\r\n\r\n def update(self):\r\n if self.is_active:\r\n if self.moves_count > 0:\r\n self.moves_count -= 1\r\n self.image = self.get_image()\r\n elif self.moves_count == 0:\r\n self.reset_ap()\r\n next_action = self.get_next_action()\r\n next_object = self.game.get_nearby_object(self, next_action)\r\n if self.ap.value != 0:\r\n self.interact(next_object)\r\n else:\r\n self.state = sprites.CharacterState.IDLE\r\n else:\r\n assert False, \"moves_count < 0\"\r\n else:\r\n self.state = sprites.CharacterState.IDLE\r\n self.damage.show_once()\r\n 
self.ap.draw_ap()\r\n self.hp.draw_hp()\r\n super().update()\r\n\r\n def interact(self, other: \"BaseObject\"):\r\n if isinstance(other, sprites.EnemySprite)\\\r\n or isinstance(other, Player):\r\n self.attack_enemy(other)\r\n self.state = sprites.CharacterState.IDLE\r\n self.moves_count = self.ANIMATION - 1\r\n elif not isinstance(other, sprites.Floor):\r\n self.state = sprites.CharacterState.IDLE\r\n if isinstance(self, Enemy):\r\n self.moves_count = self.ANIMATION - 1\r\n else:\r\n self.moves_count = 0\r\n elif self.state != sprites.CharacterState.IDLE:\r\n self.moves_count = self.ANIMATION - 1\r\n self.region = self.game.getregion(self.pos)\r\n self.pos += Dir[self.state]\r\n self.next_region = self.game.getregion(self.pos)\r\n\r\n def attack_enemy(self, enemy):\r\n active_item = self.inventory.active_item\r\n if isinstance(active_item, items.Weapon) and self.ap.value >= active_item.ap:\r\n distance = math.hypot(self.rect.centerx - enemy.rect.centerx, self.rect.centery - enemy.rect.centery) / 50\r\n if distance <= active_item.range:\r\n # print(f\"{self} attacked {enemy} with {active_item.name}\")\r\n # print(f\"this consumed {active_item.ap} APs\")\r\n self.ap.reset_ap(self.ap.value - active_item.ap)\r\n enemy.take_damage(active_item.damage)\r\n\r\n def take_damage(self, damage):\r\n self.show_damage(damage)\r\n if damage > self.hp.value:\r\n self.kill()\r\n else:\r\n self.hp.reset_hp(self.hp.value - damage)\r\n\r\n # print(f\"shit, got {damage} HP hit\")\r\n\r\n def show_damage(self, damage):\r\n self.damage.value = damage\r\n self.damage.reset_ttl()\r\n\r\n\r\nclass Enemy(Character, sprites.EnemySprite):\r\n def __init__(self, x, y, game):\r\n sprites.EnemySprite.__init__(self, x, y, game)\r\n Character.__init__(self, x, y, game)\r\n # self.state = sprites.CharacterState.LEFT\r\n\r\n def get_next_action(self):\r\n x, y = (self.game.player.pos - self.pos)\r\n if abs(x) < abs(y):\r\n if y < 0:\r\n self.state = sprites.CharacterState.UP\r\n else:\r\n self.state = sprites.CharacterState.DOWN\r\n else:\r\n if x < 0:\r\n self.state = sprites.CharacterState.LEFT\r\n else:\r\n self.state = sprites.CharacterState.RIGHT\r\n return self.state\r\n\r\n def reset_ap(self):\r\n self.ap.reset_ap(self.ap.value - 1)\r\n # print(f\"enemy AP: {self.ap.value}\")\r\n\r\n def get_damage(self):\r\n return random.randint(5, 15)\r\n\r\n def attack_enemy(self, enemy):\r\n damage = self.get_damage()\r\n # print(f\"{self} attacked {enemy}\")\r\n # print(f\"this consumed 1 APs\")\r\n self.ap.reset_ap(self.ap.value - 1)\r\n enemy.take_damage(damage)\r\n\r\n\r\nclass Player(Character, sprites.Player):\r\n ANIMATION = 10\r\n\r\n def __init__(self, game):\r\n Character.__init__(self, 1, 1, game, ap_transparency=False)\r\n sprites.Player.__init__(self, game)\r\n\r\n self.inventory.add(BlackMonster())\r\n self.inventory.add(WhiteMonster())\r\n self.inventory.add(DiamondSword())\r\n\r\n def get_next_action(self):\r\n keystate = pygame.key.get_pressed()\r\n\r\n if keystate[pygame.K_a]:\r\n state = sprites.CharacterState.LEFT\r\n elif keystate[pygame.K_d]:\r\n state = sprites.CharacterState.RIGHT\r\n elif keystate[pygame.K_s]:\r\n state = sprites.CharacterState.DOWN\r\n elif keystate[pygame.K_w]:\r\n state = sprites.CharacterState.UP\r\n else:\r\n state = sprites.CharacterState.IDLE\r\n\r\n if keystate[pygame.K_1]:\r\n self.inventory.select_active_item(0)\r\n elif keystate[pygame.K_2]:\r\n self.inventory.select_active_item(1)\r\n elif keystate[pygame.K_3]:\r\n self.inventory.select_active_item(2)\r\n elif 
keystate[pygame.K_4]:\r\n self.inventory.select_active_item(3)\r\n elif keystate[pygame.K_5]:\r\n self.inventory.select_active_item(4)\r\n elif keystate[pygame.K_q]:\r\n self.inventory.select_active_item(None)\r\n active_item = self.inventory.active_item\r\n if active_item is not None:\r\n if keystate[pygame.K_e] and isinstance(active_item, Consumable):\r\n active_item.consume(self)\r\n\r\n self.state = state\r\n return state\r\n\r\n def reset_ap(self):\r\n if self.state != sprites.CharacterState.IDLE:\r\n self.ap.reset_ap(self.ap.value - 1)\r\n # print(f\"player AP: {self.ap.value}\")\r\n","repo_name":"emanresuaretne/rogue","sub_path":"logic/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":11532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"5119962026","text":"import ipaddress\nimport logging\nimport threading\nimport time\nimport typing\n\nfrom .configuration import Subordinate as SubordinateConf\nfrom .forwarder import Forwarder\nfrom .logger import LoggingMixin\n\n\nclass ForwarderSupervisor(LoggingMixin):\n \"\"\"Operations which should be performed with the forwared should be put here\n\n It should handle reconnects and determine to what (ip, port) to connect\n \"\"\"\n\n NEXT_IP_TIMEOUT = 30.0 # in seconds\n ZCONF_BUFFER_COUNT = 100\n\n class NetlocStat:\n \"\"\"Netloc triage statistics used for sorting netloc in the list\"\"\"\n\n def __init__(self, fail_count: int, when: float):\n self.fail_count = fail_count\n self.when = when\n\n def __eq__(self, other):\n return self.fail_count == other.fail_count and self.when == other.when\n\n def __gt__(self, other):\n return not (self.__eq__(other)) and not (self.__lt__(other))\n\n def __lt__(self, other):\n if self.fail_count == other.fail_count:\n return self.when > other.when # youngest first\n else:\n return self.fail_count < other.fail_count # lowest count first\n\n def __str__(self):\n return f\"{self.fail_count}-{self.when}\"\n\n logger = logging.getLogger(__file__)\n\n def __init__(self, forwarder: Forwarder):\n self.subordinate_controller_id = forwarder.subordinate.controller_id\n self.forwarder = forwarder\n self.lock = threading.RLock()\n self.connected = False\n\n # (IP, port) -> (failed_attempt_count, time)\n # initalizes with subordinate netloc\n self._netlocs: typing.Dict[typing.Tuple[ipaddress.IPv4Address, int], ForwarderSupervisor.NetlocStat] = {\n (\n ipaddress.ip_address(self.forwarder.subordinate.settings.host),\n self.forwarder.subordinate.settings.port,\n ): ForwarderSupervisor.NetlocStat(0, 0.0)\n }\n self.current_netloc: typing.Tuple[ipaddress.IPv4Address, int] = self.netlocs[0]\n self.current_netloc_start: float = time.monotonic()\n\n # start forwarder in background\n self.forwarder.start()\n\n def terminate(self):\n \"\"\"causes that forwarder eventually terminates\"\"\"\n self.debug(\"Supervisor terminating\")\n\n self.forwarder.stop()\n\n def zconf_update(self, ips: typing.List[ipaddress.IPv4Address], port: int):\n \"\"\"update ips obtained using zconf\"\"\"\n now = time.monotonic()\n\n self.info(f\"Got addresses from zconf: {[str(e) for e in ips]} :{port}\")\n\n with self.lock:\n # merge two lists\n for ip in ips:\n count = self._netlocs.get((ip, port), ForwarderSupervisor.NetlocStat(0, 0.0)).fail_count\n self._netlocs[(ip, port)] = ForwarderSupervisor.NetlocStat(count, now)\n\n # sort and fit to buffer\n sorted_netlocs = sorted(((ip, stat) for ip, stat in self._netlocs.items()), key=lambda x: x[1])[\n : 
ForwarderSupervisor.ZCONF_BUFFER_COUNT\n ]\n res = {}\n for (ip, port), stat in sorted_netlocs:\n res[(ip, port)] = stat\n\n self._netlocs = res\n\n @property\n def netlocs(self) -> typing.List[typing.Tuple[ipaddress.IPv4Address, int]]:\n \"\"\"Return current network locations where subordinate server might be running\n\n Addresses with better score first (min fail_count + most reacent)\n \"\"\"\n with self.lock:\n return [e[0] for e in sorted([(k, v) for k, v in self._netlocs.items()], key=lambda x: x[1])]\n\n def subsubordinates_config_update(self, subordinates):\n # TODO\n raise NotImplementedError()\n\n def subordinate_config_update(self, subordinate_conf: SubordinateConf):\n self.forwarder.reload_subordinate(subordinate_conf)\n\n def check(self):\n now = time.monotonic()\n\n if self.forwarder.subordinate.connected:\n # clean attempts for current netloc to keep working address high in the list\n with self.lock:\n self.current_netloc_start = now\n record = self._netlocs.get(self.current_netloc)\n if record:\n record.fail_count = 0\n record.when = time.monotonic()\n return\n\n with self.lock:\n if self.current_netloc_start + ForwarderSupervisor.NEXT_IP_TIMEOUT < now:\n # time up, lets use new netloc\n record = self._netlocs.get(self.current_netloc)\n if record:\n record.fail_count += 1\n\n # Lets try new address\n self.current_netloc = self.netlocs[0]\n self.current_netloc_start = now\n\n # Reload subordinate with a new config\n ip, port = self.current_netloc\n new_config = self.forwarder.subordinate_conf.clone_with_overrides(ip=ip, port=port)\n self.forwarder.reload_subordinate(new_config)\n\n def __str__(self):\n return f\"supervisor-{self.forwarder.subordinate.controller_id}\"\n","repo_name":"turris-cz/foris-controller-foris-forwarder","sub_path":"foris_forwarder/supervisor.py","file_name":"supervisor.py","file_ext":"py","file_size_in_byte":5125,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"20739939295","text":"import argparse\nimport collections\nimport datetime\nimport glob\nimport sys\n\nimport apache_log_parser\nfrom tqdm import tqdm\n\n\ndef day_checker(date):\n try:\n return datetime.datetime.strptime(date, '%Y/%m/%d')\n except ValueError:\n print(\"Please input correct date format\")\n print(\"Correct format is YYYY/MM/dd, like {}\".format(\n datetime.datetime.now().strftime(\"%Y/%m/%d\")))\n sys.exit()\n\n\ndef write_csv(name, save_dict):\n with open(name, 'w') as f:\n f.write(\"time,# access\\n\")\n\n for key in save_dict.keys():\n f.write(\"%s,%s\\n\" % (key, save_dict[key]))\n print(\"save csv!\")\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Appach log profiler')\n\n parser.add_argument('--file', '-f', action='append',\n help=\"Input log file path\")\n parser.add_argument('--directory', '-d', action='append',\n help=\"Input directory path in log files\")\n parser.add_argument('--start', default='1976/03/25',\n help=\"Input the first day of the period you want to profile\")\n parser.add_argument('--end', default='2020/06/26',\n help=\"Input the last day of the period you want to profile\")\n parser.add_argument('--output_name', default='time_result.csv',\n help=\"Input output file name\")\n\n args = parser.parse_args()\n # firstとlastの指定があっているかのエラー\n date_first = day_checker(args.start)\n date_end = day_checker(args.end)\n log_line_data = []\n line_parser = apache_log_parser.make_parser(\n \"%h %l %u %t \\\"%r\\\" %>s %b \\\"%{Referer}i\\\" \\\"%{User-Agent}i\\\"\")\n if args.file is not 
None:\n        for file_path in args.file:\n            with open(file_path) as f:\n                log = f.readlines()\n            for i in log:\n                log_line_data.append(line_parser(i))\n    if args.directory is not None:\n        for dir_path in args.directory:\n            for file_path in tqdm(glob.glob(dir_path + \"*\"), desc='file loading...'):\n                with open(file_path) as f:\n                    log = f.readlines()\n                for i in log:\n                    log_line_data.append(line_parser(i))\n    l_time = sorted([d.get('time_received_datetimeobj').replace(minute=0, second=0, microsecond=0)\n                     for d in log_line_data if date_first <= d.get('time_received_datetimeobj') and d.get('time_received_datetimeobj') <= date_end])\n    c_time = collections.Counter(l_time)\n    l_host = sorted([d.get('remote_host') for d in log_line_data if date_first <= d.get(\n        'time_received_datetimeobj') and d.get('time_received_datetimeobj') <= date_end])\n    c_host = collections.Counter(l_host)\n    write_csv(args.output_name, c_time)\n    for host, count in c_host.most_common():\n        print(\"Host: {}, access count: {}\".format(host, count))\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"1234567890Joe/Apache_log_profiler","sub_path":"for_big_memory.py","file_name":"for_big_memory.py","file_ext":"py","file_size_in_byte":2951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"27686856064","text":"# -*- coding: utf-8 -*-\n\nimport logging\n\nfrom flask import request\nfrom flask_restful import Resource\n\nfrom flask_jwt import jwt_required\nfrom exceptions import PreconditionFailException\nfrom services import OrderService\n\n\nclass OrderHandler(Resource):\n\n    def __init__(self):\n        self.service = OrderService()\n        super(OrderHandler, self).__init__()\n\n    @staticmethod\n    def _error_message(message, reason, status_code):\n        return {\"message\": message, \"reason\": reason}, status_code\n\n    @jwt_required()\n    def get(self, id=None):\n        if id is None:\n            return self.service.list()\n        else:\n            item = self.service.read(id)\n            if not item:\n                return {}, 404\n            return item\n\n    @jwt_required()\n    def post(self):\n        data = request.get_json()\n        try:\n            item = self.service.create(data)\n            return item, 200\n        except Exception as e:\n            logging.error(\"Cannot create a new Order %s\" % e)\n            return {\"message\": \"Cannot create a new Order\", \"reason\": \"Unknown reason.\"}, 400\n\n    @jwt_required()\n    def delete(self, id=None):\n        try:\n            self.service.delete(id)\n        except PreconditionFailException as e:\n            logging.error(\"Cannot delete data: %s\" % e)\n            return {\"message\": \"You must provide an id to delete\"}, 412\n        except Exception as e:\n            logging.error(\"Cannot delete data: %s\" % e)\n\n        return {}, 200\n","repo_name":"geraldoandradee/orders-microservice","sub_path":"handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"28158281328","text":"#!/usr/bin/env python3\n\n# Miscellaneous operating system interfaces\n# https://docs.python.org/3/library/os.html\nimport os\n\n# Object-oriented filesystem paths\n# https://docs.python.org/3/library/pathlib.html\nimport pathlib\n\n# URL handling modules\n# https://docs.python.org/3/library/urllib.html\nimport urllib\n\n# Support for gzip files\n# https://docs.python.org/3/library/gzip.html\nimport gzip\n\nfrom dprs.exceptions import ContentsFileURLNotFound\n\n\ndef download_contents_file(contents_file_url: str = None,\n                           output_dir: str = os.getcwd()+os.sep+\"outputs\",\n                           reuse_contents_file: bool = True) -> str:\n    \"\"\"\n    This 
function takes a Debian Contents-*.gz file and extracts it to the output folder.\n\n Arguments:\n contents_file_url: Debian Contents-*.gz file URL\n output_dir: Output directory where the file will be downloaded and extracted.\n reuse_contents_file: Reuse a previously downloaded Debian Contents-*.gz file.\n\n Returns:\n str: path of the content index file that was downloaded and extracted.\n \"\"\"\n # Check URL list length\n if len(contents_file_url) == 0:\n raise ContentsFileURLNotFound(\"Contents-*.gz file URL is not found!\")\n\n # Check output directory address\n if output_dir is None:\n output_dir = os.getcwd()+os.sep+\"outputs\"\n\n # Check output directory and create it\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n basename = os.path.basename(contents_file_url)\n file_name = os.path.splitext(basename)[0]\n\n # Contents-*.gz file path\n output_gz_file = pathlib.Path(output_dir) / basename\n\n # Extracted Contents-*.gz file path\n output_file = pathlib.Path(output_dir) / file_name\n\n # If Contents-*.gz file exists check reuse flag\n if output_file.exists():\n if reuse_contents_file:\n return output_file\n\n # Download the Contents-*.gz file\n with urllib.request.urlopen(contents_file_url) as response:\n data = response.read()\n with open(output_gz_file, \"wb\") as buffer:\n buffer.write(data)\n\n # Extract the Contents-*.gz file\n with gzip.open(output_gz_file, \"rb\") as buffer:\n data = buffer.read()\n with open(output_file, \"wb\") as buffer:\n buffer.write(data)\n\n return output_file\n","repo_name":"kaan-keskin/debian-package-repository-statistics","sub_path":"dprs/download_contents_file.py","file_name":"download_contents_file.py","file_ext":"py","file_size_in_byte":2310,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"42116645525","text":"ongoingallowance = 500\r\nsavings = 500\r\nsavings +=300\r\n\r\nongoingallowance-=50\r\nnumberofdaystosave = (5000 - ongoingallowance)/500\r\nongoingallowance = ongoingallowance + (30-10)*7\r\nprint(savings %500)\r\nprint(9**3)\r\nprint (savings // 500)\r\n\r\ntext = \"My name is haseeb mehmood dhatt.That's it. 
Thats the code\"\r\ntext = 'my name is haseeb dhatt'\r\n\r\na=7.5\r\nb=3.234234\r\nc=a/b\r\n\r\nfirstcity=\"New York\"\r\nsecondcity=\"Seattle\"\r\nnumberoftimes = 5\r\nfavourite= \"My favourite city are \" + firstcity + \" and \"+ secondcity + \" And travelled there \"+str(numberoftimes) +\" times\"\r\nprint(favourite)\r\n","repo_name":"haseeb549/Python-Basics","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"17042259916","text":"import requests\nimport re\nfrom subprocess import Popen, PIPE\n\nBOT_TOKEN = \"YOUR_BOT_TOKEN\"\nbase_url = f\"https://api.telegram.org/bot{BOT_TOKEN}\"\n\n\n# create func that get chat id\ndef get_chat_id(update):\n chat_id = update[\"message\"][\"chat\"][\"id\"]\n return chat_id\n\n\n# create func that get message text\ndef get_message_text(update):\n message_text = update[\"message\"][\"text\"]\n return message_text\n\n\n# create func that get last_update\ndef last_update(req):\n response = requests.get(req + \"getUpdates\")\n response = response.json()\n result = response[\"result\"]\n total_updates = len(result) - 1\n return result[total_updates]\n\n\n# create func that let bot send message to user\ndef send_message(chat_id, message_text):\n params = {\"chat_id\": chat_id, \"text\": message_text}\n response = requests.post(base_url + \"sendMessage\", data=params)\n return response\n\n\n# create func that get dog picture\ndef get_dog_pic():\n allowed_extension = ['jpg', 'jpeg', 'png']\n file_extension = ''\n while file_extension not in allowed_extension: # Get valid url for picture (jpg, jpeg, png)\n contents = requests.get('https://random.dog/woof.json').json()\n photo = contents['url']\n print(photo)\n file_extension = re.search(\"([^.]*)$\", photo).group(1).lower()\n return photo\n\n\n# create func that let bot send photo to user\ndef send_dog_photo(chat_id, photo_url):\n params = {\"chat_id\": chat_id, \"photo\": photo_url}\n response = requests.post(base_url + \"sendPhoto\", data=params)\n return response\n\n\n# create func that get dog gif\ndef get_dog_gif():\n allowed_extension = ['mp4', 'gif']\n file_extension = ''\n while file_extension not in allowed_extension: # Get valid url for gif (mp4, gif)\n contents = requests.get('https://random.dog/woof.json').json()\n gif = contents['url']\n print(gif)\n file_extension = re.search(\"([^.]*)$\", gif).group(1).lower()\n return gif\n\n\n# create func that let bot send gif to user\ndef send_dog_gif(chat_id, gif_url):\n params = {\"chat_id\": chat_id, \"document\": gif_url}\n response = requests.post(base_url + \"sendDocument\", data=params)\n return response\n\n\n# create func that get link video of youtube\ndef get_links(link):\n # time.sleep(7)\n # link = get_message_text(update)\n cmd = \"youtube-dl -g {}\".format(link)\n r = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE).communicate()\n result = []\n res = r[0].split()\n for i in range(len(res)):\n result.append(res[i])\n return result\n\n\n# create main func for navigate or reply message back\ndef main():\n update_id = last_update(req=base_url)[\"update_id\"]\n while True:\n update = last_update(req=base_url)\n if update_id == update[\"update_id\"]:\n if get_message_text(update).lower() == \"hi\" or get_message_text(\n update) == \"/start\" or get_message_text(update).lower() == \"hello\":\n send_message(get_chat_id(update),\n \"Hello Welcome To My bot.\\nCommands(#Fun)👇 \\ndog: send photo of dog\\ndog gif: 
send gif of dog\\nAbout Command👇\\nabout: information of me\\n\\n👉 and for get download link from youtube send youtube video link\")\n\n elif get_message_text(update).lower() == \"dog\":\n send_dog_photo(get_chat_id(update), get_dog_pic())\n print(f\"\\033[33mDog Picture Sended. \\nDog Picture Url -> {get_dog_pic()}\\033[39m\")\n\n elif get_message_text(update).lower() == \"dog gif\":\n send_dog_gif(get_chat_id(update), get_dog_gif())\n print(f\"\\033[33mDog Gif Sended. \\nDog Gif Url -> {get_dog_gif()}\\033[39m\")\n\n elif \"https://www.youtube.com\" in get_message_text(\n update).lower() or \"http://www.youtube.com\" in get_message_text(\n update) or \"youtube.com\" in get_message_text(update) or \"m.youtube.com\" in get_message_text(\n update) or \"https://m.youtube.com\" in get_message_text(\n update) or \"http://m.youtube.com\" in get_message_text(update):\n print(f\"\\033[33mYoutube Link -> {get_message_text(update)}\\033[39m\")\n links = get_links(get_message_text(update))\n send_message(get_chat_id(update), \"video 👇\\n{0}\\nAudio 👇\\n{1}\".format(links[0], links[1]))\n print(\"\\033[33mDownload Links Sended.\\033[39m\")\n\n elif get_message_text(update).lower() == \"about\":\n send_message(get_chat_id(update),\n \"#Author : Sullivan[Epic_R_R]\\n#Email : salar.z@ourlook.de\\n\")\n\n else:\n send_message(get_chat_id(update), \"Sorry Not Understand What You Inputted :(\")\n update_id += 1\n\n\nif __name__ == \"__main__\":\n print(\"\\033[32mBot Starting\\033[39m\")\n main()\n","repo_name":"Epic-R-R/YoutubeDL_Bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4820,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"39284032486","text":"#Examples that breakdown for loops in python\n\n#In this example we are going over how for loops are used in Python\nnames = [\"Adam\",\"Alex\",\"Mariah\",\"Martine\",\"Columbus\"]\n\nfor val in names:\n print(val)\n\n#================================================\n#this example of a for loop goes through a dictionary\nprint(\"\\n\")\nwebster = {\n \"Aardvark\" : \"A star of a popular children's cartoon show.\",\n \"Baa\" : \"The sound a goat makes.\",\n \"Carpet\": \"Goes on the floor.\",\n \"Dab\": \"A small amount.\"\n}\n\n# Add your code below!\nfor val in webster:\n print(\"This will print the keys \" + val)\n print(\"This will print the value \" + webster[val])\n \n\n#================================\n#An example of a for loop breakdown using if condition\na = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]\n\nfor num in a:\n if num % 2==0:\n print (num)\n\n\n#===============================\n#Another example that looks at a Python breakdown\n# Write your function below!\ndef fizz_count(x):\n count = 0\n \n for item in x:\n if item == \"fizz\":\n count += 1\n \n return count\n\nprint(fizz_count([\"fizz\", \"cat\", \"fizz\"]))\n\n#==========================================\n#An example that goes over a while loop\ncount = 0\n\nwhile count < 10: # Add a colon\n print(count)\n count +=1\n\n#==========================================\n#Classic for loop breakdown\n#instead of normally of how we do for(i=0;i<3;i++), we do the\n#following like down below, it will print out 0,1,2\nfor x in range(0, 3):\n print (\"We're on time %d\" % (x))\n\n\n#===========================================\n#Doing the for loop like so in Python will print every number from 0 to 7\nfor value in range(7):\n print (value)\n\n\n#===========================================\n#A more 
complex version of for loop in Python\nhobbies = []\n\n# Add your code below!\n\nfor num in range(3):\n hobby = input(\"Tell me one of your favorite hobbies: \")\n hobbies.append(hobby)\n\nprint(hobbies)\n\n\n#==============================================\n#An example that goes over every character in a string using a for loop\nthing = \"spam!\"\n\nfor c in thing:\n print(c)\n\nprint(\"\\n\")\nword = \"eggs!\"\n\nfor item in word:\n print(item)\n\n#================================================\n#This example look at printing out multiple variables in print\nd = {'a': 'apple', 'b': 'berry', 'c': 'cherry'}\n\nfor key in d:\n print (key, d[key])","repo_name":"JPrendy/python-exercises-and-notes","sub_path":"PythonForLoopsBreakdown.py","file_name":"PythonForLoopsBreakdown.py","file_ext":"py","file_size_in_byte":2332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"31138731389","text":"def getScore(s):\n s_len = len(s)\n l = [[1 for x in range(s_len+1)] for _ in range(s_len+1)]\n maxx = 1\n maxx2 = 0\n maxxRanges = []\n\n for threshold in range(2, s_len+1):\n for i in range(s_len-threshold+1):\n j = i+threshold-1\n if s[i] == s[j] and threshold == 2:\n l[i][j] = 2\n elif s[i] == s[j]:\n l[i][j] = l[i+1][j-1] + 2\n else:\n l[i][j] = max(l[i][j-1], l[i+1][j]);\n\n if l[i][j] > maxx:\n maxx = l[i][j]\n maxxRanges.append((i,j))\n\n \n maxProduct = 0\n for i in range(len(maxxRanges)-1, -1, -1):\n maxxRange = maxxRanges[i]\n maxx = l[maxxRange[0]][maxxRange[1]]\n\n if s[0:maxxRange[0]] and s[maxxRange[1]+1:s_len]:\n maxx2 = max(l[0][maxxRange[0]-1], l[maxxRange[1]+1][s_len])\n elif s[0:maxxRange[0]]:\n maxx2 = l[0][maxxRange[0]-1]\n elif s[maxxRange[1]+1:s_len]:\n maxx2 = l[maxxRange[1]+1][s_len-1]\n tmpProduct = 1\n if maxx*maxx2:\n tmpProduct = maxx*maxx2\n\n if tmpProduct > maxProduct:\n maxProduct = tmpProduct\n\n for row in l:\n print(row)\n return maxProduct\n\ns = 'abbbdbbad'\nprint(getScore(s))","repo_name":"erjantj/hackerrank","sub_path":"fun-with-polindromes.py","file_name":"fun-with-polindromes.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"18159069745","text":"from collections import deque\n\n\ndef solution(bridge_length, weight, truck_weights):\n leftTruck = deque(truck_weights)\n answer = 0\n q = deque()\n nowTruckWeight = 0\n # 아직 다리를 건널 트럭이 있을때까지 루프\n while leftTruck or q:\n # 트럭이 다리를 다 건넜으면 빼준다.\n if q and q[0][1] == bridge_length:\n nowTruckWeight -= q[0][0]\n q.popleft()\n # 건너지 못한 트럭이있고 다리에 오를수있으면 트럭을 다리에 올린다.\n if leftTruck and nowTruckWeight + leftTruck[0] <= weight and len(q) < bridge_length:\n x = leftTruck.popleft()\n q.append([x, 0])\n nowTruckWeight += x\n\n for x in range(len(q)):\n q[x][1] += 1\n # print(\"%d초\" % answer)\n # print(q)\n answer += 1\n\n return answer\n\n\nprint(solution(100, 100, [10, 10, 10, 10, 10, 10, 10, 10, 10, 10]))\n","repo_name":"Kyun2da/Algorithm","sub_path":"python/programmers/level2/다리를지나는트럭.py","file_name":"다리를지나는트럭.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"31990404145","text":"import asyncio\nfrom time import sleep\n\nfrom garagepi.common.const import *\nfrom garagepi.data.rpi import gpio\n\nCONFIG_PIN_MODES = {\n CONF_CLOSE_GARAGE_PIN: gpio.IN,\n CONF_OPEN_GARAGE_PIN: gpio.IN,\n CONF_POSITIONS: gpio.OUT\n}\n\n\nclass Interactive:\n should_quit = False\n\n def 
run(self):\n        while not self.should_quit:\n            print('Enter pin: ')\n            pin = int(input())\n            print('1 or 0? ')\n            value = int(input())\n            gpio.output(pin, value)\n\n\nclass GaragePiAssistant:\n    \"\"\"Main class for interacting with the garage door and data layer.\"\"\"\n\n    __slots__ = ['config', 'api', 'interactive', '_open_garage_pin', '_close_garage_pin',\n                 '_position_pins', '_pin_positions']\n\n    def __init__(self, config, api, interactive=None):\n        self._open_garage_pin = None\n        self._close_garage_pin = None\n        self._position_pins = {}\n        self._pin_positions = {}\n        self.config = config\n        self.api = api\n        self.interactive = None if interactive is None else Interactive()\n        self._setup_gpio_pins()\n\n    def run(self):\n        asyncio.get_event_loop().create_task(self.api.get_updates())\n        if self.interactive is not None:\n            self.interactive.run()\n\n    def _setup_gpio_pins(self):\n        for config, pin_mode in CONFIG_PIN_MODES.items():\n            print('Config key {}, mode {}'.format(config, pin_mode))\n            config_value = self.config.get(config, None)\n            if config_value is None:\n                continue\n            print('Config value {} {}'.format(config, config_value))\n            if not isinstance(config_value, dict) and not isinstance(config_value, list):\n                self._setup_pin(config, config_value)\n            elif isinstance(config_value, dict):\n                for key, value in config_value.items():\n                    self._setup_pin(key, value)\n\n    def _setup_pin(self, config_key, position_pin):\n        if CONFIG_PIN_MODES.get(config_key, gpio.OUT) == gpio.IN:\n            gpio.setup(\n                position_pin,\n                gpio.IN,\n                pull_up_down=gpio.PUD_DOWN)\n            if config_key == CONF_OPEN_GARAGE_PIN:\n                self._open_garage_pin = position_pin\n                gpio.add_event_callback(position_pin, self._handle_open_command)\n            elif config_key == CONF_CLOSE_GARAGE_PIN:\n                self._close_garage_pin = position_pin\n                gpio.add_event_callback(position_pin, self._handle_close_command)\n        else:\n            gpio.setup(\n                position_pin,\n                gpio.OUT)\n            self._position_pins[config_key] = position_pin\n            # gpio.add_event_callback(gpio, self._handle_output_change)\n\n    def _handle_open_command(self, channel, value):\n        print('Open handler {} {}'.format(channel, value))\n        if channel != self._open_garage_pin or value != 1:  # only act on a rising edge of the open pin\n            return\n        print('Opening')\n        self._open()\n\n    def _open(self):\n        for position in ['75', '50', '25']:\n            if position not in self._position_pins:\n                print('{} not in {}'.format(position, self._position_pins))\n                continue\n            print(position.replace('_pin', ''))\n            sleep(3)\n\n    def _handle_close_command(self, channel, value):\n        print('Close handler {} {}'.format(channel, value))\n        if channel != self._close_garage_pin or value != 1:  # only act on a rising edge of the close pin\n            return\n        print('Closing')\n        self._close()\n\n    def _close(self):\n        for position in ['25', '50', '75']:\n            if position not in self._position_pins:\n                continue\n            print(position.replace('_pin', ''))\n            sleep(3)\n","repo_name":"constructorfleet/GaragePi-Assistant","sub_path":"garagepi/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"28408868272","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\n# Copy 'sam2coverage.py' script into '/data1/bio/projects/ndanilova/colitis_crohn/test/' dir\n\n# CLI preparing:\n\nexport DOCKER_IMAGE_NAME=ivasilyev/bwt_filtering_pipeline_worker:latest && \\\ndocker pull ${DOCKER_IMAGE_NAME} && \\\ndocker run --rm -v /data:/data -v /data1:/data1 -v /data2:/data2 --net=host -it ${DOCKER_IMAGE_NAME} python3\n\"\"\"\n\nimport subprocess\nimport multiprocessing\n\n\ndef 
external_route(*args):\n process = subprocess.Popen(args, shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n (output, error) = process.communicate()\n process.wait()\n if error:\n print(error)\n if output:\n print(output.decode(\"utf-8\"))\n\n\ndef multi_core_queue(function_name, queue):\n pool = multiprocessing.Pool()\n pool.map(function_name, queue)\n pool.close()\n pool.join()\n\n\n\"\"\"\n# Manual checkout\n\nexport DOCKER_ALIAS='docker run --rm -v /data:/data -v /data1:/data1 -v /data2:/data2 --net=host -it ivasilyev/bwt_filtering_pipeline_worker ' \n\nrm -f /data2/bio/Metagenomes/custom/25_ecoli_genes/Mapped_reads/136VZK_no_hg19_25_ecoli_genes.bam \\\n/data2/bio/Metagenomes/custom/25_ecoli_genes/Mapped_reads/136VZK_no_hg19_25_ecoli_genes_sorted.bam \\\n/data1/bio/projects/ndanilova/colitis_crohn/test/136VZK_no_hg19_25_ecoli_genes.tsv\n\n${DOCKER_ALIAS} samtools view -t /data/reference/custom/25_ecoli_genes/index/25_ecoli_genes_samtools.fai \\\n-bS /data2/bio/Metagenomes/custom/25_ecoli_genes/Mapped_reads/136VZK_no_hg19_25_ecoli_genes.sam \\\n-o /data2/bio/Metagenomes/custom/25_ecoli_genes/Mapped_reads/136VZK_no_hg19_25_ecoli_genes.bam \\\n-@ 32\n\nfile /data2/bio/Metagenomes/custom/25_ecoli_genes/Mapped_reads/136VZK_no_hg19_25_ecoli_genes.bam\n\n${DOCKER_ALIAS} samtools sort /data2/bio/Metagenomes/custom/25_ecoli_genes/Mapped_reads/136VZK_no_hg19_25_ecoli_genes.bam \\\n-o /data2/bio/Metagenomes/custom/25_ecoli_genes/Mapped_reads/136VZK_no_hg19_25_ecoli_genes_sorted.bam \\\n-@ 32 \n\nfile /data2/bio/Metagenomes/custom/25_ecoli_genes/Mapped_reads/136VZK_no_hg19_25_ecoli_genes_sorted.bam\n\n${DOCKER_ALIAS} genomeCoverageBed -ibam /data2/bio/Metagenomes/custom/25_ecoli_genes/Mapped_reads/136VZK_no_hg19_25_ecoli_genes_sorted.bam \\\n> /data1/bio/projects/ndanilova/colitis_crohn/test/136VZK_no_hg19_25_ecoli_genes.tsv\n\nfile /data1/bio/projects/ndanilova/colitis_crohn/test/136VZK_no_hg19_25_ecoli_genes.tsv\n\n\"\"\"\n\n\ndef mp_get_coverage(external_input):\n external_route('python3', '/data1/bio/projects/ndanilova/colitis_crohn/test/sam2coverage.py',\n '-i', external_input,\n '-f', \"/data/reference/custom/25_ecoli_genes/index/25_ecoli_genes_samtools.fai\",\n '-g', \"/data/reference/custom/25_ecoli_genes/index/25_ecoli_genes_samtools.genome\",\n \"-a\", \"/data/reference/custom/25_ecoli_genes/index/25_ecoli_genes_annotation.txt\",\n '-o', \"/data2/bio/Metagenomes/custom/25_ecoli_genes\")\n\n\ndef sam2coverage():\n [mp_get_coverage(i) for i in samFileNamesList]\n # multi_core_queue(mp_get_coverage, samFileNamesList)\n\n\nsamFileNamesList = sorted(subprocess.getoutput(\"ls -d /data2/bio/Metagenomes/custom/25_ecoli_genes/Mapped_reads/*.sam\").split(\"\\n\"))\nsam2coverage()\n\n# subprocess.getoutput(\"samtools view -Su /data2/bio/Metagenomes/custom/25_ecoli_genes/Mapped_reads/136VZK_no_hg19_25_ecoli_genes.sam -@ 32 | \\\n# samtools sort - -o /data1/bio/projects/ndanilova/colitis_crohn/test/136VZK_no_hg19_25_ecoli_genes.sorted.bam -@ 32 \")\n","repo_name":"ivasilyev/curated_projects","sub_path":"tgrigoreva/25_ecoli_genes/samtools_prod_fix.py","file_name":"samtools_prod_fix.py","file_ext":"py","file_size_in_byte":3509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"19421527100","text":"# Start with your program from Exercise 8-7. Write a while\n# loop that allows users to enter an album’s artist and title. 
Once you have that\n# information, call make_album() with the user’s input and print the dictionary\n# that’s created. Be sure to include a quit value in the while loop\n\n\ndef make_album():\n artist_message = \"Enter album's artist: \\n\"\n title_message = \"Enter album's title: \\n\"\n album_dictionary = {}\n\n while True:\n artist = input(artist_message)\n if artist == \"quit\":\n break\n title = input(title_message)\n if title == \"quit\":\n break\n album_dictionary[\"artist\"] = artist.title()\n album_dictionary[\"title\"] = title.title()\n print(\"This album is make by \" + album_dictionary[\"artist\"] + \" and the name is \" + album_dictionary[\"title\"] + \".\")\n\nmake_album()\n","repo_name":"yylhyyw/Python_Crash_Course_Exercise","sub_path":"Chapter8/8-8_User_Albums.py","file_name":"8-8_User_Albums.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"70618368510","text":"import uuid as uuid\n\nfrom colorfield.fields import ColorField\nfrom django.core.validators import MaxValueValidator, MinValueValidator\nfrom django.db import models\nfrom django.utils.translation import gettext as _\nfrom django_countries.fields import CountryField\nfrom phonenumber_field.modelfields import PhoneNumberField\n\nfrom accounts.models import Customer\n\n\nclass BaseModel(models.Model):\n class Meta:\n abstract = True\n\n create_datetime = models.DateTimeField(null=True, auto_now_add=True)\n last_update = models.DateTimeField(null=True, auto_now=True)\n\n\nclass Retailer(BaseModel):\n user = models.OneToOneField(Customer, on_delete=models.CASCADE, related_name=\"retailer\", null=False)\n uuid = models.UUIDField(default=uuid.uuid4, db_index=True, unique=True)\n company_name = models.CharField(max_length=50, blank=True, null=False)\n phone_number = PhoneNumberField(blank=True, null=False)\n email = models.EmailField(max_length=128)\n address = models.CharField(_(\"address\"), max_length=128, null=False, blank=True)\n city = models.CharField(_(\"city\"), max_length=64, blank=True, null=False)\n country = CountryField(blank=True, null=False)\n zip_code = models.CharField(_(\"zip code\"), max_length=5, blank=True, null=False)\n description = models.TextField(\n _(\"description\"),\n max_length=500,\n null=False,\n blank=True,\n )\n photo = models.ImageField(upload_to=\"users_photo/\", null=True, blank=True, default=\"users_photo/default.jpg\")\n service_fee = models.DecimalField(\n max_digits=19,\n decimal_places=2,\n null=False,\n blank=True,\n default=1.00,\n validators=[MinValueValidator(1.00)],\n )\n\n def __str__(self):\n return f\"{self.company_name} - {self.city} ({self.get_country_display()})\"\n\n def vehicles_count(self):\n return self.vehicle.count()\n\n def save(self, *args, **kwargs):\n self.full_clean()\n return super().save(*args, **kwargs)\n\n\nclass Vehicle(BaseModel):\n YEAR_OF_PRODUCTION_MIN = 1900\n YEAR_OF_PRODUCTION_MAX = 2000\n\n class VEHICLE_CATEGORY_CHOICES(models.IntegerChoices):\n EU_ELITE = 0, \"European Elite Classic\"\n US_RETRO = 1, \"US Retro\"\n EU_LEGEND = 2, \"European Legend\"\n RESTORED_OLDTIMER = 3, \"Fully Restored Oldtimer\"\n US_MUSCLE = 4, \"US Muscle Car\"\n OFFROAD = 5, \"Offroad Classic\"\n ASIAN_LEGEND = 6, \"Asian Legends\"\n RARE_SUPERCAR = 7, \"Rare Supercar\"\n CUSTOM = 8, \"Deep Tuned Custom\"\n EU_RACING = 9, \"European Racing Classics\"\n OTHER = 10, \" Other\"\n\n class VEHICLE_CONDITION_CHOICES(models.IntegerChoices):\n EXCELLENT = 0, 
\"Excellent\"\n VERY_GOOD = 1, \"Very Good\"\n GOOD = 2, \"Good\"\n SATISFACTORY = 3, \"Satisfactory\"\n POOR = 4, \"Poor\"\n OUT_OF_ORDER = 5, \"Out of order\"\n OTHER = 6, \"Other\"\n\n class VEHICLE_TRANSMISSION_CHOICES(models.IntegerChoices):\n M = 0, \"Manual\"\n A = 1, \"Automatic\"\n OTHER = 2, \"Other\"\n\n owner = models.ForeignKey(\n Customer,\n related_name=\"vehicle\",\n on_delete=models.CASCADE,\n null=True,\n blank=True,\n )\n retailer = models.ForeignKey(\n Retailer,\n related_name=\"vehicle\",\n on_delete=models.SET_DEFAULT,\n null=True,\n blank=True,\n default=1,\n )\n vin = models.CharField(null=False, blank=False, unique=True, max_length=17, primary_key=True, db_index=True)\n category = models.PositiveSmallIntegerField(\n choices=VEHICLE_CATEGORY_CHOICES.choices,\n default=VEHICLE_CATEGORY_CHOICES.OTHER,\n null=False,\n blank=True,\n )\n brand = models.CharField(\n max_length=50,\n null=False,\n blank=True,\n )\n model = models.CharField(\n max_length=50,\n null=False,\n blank=True,\n )\n production_year = models.PositiveSmallIntegerField(\n _(\"year\"),\n null=True,\n blank=True,\n validators=[MaxValueValidator(YEAR_OF_PRODUCTION_MAX), MinValueValidator(YEAR_OF_PRODUCTION_MIN)],\n )\n condition = models.PositiveSmallIntegerField(\n choices=VEHICLE_CONDITION_CHOICES.choices,\n default=VEHICLE_CONDITION_CHOICES.OTHER,\n null=True,\n blank=True,\n )\n mileage = models.PositiveBigIntegerField(null=True, blank=True, validators=[MaxValueValidator(3000000)])\n seats = models.SmallIntegerField(null=True, blank=True, validators=[MaxValueValidator(7), MinValueValidator(1)])\n color = ColorField(blank=True, null=True)\n transmission = models.PositiveSmallIntegerField(\n choices=VEHICLE_TRANSMISSION_CHOICES.choices,\n default=VEHICLE_TRANSMISSION_CHOICES.OTHER,\n null=True,\n blank=True,\n )\n image = models.ImageField(\n default=\"default.png\",\n upload_to=\"media/vehicle\",\n null=True,\n blank=True,\n )\n price = models.DecimalField(\n max_digits=19, decimal_places=2, null=True, blank=True, validators=[MinValueValidator(1.00)]\n )\n description = models.TextField(\n max_length=500,\n null=False,\n blank=True,\n )\n\n def __str__(self):\n return f\"{self.brand} {self.model} Price: {self.price} $ \"\n\n def save(self, *args, **kwargs):\n self.full_clean()\n return super().save(*args, **kwargs)\n\n\nclass Employee(BaseModel):\n class EMPLOYEE_RANK_CHOICES(models.IntegerChoices):\n CEO = 0, \"Chief Executive Officer\"\n GENERAL_MANAGER = 1, \"General Manager / Administrator\"\n SALES_MANAGER = 2, \"Sales Manager\"\n\n user = models.OneToOneField(Customer, on_delete=models.CASCADE, related_name=\"employee\")\n retailer = models.ForeignKey(to=\"Retailer\", related_name=\"employees\", on_delete=models.CASCADE)\n uuid = models.UUIDField(\n primary_key=True,\n unique=True,\n editable=False,\n default=uuid.uuid4,\n )\n first_name = models.CharField(\n max_length=128,\n null=False,\n blank=True,\n )\n last_name = models.CharField(\n max_length=128,\n null=False,\n blank=True,\n )\n rank = models.PositiveSmallIntegerField(\n choices=EMPLOYEE_RANK_CHOICES.choices, default=EMPLOYEE_RANK_CHOICES.SALES_MANAGER\n )\n email = models.EmailField(\n max_length=128,\n null=False,\n blank=True,\n )\n\n\nclass DeliveryService(BaseModel):\n uuid = models.UUIDField(default=uuid.uuid4, db_index=True, unique=True)\n company_name = models.CharField(max_length=50, null=False, blank=True)\n phone_number = PhoneNumberField(null=False, blank=True)\n email = models.EmailField(\n max_length=128,\n null=False,\n 
blank=True,\n    )\n    address = models.CharField(\n        _(\"address\"),\n        max_length=128,\n        null=False,\n        blank=True,\n    )\n    city = models.CharField(\n        _(\"city\"),\n        max_length=64,\n        null=False,\n        blank=True,\n    )\n    country = CountryField(\n        null=False,\n        blank=True,\n    )\n    zip_code = models.CharField(\n        _(\"zip code\"),\n        max_length=5,\n        null=False,\n        blank=False,\n    )\n    international_delivery = models.BooleanField(default=False)\n\n\nclass Invoices(BaseModel):\n    invoice_date = models.DateTimeField(\n        null=False,\n        blank=False,\n    )\n    billing_address = models.CharField(_(\"address\"), max_length=128, default=\"N/A\")\n    billing_city = models.CharField(\n        _(\"city\"),\n        max_length=64,\n        blank=True,\n        null=False,\n    )\n    billing_country = CountryField(\n        null=False,\n        blank=True,\n    )\n    billing_zip_code = models.CharField(\n        _(\"zip code\"),\n        max_length=5,\n        null=False,\n        blank=False,\n    )\n    total = models.DecimalField(\n        max_digits=19,\n        decimal_places=2,\n        null=False,\n        blank=False,\n    )\n\n\nclass InvoiceItems(BaseModel):\n    vehicle = models.ForeignKey(to=\"Vehicle\", related_name=\"invoice_items_vehicle\", on_delete=models.CASCADE)\n    delivery_service_id = models.ForeignKey(\n        to=\"DeliveryService\",\n        related_name=\"invoice_items\",\n        on_delete=models.CASCADE,\n        null=False,\n        blank=False,\n    )\n    invoices = models.ForeignKey(\n        to=\"Invoices\",\n        related_name=\"invoice_items\",\n        on_delete=models.CASCADE,\n        null=False,\n        blank=False,\n    )\n    invoice_line_id = models.IntegerField(primary_key=True, blank=False, db_index=True)\n    price = models.ForeignKey(\n        to=\"Vehicle\",\n        related_name=\"invoice_items_price\",\n        on_delete=models.CASCADE,\n        null=False,\n        blank=False,\n    )\n    delivery_fee = models.DecimalField(\n        max_digits=19,\n        decimal_places=2,\n        null=False,\n        blank=False,\n    )\n    insurance_fee = models.DecimalField(\n        max_digits=19,\n        decimal_places=2,\n        null=False,\n        blank=False,\n    )\n","repo_name":"DelDmc/OldTimers","sub_path":"src/oldtimers/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"70607885632","text":"# -*- coding: utf-8 -*-\nfrom accelbrainbase.observabledata._torch.neural_networks import NeuralNetworks\nfrom accelbrainbase.iteratable_data import IteratableData\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.optim.optimizer import Optimizer, required\nfrom torch.optim.adam import Adam\nfrom logging import getLogger\n\n\nclass AutoEncoder(NeuralNetworks):\n    '''\n    Auto-Encoder.\n\n    References:\n        - Kamyshanska, H., & Memisevic, R. (2014). The potential energy of an autoencoder. 
IEEE transactions on pattern analysis and machine intelligence, 37(6), 1261-1273.\n '''\n\n # `bool` that means initialization in this class will be deferred or not.\n __init_deferred_flag = False\n\n def __init__(\n self,\n encoder,\n decoder,\n computable_loss,\n learning_rate=1e-05,\n ctx=\"cpu\",\n tied_weights_flag=False,\n not_init_flag=False,\n ):\n '''\n Init.\n\n Args:\n encoder: is-a `NeuralNetworks`.\n decoder: is-a `NeuralNetworks`.\n computable_loss: is-a `ComputableLoss` or `mxnet.gluon.loss`.\n learning_rate: `float` of learning rate.\n observed_activation: `act_type` in `mxnet.ndarray.Activation` or `mxnet.symbol.Activation` \n that activates observed data points.\n\n ctx: Context-manager that changes the selected device.\n tied_weights_flag: `bool` of flag to tied weights or not.\n not_init_flag: `bool` of whether initialize parameters or not.\n '''\n if isinstance(encoder, NeuralNetworks) is False:\n raise TypeError(\"The type of `encoder` must be `NeuralNetworks`.\")\n if isinstance(decoder, NeuralNetworks) is False:\n raise TypeError(\"The type of `decoder` must be `NeuralNetworks`.\")\n\n init_deferred_flag = self.init_deferred_flag\n self.init_deferred_flag = True\n super(AutoEncoder, self).__init__(computable_loss=computable_loss)\n self.init_deferred_flag = init_deferred_flag\n\n logger = getLogger(\"accelbrainbase\")\n self.__logger = logger\n self.encoder = encoder\n self.decoder = decoder\n self.__tied_weights_flag = tied_weights_flag\n\n self.__computable_loss = computable_loss\n\n self.encoder_optimizer = None\n self.decoder_optimizer = None\n\n self.epoch = 0\n self.__learning_rate = learning_rate\n self.__not_init_flag = not_init_flag\n\n self.__ctx = ctx\n self.__encoder_input_dim = None\n self.__decoder_input_dim = None\n self.__loss_list = []\n\n def parameters(self):\n '''\n '''\n return [\n {\n \"params\": self.encoder.parameters(),\n },\n {\n \"params\": self.decoder.parameters(),\n }\n ]\n\n def initialize_params(self, input_dim):\n '''\n Initialize params.\n '''\n if self.encoder_optimizer is not None and self.decoder_optimizer is not None:\n return\n\n self.__encoder_input_dim = input_dim\n self.__decoder_input_dim = self.encoder.units_list[-1]\n if self.init_deferred_flag is False:\n if self.__not_init_flag is False:\n self.encoder.initialize_params(self.__encoder_input_dim)\n self.decoder.initialize_params(self.__decoder_input_dim)\n self.encoder_optimizer = self.encoder.optimizer\n self.decoder_optimizer = self.decoder.optimizer\n\n def learn(self, iteratable_data):\n '''\n Learn samples drawn by `IteratableData.generate_learned_samples()`.\n\n Args:\n iteratable_data: is-a `IteratableData`.\n '''\n if isinstance(iteratable_data, IteratableData) is False:\n raise TypeError(\"The type of `iteratable_data` must be `IteratableData`.\")\n\n self.__loss_list = []\n try:\n epoch = self.epoch\n iter_n = 0\n for batch_observed_arr, batch_target_arr, test_batch_observed_arr, test_batch_target_arr in iteratable_data.generate_learned_samples():\n batch_size = batch_observed_arr.shape[0]\n self.initialize_params(\n input_dim=batch_observed_arr.reshape(batch_size, -1).shape[1]\n )\n self.encoder_optimizer.zero_grad()\n self.decoder_optimizer.zero_grad()\n # rank-3\n pred_arr = self.inference(batch_observed_arr)\n loss = self.compute_loss(\n pred_arr,\n batch_target_arr\n )\n loss.backward()\n self.encoder_optimizer.step()\n self.decoder_optimizer.step()\n self.regularize()\n\n if (iter_n+1) % int(iteratable_data.iter_n / iteratable_data.epochs) == 0:\n with 
torch.inference_mode():\n # rank-3\n test_pred_arr = self.inference(test_batch_observed_arr)\n\n test_loss = self.compute_loss(\n test_pred_arr,\n test_batch_target_arr\n )\n _loss = loss.to('cpu').detach().numpy().copy()\n _test_loss = test_loss.to('cpu').detach().numpy().copy()\n self.__loss_list.append((_loss, _test_loss))\n self.__logger.debug(\"Epochs: \" + str(epoch + 1) + \" Train loss: \" + str(_loss) + \" Test loss: \" + str(_test_loss))\n epoch += 1\n iter_n += 1\n\n except KeyboardInterrupt:\n self.__logger.debug(\"Interrupt.\")\n\n self.epoch = epoch\n self.__logger.debug(\"end. \")\n\n def inference(self, observed_arr):\n '''\n Inference the feature points.\n\n Args:\n observed_arr: rank-2 Array like or sparse matrix as the observed data points.\n The shape is: (batch size, feature points)\n\n Returns:\n `tensor` of inferenced feature points.\n '''\n pred_arr = self.forward(observed_arr)\n return pred_arr\n\n def compute_loss(self, pred_arr, labeled_arr):\n '''\n Compute loss.\n\n Args:\n pred_arr: `tensor`.\n labeled_arr: `tensor`.\n\n Returns:\n loss.\n '''\n return self.__computable_loss(pred_arr, labeled_arr)\n\n def extract_feature_points(self):\n '''\n Extract the activities in hidden layer and reset it, \n considering this method will be called per one cycle in instances of time-series.\n\n Returns:\n The `mxnet.ndarray` of array like or sparse matrix of feature points or virtual visible observed data points.\n '''\n return self.feature_points_arr\n\n def extract_learned_dict(self):\n '''\n Extract (pre-) learned parameters.\n\n Returns:\n `dict` of the parameters.\n '''\n params_arr_dict = {}\n\n params_dict = self.encoder.extract_learned_dict()\n for k in params_dict:\n params_arr_dict.setdefault(k, params_dict[k].data())\n\n params_dict = self.decoder.extract_learned_dict()\n for k in params_dict:\n params_arr_dict.setdefault(k, params_dict[k].data())\n\n return params_arr_dict\n\n def forward(self, x):\n '''\n Forward with torch.\n\n Args:\n x: `tensor` of observed data points.\n \n Returns:\n `tensor` of inferenced feature points.\n '''\n batch_size = x.shape[0]\n self.initialize_params(\n input_dim=x.reshape(batch_size, -1).shape[1]\n )\n\n encoded_arr = self.encoder.inference(x)\n self.feature_points_arr = encoded_arr\n decoded_arr = self.decoder.inference(encoded_arr)\n self.__pred_arr = decoded_arr\n return decoded_arr\n\n def regularize(self):\n '''\n Regularization.\n '''\n self.encoder.regularize()\n self.decoder.regularize()\n self.__tie_weights()\n\n def __tie_weights(self):\n if self.__tied_weights_flag is True:\n encoder_params_dict = self.encoder.extract_learned_dict()\n decoder_params_dict = self.decoder.extract_learned_dict()\n encoder_weight_keys_list = [key for key in encoder_params_dict.keys() if \"fc_list\" in key and \"weight\" in key]\n decoder_weight_keys_list = [key for key in decoder_params_dict.keys() if \"fc_list\" in key and \"weight\" in key]\n\n if len(encoder_weight_keys_list) != len(decoder_weight_keys_list):\n raise ValueError(\n \"The number of layers is invalid.\"\n )\n\n for i in range(len(self.encoder.units_list)):\n encoder_layer = i\n decoder_layer = len(self.encoder.units_list) - i - 1\n encoder_weight_keys, decoder_weight_keys = None, None\n for _encoder_weight_keys in encoder_weight_keys_list:\n if \"fc_list.\" + str(encoder_layer) + \".weight\" in _encoder_weight_keys:\n encoder_weight_keys = _encoder_weight_keys\n break\n\n for _decoder_weight_keys in decoder_weight_keys_list:\n if \"fc_list.\" + str(decoder_layer) + 
\".weight\" in _decoder_weight_keys:\n decoder_weight_keys = _decoder_weight_keys\n break\n\n if encoder_weight_keys is not None and decoder_weight_keys is not None:\n try:\n decoder_params_dict[decoder_weight_keys] = encoder_params_dict[encoder_weight_keys].T\n except AssertionError:\n raise ValueError(\n \"The shapes of weight matrixs must be equivalents in encoder layer \" + str(encoder_layer) + \" and decoder layer \" + str(decoder_layer)\n )\n\n for k, params in decoder_params_dict.items():\n if k in decoder_weight_keys_list:\n self.decoder.load_state_dict({k: params}, strict=False)\n\n def __rename_file(self, filename):\n filename_list = filename.split(\".\")\n _format = filename_list[-1]\n encoder_filename = filename.replace(\".\" + _format, \"_encoder.\" + _format)\n decoder_filename = filename.replace(\".\" + _format, \"_decoder.\" + _format)\n return encoder_filename, decoder_filename\n\n def save_parameters(self, filename):\n '''\n Save parameters to files.\n\n Args:\n filename: File name.\n '''\n encoder_filename, decoder_filename = self.__rename_file(filename)\n self.encoder.save_parameters(encoder_filename)\n self.decoder.save_parameters(decoder_filename)\n torch.save(\n {\n 'epoch': self.epoch,\n 'loss': self.loss_arr,\n }, \n filename\n )\n\n def load_parameters(self, filename, ctx=None, strict=True):\n '''\n Load parameters to files.\n\n Args:\n filename: File name.\n ctx: Context-manager that changes the selected device.\n strict: Whether to strictly enforce that the keys in state_dict match the keys returned by this module’s state_dict() function. Default: `True`.\n '''\n encoder_filename, decoder_filename = self.__rename_file(filename)\n self.encoder.load_parameters(encoder_filename, ctx=ctx, strict=strict)\n self.decoder.load_parameters(decoder_filename, ctx=ctx, strict=strict)\n\n checkpoint = torch.load(filename)\n self.epoch = checkpoint['epoch']\n self.__loss_list = checkpoint['loss'].tolist()\n\n def set_readonly(self, value):\n ''' setter '''\n raise TypeError(\"This property must be read-only.\")\n\n def get_loss_arr(self):\n ''' getter for losses. 
'''\n return np.array(self.__loss_list)\n\n loss_arr = property(get_loss_arr, set_readonly)\n\n def get_init_deferred_flag(self):\n ''' getter for `bool` that means initialization in this class will be deferred or not.'''\n return self.__init_deferred_flag\n \n def set_init_deferred_flag(self, value):\n ''' setter for `bool` that means initialization in this class will be deferred or not.'''\n self.__init_deferred_flag = value\n\n init_deferred_flag = property(get_init_deferred_flag, set_init_deferred_flag)\n","repo_name":"accel-brain/accel-brain-code","sub_path":"Accel-Brain-Base/accelbrainbase/observabledata/_torch/neuralnetworks/auto_encoder.py","file_name":"auto_encoder.py","file_ext":"py","file_size_in_byte":12802,"program_lang":"python","lang":"en","doc_type":"code","stars":288,"dataset":"github-code","pt":"60"} +{"seq_id":"39757480628","text":"def get_digits(n, mod=10):\n\t# returns a list containing digits of the number n\n\tres = []\n\n\tnmod_prev = n\n\ti = 1\n\n\twhile(nmod_prev != 0):\n\t\tremainder = int(nmod_prev % mod)\n\t\tres = [remainder] + res\n\t\tnmod_prev = int((nmod_prev - remainder) / mod)\n\t\ti += 1\n\n\treturn res\n\ndef pad_digits(a_digits):\n\t# takes a list of numbers represented as digits\n\t# and pads them with 0s in front\n\t# returns a list of digits padded to the same length\n\n\tassert(len(a_digits) > 0)\n\n\tmax_len = len(a_digits[0])\n\tfor i in range(1, len(a_digits)):\n\t\tif(max_len < len(a_digits[i])):\n\t\t\tmax_len = len(a_digits[i])\n\n\ta_digits_padded = []\n\n\tfor i in range(len(a_digits)):\n\t\tcur_a = a_digits[i]\n\t\tfor j in range(max_len - len(a_digits[i])):\n\t\t\tcur_a = [0] + cur_a\n\n\t\ta_digits_padded.append(cur_a)\n\n\treturn a_digits_padded, max_len\n\ndef stable_insertion_sort_on_digits(A, ind):\n\tassert(len(A) > 0)\n\tassert(ind < len(A[0]))\n\tfor j in range(1, len(A)):\n\t\tkey_arr = A[j]\n\t\tkey = A[j][ind]\n\t\ti = j-1\n\t\twhile(i >= 0 and A[i][ind] > key):\n\t\t\tA[i+1] = A[i]\n\t\t\ti -= 1\n\t\tA[i+1] = key_arr\n\ndef radix_sort_on_digit(A, d):\n\t# implements textbook radix sort\n\t# A - a list of input numbers, each represented by \n\t# a list of digits, padded in front to the same length\n\t# d - number of digits, i.e. 
length of each list of digits for each number in A\n\n\t# sorts A in place, one digit position at a time; returns None\n\n\tfor i in range(0, d):\n\t\tstable_insertion_sort_on_digits(A, d-i-1)\n\ndef digits_to_numbers(A, mod):\n\t# converts digits to numbers\n\tres = []\n\tfor i in range(len(A)):\n\t\tdigits = A[i]\n\t\tcur_num = 0\n\t\tfor j in range(len(digits)):\n\t\t\tcur_num += digits[len(digits)-j-1] * pow(mod, j) # mod^j\n\t\tres.append(cur_num)\n\n\treturn res\n\ndef radix_sort(a, mod=10):\n\n\tfor i in range(len(a)):\n\t\tassert(a[i] > 0)\n\t\tassert(int(a[i]) == a[i])\n\n\ta_digits = [get_digits(el, mod) for el in a]\n\ta_digits_padded, d = pad_digits(a_digits)\n\tradix_sort_on_digit(a_digits_padded, d)\n\ta_sorted = digits_to_numbers(a_digits_padded, mod)\n\n\treturn a_sorted\n\ndef test():\n\tprint(\"Running tests\")\n\ta = [329, 457, 657, 839, 436, 720, 355]\n\ta_sorted = radix_sort(a)\n\tassert(a_sorted == [329, 355, 436, 457, 657, 720, 839])\n\tprint(\"All tests passed\")\n\tprint(\"\")\n\ndef main():\n\n\ttest()\n\ta = [10, 4, 6, 108, 1, 3, 99, 203, 5, 5, 10001]\n\n\tprint(\"Before sorting: \" + str(a))\n\n\ta_sorted = radix_sort(a)\n\n\tprint(\"After sorting: \" + str(a_sorted))\n\nif __name__ == \"__main__\":\n\tmain()","repo_name":"avalouev/basic_algorithms","sub_path":"radix_sort/radix_sort.py","file_name":"radix_sort.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"4327060222","text":"import argparse\nimport requests\nimport json\nimport datetime\nimport ast\nimport pdb\nimport logging\nimport inspect\nimport sys, os, django\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(BASE_DIR)\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"app.settings\")\ndjango.setup()\n\nfrom collect.models import *\n\nclass restbus_api:\n\tdef __init__(self):\n\t\t'''The RestBus API setup'''\n\t\tself.base_url = 'http://restbus.info/api/agencies/sf-muni/'\n\t\tself.routes_url = 'routes/'\n\t\tself.vehicles_url = 'vehicles/'\n\t\tself.headers = {'Content-Type': 'application/json'}\n\n\tdef get_json(self,content_url):\n\n\t\ts = requests.Session()\n\t\ts.headers.update(self.headers)\n\t\turl = self.base_url+content_url\n\n\t\tstatus = s.get(url).json()\n\n\t\ts.close()\n\n\t\treturn status\n\n\nif __name__ == \"__main__\":\n\t'''Script to read data via the RESTful API. 
'''\n\tparser = argparse.ArgumentParser(description='Read the RestBus API')\n\n\targs = parser.parse_args()\n\n\tapi1 = restbus_api()\n\n\t# Go grab the latest routes\n\n\troutes = api1.get_json('routes/')\n\n\tfor route in routes:\n\t\trid = route['id']\n\t\ttitle = route['title']\n\t\tr1 = Route.objects.filter(rid=rid).first()\n\n\t\tif not r1:\n\t\t\tr2 = Route(rid=rid,title=title)\n\t\t\tr2.save()\n","repo_name":"jarieb/sfmuni-trip-stats","sub_path":"app/gather_sfmta_data.py","file_name":"gather_sfmta_data.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"23776543108","text":"import numpy as np\nfrom openmdao.api import ExplicitComponent\n\n\"The RHS of psi double dot\"\n\nclass PathpointsComp(ExplicitComponent):\n\n def initialize(self):\n self.options.declare('tube_nbr', default=3, types=int)\n self.options.declare('k', default=3, types=int)\n self.options.declare('num_nodes', default=4, types=int)\n\n \n\n \n def setup(self):\n num_nodes = self.options['num_nodes']\n k = self.options['k']\n\n #Inputs\n self.add_input('p', shape=(num_nodes,k,3,1))\n self.add_input('desptsconstraints', shape=(k,3))\n\n # outputs\n self.add_output('pathconstraints',shape=(k,3))\n \n row_indices = np.outer(np.arange(k*3),np.ones(3)).flatten()\n col_indices = np.outer(np.ones(k),np.outer(np.ones(3),np.array([0,1,2])).flatten()) + (np.arange(0,k*3,3).reshape(-1,1))\n # row_indices_p = np.outer(np.arange(k*3),np.ones(num_nodes*3)).flatten()\n # row_indices_p = np.arange(num_nodes*k*3).flatten()\n # col_indices_p = np.outer(np.ones(k*num_nodes),np.outer(np.ones(3),np.array([0,1,2])).flatten()) + (np.arange(0,num_nodes*k*3,3).reshape(-1,1))\n row_indices_p = np.outer(np.arange(k*3),np.ones(num_nodes)).flatten()\n col_indices_p = np.outer(np.ones(num_nodes*k),np.outer(np.ones(3),np.array([0,1,2])).flatten()) + (np.arange(0,num_nodes*k*3,3).reshape(-1,1))\n row_indices_K = np.outer(np.ones(num_nodes),np.arange(k*3)).flatten()\n col_indices_K = np.arange(num_nodes*k*3).flatten()\n self.declare_partials('pathconstraints', 'p')#,rows= row_indices_K , cols=col_indices_K)#,rows=row_indices, cols=col_indices.flatten())#,rows=row_indices,cols=col_indices)\n self.declare_partials('pathconstraints', 'desptsconstraints')\n \n \n def compute(self,inputs,outputs):\n\n k = self.options['k']\n num_nodes= self.options['num_nodes']\n p = inputs['p']\n tip = inputs['desptsconstraints']\n p = np.reshape(p,(num_nodes,k,3))\n # change here\n idx = np.linspace(20,100,k-1,dtype = int, endpoint=False)\n self.idx = idx\n path = np.zeros((k,3))\n path[0,:] = tip[0,:]\n path[1:,:] = tip[1:,:] - p[idx,0,:]\n \n outputs['pathconstraints'] = path\n\n def compute_partials(self,inputs,partials):\n \"\"\" partials Jacobian of partial derivatives.\"\"\"\n num_nodes = self.options['num_nodes']\n k = self.options['k']\n p = inputs['p']\n node_idx = self.idx\n node_idx = np.asarray(node_idx)\n '''Computing Partials'''\n pp_pp = np.zeros((k*3,num_nodes*k*3))\n k_idx = np.arange(k)*3\n \n pp_pp[k_idx[1:],(3*node_idx*k).astype(int)] = -1\n pp_pp[k_idx[1:]+1,(3*node_idx*k+1).astype(int)] = -1\n pp_pp[k_idx[1:]+2,(3*node_idx*k+2).astype(int)] = -1\n # k_ = np.arange(0,k*3,3)\n # pd_pp[np.arange(k)*3,(interpolation_idx_r)*k*3+k_] = tmp\n # pd_pp[np.arange(k)*3+1,(interpolation_idx_r)*k*3+k_+1] = tmp\n # pd_pp[np.arange(k)*3+2,(interpolation_idx_r)*k*3+k_+2] = tmp\n # pd_pp[np.arange(k)*3,(interpolation_idx_l)*k*3+k_] = 1-tmp\n # 
pd_pp[np.arange(k)*3+1,(interpolation_idx_l)*k*3+k_+1] = 1-tmp\n # pd_pp[np.arange(k)*3+2,(interpolation_idx_l)*k*3+k_+2] = 1-tmp\n \n \n pp_pd = np.identity((k*3))\n # pp_pd[:3,:3] = np.identity(3)\n # pd_pt[:,0] = (p[interpolation_idx_r,np.arange(k),0] - p[interpolation_idx_l,np.arange(k),0]).squeeze()\n # pd_pt[:,3] = (p[interpolation_idx_r,np.arange(k),1] - p[interpolation_idx_l,np.arange(k),1]).squeeze()\n # pd_pt[:,6] = (p[interpolation_idx_r,np.arange(k),2] - p[interpolation_idx_l,np.arange(k),2]).squeeze()\n # pd_pt[:,3] = (p[interpolation_idx_r,:,1] - p[interpolation_idx_l,:,1])\n # pd_pt[:,6] = (p[interpolation_idx_r,:,2] - p[interpolation_idx_l,:,2])\n\n partials['pathconstraints','desptsconstraints'][:] = pp_pd\n partials['pathconstraints','p'][:]= pp_pp\n\nif __name__ == '__main__':\n \n from openmdao.api import Problem, Group\n \n from openmdao.api import IndepVarComp\n \n group = Group()\n n=81\n k=3\n comp = IndepVarComp()\n comp.add_output('p', val=np.random.random((n,k,3,1)))\n comp.add_output('desptsconstraints', val=np.random.random((k,3)))\n \n group.add_subsystem('IndepVarComp', comp, promotes = ['*'])\n \n \n comp = PathpointsComp(num_nodes=n,k=k)\n group.add_subsystem('desiredpointscomp', comp, promotes = ['*'])\n \n prob = Problem()\n prob.model = group\n \n prob.setup()\n prob.run_model()\n prob.model.list_outputs()\n\n \n prob.check_partials(compact_print=False)\n prob.check_partials(compact_print=True)\n","repo_name":"UCSDMorimotoLab/CTRoptimization","sub_path":"ctr_framework/pathpoints_comp.py","file_name":"pathpoints_comp.py","file_ext":"py","file_size_in_byte":4754,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"60"} +{"seq_id":"2131347730","text":"import dash\n# Code from: https://github.com/plotly/dash-labs/tree/main/docs/demos/multi_page_example1\ndash.register_page(__name__, path=\"/\")\n\nfrom dash import Dash, dcc, html, Input, Output, callback\nimport dash_bootstrap_components as dbc\nimport PIL\nimport plotly.express as px\n\nBACKGROUND_IMAGE_PATH = 'pages/assets/background.PNG'\nimg = PIL.Image.open(BACKGROUND_IMAGE_PATH)\n\nmyCard = dbc.Card([\n dbc.CardImg(src=BACKGROUND_IMAGE_PATH, top=True, alt='Cant load background'),\n dbc.CardImgOverlay(\n dbc.Col(html.H1(f'To demo that the Path works on PIL library, img size is:{img.size}', className='text-center'), style={'textAlign': 'center'})\n )])\n\nlayout = html.Div(\n [\n myCard\n ]\n)\n","repo_name":"MoritzFranke/DashMultiPageQuestion","sub_path":"pages/page.py","file_name":"page.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"34060232715","text":"from flask import Flask, render_template, request\nimport requests\nimport flipkartscrapper\nimport seaborn as sns\nimport numpy as np\nimport os\nimport os.path\nfrom os import path\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n# For visualizations\nimport matplotlib.pyplot as plt\n# For regular expressions\nimport re\n# For handling string\nimport string\napp = Flask(__name__)\nheaders = {'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/80.0.3987.106 Safari/537.36',\n 'referrer': 'https://flipkart.com'\n }\n\n\n@app.route('/')\ndef index():\n if path.exists('./scraped_data.csv'):\n os.remove('./scrapped_data.csv')\n return render_template('index.html')\n\n\n@app.route('/process', methods=['POST'])\ndef process():\n url = 
request.form.get('url')\n # path = os.getcwd()\n #\n # print(path)\n if 'flipkart' in url:\n product_title = str(flipkartscrapper.flipscrapper(url))\n page = requests.get(url, headers=headers, timeout=2)\n soup = BeautifulSoup(page.text, 'html.parser')\n total_reviews = soup.find('span', attrs={'class': '_2_R_DZ'}).text\n \n\n df = pd.read_csv('./scrapped_data.csv')\n df = df[['Rating', 'Review']] # getting required columns\n df['cleaned'] = df['Review'].apply(lambda x: x.lower()) # to lower letter\n df.isnull().sum() # to check null values\n df.dropna(inplace=True) # to remove null values if there\n df.isnull().sum()\n df['cleaned'] = df['cleaned'].apply(lambda x: re.sub(\n '\\w*\\d\\w*', '', x)) # remove digits in data\n df['cleaned'] = df['cleaned'].apply(\n lambda x: re.sub('[%s]' % re.escape(string.punctuation), '', x)) # Remove Punctuations\n df['cleaned'] = df['cleaned'].apply(\n lambda x: re.sub(' +', ' ', x)) # Removing extra spaces\n # stopwords removal and lemmatization\n # Importing spacy\n import spacy\n # Loading model\n nlp = spacy.load('en_core_web_sm', disable=['parser', 'ner'])\n\n # Lemmatization with stopwords removal\n df['lemmatized'] = df['cleaned'].apply(\n lambda x: ' '.join([token.lemma_ for token in list(nlp(x)) if (token.is_stop == False)]))\n\n # keeping reqired columns because the abbove cell appends extera columns\n df = df[['Rating', 'lemmatized']]\n df_new = df.rename(columns={'lemmatized': 'Review'})\n df = df_new\n from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n sent = SentimentIntensityAnalyzer()\n sentiment_dict = []\n for i in range(0, len(df)):\n sentiment_dict.append(sent.polarity_scores(df.iloc[i, 1]))\n positive = []\n neutral = []\n negative = []\n compound = []\n for item in sentiment_dict:\n positive.append(item['pos'])\n neutral.append(item['neu'])\n negative.append(item['neg'])\n compound.append(item['compound'])\n sentiment_df = pd.DataFrame(list(zip(positive, neutral, negative, compound)), columns=[\n 'Positive', 'Neutral', 'Negative', 'Compound'])\n df['Positive'] = sentiment_df['Positive']\n df['Negative'] = sentiment_df['Negative']\n df['Neutral'] = sentiment_df['Neutral']\n df['Compound'] = sentiment_df['Compound']\n df_temp = df[['Rating', 'Review']]\n df_temp = df_temp.assign(new=\"1\")\n df_grouped = df_temp[['new', 'Review']].groupby(\n by='new').agg(lambda x: ' '.join(x))\n # Creating Document Term Matrix\n from sklearn.feature_extraction.text import CountVectorizer\n cv = CountVectorizer(analyzer='word')\n data = cv.fit_transform(df_grouped['Review'])\n df_dtm = pd.DataFrame(data.toarray(), columns=cv.get_feature_names())\n df_dtm.index = df_grouped.index\n # Importing wordcloud for plotting word clouds and textwrap for wrapping longer text\n from wordcloud import WordCloud\n from textwrap import wrap\n # Function for generating word clouds\n\n def generate_wordcloud(data, title):\n wc = WordCloud(width=400, height=330, max_words=150,\n background_color='white'). 
generate_from_frequencies(data)\n plt.figure(figsize=(10, 8))\n plt.imshow(wc, interpolation='bilinear')\n plt.axis(\"off\")\n plt.title('\\n'.join(wrap(title, 60)), fontsize=13)\n plt.show()\n if path.exists('./static/wordcloud.jpg'):\n os.remove('./static/wordcloud.jpg')\n wc.to_file(\"./static/wordcloud.jpg\")\n # Transposing document term matrix\n df_dtm = df_dtm.transpose()\n # Plotting word cloud for each product\n for index, product in enumerate(df_dtm.columns):\n generate_wordcloud(df_dtm[product], product)\n highest_polarity = pd.DataFrame(columns=['Reviews'])\n lowest_polarity = pd.DataFrame(columns=['Reviews'])\n\n from textblob import TextBlob\n df['polarity'] = df['Review'].apply(\n lambda x: TextBlob(x).sentiment.polarity)\n for index, Review in enumerate(df.iloc[df['polarity'].sort_values(ascending=False)[:3].index]['Review']):\n highest_polarity = highest_polarity.append(\n {'Reviews': str(Review)}, ignore_index=True)\n\n for index, Review in enumerate(df.iloc[df['polarity'].sort_values(ascending=True)[:3].index]['Review']):\n lowest_polarity = lowest_polarity.append(\n {'Reviews': str(Review)}, ignore_index=True)\n # print('the highest: ', highest_polarity)\n # # print('the lowest: ', lowest_polarity)\n #\n # return render_template('process.html',title=product_title,tables=[highest_polarity.to_html(classes='data', index=False)],titles=highest_polarity.columns.values,total_reviews=total_reviews, pos = str(df['Positive'].mean() * 10)[0:3],\n # neg= str(df['Neutral'].mean() * 10)[0:3], neutral=str(df['Negative'].mean() * 10)[0:3])\n if float(df['Positive'].mean() * 10) > 6:\n verdict = 'This product is highly recommended!!!'\n elif float(df['Negative'].mean() * 10) < 0:\n verdict = 'This product is not recommended!'\n else:\n verdict = 'This product is recommended'\n return render_template('process.html', title=product_title, row_data=list(highest_polarity.values.tolist()), row2_data=list(lowest_polarity.values.tolist()), titles=highest_polarity.columns.values, total_reviews=total_reviews, pos=str(df['Positive'].mean() * 10)[0:3], neg=str(df['Neutral'].mean() * 10)[0:3], neutral=str(df['Negative'].mean() * 10)[0:3], verdict=verdict)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"Dhruvawara/Final_Year_Project","sub_path":"TEST/Fllipsc/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"31081980456","text":"import torch\r\nimport pandas as pd\r\nimport torch.nn as nn\r\nfrom torchcrf import CRF\r\nfrom torch.utils import data\r\n\r\n\r\nTRAINDATA = 'BiFile/OutputFile/trainData.txt'\r\nTESTDATA = 'BiFile/OutputFile/testData.txt'\r\nMODEL = 'BiFile/OutputFile/model/model_90.pth'\r\nVOCAB_PATH = 'BiFile/OutputFile/vocab.txt'\r\nLABEL_PATH = 'BiFile/OutputFile/label.txt'\r\nUSERDICT = 'BiFile/userDict.txt'\r\nWORD_PAD_ID = 0\r\nEMBEDDING_DIM = 100\r\nHIDDEN_SIZE = 256\r\nTARGET_SIZE = 3\r\nVOCAB_SIZE = 3000\r\nWORD_UNK = ''\r\nLR = 1e-3\r\nBATCH = 64\r\nEPOCH = 100\r\nMODEL_DIR = 'BiFile/OutputFile/model/'\r\nLABEL_O_ID = 0\r\nMODEL_NAME = 'model_100.pth'\r\n\r\n\r\nclass Dataset(data.Dataset):\r\n def __init__(self, type='train', base_len=50):\r\n super().__init__()\r\n self.base_len = base_len\r\n data_path = TRAINDATA if type == 'train' else TESTDATA\r\n self.df = pd.read_csv(data_path, encoding='utf-8', names=['word', 'label'])\r\n _, self.word2id = self.__get_vocab__()\r\n _, self.label2id = self.__get_label__()\r\n 
self.get_points()\r\n\r\n\r\n # find the split points\r\n def get_points(self):\r\n self.points = [0]\r\n i = 0\r\n while True:\r\n if i + self.base_len >= len(self.df):\r\n self.points.append(len(self.df))\r\n break\r\n if self.df.loc[i + self.base_len, 'label'] == 'O':\r\n i += self.base_len\r\n self.points.append(i)\r\n else:\r\n i += 1\r\n\r\n\r\n def __len__(self):\r\n return len(self.points) - 1\r\n\r\n \r\n # vectorize\r\n def __getitem__(self, index):\r\n df = self.df[self.points[index] : self.points[index + 1]]\r\n wordUnkId = self.word2id[WORD_UNK]\r\n labelOId = self.label2id['O']\r\n input = [self.word2id.get(w, wordUnkId) for w in df['word']]\r\n target = [self.label2id.get(l, labelOId) for l in df['label']]\r\n return input, target \r\n \r\n def __get_vocab__(self):\r\n df = pd.read_csv(VOCAB_PATH, names=['word', 'id'])\r\n return list(df['word']), dict(df.values)\r\n\r\n def __get_label__(self):\r\n df = pd.read_csv(LABEL_PATH, names=['label', 'id'])\r\n return list(df['label']), dict(df.values)\r\n\r\n\r\nclass Model(nn.Module):\r\n def __init__(self):\r\n super().__init__()\r\n self.embed = nn.Embedding(VOCAB_SIZE, EMBEDDING_DIM, WORD_PAD_ID)\r\n self.lstm = nn.LSTM(\r\n EMBEDDING_DIM,\r\n HIDDEN_SIZE,\r\n batch_first = True,\r\n bidirectional = True\r\n )\r\n self.linear = nn.Linear(2 * HIDDEN_SIZE, TARGET_SIZE)\r\n self.crf = CRF(TARGET_SIZE, batch_first=True)\r\n \r\n\r\n def _get_lstm_feature(self, input):\r\n out = self.embed(input)\r\n out, _ = self.lstm(out)\r\n return self.linear(out)\r\n\r\n\r\n def forward(self, input, mask):\r\n out = self._get_lstm_feature(input)\r\n return self.crf.decode(out, mask)\r\n\r\n \r\n def loss_fn(self, input, target, mask):\r\n y_pred = self._get_lstm_feature(input)\r\n return -self.crf.forward(y_pred, target, mask, reduction='mean')\r\n\r\n\r\nclass Skill():\r\n def __init__(self):\r\n _, self.word2id = self.__get_vocab__()\r\n self.id2label, _ = self.__get_label__()\r\n self.model = torch.load(MODEL)\r\n\r\n def __get_vocab__(self):\r\n df = pd.read_csv(VOCAB_PATH, names=['word', 'id'])\r\n return list(df['word']), dict(df.values)\r\n\r\n\r\n def __get_label__(self):\r\n df = pd.read_csv(LABEL_PATH, names=['label', 'id'])\r\n return list(df['label']), dict(df.values)\r\n\r\n\r\n def __extract__(self, label, text):\r\n i = 0\r\n res = []\r\n while i < len(label):\r\n if label[i] != 'O':\r\n prefix, name = label[i].split('-')\r\n start = end = i\r\n i += 1\r\n while i < len(label) and label[i] == 'I-' + name:\r\n end = i\r\n i += 1\r\n \r\n res.append(text[start:end + 1])\r\n else:\r\n i += 1\r\n \r\n return res\r\n\r\n\r\n def __get_user_words__(self):\r\n userWord = []\r\n with open(USERDICT, encoding='utf-8') as file:\r\n for l in file.readlines():\r\n userWord.append(l.split('\\n')[0])\r\n return userWord\r\n\r\n\r\n def user_add(self, word):\r\n userWord = self.__get_user_words__()\r\n\r\n if word in userWord:\r\n print(f\"{word} is already in the dict!\")\r\n else:\r\n with open(USERDICT, 'a', encoding='utf-8') as file:\r\n file.write(word + '\\n')\r\n print(\"Added successfully!\")\r\n\r\n\r\n def user_remove(self, word):\r\n userWord = self.__get_user_words__()\r\n if word in userWord:\r\n with open(USERDICT, 'r', encoding='utf-8') as file:\r\n lines = file.readlines()\r\n with open(USERDICT, 'w', encoding='utf-8') as file:\r\n for l in lines:\r\n if word == l.split('\\n')[0]:\r\n continue\r\n else:\r\n file.write(l)\r\n print('Removed successfully!')\r\n else:\r\n print(f\"{word} is not in the dict!\")\r\n\r\n\r\n def get_skill(self, text):\r\n input = 
torch.tensor([[self.word2id.get(w, WORD_PAD_ID) for w in text]])\r\n mask = torch.tensor([[1] * len(text)]).bool()\r\n\r\n y_pred = self.model(input, mask)\r\n\r\n label = []\r\n for l in y_pred:\r\n label.append(self.id2label[l[0]])\r\n \r\n res = self.__extract__(label, text)\r\n res = self.__filter__(text, res)\r\n res = list(set(res))\r\n return res\r\n\r\n\r\n def __filter__(self, text, arr):\r\n userWord = self.__get_user_words__()\r\n res = arr\r\n for uWord in userWord:\r\n if uWord in text:\r\n res.append(uWord)\r\n for aWord in arr:\r\n if uWord in aWord:\r\n res.append(uWord)\r\n res.remove(aWord)\r\n return res\r\n\r\n\r\nclass Train():\r\n def __init__(self):\r\n self.dataset = Dataset()\r\n self.loader = data.DataLoader(\r\n self.dataset,\r\n batch_size=BATCH,\r\n shuffle=True,\r\n collate_fn=self.__collate_fn__\r\n )\r\n self.model = Model()\r\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=LR)\r\n\r\n\r\n def __collate_fn__(self, batch):\r\n batch.sort(key=lambda x: len(x[0]), reverse=True)\r\n max_len = len(batch[0][0])\r\n input = []\r\n target = []\r\n mask = []\r\n\r\n for item in batch:\r\n pad_len = max_len - len(item[0])\r\n input.append(item[0] + [WORD_PAD_ID] * pad_len)\r\n target.append(item[1] + [LABEL_O_ID] * pad_len)\r\n mask.append([1] * len(item[0]) + [0] * pad_len)\r\n \r\n return torch.tensor(input), torch.tensor(target), torch.tensor(mask).bool()\r\n\r\n\r\n def train_net(self):\r\n for e in range(EPOCH):\r\n for b, (input, target, mask) in enumerate(self.loader):\r\n y_pred = self.model(input, mask)\r\n loss = self.model.loss_fn(input, target, mask)\r\n\r\n self.optimizer.zero_grad()\r\n loss.backward()\r\n self.optimizer.step()\r\n\r\n if b % 10 == 0:\r\n print(f'Epoch: {e} Loss: {loss.item()}')\r\n if e % 10 == 0:\r\n torch.save(self.model, MODEL_DIR + f'model_{e}.pth')\r\n\r\n\r\nclass Test():\r\n def __init__(self):\r\n self.dataset = Dataset('test')\r\n self.loader = data.DataLoader(\r\n self.dataset,\r\n batch_size=BATCH,\r\n collate_fn=self.__collate_fn__\r\n )\r\n\r\n\r\n def __collate_fn__(self, batch):\r\n batch.sort(key=lambda x: len(x[0]), reverse=True)\r\n max_len = len(batch[0][0])\r\n input = []\r\n target = []\r\n mask = []\r\n\r\n for item in batch:\r\n pad_len = max_len - len(item[0])\r\n input.append(item[0] + [WORD_PAD_ID] * pad_len)\r\n target.append(item[1] + [LABEL_O_ID] * pad_len)\r\n mask.append([1] * len(item[0]) + [0] * pad_len)\r\n \r\n return torch.tensor(input), torch.tensor(target), torch.tensor(mask).bool()\r\n\r\n \r\n def test_net(self):\r\n with torch.no_grad():\r\n model = torch.load(MODEL_DIR + MODEL_NAME)\r\n\r\n y_ture_list = []\r\n y_pred_list = []\r\n\r\n for b, (input, target, mask) in enumerate(self.loader):\r\n y_pred = model(input, mask)\r\n # loss = model.loss_fn(input, target, mask)\r\n\r\n for lst in y_pred:\r\n y_pred_list += lst\r\n\r\n for y, m in zip(target, mask):\r\n y_ture_list += y[m == True].tolist()\r\n\r\n y_ture_tensor = torch.tensor(y_ture_list)\r\n y_pred_tensor = torch.tensor(y_pred_list)\r\n\r\n accuracy = (y_ture_tensor == y_pred_tensor).sum() / len(y_ture_tensor)\r\n print(f'total: {len(y_ture_tensor)} accuracy: {accuracy.item()}')\r\n\r\n","repo_name":"openeuler-mirror/open-source-summer","sub_path":"开发者技能提取/BiFile/OSPP_T.py","file_name":"OSPP_T.py","file_ext":"py","file_size_in_byte":9109,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"12953454378","text":"#!/usr/bin/env python\nimport subprocess, time, os, 
shutil, sys\n\"\"\" Script for generating random queries\n\nInput: List of model files\nOutput: Files where each line contains a random query\n\"\"\"\n\n# Memory bound in KiB\nMemoryBound = 1024*1024*1024*2\n\n# Time to wait between process polls\nPollTime = 2\n\n# Start the poll interval at InitialPollTime and increase it until PollTime is reached (set 0 to disable)\nInitialPollTime = 0.1\n\n# Seconds before timeout\nTimeOut = 60*2\n\nStrategies = [\n\"BestFS Ultimate Edition DFS\",\n\"BestFS Ultimate Edition BFS\",\n]\n\n# NULL-Device\nFNULL = open('/dev/null', 'w')\n\nKanban = [\n#\"Kanban5.pet\", \n#\"Kanban10.pet\", \n#\"Kanban20.pet\", \n#\"Kanban50.pet\", \n\"Kanban100.pet\", \n\"Kanban200.pet\", \n\"Kanban500.pet\", \n\"Kanban1000.pet\"\n]\nFMS = [\n#\"FMS2.pet\", \n#\"FMS10.pet\",\n#\"FMS20.pet\", \n\"FMS50.pet\", \n\"FMS100.pet\", \n\"FMS200.pet\", \n\"FMS500.pet\"\n]\nMAPK = [\n#\"MAPK8.pet\", \n#\"MAPK40.pet\", \n#\"MAPK80.pet\", \n#\"MAPK160.pet\", \n\"MAPK320.pet\",\n\"MAPK640.pet\",\n\"MAPK1280.pet\",\n\"MAPK2560.pet\"\n]\n\nModels = Kanban + FMS + MAPK\n\nModelDir = \"Samples/\"\n\nPeTer = \"../PeTe-build-desktop/PeTer/PeTer\"\nOutputDir = \"TestData/Computability/\"\n\npeterbin = os.path.abspath(PeTer)\nmodeldir = os.path.abspath(ModelDir) + \"/\"\noutputdir = os.path.abspath(OutputDir) + \"/\"\n\n# Copy PeTe to /tmp during tests\nshutil.copyfile(peterbin, \"/tmp/PeTer-Test-Bin\")\nshutil.copystat(peterbin, \"/tmp/PeTer-Test-Bin\")\npeterbin = \"/tmp/PeTer-Test-Bin\"\n\ndef getMemory(pid):\n\targvs = [\"ps\", \"-p\", str(pid), \"-o\", \"vsz=\"]\n\tp2 = subprocess.Popen(argvs, stdout=subprocess.PIPE)\n\tmemory = 0\n\tfor l in p2.stdout.readlines():\n\t\ttry:\n\t\t\tmemory = int(l)\n\t\texcept: pass\n\tp2.wait()\n\treturn memory\n\ndef genQuery(model):\n\tglobal peterbin, modeldir\n\targvs = [peterbin, \"--test\", modeldir + model, \"--gen-query\"]\n\tp = subprocess.Popen(argvs, stdout=subprocess.PIPE, stderr=FNULL)\n\tp.wait()\n\tfor line in p.stdout.readlines():\n\t\tif line.strip() != \"\":\n\t\t\treturn line.strip()\n\treturn None\n\ndef runQuery(model, strategy, query):\n\tglobal TimeOut, MemoryBound, InitialPollTime, PollTime, peterbin, modeldir\n\targvs = [peterbin, \"--test\", modeldir + model, \"--strategy\", strategy, \"--literal-query\", query]\n\tp = subprocess.Popen(argvs, stdout=subprocess.PIPE, stderr=FNULL)\n\tt = 0.0\n\tpeekMem = 0\n\tif InitialPollTime == 0:\n\t\tpt = PollTime\n\telse:\n\t\tpt = InitialPollTime\n\twhile t < TimeOut and p.poll() == None:\n\t\tmem = getMemory(p.pid)\n\t\tif mem > peekMem: peekMem = mem\n\t\tif peekMem > MemoryBound:\n\t\t\tbreak\n\t\tif InitialPollTime > 0 and pt < PollTime:\n\t\t\tpt += InitialPollTime\n\t\t\tif pt > PollTime: pt = PollTime\n\t\tt += pt\n\t\ttime.sleep(pt)\n\tif p.poll() == None:\n\t\tp.kill()\n\t\tp.wait()\n\t\treturn False\n\treturn p.poll() == 0\n\nFiles = {}\n\n# Set up headers and create files\nfor model in Models:\n\tfp = outputdir + model + \".result\"\n\tif os.path.isfile(fp):\n\t\tFiles[model] = open(fp, \"a\")\n\telse:\n\t\tFiles[model] = open(fp, \"w\")\n\t\tresult = \"Query,\\t\"\n\t\tfor strategy in Strategies:\n\t\t\tresult += strategy.replace(\",\",\"-\") + \",\"\n\t\tFiles[model].write(result + \"\\n\")\n\nwhile True:\n\tfor model in Models:\n\t\tf = Files[model]\n\t\tquery = genQuery(model)\n\t\tresult = query + \",\\t\"\n\t\tfor strategy in Strategies:\n\t\t\tif runQuery(model, strategy, query):\n\t\t\t\tresult += \"1,\\t\"\n\t\t\telse:\n\t\t\t\tresult += \"0,\\t\"\n\t\tf.write(result + 
\"\\n\")\n\t\tf.flush()\n\n","repo_name":"jonasfj/PeTe","sub_path":"ComputabilityTest.py","file_name":"ComputabilityTest.py","file_ext":"py","file_size_in_byte":3240,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"60"} +{"seq_id":"10956016553","text":"from dalle2_laion import DalleModelManager, ModelLoadConfig, utils\nfrom dalle2_laion.scripts import BasicInference, ImageVariation, BasicInpainting\nfrom typing import List\nimport os\nimport click\nfrom pathlib import Path\nimport json\nimport torch\n\n@click.group()\n@click.option('--verbose', '-v', is_flag=True, default=False, help='Print verbose output.')\n@click.option('--suppress-updates', '-s', is_flag=True, default=False, help='Suppress updating models if checksums do not match.')\n@click.pass_context\ndef inference(ctx, verbose, suppress_updates):\n ctx.obj['verbose'] = verbose\n ctx.obj['suppress_updates'] = suppress_updates\n\n@inference.command()\n@click.option('--model-config', default='./configs/upsampler.example.json', help='Path to model config file')\n@click.pass_context\ndef test(ctx, model_config):\n model_config = ModelLoadConfig.from_json_path(model_config)\n if model_config.decoder is not None:\n for unet_source in model_config.decoder.unet_sources:\n print('Checksum:', unet_source.load_model_from.checksum_file_path)\n if model_config.prior is not None:\n print('Checksum:', model_config.prior.load_model_from.checksum_file_path)\n model_manager = DalleModelManager(model_config, check_updates=not ctx.obj['suppress_updates'])\n\n@inference.command()\n@click.option('--model-config', default='./configs/upsampler.example.json', help='Path to model config file')\n@click.option('--output-path', default='./output/basic/', help='Path to output directory')\n@click.option('--decoder-batch-size', default=10, help='Batch size for decoder')\n@click.pass_context\ndef dream(ctx, model_config: str, output_path: str, decoder_batch_size: int):\n verbose = ctx.obj['verbose']\n prompts = []\n print(\"Enter your prompts one by one. 
Enter an empty prompt to finish.\")\n while True:\n prompt = click.prompt(f'Prompt {len(prompts)+1}', default='', type=str, show_default=False)\n if prompt == '':\n break\n prompt_file = Path(prompt)\n if utils.is_text_file(prompt_file):\n # Then we can read the prompts line by line\n with open(prompt_file, 'r') as f:\n for line in f:\n prompts.append(line.strip())\n elif utils.is_json_file(prompt_file):\n # Then we assume this is an array of prompts\n with open(prompt_file, 'r') as f:\n prompts.extend(json.load(f))\n else:\n prompts.append(prompt)\n num_prior_samples = click.prompt('How many samples would you like to generate for each prompt?', default=1, type=int)\n\n dreamer: BasicInference = BasicInference.create(model_config, verbose=verbose, check_updates=not ctx.obj['suppress_updates'])\n output_map = dreamer.run(prompts, prior_sample_count=num_prior_samples, decoder_batch_size=decoder_batch_size)\n os.makedirs(output_path, exist_ok=True)\n for text in output_map:\n for embedding_index in output_map[text]:\n for image in output_map[text][embedding_index]:\n image.save(os.path.join(output_path, f\"{text}_{embedding_index}.png\"))\n\n@inference.command()\n@click.option('--model-config', default='./configs/variation.example.json', help='Path to model config file')\n@click.option('--output-path', default='./output/variations/', help='Path to output directory')\n@click.option('--decoder-batch-size', default=10, help='Batch size for decoder')\n@click.pass_context\ndef variation(ctx, model_config: str, output_path: str, decoder_batch_size: int):\n verbose = ctx.obj['verbose']\n variation: ImageVariation = ImageVariation.create(model_config, verbose=verbose, check_updates=not ctx.obj['suppress_updates'])\n decoder_data_requirements = variation.model_manager.decoder_info.data_requirements\n image_filepaths: List[Path] = []\n text_prompts: List[str] = [] if decoder_data_requirements.text_encoding else None\n\n print(\"Enter paths to your images. If you specify a directory all images within will be added. Enter an empty line to finish.\")\n if decoder_data_requirements.text_encoding:\n print(\"This decoder was also conditioned on text. 
You will need to enter a prompt for each image you use.\")\n\n while True:\n image_filepath: Path = click.prompt(f'File {len(image_filepaths)+1}', default=Path(), type=Path, show_default=False)\n if image_filepath == Path():\n break\n if image_filepath.is_dir():\n new_image_paths = utils.get_images_in_dir(image_filepath)\n elif utils.is_image_file(image_filepath):\n new_image_paths = [image_filepath]\n else:\n print(f\"{image_filepath} is not a valid image file.\")\n continue\n\n if decoder_data_requirements.text_encoding:\n for image_path in new_image_paths:\n text_prompt = click.prompt(f'Prompt for {image_path.name}', default=utils.get_prompt_from_filestem(image_path.stem), type=str, show_default=True)\n text_prompts.append(text_prompt)\n image_filepaths.extend(new_image_paths)\n\n print(f\"Found {len(image_filepaths)} images.\")\n images = utils.get_images_from_paths(image_filepaths)\n num_samples = click.prompt('How many samples would you like to generate for each image?', default=1, type=int)\n\n output_map = variation.run(images, text=text_prompts, sample_count=num_samples, batch_size=decoder_batch_size)\n os.makedirs(output_path, exist_ok=True)\n for file_index, generation_list in output_map.items():\n file = image_filepaths[file_index].stem\n for i, image in enumerate(generation_list):\n image.save(os.path.join(output_path, f\"{file}_{i}.png\"))\n\n@inference.command()\n@click.option('--model-config', default='./configs/upsampler.example.json', help='Path to model config file')\n@click.option('--output-path', default='./output/inpaint/', help='Path to output directory')\n@click.pass_context\ndef inpaint(ctx, model_config: str, output_path: str):\n verbose = ctx.obj['verbose']\n inpainting: BasicInpainting = BasicInpainting.create(model_config, verbose=verbose, check_updates=not ctx.obj['suppress_updates'])\n image_filepaths: List[Path] = []\n mask_filepaths: List[Path] = []\n text_prompts: List[str] = []\n print(\"You will be entering the paths to your images and masks one at a time. Enter an empty image path to continue\")\n while True:\n image_filepath: Path = click.prompt(f'File {len(image_filepaths)+1}', default=Path(), type=Path, show_default=False)\n if image_filepath == Path():\n break\n if not utils.is_image_file(image_filepath):\n print(f\"{image_filepath} is not a valid image file.\")\n continue\n mask_filepath: Path = click.prompt(f'Mask for {image_filepath.name}', default=Path(), type=Path, show_default=False)\n if not utils.is_image_file(mask_filepath):\n print(f\"{mask_filepath} is not a valid image file.\")\n continue\n text_prompt = click.prompt(f'Prompt for {image_filepath.name}', default=utils.get_prompt_from_filestem(image_filepath.stem), type=str, show_default=True)\n\n image_filepaths.append(image_filepath)\n mask_filepaths.append(mask_filepath)\n text_prompts.append(text_prompt)\n \n print(f\"Found {len(image_filepaths)} images.\")\n images = utils.get_images_from_paths(image_filepaths)\n mask_images = utils.get_images_from_paths(mask_filepaths)\n min_image_size = float('inf')\n for i, image, mask_image, filepath in zip(range(len(images)), images, mask_images, image_filepaths):\n assert image.size == mask_image.size, f\"Image {filepath.name} has different dimensions than mask {mask_filepaths[i].name}\"\n if min(image.size) < min_image_size:\n min_image_size = min(image.size)\n if image.size[1] != image.size[0]:\n print(f\"{filepath.name} is not a square image. 
It will be center cropped into a square.\")\n images[i] = utils.center_crop_to_square(image)\n mask_images[i] = utils.center_crop_to_square(mask_image)\n print(f\"Minimum image size is {min_image_size}. All images will be resized to this size for inference.\")\n images = [image.resize((min_image_size, min_image_size)) for image in images]\n mask_images = [mask_image.resize((min_image_size, min_image_size)) for mask_image in mask_images]\n\n masks = [utils.get_mask_from_image(mask_image) for mask_image in mask_images]\n num_samples = click.prompt('How many samples would you like to generate for each image?', default=1, type=int)\n output_map = inpainting.run(images, masks, text=text_prompts, sample_count=num_samples)\n os.makedirs(output_path, exist_ok=True)\n for file_index, generation_list in output_map.items():\n file = image_filepaths[file_index].stem\n for i, image in enumerate(generation_list):\n image.save(os.path.join(output_path, f\"{file}_{i}.png\"))\n\n\nif __name__ == \"__main__\":\n inference(obj={})","repo_name":"LAION-AI/dalle2-laion","sub_path":"example_inference.py","file_name":"example_inference.py","file_ext":"py","file_size_in_byte":8977,"program_lang":"python","lang":"en","doc_type":"code","stars":486,"dataset":"github-code","pt":"60"} +{"seq_id":"39109327904","text":"#\n# psuutil.py\n# Platform-specific PSU status interface for SONiC\n#\n\n\nimport os.path\nimport subprocess\nimport logging\nfrom sonic_py_common.general import check_output_pipe\n\ntry:\n from sonic_psu.psu_base import PsuBase\nexcept ImportError as e:\n raise ImportError(str(e) + \"- required module not found\")\n\nDEBUG = False\n\n\ndef show_log(txt):\n if DEBUG == True:\n print(\"[IX2]\"+txt)\n return\n\n\ndef exec_cmd(cmd_args, out_file, show):\n cmd = ' '.join(cmd_args) + ' > ' + out_file\n logging.info('Run :'+cmd)\n try:\n with open(out_file, 'w') as f:\n output = subprocess.check_output(cmd_args, stdout=f, universal_newlines=True)\n show_log(cmd + \"output:\"+str(output))\n except subprocess.CalledProcessError as e:\n logging.info(\"Failed :\"+cmd)\n if show:\n print(\"Failed :\"+cmd + \"returncode = {}, err msg: {}\".format(e.returncode, e.output))\n return output\n\n\ndef my_log(txt):\n if DEBUG == True:\n print(\"[QUANTA DBG]: \"+txt)\n return\n\n\ndef log_os_system(cmd1_args, cmd2_args, show):\n cmd = ' '.join(cmd1_args) + ' | ' + ' '.join(cmd2_args)\n logging.info('Run :'+cmd)\n status = 1\n output = \"\"\n try:\n output = check_output_pipe(cmd1_args, cmd2_args)\n my_log(cmd + \"output:\"+str(output))\n except subprocess.CalledProcessError as e:\n logging.info('Failed :'+cmd)\n if show:\n print(\"Failed :\"+cmd + \"returncode = {}, err msg: {}\".format(e.returncode, e.output))\n return output\n\n\ndef gpio16_exist():\n ls = log_os_system([\"ls\", \"/sys/class/gpio/\"], [\"grep\", \"gpio16\"], 0)\n logging.info('mods:'+ls)\n if len(ls) == 0:\n return False\n\n\ndef gpio17_exist():\n ls = log_os_system([\"ls\", \"/sys/class/gpio/\"], [\"grep\", \"gpio17\"], 0)\n logging.info('mods:'+ls)\n if len(ls) == 0:\n return False\n\n\ndef gpio19_exist():\n ls = log_os_system([\"ls\", \"/sys/class/gpio/\"], [\"grep\", \"gpio19\"], 0)\n logging.info('mods:'+ls)\n if len(ls) == 0:\n return False\n\n\ndef gpio20_exist():\n ls = log_os_system([\"ls\", \"/sys/class/gpio/\"], [\"grep\", \"gpio20\"], 0)\n logging.info('mods:'+ls)\n if len(ls) == 0:\n return False\n\n\nclass PsuUtil(PsuBase):\n \"\"\"Platform-specific PSUutil class\"\"\"\n\n SYSFS_PSU_PRESENT_DIR = [\"/sys/class/gpio/gpio16\",\n 
\"/sys/class/gpio/gpio19\"]\n\n SYSFS_PSU_POWERGOOD_DIR = [\"/sys/class/gpio/gpio17\",\n \"/sys/class/gpio/gpio20\"]\n\n def __init__(self):\n PsuBase.__init__(self)\n\n if gpio16_exist() == False:\n output = exec_cmd([\"echo\", \"16\"], \"/sys/class/gpio/export\", 1)\n output = exec_cmd([\"echo\", \"in\"], \"/sys/class/gpio/gpio16/direction\", 1)\n\n if gpio17_exist() == False:\n output = exec_cmd([\"echo\", \"17\"], \"/sys/class/gpio/export\", 1)\n output = exec_cmd([\"echo\", \"in\"], \"/sys/class/gpio/gpio17/direction\", 1)\n\n if gpio19_exist() == False:\n output = exec_cmd([\"echo\", \"19\"], \"/sys/class/gpio/export\", 1)\n output = exec_cmd([\"echo\", \"in\"], \"/sys/class/gpio/gpio19/direction\", 1)\n\n if gpio20_exist() == False:\n output = exec_cmd([\"echo\", \"20\"], \"/sys/class/gpio/export\", 1)\n output = exec_cmd([\"echo\", \"in\"], \"/sys/class/gpio/gpio20/direction\", 1)\n\n # Get sysfs attribute\n def get_attr_value(self, attr_path):\n\n retval = 'ERR'\n if (not os.path.isfile(attr_path)):\n return retval\n\n try:\n with open(attr_path, 'r') as fd:\n retval = fd.read()\n except Exception as error:\n logging.error(\"Unable to open \", attr_path, \" file !\")\n\n retval = retval.rstrip('\\r\\n')\n return retval\n\n def get_num_psus(self):\n \"\"\"\n Retrieves the number of PSUs available on the device\n :return: An integer, the number of PSUs available on the device\n \"\"\"\n MAX_PSUS = 2\n return MAX_PSUS\n\n def get_psu_status(self, index):\n \"\"\"\n Retrieves the oprational status of power supply unit (PSU) defined\n by index \n :param index: An integer, index of the PSU of which to query status\n :return: Boolean, True if PSU is operating properly, False if PSU is\\\n faulty\n \"\"\"\n status = 0\n attr_file = 'value'\n attr_path = self.SYSFS_PSU_POWERGOOD_DIR[index-1] + '/' + attr_file\n\n attr_value = self.get_attr_value(attr_path)\n\n if (attr_value != 'ERR'):\n attr_value = int(attr_value, 16)\n # Check for PSU status\n if (attr_value == 1):\n status = 1\n\n return status\n\n def get_psu_presence(self, index):\n \"\"\"\n Retrieves the presence status of power supply unit (PSU) defined\n by index \n :param index: An integer, index of the PSU of which to query status\n :return: Boolean, True if PSU is plugged, False if not\n \"\"\"\n status = 0\n psu_absent = 0\n attr_file = 'value'\n attr_path = self.SYSFS_PSU_PRESENT_DIR[index-1] + '/' + attr_file\n\n attr_value = self.get_attr_value(attr_path)\n\n if (attr_value != 'ERR'):\n attr_value = int(attr_value, 16)\n # Check for PSU presence\n if (attr_value == 0):\n status = 1\n\n return status\n","repo_name":"sonic-net/sonic-buildimage","sub_path":"device/quanta/x86_64-quanta_ix1b_rglbmc-r0/plugins/psuutil.py","file_name":"psuutil.py","file_ext":"py","file_size_in_byte":5393,"program_lang":"python","lang":"en","doc_type":"code","stars":614,"dataset":"github-code","pt":"60"} +{"seq_id":"74758042432","text":"#!/usr/bin/python\n\nfrom math import *\nimport sys\n\nif len(sys.argv) != 2:\n sys.stderr.write(\"Need exactly one command line argument (output file name)\")\n sys.exit(1)\n\nkeyboard = \"\"\"\n`1234567890-=\n qwertyuiop[]\n asdfghjkl;'\\\\\n zxcvbnm,./\"\"\"\nshifted = \"\"\"\n~!@#$%^&*()_+\n QWERTYUIOP{}\n ASDFGHJKL:\"|\n ZXCVBNM<>?\"\"\"\n\nshifted = [list(l) for l in shifted.splitlines()[1:]]\nkeyboard = [list(l) for l in keyboard.splitlines()[1:]]\n\n# find the position of a character on the keyboard\ndef findpos(c):\n for i, row in enumerate(keyboard):\n if c in row:\n return (i, row.index(c), 'k')\n for i, 
row in enumerate(shifted):\n if c in row:\n return (i, row.index(c), 's')\n return False\n\ncharrange = list(range(ord(' '), ord('~') + 1))\n\noffset = 2\n\nmatrix = [ list(charrange) for n in charrange ]\n\nfor n in charrange:\n #print chr(n).ljust(2),\n for m in charrange:\n if m == ord(' ') and n == ord(' '):\n matrix[0][0] = 3\n continue\n if m == ord(' ') or n == ord(' '):\n matrix[n-ord(' ')][m-ord(' ')] = -3\n matrix[m-ord(' ')][n-ord(' ')] = -3\n continue\n N = findpos(chr(n))\n M = findpos(chr(m))\n dist = int(floor(sqrt((N[0]-M[0])**2 + (N[1]-M[1])**2)))\n if N[2] != M[2]:\n dist += 1\n matrix[m-ord(' ')][n-ord(' ')] = offset - dist\n matrix[n-ord(' ')][m-ord(' ')] = offset - dist\n\noutput = open(sys.argv[1],'w')\noutput.write(\"/* This file is automatically generated - do not edit */\\n\")\noutput.write(\"int qwertyDistanceSimilarity(char a, char b){\\n\")\noutput.write(\" char qwertyDist[1 + '~' - ' '][1 + '~' - ' '] = {{\" + ( '},\\n {'.join(','.join(str(c) for c in n) for n in matrix) ) + '}};\\n')\noutput.write(\" return qwertyDist[a-' '][b-' '];\\n}\\n\");\n\noutput.close()\n\n","repo_name":"grodtron/NeedlemanWunsch","sub_path":"src/makeQwertyDistance.py","file_name":"makeQwertyDistance.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"60"} +{"seq_id":"29545890928","text":"import string\r\nfrom tkinter import *\r\nfrom tkinter import messagebox\r\nimport time\r\nimport pyautogui\r\n\r\n\r\n\r\n\r\nwin = Tk()\r\n\r\nwin.geometry(\"701x500\")\r\nwin.configure(bg = \"#ffffff\")\r\nwin.title(\"Spammer\")\r\n\r\n\r\nspam_img = PhotoImage(file=\"spam.png\")\r\nwin.iconphoto(False, spam_img)\r\n\r\ncanvas = Canvas(\r\n win,\r\n bg = \"#ffffff\",\r\n height = 500,\r\n width = 701,\r\n bd = 0,\r\n highlightthickness = 0,\r\n relief = \"ridge\")\r\ncanvas.place(x = 0, y = 0)\r\n\r\nbackground_img = PhotoImage(file = f\"background.png\")\r\nbackground = canvas.create_image(\r\n 378.0, 147.0,\r\n image=background_img)\r\n\r\nentry0_img = PhotoImage(file = f\"img_textBox0.png\")\r\nentry0_bg = canvas.create_image(\r\n 362.0, 231.0,\r\n image = entry0_img)\r\n\r\nentry0 = Entry(\r\n bd = 0,\r\n bg = \"#feecec\",\r\n highlightthickness = 0)\r\n\r\nentry0.place(\r\n x = 267.0, y = 201,\r\n width = 190.0,\r\n height = 58)\r\n\r\nentry1_img = PhotoImage(file = f\"img_textBox1.png\")\r\nentry1_bg = canvas.create_image(\r\n 362.0, 332.0,\r\n image = entry1_img)\r\n\r\nentry1 = Entry(\r\n bd = 0,\r\n bg = \"#feecec\",\r\n highlightthickness = 0)\r\n\r\nentry1.place(\r\n x = 267.0, y = 302,\r\n width = 190.0,\r\n height = 58)\r\n\r\nentry2_img = PhotoImage(file = f\"img_textBox2.png\")\r\nentry2_bg = canvas.create_image(\r\n 362.0, 130.0,\r\n image = entry2_img)\r\n\r\nentry2 = Entry(\r\n bd = 0,\r\n bg = \"#feecec\",\r\n highlightthickness = 0)\r\n\r\nentry2.place(\r\n x = 267.0, y = 100,\r\n width = 190.0,\r\n height = 58)\r\n\r\n\r\ndef label():\r\n response = messagebox.askokcancel(title=\"Spam!\", message=f\"The app will spam {entry2.get()} {entry0.get()} times after 5 seconds do you wish to proceed?\")\r\n\r\n\r\n\r\n \r\n i=0\r\n if response == True:\r\n for i in range(int(entry0.get())):\r\n \r\n if str(entry1.get()) == \"\":\r\n messagebox.showerror(title=\"Error!\",message=\"You have to write a number in the delay box if you don't want a delay you can write 0!\")\r\n \r\n if type(entry1.get()) == string:\r\n messagebox.showerror(title=\"Error!\",message=\"You have to write a number in the 
delay1!\")\r\n else:\r\n time.sleep(5)\r\n pyautogui.write(f\"{i}, {entry2.get()}\")\r\n float(entry1.get())\r\n time.sleep(float(entry1.get()))\r\n \r\n\r\n pyautogui.press(\"enter\")\r\n i+=1\r\n\r\n\r\n elif response == False:\r\n pass\r\n\r\n\r\nimg0 = PhotoImage(file = f\"img0.png\")\r\nb0 = Button(\r\n image = img0,\r\n borderwidth = 0,\r\n highlightthickness = 0,\r\n command = label,\r\n relief = \"flat\")\r\n\r\nb0.place(\r\n x = 260, y = 403,\r\n width = 166,\r\n height = 45)\r\n\r\nwin.resizable(False, False)\r\nwin.mainloop()\r\n","repo_name":"abodi555e/SpammerBot","sub_path":"window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":2732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"34904650774","text":"from src import create_game, play_a_turn\n\nif __name__ == \"__main__\":\n players, deck = create_game()\n\n history = []\n auctioneer = 0\n \n for top_card in deck:\n history.append(\n play_a_turn(\n players(auctioneer),\n [i for i in players if i != auctioneer],\n top_card\n )\n )\n \n # save history:\n with open(r'history.txt', 'w') as fp:\n for item in history:\n fp.write(\"%s\\n\" % item)\n \n print('Done')\n","repo_name":"Cartar/game-simulator","sub_path":"archive/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"43547704077","text":"import os\nimport cv2\nimport socket\nfrom sys import argv, platform\nfrom time import sleep\nfrom struct import pack\nfrom pickle import dumps\nfrom threading import Thread\nfrom numpy import zeros, uint8\n\nwhite = (255, 255, 255)\n\nserver = None\nrtsp_clients = []\n\ndef rtsp_setup(port):\n address = os.environ[\"HOSTNAME\"]\n print(address, port)\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server.bind((\"\", port))\n server.listen(10)\n return server\n\ndef main():\n global server\n\n # set defaults args\n cap_indx = 0\n port = 8089\n output_size = (480, 640)\n\n # set args if passed\n if len(argv) == 4:\n cap_indx = int(argv[1])\n port = int(argv[2])\n size = argv[3].split(\"x\")\n output_size = (int(size[1]), int(size[0]))\n\n cap = cv2.VideoCapture(cap_indx)\n\n server = rtsp_setup(port)\n\n clientLookup = RTSPClientLookup()\n clientLookup.start()\n\n while True:\n ret, img = cap.read()\n\n if ret == False and img is None:\n # don't send any data unless you have a stream\n cap.release() # release\n cap = cv2.VideoCapture(cap_indx) # retry\n cv2.waitKey(1)\n continue\n else:\n img = cv2.flip(img, 1)\n\n # resized to desired shape\n if img.shape[0] != output_size[0] or img.shape[1] != output_size[1]: \n img = cv2.resize(img, output_size)\n\n data = dumps(img)\n \n for client in rtsp_clients:\n try:\n client[0].sendall(pack(\"L\", len(data)) + data)\n except Exception as err:\n rtsp_clients.remove(client)\n print(\"Connections dropped: \", client[1], err)\n client[0].close()\n # mandatory for resource conservation\n cv2.waitKey(1)\n # stop looking for new clients\n clientLookup.stop()\n\n# Lookup for new clients to stream to\n# This is standard boiler plate code\nclass RTSPClientLookup(Thread):\n def __init__(self):\n self.running = True\n super(RTSPClientLookup, self).__init__(name = \"RTSP Client Lookup\")\n\n def stop(self): self.running = False\n\n def run(self):\n global server\n global rtsp_clients\n\n try:\n while self.running:\n try:\n if len(rtsp_clients) <= 5:\n conn, addr = server.accept()\n 
print(\"Connection established to:\", addr)\n rtsp_clients.append((conn, addr))\n except:\n print(\"Server connection dropped\")\n except Exception as err:\n print(\"RTSP Client Lookup Thread stopped:\", str(err))\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"adrianvellamlt/homeio-webcam-over-ip","sub_path":"__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"17193513075","text":"\n#the dish must start and end with the same letters as the animal's name\n#o prato deve começar e terminar com as mesmas letras do nome do animal\n#chickadee is bringing chocolate cake\n\n\ndef feast(animal_name, dish):\n\tfirst_letter_animal = animal_name[0:1]\n\tlast_letter_animal = animal_name[-1:]\n\tfirst_letter_dish = dish[0:1]\n\tlast_letter_dish = dish[-1:]\n\ttest = first_letter_dish == first_letter_animal and last_letter_dish == last_letter_animal\n\treturn test\n\n","repo_name":"thiago-abc/code-wars","sub_path":"animal_feast.py","file_name":"animal_feast.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"26395064262","text":"import gym\nimport robo_gym\nimport math\nimport numpy as np \nimport pytest\n\nur_models = [pytest.param('ur3', marks=pytest.mark.nightly), \\\n pytest.param('ur3e', marks=pytest.mark.nightly), \\\n pytest.param('ur5', marks=pytest.mark.commit), \\\n pytest.param('ur5e', marks=pytest.mark.nightly), \\\n pytest.param('ur10', marks=pytest.mark.nightly), \\\n pytest.param('ur10e', marks=pytest.mark.nightly), \\\n pytest.param('ur16e', marks=pytest.mark.nightly), \\\n]\n\n@pytest.fixture(scope='module', params=ur_models)\ndef env(request):\n env = gym.make('EmptyEnvironmentURSim-v0', ip='robot-servers', ur_model=request.param, fix_wrist_3=True)\n env.request_param = request.param\n yield env\n env.kill_sim()\n\n@pytest.mark.commit \ndef test_initialization(env):\n assert env.ur.model == env.request_param\n env.reset()\n done = False\n env.step([0,0,0,0,0])\n for _ in range(10):\n if not done:\n action = env.action_space.sample()\n observation, _, done, _ = env.step(action)\n\n assert env.observation_space.contains(observation)\n\n@pytest.mark.commit \n@pytest.mark.flaky(reruns=3)\ndef test_self_collision(env):\n collision_joint_config = {'ur3': [0.0, 0.0, -3.14, -1.77, 1.0], \\\n 'ur3e': [0.0, -1.88, 2.8, -0.75, -1.88], \\\n 'ur5': [0.0, -1.26, -3.14, 0.0, 0.0], \\\n 'ur5e': [0.0, -0.50, -3.14, 3.14, 0.0], \\\n 'ur10': [0.0, -1.5, 3.14, 0.0, 0.0], \\\n 'ur10e': [0.0, -0.15, -2.83, -2.51, 1.63], \\\n 'ur16e': [0.0, -1.15, 2.9, -0.19, 0.42]}\n env.reset()\n action = env.ur.normalize_joint_values(collision_joint_config[env.ur.model])\n done = False\n while not done:\n _, _, done, info = env.step(action) \n assert info['final_status'] == 'collision'\n\n@pytest.mark.commit \n@pytest.mark.flaky(reruns=3)\ndef test_collision_with_ground(env):\n collision_joint_config = {'ur3': [0.0, 2.64, -1.95, -2.98, 0.41], \\\n 'ur3e': [1.13, 1.88, -2.19, -3.43, 2.43], \\\n 'ur5': [0.0, 1.0, 1.8, 0.0, 0.0], \\\n 'ur5e': [0.0, 3.52, -2.58, 0.0, 0.0], \\\n 'ur10': [0.0, 1.0, 1.15, 0.0, 0.0], \\\n 'ur10e': [-2.14, -0.13, 0.63, -1.13, 1.63], \\\n 'ur16e': [0.0, -0.15, 1.32, 0.0, 1.63]}\n env.reset()\n action = env.ur.normalize_joint_values(collision_joint_config[env.ur.model])\n done = False\n while not done:\n _, _, done, info = env.step(action) \n assert 
info['final_status'] == 'collision'\n\n@pytest.mark.commit \ndef test_reset_joint_positions(env):\n joint_positions = [0.2, -2.5, 1.1, -2.0, -1.2, 1.2]\n\n state = env.reset(joint_positions = joint_positions)\n assert np.isclose(env.ur.normalize_joint_values(joint_positions), state[0:6], atol=0.1).all()\n\n\ntest_ur_fixed_joints = [\n ('EmptyEnvironmentURSim-v0', True, False, False, False, False, False, 'ur3'), # fixed shoulder_pan\n ('EmptyEnvironmentURSim-v0', False, True, False, False, False, False, 'ur3e'), # fixed shoulder_lift\n ('EmptyEnvironmentURSim-v0', False, False, False, False, False, True, 'ur5'), # fixed wrist_3\n ('EmptyEnvironmentURSim-v0', True, False, True, False, False, False, 'ur5e'), # fixed Base and Elbow\n ('EmptyEnvironmentURSim-v0', False, False, True, False, False, False, 'ur10'), # fixed elbow\n ('EmptyEnvironmentURSim-v0', False, False, False, True, False, False, 'ur10e'), # fixed wrist_1\n ('EmptyEnvironmentURSim-v0', False, False, False, False, True, False, 'ur16e'), # fixed wrist_2\n]\n\n@pytest.mark.nightly\n@pytest.mark.parametrize('env_name, fix_base, fix_shoulder, fix_elbow, fix_wrist_1, fix_wrist_2, fix_wrist_3, ur_model', test_ur_fixed_joints)\n@pytest.mark.flaky(reruns=3)\ndef test_fixed_joints(env_name, fix_base, fix_shoulder, fix_elbow, fix_wrist_1, fix_wrist_2, fix_wrist_3, ur_model):\n env = gym.make(env_name, ip='robot-servers', fix_base=fix_base, fix_shoulder=fix_shoulder, fix_elbow=fix_elbow, \n fix_wrist_1=fix_wrist_1, fix_wrist_2=fix_wrist_2, fix_wrist_3=fix_wrist_3, ur_model=ur_model)\n state = env.reset()\n initial_joint_positions = state[0:6]\n # Take 20 actions\n action = env.action_space.sample()\n for _ in range(20):\n state, _, _, _ = env.step(action)\n joint_positions = state[0:6]\n\n if fix_base:\n assert math.isclose(initial_joint_positions[0], joint_positions[0], abs_tol=0.05)\n if fix_shoulder:\n assert math.isclose(initial_joint_positions[1], joint_positions[1], abs_tol=0.05)\n if fix_elbow:\n assert math.isclose(initial_joint_positions[2], joint_positions[2], abs_tol=0.05)\n if fix_wrist_1:\n assert math.isclose(initial_joint_positions[3], joint_positions[3], abs_tol=0.05)\n if fix_wrist_2:\n assert math.isclose(initial_joint_positions[4], joint_positions[4], abs_tol=0.05)\n if fix_wrist_3:\n assert math.isclose(initial_joint_positions[5], joint_positions[5], abs_tol=0.05)\n\n env.kill_sim()","repo_name":"jr-robotics/robo-gym","sub_path":"tests/robo-gym/envs/ur/test_ur_base_env.py","file_name":"test_ur_base_env.py","file_ext":"py","file_size_in_byte":5235,"program_lang":"python","lang":"en","doc_type":"code","stars":351,"dataset":"github-code","pt":"60"} +{"seq_id":"21923429009","text":"# Compiles statistics on ConLL-U files\n# Usage: python stats.py -i -o -e \n\nimport os, argparse\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport re\n# Can't use `conllu` module because it doesn't support multiple heads\n\n# Defaults\nINPUT_DIR = 'conllu'\nOUTPUT_DIR = 'tex'\nENCODING = 'utf-8'\nMEANS_FILE = 'stats_means.tex'\nTOTALS_FILE = 'stats_totals.tex'\n\ndef get_stats(file, encoding=ENCODING, meta={}):\n stats = []\n init_sent = {'tokens': 0, 'sentences': 0}\n with open(file, 'r', encoding=encoding) as f:\n lines = f.readlines()\n sent = init_sent.copy()\n for line in lines:\n if not line.strip():\n sent['toks_per_sent'] = sent['tokens'] / sent['sentences']\n sent.update(meta)\n stats.append(sent)\n sent = init_sent.copy()\n elif line.startswith('#'):\n feat, value = line[1:].split('=', 1)\n 
sent[feat.strip()] = value.strip()\n elif line.split('\\t', 2)[1] == '':\n sent['sentences'] += 1\n elif line.split('\\t', 2)[1] == '.':\n sent['tokens'] += 1\n sent['sentences'] += 1\n else:\n sent['tokens'] += 1\n return pd.DataFrame(stats)\n\ndef main(corpus=INPUT_DIR, encoding=ENCODING):\n stats = pd.DataFrame()\n splits = [f for f in os.listdir(corpus) if os.path.isdir(os.path.join(corpus, f))]\n for split in splits:\n files = [f for f in os.listdir(os.path.join(corpus, split)) if f.endswith('.conllu')]\n for file in files:\n order, layer = re.match(r'(\\d+)-(\\w+)\\.conllu', file).groups()\n order = int(order)\n path = os.path.join(corpus, split, file)\n meta = {'split': split, 'file': file, 'layer': layer, 'order': order}\n file_stats = get_stats(path, encoding, meta)\n stats = pd.concat([stats, file_stats], ignore_index=True)\n return stats\n\n# Execute from command line\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Compiles statistics on ConLL-U files')\n parser.add_argument('-i', '--input', help='Directory where .conllu files reside', default=INPUT_DIR)\n parser.add_argument('-o', '--output', help='Directory where to save the statistics in LaTeX', default=OUTPUT_DIR)\n parser.add_argument('-e', '--encoding', help='Character encoding (same for all input and output files)', default=ENCODING)\n args = parser.parse_args()\n\n print(f\"Compiling statistics for `{args.input}`...\")\n stats = main(args.input, args.encoding)\n stats['Layer'] = stats['file'].str.split('-').str[1].str.split('.').str[0]\n stats = stats[['Layer', 'tokens', 'sentences', 'toks_per_sent', 'order']]\n \n stats_mean = stats.groupby(['Layer']).mean().round(1).sort_values(by='order')\n stats_mean.drop(['order'], axis=1, inplace=True)\n stats_mean.columns = ['Tokens', 'Sentences', 'Tokens/Sentence']\n stats_mean.reset_index(inplace=True)\n stats_mean.style.to_latex(os.path.join(args.output, MEANS_FILE))\n print(f\" saved `{args.output}/{MEANS_FILE}`\")\n\n stats_total = stats.groupby(['Layer']).sum().sort_values(by='order')\n stats_total.drop(['order', 'toks_per_sent'], axis=1, inplace=True)\n stats_total.columns = ['Tokens', 'Sentences']\n stats_total.reset_index(inplace=True)\n stats_total.style.to_latex(os.path.join(args.output, TOTALS_FILE))\n print(f\" saved `{args.output}/{TOTALS_FILE}`\")\n\n# FOR DEV USE ONLY\n# class Args():\n# input = '../conllu'\n# output = '../tex'\n# encoding = 'utf-8'\n# args = Args()","repo_name":"mille-s/Mod-D2T","sub_path":"scripts/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":3663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"41275646612","text":"import unittest\nfrom check_eleven import check_eleven_\n\nclass TestCheckEleven(unittest.TestCase):\n\n def test_obvious(self):\n y = 0\n divisor = 11\n while y < 25000:\n #if there is no remainder after division\n if y % divisor == 0:\n #it is divisible by ___ and should be rejected\n self.assertTrue(check_eleven_(str(y)))\n else:\n #otherwise it should not be eliminated, (aka remain a candidate)\n self.assertFalse(check_eleven_(str(y)))\n y+=1\n\n def test_long(self):\n self.assertFalse(check_eleven_('123456789100987654321'))\n self.assertFalse(check_eleven_('1234567891234567899'))\n self.assertTrue(check_eleven_('135802468010'))\n self.assertTrue(check_eleven_('1358024680358024679'))\n \n\nif __name__ == '__main__':\n 
unittest.main()\n","repo_name":"PrimeNumbers/primes_search","sub_path":"test_check_eleven.py","file_name":"test_check_eleven.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"}
{"seq_id":"8553558725","text":"# Definition for a binary tree node.\nclass TreeNode(object):\n    def __init__(self, x):\n        self.val = x\n        self.left = None\n        self.right = None\n\n\nclass Solution(object):\n    \"\"\"\nThe binary-tree structure TreeNode can also represent a singly linked list\n(left set to None, right pointing to the next list node). Implement a method\nthat converts a binary search tree into a singly linked list which still\nsatisfies the BST ordering. The conversion must be done in place, i.e. by\nmodifying the original tree directly.\nReturn the head node of the converted singly linked list.\nNote: this problem is slightly modified from the original.\nInput: [4,2,5,1,3,null,6,0]\nOutput: [0,null,1,null,2,null,3,null,4,null,5,null,6]\n        4\n    2       5\n  1   3   N   6\n0\nHint:\nThe number of nodes will not exceed 100000.\nLink: https://leetcode-cn.com/problems/binode-lcci\n    \"\"\"\n    def convertBiNode(self, root):\n        \"\"\"\n        :type root: TreeNode\n        :rtype: TreeNode\n        \"\"\"\n        if not root:\n            return None\n\n        def dfs():  # iterative in-order traversal\n            que, p = [], root\n            while que or p:\n                while p:\n                    que.append(p)\n                    p = p.left\n                p = que.pop()\n                rec.append(p)\n                p = p.right\n\n        rec = []\n        dfs()\n        for i in range(len(rec) - 1):\n            rec[i].left = None\n            rec[i].right = rec[i + 1]\n        rec[-1].left = rec[-1].right = None\n        return rec[0]\n\n\ndef create(nums):\n    if not nums:\n        return None\n    root = TreeNode(nums.pop(0))\n    que = [root]\n    while que:\n        node = que.pop(0)\n        left = nums.pop(0) if nums else None\n        right = nums.pop(0) if nums else None\n        node.left = TreeNode(left) if left is not None else None\n        node.right = TreeNode(right) if right is not None else None\n        if node.left:\n            que.append(node.left)\n        if node.right:\n            que.append(node.right)\n    return root\n\n\ndef main():\n    nums = [4, 2, 5, 1, 3, None, 6, 0]\n    root = create(nums)\n    test = Solution()\n    ret = test.convertBiNode(root)\n    print(ret)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"Jackyzzk/Cracking-the-Coding-Interview-6","sub_path":"py-程序员面试金典-面试题 17.12. BiNode.py","file_name":"py-程序员面试金典-面试题 17.12. BiNode.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
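The record above buffers every node in `rec` before rewiring, which costs O(n) extra space on top of the recursion. As a point of comparison, here is a minimal sketch (my addition, not part of the repository) that rewires the links during a single recursive in-order walk, reusing the record's `TreeNode`; the `dummy` sentinel is a hypothetical helper node sitting just ahead of the new head.

```python
class SolutionInPlace(object):
    def convertBiNode(self, root):
        dummy = TreeNode(0)   # sentinel; dummy.right becomes the head
        prev = [dummy]        # one-element list so the closure can rebind it

        def inorder(node):
            if not node:
                return
            inorder(node.left)
            node.left = None          # sever the left link, as required
            prev[0].right = node      # append the node to the growing list
            prev[0] = node
            inorder(node.right)       # node.right is read before it is rewired

        inorder(root)
        return dummy.right
```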
{"seq_id":"11516391888","text":"# -*- coding: utf-8 -*-\n# Count the Number of Nodes in Binary Tree using DFS and BFS\n\n# Take the root node: 1 (for the root) + all nodes on the left + all nodes on the right = the total.\n# By this point it should feel familiar: any task that keeps drilling deeper to search or\n# count like this can be done with DFS, i.e. by descending and trying repeatedly.\ndef countWithDfs(root):\n    if root is None:\n        return 0\n    return 1 + countWithDfs(root.left) + countWithDfs(root.right)\n\n\n# BFS simply means using a queue - nothing more to it\ndef countWithBfs(root):\n    if root is None:\n        return 0\n    q = [root]\n    ans = 0\n    while len(q) > 0:\n        p = q.pop(0)\n        ans += 1\n        if p.left:\n            q.append(p.left)\n        if p.right:\n            q.append(p.right)\n    return ans\n","repo_name":"Ander456/py_program","sub_path":"day34.py","file_name":"day34.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"40024514237","text":"from typing import List\n\n\nclass Solution:\n    def nearestValidPoint(self, x: int, y: int, points: List[List[int]]) -> int:\n        answer = -1\n        distance = float(\"inf\")\n        for i, (a, b) in enumerate(points):\n            if a == x or b == y:\n                cur = abs(a - x) + abs(b - y)\n                if cur < distance:\n                    distance = cur\n                    answer = i\n        return answer\n","repo_name":"yqkcn/leetcode","sub_path":"find-nearest-point-that-has-the-same-x-or-y-coordinate.py","file_name":"find-nearest-point-that-has-the-same-x-or-y-coordinate.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"}
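`nearestValidPoint` keeps the first index that attains the minimum Manhattan distance, because the comparison is strict (`cur < distance`). A small usage check (my addition) with LeetCode 1779's published first sample: `[2, 4]` and `[4, 4]` are both valid and tied at distance 1, so the smaller index wins.

```python
s = Solution()
print(s.nearestValidPoint(3, 4, [[1, 2], [3, 1], [2, 4], [2, 3], [4, 4]]))  # -> 2
```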
{"seq_id":"4614077849","text":"# Given a string containing digits from 2-9 inclusive, return all possible letter combinations that the number could represent. Return the answer in any order.\n# A mapping of digit to letters (just like on the telephone buttons) is given below. Note that 1 does not map to any letters.\n# https://leetcode-cn.com/problems/letter-combinations-of-a-phone-number/description/\nfrom typing import List\n\n\nclass Solution:\n    def letterCombinations(self, digits: str) -> List[str]:\n        phoneMap = {\n            '2': ['a', 'b', 'c'],\n            '3': ['d', 'e', 'f'],\n            '4': ['g', 'h', 'i'],\n            '5': ['j', 'k', 'l'],\n            '6': ['m', 'n', 'o'],\n            '7': ['p', 'q', 'r', 's'],\n            '8': ['t', 'u', 'v'],\n            '9': ['w', 'x', 'y', 'z'],\n        }\n        if not digits:\n            return []\n\n        res = []\n\n        def backtrack(combination, nextdigits):\n            if len(combination) == len(digits):\n                res.append(combination)\n            else:\n                for letter in phoneMap[nextdigits[0]]:\n                    backtrack(combination + letter, nextdigits[1:])\n\n        backtrack(\"\", digits)\n        return res\n","repo_name":"Aprilluabsinthe/leet","sub_path":"python/17_Letter_Combinations_of_a_Phone_Number.py","file_name":"17_Letter_Combinations_of_a_Phone_Number.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"46615299463","text":"# http://www.gsm-rainbow.ru/sites/default/files/l80_gps_protocol_specification_v1.0.pdf\nimport re\nimport io\nimport sys\nimport time\nimport serial\nimport logging\nimport datetime\nimport threading\nimport subprocess\n# logging.basicConfig(level=logging.DEBUG)\n\n\n# Test these with `echo -e \"\\$PMTK161,0*28\\r\\n\" > /dev/ttyAMA0`\nPMTK_STANDBY = '$PMTK161,0*28\\r\\n'\nPMTK_SET_PERIODIC_MODE_NORMAL = '$PMTK225,0*2B\\r\\n'\nPMTK_SET_PERIODIC_MODE_AUTO_LOCATE_STANDBY = '$PMTK225,8*23\\r\\n'\nPMTK_SET_PERIODIC_MODE_SLEEP = '$PMTK225,2,3000,12000,18000,72000*15\\r\\n'\nPMTK_LOCUS_QUERY_STATUS = '$PMTK183*38\\r\\n'\nPMTK_LOCUS_ERASE_FLASH = '$PMTK184,1*22\\r\\n'\nPMTK_LOCUS_STOP_LOGGER = '$PMTK185,1*23\\r\\n'\nPMTK_LOCUS_START_LOGGER = '$PMTK185,0*22\\r\\n'\nPMTK_Q_LOCUS_DATA_FULL = '$PMTK622,0*28\\r\\n'\nPMTK_Q_LOCUS_DATA_PARTIAL = '$PMTK622,1*29\\r\\n'\n\n\n# setup default GPS device (different on Raspberry Pi 3 and above)\ndef get_rpi_revision():\n    \"\"\"Returns the version number from the revision line.\"\"\"\n    for line in open(\"/proc/cpuinfo\"):\n        if \"Revision\" in line:\n            return re.sub('Revision\\t: ([a-z0-9]+)\\n', r'\\1', line)\n\n\nrpi_revision = get_rpi_revision()\nif (rpi_revision and\n        (rpi_revision != 'Beta') and\n        (int('0x'+rpi_revision, 16) >= 0xa02082)):\n    # RPi 3 and above\n    DEFAULT_GPS_DEVICE = '/dev/ttyS0'\nelse:\n    # RPi 2 and below\n    DEFAULT_GPS_DEVICE = '/dev/ttyAMA0'\n\n\nclass NMEAPacketNotFoundError(Exception):\n    pass\n\n\nclass DataInvalidError(Exception):\n    pass\n\n\nclass LOCUSQueryDataError(Exception):\n    pass\n\n\nclass PMTKACKError(Exception):\n    # Raised by check_pmtk_ack below; the class was referenced but never\n    # defined in the original file.\n    pass\n\n\nclass L80GPS(object):\n    \"\"\"Thread that reads a stream of L80 GPS protocol lines and stores the\n    information. Methods may raise exceptions if data is invalid (usually\n    because of a poor GPS reception - try moving the GPS module outside).\n    \"\"\"\n\n    def __init__(self, device=DEFAULT_GPS_DEVICE):\n        self.device_tx_rx = serial.Serial(device,\n                                          baudrate=9600,\n                                          bytesize=8,\n                                          parity='N',\n                                          stopbits=1,\n                                          timeout=0.5,\n                                          rtscts=0)\n\n    ####################################################################\n    # REMOVE PROPERTIES\n    ####################################################################\n    def _property_deprecated_warning(self, p):\n        print('WARNING: properties will be removed in next major version. '\n              'Use `l80gps.get_{}()` instead of `l80gps.{}`'.format(p, p),\n              file=sys.stderr)\n\n    @property\n    def gprmc(self):\n        self._property_deprecated_warning('gprmc')\n        return self.get_gprmc()\n\n    @property\n    def gpvtg(self):\n        self._property_deprecated_warning('gpvtg')\n        return self.get_gpvtg()\n\n    @property\n    def gpgga(self):\n        self._property_deprecated_warning('gpgga')\n        return self.get_gpgga()\n\n    @property\n    def gpgsa(self):\n        self._property_deprecated_warning('gpgsa')\n        return self.get_gpgsa()\n\n    @property\n    def gpgsv(self):\n        self._property_deprecated_warning('gpgsv')\n        return self.get_gpgsv()\n\n    @property\n    def gpgll(self):\n        self._property_deprecated_warning('gpgll')\n        return self.get_gpgll()\n\n    @property\n    def gptxt(self):\n        self._property_deprecated_warning('gptxt')\n        return self.get_gptxt()\n    ####################################################################\n\n    def get_gprmc(self):\n        \"\"\"Returns the latest GPRMC message.\n\n        :raises: DataInvalidError\n        \"\"\"\n        pkt = self.get_nmea_pkt('GPRMC')\n        gprmc_dict, checksum = gprmc_as_dict(pkt)\n        if gprmc_dict['data_valid'] == \"A\":\n            return gprmc_dict\n        else:\n            raise DataInvalidError(\"Indicated by data_valid field.\")\n\n    def get_gpvtg(self):\n        \"\"\"Returns the latest GPVTG message.\"\"\"\n        pkt = self.get_nmea_pkt('GPVTG')\n        gpvtg_dict, checksum = gpvtg_as_dict(pkt)\n        return gpvtg_dict\n\n    def get_gpgga(self):\n        \"\"\"Returns the latest GPGGA message.\"\"\"\n        pkt = self.get_nmea_pkt('GPGGA')\n        gpgga_dict, checksum = gpgga_as_dict(pkt)\n        return gpgga_dict\n\n    def get_gpgsa(self):\n        \"\"\"Returns the latest GPGSA message.\"\"\"\n        pkt = self.get_nmea_pkt('GPGSA')\n        gpgsa_dict, checksum = gpgsa_as_dict(pkt)\n        return gpgsa_dict\n\n    def get_gpgsv(self):\n        \"\"\"Returns the latest GPGSV message.\"\"\"\n        pkt = self.get_nmea_pkt('GPGSV')\n        gpgsv_dict, checksum = gpgsv_as_dict(pkt)\n        return gpgsv_dict\n\n    def get_gpgll(self):\n        \"\"\"Returns the latest GPGLL message.\n\n        :raises: DataInvalidError\n        \"\"\"\n        pkt = self.get_nmea_pkt('GPGLL')\n        gpgll_dict, checksum = gpgll_as_dict(pkt)\n        if gpgll_dict['data_valid'] == \"A\":\n            return gpgll_dict\n        else:\n            raise DataInvalidError(\"Indicated by data_valid field.\")\n\n    def get_gptxt(self):\n        \"\"\"Returns the latest GPTXT message.\"\"\"\n        pkt = self.get_nmea_pkt('GPTXT')\n        gptxt_dict, checksum = gptxt_as_dict(pkt)\n        return gptxt_dict\n\n    def check_pmtk_ack(self):\n        '''Waits for and validates a PMTK_ACK. Raises an exception if\n        PMTK_ACK reports error.\n        '''\n        pkt = self.get_nmea_pkt('$PMTK001')\n        # Strip the checksum before splitting the fields (the original called\n        # .split() directly on the list returned by the first split).\n        ack_data = pkt.split('*')[0].split(',')\n        flag = int(ack_data[2])\n        if flag == 0:\n            raise PMTKACKError('Invalid packet')\n        elif flag == 1:\n            raise PMTKACKError('Unsupported packet type')\n        elif flag == 2:\n            raise PMTKACKError('Valid packet but action failed')\n        elif flag == 3:\n            return  # success!\n        else:\n            raise PMTKACKError('Unknown flag in ack.')\n\n    def standby(self):\n        '''Puts the GPS into standby mode.'''\n        self.send_nmea_pkt(PMTK_STANDBY)\n        self.check_pmtk_ack()\n\n    def always_locate(self):\n        '''Turns on AlwaysLocate(TM). Turn off with `set_periodic_normal`.'''\n        self.send_nmea_pkt(PMTK_SET_PERIODIC_MODE_AUTO_LOCATE_STANDBY)\n        self.check_pmtk_ack()\n\n    def sleep(self):\n        '''Puts the GPS into sleep mode.
Wake with `set_periodic_normal`.'''\n self.send_nmea_pkt(PMTK_SET_PERIODIC_MODE_SLEEP)\n self.check_pmtk_ack()\n\n def set_periodic_normal(self):\n '''Sets the periodic mode to normal.'''\n self.send_nmea_pkt(PMTK_SET_PERIODIC_MODE_NORMAL)\n self.check_pmtk_ack()\n\n def locus_query(self):\n \"\"\"Returns the status of the locus logger.\"\"\"\n self.send_nmea_pkt(PMTK_LOCUS_QUERY_STATUS)\n pmtklog_dict, checksum = pmtklog_as_dict(self.get_nmea_pkt('PMTKLOG'))\n return pmtklog_dict\n\n def locus_erase(self):\n \"\"\"Erases the internal log.\"\"\"\n self.send_nmea_pkt(PMTK_LOCUS_ERASE_FLASH)\n\n def locus_start(self):\n \"\"\"Starts the logger.\"\"\"\n self.send_nmea_pkt(PMTK_LOCUS_START_LOGGER)\n\n def locus_stop(self):\n \"\"\"Stops the logger.\"\"\"\n self.send_nmea_pkt(PMTK_LOCUS_STOP_LOGGER)\n\n def locus_query_data(self, raw=False, num_attempts=5):\n \"\"\"Returns a list of parsed LOCUS log data.\n\n :param raw: Return raw bytearray instead of list of dict's.\n :type raw: boolean\n :param num_attempts: Number of attempts to get raw data (it sometimes\n fails)\n :type num_attempts: int\n :rasies: LOCUSQueryDataError\n \"\"\"\n attempt = 0\n success = False\n while success == False and attempt < num_attempts:\n try:\n data = self._locus_query_data_raw()\n except NMEAPacketNotFoundError:\n attempt += 1\n else:\n success = True\n if not success:\n raise LOCUSQueryDataError(\n \"Max number of attempts ({}) reached.\".format(num_attempts))\n elif raw:\n return data\n else:\n return parse_locus_data(data)\n\n def _locus_query_data_raw(self):\n \"\"\"Returns a byte array of the log data (you can parse this later).\n\n Example packets returned:\n\n\n Data: $PMTKLOX,1,0,0100010B,1F000000,0F000000,0000100B,00000000,\n 00000000,00000003,FFFFFFFF,FFFFFFFF,FFFFFFFF,FFFFFFFF,\n FFFFFFFF,FFFFFFFF,FFFFFFFF,FFFFFFFF,00FC8C1C,0DE9E753,\n 02A54356,42777508,C0A300C9,1CE9E753,02A14356,42397508,\n C0A30092*2E\n\n \"\"\"\n locus_data_ptn_start = 'PMTKLOX,0'\n locus_data_ptn = 'PMTKLOX,1'\n locus_data_ptn_end = 'PMTKLOX,2'\n self.send_nmea_pkt(PMTK_Q_LOCUS_DATA_PARTIAL)\n\n # get the start packet (not that we do anything with it, the returned\n # number of packets doesn't even equal what this packet says it will!)\n pkt = self.get_nmea_pkt(locus_data_ptn_start)\n pkt, checksum = pkt.split('*') # data is already confirmed valid\n message_id, type, num_pkts = pkt.split(',')\n\n # get all the data packets until the end packet pattern if found\n databytes = bytearray()\n for i in range(int(num_pkts)):\n pkt = self.get_nmea_pkt(locus_data_ptn)\n pkt, checksum = pkt.split('*') # data is already confirmed valid\n pkt_list = pkt.split(',')\n # do some sanity checking\n message_id, lox_type, index = pkt_list[:3]\n # print(\"Hi\", message_id, lox_type, index)\n assert message_id == '$PMTKLOX'\n assert lox_type == '1'\n assert int(index) == i\n # put the data into bytes\n for hexstr in pkt_list[3:]:\n databytes += hexstr2bytearray(hexstr)\n # end pkt\n pkt = self.get_nmea_pkt(locus_data_ptn_end)\n return databytes\n\n def get_nmea_pkt(self, pattern):\n \"\"\"Returns the next valid NMEA string which contains the pattern\n provided. 
For example:\n\n >>> gps.get_nmea_pkt('GPRMC')\n '$GPRMC,013732.000,A,3150.7238,N,11711.7278,E,0.00,0.00,220413,,,A*68'\n\n \"\"\"\n pattern_bytes = bytes(pattern, 'utf-8')\n while True:\n line = self.device_tx_rx.readline()\n # logging.debug(\"L80GPS:readline returned - \"+str(line))\n if line == b'':\n raise NMEAPacketNotFoundError(\n \"Timed out before valid '{}'.\".format(pattern))\n elif not l80gps_checksum_is_valid(line):\n continue\n elif pattern_bytes in line:\n return str(line, 'utf-8')\n\n def send_nmea_pkt(self, pkt):\n \"\"\"Write pkt to the serial port.\"\"\"\n self.device_tx_rx.write(bytes(pkt, 'utf-8'))\n\n\ndef parse_locus_data(data, format='basic'):\n \"\"\"Returns the LOCUS data in a sensible structure according to the format.\"\"\"\n # assuming basic format\n # utc (4), fix (1), latitude (4), longitude (4), altitude (2), checksum (1)\n # sample data\n data.reverse()\n parsed_data = []\n while True:\n try:\n data_bytes = [data.pop() for i in range(16)]\n except IndexError:\n return parsed_data\n else:\n utc = parse_long(data_bytes[:4])\n checksum = data_bytes[15]\n if not checksum_is_valid(data_bytes[:15], checksum):\n # invalid checksum, this datum is useless\n continue\n elif utc >= 0xffffffff:\n # data is empty, don't add to parsed_data\n continue\n else:\n parsed_data.append(\n {'utc': datetime.datetime.fromtimestamp(utc),\n 'fix': data_bytes[4],\n 'latitude': parse_float(data_bytes[5:9]),\n 'longitude': parse_float(data_bytes[9:13]),\n 'altitude': parse_int(data_bytes[13:15]),\n 'checksum': checksum})\n\n\ndef gprmc_as_dict(gprmc_str):\n \"\"\"Returns the GPRMC as a dictionary and the checksum.\n\n >>> gprmc_as_dict('$GPRMC,013732.000,A,3150.7238,N,11711.7278,E,0.00,0.00,220413,,,A*68')\n ({'message_id': 'GPRMC',\n 'utc': 0.0,\n 'data_valid': 'A',\n 'latitude': 3150.7238,\n 'ns': 0.0,\n 'longitude': 0.1,\n 'ew': 'A',\n 'speed':,\n 'cog':,\n 'date':,\n 'mag_var':,\n 'eq':,\n 'pos_mode':},\n 0C)\n \"\"\"\n gprmc, checksum = gprmc_str.split('*')\n message_id, utc, data_valid, latitude, ns, longitude, ew, speed, cog, \\\n date, mag_var, eq, pos_mode = gprmc.split(',')\n utc = 0.0 if utc == '' else utc\n latitude = 0.0 if latitude == '' else latitude\n longitude = 0.0 if longitude == '' else longitude\n gprmc_dict = {'message_id': message_id,\n 'utc': float(utc),\n 'data_valid': data_valid,\n 'latitude': dm2d(float(latitude), ns),\n 'ns': ns,\n 'longitude': dm2d(float(longitude), ew),\n 'ew': ew,\n 'speed': speed,\n 'cog': cog,\n 'date': date,\n 'mag_var': mag_var,\n 'eq': eq,\n 'pos_mode': pos_mode}\n return (gprmc_dict, checksum)\n\ndef gpvtg_as_dict(gpvtg_str):\n \"\"\"Returns the GPVTG as a dictionary and the checksum.\n\n >>> gpvtg_as_dict('$GPVTG,0.0,T,,M,0.0,N,0.1,K,A*0C')\n ({'message_id': 'GPVTG',\n 'cogt': 0.0,\n 't': 'A',\n 'cogm': '',\n 'speedn': 0.0,\n 'speedk': 0.1,\n 'pos_mode': 'A'},\n 0C)\n \"\"\"\n gpvtg, checksum = gpvtg_str.split('*')\n message_id, cogt, t, cogm, m, speedn, n, speedk, k, pos_mode = \\\n gpvtg.split(',')\n gpvtg_dict = {'message_id': message_id,\n 'cogt': cogt,\n 'cogm': cogm,\n 'speedn': float(speedn),\n 'speedk': float(speedk),\n 'pos_mode': pos_mode}\n return (gpvtg_dict, checksum)\n\ndef gpgga_as_dict(gpgga_str):\n \"\"\"Returns the GPGGA as a dictionary and the checksum.\n\n Returns latitude and longitude as degrees.\n\n >>> gpgga_as_dict('$GPGGA,015540.000,A,3150.68378,N,11711.93139,E,1,17,0.6,0051.6,M,0.0,M,,*58')\n ({'message_id': 'GPGGA',\n 'utc': 015540.000,\n 'latitude': 3150.68378,\n 'ns': 'N',\n 'longitude': 11711.93139,\n 
'ew': 'E',\n 'fix': 1,\n 'number_of_sv': 17,\n 'hdop': 0.6,\n 'altitude': 0051.6,\n 'geoid_seperation': 0.0,\n 'dgps_age': '',\n 'dgps_station_id': ''},\n 77)\n \"\"\"\n gpgga, checksum = gpgga_str.split('*')\n print(gpgga_str)\n message_id, utc, latitude, ns, longitude, ew, fix, \\\n number_of_sv, hdop, altitude, m, geoid_seperation, m, dgps_age, \\\n dgps_station_id = gpgga.split(',')\n utc = 0.0 if utc == '' else utc\n latitude = 0.0 if latitude == '' else latitude\n longitude = 0.0 if longitude == '' else longitude\n gpgga_dict = {'message_id': message_id,\n 'utc': float(utc),\n 'latitude': dm2d(float(latitude), ns),\n 'ns': ns,\n 'longitude': dm2d(float(longitude), ew),\n 'ew': ew,\n 'fix': fix,\n 'number_of_sv': number_of_sv,\n 'hdop': hdop,\n 'altitude': altitude,\n 'geoid_seperation': geoid_seperation,\n 'dgps_age': dgps_age,\n 'dgps_station_id': dgps_station_id}\n return (gpgga_dict, checksum)\n\n\ndef gpgsa_as_dict(gpgsa_str):\n \"\"\"Returns the GPGSA as a dictionary and the checksum.\n\n >>> gpgsa_as_dict('$GPGSA,A,3,14,06,16,31,23,,,,,,,,1.66,1.42,0.84*0F')\n ({'message_id': 'GPGSA',\n 'mode': 'A',\n 'fix': 3,\n 'satellites_on_channel': [14, 06, 16, 31, 23, 0, 0, 0, 0, 0, 0, 0],\n 'pdop': 1.66,\n 'hdop': 1.42,\n 'vdop': 0.84},\n 77)\n \"\"\"\n gpgsa, checksum = gpgsa_str[1:].split(\"*\") # remove `$` split *\n gpgsa_data = gpgsa.split(',')\n message_id, mode, fix = gpgsa_data[:3]\n satellites_on_ch = gpgsa_data[3:-3]\n pdop, hdop, vdop = gpgsa_data[-3:]\n # set all blank channels to 0\n satellites_on_ch = map(lambda s: 0 if s == '' else s, satellites_on_ch)\n gpgsa_dict = {'message_id': message_id,\n 'mode': mode,\n 'fix': fix,\n 'satellites_on_channel': satellites_on_ch,\n 'pdop': pdop,\n 'hdop': hdop,\n 'vdop': vdop}\n return (gpgsa_dict, checksum)\n\ndef gpgsv_as_dict(gpgsv_str):\n \"\"\"Returns the GPGSV as a dictionary and the checksum.\n\n >>> gpgsv_as_dict('$GPGSV,3,1,12,01,05,060,18,02,17,259,43,04,56,287,28,09,08,277,28*77')\n ({'message_id': 'GPGSV',\n 'num_messages': 3,\n 'sequence_num': 1,\n 'satellites_in_view': 12,\n 'satellite': [{'id': 01,\n 'elevation': 05,\n 'azimuth': 060,\n 'snr': 18},\n {'id': 02,\n 'elevation': 17,\n 'azimuth': 259,\n 'snr': 43},\n {'id': 04,\n 'elevation': 56,\n 'azimuth': 287,\n 'snr': 28},\n {'id': 09,\n 'elevation': 08,\n 'azimuth': 277,\n 'snr': 28}]},\n 77)\n \"\"\"\n # TODO varaible length string depending on number of satellites\n gpgsv, checksum = gpgsv_str[1:].split(\"*\") # remove `$` split *\n message_id, num_messages, sequence_num, satellites_in_view, \\\n satellite_1_id, satellite_1_elevation, satellite_1_azimuth, \\\n satellite_1_snr, satellite_2_id, satellite_2_elevation, \\\n satellite_2_azimuth, satellite_2_snr, satellite_3_id, \\\n satellite_3_elevation, satellite_3_azimuth, satellite_3_snr, \\\n satellite_4_id, satellite_4_elevation, satellite_4_azimuth, \\\n satellite_4_snr = gpgsv.split(\",\")\n gpgsv_dict = {'message_id': message_id,\n 'num_messages': num_messages,\n 'sequence_num': sequence_num,\n 'satellites_in_view': satellites_in_view,\n 'satellite': [{'id': satellite_1_id,\n 'elevation': satellite_1_elevation,\n 'azimuth': satellite_1_azimuth,\n 'snr': satellite_1_snr},\n {'id': satellite_2_id,\n 'elevation': satellite_2_elevation,\n 'azimuth': satellite_2_azimuth,\n 'snr': satellite_2_snr},\n {'id': satellite_3_id,\n 'elevation': satellite_3_elevation,\n 'azimuth': satellite_3_azimuth,\n 'snr': satellite_3_snr},\n {'id': satellite_4_id,\n 'elevation': satellite_4_elevation,\n 'azimuth': satellite_4_azimuth,\n 
'snr': satellite_4_snr}]}\n return (gpgsv_dict, checksum)\n\n\ndef gpgll_as_dict(gpgll_str):\n \"\"\"Returns the GPGLL as a dictionary and the checksum.\n\n >>> gpgll_as_dict('$GPGLL,3110.2908,N,12123.2348,E,041139.000,A,A*59')\n ({'message_id': GPGLL,\n 'latitude': 3110.2908,\n 'ns': 'N',\n 'longitude': 12123.2348,\n 'ew': 'E',\n 'utc': 041139.000,\n 'data_valid': 'A',\n 'pos_mode': 'A'},\n 59)\n\n \"\"\"\n gpgll, checksum = gpgll_str[1:].split(\"*\") # remove `$` split *\n message_id, latitude, ns, longitude, ew, utc, data_valid, pos_mode = \\\n gpgll.split(\",\")\n latitude = 0.0 if latitude == '' else latitude\n longitude = 0.0 if longitude == '' else longitude\n utc = 0.0 if utc == '' else utc\n gpgll_dict = ({\"message_id\": message_id,\n \"latitude\": dm2d(float(latitude), ns),\n \"ns\": ns,\n \"longitude\": dm2d(float(longitude), ew),\n \"ew\": ew,\n \"utc\": float(utc),\n \"data_valid\": data_valid,\n \"pos_mode\": pos_mode},\n checksum)\n # logging.debug(\"L80GPS:Converting '{}'\\ninto: {}\".format(gpgll_str, gpgll_dict))\n return gpgll_dict\n\n\ndef gptxt_as_dict(self):\n \"\"\"\n GPTXT Message ID\n XX Total number of messages in this transmission. (01~99)\n YY Message number in this transmission. (01~99)\n ZZ\n Severity of the message\n ‘00’= ERROR\n ‘01’= WARNING\n ‘02’= NOTICE\n ‘07’= USER\n Text messasage\n \"\"\"\n # TODO fix this\n pass\n\n\ndef pmtklog_as_dict(pmtklog_str):\n \"\"\"Returns the PMTKLOG as a dictionary and the checksum.\n\n >>> pmtklog_as_dict('$PMTKLOG,456,0,11,31,2,0,0,0,3769,46*48')\n ({'message_id': 'PMTKLOG',\n 'serial': 456,\n 'type': 0,\n 'mode': 11,\n 'content': 31,\n 'interval': 2,\n 'distance': 0,\n 'speed': 0,\n 'status': 0,\n 'number': 3769,\n 'percent': 46},\n 48)\n\n \"\"\"\n pmtklog, checksum = pmtklog_str[1:].split('*') # remove `$` split *\n message_id, serial, type, mode, content, interval, distance, speed,\\\n status, number, percent = pmtklog.split(',')\n pmtklog_dict = ({'message_id': 'PMTKLOG',\n 'serial': serial,\n 'type': type,\n 'mode': int(mode, 16),\n 'content': content,\n 'interval': interval,\n 'distance': distance,\n 'speed': speed,\n 'status': status,\n 'number': number,\n 'percent': percent},\n checksum)\n return pmtklog_dict\n\n\ndef l80gps_checksum_is_valid(gps_str):\n \"\"\"Returns True if the checksum is valid in an GPS L80 protocol line.\n\n !!!! This method assumes gps_str is a byte string. 
!!!!\n\n    \"\"\"\n    if gps_str[0] != ord(b'$'):\n        return False\n    try:\n        gpgll, checksum = gps_str[1:].split(b'*')  # remove `$` split *\n    except:\n        # logging.debug(\"L80GPS:Invalid GPS str\")\n        # logging.debug(gps_str)\n        return False\n    else:\n        return checksum_is_valid(gpgll, int(checksum, 16))\n\n\ndef checksum_is_valid(data_bytes, checksum):\n    \"\"\"Returns True if the XOR of all the data bytes equals the checksum.\n    \"\"\"\n    check = 0\n    for b in data_bytes:\n        check ^= b\n    return check == checksum\n\n\ndef hexstr2bytearray(s):\n    \"\"\"Converts a string of hex characters to bytes.\n\n    'DEADBEEF'\n\n    becomes\n\n    bytearray(b'\\\\\\\\xde\\\\\\\\xad\\\\\\\\xbe\\\\\\\\xef')\n\n    \"\"\"\n    # split the string into a list of strings, each two characters long\n    # then turn each two character string into an int (base 16) and put\n    # that array into a bytearray\n    n = 2\n    return bytearray([int(s[i:i+n], 16) for i in range(0, len(s), n)])\n\n\ndef parse_float(bytes):\n    \"\"\"Converts four bytes into a float.\"\"\"\n    longValue = parse_long(bytes)\n    exponent = ((longValue >> 23) & 0xff)  # float\n    exponent -= 127.0\n    exponent = pow(2,exponent)\n    mantissa = (longValue & 0x7fffff)\n    mantissa = 1.0 + (mantissa/8388607.0)\n    floatValue = mantissa * exponent\n    if ((longValue & 0x80000000) == 0x80000000):\n        floatValue = -floatValue\n    return floatValue\n\n\ndef parse_long(bytes):\n    \"\"\"Converts four bytes into a long integer.\"\"\"\n    assert len(bytes) == 4\n    return ((0xFF & bytes[3]) << 24 |\n            (0xFF & bytes[2]) << 16 |\n            (0xFF & bytes[1]) << 8 |\n            (0xFF & bytes[0]))\n\n\ndef parse_int(bytes):\n    \"\"\"Converts two bytes into an integer.\"\"\"\n    assert len(bytes) == 2\n    number = ((0xFF & bytes[1]) << 8 | (0xFF & bytes[0]))\n    return number\n\n\ndef dm2d(degrees_and_minutes, direction):\n    \"\"\"Converts dddmm.mmmm to ddd.dddd...\n    direction's 's' and 'w' are negative.\n    \"\"\"\n    degrees = int(degrees_and_minutes / 100)\n    minutes = degrees_and_minutes % 100\n    degrees += (minutes / 60)\n    if direction.lower() == 's' or direction.lower() == 'w':\n        return -1 * degrees\n    else:\n        return degrees\n","repo_name":"microstack-IoT/python3-microstacknode","sub_path":"microstacknode/hardware/gps/l80gps.py","file_name":"l80gps.py","file_ext":"py","file_size_in_byte":24614,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"60"}
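The `parse_float`/`parse_long` helpers above decode LOCUS fields by hand; note the hand-rolled version approximates the mantissa with a divisor of 8388607.0 (IEEE-754 uses 2^23 = 8388608) and ignores denormals. A minimal sketch (my addition, assuming the fields are little-endian IEEE-754, which matches the byte order `parse_long` uses) gets the exact decoding from the standard `struct` module:

```python
import struct

def parse_float_struct(b):
    """Decode four little-endian bytes as an IEEE-754 single-precision float."""
    return struct.unpack('<f', bytes(b[:4]))[0]

def parse_long_struct(b):
    """Decode four little-endian bytes as an unsigned 32-bit integer."""
    return struct.unpack('<I', bytes(b[:4]))[0]
```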
{"seq_id":"3039714922","text":"from django.contrib.auth import get_user_model\nfrom django.shortcuts import render, redirect\nfrom django.views.generic.edit import FormView\nfrom django.contrib import messages\nfrom django.contrib.auth import logout\n\nfrom .forms import EditPhoneNoForm, EditProfilePictureForm, DeleteAccountForm\nfrom django.utils.translation import gettext_lazy as _\nfrom django.contrib.auth.decorators import login_required\nfrom meta.views import Meta\n\nUser = get_user_model()\n\n\ndef settings(request):\n    meta = Meta(title=_(\"Settings\"), description=_(\"User Settings\"))\n    return render(request, 'users/settings.html', {\"meta\": meta})\n\n\nclass EditPhoneNoView(FormView):\n    template_name = 'users/edit_phone_no.html'\n    form_class = EditPhoneNoForm\n\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n        context[\"meta\"] = Meta(title=_(\"Edit Phone No.\"),\n                               description=_(\"Edit Phone No.\"))\n        return context\n\n    def get_initial(self):\n        initial = super().get_initial()\n        initial.update({'phone_no': self.request.user.profile.phone_no})\n        return initial\n\n    def form_valid(self, form):\n        user = self.request.user\n        user.profile.phone_no = form.cleaned_data.get('phone_no')\n        user.profile.save()\n        messages.success(self.request, _(\"Phone No. Updated\"))\n        return redirect('settings')\n\n\nclass UpdateProfilePictureView(FormView):\n    template_name = 'users/update_profile_picture.html'\n    form_class = EditProfilePictureForm\n\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n        context[\"meta\"] = Meta(\n            title=_(\"Update Profile Picture\"), description=_(\"Update Profile Picture\"))\n        return context\n\n    def form_valid(self, form):  # pragma: no cover\n        user = self.request.user\n        user.profile.image = form.cleaned_data.get('profile_picture')\n        user.profile.save()\n        messages.success(self.request, _(\"Profile Picture Updated\"))\n        return redirect('settings')\n\n\n@login_required\ndef delete_account(request):\n    if request.method == 'POST':\n        form = DeleteAccountForm(request.POST)\n\n        if form.is_valid():\n            if request.POST[\"delete_checkbox\"]:\n                rem = User.objects.get(username=request.user)\n                rem.delete()\n                logout(request)\n                messages.info(\n                    request, _(\"Your account has been deleted.\"))\n                return redirect(\"index\")\n    else:\n        form = DeleteAccountForm()\n    meta = Meta(title=_(\"Delete Account\"), description=_(\n        \"Are you sure you want to delete your account?\"))\n    context = {'form': form, \"meta\": meta}\n    return render(request, 'users/delete_account.html', context)\n","repo_name":"1hanzla100/Django-ecommerce","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2802,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"}
{"seq_id":"19400208843","text":"\"\"\"Evaluate trading strategy performance over time interval.\"\"\"\nfrom typing import List, Tuple, Dict\nfrom datetime import datetime\nimport pickle\n\n\"\"\" All trading strategies should have standardized input to work in automated\n    downstream testing.\"\"\"\nclass StockSimulation:\n    def __init__(self, historical_data, cash=1000000, tikrs = ['aapl','msft']):\n        \"\"\"\n        Initialize a StockSimulation instance.\n\n        Parameters\n        ----------\n        cash : float\n            The initial amount of cash to invest, by default 1000000\n        tikrs : List[str]\n            The list of stock symbols to invest in, by default ['aapl', 'msft']\n        historical_data : Dict[str, pd.DataFrame]\n            A dictionary of historical stock data for each stock symbol in `tikrs`.\n            Each key is a stock symbol, and each value is a pandas DataFrame containing the\n            historical data.\n        Returns\n        -------\n        None\n        \"\"\"\n        self.cash = cash\n        self.tikrs = tikrs\n        self.portfolio = {}\n        for tikr in tikrs:\n            self.portfolio[tikr] = 0\n        self.active_log = []\n        self.transaction_log = []\n        self.transaction_cost = 0.01\n        self.historical_data = historical_data\n\n    def get_price(self, tikr, date):\n        \"\"\"\n        Get the opening price of a stock on a given date.\n\n        Parameters\n        ----------\n        tikr : str\n            The stock symbol to get the price for.\n        date : str or datetime\n            The date to get the price for.
If str, must be in the format 'YYYYMMDD'.\n\n Returns\n -------\n float\n The opening price of the stock on the given date.\n \"\"\"\n\n start = date\n if type(start) is str:\n start = datetime.strptime(start, '%Y%m%d')\n \n date = start.strftime('%Y-%m-%d')\n\n company_df = self.historical_data[tikr]\n return company_df[company_df['Date'] > date].iloc[0]['Open']\n\n\n def buy(self, tikr, date, allocated_money):\n \"\"\"\n Buy shares of a stock.\n\n Parameters\n ----------\n tikr : str\n The stock symbol to buy.\n date : str or datetime\n The date to buy the stock. If str, must be in the format 'YYYYMMDD'.\n allocated_money : float\n The amount of money to allocate to the stock.\n\n Returns\n -------\n None\n \"\"\"\n # check if there is enough cash to buy\n if self.cash < allocated_money:\n print(\"Not enough cash to buy\")\n return\n\n # calculate number of shares to buy\n price = self.get_price(tikr, date)\n shares = allocated_money / price\n\n # update portfolio and cash balance\n if tikr in self.active_log:\n self.portfolio[tikr] += shares\n else:\n self.portfolio[tikr] = shares\n self.active_log.append(tikr)\n # minus the cost of stock\n stock_cost = shares * price\n #self.cash -= stock_cost * (1 + self.transaction_cost)\n self.cash -= stock_cost \n\n\n # log transaction\n self.transaction_log.append({\n \"type\": \"buy\",\n \"tikr\": tikr,\n \"date\": date,\n \"shares\": shares,\n \"price\": price,\n \"amount\": stock_cost ,\n \"transaction_cost\": 0,\n \"net_worth\": self.get_net_worth(date)\n })\n\n def sell(self, tikr, date, allocated_money):\n \"\"\"\n Sell shares of a stock.\n\n Parameters\n ----------\n tikr : str\n The stock symbol to sell.\n date : str or datetime\n The date to sell the stock. If str, must be in the format 'YYYYMMDD'.\n allocated_money : float\n The amount of money to allocate to the stock.\n\n Returns\n -------\n None\n \"\"\"\n # check if there are enough shares to sell\n if tikr not in self.portfolio:\n print(\"No shares of {} in portfolio\".format(tikr))\n return\n\n\n # calculate number of shares to sell\n price = self.get_price(tikr, date)\n shares = allocated_money / price\n\n\n if self.portfolio[tikr] < shares:\n print(\"Not enough shares of {} to sell\".format(tikr))\n return\n\n # update portfolio and cash balance\n self.portfolio[tikr] -= shares\n if self.portfolio[tikr] == 0:\n self.active_log.remove(tikr)\n\n # plus the earning\n stock_cost = shares * price\n self.cash += stock_cost\n # minus trasaction cost\n self.cash -= stock_cost * self.transaction_cost\n\n\n # log transaction\n self.transaction_log.append({\n \"type\": \"sell\",\n \"tikr\": tikr,\n \"date\": date,\n \"shares\": shares,\n \"price\": price,\n \"amount\": stock_cost ,\n \"transaction_cost\": stock_cost * self.transaction_cost,\n \"net_worth\": self.get_net_worth(date)\n })\n\n # #TODO\n # def get_next_trading_date(self,tikr, date):\n \n # return None\n\n def rebalance(self, percentage, date):\n \"\"\"\n Rebalance the portfolio according to a target allocation.\n\n Parameters\n ----------\n percentage : List[float]\n The target allocation percentages for each stock in the portfolio.\n date : str or datetime\n The date to rebalance the portfolio. If str, must be in the format 'YYYYMMDD'.\n\n Returns\n -------\n None\n \"\"\"\n\n buy = []\n \n # We multiply the net worth by 0.99 to account for transaction\n # costs incurred during selling. 
Specifically, the\n # `sell()` method has a transaction cost of 1 percent, \n # so we reduce the net worth by 1% to account for this cost.#\n true_balance = self.get_net_worth(date) * 0.99\n\n for tikr, percent in zip(self.tikrs, percentage):\n price = self.get_price(tikr, date)\n expected_value = true_balance * percent\n current_value = self.portfolio[tikr] * price\n allocated_money = expected_value - current_value\n\n if allocated_money < 0:\n self.sell(tikr, date, -1 * allocated_money)\n else:\n buy += [(tikr, allocated_money )]\n\n for tikr, allocated_money in buy:\n self.buy(tikr, date, allocated_money )\n\n\n\n\n def get_net_worth(self, date):\n \"\"\"\n Calculates the net worth of the portfolio on a given date, including\n cash and holdings of all active stocks in the portfolio.\n \n Parameters\n ----------\n date : datetime.datetime or str\n The date on which to calculate the active balance of the portfolio.\n If str, the accepted format is \"year_month_day\".\n \n Returns\n -------\n balance : float\n The net worth of the portfolio on the given date.\n \"\"\"\n balance = self.cash\n for tikr in self.active_log:\n price = self.get_price(tikr, date)\n balance += self.portfolio[tikr] * price\n return balance\n \n def print_portfolio(self, date):\n \"\"\"\n Prints the percentage of the portfolio holdings that are invested in each\n active stock in the portfolio, based on the net worth on the given date.\n \n Parameters\n ----------\n date : datetime.datetime or str\n The date on which to calculate the active balance of the portfolio.\n If str, the accepted format is \"year_month_day\".\n \n Returns\n -------\n None\n \"\"\"\n net_worth = self.get_net_worth(date)\n print(\"portfolio_allocation on\", date)\n for tikr in self.tikrs:\n price = self.get_price(tikr, date)\n tikr_holding = self.portfolio[tikr] * price\n \n print(tikr, tikr_holding/net_worth)\n print('cash', self.cash)\n print()\n \n\n def transaction_summary(self):\n \"\"\"\n Prints a summary of all transactions made in the portfolio, including\n the date, type, number of shares, stock ticker, price per share,\n transaction amount, transaction cost, and remaining balance.\n \n Returns\n -------\n None\n \"\"\"\n for txn in self.transaction_log:\n print(\"{} {} {} shares of {} at ${:.2f} for ${:.2f} (transaction cost: ${:.2f}), balance: ${:.2f}\".format(\n txn['date'], txn['type'], txn['shares'], txn['tikr'], txn['price'], txn['amount'], txn['transaction_cost'], txn['net_worth']))\n\n\"\"\"\ndef trading_strategy(\n predictions: List[tuple],\n company_list: List[str]\n ) -> Dict[datetime, List[float]]:\n #Calculate portfolio holdings at time intervals given 8-K labels.\n\n strategy = dict()\n\n return strategy\n\"\"\"\n\ndef load_historical_data(filename):\n with open(filename, 'rb') as handle:\n tikr_dict = pickle.load(handle)\n return tikr_dict\n\ndef get_strategy_annual_return(\n strategy: Dict[datetime, List[float]],\n company_list: List[str],\n end_date: datetime,\n starting_balance: int = 1e7,\n start_date: datetime = '20000101' ,\n silence = True) -> float:\n \"\"\"\n Calculate annual return over time period.\n \n Parameters\n ----------\n strategy: Dict[datetime, List[float]]\n A dictionary of portfolio rebalance dates associated with portfolio\n allocation percentages. 
The portfolio allocation percentages share the\n        same indexing as company_list.\n    company_list: List[str]\n        The companies invested into by the trading strategy percent allocation.\n    start_date: datetime, str\n        Accepted format \"year_month_day\", or datetime object. The first day of\n        trading.\n    end_date: datetime, str\n        Accepted format \"year_month_day\", or datetime object. The final day of\n        trading used to determine net worth and annualized return.\n    starting_balance: int\n        The initial amount of money invested.\n\n    Returns\n    -------\n    Annualized Return: float\n        The annualized rate of return, where 0.0 indicates no gain or loss.\n    \"\"\"\n    TIKRS_dat = load_historical_data('TIKR_DATA.pickle')\n\n    if type(start_date) is str:\n        start_date = datetime.strptime(start_date, \"%Y%m%d\")\n    if type(end_date) is str:\n        end_date = datetime.strptime(end_date, \"%Y%m%d\")\n\n    s = StockSimulation(TIKRS_dat, cash = starting_balance, tikrs = company_list)\n    for date, portfolio_allocation in strategy.items():\n        # At each date, rebalance current net worth to be distributed\n        # percentage-wise between companies in portfolio_allocation\n        s.rebalance(percentage= portfolio_allocation, date = date)\n        if not silence:\n            s.print_portfolio(date)\n    if not silence:\n        s.transaction_summary()\n\n    # Calculate the number of days between the start and end dates\n    delta_days = (end_date - start_date).days\n\n    # Calculate the fractional number of years using the total number of days\n    n = delta_days / 365.25  # assuming a leap year every 4 years\n\n    return (s.get_net_worth(end_date)/starting_balance) \\\n        ** ( 1 / n) - 1\n","repo_name":"kamilkrukowski/sec-sentiment-pred","sub_path":"portfolio_eval.py","file_name":"portfolio_eval.py","file_ext":"py","file_size_in_byte":11360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
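The annualization at the end of the record above is a geometric mean: raise the total growth ratio to 1/years and subtract one. A tiny numeric check (my addition, with hypothetical figures): growing 1e7 to 1.21e7 over two years is 10% per year, since 1.1 squared is 1.21.

```python
start, end, years = 1e7, 1.21e7, 2.0
annual = (end / start) ** (1 / years) - 1
print(annual)  # ~0.10, i.e. 10% per year
```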
{"seq_id":"40953583674","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 20 12:04:57 2018\n\n@author: Steve\n\"\"\"\n\n# This script visualizes binding curves\n'''--------------Import Libraries--------------------'''\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom matplotlib.pyplot import *\nimport scipy.cluster.hierarchy as sch\nimport sklearn.decomposition as skd\nfrom scipy.cluster.hierarchy import cophenet\nfrom scipy.spatial.distance import pdist\nfrom scipy.cluster.hierarchy import fcluster\nfrom sklearn.neighbors import NearestNeighbors\n#%% TAKES A LONG TIME SO ONLY RUN ONCE!!!!!!!!!\n\n\ndata_path = '/Users/Steve/Desktop/Data_analysis_code/Data/'\n\n#binding data contains the averaged data for all clusters for each variant\n#This data apparently still has not been averaged across datasets.\n#So, for GAAA at 30 mM Mg, there are 2 datasets, and this suggests that the\n#data that I have been using for the rest of the analysis consists of the\n#average between two of the binding data files.\nbinding_data = pd.read_csv(data_path + 'Mut2_GAAA_1.PerVariant.CPseries',\n                           delim_whitespace = True)\nbinding_data_GUAA = pd.read_csv(data_path + 'Mut2_GUAA_1.PerVariant.CPseries',\n                                delim_whitespace = True)\n\nbinding_data = binding_data.set_index('variant_number')\nbinding_data_GUAA = binding_data_GUAA.set_index('variant_number')\n\n#%%\n#the fitted data file contains the fitted parameters for each cluster.\nfitted_data_GAAA1 = pd.read_csv(data_path + 'Mut2_GAAA_1.CPfitted',\n                                delim_whitespace = True)\n\nfitted_data_GAAA2 = pd.read_csv(data_path + 'Mut2_GAAA_2.CPfitted',\n                                delim_whitespace = True)\n#%% Contains the parameters that went into the spreadsheets that I have been using\n#for the rest of the analysis\nerror_scaled_GUAA = pd.read_csv(data_path + 'Mut2_GUAA_1.error_scaled.CPvariant',\n                                delim_whitespace = True)\n\nerror_scaled_GAAA1 = pd.read_csv(data_path + 'Mut2_GAAA_1.error_scaled.CPvariant',\n                                 delim_whitespace = True)\n\nerror_scaled_GAAA2 = pd.read_csv(data_path + 'Mut2_GAAA_2.error_scaled.CPvariant',\n                                 delim_whitespace = True)\n\n\n#%% get data from spreadsheet--> this is the summarized data\nlib_data = pd.read_csv(data_path + 'tectorna_results_tertcontacts.180122.csv')\nlib_data['new_name'] = lib_data.r_seq + lib_data.r_name\nprint(lib_data.shape)\nlib_data = lib_data.drop_duplicates(subset='seq')\nprint(lib_data.shape)\nlib_data = lib_data.set_index('variant_number')\nWT_data = lib_data[lib_data.r_seq == 'UAUGG_CCUAAG']\n#%%\nall_data = pd.read_csv(data_path + 'tectorna_results_tertcontacts.180122.csv')\nall_data = all_data.drop_duplicates(subset='seq')\nall_11ntR = pd.read_csv(data_path + 'all_11ntRs_unique.csv' )\nmask = ((all_11ntR.b_name == 'normal') & (all_11ntR.no_mutations == 1)) | ((all_11ntR.b_name == 'normal') & (all_11ntR.no_mutations == 0))\nsingle_11ntR_mutants = all_11ntR[mask].copy()\nsingle_11ntR_mutants['new_name'] = single_11ntR_mutants['r_name'] + '_' + single_11ntR_mutants['r_seq']\nunique_receptors = list(set(single_11ntR_mutants['r_seq']))\n\n#%%get only the data that made it to the summary table after filtering\nA = all_data.copy()\nA = A.set_index('variant_number')\ndG_30mM = A['dG_Mut2_GAAA']\ndG_30mM = dG_30mM.dropna()\n\nfiltered_GAAA_1 = error_scaled_GAAA1.loc[dG_30mM.index]\nfiltered_GAAA_2 = error_scaled_GAAA2.loc[dG_30mM.index]\nplt.scatter(filtered_GAAA_1['dG'],filtered_GAAA_2['dG'])\nplt.xlim(-14,-6)\nplt.ylim(-14,-6)\ndifferences = filtered_GAAA_1['dG'] - filtered_GAAA_2['dG']\ndifferences = differences.apply('abs')\n\ndG_30mM = A['dG_Mut2_GAAA'].copy()\ndG_30mM[dG_30mM > 7.1] = np.nan\ndG_30mM = dG_30mM.dropna()\n\nfiltered_GAAA_1 = error_scaled_GAAA1.loc[dG_30mM.index]\nfiltered_GAAA_2 = error_scaled_GAAA2.loc[dG_30mM.index]\ndifferences = filtered_GAAA_1['dG'] - filtered_GAAA_2['dG']\ndifferences = differences.apply('abs')\nfraction_above_0p5 = len(differences[differences > 0.5])/len(differences)\nprint('fraction below 0.5 kcal/mol: ' + str(1 - fraction_above_0p5))\n\n\nfraction_above_1 = len(differences[differences > 1])/len(differences)\nprint('fraction below 1 kcal/mol: ' + str(1 - fraction_above_1))\n#%%\n\n\n#%%\n#this table tracks each variant to all the cluster IDs corresponding to that variant.\ntable_variants = pd.read_csv(data_path + 'tecto_undetermined.CPannot.CPannot',\n                             delim_whitespace = True)\ngroup_cluster_variants = table_variants.groupby(by='variant_number')\n#%%\nfitted_data_GAAA1 = fitted_data_GAAA1.set_index('clusterID')\nfitted_data_GAAA2 = fitted_data_GAAA2.set_index('clusterID')\n\n#%% I was doing this to see which data were missing\n#after discussing with Sarah, we determined that the data that was missing\n#belongs to variants that did not have enough clusters to pass the threshold\n
\nnan_GUAA = all_data[all_data['dG_Mut2_GUAA_1'].isna()]\nprint('number of missing data with GUAA: ' + str(len(nan_GUAA)))\nvariants_nan_GUAA = list(nan_GUAA['variant_number'])\noriginal_data_GUAA = pd.DataFrame(index = variants_nan_GUAA)\ndG_list = []\nub_list = []\nlb_list = []\ncluster_list = []\nfor variants in variants_nan_GUAA:\n err_scaled_GUAA_variant = error_scaled_GUAA.loc[variants]['dG']\n up_bound = error_scaled_GUAA.loc[variants]['dG_ub']\n low_bound = error_scaled_GUAA.loc[variants]['dG_lb']\n dG_list.append(err_scaled_GUAA_variant)\n ub_list.append(up_bound)\n lb_list.append(low_bound)\n cluster_list.append(error_scaled_GUAA.loc[variants]['numClusters'])\noriginal_data_GUAA['dG'] = dG_list\noriginal_data_GUAA['dG_lb'] = lb_list\noriginal_data_GUAA['dG_ub'] = ub_list\noriginal_data_GUAA['numClusters'] = cluster_list\n\nplt.hist(original_data_GUAA.numClusters.dropna(),bins=range(0,50))\n#%%This code below I wrote to figure out that there was a correspondence between\n#the values in the summary spreadsheet and the values in the error scaled files.\ntemp_WT = all_data[all_data.r_seq == 'UAUGG_CCUAAG']\ntemp_WT = temp_WT.set_index('variant_number')\nidx = 2\nprint('test variant number :' + str(temp_WT.index[idx]))\ndG_GUAA = temp_WT.loc[temp_WT.index[idx]]['dG_Mut2_GUAA_1']\nprint(dG_GUAA)\n\n#the variant # agrees in both the cvs spreadsheet and the error scaled file\nerr_scaled_GUAA_variant = error_scaled_GUAA.loc[temp_WT.index[idx]]['dG']\nprint('entered in cvs table is :' + str(err_scaled_GUAA_variant))\n#%%\n#for variant # (240 in this case )\n#find all clusters associated with that variant and plot the median dG\nvariant = 44500\nclusters_variant = group_cluster_variants.get_group(variant)\n\n\ndata_variant = fitted_data_GAAA1.loc[list(clusters_variant['clusterID'])]\nprint('the median from individual clusters is ' + str(data_variant['dG'].median()))\n\ndata_variant2 = fitted_data_GAAA2.loc[list(clusters_variant['clusterID'])]\nprint('the median from individual clusters is ' + str(data_variant2['dG'].median()))\n\n# this number should roughly agree with that in the spreadshet\ndG_variant = all_data[all_data['variant_number'] == variant]['dG_Mut2_GAAA']\nprint('the value reported in the spreadsheet is ' + str(dG_variant))\n\n\n#%%\n\n\n\n\n\n#%%\ntemp = WT_data[WT_data.old_idx == '240']\n#%%\n#data point for WT at high affinity ~12 kcal/mol\nWT_data = WT_data[WT_data.b_name == 'normal']\nall_WT_variants = list(WT_data.index)\n#%%\nfor variant in all_WT_variants[20:30]:\n fluorescence = binding_data.loc[variant]\n concentration = pd.Series([0.91e-9,2.74e-9,8.23e-9,24.7e-9,74.1e-9,222e-9,667e-9,2e-6])\n plt.figure()\n ax = plt.gca()\n ax.scatter(concentration,fluorescence)\n ax.set_xlim(0.1e-9,2000e-9)\n ax.set_xscale('log')\n#%%\nvariant = 12563\nfluorescence = binding_data.loc[variant]\nconcentration = pd.Series([0.91e-9,2.74e-9,8.23e-9,24.7e-9,74.1e-9,222e-9,667e-9,2e-6])\nplt.figure()\nax = plt.gca()\nax.scatter(concentration,fluorescence)\nax.set_xlim(0.1e-9,2000e-9)\nax.set_xscale('log')\nax.set_title('GAAA') \n#%% \n \nfluorescence = binding_data_GUAA.loc[variant]\nconcentration = pd.Series([0.91e-9,2.74e-9,8.23e-9,24.7e-9,74.1e-9,222e-9,667e-9,2e-6])\nplt.figure()\nax = plt.gca()\nax.scatter(concentration,fluorescence)\nax.set_xlim(0.1e-9,2000e-9)\nax.set_xscale('log')\n#%%\nreceptor = 'CAUGG_CCUAAG'\nreceptor_data = single_11ntR_mutants[single_11ntR_mutants.r_seq == receptor].copy()\nvariants = list(receptor_data.variant_number)\ndG_GUAA = 
list(receptor_data.dG_Mut2_GUAA_1)\n\ncounter = -1\nfor variant in variants:\n    counter += 1\n    fluorescence = binding_data_GUAA.loc[variant]\n    concentration = pd.Series([0.91e-9,2.74e-9,8.23e-9,24.7e-9,74.1e-9,222e-9,667e-9,2e-6])\n    plt.figure()\n    ax = plt.gca()\n    ax.scatter(concentration,fluorescence)\n    ax.set_xlim(0.1e-9,2000e-9)\n    ax.set_xscale('log')\n    ax.set_title(str(dG_GUAA[counter]))\n    \n    \n#%%\nnan_GUAA = all_data[all_data['dG_Mut2_GUAA_1'].isna()]\nprint('missing values with GUAA: ' + str(len(nan_GUAA)))\nnan_GUAA_variants = list(nan_GUAA.variant_number)\nnan_GUAA_dG = list(nan_GUAA.dG_Mut2_GUAA_1)\ncounter = -1\nfor variant in nan_GUAA_variants[10:20]:\n    counter += 1\n    fluorescence = binding_data_GUAA.loc[variant]\n    concentration = pd.Series([0.91e-9,2.74e-9,8.23e-9,24.7e-9,74.1e-9,222e-9,667e-9,2e-6])\n    plt.figure()\n    ax = plt.gca()\n    ax.scatter(concentration,fluorescence)\n    ax.set_xlim(0.1e-9,2000e-9)\n    ax.set_xscale('log')\n    ax.set_title('variant number: ' + str(variant))\n#%%\nnan_GAAA = all_data[all_data['dG_Mut2_GAAA'].isna()]\nprint('missing values with GAAA: ' + str(len(nan_GAAA)))\nnan_GAAA_variants = list(nan_GAAA.variant_number)\nnan_GAAA_dG = list(nan_GAAA.dG_Mut2_GAAA)\ncounter = -1\nfor variant in nan_GAAA_variants[20:30]:\n    counter += 1\n    fluorescence = binding_data.loc[variant]\n    concentration = pd.Series([0.91e-9,2.74e-9,8.23e-9,24.7e-9,74.1e-9,222e-9,667e-9,2e-6])\n    plt.figure()\n    ax = plt.gca()\n    ax.scatter(concentration,fluorescence)\n    ax.set_xlim(0.1e-9,2000e-9)\n    ax.set_xscale('log')\n    ax.set_title('variant number: ' + str(variant))\n#%%\n    \nx = all_data[all_data.variant_number == 11332]\n","repo_name":"bonils/TL_TLR_CHIP_Data","sub_path":"binding_curve_analysis.py","file_name":"binding_curve_analysis.py","file_ext":"py","file_size_in_byte":10096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"462444831","text":"import os\nfrom tkinter import *\nfrom tkinter import filedialog\nfrom tkinter import messagebox\nfrom tkinter.ttk import Progressbar, Style\nfrom PIL import Image\n\n\ndef select_images():\n    global selected_images\n    selected_images = filedialog.askopenfilenames(\n        filetypes=((\"JPEG files\", \"*.jpg;*.jpeg\"),\n                   (\"PNG files\", \"*.png\"), (\"All files\", \"*.*\")))\n    image_listbox.delete(0, END)\n    for filename in selected_images:\n        image_listbox.insert(END, filename)\n    if selected_images:\n        convert_button['state'] = 'normal'\n    else:\n        convert_button['state'] = 'disabled'\n        download_button['state'] = 'disabled'\n\n\ndef convert_to_webp(filename):\n    img = Image.open(filename)\n    webp_filename = os.path.splitext(filename)[0] + \".webp\"\n    img.save(webp_filename, \"WebP\", lossless=True)\n
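\n# Note: the conversion above is lossless; Pillow's WebP writer also accepts a lossy\n# mode with a quality setting, e.g. img.save(webp_filename, \"WebP\", quality=80).\n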
\n\ndef convert_images():\n    global converted_images\n    converted_images = []\n    progress_bar[\"maximum\"] = len(selected_images)\n    for i, filename in enumerate(selected_images, start=1):\n        convert_to_webp(filename)\n        progress_bar[\"value\"] = i\n        root.update_idletasks()\n        converted_images.append(os.path.splitext(filename)[0] + \".webp\")\n    messagebox.showinfo(\"Success\", \"Conversion complete\")\n    download_button['state'] = 'normal'\n\n\ndef download_images():\n    # save each converted file into a user-chosen directory (appending every\n    # image to a single file would produce a corrupt WebP)\n    directory = filedialog.askdirectory(title=\"Save Images\")\n    if directory:\n        for image in converted_images:\n            webp_filename = os.path.join(\".\", image)\n            destination = os.path.join(directory, os.path.basename(image))\n            with open(webp_filename, \"rb\") as f:\n                with open(destination, \"wb\") as out_file:\n                    out_file.write(f.read())\n\n\nselected_images = []\nconverted_images = []\n\nroot = Tk()\nroot.title(\"Image Converter\")\nroot.geometry(\"400x400\")\n\nstyle = Style()\nstyle.configure(\"TButton\", font=(\"Helvetica\", 12), padding=10)\n\nselect_button = Button(root, text=\"Select Images\", command=select_images)\nselect_button.pack(padx=10, pady=10)\n\nimage_listbox = Listbox(root, selectmode=MULTIPLE, height=10)\nimage_listbox.pack(padx=10, pady=10)\n\nconvert_button = Button(root, text=\"Convert to WebP\",\n                        command=convert_images, state='disabled')\nconvert_button.pack(padx=10, pady=10)\n\nprogress_bar = Progressbar(root, orient=HORIZONTAL,\n                           length=300, mode='determinate')\nprogress_bar.pack(padx=10, pady=10)\n\ndownload_button = Button(root, text=\"Download WebP Images\",\n                         command=download_images, state='disabled')\ndownload_button.pack(padx=10, pady=10)\n\nroot.mainloop()\n","repo_name":"ryanbakkerNZ/webp-convert","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"17325140223","text":"#!/usr/bin/env python\nfrom functools import reduce\n\n\ndef gcd(a, b):\n    \"\"\"Greatest common divisor\n    \"\"\"\n    while b:\n        a, b = b, a % b\n    return a\n\n\ndef lcm(a, b):\n    \"\"\"Least common multiple\n    \"\"\"\n    return a * b // gcd(a, b)\n\n\nif __name__ == \"__main__\":\n    # when there are several numbers\n    X = [4, 6, 8, 40, 128]\n    print(\"X\", X)\n    print(\"gcd\", reduce(gcd, X))\n    print(\"lcm\", reduce(lcm, X))\n","repo_name":"HiroshiOkada/a-small-collection-of-algorithms","sub_path":"gcd_lcm.py","file_name":"gcd_lcm.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"12370402252","text":"import displayio\nimport time\n#import board\n#import digitalio\nimport terminalio\nfrom adafruit_display_text.label import Label\nfrom adafruit_gizmo import tft_gizmo\n#from adafruit_circuitplayground import cp\n\ndisplay = tft_gizmo.TFT_Gizmo()\n\ndef imagedisplay(word):\n    #display_group = displayio.Group()\n    # Display graphic from the root directory of the CIRCUITPY drive\n    filename = \"/\" + word + \".bmp\"\n    file = open(filename, \"rb\")\n    picture = displayio.OnDiskBitmap(file)\n    # Create a Tilegrid with the bitmap and put in the displayio group\n    sprite = displayio.TileGrid(picture, pixel_shader=displayio.ColorConverter())\n    group.append(sprite)\n    #display.refresh(target_frames_per_second=60)\n    # Place the display group on the screen\n    #display.show(group)\n    #display.refresh()\n\ndef message(text, axisX, axisY, color):\n    text_group = displayio.Group(max_size=1, scale=2)\n    label = Label(terminalio.FONT, text=text)\n    if color == \"white\":\n        color = 0xFFFFFF\n    if color == \"red\":\n        color = 0xFF0000\n    if color == \"blue\": \n        color = 0x0000FF\n    if color == \"green\":\n        color = 0x00CC00\n    label.color = color\n    #if bgcolor == \"white\":\n    #    bgcolor = 0xFFFFFF\n    #if bgcolor == \"red\":\n    #    bgcolor = 0xFF0000\n    #if bgcolor == \"blue\": \n    #    bgcolor = 0x0000FF\n    #if bgcolor == \"green\":\n    #    bgcolor = 0x00FF00\n    #label.background_color = bgcolor\n    (x, y, w, h) = label.bounding_box\n    #label.x = (80 - w // 2)\n    label.x = axisX\n    #label.y = (64 - h // 2)\n    label.y = axisY\n    text_group.append(label)\n    #group.append(label)\n    group.append(text_group)\n\ndef splashScreen(color):\n    splashBitmap = displayio.Bitmap(20, 20, 1)\n    splashPalette = displayio.Palette(1)\n    if color == \"white\":\n        color = 0xFFFFFF\n    if color == \"red\":\n        color 
= 0xFF0000\n if color == \"blue\": \n color = 0x0000FF\n if color == \"green\":\n color = 0x00FF00\n splashPalette[0] = color\n splashSprite = displayio.TileGrid(splashBitmap, pixel_shader=splashPalette, x=0, y=0)\n group.append(splashSprite)\n\ngroup = displayio.Group(max_size=3)\ndisplay.show(group)\nimagedisplay(\"white\")\ntime.sleep(4)\ndef displayloop():\n imagedisplay(\"swimbikeruntop\")\n #time.sleep(2)\n #imagedisplay(\"bikerunswimside\")\n #time.sleep(2)\n #message(\"Total Activities: %s\" % summaryList[\"Total Activities\"], 30, 40)\n message(\"7 Day Totals \\n Activities: %s \\n Time: %s\" %(summaryList[\"Total Activities\"], summaryList[\"Total Time\"]), 1, 70, \"green\")\n time.sleep(8)\n #group.pop()\n group.pop()\n group.pop()\n time.sleep(2)\n #for i in range(100):\n # display.brightness = 0.01 * i\n # time.sleep(0.05)\n imagedisplay(\"bike\")\n message(\"Rides: %s \\n Distance: %s \\n Avg Watts: %s\" %(summaryList[\"Total Rides\"], summaryList[\"Total Ride Miles\"], summaryList[\"Average Watts\"]), 1, 90, \"blue\")\n time.sleep(8)\n group.pop()\n group.pop()\n time.sleep(2)\n imagedisplay(\"run\")\n message(\"Runs: %s \\n Distance: %s \\n Avg Pace: %s\" %(summaryList[\"Total Runs\"], summaryList[\"Total Run Miles\"], summaryList[\"Average Run Pace\"]), 1, 90, \"red\")\n time.sleep(8)\n group.pop()\n group.pop()\n time.sleep(2)\n\nwith open('/stats.json') as filehandle:\n filecontents = filehandle.readlines()\nfilehandle.close()\n\nsummaryList = {}\nfor line in filecontents:\n # remove linebreak which is the last character of the string\n if len((line[:-1]).split(\":\")) == 2:\n key = (line[:-1]).split(\":\")[0]\n value = line[:-1].split(\":\")[1]\n summaryList[key] = value\n else:\n key = (line[:-1]).split(\":\")[0]\n value = line[:-1].split(\":\")[1] + \":\" + line[:-1].split(\":\")[2]\n summaryList[key] = value\n\nwhile True:\n displayloop()\n #pass","repo_name":"jchand7751/maker-public","sub_path":"sportdisplay_small/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":3793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"4458585180","text":"\"\"\"\nDate:17/06/2021\nThe following program is for conversion from decimal to given base and vice versa\n\"\"\"\n\n#conversion of num from Base:10 to Base:base \ndef dec_to_base(num,base):\n ans=0\n i=0\n while(num):\n rem=num%base\n ans+=pow(10,i)*rem\n num//=base\n i+=1\n return ans\n\n#conversion of number from Base:base to base:10\ndef base_to_decimal(num,base):\n ans=0\n i=0\n while(num):\n #ans*=10\n rem=num%10\n ans+=pow(base,i)*rem\n num//=10\n i+=1\n\n return ans\n \nprint(base_to_decimal(dec_to_base(1000,4),4))","repo_name":"SandeepPadhi/Algorithmic_Database","sub_path":"Math/Base_Conversion.py","file_name":"Base_Conversion.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"60"} +{"seq_id":"26832334528","text":"import torch\nimport torch.multiprocessing as mp\n\nfrom dp.preprocess import preprocess\nfrom dp.train import train\n\nif __name__ == '__main__':\n\n train_data = [('en_us', 'young', 'jʌŋ'),\n ('de', 'benützten', 'bənʏt͡stn̩'),\n ('de', 'gewürz', 'ɡəvʏʁt͡s')] * 1000\n\n val_data = [('en_us', 'young', 'jʌŋ'),\n ('de', 'benützten', 'bənʏt͡stn̩')] * 100\n\n config_file = 'dp/configs/forward_config.yaml'\n\n preprocess(config_file=config_file,\n train_data=train_data,\n val_data=val_data,\n deduplicate_train_data=False)\n\n num_gpus = 
torch.cuda.device_count()\n\n if num_gpus > 1:\n mp.spawn(train, nprocs=num_gpus, args=(num_gpus, config_file))\n else:\n train(rank=0, num_gpus=num_gpus, config_file=config_file)","repo_name":"yuhangear/DeepPhonemizer","sub_path":"run_training.py","file_name":"run_training.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"60"} +{"seq_id":"6630983353","text":"import unittest\nfrom unittest.mock import MagicMock, call\n\nfrom poker.card import Card\nfrom poker.game import Game\n\n\nclass GameTest(unittest.TestCase):\n def setUp(self):\n self.first_two_cards = [\n Card(rank=\"3\", suit=\"Hearts\"),\n Card(rank=\"9\", suit=\"Diamonds\"),\n ]\n self.second_two_cards = [\n Card(rank=\"Jack\", suit=\"Clubs\"),\n Card(rank=\"10\", suit=\"Clubs\"),\n ]\n self.flop_cards = [\n Card(rank=\"2\", suit=\"Diamonds\"),\n Card(rank=\"4\", suit=\"Hearts\"),\n Card(rank=\"10\", suit=\"Spades\"),\n ]\n self.turn_card = [Card(rank=\"Ace\", suit=\"Clubs\")]\n self.river_card = [Card(rank=\"10\", suit=\"Hearts\")]\n\n def test_stores_deck_and_players(self):\n deck = MagicMock()\n players = [MagicMock(), MagicMock()]\n\n game = Game(deck=deck, players=players)\n\n self.assertEqual(game.deck, deck)\n\n self.assertEqual(game.players, players)\n\n def test_game_play_shuffles_deck(self):\n mock_deck = MagicMock()\n\n players = [MagicMock(), MagicMock()]\n\n game = Game(deck=mock_deck, players=players)\n\n game.play()\n mock_deck.shuffle.assert_called_once()\n\n def test_deals_two_initial_cards_from_deck_to_each_player(self):\n mock_deck = MagicMock()\n mock_deck.remove_cards.side_effect = [\n self.first_two_cards,\n self.second_two_cards,\n self.flop_cards,\n self.turn_card,\n self.river_card,\n ]\n\n mock_player1 = MagicMock()\n mock_player2 = MagicMock()\n players = [mock_player1, mock_player2]\n\n game = Game(deck=mock_deck, players=players)\n\n game.play()\n\n # Test that remove_cards has been called twice\n # Test that remove_cards was called with the argument 2 each time\n mock_deck.remove_cards.assert_has_calls([call(2), call(2)])\n\n # Test that, at some point in the execution,\n # add_cards was called with the first two cards drawn from the deck\n mock_player1.add_cards.assert_has_calls([call(self.first_two_cards)])\n mock_player2.add_cards.assert_has_calls([call(self.second_two_cards)])\n\n def test_removes_player_if_not_willing_to_make_bet(self):\n mock_deck = MagicMock()\n mock_player1 = MagicMock()\n mock_player1.wants_to_fold.return_value = True\n mock_player2 = MagicMock()\n mock_player2.wants_to_fold.return_value = False\n\n players = [mock_player1, mock_player2]\n\n game = Game(deck=mock_deck, players=players)\n game.play()\n\n self.assertEqual(game.players, [mock_player2])\n\n def test_deals_each_player_3_flop_1_turn_and_1_river_card(self):\n mock_player1 = MagicMock()\n mock_player1.wants_to_fold.return_value = False\n mock_player2 = MagicMock()\n mock_player2.wants_to_fold.return_value = False\n players = [mock_player1, mock_player2]\n\n mock_deck = MagicMock()\n mock_deck.remove_cards.side_effect = [\n self.first_two_cards,\n self.second_two_cards,\n self.flop_cards,\n self.turn_card,\n self.river_card,\n ]\n\n game = Game(deck=mock_deck, players=players)\n game.play()\n # Test that we made a call to the Deck with the remove_cards method\n # with an argument of 3\n mock_deck.remove_cards.assert_has_calls([call(3), call(1), call(1)])\n # Test that we've called the 'add_cards' method on each player\n # with all three 
community cards\n        calls = [call(self.flop_cards), call(self.turn_card), call(self.river_card)]\n\n        for player in players:\n            player.add_cards.assert_has_calls(calls)\n","repo_name":"nualagr/poker","sub_path":"tests/test_game.py","file_name":"test_game.py","file_ext":"py","file_size_in_byte":3757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"72269503232","text":"# Question 5 (Special Diet for weight loss)\n# Your friend told you about a special diet for losing weight that you seem interested in.\n# The special diet allows you to consistently lose some weight every month.\n# Write a program that allows you to enter your current weight (in pounds) and the weight (in pounds)\n# that will be lost every month. Your program should display what your new weight will be at the\n# end of every month until you have lost at most half of your weight.\n# Your program should also display a message that says\n# \"In n months of joining the weight loss program, you would have lost half of your weight.\",\n# where n is the months it takes to be at most half the size of your weight\n\n# Get the initial weight as input\nweight = int(input(\"Enter your current weight: \"))\npound_lost = int(input(\"Enter the pounds you will lose every month: \"))\ncount = 0\n\n# print the heading (the first column is the month number)\nprint('Month','\\t','Projected Weight')\n\nHALF_WEIGHT = weight*0.5\nwhile weight > HALF_WEIGHT:\n    count += 1\n    weight -= pound_lost\n    print(count,'\\t', weight)\n\n# Print summary\nprint(\"In\", count,\"months of joining the weight loss program you would have lost half of your current weight\")\n\n    \n","repo_name":"gracomot/Basic-Python-For-College-Students","sub_path":"ProgrammingExercises/Lesson 4/question5.py","file_name":"question5.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"32910830845","text":"import array\nfrom nltk.stem.snowball import SnowballStemmer\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nimport string, re\n\n\nclass CompressedPostings:\n    @staticmethod\n    def VBEncodeNum(n):\n        byte = []\n        while True:\n            byte.append(n % 128)\n            if n < 128:\n                break\n            n //= 128\n        byte[0] += 128\n        return byte[::-1]\n\n    @staticmethod\n    def VBEncode(n_list):\n        b = []\n        for n in n_list:\n            b.extend(CompressedPostings.VBEncodeNum(n))\n        return b\n\n    @staticmethod\n    def VBDecode(bs):\n        n_list = []\n        n = 0\n        for b in bs:\n            if b < 128:\n                n = 128*n + b\n            else:\n                n = 128*n + b - 128\n                n_list.append(n)\n                n = 0\n        return n_list\n    \n\n    @staticmethod\n    def encode(postings_list):\n        \"\"\"Encodes `postings_list` using gap encoding with variable byte \n        encoding for each gap\n        \n        Args\n        \n        postings_list: List[int]\n            The postings list to be encoded\n        \n        Returns\n        \n        bytes: \n            Bytes representation of the compressed postings list \n            (as produced by `array.tobytes` function)\n        \"\"\"\n        p = postings_list.copy()\n        for i in range(1, len(p))[::-1]:\n            p[i] -= p[i-1]\n        vb = CompressedPostings.VBEncode(p)\n        return array.array('B', vb).tobytes()\n    \n\n    @staticmethod\n    def decode(encoded_postings_list):\n        \"\"\"Decodes a byte representation of compressed postings list\n        \n        Args\n        \n        encoded_postings_list: bytes\n            Bytes representation as produced by `CompressedPostings.encode` \n        \n        Returns\n        \n        List[int]\n            Decoded postings list (each posting is a docId)\n        \"\"\"\n        vb = array.array('B')\n        vb.frombytes(encoded_postings_list)\n        postings_list = CompressedPostings.VBDecode(vb.tolist())\n        for i in range(1, len(postings_list)):\n            postings_list[i] += postings_list[i-1]\n        return postings_list\n
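\n\n# (Illustrative) round-trip sanity check of the gap + variable-byte codec,\n# using a made-up postings list:\nif __name__ == '__main__':\n    example_postings = [3, 7, 21, 500]\n    assert CompressedPostings.decode(CompressedPostings.encode(example_postings)) == example_postings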
\n\n\ndef sorted_intersect(list1, list2):\n    \"\"\"Intersects two (ascending) sorted lists and returns the sorted result\n    \n    Args\n    \n    list1: List[Comparable]\n    list2: List[Comparable]\n        Sorted lists to be intersected\n    \n    Returns\n    \n    List[Comparable]\n        Sorted intersection \n    \"\"\"\n    idx1 = idx2 = 0\n    intersect = []\n    while idx1 < len(list1) and idx2 < len(list2):\n        if list1[idx1] < list2[idx2]:\n            idx1 += 1\n        elif list2[idx2] < list1[idx1]:\n            idx2 += 1\n        else:\n            intersect.append(list1[idx1])\n            idx1 += 1\n            idx2 += 1\n    return intersect\n\n\ndef tokenize_text(text):\n    stemmer = SnowballStemmer(language='english')\n    stop_words = set(stopwords.words('english'))\n    # compare case-insensitively so capitalised stop words (e.g. 'The') are removed too\n    tokens = [token for token in word_tokenize(text) if token.lower() not in stop_words and token.isalnum()]\n    return [stemmer.stem(token) for token in tokens]\n\n\ndef clean_text(text):\n    if type(text) != str or text == \"[removed]\" or text == 'nan':\n        return \"\"\n    regex = re.compile('[%s]' % re.escape(string.punctuation))\n    return regex.sub(\"\", text)","repo_name":"mysterious-progression/reddit_IR_project","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"32644929563","text":"# encoding: utf-8\n\nk_bootstrap_code = 'bootstrap_code'\nk_MBR = 'MBR'\nk_Partition = 'Partition'\nk_PartitionEntry = 'PartitionEntry'\nk_PartitionEntries = 'PartitionEntries'\nk_status = 'status'\n\nk_starting_chs_address = 'starting_chs_address'\nk_ignored = '__ignored__'\nk_partition_type = 'partition_type'\nk_ending_chs_address = 'ending_chs_address'\nk_first_sector_address = 'first_sector_address'\nk_first_byte_address = 'first_byte_address'\nk_number_of_sectors = 'number_of_sectors'\nk_size = 'size'\n\nk_boot_signature = 'boot_signature'\n\nk_FAT32BootSector = 'FAT32BootSector'\nk_jump_instruction = 'jump_instruction'\nk_OEM_name = 'OEM_name'\nk_bytes_per_sector = 'bytes_per_sector'\nk_sectors_per_cluster = 'sectors_per_cluster'\nk_number_of_reserved_sectors = 'number_of_reserved_sectors'\nk_number_of_FATs = 'number_of_FATs'\nk_media_descriptor = 'media_descriptor'\nk_sectors_per_track = 'sectors_per_track'\nk_number_of_heads = 'number_of_heads'\nk_number_of_hidden_sectors = 'number_of_hidden_sectors'\nk_sectors_per_FAT = 'sectors_per_FAT'\nk_drive_description = 'drive_description'\nk_version = 'version'\nk_cluster_number_of_root_directory_start =\\\n    'cluster_number_of_root_directory_start'\nk_sector_number_of_FS_info_sector = 'sector_number_of_FS_info_sector'\nk_sector_number_of_boot_sectors_backup = 'sector_number_of_boot_sectors_backup'\nk_drive_number = 'drive_number'\nk_extended_boot_signature = 'extended_boot_signature'\nk_volume_id = 'volume_id'\nk_volume_label = 'volume_label'\nk_filesystem_type = 'filesystem_type'\n\nk_FAT32FileAllocationTable = 'FAT32FileAllocationTable'\n\nk_Drive = 'Drive'\n\nk_FAT32 = 'FAT32'\nk_NTFS = 'NTFS'\nk_ExtendedPartition = 'ExtendedPartition'\n\nk_reserved = 'reserved'\n\nk_FAT32DirectoryTableEntry = 'FAT32DirectoryTableEntry'\nk_short_file_name = 'short_file_name'\nk_short_extension = 'short_extension'\nk_attribute = 'attribute'\nk_create_time_10ms = 'create_time_10ms'\nk_create_time = 'create_time'\nk_create_date = 'create_date'\nk_access_date = 'access_date'\nk_higher_cluster = 'higher_cluster'\nk_modify_time = 
'modify_time'\nk_modify_date = 'modify_date'\nk_lower_cluster = 'lower_cluster'\nk_file_length = 'file_length'\n\nk_FAT32LongFilenameEntry = 'FAT32LongFilenameEntry'\nk_sequence_number = 'sequence_number'\nk_name_1 = 'name_1'\nk_type = 'type'\nk_checksum = 'checksum'\nk_name_2 = 'name_2'\nk_first_cluster = 'first_cluster'\nk_name_3 = 'name_3'\n\nk_filename = 'filename'\nk_full_path = 'full_path'\nk_path = 'path'\nk_extension = 'k_extension'\nk_cluster_list = 'cluster_list'\n","repo_name":"palewithout/createfile","sub_path":"drive/keys.py","file_name":"keys.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"34778166973","text":"import time\nimport ray\nimport ray.rllib.agents.ppo as ppo\nfrom ray.tune.logger import pretty_print\nfrom env import CustomSkipFrame, KukaCamReachEnv\nfrom ray import tune\nfrom ray.tune import grid_search\nfrom ray.rllib.env.env_context import EnvContext\nfrom ray.tune.registry import register_env, register_trainable\nfrom ray.rllib.agents.ppo import PPOTrainer\nfrom ray.rllib.agents.impala import ImpalaTrainer\n\nif __name__=='__main__':\n \n ray.shutdown()\n ray.init(ignore_reinit_error=True)\n\n # env_config={\n # \"is_render\":False,\n # \"is_good_view\":False,\n # \"max_steps_one_episode\":1000,\n # }\n # env=KukaCamReachEnv(env_config)\n # env=CustomSkipFrame(env)\n \n register_env(\"kuka_env\",lambda config: CustomSkipFrame(KukaCamReachEnv(config)))\n #register_env(\"kuka_env\",lambda config: KukaCamReachEnv(config))\n \n \n config = {\n \"env\": \"kuka_env\",\n \"model\":{\n \"conv_filters\":[[32,[3,3],2],[32,[3,3],2],[32,[3,3],2],[32,[3,3],2],[1152,[6,6],1]],\n # \"conv_filters\":\"relu\",\n \"post_fcnet_hiddens\":[512,251],\n \"post_fcnet_activation\":\"relu\",\n },\n \"env_config\":{\n \"is_render\":False,\n \"is_good_view\":False,\n \"max_steps_one_episode\":1000,\n },\n \"num_workers\":10,\n \"num_gpus\":1,\n \"framework\":\"torch\",\n # \"render_env\":False,\n # \"num_gpus_per_worker\":0,\n # \"num_envs_per_worker\":5,\n # \"rollout_fragment_length\":1000,\n # \"train_batch_size\":4000,\n # \"batch_mode\":\"complete_episodes\",\n #\"lr\":0.0001,\n # \"lr\":grid_search([5e-5,0.0001])\n }\n\n config_for_trainer = {\n \"env\": \"kuka_env\",\n # \"model\":{\n # \"conv_filters\":[[32,[3,3],2],[32,[3,3],2],[32,[3,3],2],[32,[3,3],2],[1152,[6,6],1]],\n # # \"conv_filters\":\"relu\",\n # \"post_fcnet_hiddens\":[512,251],\n # \"post_fcnet_activation\":\"relu\",\n # },\n \"env_config\":{\n \"is_render\":False,\n \"is_good_view\":False,\n \"max_steps_one_episode\":1000,\n },\n \"num_workers\":1,\n \"num_gpus\":1,\n \"framework\":\"torch\",\n }\n\n stop = {\n \"episode_reward_mean\": 0.99,\n \"training_iteration\":200,\n }\n\n # trainer=PPOTrainer(config=config_for_trainer)\n # print(trainer.get_policy().model)\n #\n # trainer=ImpalaTrainer(config=config_for_trainer)\n # print(trainer.get_policy().model)\n \n \n results = tune.run(\n \"SAC\", # Specify the algorithm to train\n config=config,\n stop=stop,\n checkpoint_freq=1,\n )\n\n metric=\"episode_reward_mean\"\n best_trial = results.get_best_trial(metric=metric, mode=\"max\", scope=\"all\")\n best_checkpoint=results.get_best_checkpoint(best_trial,metric=metric,mode=\"max\")\n print('best checkpoint: ',best_checkpoint)\n \n 
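# (Illustrative) the winning checkpoint could later be reloaded for evaluation,\n    # e.g. by building a trainer from the same config and calling trainer.restore(best_checkpoint).\n    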
ray.shutdown()","repo_name":"borninfreedom/deep-rl-with-robots","sub_path":"src/train/kuka_reach/with_image/train_with_rllib.py","file_name":"train_with_rllib.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"37438880788","text":"import os\nos.system(\"git clone https://github.com/gasharper/linux-fonts.git\")\nos.system(\"pip install arabic_reshaper\")\nos.system(\"pip install python-bidi\")\nimport cv2 \nimport arabic_reshaper\nfrom bidi.algorithm import get_display\nimport numpy as np\nfrom PIL import ImageFont, ImageDraw, Image\n\ndef put_ar_text(img, text, cor=(0,0), font_size=32, fontpath=\"./linux-fonts/arial.ttf\", fill_color=(255,255,0), windows=False):\n    font = ImageFont.truetype(fontpath, font_size)\n    img_pil = Image.fromarray(img)\n    draw = ImageDraw.Draw(img_pil)\n    reshaped_text = arabic_reshaper.reshape(text)\n    bidi_text = get_display(reshaped_text) \n    draw.text(cor, bidi_text, font = font, fill=fill_color)\n    img = np.array(img_pil)\n    if windows:\n        cv2.waitKey(0)\n        cv2.destroyAllWindows()\n\n    return img\n","repo_name":"samfathy74/snippets-code-with-python","sub_path":"put_arabic_text_above_image.py","file_name":"put_arabic_text_above_image.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"32588254029","text":"import math\nimport logging\nfrom operator import neg\n\nfrom sympy.logic.boolalg import to_cnf, And, Or, Equivalent, Implies, disjuncts\nfrom Belief import Belief\nimport calculations\n\n\nclass BeliefBase:\n    def __init__(self):\n        self.beliefsSetOriginal = []\n        self.beliefsSetCNF = []\n\n    def __eq__(self, other):\n        return self.beliefsSetOriginal == other.beliefsSetOriginal\n\n    def addBelief(self, belief):\n        if self.deleteSameBelief(belief) == 1:\n            \n            x = Belief(belief)\n            self.beliefsSetOriginal.append(x)\n            self.calculatePlausibilityOrders(self.beliefsSetOriginal)\n\n    def addBeliefWithOrder(self, belief, plausibilityOrder):\n        x = Belief(belief)\n        x.plausibilityOrder = plausibilityOrder\n        self.beliefsSetOriginal.append(x)\n\n    def addBlindly(self, belief):\n        x = Belief(belief)\n        self.beliefsSetOriginal.append(x)\n        self.calculatePlausibilityOrders(self.beliefsSetOriginal)\n\n    def convertToCNF(self):\n        self.beliefsSetCNF = []\n        for belief in self.beliefsSetOriginal:\n            cnfBelief = to_cnf(belief.belief)\n            separatedBeliefs = calculations.splitFormula('&', cnfBelief)\n            for b in separatedBeliefs:\n                x = Belief(b)\n                x.plausibilityOrder = belief.plausibilityOrder\n                self.beliefsSetCNF.append(x)\n\n    def printCNF(self):\n        print('PRINTING CNF: ')\n        for belief in self.beliefsSetCNF:\n            print(belief.belief, belief.plausibilityOrder)\n\n    def calculatePlausibilityOrders(self, beliefsSet):\n        if len(beliefsSet) == 1:\n            pass\n        else:\n            ratio = 1 / len(beliefsSet)\n            i = 0\n            for i in range(0, len(beliefsSet)):\n                beliefsSet[i].plausibilityOrder = round((i + 1) * ratio, 2)\n\n    # when a new belief comes, check if the same belief already exists in the original set\n    def deleteSameBelief(self, newBelief):\n\n        for belief in self.beliefsSetOriginal:\n            if to_cnf(belief.belief) == to_cnf(newBelief):\n                #print('Previous belief: ', belief.belief, 'order:', belief.plausibilityOrder, 'is the same.')\n                #print('Deleting it before adding new belief', newBelief, 'with order 1' )\n                self.beliefsSetOriginal.remove(belief)\n                self.addBelief(newBelief)\n                return 0\n        return 1\n\n    def AGMContractionSuccess(self, newBelief):\n        for belief in self.beliefsSetOriginal:\n            if to_cnf(belief.belief) == to_cnf(newBelief):\n                return 0\n        return 1\n\n    def AGMContractionInclusion(self, oriBeliefSet):\n        counter = 0\n        for belief in self.beliefsSetOriginal:\n            for oribelief in oriBeliefSet.beliefsSetOriginal:\n                if belief.belief == oribelief.belief:\n                    counter += 1\n        if counter == len(self.beliefsSetOriginal):\n            return 1\n        return 0\n\n\n    def __repr__(self):\n        if len(self.beliefsSetOriginal) == 0:\n            return 'empty'\n        return '\\n'.join(str(x) for x in self.beliefsSetOriginal)\n","repo_name":"SeleneQAQ/belief_revision","sub_path":"BeliefBase.py","file_name":"BeliefBase.py","file_ext":"py","file_size_in_byte":3128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"8605811973","text":"list_of_numbers = []\nlist_of_even = []\nlist_of_odd = []\n\nwhile True:\n    user_num = int(input(\"Write a number => \"))\n    list_of_numbers.append(user_num)\n\n    if user_num != 0:\n        if user_num % 2 == 0:\n            list_of_even.append(user_num)\n        else:\n            list_of_odd.append(user_num)\n\n    question = input(\"Do you want to continue? [Y/N] => \").strip().upper()[:1]\n    if \"N\" in question:\n        break\n\nprint(f\"The complete list is: {list_of_numbers}\")\nprint(f\"The even list is: {list_of_even}\")\nprint(f\"The odd list is: {list_of_odd}\")\n","repo_name":"israellinofaustino/python3_learning","sub_path":"exercises_third_world/ex082.py","file_name":"ex082.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"60"} +{"seq_id":"9562510313","text":"from __future__ import print_function\nfrom __future__ import division\n\nimport torch\nimport torch.nn as nn\n\n\nclass CNN(nn.Module):\n    def __init__(self, in_dim, n_class):\n        super(CNN, self).__init__()\n\n        self.conv = nn.Sequential(\n            nn.Conv2d(in_dim, 6, 3, stride=1, padding=1),\n            nn.BatchNorm2d(6),\n            nn.ReLU(True),\n            nn.Conv2d(6, 16, 3, stride=1, padding=0),\n            nn.BatchNorm2d(16),\n            nn.ReLU(True),\n            nn.MaxPool2d(2, 2)\n        )\n\n        self.fc = nn.Sequential(\n            nn.Linear(144, 512),\n            nn.Linear(512, 256),\n            nn.Linear(256, n_class)\n        )\n\n    def forward(self, x):\n        out = self.conv(x)\n        out = out.view(out.size(0), -1)\n        out = self.fc(out)\n        return out\n
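\nif __name__ == \"__main__\":\n    # (Illustrative) shape check with a random batch; in_dim/n_class are made up.\n    # With this fc head the flattened conv output must be 144 features, which an\n    # 8x8 input yields: 8x8 -> conv 8x8 -> conv 6x6 -> pool 3x3, and 16 * 9 = 144.\n    model = CNN(in_dim=1, n_class=5)\n    x = torch.randn(2, 1, 8, 8)\n    print(model(x).shape)  # torch.Size([2, 5])\n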
","repo_name":"Bingmang/kddcup99-cnn","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"60"} +{"seq_id":"43153126739","text":"# Created by fshaw at 03/04/2020\nimport os\nimport re\nimport uuid\nimport pickle\nimport importlib\nfrom os.path import join, isfile\nfrom pathlib import Path\nfrom shutil import rmtree\nfrom urllib.error import HTTPError\nimport jsonpath_rw_ext as jp\nimport pandas\nfrom django.conf import settings\nfrom django.core.files.storage import default_storage\nfrom django_tools.middlewares import ThreadLocal\nimport common.schemas.utils.data_utils as d_utils\nfrom common.utils.helpers import map_to_dict, get_datetime, notify_frontend\nfrom common.dal.copo_da import Sample, DataFile, Profile, ValidationQueue\nfrom .copo_email import Email\nfrom common.lookup import lookup as lk\nfrom common.lookup.lookup import SRA_SETTINGS\nfrom common.schemas.utils.data_utils import json_to_pytype\nfrom .helpers import query_public_name_service\nfrom common.schema_versions.lookup import dtol_lookups as lookup\n#from common.schema_versions import optional_field_dtol_validators as 
optional_validators, \\\n# taxon_validators\n#from common.schema_versions import required_field_dtol_validators as required_validators\nfrom common.utils.logger import Logger\nfrom PIL import Image\n\nImage.MAX_IMAGE_PIXELS = None\n\nl = Logger()\n\ndef make_target_sample(sample):\n # need to pop taxon info, and add back into sample_list\n if not \"species_list\" in sample:\n sample[\"species_list\"] = list()\n out = dict()\n symbiont = sample.pop(\"SYMBIONT\")\n if symbiont.upper() not in [\"SYMBIONT\", \"TARGET\"]:\n if symbiont:\n out[\"SYMBIONT_SOP2dot2\"] = symbiont\n symbiont = \"TARGET\"\n\n out[\"SYMBIONT\"] = symbiont.upper()\n out[\"TAXON_ID\"] = sample.pop(\"TAXON_ID\")\n out[\"ORDER_OR_GROUP\"] = sample.pop(\"ORDER_OR_GROUP\")\n out[\"FAMILY\"] = sample.pop(\"FAMILY\")\n out[\"GENUS\"] = sample.pop(\"GENUS\")\n out[\"SCIENTIFIC_NAME\"] = sample.pop(\"SCIENTIFIC_NAME\")\n out[\"INFRASPECIFIC_EPITHET\"] = sample.pop(\"INFRASPECIFIC_EPITHET\")\n out[\"CULTURE_OR_STRAIN_ID\"] = sample.pop(\"CULTURE_OR_STRAIN_ID\")\n out[\"COMMON_NAME\"] = sample.pop(\"COMMON_NAME\")\n out[\"TAXON_REMARKS\"] = sample.pop(\"TAXON_REMARKS\")\n sample[\"species_list\"].append(out)\n\n return sample\n\n\ndef make_species_list(sample):\n # need to pop taxon info, and add back into sample_list\n if not \"species_list\" in sample:\n sample[\"species_list\"] = list()\n out = dict()\n symbiont = sample.get(\"SYMBIONT\")\n if symbiont.upper() not in [\"SYMBIONT\", \"TARGET\"]:\n if symbiont:\n out[\"SYMBIONT_SOP2dot2\"] = symbiont\n symbiont = \"TARGET\"\n\n out[\"SYMBIONT\"] = symbiont.upper()\n out[\"TAXON_ID\"] = sample.get(\"TAXON_ID\", \"\")\n out[\"ORDER_OR_GROUP\"] = sample.get(\"ORDER_OR_GROUP\", \"\")\n out[\"FAMILY\"] = sample.get(\"FAMILY\", \"\")\n out[\"GENUS\"] = sample.get(\"GENUS\", \"\")\n out[\"SCIENTIFIC_NAME\"] = sample.get(\"SCIENTIFIC_NAME\", \"\")\n out[\"INFRASPECIFIC_EPITHET\"] = sample.get(\"INFRASPECIFIC_EPITHET\", \"\")\n out[\"CULTURE_OR_STRAIN_ID\"] = sample.get(\"CULTURE_OR_STRAIN_ID\", \"\")\n out[\"COMMON_NAME\"] = sample.get(\"COMMON_NAME\", \"\")\n out[\"TAXON_REMARKS\"] = sample.get(\"TAXON_REMARKS\", \"\")\n sample[\"species_list\"].append(out)\n return sample\n\ndef update_permit_filename(sample, permit_filename_mapping):\n # Update/Set permit filename to a unique name if it exists and it is not equal to \"NOT_APPLICABLE\"\n # for ERGA manifests\n for col_name in lookup.PERMIT_FILENAME_COLUMN_NAMES:\n if sample.get(col_name, \"\") and sample.get(col_name, \"\") not in lookup.BLANK_VALS:\n sample[col_name] = permit_filename_mapping.get(sample.get(col_name, \"\"), \"\")\n return sample\n\n\nclass DtolSpreadsheet:\n fields = \"\"\n sra_settings = d_utils.json_to_pytype(SRA_SETTINGS, compatibility_mode=False).get(\"properties\", dict())\n\n def __init__(self, file=None, p_id=\"\", validation_record_id=\"\"):\n self.req = ThreadLocal.get_current_request()\n if p_id == \"\" and validation_record_id:\n self.vr = ValidationQueue().get_record(validation_record_id)\n p_id = self.vr.get(\"profile_id\", \"\")\n if file:\n self.file = file\n else:\n #self.sample_data = self.req.session.get(\"sample_data\", \"\")\n #if self.sample_data == \"\":\n # self.sample_data = pickle.loads(self.vr[\"manifest_data\"])\n self.sample_data = pickle.loads(self.vr[\"manifest_data\"])\n self.isupdate = self.req.session.get(\"isupdate\", False)\n self.profile_id = p_id\n\n sample_images = Path(settings.MEDIA_ROOT) / \"sample_images\"\n sample_permits = Path(settings.MEDIA_ROOT) / \"sample_permits\"\n 
display_images = Path(settings.MEDIA_URL) / \"sample_images\"\n self.these_images = sample_images\n self.these_permits = sample_permits / self.profile_id\n self.display_images = display_images\n self.data = None\n #self.required_field_validators = list()\n #self.optional_field_validators = list()\n #self.taxon_field_validators = list()\n #self.DtolSpreadsheet = optional_validators\n #self.required_validators = required_validators\n #self.taxon_validators = taxon_validators\n #self.symbiont_list = []\n #self.validator_list = []\n # if a file is passed in, then this is the first time we have seen the spreadsheet,\n # if not then we are looking at creating samples having previously validated\n\n # get type of manifest\n t = Profile().get_type(self.profile_id)\n if \"ASG\" in t:\n self.type = \"ASG\"\n elif \"ERGA\" in t:\n self.type = \"ERGA\"\n elif \"DTOL_ENV\" in t:\n self.type = \"DTOL_ENV\"\n else:\n self.type = \"DTOL\"\n self.current_schema_version = settings.MANIFEST_VERSION.get(self.type, \"\")\n # get associated profile type(s) of manifest\n associated_type_lst = Profile().get_associated_type(self.profile_id, value=True, label=False)\n # Get associated type(s) as string separated by '|' symbol\n self.associated_type = \" | \".join(associated_type_lst)\n\n '''\n # create list of required validators\n required = dict(globals().items())[\"required_validators\"]\n for element_name in dir(required):\n element = getattr(required, element_name)\n if inspect.isclass(element) and issubclass(element, Validator) and not element.__name__ == \"Validator\":\n self.required_field_validators.append(element)\n # create list of optional validators\n optional = dict(globals().items())[\"optional_validators\"]\n for element_name in dir(optional):\n element = getattr(optional, element_name)\n if inspect.isclass(element) and issubclass(element, Validator) and not element.__name__ == \"Validator\":\n self.optional_field_validators.append(element)\n # create list of taxon validators\n optional = dict(globals().items())[\"taxon_validators\"]\n for element_name in dir(optional):\n element = getattr(optional, element_name)\n if inspect.isclass(element) and issubclass(element, Validator) and not element.__name__ == \"Validator\":\n self.taxon_field_validators.append(element)\n '''\n\n def loadManifest(self, m_format):\n\n if self.profile_id is not None:\n notify_frontend(data={\"profile_id\": self.profile_id}, msg=\"Loading..\", action=\"info\",\n html_id=\"sample_info\")\n try:\n # read excel and convert all to string\n if m_format == \"xls\":\n self.data = pandas.read_excel(self.file, keep_default_na=False,\n na_values=lookup.NA_VALS)\n elif m_format == \"csv\":\n self.data = pandas.read_csv(self.file, keep_default_na=False,\n na_values=lookup.NA_VALS)\n self.data = self.data.loc[:, ~self.data.columns.str.contains('^Unnamed')]\n '''\n for column in self.allowed_empty:\n self.data[column] = self.data[column].fillna(\"\")\n '''\n self.data = self.data.apply(lambda x: x.astype(str))\n self.data = self.data.apply(lambda x: x.str.strip())\n self.data.columns = self.data.columns.str.replace(\" \", \"\")\n except Exception as e:\n # if error notify via web socket\n notify_frontend(data={\"profile_id\": self.profile_id}, msg=\"Unable to load file. 
\" + str(e),\n action=\"error\",\n html_id=\"sample_info\")\n l.exception(e)\n return False\n return True\n\n \"\"\"\n def validate(self):\n flag = True\n errors = []\n warnings = []\n self.isupdate = False\n\n try:\n # get definitive list of mandatory DTOL fields from schema\n s = json_to_pytype(lk.WIZARD_FILES[\"sample_details\"], compatibility_mode=False)\n self.fields = jp.match(\n '$.properties[?(@.specifications[*] == \"' + self.type.lower() + '\" & @.required==\"true\" & @.manifest_version[*]== \"' + self.current_schema_version + '\")].versions[0]',\n s)\n\n # validate for required fields\n for v in self.required_field_validators:\n errors, warnings, flag, self.isupdate = v(profile_id=self.profile_id, fields=self.fields,\n data=self.data,\n errors=errors, warnings=warnings, flag=flag,\n isupdate=self.isupdate).validate()\n\n # get list of all DTOL fields from schemas\n self.fields = jp.match(\n '$.properties[?(@.specifications[*] == ' + self.type.lower() + '\"& @.manifest_version[*]==\"' + self.current_schema_version + '\")].versions[0]',\n s)\n\n # validate for optional dtol fields\n for v in self.optional_field_validators:\n errors, warnings, flag = v(profile_id=self.profile_id, fields=self.fields, data=self.data,\n errors=errors, warnings=warnings, flag=flag).validate()\n\n # send warnings\n if warnings:\n notify_frontend(data={\"profile_id\": self.profile_id},\n msg=\"
\".join(warnings),\n action=\"warning\",\n html_id=\"warning_info2\")\n # if flag is false, compile list of errors\n if not flag:\n errors = list(map(lambda x: \"
  • \" + x + \"
  • \", errors))\n errors = \"\".join(errors)\n\n notify_frontend(data={\"profile_id\": self.profile_id},\n msg=\"

    \" + self.file.name + \"

      \" + errors + \"
    \",\n action=\"error\",\n html_id=\"sample_info\")\n return False\n\n\n\n except Exception as e:\n l.exception(e)\n error_message = str(e).replace(\"<\", \"\").replace(\">\", \"\")\n notify_frontend(data={\"profile_id\": self.profile_id}, msg=\"Server Error - \" + error_message,\n action=\"error\",\n html_id=\"sample_info\")\n raise\n\n # if we get here we have a valid spreadsheet\n notify_frontend(data={\"profile_id\": self.profile_id}, msg=\"Spreadsheet is Valid\", action=\"info\",\n html_id=\"sample_info\")\n notify_frontend(data={\"profile_id\": self.profile_id}, msg=\"\", action=\"close\", html_id=\"upload_controls\")\n notify_frontend(data={\"profile_id\": self.profile_id}, msg=\"\", action=\"make_valid\", html_id=\"sample_info\")\n\n return True\n \"\"\"\n \n def validate_taxonomy(self):\n ''' check if provided scientific name, TAXON ID,\n family and order are consistent with each other in known taxonomy'''\n\n errors = []\n warnings = []\n flag = True\n try:\n # validate for optional dtol fields\n for v in self.taxon_field_validators:\n errors, warnings, flag = v(profile_id=self.profile_id, fields=self.fields, data=self.data,\n errors=errors, warnings=warnings, flag=flag).validate()\n\n # send warnings\n if warnings:\n notify_frontend(data={\"profile_id\": self.profile_id},\n msg=\"
    \".join(warnings),\n action=\"warning\",\n html_id=\"warning_info\")\n\n if not flag:\n errors = list(map(lambda x: \"
  • \" + x + \"
  • \", errors))\n errors = \"\".join(errors)\n notify_frontend(data={\"profile_id\": self.profile_id},\n msg=\"

    \" + self.file.name + \"

      \" + errors + \"
    \",\n action=\"error\",\n html_id=\"sample_info\")\n return False\n\n else:\n return True\n\n except HTTPError as e:\n\n error_message = str(e).replace(\"<\", \"\").replace(\">\", \"\")\n notify_frontend(data={\"profile_id\": self.profile_id},\n msg=\"Service Error - The NCBI Taxonomy service may be down, please try again later.\",\n action=\"error\",\n html_id=\"sample_info\")\n return False\n except Exception as e:\n l.exception(e)\n error_message = str(e).replace(\"<\", \"\").replace(\">\", \"\")\n notify_frontend(data={\"profile_id\": self.profile_id}, msg=\"Server Error - \" + error_message,\n action=\"error\",\n html_id=\"sample_info\")\n return False\n\n def check_image_names(self, files):\n # compare list of sample names with specimen ids already uploaded\n samples = self.sample_data\n # get list of specimen_ids in sample\n # specimen_id_column_index = 0\n output = list()\n # for num, col_name in enumerate(samples.columns):\n # if col_name == \"SPECIMEN_ID\":\n # specimen_id_column_index = num\n # break\n # if os.path.isdir(self.these_images):\n # rmtree(self.these_images)\n\n # find distinct specimenId\n specimentIds = samples[\"SPECIMEN_ID\"].drop_duplicates().dropna()\n\n thumbnail_folder = self.these_images / \"thumbnail\"\n thumbnail_folder.mkdir(parents=True, exist_ok=True)\n\n image_path = Path(self.these_images)\n display_path = Path(self.display_images)\n # image_path = Path(settings.MEDIA_ROOT) / \"sample_images\" / self.profile_id\n existing_images = DataFile().get_datafile_names_by_name_regx(specimentIds)\n\n for f in files:\n file = files[f]\n\n # file_path = image_path / file.name\n # write full sized image to large storage\n file_path = image_path / file.name\n thumbnail_path = thumbnail_folder / file.name\n thumbnail_display_path = display_path / \"thumbnail\" / file.name\n file_display_path = display_path / file.name\n\n filename = os.path.splitext(file.name)[0].upper()\n # now iterate through samples data to see if there is a match between specimen_id and image name\n found = False\n size = 128, 128\n for specimenId in specimentIds:\n if filename.startswith(specimenId + \"-\"):\n found = True\n if file.name in existing_images:\n output.append(\n {\"file_name\": str(file_display_path), \"thumbnail\": \"\", \"specimen_id\": \"Duplicated\",\n \"name\": \"\"})\n break\n # we have a match\n output.append({\"file_name\": str(file_display_path), \"thumbnail\": str(thumbnail_display_path),\n \"specimen_id\": specimenId, \"name\": file.name})\n\n # logging.info(\"writing \" + str(file_path))\n with default_storage.open(file_path, 'wb+') as destination:\n for chunk in file.chunks():\n destination.write(chunk)\n\n im = Image.open(file_path)\n im.thumbnail(size)\n im.save(thumbnail_path)\n # logging.info(\"written \" + str(file_path))\n break\n if not found:\n output.append({\"file_name\": str(file_display_path), \"specimen_id\": \"\", \"name\": \"\"})\n # save to session\n request = ThreadLocal.get_current_request()\n request.session[\"image_specimen_match\"] = output\n notify_frontend(data={\"profile_id\": self.profile_id}, msg=output, action=\"make_images_table\",\n html_id=\"images\")\n return output\n\n def check_permit_names(self, files):\n # compare list of sample names with specimen ids already uploaded\n samples = self.sample_data\n # get list of specimen_ids in sample\n\n specimen_id_column_index, sampling_permits_required_index, ethics_permits_required_index, nagoya_permits_required_index, sampling_permits_filename_index, ethics_permits_filename_index, 
for specimenId in specimentIds:\n                if filename.startswith(specimenId + \"-\"):\n                    found = True\n                    if file.name in existing_images:\n                        output.append(\n                            {\"file_name\": str(file_display_path), \"thumbnail\": \"\", \"specimen_id\": \"Duplicated\",\n                             \"name\": \"\"})\n                        break\n                    # we have a match\n                    output.append({\"file_name\": str(file_display_path), \"thumbnail\": str(thumbnail_display_path),\n                                   \"specimen_id\": specimenId, \"name\": file.name})\n\n                    # logging.info(\"writing \" + str(file_path))\n                    with default_storage.open(file_path, 'wb+') as destination:\n                        for chunk in file.chunks():\n                            destination.write(chunk)\n\n                    im = Image.open(file_path)\n                    im.thumbnail(size)\n                    im.save(thumbnail_path)\n                    # logging.info(\"written \" + str(file_path))\n                    break\n            if not found:\n                output.append({\"file_name\": str(file_display_path), \"specimen_id\": \"\", \"name\": \"\"})\n        # save to session\n        request = ThreadLocal.get_current_request()\n        request.session[\"image_specimen_match\"] = output\n        notify_frontend(data={\"profile_id\": self.profile_id}, msg=output, action=\"make_images_table\",\n                        html_id=\"images\")\n        return output\n\n    def check_permit_names(self, files):\n        # compare list of sample names with specimen ids already uploaded\n        samples = self.sample_data\n        # get list of specimen_ids in sample\n\n        specimen_id_column_index, sampling_permits_required_index, ethics_permits_required_index, nagoya_permits_required_index, sampling_permits_filename_index, ethics_permits_filename_index, nagoya_permits_filename_index = 0, 0, 0, 0, 0, 0, 0\n\n        output = list()\n        for num, col_name in enumerate(samples.columns):\n            if col_name == \"SPECIMEN_ID\":\n                specimen_id_column_index = num\n            elif col_name == \"SAMPLING_PERMITS_REQUIRED\":\n                sampling_permits_required_index = num\n            elif col_name == \"ETHICS_PERMITS_REQUIRED\":\n                ethics_permits_required_index = num\n            elif col_name == \"NAGOYA_PERMITS_REQUIRED\":\n                nagoya_permits_required_index = num\n            elif col_name == \"SAMPLING_PERMITS_FILENAME\":\n                sampling_permits_filename_index = num\n            elif col_name == \"ETHICS_PERMITS_FILENAME\":\n                ethics_permits_filename_index = num\n            elif col_name == \"NAGOYA_PERMITS_FILENAME\":\n                nagoya_permits_filename_index = num\n\n        if os.path.isdir(self.these_permits):\n            rmtree(self.these_permits)\n        self.these_permits.mkdir(parents=True)\n\n        write_path = Path(self.these_permits)\n        # display_write_path = Path(self.display_images)\n        for f in files:\n            file = files[f]\n\n            file_path = write_path / file.name\n            file_path = Path(settings.MEDIA_ROOT) / \"sample_permits\" / self.profile_id / file.name\n            with default_storage.open(file_path, 'wb+') as destination:\n                for chunk in file.chunks():\n                    destination.write(chunk)\n\n            filename = os.path.splitext(file.name)[0].upper()\n            # now iterate through samples data to see if there is a match between specimen_id and permit name\n        permit_path = Path(settings.MEDIA_ROOT) / \"sample_permits\" / self.profile_id\n        fail_flag = False\n        for num, sample in enumerate(samples.values):\n\n            specimen_id = sample[specimen_id_column_index].upper()\n\n            file_list = [f for f in os.listdir(permit_path) if isfile(join(permit_path, f))]\n            file_list = set(file_list)  # Remove duplicate filenames\n\n            if sample[ethics_permits_required_index] == \"Y\":\n                found = False\n                for filename in file_list:\n                    if filename == sample[ethics_permits_filename_index]:\n                        p = Path(settings.MEDIA_URL) / \"sample_permits\" / self.profile_id / filename\n                        output.append(\n                            {\"file_name\": str(p), \"specimen_id\": specimen_id, \"permit_type\": \"Ethics Permit\"})\n                        found = True\n                        break\n                if not found:\n                    output.append({\n                        \"file_name\": \"None\", \"specimen_id\": \"No Ethics Permits found for \" + specimen_id,\n                        \"file_name_expected\": sample[ethics_permits_filename_index],\n                        \"permit_type\": \"Ethics Permit\"\n                    })\n                    fail_flag = True\n            if sample[sampling_permits_required_index] == \"Y\":\n                found = False\n                for filename in file_list:\n                    if filename == sample[sampling_permits_filename_index]:\n                        p = Path(settings.MEDIA_URL) / \"sample_permits\" / self.profile_id / filename\n                        output.append(\n                            {\"file_name\": str(p), \"specimen_id\": specimen_id, \"permit_type\": \"Sampling Permit\"})\n                        found = True\n                        break\n                if not found:\n                    output.append({\n                        \"file_name\": \"None\", \"specimen_id\": \"No Sampling Permits found for \" + specimen_id,\n                        \"file_name_expected\": sample[sampling_permits_filename_index],\n                        \"permit_type\": \"Sampling Permit\"\n                    })\n                    fail_flag = True\n            if sample[nagoya_permits_required_index] == \"Y\":\n                found = False\n                for filename in file_list:\n                    if filename == sample[nagoya_permits_filename_index]:\n                        p = Path(settings.MEDIA_URL) / \"sample_permits\" / self.profile_id / filename\n                        output.append(\n                            {\"file_name\": str(p), \"specimen_id\": specimen_id, \"permit_type\": \"Nagoya Permit\"})\n                        found = True\n                        break\n                if not found:\n                    output.append({\n                        \"file_name\": \"None\", \"specimen_id\": \"No Nagoya Permits found for \" + specimen_id,\n                        \"file_name_expected\": sample[nagoya_permits_filename_index],\n                        \"permit_type\": \"Nagoya Permit\"\n                    })\n                    fail_flag = True\n        # save to session\n        request = ThreadLocal.get_current_request()\n        request.session[\"permit_specimen_match\"] = output\n        notify_frontend(data={\"profile_id\": self.profile_id, \"fail_flag\": fail_flag}, msg=output,\n                        action=\"make_permits_table\",\n                        html_id=\"permits\")\n        return output\n\n\n    def save_records(self):\n        # create mongo sample objects from info parsed from manifest and saved to session variable\n        # sample_data = self.sample_data\n\n        binary = pickle.loads(self.vr[\"manifest_data\"])\n        try:\n            sample_data = pandas.read_excel(binary, keep_default_na=False,\n                                            na_values=lookup.NA_VALS)\n        except ValueError:\n            sample_data = binary\n        sample_data = sample_data.loc[:, ~sample_data.columns.str.contains('^Unnamed')]\n        '''\n        for column in self.allowed_empty:\n            self.data[column] = self.data[column].fillna(\"\")\n        '''\n        sample_data = sample_data.apply(lambda x: x.astype(str))\n        sample_data = sample_data.apply(lambda x: x.str.strip())\n        sample_data.columns = sample_data.columns.str.replace(\" \", \"\")\n        manifest_id = str(uuid.uuid4())\n        request = ThreadLocal.get_current_request()\n        image_data = request.session.get(\"image_specimen_match\", [])\n\n        for im in image_data:\n            # create a matching DataFile object if an image is provided\n            if im[\"name\"]:\n                df = DataFile().get_records_by_fields({\"name\": im[\"name\"]})\n                if (len(df) == 0):\n                    fields = {\"file_location\": im[\"file_name\"], \"name\": im[\"name\"]}\n                    DataFile().save_record({}, **fields)\n\n        public_name_list = list()\n        x = json_to_pytype(lk.WIZARD_FILES[\"sample_details\"], compatibility_mode=False)\n        self.fields = jp.match(\n            '$.properties[?(@.specifications[*] == \"' + self.type.lower() + '\"& @.manifest_version[*]==\"' + self.current_schema_version + '\")].versions[0]',\n            x)\n\n        # Create a permit filename mapping\n        permit_filename_mapping = dict()\n        permit_filename_lst = list()\n\n        for col_name in lookup.PERMIT_FILENAME_COLUMN_NAMES:\n            if col_name in sample_data.columns:\n                permit_filename_lst.extend(sample_data[col_name].unique().tolist())\n\n        # Iterate the list of permit filenames and create a mapping\n        for permit_filename in permit_filename_lst:\n            if permit_filename.endswith(\".pdf\"):\n                current_date = get_datetime().strftime('%Y%m%d')\n                new_permit_filename = permit_filename.replace('.pdf', \"_\" + str(current_date) + \".pdf\")\n                permit_filename_mapping[permit_filename] = new_permit_filename\n
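\n        # e.g. a permit file \"sampling_permit.pdf\" would be renamed to\n        # \"sampling_permit_20240101.pdf\" (hypothetical name and date; the suffix is\n        # the current date applied above)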
\"permit_type\": \"Nagoya Permit\"\n })\n fail_flag = True\n # save to session\n request = ThreadLocal.get_current_request()\n request.session[\"permit_specimen_match\"] = output\n notify_frontend(data={\"profile_id\": self.profile_id, \"fail_flag\": fail_flag}, msg=output,\n action=\"make_permits_table\",\n html_id=\"permits\")\n return output\n\n\n def save_records(self):\n # create mongo sample objects from info parsed from manifest and saved to session variable\n # sample_data = self.sample_data\n\n binary = pickle.loads(self.vr[\"manifest_data\"])\n try:\n sample_data = pandas.read_excel(binary, keep_default_na=False,\n na_values=lookup.NA_VALS)\n except ValueError:\n sample_data = binary\n sample_data = sample_data.loc[:, ~sample_data.columns.str.contains('^Unnamed')]\n '''\n for column in self.allowed_empty:\n self.data[column] = self.data[column].fillna(\"\")\n '''\n sample_data = sample_data.apply(lambda x: x.astype(str))\n sample_data = sample_data.apply(lambda x: x.str.strip())\n sample_data.columns = sample_data.columns.str.replace(\" \", \"\")\n manifest_id = str(uuid.uuid4())\n request = ThreadLocal.get_current_request()\n image_data = request.session.get(\"image_specimen_match\", [])\n\n for im in image_data:\n # create matching DataFile object for image is provided\n if im[\"name\"]:\n df = DataFile().get_records_by_fields({\"name\": im[\"name\"]})\n if (len(df) == 0):\n fields = {\"file_location\": im[\"file_name\"], \"name\": im[\"name\"]}\n DataFile().save_record({}, **fields)\n\n public_name_list = list()\n x = json_to_pytype(lk.WIZARD_FILES[\"sample_details\"], compatibility_mode=False)\n self.fields = jp.match(\n '$.properties[?(@.specifications[*] == \"' + self.type.lower() + '\"& @.manifest_version[*]==\"' + self.current_schema_version + '\")].versions[0]',\n x)\n\n # Create a permit filename mapping\n permit_filename_mapping = dict()\n permit_filename_lst = list()\n\n for col_name in lookup.PERMIT_FILENAME_COLUMN_NAMES:\n if col_name in sample_data.columns:\n permit_filename_lst.extend(sample_data[col_name].unique().tolist())\n\n # Iterate the list of permit filenames and create a mapping\n for permit_filename in permit_filename_lst:\n if permit_filename.endswith(\".pdf\"):\n current_date = get_datetime().strftime('%Y%m%d')\n new_permit_filename = permit_filename.replace('.pdf', \"_\" + str(current_date) + \".pdf\")\n permit_filename_mapping[permit_filename] = new_permit_filename\n\n sample_data[\"_id\"] = \"\"\n for index, p in sample_data.iterrows():\n s = dict(p)\n type = \"\"\n # store manifest version for posterity. 
If unknown store as 0\n if \"asg\" in self.type.lower():\n type = \"ASG\"\n elif \"dtolenv\" in self.type.lower():\n type = \"DTOLENV\"\n elif \"dtol\" in self.type.lower():\n type = \"DTOL\"\n elif \"erga\" in self.type.lower():\n type = \"ERGA\"\n\n s[\"manifest_version\"] = settings.MANIFEST_VERSION.get(type, \"0\")\n s[\"sample_type\"] = self.type.lower()\n s[\"tol_project\"] = self.type\n s[\"associated_tol_project\"] = self.associated_type\n s[\"biosample_accession\"] = []\n s[\"manifest_id\"] = manifest_id\n if \"erga\" in self.type.lower() and s[\"ASSOCIATED_TRADITIONAL_KNOWLEDGE_OR_BIOCULTURAL_PROJECT_ID\"]:\n s[\"status\"] = \"private\"\n else:\n s[\"status\"] = \"pending\"\n s[\"rack_tube\"] = s.get(\"RACK_OR_PLATE_ID\", \"\") + \"/\" + s[\"TUBE_OR_WELL_ID\"]\n notify_frontend(data={\"profile_id\": self.profile_id},\n msg=\"Creating Sample with ID: \" + s.get(\"TUBE_OR_WELL_ID\") + \"/\" + s[\"SPECIMEN_ID\"],\n action=\"info\",\n html_id=\"sample_info\")\n\n # change fields for symbiont\n if s[\"SYMBIONT\"] == \"SYMBIONT\":\n s[\"ORGANISM_PART\"] = \"WHOLE_ORGANISM\"\n for field in self.fields:\n if field not in lookup.SYMBIONT_FIELDS:\n target = Sample().get_target_by_field(\"rack_tube\", s[\"rack_tube\"])\n if target:\n s[field] = target[0].get(field, \"\")\n else:\n for p in range(1, len(sample_data)):\n row = (map_to_dict(sample_data[0], sample_data[p]))\n if row.get(\"RACK_OR_PLATE_ID\", \"\") == s.get(\"RACK_OR_PLATE_ID\", \"\") and row.get(\n \"TUBE_OR_WELL_ID\", \"\") == s.get(\"TUBE_OR_WELL_ID\", \"\") and row.get(\"SYMBIONT\",\n \"\") == \"TARGET\":\n s[field] = row.get(field, \"\")\n # if ASG change also sex to not collected\n if s[\"tol_project\"] == \"ASG\":\n s[\"SEX\"] = \"NOT_COLLECTED\"\n\n # Update permit filename\n s = update_permit_filename(s, permit_filename_mapping)\n\n s = make_species_list(s)\n sampl = Sample(profile_id=self.profile_id).save_record(auto_fields={}, **s)\n Sample().timestamp_dtol_sample_created(sampl[\"_id\"])\n # update permit filename in the database i.e. 
set unique filename as the permit filename\n\n            if not sampl[\"species_list\"][0][\"SYMBIONT\"] or sampl[\"species_list\"][0][\"SYMBIONT\"] == \"TARGET\":\n                public_name_list.append(\n                    {\"taxonomyId\": int(sampl[\"species_list\"][0][\"TAXON_ID\"]), \"specimenId\": sampl[\"SPECIMEN_ID\"],\n                     \"sample_id\": str(sampl[\"_id\"])})\n\n            p[\"_id\"] = sampl[\"_id\"]\n\n            # for im in image_data:\n            #     # create a matching DataFile object if an image is provided\n            #     if s[\"SPECIMEN_ID\"] in im[\"specimen_id\"]:\n            #         DataFile().insert_sample_id(im[\"name\"], sampl[\"_id\"])\n\n        for im in image_data:\n            # create a matching DataFile object if an image is provided\n            samplelist = sample_data.loc[sample_data[\"SPECIMEN_ID\"] == im[\"specimen_id\"]][\"_id\"].tolist()\n            DataFile().insert_sample_ids(im[\"name\"], samplelist)\n\n        uri = request.build_absolute_uri('/')\n        # query the public name service a first time now to trigger requests for public names that don't exist\n        
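# (entries in public_name_list look like\n        # {\"taxonomyId\": 9606, \"specimenId\": \"SPEC123\", \"sample_id\": \"...\"} -\n        # illustrative values mirroring the dicts appended above)\n        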
recorded_sample[\"species_list\"][0][field], s[field],\n recorded_sample[\"_id\"])\n # update sample\n Sample().add_field(\"species_list.0.\" + str(field), s[field], recorded_sample[\"_id\"])\n is_updated = True\n else:\n # record change\n Sample().record_user_update(field, recorded_sample[field], s[field], recorded_sample[\"_id\"])\n # update sample\n Sample().add_field(field, s[field], recorded_sample[\"_id\"])\n is_updated = True\n\n if recorded_sample[\"biosampleAccession\"] and is_updated:\n Sample().mark_pending(recorded_sample[\"_id\"])\n need_send_email = True\n\n uri = request.build_absolute_uri('/')\n # query public service service a first time now to trigger request for public names that don't exist\n public_names = query_public_name_service(public_name_list)\n for name in public_names:\n if name.get(\"status\", \"\") == \"Rejected\":\n Sample().add_rejected_status_for_tolid(name['specimen'][\"specimenId\"])\n continue\n Sample().update_public_name(name)\n profile_id = request.session[\"profile_id\"]\n profile = Profile().get_record(profile_id)\n title = profile[\"title\"]\n description = profile[\"description\"]\n\n if need_send_email:\n Email().notify_manifest_pending_approval(uri + 'copo/accept_reject_sample/', title=title,\n description=description,\n project=self.type.upper(), is_new=False)\n\n image_data = request.session.get(\"image_specimen_match\", [])\n for im in image_data:\n if im[\"name\"]:\n samplelist = sample_data.loc[sample_data[\"SPECIMEN_ID\"] == im[\"specimen_id\"]][\"_id\"].tolist()\n df = DataFile().get_records_by_fields({\"name\": im[\"name\"]})\n if (len(df) == 0):\n fields = {\"file_location\": im[\"file_name\"], \"name\": im[\"name\"]}\n df = DataFile().save_record({}, **fields)\n DataFile().insert_sample_ids(im[\"name\"], samplelist)\n else:\n orginallist = df[0][\"description\"][\"attributes\"][\"attach_samples\"][\"study_samples\"]\n resultlist = [sam for sam in samplelist if sam not in orginallist]\n DataFile().insert_sample_ids(im[\"name\"], resultlist)\n","repo_name":"collaborative-open-plant-omics/COPO-production","sub_path":"src/apps/copo_dtol_upload/utils/Dtol_Spreadsheet.py","file_name":"Dtol_Spreadsheet.py","file_ext":"py","file_size_in_byte":35347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"14659471273","text":"# Tee ohjelma, joka kysyy oppilaitten nimiä niin kauan kunnes käyttäjä antaa tyhjän syötteen. 
\r\n# Ohjelma kertoo tämän jälkeen montako nimeä annettiin ja näyttää ne yhtenä rivinä pilkulla erotettuna.\r\n\r\nLkm = 0\r\nYhteenlasku = \" \"\r\n\r\nwhile True:\r\n Nimi = str(input(\"Anna Nimiä, tyhjä kenttä lopettaa kysymyksen: \"))\r\n if not Nimi:\r\n break\r\n else:\r\n Nimi2 = str(Nimi)\r\n Lkm = Lkm + 1\r\n Yhteenlasku = (str(Yhteenlasku)) + Nimi2 + \",\"\r\nprint(\"Syötit nimiä\", Lkm, \"kappaletta\")\r\nprint(\"Nimien määrä:\",Yhteenlasku)","repo_name":"Kaltsucoding/Basics-of-Python-Programming","sub_path":"examples/11-debugger/L11T03.py","file_name":"L11T03.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"fi","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"54915210","text":"# -*- coding: UTF-8 -*-\nfrom __future__ import absolute_import, unicode_literals\n\nimport fnmatch\nimport os\nimport sys\nimport logging\n\nfrom six import iteritems\n\nfrom .util import ensure_iterable\n\nlogger = logging.getLogger(__name__)\n\n\nclass AppLoader(object):\n \"\"\"\n Import all subpackages/modules from the app's main directory.\n .. code-block::\n from flask_apploader import AppLoader\n\n def model_callback(modules):\n # Given /myapp/models/foo.py, outputs 'myapp.models.foo'\n for x in modules:\n print(x.__name__)\n\n app_loader = AppLoader(callbacks={'models': model_callback})\n\n def create_app():\n app = Flask(__name__)\n app_loader.init_app(app)\n \"\"\"\n def __init__(self, app=None, groups=None, callbacks=None, load_on_init=None):\n \"\"\"\n :param dict groups:\n Define groups of UNIX glob patterns (list or tuple) to be matched against module paths.\n Defaults to 'models' = '*model*' and 'views' = '*view*'.\n :param dict callbacks:\n Functions to call after groups are loaded.\n Callbacks will be passed a list of module objects that comprise the group.\n Keys should match those in `groups`.\n :param list load_on_init:\n Groups to be loaded during :meth:`init_app`.\n Defaults to ['models'].\n \"\"\"\n self.groups = groups or {\n 'models': ['*model*'],\n 'views': ['*view*']}\n self.callbacks = callbacks or {}\n self.load_on_init = load_on_init or ['models']\n\n if app is not None:\n self.init_app(app)\n\n def init_app(self, app):\n self.app = app\n self.find_modules()\n for x in self.load_on_init:\n self.load_group(x)\n\n def load_group(self, group):\n modules = []\n\n module_paths = self.grouped_module_paths.get(group, [])\n for module_path in module_paths:\n if module_path not in sys.modules:\n logger.debug(\"Importing module: %s\", module_path)\n __import__(module_path)\n modules.append(sys.modules[module_path])\n\n self._execute_callbacks(group, modules)\n return modules\n\n def _execute_callbacks(self, group, *args, **kwargs):\n for callback in ensure_iterable(self.callbacks.get(group, [])):\n callback(*args, **kwargs)\n\n def find_modules(self):\n \"\"\"\n Walk the app's dir and grab any modules that match the predefined patterns.\n \"\"\"\n # set up a place to hold the modules, using the same keys as the patterns\n self.grouped_module_paths = dict((x, []) for x in self.groups.keys())\n\n for directory, dirnames, filenames in os.walk(self.app.root_path):\n # /path/to/myapp/foo --> myapp/foo\n rel_dir = os.path.relpath(directory,\n os.path.join(self.app.root_path, '../')).replace('\\\\', '/')\n\n if rel_dir.endswith('__pycache__'):\n continue\n\n mod_base = rel_dir.replace('/', '.').strip('.')\n\n for mod_name in (v[:-3] for v in filenames if v.endswith('.py')):\n if mod_name == '__init__':\n mod_name = ''\n mod_path = 
'{}.{}'.format(mod_base, mod_name).strip('.')\n\n #print(directory, rel_dir, mod_base, mod_name, mod_path)\n for group, patterns in iteritems(self.groups):\n if any(fnmatch.fnmatch(mod_path, x) for x in patterns):\n self.grouped_module_paths[group].append(mod_path)\n","repo_name":"sycdan/flask-apploader","sub_path":"flask_apploader/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3609,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"32101493079","text":"import sys\n\nn = int(sys.stdin.readline())\ndata = []\n\nfor i in range(n):\n data.append(list(map(int, sys.stdin.readline().split())))\n\ndpmax = [data[0][0], data[0][1], data[0][2]]\ndpmin = [data[0][0], data[0][1], data[0][2]]\n\nfor i in range(1, n):\n dpmax = max(dpmax[0], dpmax[1]) + data[i][0], max(dpmax) + data[i][1], max(dpmax[1], dpmax[2]) + data[i][2]\n dpmin = min(dpmin[0], dpmin[1]) + data[i][0], min(dpmin) + data[i][1], min(dpmin[1], dpmin[2]) + data[i][2]\nprint(max(dpmax), min(dpmin))","repo_name":"LONGNEW/Problem_Solving","sub_path":"DP/BOJ 2096 내려가기.py","file_name":"BOJ 2096 내려가기.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"26151648854","text":"#!/usr/bin/env python3\n\"\"\"Contains a filtering function\"\"\"\n\nimport logging\nimport re\nfrom typing import List, Tuple\nimport os\nimport mysql.connector\n\nPII_FIELDS: Tuple = (\"name\", \"email\", \"phone\", \"ssn\", \"password\")\n\n\nclass RedactingFormatter(logging.Formatter):\n \"\"\" Redacting Formatter class\n \"\"\"\n\n REDACTION = \"***\"\n FORMAT = \"[HOLBERTON] %(name)s %(levelname)s %(asctime)-15s: %(message)s\"\n SEPARATOR = \";\"\n\n def __init__(self, fields: List[str]):\n super(RedactingFormatter, self).__init__(self.FORMAT)\n self.fields = fields\n\n def format(self, record: logging.LogRecord) -> str:\n \"\"\"\n filters incoming records\n :param record:\n :return:\n filtered result\n \"\"\"\n return filter_datum(self.fields, self.REDACTION,\n super().format(record), self.SEPARATOR)\n\n\ndef filter_datum(fields: List[str], redaction: str,\n message: str, separator: str) -> str:\n \"\"\"\n Obfuscates the sensitive fields in a log message.\n \"\"\"\n for field in fields:\n if field in message:\n message = re.sub(r\"{}=.*?{}\".format(field, separator),\n '{}={}{}'.format(field, redaction, separator),\n message)\n return message\n\n\ndef get_logger() -> logging.Logger:\n \"\"\"\n :return:\n logging.logger object\n \"\"\"\n logger = logging.getLogger(\"user_data\")\n logger.setLevel(logging.INFO)\n logger.propagate = False\n stream_handler = logging.StreamHandler()\n formatter = RedactingFormatter(fields=PII_FIELDS)\n stream_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n return logger\n\n\ndef get_db() -> mysql.connector.connection.MySQLConnection:\n \"\"\" returns a secured connection\"\"\"\n USERNAME: str = os.getenv(\"PERSONAL_DATA_DB_USERNAME\", \"root\")\n PASSWORD: str = os.getenv(\"PERSONAL_DATA_DB_PASSWORD\", \"\")\n HOST: str = os.getenv(\"PERSONAL_DATA_DB_HOST\", \"localhost\")\n DATABASE: str = os.getenv(\"PERSONAL_DATA_DB_NAME\")\n\n return mysql.connector.connect(\n host=HOST,\n user=USERNAME,\n password=PASSWORD,\n database=DATABASE\n )\n\n\ndef main() -> None:\n \"\"\" main function\"\"\"\n db = get_db()\n cursor = db.cursor()\n cursor.execute(\"SELECT * FROM users;\")\n fields = [description[0] for description in 
cursor.description]\n logger = get_logger()\n for row in cursor:\n row_data = dict(zip(fields, row))\n string = []\n s = RedactingFormatter.SEPARATOR\n for field, value in row_data.items():\n if field in PII_FIELDS:\n string.append(f\"{field}={RedactingFormatter.REDACTION}{s}\")\n elif field == \"last_login\":\n string.append(f\"{field}={value.isoformat()}{s}\")\n else:\n string.append(f\"{field}={value}{s}\")\n logger.info(f\" \".join(string))\n cursor.close()\n db.close()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"DaviesBrown/alx-backend-user-data","sub_path":"0x00-personal_data/filtered_logger.py","file_name":"filtered_logger.py","file_ext":"py","file_size_in_byte":2996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"15195432261","text":"import requests\nimport telegram\nfrom telegram.ext import Updater, MessageHandler, CommandHandler, Filters\nimport time\nfrom timer import Timer\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\n# date = '211117'\n# MovieName = '유체이탈자'\n# Theater = '2D'\ndate = '211114'\nMovieName = '이터널스'\nTheater = 'IMAX관'\n# Theater = '4DX'\nurl = \"http://www.cgv.co.kr/common/showtimes/iframeTheater.aspx?areacode=01&theatercode=0013&date=20\"+date\n\n# For Telegram\ndo_stop = False\ndef stop_command(update, context):\n update.message.repley_text('알람 그만할게~')\n \ntelegram_token = ''\nchat_id = ''\nimax_booking_bot = telegram.Bot(token = telegram_token)\n# updater = Updater(telegram_token, use_context = True)\n# stop_handler = CommandHandler('stop', stop_command)\n# updater.dispatcher.add_handler(stop_handler)\n\nif __name__ == \"__main__\":\n \n isFirst = True\n didyouget = False\n timer = Timer()\n prevtic = timer.get_tic()\n dt = 60*60\n\n now = datetime.now()\n nowDatetime = now.strftime('%Y-%m-%d %H:%M:%S')\n msg = '%s : 이제 예매표 본다..! 
: %s 일자 %s %s'%(nowDatetime, date, MovieName, Theater)\n print(msg)\n imax_booking_bot.sendMessage(chat_id=chat_id, text=msg)\n while True:\n if True:\n resp = requests.get(url)\n html = resp.text\n soup = BeautifulSoup(html, 'html.parser')\n lists = soup.select('.col-times')\n now = datetime.now()\n nowDatetime = now.strftime('%Y-%m-%d %H:%M:%S')\n for i in range(0,len(lists)):\n if MovieName in str(lists[i]) and Theater in str(lists[i]):\n if '준비중' in str(lists[i]):\n dt = 60*10\n msg = '%s : %s 일자 %s - %s - 준비중..!'%(nowDatetime, date, MovieName, Theater)\n else:\n dt = 1\n msg = '%s : %s 일자 %s - %s - 예매 ㄱㄱ!!'%(nowDatetime, date, MovieName, Theater)\n break\n else:\n msg = '%s : %s 일자 %s - %s - 아직 안뜸...ㅠ'%(nowDatetime, date, MovieName, Theater)\n # updater.start_polling(timeout=0, clean=True)\n # updater.idle()\n nowtic = timer.get_tic()\n if ((isFirst == True) | (nowtic - prevtic > dt)):\n isFirst = False\n # print('dt : ',nowtic - prevtic)\n prevtic = nowtic\n print(msg)\n imax_booking_bot.sendMessage(chat_id=chat_id, text=msg)\n\n time.sleep(3)","repo_name":"Kimbyung-wook/Garage","sub_path":"MovieBookingManager/bookingmanager.py","file_name":"bookingmanager.py","file_ext":"py","file_size_in_byte":2323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"43132225436","text":"# title : 특별한 정렬\n\ndef selectionSort(a, N):\n for i in range(N-1):\n minIdx = i\n for j in range(i+1, N):\n if a[minIdx] > a[j]:\n minIdx = j\n a[i], a[minIdx] = a[minIdx], a[i]\n return a\n\nT = int(input())\n\nfor t in range(1, T+1):\n n = int(input())\n data = list(map(int, input().split()))\n data = selectionSort(data, len(data))\n print(f'#{t}', end=' ')\n for i in range(5):\n print(data[-i-1], end=' ')\n print(data[i], end=' ')\n print()","repo_name":"jinugi214/sw-expert-academy","sub_path":"python_study/D3/4843.py","file_name":"4843.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"26040890865","text":"import smbus\nimport time\n\n\nclass CSPB:\n \"\"\" \n A class for i2c communication with the cluster system power board.\n \n Attributes\n ----------\n i2c_port: int\n the i2c bus or port number. \n address: int\n the i2c bus address of the cluster system power board.\n \n Methods\n -------\n set_power(value):\n Sends the power command value to the cluster system power board.\n \n shutdown(value):\n Sends the shutdown command value to the cluster system power board.\n \n signal_shutdown(value):\n Sends the shutdown signal only command value to the cluster system \n power board.\n \n read_register(register_number):\n returns the data for the specified register.\n \n write_register(register_number, value):\n writes the given data to the specified register.\n \n set_register_number(register_number):\n sets the register number for the read_register command.\n \n send_command(command):\n sends the command string over i2c to the cluster system power board.\n \n Author: Gerard L. Muir\n \"\"\"\n \n # Command character definitions.\n POWER = 0x21 # hex value for ! 
character\n SLOT_SHUTDOWN = 0x23 # hex value for # character\n SIGNAL_SHUTDOWN = 0x24 # hex value for $ character\n SET_REGISTER = 0x52 # hex value for R character\n WRITE_REGISTER = 0x57 # hex value for W character\n \n DEVICE_REG_MODE1 = 0x00\n \n # CSPB EEPROM register addresses.\n I2C_ADDR_RGSTR_ADDR = 0 # i2c boot up address\n SLT_PWR_UP_RGSTR_ADDR = 1 # slot power up on boot\n APP_DATA_RGSTR_ADDR = 2 # application data\n PWR_UP_DELAY = 3 # slot power up delay\n PWR_DWN_SGNL_DRTN_RGSTR_ADDR = 4 # power down signal duration\n SHTDWN_TIME_OUT_RGSTR_ADDR = 5 # slot shutdown timeout \n MNTR_LN_STTS_RGSTR_ADDR = 128 # slot monitor line status \n PWR_STTS_RGSTR_ADDR = 129 # slot power status\n IN_SHUTDOWN_RGSTR_ADDR = 130 # Slot(s) in shutdown status\n BUS_SETTLING_TIME = .005 # delay time to let i2c bus become available\n # between commands.\n\n\n def __init__(self, i2c_port, address):\n \"\"\"\n Construct all necessary attributes for the CSPB object.\n \n Attributes\n ----------\n i2c_port: int\n the i2c bus or port number. \n address: int\n the i2c bus address of the cluster system power board.\n \"\"\"\n \n self.i2c_port = i2c_port # 0 = /dev/i2c-0 (port I2C0),\n # 1 = /dev/i2c-1 (port I2C1)\n self.address = address # i2c buss address in hex form.\n self.bus = smbus.SMBus(i2c_port)\n \n \n def set_power(self, value):\n \"\"\"\n Sends the power command to the cluster system power board slots.\n \n The power register is a 4 bit value representing the power slots with\n the least significant bit representing the first power slot. \n \n Parameters\n ----------\n value : int (0-15)\n the power register value to write.\n\n Returns\n -------\n None\n\n \"\"\"\n \n cspb_command = [self.POWER, value]\n self.send_command(cspb_command)\n \n \n def shutdown(self, value):\n \"\"\"\n Sends the shutdown command to the cluster system power board slots.\n \n The shutdown register is a 4 bit value representing the power slots with\n the least significant bit representing the first power slot. \n \n Parameters\n ----------\n value : int (0-15)\n the shutdown command value to write.\n\n Returns\n -------\n None\n\n \"\"\"\n \n cspb_command = [self.SLOT_SHUTDOWN, value]\n self.send_command(cspb_command)\n \n \n def signal_shutdown(self, value):\n \"\"\"\n Sends the shutdown signal only command to the cluster system power\n board slots.\n \n The shutdown signal only register is a 4 bit value representing the \n power slots with the least significant bit representing the first power\n slot. \n \n Parameters\n ----------\n value : int (0-15)\n the signal shutdown command value to write.\n\n Returns\n -------\n None\n\n \"\"\"\n \n cspb_command = [self.SIGNAL_SHUTDOWN, value] \n self.send_command(cspb_command)\n \n \n def read_register(self, register_number):\n \"\"\"\n Sends the read register command to the cluster system power board.\n \n Parameters\n ----------\n register_number : int\n the register to read.\n\n Returns\n -------\n the value of the specified register.\n\n \"\"\"\n \n self.set_register_number(register_number)\n time.sleep(self.BUS_SETTLING_TIME)\n return (self.bus.read_byte(self.address))\n \n \n def write_register(self, register_number, value):\n \"\"\"\n Sends the write register command to the cluster system power board.\n \n Parameters\n ----------\n register_number : int\n the register to be written.\n value : int\n the value to be written. 
(0-255)\n\n Returns\n -------\n None\n \"\"\"\n \n cspb_command = [self.WRITE_REGISTER, register_number, value] \n self.send_command(cspb_command)\n \n \n def set_register_number(self, register_number):\n \"\"\"\n Sends the set register command to the cluster system power board.\n \n Parameters\n ----------\n register_number : int\n the register to be read by the read register command.\n\n Returns\n -------\n None\n\n \"\"\"\n \n cspb_command = [self.SET_REGISTER, register_number] \n self.send_command(cspb_command)\n \n \n def send_command(self, command):\n \"\"\"\n Sends the specified command string over the i2c bus to the cluster\n system power board.\n \n Parameters\n ----------\n command : string\n the command string to be sent.\n\n Returns\n -------\n None\n\n \"\"\"\n self.bus.write_i2c_block_data(self.address, self.DEVICE_REG_MODE1, command)\n \n \n# Print a test value if this class file is run by itself. This is a\n# small self test routine.\nif __name__ == '__main__':\n cspb = CSPB(1, 0x00)\n print(cspb.address)","repo_name":"jerry-muir/cspb","sub_path":"Software/CSPB_Driver/cspb/CSPB.py","file_name":"CSPB.py","file_ext":"py","file_size_in_byte":6566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"27639912874","text":"# Palindrome Permutation: Given a string, write a function to check if it is a permutation of a palindrome. A palindrome is a word or phrase that is the same forwards and backwards. A permutation\n# is a rearrangement of letters. The palindrome does not need to be limited to just dictionary words.\n# 1.5\n# 1.6\n# EXAMPLE\n# Input: Tact Coa\n# Output: True (permutations: \"taco cat\", \"atco eta\", etc.)\n\ndef generate_palindrome(string):\n # Count the number of occurrences of each character in the string\n char_counts = {}\n for char in string:\n if char in char_counts:\n char_counts[char] += 1\n else:\n char_counts[char] = 1\n\n # Construct the even-character string and the odd-character string (if it exists)\n even_chars = \"\"\n odd_char = None\n for char, count in char_counts.items():\n if count % 2 == 0:\n even_chars += char * (count // 2)\n else:\n if odd_char is not None:\n return None # More than one character occurs an odd number of times\n odd_char = char\n\n # Construct the palindrome\n palindrome = even_chars + (odd_char or \"\") + even_chars[::-1]\n return palindrome\n\n\nprint(generate_palindrome(\"taco cat\"))\n","repo_name":"Olayanju-1234/DSA","sub_path":"Arrays & Strings Python/PalindromePermutation.js.py","file_name":"PalindromePermutation.js.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"43934090388","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# @Time : 2019/1/11\r\n# @Author : Edwin\r\n# @Version : Python 3.6\r\n# @File : tasks.py\r\n\r\nfrom __future__ import absolute_import, unicode_literals\r\nfrom celery import shared_task\r\nfrom celery.task import periodic_task\r\nfrom ora_dual import database_method\r\nfrom ora_dual import models\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\nfrom fetch_knob_metric.fetch_mysql_metric_data import fetch_mysql_metric\r\nfrom fetch_knob_metric.JSONUtile import JSONUtil\r\nimport os,time,datetime,random\r\n\r\n@shared_task\r\ndef add(x, y):\r\n return x + y\r\n\r\n\r\n@shared_task\r\ndef mul(x, y):\r\n return x * y\r\n\r\n@shared_task\r\ndef test():\r\n return \"hello world\"\r\n\r\n\r\n@shared_task\r\ndef 
xsum(numbers):\r\n return sum(numbers)\r\n\r\n# @periodic_task(run_every=5, name=\"run_cmd\")\r\n# #@shared_task\r\n# def run_cmd(cmd=\"df -h\"):\r\n# cmd_out = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)\r\n# return cmd_out.stdout.read()\r\n# #print(result.get())\r\n\r\n@periodic_task(run_every = 3600,name = \"collect_diskspace\")\r\ndef collect_diskspace():\r\n\r\n conn = database_method.initial_connect('system', 'oracle', 'trn')\r\n conn = conn.create_conn()\r\n\r\n space_usage = \"\"\"\r\n SELECT \r\n to_char(sysdate,'yyyy-mm-dd') data_time,\r\n A.TABLESPACE_NAME tablespace_name,\r\n A.TOTAL_SPACE total,\r\n NVL(B.FREE_SPACE, 0) free,\r\n A.TOTAL_SPACE - NVL(B.FREE_SPACE, 0) used,\r\n CASE WHEN A.TOTAL_SPACE=0 THEN 0 ELSE trunc(NVL(B.FREE_SPACE, 0) / A.TOTAL_SPACE * 100, 2) END percent\r\n FROM (SELECT TABLESPACE_NAME, trunc(SUM(BYTES) / 1024 / 1024/1024 ,2) TOTAL_SPACE\r\n FROM DBA_DATA_FILES\r\n GROUP BY TABLESPACE_NAME) A,\r\n (SELECT TABLESPACE_NAME, trunc(SUM(BYTES / 1024 / 1024/1024 ),2) FREE_SPACE\r\n FROM DBA_FREE_SPACE\r\n GROUP BY TABLESPACE_NAME) B\r\n WHERE A.TABLESPACE_NAME = B.TABLESPACE_NAME(+)\r\n ORDER BY 5\r\n \"\"\"\r\n\r\n cursor = conn.cursor()\r\n cursor.execute(space_usage)\r\n usage = cursor.fetchall()\r\n usage_title = [i[0] for i in cursor.description]\r\n usage_data = pd.DataFrame(np.array(usage), columns=usage_title)\r\n print(usage_data)\r\n cursor.close()\r\n\r\n for index, row in usage_data.iterrows():\r\n\r\n # print(row['REDO'])\r\n # for ind in range(len(title)):\r\n # from datetime import datetime\r\n # snap_date=datetime.strptime(row['SNAP_DATE'], '%y/%m/%d').strftime('%Y-%m-%d')\r\n\r\n models.spaceusage.objects.create(\r\n collect_time = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H'),\r\n #data_time=datetime.datetime.strptime(row['DATA_TIME'], '%Y-%m-%d %H'),\r\n tablespace_name=row['TABLESPACE_NAME'],\r\n total=row['TOTAL'],\r\n free=row['FREE'],\r\n used=row['USED'],\r\n percent=row['PERCENT']\r\n )\r\n\r\n@periodic_task(run_every = 3600,name = \"collect_system_metric_period\")\r\ndef collect_system_metric_period():\r\n\r\n conn = database_method.initial_connect('system', 'oracle', 'trn')\r\n conn = conn.create_conn()\r\n system_metric = \"\"\"\r\n select begin_time,end_time,metric_name,metric_unit,average,standard_deviation,sum_squares from dba_hist_sysmetric_summary where to_char(begin_time,'yyyy-mm-dd hh24') = :var\r\n \"\"\"\r\n try:\r\n cursor = conn.cursor()\r\n var = datetime.datetime.strftime(datetime.datetime.now()+datetime.timedelta(hours=-2), '%Y-%m-%d %H')\r\n print(var)\r\n cursor.execute(system_metric,var=var)\r\n collect_system_metric = cursor.fetchall()\r\n collect_system_metric_title = [i[0] for i in cursor.description]\r\n collect_system_metric_data = pd.DataFrame(np.array(collect_system_metric), columns=collect_system_metric_title)\r\n print(collect_system_metric_data)\r\n cursor.close()\r\n\r\n for index, row in collect_system_metric_data.iterrows():\r\n models.system_metric_period.objects.create(\r\n collect_time= datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H'),\r\n begin_time= row['BEGIN_TIME'],\r\n end_time= row['END_TIME'],\r\n metric_name= row['METRIC_NAME'],\r\n data_value = row['METRIC_UNIT'],\r\n metric_average = row['AVERAGE'],\r\n metric_standard = row['STANDARD_DEVIATION'],\r\n metric_squares = row['SUM_SQUARES']\r\n )\r\n except Exception as msg:\r\n print(msg)\r\n\r\n@periodic_task(run_every = 3600,name = \"tablespace_channge\")\r\ndef 
tablespace_channge():\r\n\r\n conn = database_method.initial_connect('system', 'oracle', 'trn')\r\n conn = conn.create_conn()\r\n\r\n tablespace_change = \"\"\"\r\n select rtime,tablespace_name,tablespace_usedsize_kb,tablespace_size_kb,diff_kb from \r\n (with tmp as\r\n (select rtime,tablespace_name,\r\n sum(tablespace_usedsize_kb) tablespace_usedsize_kb,\r\n sum(tablespace_size_kb) tablespace_size_kb\r\n from (select rtime,\r\n e.tablespace_id,f.tablespace_name as tablespace_name,\r\n (e.tablespace_usedsize) * (f.block_size) / 1024 tablespace_usedsize_kb,\r\n (e.tablespace_size) * (f.block_size) / 1024 tablespace_size_kb\r\n from dba_hist_tbspc_space_usage e,\r\n dba_tablespaces f,\r\n v$tablespace g\r\n where e.tablespace_id = g.TS#\r\n and f.tablespace_name = g.NAME\r\n and f.tablespace_name in ('SOE')\r\n )\r\n group by rtime,tablespace_name)\r\n select tmp.rtime,tmp.tablespace_name,\r\n tablespace_usedsize_kb,\r\n tablespace_size_kb,\r\n (tablespace_usedsize_kb -\r\n LAG(tablespace_usedsize_kb, 1, NULL) OVER(ORDER BY tmp.rtime)) AS DIFF_KB\r\n from tmp,\r\n (select rtime rtime,tablespace_name\r\n from tmp\r\n group by rtime,tablespace_name) t2\r\n where t2.rtime = tmp.rtime and t2.tablespace_name=tmp.tablespace_name\r\n order by rtime)\r\n where to_char(to_date(rtime,'mm/dd/yyyy hh24:mi:ss'),'yyyy-mm-dd hh24') = :var\r\n \"\"\"\r\n try:\r\n cursor = conn.cursor()\r\n var = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H')\r\n cursor.execute(tablespace_change,var=var)\r\n tablespace_change_metric = cursor.fetchall()\r\n tablespace_change_metric_title = [i[0] for i in cursor.description]\r\n tablespace__change_metric_data = pd.DataFrame(np.array(tablespace_change_metric), columns=tablespace_change_metric_title)\r\n print(tablespace__change_metric_data)\r\n cursor.close()\r\n\r\n for index, row in tablespace__change_metric_data.iterrows():\r\n\r\n # print(row['REDO'])\r\n # for ind in range(len(title)):\r\n # from datetime import datetime\r\n # snap_date=datetime.strptime(row['SNAP_DATE'], '%y/%m/%d').strftime('%Y-%m-%d')\r\n\r\n models.spacechange.objects.create(\r\n collect_time = row['RTIME'],\r\n #data_time=datetime.datetime.strptime(row['DATA_TIME'], '%Y-%m-%d %H'),\r\n tablespace_name=row['TABLESPACE_NAME'],\r\n tablespace_usedsize_kb=row['TABLESPACE_USEDSIZE_KB'],\r\n tablespace_size_kb=row['TABLESPACE_SIZE_KB'],\r\n DIFF_KB=row['DIFF_KB']\r\n )\r\n except Exception as msg:\r\n print(msg)\r\n\r\n@periodic_task(run_every = 720,name = \"get_mysql_metric\")\r\ndef get_mysql_metric():\r\n\r\n metric_ = fetch_mysql_metric()\r\n # #before_time = time.strftime(\"%d_%m_%Y-%H_%M_%S\")\r\n begin_time = datetime.datetime.now()\r\n metric_.get_mysql_metric(\"before\",time.strftime(\"%d_%m_%Y-%H_%M_%S\"))\r\n os.chdir(\"/u01/tpcc-mysql\")\r\n\r\n\r\n #随机生成负载\r\n pereiod = random.randint(100, 500)\r\n warehouse_number = random.randint(1,100)\r\n conn_number = random.randint(20,50)\r\n\r\n os.system(\"./tpcc_start -h101.132.149.24 -P3306 -d tpccdb -u root -p Edwin703 -w \"+str(warehouse_number)+\" -c \"+str(conn_number)+\" -r 50 -l \"+str(pereiod))\r\n\r\n end_time = datetime.datetime.now()\r\n metric_ = fetch_mysql_metric()\r\n metric_.get_mysql_metric(\"after\",time.strftime(\"%d_%m_%Y-%H_%M_%S\"))\r\n\r\n os.chdir(\"/oracle_predict/\" + time.strftime(\"%d_%m_%Y\"))\r\n #filename = \"data\" + time.strftime(\"%d_%m_%Y-%H_%M_%S\") + \".summary\"\r\n filename = \"summary.json\"\r\n res_ = {\r\n \"start_time\": int(time.mktime(begin_time.timetuple())),\r\n \"end_time\": 
int(time.mktime(end_time.timetuple())),\r\n \"observation_time\": (end_time-begin_time).seconds,\r\n \"database_type\": \"mysql\",\r\n \"database_version\": \"5.7\",\r\n \"workload_name\": \"wk1\"\r\n }\r\n with open(filename, 'w') as file_obj:\r\n JSONUtil.dump(res_, file_obj)\r\n #return render(request, \"./node_modules/gentelella/production/fetch_metric_data_ok.html\",{'title':\"系统负载数据收集完成\",\"time\":time.strftime(\"%d_%m_%Y-%H_%M_%S\")})\r\n\r\n os.chdir(\"/oracle_predict/\" + time.strftime(\"%d_%m_%Y\"))\r\n os.system(\"python upload.py \" + \"/oracle_predict/\" + time.strftime(\r\n \"%d_%m_%Y\") + \" WL9FR3445C1UR9UCFA55 http://106.15.227.92:8080/new_result/\")","repo_name":"edwinjiang703/ruiqi_aiops","sub_path":"ora_dual/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":9521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"27579331549","text":"from scipy.io.wavfile import read\nimport soundfile as sf\nimport os\nfrom pathlib import Path\n\n'''\nCheck for corrupted wav data files as a pre-processing step for many speech tasks.\n'''\n\ndef get_all_file_paths_with_extension(data_dir, extension):\n if not os.path.isdir (data_dir):\n return []\n data_path = Path (data_dir)\n audio_paths = list (map (str, list (data_path.glob ('*' + extension))))\n return audio_paths\n\ndef check_audio_status(audio_paths, samplerate=44100, subtype='PCM_16', channels=1):\n defected_list = []\n corrupted_list = []\n valid_audio_paths = []\n for audio_path in audio_paths:\n try:\n _, _ = read (audio_path)\n ob = sf.SoundFile (audio_path)\n if ob.samplerate == samplerate and ob.channels == channels and ob.subtype == subtype:\n valid_audio_paths.append (audio_path)\n continue\n else:\n # print(f\"Defected {os.path.basename (audio_path)}, \" f\"sample rate: {ob.samplerate}, channels: {ob.channels}, subtype: {ob.subtype}\")\n defected_list.append (os.path.basename (audio_path))\n except Exception:\n corrupted_list.append (os.path.basename (audio_path))\n # print (defected_list)\n # print (corrupted_list)\n return defected_list + corrupted_list\n\ndef get_all_folders(dirname):\n subfolders = [f.path for f in os.scandir (dirname) if f.is_dir ()]\n all_folders = []\n for dirname in list (subfolders):\n new_subfolders = [f.path for f in os.scandir (dirname) if f.is_dir ()]\n for x in new_subfolders:\n all_folders.append (x)\n return all_folders\n\ndef check(download_folder):\n for folder in get_all_folders (download_folder):\n parsed_paths = get_all_file_paths_with_extension (folder, '.wav')\n corrupted_files_list = check_audio_status (parsed_paths)\n if check_audio_status (parsed_paths):\n folder = folder.split ('/')\n folder = os.path.join (folder[-1], folder[-2])\n print (folder)\n print (corrupted_files_list)\n\n\nif __name__ == '__main__':\n wav_files = '/ASR' # folder containing wav files\n check (wav_files)\n","repo_name":"ShihabYasin/ImportantCodes","sub_path":"speech-processing/wav-validation/validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"60"} +{"seq_id":"14744833049","text":"from layers import *\nfrom configs import *\n\ndef inference(patches, name='con7_deconv'):\n with tf.variable_scope(name):\n conv1 = conv2d(patches, 3, 3, 16, padding='VALID', name='conv1')\n lrelu1 = leaky_relu(conv1, name='leaky_relu1')\n conv2 = conv2d(lrelu1, 3, 3, 32, padding='VALID', name='conv2')\n lrelu2 = leaky_relu(conv2, 
name='leaky_relu2')\n conv3 = conv2d(lrelu2, 3, 3, 64, padding='VALID', name='conv3')\n lrelu3 = leaky_relu(conv3, name='leaky_relu3')\n conv4 = conv2d(lrelu3, 3, 3, 128, padding='VALID', name='conv4')\n lrelu4 = leaky_relu(conv4, name='leaky_relu4')\n conv5 = conv2d(lrelu4, 3, 3, 128, padding='VALID', name='conv5')\n lrelu5 = leaky_relu(conv5, name='leaky_relu5')\n conv6 = conv2d(lrelu5, 3, 3, 256, padding='VALID', name='conv6')\n lrelu6 = leaky_relu(conv6, name='leaky_relu6')\n\n rows = int(lrelu6.get_shape()[1])\n cols = int(lrelu6.get_shape()[2])\n channels = int(patches.get_shape()[3])\n # to avoid chessboard artifacts, the filter size must be dividable by the stride\n return deconv2d(lrelu6, 4, 4, [tf.shape(lrelu6)[0], rows*2, cols*2, channels], stride=(2, 2), name='deconv_out')\n\ndef loss(inferences, ground_truthes, huber_width=0.1, weights_decay=0, name='loss'):\n with tf.name_scope(name):\n slice_begin = (int(ground_truthes.get_shape()[1]) - int(inferences.get_shape()[1])) // 2\n slice_end = int(inferences.get_shape()[1]) + slice_begin\n delta = inferences - ground_truthes[:, slice_begin: slice_end, slice_begin: slice_end, :]\n\n delta *= [[[[0.11448, 0.58661, 0.29891]]]] # weights of B, G and R\n l2_loss = tf.pow(delta, 2)\n mse_loss = tf.reduce_mean(tf.reduce_sum(l2_loss, axis=[1, 2, 3]))\n\n if weights_decay > 0:\n weights = tf.get_collection('weights')\n reg_loss = weights_decay * tf.reduce_sum(\n tf.pack([tf.nn.l2_loss(i) for i in weights]), name='regularization_loss')\n tf.summary.scalar('loss', mse_loss + reg_loss)\n return mse_loss + reg_loss\n else:\n tf.summary.scalar('loss', mse_loss)\n return mse_loss\n","repo_name":"Ansore/super_resolution","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"36090526428","text":"# Jan 20 2020\n\n\"\"\"\n😎 The setUp and tearDown \n\n 1. setUp & tearDown will perform for every single test\n 2. testing sequence DO NOT necessarily run in the order you expect! 测试的顺序并非脚本顺序!\n 3. 
keep ALL of our test isolated from eachother 每个功能的测试必须独立,分治,自洽\n\n# 👀 cls和self之间只能二选一!不能同时继承!\n# recall: @classmethod -> working with the ENTIRE class rather than each instance of the class\n\n# 🎯 mocking: https://youtu.be/6tNS--WetLI?t=1720 ⏭\n\"\"\"\n\nimport unittest\nfrom employee import Employee\nfrom unittest.mock import patch\n\n\n###### With Prints ######\n\n\nclass TestEmployee(unittest.TestCase):\n \"\"\" Run classmethod before ANYTYING & run after EVERYTYING \"\"\"\n @classmethod\n def setUpClass(cls):\n print('Run setupClass!')\n print()\n\n @classmethod\n def tearDownClass(cls):\n print('Run teardownClass!')\n\n def setUp(self):\n print('setUp')\n self.emp_1 = Employee('Corey', 'Schafer', 50_000)\n self.emp_2 = Employee('Sue', 'Smith', 60_000)\n\n def tearDown(self):\n print('tearDown\\n')\n\n def test_email(self):\n print('test_email')\n self.assertEqual(self.emp_1.email, 'Corey.Schafer@google.com')\n self.assertEqual(self.emp_2.email, 'Sue.Smith@google.com')\n\n self.emp_1.first = 'John'\n self.emp_2.first = 'Jane'\n\n self.assertEqual(self.emp_1.email, 'John.Schafer@google.com')\n self.assertEqual(self.emp_2.email, 'Jane.Smith@google.com')\n\n def test_fullname(self):\n print('test_fullname')\n self.assertEqual(self.emp_1.fullname, 'Corey Schafer')\n self.assertEqual(self.emp_2.fullname, 'Sue Smith')\n\n self.emp_1.first = 'John'\n self.emp_2.first = 'Jane'\n\n self.assertEqual(self.emp_1.fullname, 'John Schafer')\n self.assertEqual(self.emp_2.fullname, 'Jane Smith')\n\n def test_raise_pay(self):\n print('test_raise_pay')\n self.emp_1.raise_pay()\n self.emp_2.raise_pay()\n\n self.assertEqual(self.emp_1.pay, 55000)\n self.assertEqual(self.emp_2.pay, 66000)\n\n ##### Mocking #####\n\n def test_monthly_schedule(self):\n with patch('employee.requests.get') as mocked_get:\n mocked_get.return_value.ok = True\n mocked_get.return_value.text = 'Success'\n\n schedule = self.emp_1.monthly_schedule('May')\n mocked_get.assert_called_with('http://company.com/Schafer/May')\n self.assertEqual(schedule, 'Success')\n\n mocked_get.return_value.ok = False\n\n schedule = self.emp_2.monthly_schedule('June')\n mocked_get.assert_called_with('http://company.com/Smith/June')\n self.assertEqual(schedule, 'Bad Response!')\n\n\nif __name__ == '__main__':\n unittest.main()\n\n\n###### setUpClass and tearDownClass ######\n","repo_name":"JosephZYU/Python-2-Intermediate","sub_path":"test_employee_4.mocking.py","file_name":"test_employee_4.mocking.py","file_ext":"py","file_size_in_byte":2876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"14664864725","text":"import cv2\r\nimport numpy as np\r\n#events=[i for i in dir(cv2) if 'EVENT' in i]\r\n#print(events)\r\ndef click_event(event,x,y,flags,param):\r\n if event==cv2.EVENT_LBUTTONDOWN:\r\n print(x,' , ',y)\r\n font=cv2.FONT_HERSHEY_SIMPLEX\r\n strXY=str(x)+' , '+str(y)\r\n cv2.putText(img,strXY,(x,y),font,0.6,(0,255,0),2)\r\n cv2.imshow('image',img)\r\n if event==cv2.EVENT_RBUTTONDOWN:\r\n blue=img[y,x,0]\r\n green=img[y,x,1]\r\n red=img[y,x,2]\r\n font=cv2.FONT_HERSHEY_SIMPLEX\r\n str_BGR=str(blue)+' , '+str(red)+' , '+str(green)\r\n cv2.putText(img,str_BGR,(x,y),font,0.4,(0,0,255),2)\r\n cv2.imshow('image',img)\r\n \r\n#img=np.zeros((512,512,3),np.uint8)\r\n#img=cv2.imread(r'C:\\Users\\apple.jpg')\r\nimg=cv2.imread(r'C:\\Users\\messi5.jpg')\r\ncv2.imshow('image',img)\r\ncv2.setMouseCallback('image',click_event)\r\nk=cv2.waitKey(0)\r\nif k==ord('a'):\r\n 
cv2.destroyAllWindows()\r\n","repo_name":"PAVAN1410/cv_mouse_events","sub_path":"mouse_event.py","file_name":"mouse_event.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"29263379054","text":"def validIPAddresses(string):\n IPaddress = []\n\n # For the first part [ . at 1st-3rd position]\n for i in range(1, min(len(string), 4)):\n currentIPaddress = ['', '', '', '']\n currentIPaddress[0] = string[:i] # First part of the current address\n\n if not isValidPart(currentIPaddress[0]):\n continue # If not a valid part then move the dot to the next position\n\n # If the first part is valid then look for the second part\n for j in range(i + 1, min(len(string), i + 4)):\n currentIPaddress[1] = string[i:j] # Second part of the current address\n\n if not isValidPart(currentIPaddress[1]):\n continue # If not a valid part then move the dot to the next position\n\n # If the second part is valid then look for the third part\n for k in range(j + 1, min(len(string), j + 4)):\n currentIPaddress[2] = string[j:k] # Third part of the current address\n currentIPaddress[3] = string[k::] # Fourth part of the current address\n\n if isValidPart(currentIPaddress[2]) and isValidPart(currentIPaddress[3]):\n IPaddress.append('.'.join(currentIPaddress))\n return IPaddress\n\n\ndef isValidPart(IPpart):\n IntIP = int(IPpart)\n if IntIP > 255:\n return False\n return len(IPpart) == len(str(IntIP)) # Check for leading 0s.\n\n\n\n\n","repo_name":"Riazul-Islam-Rifat/AlgoExpert","sub_path":"MEDIUM/Valid IP Address/Valid IP Address.py","file_name":"Valid IP Address.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"26285659279","text":"#-*- coding:utf-8 -*-\r\n'''\r\nCreated on 2017年3月22日\r\n\r\n@author: ning.lin\r\n'''\r\nimport os\r\n\r\nimport django\r\n\r\n\r\n#os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"mysite.settings\")\r\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\r\n\r\nif django.VERSION >= (1, 7):\r\n django.setup()\r\n\r\ndef main():\r\n print(BASE_DIR)\r\n from blog.models import Article\r\n #art=Article()\r\n f=open('import1.txt','r')\r\n \r\n for line in f:\r\n if not line.find(\"#\")==-1:#如果找到了#号才执行下面的,否则不执行\r\n title,content,pub_date,update_time=line.split('#')\r\n #ValueError: need more than 1 value to unpack\r\n #可以发现这句话里面并没有#号,而我们要#号进行拆分,本文是因为后面有多个空行\r\n print(title,content,pub_date,update_time)\r\n # art.title=title\r\n # art.content=content\r\n # art.pub_date=pub_date\r\n # art.update_time=update_time\r\n # art.save()\r\n Article.objects.create(title=title,content=content,pub_date=pub_date,update_time=update_time)\r\n #下面这条不会重复导入\r\n Article.objects.get_or_create(title=title,content=content,pub_date=pub_date,update_time=update_time)\r\n f.close()\r\n#使用Model.objects.bulk_create()方式\r\ndef b():\r\n from blog.models import Article\r\n f=open('import1.txt','r') \r\n Blog_List=[]\r\n for line in f:\r\n if not line.find(\"#\")==-1:\r\n title,content,pub_date,update_time=line.split('#')\r\n art=Article(title=title,content=content,pub_date=pub_date,update_time=update_time)\r\n Blog_List.append(art)\r\n print(Blog_List)\r\n #而bulk_create()是执行一条SQL存入多条数据,做会快很多!\r\n Article.objects.bulk_create(Blog_List)\r\n f.close()\r\n#用列表解析代替 for 循环会更快\r\ndef c():\r\n from blog.models import Article\r\n f=open('import1.txt','r') \r\n Blog_List=[]\r\n # 以下四行 也可以用 列表解析 写成下面这样\r\n # 
BlogList = [Blog(title=line.split('****')[0], content=line.split('****')[1]) for line in f]\r\n for line in f:\r\n if not line.find(\"#\")==-1:\r\n parts=line.split('#')\r\n art=Article(title=parts[0],content=parts[1],pub_date=parts[2],update_time=parts[3])\r\n Blog_List.append(art)\r\n print(Blog_List)\r\n #而bulk_create()是执行一条SQL存入多条数据,做会快很多!\r\n Article.objects.bulk_create(Blog_List)\r\n f.close()\r\nif __name__==\"__main__\":\r\n #main()\r\n #b()\r\n c()\r\n print(\"done\")\r\n ","repo_name":"lnytx/Django","sub_path":"blog/import_blog.py","file_name":"import_blog.py","file_ext":"py","file_size_in_byte":2673,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"37342524985","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nfrom scipy.optimize import curve_fit\n\nx_data = [1, 2, 3, 4, 5]\ny_data = [3, 9, 27, 48, 54]\nx_err = 0.5\ny_err = [1, 2, 3, 4, 5]\n\nplt.errorbar(x_data, y_data, xerr=x_err, yerr=y_err, fmt='o')\n\ndef func(x, a, b):\n return a*x + b\n\n\npopt, pcov = curve_fit(func, x_data, y_data)\nx_range = np.linspace(np.amin(x_data), np.amax(x_data), 100)\nplt.plot(x_range, func(x_range, *popt))\n\n\nplt.title(\"Example of a graph title\")\nplt.xlabel(\"An x-axis label\")\nplt.ylabel(\"A y-axis label using $LaTeX$ syntax\")\nplt.savefig(\"ch4.png\")\nplt.show()\n","repo_name":"wluckin/python_plotting","sub_path":"ch1.py","file_name":"ch1.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"588213179","text":"import os\nfrom bs4 import BeautifulSoup\nimport concurrent.futures\nfrom analyst_class import Analyst\nfrom executive_class import Executive\nimport linecache\nimport sys\nimport csv\nfrom time import sleep\nimport re\nimport html2text\nfrom unidecode import unidecode\n\n\ndef fix_codec():\n in_dir = 'raw_texts'\n out_dir = 'texts2'\n \n try:\n os.mkdir(out_dir)\n except Exception:\n pass\n \n current_files = os.listdir(in_dir)\n \n at_analysts = False\n at_start = False\n has_sep = False\n for item in current_files:\n file_name = item\n \n if file_name == '.DS_Store':\n pass\n \n else:\n with open(os.path.join(in_dir, file_name), 'r') as fin:\n raw_lines = fin.readlines()\n\n for line in raw_lines:\n if line.lower().strip().startswith('**analysts'):\n at_analysts = True\n if at_analysts == True and line.lower().strip().startswith('**'):\n at_start = True\n \n if at_start == False and ' - ' in line.lower().replace('–','-'):\n has_sep = True\n \n at_analysts = False\n at_start = False\n fixed_lines = []\n for line in raw_lines:\n if line.lower().strip().startswith('**analysts'):\n at_analysts = True\n if at_analysts == True and line.lower().strip().startswith('**'):\n at_start = True\n \n if has_sep == True:\n fixed_lines.append(unidecode(line).replace('–','-'))\n else:\n if at_start == False:\n fixed_lines.append(unidecode(line).replace('–','-').replace(',', ' - ', 1).replace(',',''))\n \n else:\n if line.lower().startswith('**'):\n fixed_lines.append(unidecode(line).replace('–','-').replace(',', ' - ', 1).replace(',',''))\n else:\n fixed_lines.append(unidecode(line).replace('–','-'))\n \n \n \n with open(os.path.join(out_dir, file_name), 'w') as fout:\n for line in fixed_lines:\n fout.write(line)\n \n \n \n# function to get the data from a transcript .txt file in order to determine if its complete and then determine output filename\ndef get_page_meta(lines):\n regex = r\"\\b[q][1-4]\\b\" # q[1-4]\n 
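# [Editor's note -- illustrative comment, not part of the original record] The
# quarter-tag pattern defined just above and the two defined just below match
# lowercased transcript text, e.g. regex matches 'q3' in 'q3 2019 earnings',
# regex2 matches 'f2q19', and regex3 matches 'f2q2019'. (Hypothetical example
# strings, shown only to document the three formats being captured.)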
regex2 = r\"\\b[f][1-4][q][0-9]{2}\\b\" # f[1-4]q[xx]\n regex3 = r\"\\b[f][1-4][q][0-9]{4}\\b\" # f[1-4]q[xxxx]\n \n # initialize transcript metadata variables\n date = ''\n name = ''\n ticker = ''\n quarter = ''\n \n has_qa = False\n # remove the lines of the transcript text that are unnecessary/mess up formatting\n fixed_lines = []\n for line in lines:\n if line == '' or line == '\\n' or line.lower().strip() == 'earnings call transcript':\n pass\n else:\n fixed_lines.append(line.replace('\\n','').replace('–','-').strip())\n \n # iterate through each line in the text, grabbing desired data\n reached_date = False\n reached_about = False # company name and ticker\n has_qa = False\n got_quarter = False\n \n for i, line in enumerate(fixed_lines):\n \n # iterate through until we reach and pull out the date\n if line.lower().startswith('transcript') and reached_date == False:\n date_index = i + 2\n date = fixed_lines[date_index]\n \n #format for use in outfiles and spreadsheet\n date_parts = date.split()\n date = ' '.join(date_parts[0:3]).replace(',', '').replace('.', '')\n year = date.split()[-1]\n \n reached_date = True\n \n # iterate through until we reach and pull out the company name and ticker\n if line.lower().startswith('| about:') and reached_about == False:\n company_data = line.split(':')[1].strip()\n name = company_data.split('(')[0].strip()\n ticker = company_data.split('(')[1].strip().split(')')[0]\n reached_about = True\n \n # get the quarter of the call\n if reached_date == True and reached_about == True:\n generic_re = re.compile('{}|{}|{}'.format(regex, regex2, regex3)).findall(line.lower())\n if generic_re:\n quarter = generic_re[0]\n \n got_quarter = True\n \n if reached_date == True and reached_about == True and got_quarter == True:\n file_name = '{name}_{ticker}_{quarter}_{year}'.format(\n name = name,\n ticker = ticker,\n quarter = quarter,\n year = year,\n ).lower().replace(' ','').replace('.','').replace(',','').replace('/','').strip() + '.txt'\n \n \n \n # QUESSTION-AND-ANSWER\n if line.lower().startswith('**question-and-answer'):\n has_qa = True\n \n try: \n if has_qa == True:\n return file_name\n else:\n return None\n except:\n pass\n \n \n# convert scraped html pages to .txt\ndef scrape2text(file):\n archive = 'scrapes'\n out = 'textfiles'\n\n # work through file\n \n location = os.path.join(archive, file)\n \n # read in individual HTML file\n with open(location, 'r') as fin:\n text = fin.read()\n \n # convert individual HTML page to HTML2Text content\n text_maker = html2text.HTML2Text()\n text_maker.ignore_links = True\n text_maker.ignore_images = True\n text_maker.ignore_anchors = True\n text_maker.body_width = 0\n text = text_maker.handle(text)\n \n with open('temp.txt', 'w') as temp:\n temp.write(text)\n \n with open('temp.txt', 'r') as temp:\n temp_lines = temp.readlines()\n \n os.remove('temp.txt')\n \n file_name = get_page_meta(temp_lines)\n \n # write to .txt\n if file_name == None:\n pass\n else:\n fout = os.path.join(out, file_name)\n with open(fout, 'w') as fout:\n fout.write(text)\n\n\n# function to initialize the conversions from the scraped html pages to .txts\ndef init_conversion():\n archive = 'scrapes'\n html_files = os.listdir(archive)\n \n out = 'textfiles'\n try:\n os.mkdir(out)\n except Exception:\n pass\n \n with concurrent.futures.ProcessPoolExecutor(max_workers=None) as executor:\n future_to_file = {executor.submit(scrape2text, file): file for file in html_files}\n \n \n# process files\ndef analyze(filein):\n with open(filein, 'r') as fin:\n 
lines = fin.readlines()\n \n outputs = 'outputs'\n out_folder = 'YYYYYYYYYYYYYYYYYYYY'\n try:\n os.mkdir(os.path.join(outputs, out_folder))\n except Exception:\n pass\n \n regex = r\"\\b[q][1-4]\\b\" # q[1-4]\n regex2 = r\"\\b[f][1-4][q][0-9]{2}\\b\" # f[1-4]q[xx]\n regex3 = r\"\\b[f][1-4][q][0-9]{4}\\b\" # f[1-4]q[xxxx]\n \n # initialize transcript metadata variables\n date = ''\n name = ''\n ticker = ''\n quarter = ''\n year = ''\n \n # remove the lines of the transcript text that are unnecessary/mess up formatting\n fixed_lines = []\n for line in lines:\n if line == '' or line == '\\n' or line.lower().strip() == 'earnings call transcript':\n pass\n else:\n fixed_lines.append(line.replace('\\n','').replace('–','-').strip())\n \n # iterate through each line in the text, grabbing desired data\n reached_date = False\n reached_about = False # company name and ticker\n through_execs = False\n at_execs = False\n through_analysts = False\n at_analysts = False\n at_qa = False\n got_quarter = False\n \n executives_list = []\n executives = []\n analysts_list = []\n analysts = []\n \n analysts_order = []\n current_speaker = ''\n previous_speaker = ''\n for i, line in enumerate(fixed_lines):\n \n # iterate through until we reach and pull out the date\n if line.lower().startswith('transcript') and reached_date == False:\n date_index = i + 2\n date = fixed_lines[date_index]\n \n #format for use in outfiles and spreadsheet\n date_parts = date.split()\n date = ' '.join(date_parts[0:3]).replace(',', '').replace('.', '')\n year = date.split()[-1]\n \n reached_date = True\n \n # iterate through until we reach and pull out the company name and ticker\n if line.lower().startswith('| about:') and reached_about == False:\n company_data = line.split(':')[1].strip()\n name = company_data.split('(')[0].strip()\n ticker = company_data.split('(')[1].strip().split(')')[0]\n reached_about = True\n \n # get the quarter of the call\n if reached_date == True and reached_about == True and got_quarter == False:\n generic_re = re.compile('{}|{}|{}'.format(regex, regex2, regex3)).findall(line.lower())\n if generic_re:\n quarter = generic_re[0]\n \n got_quarter = True\n \n if reached_date == True and reached_about == True and got_quarter == True:\n out_folder = '{ticker}_{quarter}_{year}'.format(\n ticker = ticker,\n quarter = quarter,\n year = year\n ).lower()\n \n \n \n try:\n os.mkdir(os.path.join(outputs, out_folder))\n except Exception:\n pass\n \n \n \n \n # EXECUTIVES\n # EXECUTIVES\n # EXECUTIVES\n # EXECUTIVES\n # EXECUTIVES\n if line.lower().startswith('**executives'):\n at_execs = True\n # get the executives\n if through_execs == False and at_execs == True:\n if line.lower().startswith('**analysts'):\n through_execs = True\n \n else:\n if ' - ' in line:\n executive = line.lower().split(' - ')\n exec_name = executive[0].strip()\n job = executive[1].strip()\n temp = Executive()\n temp.name = exec_name\n temp.job = job\n output_string = '{exec_name}_{quarter}_{year}_{ticker}_{role}'.format(\n exec_name = exec_name.strip(),\n quarter = quarter.strip(),\n year = year.strip(),\n ticker = ticker.strip(),\n role = job.replace(' ', '').replace(',', '-')\n ).replace('/','-')[:230] + '.txt'\n temp.outfile = os.path.join(out_folder, output_string)\n executives_list.append(temp)\n executives.append(exec_name)\n \n else:\n if(line.lower().startswith('**executives')) or '**' in line:\n pass\n else:\n executive = line.lower().strip()\n exec_name = executive\n job = 'undefined'\n temp = Executive()\n temp.name = exec_name\n 
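# [Editor's note -- descriptive comment, not original code] Each Executive
# object built here carries a parsed speaker name, job title, and a
# per-speaker outfile path; the question-and-answer pass later in this
# function appends every line that executive speaks to that outfile and, when
# the previous speaker was an analyst, tallies CEO/CFO/COO word and dialogue
# counts against that analyst.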
temp.job = job\n output_string = '{exec_name}_{quarter}_{year}_{ticker}_{role}'.format(\n exec_name = exec_name.strip(),\n quarter = quarter.strip(),\n year = year.strip(),\n ticker = ticker.strip(),\n role = job.replace(' ', '').replace(',', '-')\n ).replace('/','-')[:230] + '.txt'\n temp.outfile = os.path.join(out_folder, output_string)\n executives_list.append(temp)\n executives.append(exec_name)\n \n # ANALYSTS\n # ANALYSTS\n # ANALYSTS\n # ANALYSTS\n # ANALYSTS\n if line.lower().startswith('**analysts'):\n at_analysts = True\n # get the analysts\n if through_analysts == False and at_analysts == True:\n if line.lower().startswith('**presentation') or line.lower().startswith('**operator') or (line.lower().startswith('**') and not 'analysts' in line.lower()):\n through_analysts = True\n \n else:\n if ' - ' in line:\n analyst = line.lower().split(' - ')\n analyst_name = analyst[0].strip()\n job = analyst[1].strip()\n temp = Analyst()\n temp.name = analyst_name\n temp.company = job\n output_string = '{analyst_name}_{firm}_{quarter}_{year}_{ticker}_{role}'.format(\n analyst_name = analyst_name,\n firm = job.replace(',','').replace('.','').upper().strip(),\n quarter = quarter.strip(),\n year = year.strip(),\n ticker = ticker.strip(),\n role = 'analyst'\n ).replace('/','-')[:230] + '.txt'\n temp.outfile = os.path.join(out_folder, output_string)\n analysts_list.append(temp)\n analysts.append(analyst_name)\n \n else:\n if(line.lower().startswith('**analysts')) or '**' in line:\n pass\n else:\n analyst = line.lower().strip()\n analyst_name = analyst\n job = 'undefined'\n temp = Analyst()\n temp.name = analyst_name\n temp.company = job\n output_string = '{analyst_name}_{firm}_{quarter}_{year}_{ticker}_{role}'.format(\n analyst_name = analyst_name,\n firm = job.replace(',','').replace('.','').upper(),\n quarter = quarter.strip(),\n year = year.strip(),\n ticker = ticker.strip(),\n role = 'analyst'\n ).replace('/','-')[:230] + '.txt'\n temp.outfile = os.path.join(out_folder, output_string)\n analysts_list.append(temp)\n analysts.append(analyst_name)\n \n # QUESSTION-AND-ANSWER\n # QUESSTION-AND-ANSWER\n # QUESSTION-AND-ANSWER\n # QUESSTION-AND-ANSWER\n # QUESSTION-AND-ANSWER\n if line.lower().startswith('**question-and-answer'):\n at_qa = True\n # get q&a data\n if at_qa == True:\n # this part gets the speaker\n if line.startswith('**'):\n current_name = ''\n if ' - ' in line:\n current_name = line.lower().split(' - ')[0].replace('**','')\n previous_speaker = current_speaker[:]\n current_speaker = current_name\n else:\n current_name = line.lower().replace('**','')\n previous_speaker = current_speaker[:]\n current_speaker = current_name\n \n # test\n #if any(current_speaker in name for name in analysts) or any(current_speaker in name for name in executives):\n #print(current_speaker) \n \n # this part handles the actual spoken words \n else:\n # handle words from executives\n if any(current_speaker in name for name in executives):\n for executive in executives_list:\n if current_speaker == executive.name:\n with open(os.path.join(outputs, executive.outfile), 'a') as fout:\n fout.write(line + '\\n')\n \n if fixed_lines[i-1].startswith('**'):\n executive.total_dialogues += 1\n executive.total_words += len(line.split())\n \n for analyst in analysts_list:\n if analyst.name == previous_speaker:\n current_words = line.split()\n num_current_words = len(current_words)\n \n if 'chief executive officer' in str(executive.job) or 'ceo' in str(executive.job):\n analyst.total_ceo_words += num_current_words\n if 
fixed_lines[i-1].startswith('**'):\n analyst.ceo_dialogues += 1\n elif 'chief financial officer' in str(executive.job) or 'cfo' in str(executive.job):\n analyst.total_cfo_words += num_current_words\n if fixed_lines[i-1].startswith('**'):\n analyst.cfo_dialogues += 1\n elif 'chief operating officer' in str(executive.job) or 'coo' in str(executive.job):\n analyst.total_coo_words += num_current_words\n if fixed_lines[i-1].startswith('**'):\n analyst.coo_dialogues += 1\n \n # handle words from analysts\n elif any(current_speaker in name for name in analysts):\n for analyst in analysts_list:\n if current_speaker == analyst.name:\n with open(os.path.join(outputs, analyst.outfile), 'a') as fout:\n fout.write(line + '\\n')\n \n if fixed_lines[i-1].startswith('**'):\n analyst.total_dialogues += 1\n analyst.total_words += len(line.split())\n \n if current_speaker not in analysts_order:\n analysts_order.append(current_speaker)\n \n \n # else, should be operator text [unknown analyst/executive]\n else:\n if current_speaker == 'operator':\n pass\n elif 'unknown' in current_speaker or 'undefined' in current_speaker:\n text_name = 'unknown.txt'\n file_out = os.path.join(outputs, out_folder, text_name)\n with open(file_out, 'a') as fout:\n fout.write(line + '\\n')\n else:\n pass\n \n \n # set analyst speaking order for each analyst object\n for analyst in analysts_list:\n for person in analysts_order:\n if analyst.name == person:\n analyst.order_in_queue = analysts_order.index(person) + 1\n \n \n \n # output to csv\n output_to_csv = []\n header_row = ['Name', 'Position', 'Firm', 'Quarter', 'Ticker', 'Date', 'Analyst', 'Order of Question',\n 'Number of Analysts', 'Number of Dialogues',\n 'Number of CEO Dialogues', 'Number of CEO Words', 'Number of CFO Dialogues',\n 'Number of CFO Words', 'Number of COO Dialogues', 'Number of COO Words', 'Total Words',\n 'Text']\n output_to_csv.append(header_row)\n \n for executive in executives_list:\n if executive.total_words == 0:\n executive.outfile = 'na/none'\n new_row = [executive.name, executive.job, ticker, quarter, ticker, date,\n 0, 'N/A', len(analysts_list), executive.total_dialogues, 'N/A',\n 'N/A', 'N/A', 'N/A', 'N/A', 'N/A', executive.total_words, executive.outfile.split('/')[1]]\n output_to_csv.append(new_row)\n \n for analyst in analysts_list:\n if analyst.total_words == 0:\n analyst.outfile = 'na/none'\n new_row = [analyst.name, 'analyst', analyst.company.upper(), quarter, ticker, date, 1,\n analyst.order_in_queue, len(analysts_list), analyst.total_dialogues,\n analyst.ceo_dialogues, analyst.total_ceo_words, analyst.cfo_dialogues,\n analyst.total_cfo_words, analyst.coo_dialogues, analyst.total_coo_words, analyst.total_words,\n analyst.outfile.split('/')[1]]\n output_to_csv.append(new_row)\n\n out_dir = os.path.join(outputs, out_folder)\n file_name = '{name}_{quarter}_{year}.csv'.format(\n name = name,\n quarter = quarter,\n year = year\n ).replace(' ', '').replace('/', '').replace(',','').strip()\n save_path = os.path.join(out_dir, file_name)\n \n with open(save_path, 'w') as f:\n writer = csv.writer(f)\n writer.writerows(output_to_csv)\n \n print('success')\n \n \n \ndef main():\n # read-in transcript call text\n #archive = 'fixed_texts'\n archive = 'fixed_texts'\n files = os.listdir(archive)\n \n try:\n os.mkdir('outputs')\n except Exception:\n pass\n \n for file in files:\n filein = os.path.join(archive, file)\n \n analyze(filein)\n\ndef count(): \n cpt = sum([len(files) for r, d, files in os.walk('expanded')])\n print(cpt)\n\n\ndef count_broke():\n 
bad_dir = os.path.join('outputs', 'YYYYYYYYYYYYYYYYYYYY')\n    print(len(os.listdir(bad_dir)))\nmain()\n#count_broke()\n\n\n\n\n    ","repo_name":"johnurb/seekingalpha_transcript_extractor","sub_path":"output.py","file_name":"output.py","file_ext":"py","file_size_in_byte":22472,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"60"}
+{"seq_id":"12186414392","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nimport xlwt\r\nimport pandas as pd\r\n\r\n\r\ndef ResultNFKB(gene):\r\n    string = gene + ' ' + 'NF-'\r\n\r\n    param = {\r\n        'term':string,\r\n    }\r\n    url = requests.get(url='https://pubmed.ncbi.nlm.nih.gov/',params=param)\r\n\r\n    demo = url.text\r\n\r\n    soup = BeautifulSoup(demo,'lxml')\r\n\r\n    result = soup.select('.results-amount .value')\r\n\r\n    if result:\r\n        for i in result:\r\n            return int(i.get_text())\r\n\r\n    #The results page is different when a search returns only one result\r\n    elif soup.select('.single-result-redirect-message'):\r\n        return 1\r\n    else:\r\n        return 0\r\n\r\n\r\n\r\ndef ResultTAU(gene):\r\n    string = gene + ' ' + 'TAU'\r\n\r\n    param = {\r\n        'term':string,\r\n    }\r\n    url = requests.get(url='https://pubmed.ncbi.nlm.nih.gov/',params=param)\r\n\r\n    demo = url.text\r\n\r\n    soup = BeautifulSoup(demo,'lxml')\r\n\r\n    result = soup.select('.results-amount .value')\r\n\r\n    if result:\r\n        for i in result:\r\n            return int(i.get_text())\r\n\r\n    # The results page is different when a search returns only one result\r\n    elif soup.select('.single-result-redirect-message'):\r\n        return 1\r\n    else:\r\n        return 0\r\n\r\n\r\ndef ResultUBI(gene):\r\n    string = gene + ' ' + 'UBIQUITIN'\r\n\r\n    param = {\r\n        'term':string,\r\n    }\r\n    url = requests.get(url='https://pubmed.ncbi.nlm.nih.gov/',params=param)\r\n\r\n    demo = url.text\r\n\r\n    soup = BeautifulSoup(demo,'lxml')\r\n\r\n    result = soup.select('.results-amount .value')\r\n\r\n    if result:\r\n        for i in result:\r\n            return int(i.get_text())\r\n\r\n    # The results page is different when a search returns only one result\r\n    elif soup.select('.single-result-redirect-message'):\r\n        return 1\r\n    else:\r\n        return 0\r\n\r\ndef ResultBAG2(gene):\r\n    string = gene + ' ' + 'BAG2'\r\n\r\n    param = {\r\n        'term':string,\r\n    }\r\n    url = requests.get(url='https://pubmed.ncbi.nlm.nih.gov/',params=param)\r\n\r\n    demo = url.text\r\n\r\n    soup = BeautifulSoup(demo,'lxml')\r\n\r\n    result = soup.select('.results-amount .value')\r\n\r\n    if result:\r\n        for i in result:\r\n            return int(i.get_text())\r\n\r\n    # The results page is different when a search returns only one result\r\n    elif soup.select('.single-result-redirect-message'):\r\n        return 1\r\n    else:\r\n        return 0\r\n#Takes a probe set ID and returns the corresponding gene name\r\ndef GeneName(microName):\r\n\r\n    string = microName\r\n\r\n    param = {\r\n        'term':string,\r\n    }\r\n\r\n    url = requests.get(url='https://www.ncbi.nlm.nih.gov/geoprofiles/', params=param)\r\n\r\n    demo = url.text\r\n\r\n    soup = BeautifulSoup(demo, 'lxml')\r\n\r\n    result = soup.select('.rsltcont .title')\r\n\r\n    for i in result:\r\n        i = i.get_text().split('-')\r\n        return i[0]\r\n\r\n\r\n#Convert gene names to microarray probe IDs; geneList is a list of gene names\r\ndef MicroName(geneList):\r\n    path = '/Users/guowenbo/Desktop/OneDrive/生物信息/BioTech/hgu133a.xlsx'\r\n    data = pd.read_excel(path,sheet_name='Sheet1')\r\n    list1 = []\r\n\r\n    #Check whether the input is a list\r\n    if isinstance(geneList,list):\r\n        for i in geneList:\r\n            name = data.loc[data['SYMBOL'].isin([i])]\r\n            name = name.iloc[:, 0:1].values\r\n            for name in name:\r\n                name = str(name)\r\n                name = name.replace(\"'\", '')\r\n                name = name.replace(\"[\", '')\r\n                name = name.replace(\"]\", '')\r\n                list1.append(name)\r\n    else:\r\n        name = data.loc[data['SYMBOL'].isin([geneList])]\r\n        name = name.iloc[:, 0:1].values\r\n\r\n        for name in name:\r\n            name = 
str(name)\r\n            name = name.replace(\"'\", '')\r\n            name = name.replace(\"[\", '')\r\n            name = name.replace(\"]\", '')\r\n            list1.append(name)\r\n\r\n    return list1\r\n\r\nif __name__ == '__main__':\r\n\r\n    geneList = ['SMC4','CFHR1','CFH','ZDHHC4','TMA16','TAF1A','FUBP1','USP27X','TMPO']\r\n\r\n    nfkbDic = {}\r\n    tauDic = {}\r\n    ubiquitinDic = {}\r\n    bag2Dic = {}\r\n    # HSP70 = ['HSPA6_1']\r\n    # microName = MicroName(HSP70)\r\n    # print(microName)\r\n\r\n\r\n\r\n    for i in geneList:\r\n        nfkbDic[i] = ResultNFKB(i)\r\n        tauDic[i] = ResultTAU(i)\r\n        ubiquitinDic[i] = ResultUBI(i)\r\n        bag2Dic[i] = ResultBAG2(i)\r\n\r\n    #Write the results to xlsx\r\n    pathwayList=['ID_REF','NF-KB','TAU','ubiquitin','BAG2']\r\n    workbook = xlwt.Workbook(encoding='utf-8')\r\n    worksheet = workbook.add_sheet('sheet1')\r\n\r\n    #Write the gene names\r\n    b = 1\r\n    for i in geneList:\r\n        worksheet.write(b, 0, i)\r\n        b = b + 1\r\n\r\n    #Write the header row\r\n    a = 1\r\n    for i in pathwayList:\r\n        worksheet.write(0, a, i)\r\n        a = a + 1\r\n\r\n    #Write the data\r\n    for j in range(len(geneList)):\r\n        name = geneList[j]\r\n        j = j + 1\r\n        # worksheet.write(j,1,microName[j-1]) #Write the ID_REF name\r\n        worksheet.write(j,2,nfkbDic[name])\r\n        worksheet.write(j, 3, tauDic[name])\r\n        worksheet.write(j, 4, ubiquitinDic[name])\r\n        worksheet.write(j, 5, bag2Dic[name])\r\n\r\n    workbook.save('ADgene.xls')  # Save the file\r\n\r\n    print('nfkb is',nfkbDic)\r\n    print('TAU IS',tauDic)\r\n    print('ubiquitin IS', ubiquitinDic)\r\n\r\n    # retau = ['203928_x_at','203929_s_at','203930_s_at','206401_s_at','212901_s_at','212905_at','213922_at','216821_at']\r\n    # list = []\r\n    # for i in retau:\r\n    #     list.append(GeneName(i).strip())\r\n    # print(list)\r\n\r\n","repo_name":"guowenbo1/gse1297","sub_path":"spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":5347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"29522170509","text":"def run():\r\n    '''\r\n    Challenge 47: Create a list of 5 even numbers using the\r\n    range() function and print the result\r\n    '''\r\n    par_list = list(range(2, 12, 2))\r\n\r\n    print(par_list)\r\n\r\nif __name__ == '__main__':\r\n    run()","repo_name":"HaroldRoy/100-Days-of-Python","sub_path":"47_list_range.py","file_name":"47_list_range.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"27399313966","text":"import pickle\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport collections\r\nfrom sklearn.preprocessing import KBinsDiscretizer\r\ndef main():\r\n    a = \"DataProcessDiscretization.txt\"\r\n    \r\n    df = pd.read_pickle('dataframeWithTrajectories.pkl')\r\n    #maxva= df['Frame'].max()\r\n    #print(df.iloc[260,:])\r\n    \r\n    suma = sum(df['Frame'].values)/len(df)\r\n    print(\"Frame Average: \",suma)\r\n\r\n    North = 0\r\n    East = 0\r\n    West = 0\r\n    South = 0\r\n    NorthEast = 0\r\n    NorthWest = 0\r\n    SouthEast = 0\r\n    SouthWest = 0\r\n    ArraySpeeds = []\r\n    Arrayheight = []\r\n    ArrayId = []\r\n    speeds =[]\r\n    ArrayVideo = []\r\n    df1 = df['Frame'].values\r\n    df = df['Frame'].values\r\n    df = df[df > 50]\r\n    print(df)\r\n    \r\n    if len(df)%2 != 0: \r\n        df = df[:-1]\r\n    df = df.reshape((int(len(df)/2),2))\r\n    est = KBinsDiscretizer(n_bins=10, encode='ordinal', strategy='uniform')\r\n    est.fit(df)\r\n    b = est.transform(df)\r\n    df = b.reshape(-1,1)\r\n    dfi = est.bin_edges_[1]\r\n    dfIntervals = []\r\n    for i in dfi:\r\n        dfIntervals.append('{:.4f}'.format(i))\r\n    \r\n    with open (a, 'rb') as fp:\r\n        itemlist = pickle.load(fp) \r\n    
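# NOTE: the pickled structure is assumed (inferred from the loops below) to be\r\n    # nested as [(video, [(track_id, [(_, [(frame, direction, height, speed), ...]), ...]), ...])];\r\n    # e.g. [('clip01', [(7, [(0, [(12, 'North', 3, 5)])])])] is a purely hypothetical sample.\r\n    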
for t in itemlist:\r\n\r\n for i in t[1]:\r\n speeds =[]\r\n for j in i[1]:\r\n for x in j[1]:\r\n if x[1] == \"North\":\r\n North = North +1\r\n elif x[1] == \"East\":\r\n East = East +1\r\n elif x[1] == \"West\":\r\n West = West +1\r\n elif x[1] == \"South\":\r\n South = South+1\r\n elif x[1] == \"NorthEast\":\r\n NorthEast = NorthEast +1\r\n elif x[1] == \"NorthWest\":\r\n NorthWest = NorthWest +1\r\n elif x[1] == \"SouthEast\":\r\n SouthEast = SouthEast+1\r\n elif x[1] == \"SouthWest\":\r\n SouthWest = SouthWest+1\r\n Arrayheight.append(int(x[2]))\r\n ArraySpeeds.append(int(x[3]))\r\n \r\n \r\n ArrayId.append((i[0],speeds))\r\n ArrayVideo.append((t[0],ArrayId))\r\n # print(ArrayVideo)\r\n #print(North, East,West,South ,NorthEast ,NorthWest ,SouthEast ,SouthWest)\r\n a = [North, East,West,South ,NorthEast ,NorthWest ,SouthEast ,SouthWest]\r\n speeds = collections.Counter(ArraySpeeds) \r\n heigh = collections.Counter(Arrayheight) \r\n #print(speeds)\r\n #print(heigh)\r\n Total = sum(a)\r\n Labels =['North', 'East','West','South' ,'NorthEast','NorthWest' ,'SouthEast' ,'SouthWest'] \r\n y_pos = np.arange(len(Labels))\r\n plt.bar(y_pos, a, color = (0.5,0.1,0.5,0.6))\r\n\r\n plt.title('Cardinal direction')\r\n plt.xlabel('Cardinals')\r\n plt.ylabel('Times')\r\n plt.grid(axis='y', alpha=0.75)\r\n plt.xticks(y_pos, Labels)\r\n plt.show()\r\n\r\n \r\n intervalos = [0,1,2,3,4,5,6,7,8,9,10,11]\r\n \r\n for i in intervalos:\r\n if i < len(dfIntervals):\r\n print(dfIntervals[i], i)\r\n plt.hist(x=df, bins=intervalos, color='#F2AB6D', rwidth=0.85)\r\n plt.title('Frames Histogram')\r\n plt.xlabel('Frames')\r\n plt.ylabel('Times')\r\n plt.grid(axis='y', alpha=0.75)\r\n plt.xticks(intervalos) \r\n plt.show()\r\n\r\n\r\n\r\n intervalos = [0,1,2,3,4,5,6,7,8,9,10,11,12,13]\r\n print(len(ArraySpeeds))\r\n plt.hist(x=ArraySpeeds, bins=intervalos, color='#F2AB6D', rwidth=0.85)\r\n plt.title('Speed Histogram')\r\n plt.xlabel('Speed')\r\n plt.ylabel('Times')\r\n plt.grid(axis='y', alpha=0.75)\r\n plt.xticks(intervalos) \r\n plt.show()\r\n\r\n intervalos = [0,1,2,3,4,5,6,7,8,9,10,11,12,13]\r\n print(len(Arrayheight))\r\n plt.hist(x=Arrayheight, bins=intervalos, color='#F2AB6D', rwidth=0.85)\r\n plt.title('Height Histogram')\r\n plt.xlabel('Height')\r\n plt.ylabel('Times')\r\n plt.grid(axis='y', alpha=0.75)\r\n plt.xticks(intervalos) \r\n plt.show()\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"InDeX696/Detection-of-participants-in-sporting-events","sub_path":"Scripts/HistogramMaker.py","file_name":"HistogramMaker.py","file_ext":"py","file_size_in_byte":4153,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"60"} +{"seq_id":"7884415688","text":"from adhrit.adhrit import main\nimport configparser\nimport os\nimport re\nimport r2pipe\nimport base64\nimport glob\nfrom colorama import Fore, Style\nimport concurrent.futures\nfrom adhrit.recons.dbaccess import dbconnection, create_secrets_table, insert_secretstable, insert_statustable\ndecode_str_list = []\n\ndef isBase64(s):\n try:\n return base64.b64encode(base64.b64decode(s)) == s\n except Exception:\n return False\n\ndef lib_pwn():\n\tn = 0\n\tif os.path.exists('Bytecode'):\n\t\tbinfilepath = glob.glob('Bytecode/lib/arme*')\n\t\ttry:\n\t\t\tgotbinfilepath = binfilepath[0]\n\t\texcept IndexError:\n\t\t\tgotbinfilepath = 'none'\n\t\tif os.path.exists(gotbinfilepath):\n\n\t\t\tbinfiles = glob.glob(str(gotbinfilepath) + '/*.so')\n\n\t\t\tfor thelibfile in binfiles:\n\t\t\t\tprint(Fore.GREEN + \"\\n\\n[INFO]\" + 
Fore.BLUE + \" Analyzing \" + Fore.GREEN + thelibfile)\n\n\t\t\t\tr = r2pipe.open(thelibfile)\n\t\t\t\t\n\t\t\t\tprint(Fore.GREEN + \"\\n[INFO] \" + Fore.BLUE + \"Seaching for AES keys\")\n\t\t\t\tprint(Fore.YELLOW)\n\t\t\t\tprint(\"\\t\") \n\t\t\t\taeskeys = r.cmd('/ca')\n\t\t\t\tif aeskeys == '':\n\t\t\t\t\tprint(Fore.GREEN + \"\\t[!] \" + Fore.YELLOW + \"No AES Keys found\\n\")\n\t\t\t\telse:\n\t\t\t\t\tprint(Fore.YELLOW + aeskeys + \"\\n\")\n\t\t\t\t\ttmp = f\"AES key(s) found are : {aeskeys}\"\n\t\t\t\t\tdecode_str_list.append(tmp)\n\n\t\t\t\tprint(Fore.GREEN + \"\\n[INFO] \" + Fore.BLUE + \"All Strings\\n\")\n\t\t\t\tallstrings = r.cmdj('rabin2 -z -j ' + thelibfile)\n\n\t\t\t\tprint(Fore.YELLOW)\n\t\t\t\tif allstrings != None:\n\t\t\t\t\tfor val_list in allstrings.values():\n\t\t\t\t\t\tfor json_data in val_list:\n\t\t\t\t\t\t\tfor key, value in json_data.items():\n\t\t\t\t\t\t\t\tif key == 'type' and value == 'utf32le':\n\t\t\t\t\t\t\t\t\tdecode_str_list.append(json_data['string'])\n\n\treturn decode_str_list\n\n\n\ndef url_scanner():\n\n\tprint(\"\\n[+] Scanning URLs\\n\")\n\troot_dir = os.getcwd() \n\tscan_lists = [ glob.glob(root_dir + \"/*.xml\"), glob.glob(root_dir + \"/**/*.smali\", recursive = True)]\n\tnew_scan_list = []\n\turl_regex = r\"(http|ftp|https|file):\\/\\/([\\w\\-_]+(?:(?:\\.[\\w\\-_]+)+))([\\w\\-\\.,@?^=%&:/~\\+#]*[\\w\\-\\@?^=%&/~\\+#])?\"\n\tignore_url = [\"android.com\",]\n\tignore_dir = ['android/', 'org/', 'google/', 'localytics/', 'lib/', 'AndroidManifest.xml','juspay/']\n\tfinal_urls = []\n\n\n\t# Filtering scaning list \n\tfor scan_list in scan_lists:\n\t\tfor each_item in scan_list:\n\t\t\tif any(ignore in each_item for ignore in ignore_dir):\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tnew_scan_list.append(each_item)\n\t\n\tfor file_path in new_scan_list:\n\t\twith open(file_path) as file:\n\t\t\tfor line in file:\n\t\t\t\t\tmatch = re.compile(url_regex).findall(line)\n\t\t\t\t\tif match:\n\t\t\t\t\t\ttup = match[0]\n\t\t\t\t\t\turl = str(tup[0])+'://'+(str(tup[1])+str(tup[2])) \n\t\t\t\t\t\tif any(ignore in url for ignore in ignore_url):\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tfinal_urls.append(url)\n\t\n\n\treturn set(final_urls)\n\n\ndef get_config_data(key):\n\tcheck_deps = configparser.ConfigParser() #re.compile(r'\\b\\b')\n\tcheck_deps.read('config')\n\treturn check_deps.get('config-data', str(key))\n\ndef api_scanner():\n\tapi_lists = []\n\tprint(\"\\n[+] Scanning API keys\\n\")\n\troot_dir = os.getcwd()\n\t#Append the remaining regex here\n\tregex_dic = {'Google Maps API': re.compile(r'\\bAIza.{35}\\b'),\n\t\t\t\t'Twitter Access Token': re.compile(r'\\b[1-9][ 0-9]+-\\(0-9a-zA-Z]{40}\\b'), \n\t\t\t\t'Facebook Access Token': re.compile(r'\\bEAACEdEose0cBA[0-9A-Za-z]+\\b'), \n\t\t\t\t'Gmail OAuth 2.0': re.compile(r'\\b[0-9(+-[0-9A-Za-z_]{32}.apps.qooqleusercontent.com\\b'), \n\t\t\t\t'Stripe\tRestricted API Key': re.compile(r'\\bsk_live_\\(0-9a-zA-Z]{24}\\b'),\n\t\t\t\t'Square\tOAuth Secret': re.compile(r'\\bq0csp-[ 0-9A-Za-z-_]{43}\\b'),\n\t\t\t\t'Paypal/Braintree Access Token': re.compile(r'\\baccess_token,production$[0-9a-z]{161[0-9a,]{32}\\b'),\n\t\t\t\t'Amazon Marketing Services Auth Token': re.compile(r'\\bamzn.mws]{8}-[0-9a-f]{4}-10-9a-f1{4}-[0-9a,]{4}-[0-9a-f]{12}\\b'),\n\t\t\t\t'Twilio\tAPI Key': re.compile(r'\\b55[0-9a-fA-F]{32}\\b'),\n\t\t\t\t'Slack\tAPI Key': re.compile(r'\\bxox.-[0-9]{12}-[0-9]{12}-[0-9a-zA-Z]{24}\\b'),\n\t\t\t\t'AWS Access Key ID': re.compile(r'\\bAKIA[0-9A-Z]{16}\\b')\n\t\t\t\t}\n\n\n\tscan_lists = 
[glob.glob(root_dir + \"/**/*.smali\", recursive = True), glob.glob(root_dir + \"/*.xml\")]\n\tfor scan_list in scan_lists:\n\t\tfor file_path in scan_list:\n\t\t\twith open(file_path) as file:\n\t\t\t\tfor line in file:\n\t\t\t\t\tfor key in regex_dic:\n\t\t\t\t\t\tmatch = regex_dic[key].findall(line)\n\t\t\t\t\t\tif match:\n\t\t\t\t\t\t\tapi = \"\".join(match)\n\t\t\t\t\t\t\tfile_name = file_path.rsplit('/', 1)[1]\n\t\t\t\t\t\t\tapi_val = str(key) + ' : ' + str(file_name) + ' ==> '+api\n\t\t\t\t\t\t\tapi_lists.append(api_val)\n\treturn set(api_lists)\n\n\n\ndef secret_scanner(hash_of_apk):\n\n\tapi_keys = []\n\n\tdbname = 'adhrit.db'\n\tdbconstatus = dbconnection(dbname)\n\tcreate_secrets_table(dbconstatus)\n\n\n\tpath = hash_of_apk\n\tos.chdir(path)\n\n\twith concurrent.futures.ProcessPoolExecutor() as executor:\n\t\tp1 = executor.submit(url_scanner)\n\t\tp2 = executor.submit(api_scanner)\n\t\tp3 = executor.submit(lib_pwn)\n\t\n\turls = p1.result()\n\tstrings_from_lib = p3.result()\n\tkey = p2.result()\n\tapi_keys.extend(key)\n\n\tprint(strings_from_lib)\n\t\n\tpath = os.getcwd() + '/..'\n\tos.chdir(path)\n\t\n\n\n\tallsecrets = (str(hash_of_apk), str(list(urls)), str(strings_from_lib), str(list(api_keys)))\n\taddtotable = insert_secretstable(dbconstatus, allsecrets)\n\n\n\t#--------------------------------\n\tdbname = \"adhrit.db\"\n\tdbconstatus = dbconnection(dbname)\n\tquery = f\"UPDATE StatusDB SET Secrets='complete' WHERE Hash='{hash_of_apk}';\"\n\taddedornot = insert_statustable(dbconstatus, query)\n\n\n","repo_name":"abhi-r3v0/Adhrit","sub_path":"adhrit/recons/secrets.py","file_name":"secrets.py","file_ext":"py","file_size_in_byte":5307,"program_lang":"python","lang":"en","doc_type":"code","stars":525,"dataset":"github-code","pt":"60"} +{"seq_id":"22568517909","text":"import sys\nimport numpy as np\n\nfrom pycountry_convert import (\n map_countries,\n country_alpha2_to_country_name,\n country_name_to_country_alpha2,\n country_alpha3_to_country_alpha2,\n country_alpha2_to_continent_code,\n COUNTRY_NAME_FORMAT_UPPER\n)\n\n\ncontinent_dico = {\n \"EU\": \"Europe\",\n \"NA\": \"North America\",\n \"OC\": \"Oceania\",\n \"SA\": \"South America\",\n \"AS\": \"Asia\",\n \"AN\": \"Antarctica\",\n \"AF\": \"Africa\"\n}\n\nalpha3_ico = {\n \"UK\": \"GBR\",\n \"ALG\": \"DZA\",\n \"ASA\": \"ASM\",\n \"ANG\": \"AGO\",\n \"ANT\": \"ATG\",\n \"ARU\": \"ABW\",\n \"BAH\": \"BHS\",\n \"BRN\": \"BHR\",\n \"BAN\": \"BGD\",\n \"BAR\": \"BRB\",\n \"BIZ\": \"BLZ\",\n \"BER\": \"BMU\",\n \"BHU\": \"BTN\",\n \"BOT\": \"BWA\",\n \"IVB\": \"VGB\",\n \"BRU\": \"BRN\",\n \"BUL\": \"BGR\",\n \"BUR\": \"BFA\",\n \"CAM\": \"KHM\",\n \"CAY\": \"CYM\",\n \"CHA\": \"TCD\",\n \"CHI\": \"CHL\",\n \"CGO\": \"COG\",\n \"CRC\": \"CRI\",\n \"CRO\": \"HRV\",\n \"DEN\": \"DNK\",\n \"ESA\": \"SLV\",\n \"GEQ\": \"GNQ\",\n \"FIJ\": \"FJI\",\n \"GAM\": \"GMB\",\n \"GER\": \"DEU\",\n \"GRE\": \"GRC\",\n \"GRN\": \"GRD\",\n \"GUA\": \"GTM\",\n \"GUI\": \"GIN\",\n \"GBS\": \"GNB\",\n \"HAI\": \"HTI\",\n \"HON\": \"HND\",\n \"INA\": \"IDN\",\n \"IRI\": \"IRN\",\n \"KUW\": \"KWT\",\n \"LAT\": \"LVA\",\n \"LIB\": \"LBN\",\n \"LES\": \"LSO\",\n \"LBA\": \"LBY\",\n \"MAD\": \"MDG\",\n \"MAW\": \"MWI\",\n \"MAS\": \"MYS\",\n \"MTN\": \"MRT\",\n \"MRI\": \"MUS\",\n \"MON\": \"MCO\",\n \"MGL\": \"MNG\",\n \"MYA\": \"MMR\",\n \"NEP\": \"NPL\",\n \"NED\": \"NLD\",\n \"NCA\": \"NIC\",\n \"NIG\": \"NER\",\n \"NGR\": \"NGA\",\n \"OMA\": \"OMN\",\n \"PLE\": \"PSE\",\n \"PAR\": \"PRY\",\n \"PHI\": \"PHL\",\n \"POR\": \"PRT\",\n \"PUR\": 
\"PRI\",\n \"SKN\": \"KNA\",\n \"VIN\": \"VCT\",\n \"SAM\": \"WSM\",\n \"KSA\": \"SAU\",\n \"SEY\": \"SYC\",\n \"SIN\": \"SGP\",\n \"SLO\": \"SVN\",\n \"SOL\": \"SLB\",\n \"RSA\": \"ZAF\",\n \"SRI\": \"LKA\",\n \"SUD\": \"SDN\",\n \"SUI\": \"CHE\",\n \"TPE\": \"TWN\",\n \"TAN\": \"TZA\",\n \"TOG\": \"TGO\",\n \"TGA\": \"TON\",\n \"TRI\": \"TTO\",\n \"UAE\": \"ARE\",\n \"ISV\": \"VIR\",\n \"URU\": \"URY\",\n \"VAN\": \"VUT\",\n \"VIE\": \"VNM\",\n \"ZAM\": \"ZMB\",\n \"ZIM\": \"ZWE\",\n}\n\ncountrynames = []\ncontinentcodes = []\n\nwith open(sys.argv[1]) as f:\n for line in f:\n line = line.strip()\n if line == \"UNK\":\n countrynames.append(\"Kosovo\")\n continentcodes.append(continent_dico[\"EU\"])\n else:\n try:\n countrynames.append(country_alpha2_to_country_name(\n country_alpha3_to_country_alpha2(line)))\n except:\n line = alpha3_ico[line]\n countrynames.append(country_alpha2_to_country_name(\n country_alpha3_to_country_alpha2(line)))\n\n continentcodes.append(continent_dico[country_alpha2_to_continent_code(\n country_alpha3_to_country_alpha2(line))])\n\nnp.savetxt(\"continents.csv\", continentcodes, delimiter=',', fmt=\"%s\")\nnp.savetxt(\"countrynames.csv\", countrynames, delimiter=',', fmt=\"%s\")\n","repo_name":"hagax8/arabidopsis_viz","sub_path":"country_convert.py","file_name":"country_convert.py","file_ext":"py","file_size_in_byte":2964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"15426885236","text":"import numpy as np\nfrom pyH2A.Utilities.Energy_Conversion import Energy, kWh, eV\nfrom pyH2A.Utilities.input_modification import insert, process_table\n\nclass Photocatalytic_Plugin:\n\t'''Simulating H2 production using photocatalytic water splitting in plastic baggie reactors.\n\n\tParameters\n\t----------\n\tTechnical Operating Parameters and Specifications > Design Output per Day > Value : float\n\t\tDesign output in (kg of H2)/day, ``process_table()`` is used.\n\tReactor Baggies > Cost Material Top ($/m2) > Value : float\n\t\tCost of baggie top material in $/m2.\n\tReactor Baggies > Cost Material Bottom ($/m2) > Value : float\n\t\tCost of baggie bottom material in $/m2.\n\tReactor Baggies > Number of ports > Value : int\n\t\tNumber of ports per baggie.\n\tReactor Baggies > Other Costs ($) > Value : float\n\t\tOther costs per baggie.\n\tReactor Baggies > Markup factor > Value : float\n\t\tMarkup factor for baggies, typically > 1.\n\tReactor Baggies > Length (m) > Value : float\n\t\tLength of single baggie in m.\n\tReactor Baggies > Width (m) > Value : float\n\t\tWidth of single baggie in m.\n\tReactor Baggies > Height (m) > Value : float\n\t\tHeight of reactor baggie in m. In this simulation this value determines the height\n\t\tof the water level and hence is an important parameter ultimately determining the\n\t\tlevel of light absorption and total catalyst amount.\n\tReactor Baggies > Additional land area (%) > Value : float\n\t\tAdditional land area required, percentage or value > 0. 
\n\t\tCalculated as: (1 + additional land area) * baggie area.\n\tReactor Baggies > Lifetime (years) > Value : float\n\t\tLifetime of reactor baggies in years before replacement is required.\n\tCatalyst > Cost per kg ($) > Value : float\n\t\tCost per kg of catalyst.\n\tCatalyst > Concentration (g/L) > Value : float\n\t\tConcentration of catalyst in g/L.\n\tCatalyst > Lifetime (years) > Value : float\n\t\tLifetime of catalysts in years before replacement is required.\n\tCatalyst > Molar Weight (g/mol) > Value : float, optional\n\t\tIf the molar weight of the catalyst (in g/mol) is specified, homogeneous catalyst\n\t\tproperties (TON, TOF, etc.) are calculated.\n\tCatalyst > Molar Attenuation Coefficient (M^-1 cm^-1) > Value : float, optional\n\t\tIf the molar attenuation coefficient (in M^-1 cm^-1) is specified (along with the molar weight),\n\t\tabsorbance and the fraction of absorbed light are also calculated.\n\tSolar-to-Hydrogen Efficiency > STH (%) > Value : float\n\t\tSolar-to-hydrogen efficiency in percentage or as a value between 0 and 1.\n\tSolar Input > Mean solar input (kWh/m2/day) > Value : float\n\t\tMean solar input in kWh/m2/day, ``process_table()`` is used.\n\tSolar Input > Hourly (kWh/m2) > Value : ndarray\n\t\tHourly irradiation data.\n\n\tReturns\n\t-------\n\tNon-Depreciable Capital Costs > Land required (acres) > Value : float\n\t\tTotal land area required in acres.\n\tNon-Depreciable Capital Costs > Solar Collection Area (m2) > Value : float\n\t\tSolar collection area in m2.\n\tPlanned Replacement > Planned Replacement Catalyst > Cost ($) : float\n\t\tTotal cost of completely replacing the catalyst once.\n\tPlanned Replacement > Planned Replacement Catalyst > Frequency (years) : float\n\t\tReplacement frequency of catalyst in years, identical to catalyst lifetime.\n\tPlanned Replacement > Planned Replacement Baggie > Cost ($) : float\n\t\tTotal cost of replacing all baggies.\n\tPlanned Replacement > Planned Replacement Baggie > Frequency (years) : float\n\t\tReplacement frequency of baggies in years, identical to baggie lifetime.\n\tDirect Capital Costs - Reactor Baggies > Baggie Cost ($) > Value : float\n\t\tTotal baggie cost.\n\tDirect Capital Costs - Photocatalyst > Catalyst Cost ($) > Value : float\n\t\tTotal catalyst cost.\n\tReactor Baggies > Number > Value : int\n\t\tNumber of individual baggies required for design H2 production capacity.\n\tCatalyst > Properties > Value : dict\n\t\tDictionary containing detailed catalyst properties calculated from provided parameters.\n\t['Photocatalytic_Plugin'].catalyst_properties : dict\n\t\tAttribute containing catalyst properties dictionary.\n\tWater Volume > Volume (liters) > Value : float\n\t\tTotal water volume in liters.\n\t'''\n\n\tdef __init__(self, dcf, print_info):\n\t\tprocess_table(dcf.inp, 'Reactor Baggies', 'Value')\n\t\tprocess_table(dcf.inp, 'Solar Input', 'Value')\n\t\tprocess_table(dcf.inp, 'Solar-to-Hydrogen Efficiency', 'Value')\n\n\t\tself.hydrogen_production(dcf)\n\n\t\tprocess_table(dcf.inp, 'Technical Operating Parameters and Specifications', 'Value')\n\n\t\tself.baggie_cost(dcf)\n\n\t\tprocess_table(dcf.inp, 'Catalyst', 'Value')\n\n\t\tself.catalyst_cost(dcf)\n\t\tself.land_area(dcf)\n\n\t\tself.catalyst_activity(dcf)\n\n\t\tinsert(dcf, 'Non-Depreciable Capital Costs', 'Land required (acres)', 'Value', \n\t\t\t\tself.total_land_area_acres, __name__, print_info = print_info)\n\t\tinsert(dcf, 'Non-Depreciable Capital Costs', 'Solar Collection Area (m2)', 'Value', \n\t\t\t\tself.total_solar_collection_area, 
__name__, print_info = print_info)\n\t\t\n\t\tinsert(dcf, 'Planned Replacement', 'Planned Replacement Catalyst', 'Cost ($)', \n\t\t\t\tself.catalyst_cost, __name__, print_info = print_info)\n\t\tinsert(dcf, 'Planned Replacement', 'Planned Replacement Catalyst', 'Frequency (years)', \n\t\t\t\tdcf.inp['Catalyst']['Lifetime (years)']['Value'], __name__, print_info = print_info)\n\t\tinsert(dcf, 'Planned Replacement', 'Planned Replacement Baggie', 'Cost ($)', \n\t\t\t\tself.baggies_cost, __name__, print_info = print_info)\n\t\tinsert(dcf, 'Planned Replacement', 'Planned Replacement Baggie', 'Frequency (years)', \n\t\t\t\tdcf.inp['Reactor Baggies']['Lifetime (years)']['Value'], __name__, print_info = print_info)\n\n\t\tinsert(dcf, 'Direct Capital Costs - Reactor Baggies', 'Baggie Cost ($)', 'Value', \n\t\t\t\tself.baggies_cost, __name__, print_info = print_info)\n\t\tinsert(dcf, 'Direct Capital Costs - Photocatalyst', 'Catalyst Cost ($)', 'Value', \n\t\t\t\tself.catalyst_cost, __name__, print_info = print_info)\n\n\t\tinsert(dcf, 'Reactor Baggies', 'Number', 'Value', self.baggie_number, \n\t\t\t\t__name__, print_info = print_info)\n\t\tinsert(dcf, 'Catalyst', 'Properties', 'Value', self.catalyst_properties, \n\t\t\t\t__name__, print_info = print_info)\n\t\tinsert(dcf, 'Water Volume', 'Volume (liters)', 'Value', self.total_volume_liters, \n\t\t\t\t__name__, print_info = print_info)\n\n\tdef hydrogen_production(self, dcf):\n\t\t'''Calculation of hydrogen produced per day per baggie (in kg).\n\t\t'''\n\n\t\tbaggie = dcf.inp['Reactor Baggies']\n\n\t\tself.baggie_area = baggie['Length (m)']['Value'] * baggie['Width (m)']['Value']\n\t\tbaggie_insolation = Energy(self.baggie_area * dcf.inp['Solar Input']['Mean solar input (kWh/m2/day)']['Value'], kWh)\n\n\t\tmol_H2_per_baggie = (baggie_insolation.J * \n\t\t\t\t\tdcf.inp['Solar-to-Hydrogen Efficiency']['STH (%)']['Value']) / Energy(2*1.229, eV).Jmol\n\n\t\tself.kg_H2_per_baggie = (2 * mol_H2_per_baggie)/1000.\n\n\tdef catalyst_activity(self, dcf):\n\t\t'''Calculation of detailed catalyst properties based on provided parameters. If \"Molar Weight (g/mol)\"\n\t\tis specified in \"Catalyst\" table properties of a homogeneous catalyst are also calculated. Furthermore,\n\t\tif \"Molar Attenuation Coefficient (M^-1 cm^-1)\" is also provided, the light absorption properties \n\t\tare calculated.\n\t\t'''\n\n\t\tcatalyst_properties = {}\n\n\t\tpeak_hourly_irradiation_per_m2 = np.amax(dcf.inp['Solar Input']['Hourly (kWh/m2)']['Value'])\n\t\tpeak_hourly_irradiation_per_m2 = Energy(peak_hourly_irradiation_per_m2, kWh)\n\n\t\tpeak_mol_H2_per_m2_per_h = (peak_hourly_irradiation_per_m2.J * \n\t\t\t\t\t\t\t\tdcf.inp['Solar-to-Hydrogen Efficiency']['STH (%)']['Value']) / Energy(2 * 1.229, eV).Jmol\n\n\t\tmean_daily_mol_H2_per_m2 = (Energy(dcf.inp['Solar Input']['Mean solar input (kWh/m2/day)']['Value'], kWh).J *\n\t\t\t\t\t\t\t\t\tdcf.inp['Solar-to-Hydrogen Efficiency']['STH (%)']['Value']) / Energy(2 * 1.229, eV).Jmol\n\n\t\tkg_catalyst_per_m2 = ((dcf.inp['Reactor Baggies']['Height (m)']['Value'] * 1000) * \n\t\t\t\t\t\t\t\tdcf.inp['Catalyst']['Concentration (g/L)']['Value']/1000.)\n\n\t\tactivity_mmol_H2_per_h_per_g_catalyst = 1000 * peak_mol_H2_per_m2_per_h / (kg_catalyst_per_m2 * 1000)\n\n\t\tcatalyst_properties['Peak activity / mmol H2/h/g'] = activity_mmol_H2_per_h_per_g_catalyst\n\t\tcatalyst_properties['Peak H2 production / mol H2/m2/h'] = peak_mol_H2_per_m2_per_h\n\t\tcatalyst_properties['Catalyst Conc. 
/ kg/m2'] = kg_catalyst_per_m2\n\t\tcatalyst_properties['Catalyst Conc. / g/L'] = dcf.inp['Catalyst']['Concentration (g/L)']['Value']\n\t\n\t\tif 'Molar Weight (g/mol)' in dcf.inp['Catalyst']:\n\n\t\t\tcatalyst_mol_per_L = ((dcf.inp['Catalyst']['Concentration (g/L)']['Value']) /\n\t\t\t\t\t\t\t\t dcf.inp['Catalyst']['Molar Weight (g/mol)']['Value'])\n\n\t\t\tliter_per_m2 = dcf.inp['Reactor Baggies']['Height (m)']['Value'] * 1000\n\n\t\t\tmol_catalyst_per_m2 = liter_per_m2 * catalyst_mol_per_L\n\n\t\t\tpeak_TOF_hourly = peak_mol_H2_per_m2_per_h / mol_catalyst_per_m2\n\t\t\taverage_TOF_daily = mean_daily_mol_H2_per_m2 / mol_catalyst_per_m2\n\t\t\tTON = average_TOF_daily * dcf.inp['Catalyst']['Lifetime (years)']['Value'] * 365\n\n\t\t\tcatalyst_properties['Homogeneous'] = {}\n\t\t\tcatalyst_properties['Homogeneous']['Catalyst Conc. / mol/L'] = catalyst_mol_per_L\n\t\t\tcatalyst_properties['Homogeneous']['Catalyst Conc. / mol/m2'] = mol_catalyst_per_m2\n\t\t\tcatalyst_properties['Homogeneous']['Peak TOF / h^-1'] = peak_TOF_hourly\n\t\t\tcatalyst_properties['Homogeneous']['Mean daily TOF / d^-1'] = average_TOF_daily\n\t\t\tcatalyst_properties['Homogeneous']['TON'] = TON\n\n\t\t\tif 'Molar Attenuation Coefficient (M^-1 cm^-1)' in dcf.inp['Catalyst']:\n\t\t\t\tabsorbance = (catalyst_mol_per_L * (dcf.inp['Reactor Baggies']['Height (m)']['Value'] * 100) * \n\t\t\t\t\t\tdcf.inp['Catalyst']['Molar Attenuation Coefficient (M^-1 cm^-1)']['Value'])\n\n\t\t\t\tcatalyst_properties['Homogeneous']['Absorbance'] = absorbance\n\t\t\t\tcatalyst_properties['Homogeneous']['Absorbed light (%)'] = 100 * (1 - 10**(-absorbance))\n\n\t\t\tkg_H2_per_day_TOF_calculation = 1000 * self.catalyst_amount_kg / dcf.inp['Catalyst']['Molar Weight (g/mol)']['Value'] * average_TOF_daily * 2. 
/ 1000.\n\t\t\tkg_H2_per_day_baggie_calculation = self.kg_H2_per_baggie * self.baggie_number\n\n\t\t\tassert abs(kg_H2_per_day_TOF_calculation - kg_H2_per_day_baggie_calculation) < 1e-6, 'Difference between baggie and TOF calculation for daily H2 production: TOF: {0}, Baggie: {1}.'.format(\n\t\t\t\t\tkg_H2_per_day_TOF_calculation, kg_H2_per_day_baggie_calculation)\n\n\t\tself.catalyst_properties = catalyst_properties\n\n\tdef baggie_cost(self, dcf):\n\t\t'''Calculation of cost per baggie, number of required baggies and total baggie cost.\n\t\t'''\n\n\t\tbaggie = dcf.inp['Reactor Baggies']\n\n\t\tmaterial_cost = self.baggie_area * (baggie['Cost Material Top ($/m2)']['Value'] + baggie['Cost Material Bottom ($/m2)']['Value'])\n\t\tport_cost = baggie['Number of ports']['Value'] * baggie['Cost of port ($)']['Value']\n\n\t\tcost_per_baggie = baggie['Markup factor']['Value'] * (material_cost + port_cost + baggie['Other Costs ($)']['Value'])\n\n\t\tself.baggie_number = np.ceil(dcf.inp['Technical Operating Parameters and Specifications']['Design Output per Day']['Value'] / self.kg_H2_per_baggie)\n\t\tself.baggies_cost = self.baggie_number * cost_per_baggie\n\n\tdef catalyst_cost(self, dcf):\n\t\t'''Calculation of individual baggie volume, catalyst amount per baggie, total catalyst amount \n\t\tand total catalyst cost.\n\t\t'''\n\n\t\tbaggie = dcf.inp['Reactor Baggies']\n\n\t\tbaggie_volume_m3 = baggie['Length (m)']['Value'] * baggie['Width (m)']['Value'] * baggie['Height (m)']['Value']\n\t\tbaggie_volume_liters = baggie_volume_m3 * 1000\n\n\t\tself.total_volume_liters = baggie_volume_liters * self.baggie_number\n\n\t\tself.catalyst_amount_per_baggie_kg = baggie_volume_liters * dcf.inp['Catalyst']['Concentration (g/L)']['Value']/1000.\n\t\tself.catalyst_amount_kg = self.catalyst_amount_per_baggie_kg * self.baggie_number\n\n\t\tself.catalyst_cost = self.catalyst_amount_kg * dcf.inp['Catalyst']['Cost per kg ($)']['Value']\n\n\tdef land_area(self, dcf):\n\t\t'''Calculation of total required land area and solar collection area.\n\t\t'''\n\n\t\tbaggie_land_area = self.baggie_number * self.baggie_area\n\t\ttotal_land_area = baggie_land_area * (1. 
+ dcf.inp['Reactor Baggies']['Additional land area (%)']['Value'])\n\n\t\tself.total_land_area_acres = total_land_area * 0.000247105\n\t\tself.total_solar_collection_area = baggie_land_area\n","repo_name":"jschneidewind/pyH2A","sub_path":"src/pyH2A/Plugins/Photocatalytic_Plugin.py","file_name":"Photocatalytic_Plugin.py","file_ext":"py","file_size_in_byte":12055,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"60"} +{"seq_id":"31976588305","text":"from django.db import reset_queries\nfrom rest_framework import generics, status, permissions, exceptions\nfrom rest_framework.response import Response\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\nfrom django.shortcuts import get_list_or_404\nfrom drf_yasg.utils import swagger_auto_schema\nfrom drf_yasg import openapi\nfrom .models import Hackathon, Team, Submission\nfrom .serializers import (\n HackathonSerializer,\n TeamDetailSerializer,\n TeamCreateSerializer,\n JoinTeamSerializer,\n SubmissionsSerializer,\n MemberExitSerializer,\n SubmissionRUDSerializer,\n HackathonDetailSerializer\n)\nfrom .permissions import (\n HackathonPermissions,\n AllowCompleteProfile,\n IsLeaderOrSuperUser\n)\n\nquery_param = openapi.Parameter(\n 'user_specific', openapi.IN_QUERY, \n description=\"Query parameter - Returns all teams of hackathons if user_specific value is not specified.\\nTo get team of a user pass user_specific=[y, Y, True]\",\n type=openapi.TYPE_STRING, enum=['y', 'Y', 'True']\n)\n\n@method_decorator(name=\"get\", decorator=swagger_auto_schema(manual_parameters=[query_param]))\nclass HackathonTeamView(generics.ListCreateAPIView):\n \"\"\"\n get:\n Returns a list of teams in a particular hackathon\n post:\n Creates a new team in a hackathon and return the team_id\n \"\"\"\n def get_permissions(self):\n user_specific = self.request.query_params.get('user_specific', None)\n if self.request.method == \"GET\":\n if user_specific in ['y', 'Y', 'True']:\n return [permissions.IsAuthenticated()]\n else:\n return [permissions.AllowAny()]\n else:\n return [permissions.IsAuthenticated(), AllowCompleteProfile()]\n\n def get_serializer_class(self):\n if self.request.method == 'GET':\n return TeamDetailSerializer\n else:\n return TeamCreateSerializer\n\n def get_serializer_context(self):\n return {\n 'request': self.request,\n 'kwargs': self.kwargs\n }\n\n def get_queryset(self, **kwargs):\n user_specific = self.request.query_params.get('user_specific', None)\n if getattr(self, 'swagger_fake_view', False):\n return None\n try:\n hackathon = Hackathon.objects.get(slug=self.kwargs['slug'])\n except Hackathon.DoesNotExist:\n raise exceptions.NotFound(\"Hackathon does not exist!\")\n if user_specific in ['y', 'Y', 'True']:\n queryset = Team.objects.filter(hackathon=hackathon, members=self.request.user).select_related('leader').prefetch_related('members')\n else:\n queryset = Team.objects.filter(hackathon=hackathon).select_related('leader').prefetch_related('members')\n return queryset\n\n def post(self, request, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n team = serializer.save()\n data = {\"team_id\": team.team_id}\n return Response(data, status=status.HTTP_201_CREATED)\n\n\nclass JoinTeamView(generics.GenericAPIView):\n \"\"\"\n patch:\n Join a team with team_id and hackathon_id\n \"\"\"\n serializer_class = JoinTeamSerializer\n permission_classes = [permissions.IsAuthenticated, AllowCompleteProfile]\n def 
get_queryset(self):\n        if getattr(self, 'swagger_fake_view', False):\n            return None\n        pass\n\n    def get_serializer_context(self):\n        return {\n            'request': self.request,\n            'kwargs': self.kwargs\n        }\n\n    def patch(self, request, **kwargs):\n        serializer = self.get_serializer()\n        serializer.join_team()\n        return Response(\"Successfully joined team!\", status=status.HTTP_200_OK)\n\n\nquery_param = openapi.Parameter(\n    'query', openapi.IN_QUERY, description=\"Query parameter - Returns all hackathons if not specified.\",\n    type=openapi.TYPE_STRING, enum=['completed', 'upcoming', 'ongoing'])\n\n\n@method_decorator(name=\"get\", decorator=swagger_auto_schema(manual_parameters=[query_param]))\nclass HackathonListCreateView(generics.ListCreateAPIView):\n    \"\"\"\n    get:\n    Returns list of Hackathons according to query parameter.\n\n    post:\n    Creates a new hackathon. Only admin can create a hackathon\n    \"\"\"\n    serializer_class = HackathonSerializer\n\n    def get_permissions(self):\n        if self.request.method == \"GET\":\n            return [permissions.AllowAny()]\n        else:\n            return [permissions.IsAdminUser()]\n\n    def get_queryset(self):\n        queryset = Hackathon.objects.all()\n        query = self.request.query_params.get('query', None)\n        current_date = timezone.now()\n        if query is not None:\n            if query == 'ongoing':\n                queryset = Hackathon.objects.filter(\n                    start__lt=current_date, end__gt=current_date)\n            elif query == 'completed':\n                queryset = Hackathon.objects.filter(\n                    start__lt=current_date, end__lt=current_date)\n            elif query == 'upcoming':\n                queryset = Hackathon.objects.filter(\n                    start__gt=current_date, end__gt=current_date)\n            else:\n                raise exceptions.ValidationError(\"Invalid query parameter!\")\n        return queryset\n\n\nclass HackathonsRUDView(generics.RetrieveUpdateDestroyAPIView):\n    \"\"\"\n    API used to read, update or delete the hackathon objects by their id.\n    Only the Super User has the permissions to update or delete hackathon objects.\n    \"\"\"\n\n    permission_classes = [HackathonPermissions]\n    serializer_class = HackathonDetailSerializer\n    lookup_field = 'slug'\n    queryset = Hackathon.objects.all()\n\n\nclass HackathonSubmissionView(generics.ListCreateAPIView):\n    \"\"\"\n    API to handle GET and POST for submission.\n    For GET method:\n        (i) Superuser can get all submissions (in any case).\n        (ii) For ongoing hackathon authenticated users will get submissions of their team.\n        (iii) For ongoing hackathon unauthenticated users will get ERROR 401 Unauthorized.\n        (iv) If hackathon is not ongoing then anyone (even unauthenticated) will get all the submissions.\n    For POST method:\n        Data that should be sent is:\n        (i) team_id(joining code)\n        (ii) submission_url(url where code is hosted)\n        (iii) title of submission(generally title of project)\n        (iv) description of submission(generally description of project)\n    \"\"\"\n    serializer_class = SubmissionsSerializer\n    permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n\n    def get_queryset(self, **kwargs):\n        if getattr(self, 'swagger_fake_view', False):\n            return None\n        try:\n            hackathon = Hackathon.objects.get(slug=self.kwargs['slug'])\n        except Hackathon.DoesNotExist:\n            raise exceptions.NotFound(\"Hackathon does not exist!\")\n        else:\n            user = self.request.user\n            if hackathon.status == \"Ongoing\":\n                if user.is_authenticated:\n                    if user.is_superuser:\n                        return Submission.objects.filter(hackathon=hackathon)\n                    else:\n                        try:\n                            team = Team.objects.get(members=user, hackathon=hackathon)\n                        except Team.DoesNotExist:\n                            raise exceptions.NotFound(\"Team does not exist!\")\n                        else:\n                            return 
Submission.objects.filter(hackathon=hackathon, team=team)\n else:\n raise exceptions.NotAuthenticated(detail=\"Authentication is required to get submissions of ongoing hackathon!\")\n else:\n return Submission.objects.filter(hackathon=hackathon)\n\n def create(self, request, *args, **kwargs):\n try:\n hackathon = Hackathon.objects.get(slug=self.kwargs['slug'])\n # The default score should remain zero\n # even if user has passed any other value\n if 'score' in request.data:\n request.data['score'] = 0\n if hackathon.status != \"Ongoing\":\n return Response(\"Submissions can only be made to Ongoing Hackathons\", status=status.HTTP_400_BAD_REQUEST)\n team = Team.objects.get(members=request.user, hackathon=hackathon)\n if request.data['team'] != team.team_id:\n return Response(\"You can make submission only for your team\", status=status.HTTP_400_BAD_REQUEST)\n submission = Submission.objects.filter(\n team=team, hackathon=hackathon)\n if len(submission):\n return Response(\"A Submission Already Exists!\", status=status.HTTP_400_BAD_REQUEST)\n # As we are using id as pk for hackathon, and slug for routing\n # so due to foreign key constraints we need to change request data to contain hackathon pk.\n #Similar reason for team.\n request.data['hackathon'] = hackathon.pk\n request.data['team'] = team.pk\n\n except Hackathon.DoesNotExist:\n raise exceptions.NotFound(\"Hackathon does not exist!\")\n except Team.DoesNotExist:\n raise exceptions.NotFound(\"Team does not exist!\")\n except KeyError as e:\n return Response(\"Improper data found.\", status=status.HTTP_400_BAD_REQUEST)\n\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n headers = self.get_success_headers(serializer.data)\n return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)\n\n\nclass TeamView(generics.RetrieveUpdateDestroyAPIView):\n \"\"\"\n API used to read, update or delete the Team objects by their team_id. Only the Super User has the permissions to delete Team objects.\n \"\"\"\n\n def get_permissions(self):\n if self.request.method == \"GET\":\n return [permissions.AllowAny()]\n else:\n return [permissions.IsAuthenticated(), IsLeaderOrSuperUser()]\n\n serializer_class = TeamDetailSerializer\n queryset = Team.objects.all().select_related('leader').prefetch_related('members')\n lookup_field = 'team_id'\n\nclass MemberExitView(generics.GenericAPIView):\n \"\"\"\n Allows only leader to remove any team member.\n Team members can exit but cannot remove others.\n Leader cannot exit team. 
If the leader wants to leave, they have to delete the team.\n    \"\"\"\n\n    serializer_class = MemberExitSerializer\n    queryset = Team.objects.all()\n    lookup_field = 'team_id'\n    permission_classes = [permissions.IsAuthenticated]\n    def get_serializer_context(self):\n        return {\n            'request': self.request,\n            'kwargs': self.kwargs\n        }\n\n    def patch(self, request, **kwargs):\n        serializer = self.get_serializer()\n        serializer.exit_team()\n        return Response(\"Successfully removed from the team\", status=status.HTTP_200_OK)\n\nclass SubmissionRUDView(generics.RetrieveUpdateDestroyAPIView):\n    \"\"\"\n    API used to read, update, or delete a particular submission of a particular hackathon.\n    \"\"\"\n    serializer_class = SubmissionRUDSerializer\n    lookup_field = 'id'\n\n    def get_permissions(self):\n        if self.request.method == \"GET\":\n            return [permissions.AllowAny()]\n        else:\n            return [permissions.IsAuthenticated()]\n\n    def get_queryset(self, **kwargs):\n        if getattr(self, 'swagger_fake_view', False):\n            return None\n\n        queryset = Submission.objects.filter(id=self.kwargs['id']).select_related('team', 'hackathon')\n        user = self.request.user\n        if queryset:\n            hackathon = Hackathon.objects.get(id=queryset[0].hackathon_id)\n            if hackathon.status == 'Completed':\n                if self.request.method == 'GET':\n                    return queryset\n                elif self.request.method == 'DELETE' or self.request.method == 'PUT' or self.request.method == 'PATCH':\n                    try:\n                        team = Team.objects.get(members=user, hackathon=hackathon)\n                        return queryset\n                    except Team.DoesNotExist:\n                        raise exceptions.PermissionDenied(detail=\"Not a member of the registered team\")\n            elif hackathon.status == 'Ongoing':\n                if not user.is_authenticated:\n                    raise exceptions.PermissionDenied(detail=\"Must be logged in to view ongoing submissions\")\n                try:\n                    team = Team.objects.get(members=user, hackathon=hackathon)\n                    return queryset\n                except Team.DoesNotExist:\n                    raise exceptions.PermissionDenied(detail=\"Not a member of the registered team\")\n            else:\n                raise exceptions.PermissionDenied(detail=\"Hackathon has not started yet\")\n        else:\n            raise exceptions.NotFound(\"Submission does not exist!\")\n","repo_name":"COPS-IITBHU/hackalog-backend","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12972,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"60"}
+{"seq_id":"3297145305","text":"__version__ = \"1.7.0\"\n\nfrom click import Group\n\nfrom hypergrowth.framework import load_modules\n\n\nclass Configuration:\n\n    def __init__(self,\n                 controllers: str,\n                 interfaces: str,\n                 main_command_group: Group):\n        \"\"\"\n        :param controllers: path to controllers `example.controller`\n        :param interfaces: path to interfaces `example.interfaces`\n        :param main_command_group: cli\n        \"\"\"\n\n        def handle_groups(attribute, attribute_name):\n            if isinstance(attribute, Group):\n                main_command_group.add_command(attribute)\n\n        load_modules(controllers, lambda *args, **kwargs: None)\n        load_modules(interfaces, handle_groups)\n","repo_name":"alex4u2nv/hypergrowth","sub_path":"framework/hypergrowth/hypergrowth/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"60"}
+{"seq_id":"2522347297","text":"# Problem Id: 1822\n# Problem Name: Sign of the Product of an Array, 数组元素积的符号\n# Problem Url: https://leetcode-cn.com/problems/sign-of-the-product-of-an-array/\n# Problem Level: Easy\n# Language: Python3\n \nclass Solution:\n    def arraySign(self, nums: List[int]) -> int:\n        
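# The sign of a product depends only on whether any factor is zero and on\n        # the parity of the negative-factor count, so no multiplication is needed.\n        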
negatives = 0\n        for i in nums:\n            if i == 0:\n                return 0\n            elif i < 0:\n                negatives += 1\n        if negatives % 2 == 0:\n            return 1\n        else:\n            return -1","repo_name":"siru-xiong/leetcode-solutions","sub_path":"solutions/1822-数组元素积的符号.py","file_name":"1822-数组元素积的符号.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"18441143310","text":"import requests\nfrom bs4 import BeautifulSoup\nurl = 'https://www.mkc.edu.tw/Home?Sn=EV6S9OxI5fUiItcBOR6%2fsn0j5rmStDGs&%E8%A1%8C%E6%94%BF%E5%96%AE%E4%BD%8D'\nres = requests.get(url)\nhtml = BeautifulSoup(res.text, 'html.parser')\ndata = html.find('a',{'class':'logo'})\ndata = data.find('img')\nprint(data)\nprint(url + data.get('src'))\nres = requests.get(url + data.get('src'))\nfile = open(r'馬偕護專校徽.txt','w',encoding='utf-8')\nfile.write(res.text)\nfile.close()","repo_name":"Changtzuwei/python","sub_path":"校網爬蟲/行政單位作業.py","file_name":"行政單位作業.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"6711150735","text":"from components.components import *\nfrom components.componentsystem import Viewport\nfrom game.misc.lang import Lang\nfrom game.misc.sounds import Sounds\nfrom util.myenvironment import Environment\nfrom util.utils import Util\n\n\nclass SettingsMenu(Viewport):\n    def __init__(self, size: tuple[int, int], environment: Environment):\n        super().__init__(size, environment)\n        self.setup()\n    \n    @Util.MonkeyUtils.autoErrorHandling\n    def setup(self):\n        self.lang = Lang()\n\n        self.setCursor(Util.loadSpritesheet(\"data/assets/pointer.bmp\", (18, 18), 1, transparentColor=(69, 78, 91))[0])\n        self.setCustomCursorEnabled(True)\n\n        # back button in the bottom left\n        self.back_button = Button((5, self.size[1] - 45), (200, 40), self.lang.get(Lang.MENU_ACTION_BACK))\n        self.back_button.on_click = lambda: (Sounds.playSound(Sounds.MENU_CLICK), Util.backViewport(self.environment))\n        \n        \n        self.registerComponents([self.back_button])\n    \n    @Util.MonkeyUtils.autoErrorHandling\n    def draw(self, environment: dict):\n        super().draw(environment)\n\nVIEWPORT_CLASS = SettingsMenu","repo_name":"DylanBruner/SurvivalGame","sub_path":"viewports/settingsmenu.py","file_name":"settingsmenu.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"4319520153","text":"from typing import List\n\n\ndef solveQueens(n: int) -> List[List[int]]:\n    col = set()\n    pdiag = set()\n    ndiag = set()\n\n    res = []\n    board = [['.'] * n for i in range(n)]\n\n    def backtrack(r):\n        if r == n:\n            copy = [\"\".join(row) for row in board]\n            res.append(copy)\n            return\n\n        for c in range(n):\n            if c in col or (r + c) in pdiag or (r - c) in ndiag:\n                continue\n            else:\n                col.add(c)\n                pdiag.add(r + c)\n                ndiag.add(r - c)\n                board[r][c] = \"Q\"\n\n                backtrack(r + 1)\n\n                col.remove(c)\n                pdiag.remove(r + c)\n                ndiag.remove(r - c)\n                board[r][c] = \".\"\n\n    backtrack(0)\n    return res\n","repo_name":"joshir/leet-py","sub_path":"backtracking/NQueens.py","file_name":"NQueens.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"18225432113","text":"from django.urls import include\nfrom django.urls import path\nfrom django.urls import register_converter\nfrom rest_framework import routers\n\nfrom common.paths import get_ui_paths\nfrom footnotes import views\nfrom 
footnotes.path_converters import FootnoteIdConverter\nfrom footnotes.path_converters import FootnoteTypeIdConverter\n\nregister_converter(FootnoteIdConverter, \"footnote_id\")\nregister_converter(FootnoteTypeIdConverter, \"footnote_type_id\")\n\napi_router = routers.DefaultRouter()\napi_router.register(\n    r\"footnotes\",\n    views.FootnoteViewSet,\n    basename=\"footnote\",\n)\napi_router.register(\n    r\"footnote_types\",\n    views.FootnoteTypeViewSet,\n)\n\ndetail = \"<footnote_id:footnote_id>\"\ndescription_detail = \"<footnote_id:footnote_id>/description/<int:sid>\"\nui_patterns = get_ui_paths(views, detail, description=description_detail)\n\nui_patterns += [\n    path(\n        f\"{detail}/descriptions/\",\n        views.FootnoteDetailDescriptions.as_view(),\n        name=\"footnote-ui-detail-descriptions\",\n    ),\n    path(\n        f\"{detail}/measures/\",\n        views.FootnoteDetailMeasures.as_view(),\n        name=\"footnote-ui-detail-measures\",\n    ),\n    path(\n        f\"{detail}/version-control/\",\n        views.FootnoteDetailVersionControl.as_view(),\n        name=\"footnote-ui-detail-version-control\",\n    ),\n]\n\nurlpatterns = [\n    path(\"footnotes/\", include(ui_patterns)),\n    path(\"api/\", include(api_router.urls)),\n]\n","repo_name":"uktrade/tamato","sub_path":"footnotes/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"60"}
+{"seq_id":"36780398379","text":"__docformat__ = \"reStructuredText\"\nimport zope.interface\nfrom zope.schema.fieldproperty import FieldProperty\n\nfrom z3c.form.browser import interfaces\nfrom z3c.form.interfaces import INPUT_MODE\nfrom z3c.form.interfaces import IFieldWidget\n\n\nclass WidgetLayoutSupport:\n    \"\"\"Widget layout support\"\"\"\n\n    def wrapCSSClass(self, klass, pattern='%(class)s'):\n        \"\"\"Return a list of css class names wrapped with given pattern\"\"\"\n        if klass is not None and pattern is not None:\n            return [pattern % {'class': k} for k in klass.split()]\n        else:\n            return []\n\n    def getCSSClass(self, klass=None, error=None, required=None,\n        classPattern='%(class)s', errorPattern='%(class)s-error',\n        requiredPattern='%(class)s-required'):\n        \"\"\"Setup given css class (klass) with error and required postfix\n\n        If no klass name is given, the widget's css class name/names get used.\n        More than one (space-separated) name can also be given as the klass\n        argument.\n\n        This method can be used from your form or widget template or widget\n        layout template without re-implementing the widget itself just because\n        you use a different CSS class concept.\n\n        The following sample:\n\n        
<div tal:define=\"widget nameWidget\"\n             tal:attributes=\"class python:widget.getCSSClass('line')\">\n          label widget and error\n        </div>\n\n        will render a div tag if the widget field defines required=True:\n\n        <div class=\"line-required line\">\n          label widget and error\n        </div>\n\n        And the following sample:\n\n        <div tal:define=\"widget nameWidget\"\n             tal:attributes=\"class python:widget.getCSSClass('line')\">\n          label widget and error\n        </div>\n\n        will render a div tag if the widget field defines required=True\n        and an error occurs:\n\n        <div class=\"line-error line-required line\">\n          label widget and error\n        </div>
\n\n        Note: you need to define a global widget property if you use\n        python:widget (in your form template). And you need to use the\n        view scope in your widget or layout templates.\n\n        Note: you can set the pattern to None to skip error or required\n        rendering. Or you can use a pattern like 'error' or 'required' if\n        you would like to skip postfixing your default css klass name for\n        error or required rendering.\n\n        \"\"\"\n        classes = []\n        # setup class names\n        if klass is not None:\n            kls = klass\n        else:\n            kls = self.css\n\n        # setup error class names\n        if error is not None:\n            error = error\n        else:\n            error = kls\n\n        # setup required class names\n        if required is not None:\n            required = required\n        else:\n            required = kls\n\n        # append error class names\n        if self.error is not None:\n            classes += self.wrapCSSClass(error, errorPattern)\n        # append required class names\n        if self.required:\n            classes += self.wrapCSSClass(required, requiredPattern)\n        # append given class names\n        classes += self.wrapCSSClass(kls, classPattern)\n        # remove duplicated class names but keep order\n        unique = []\n        for name in classes:\n            if name not in unique:\n                unique.append(name)\n        return ' '.join(unique)\n\n\n@zope.interface.implementer(interfaces.IHTMLFormElement)\nclass HTMLFormElement(WidgetLayoutSupport):\n\n    id = FieldProperty(interfaces.IHTMLFormElement['id'])\n    klass = FieldProperty(interfaces.IHTMLFormElement['klass'])\n    style = FieldProperty(interfaces.IHTMLFormElement['style'])\n    title = FieldProperty(interfaces.IHTMLFormElement['title'])\n\n    lang = FieldProperty(interfaces.IHTMLFormElement['lang'])\n\n    onclick = FieldProperty(interfaces.IHTMLFormElement['onclick'])\n    ondblclick = FieldProperty(interfaces.IHTMLFormElement['ondblclick'])\n    onmousedown = FieldProperty(interfaces.IHTMLFormElement['onmousedown'])\n    onmouseup = FieldProperty(interfaces.IHTMLFormElement['onmouseup'])\n    onmouseover = FieldProperty(interfaces.IHTMLFormElement['onmouseover'])\n    onmousemove = FieldProperty(interfaces.IHTMLFormElement['onmousemove'])\n    onmouseout = FieldProperty(interfaces.IHTMLFormElement['onmouseout'])\n    onkeypress = FieldProperty(interfaces.IHTMLFormElement['onkeypress'])\n    onkeydown = FieldProperty(interfaces.IHTMLFormElement['onkeydown'])\n    onkeyup = FieldProperty(interfaces.IHTMLFormElement['onkeyup'])\n\n    disabled = FieldProperty(interfaces.IHTMLFormElement['disabled'])\n    tabindex = FieldProperty(interfaces.IHTMLFormElement['tabindex'])\n    onfocus = FieldProperty(interfaces.IHTMLFormElement['onfocus'])\n    onblur = FieldProperty(interfaces.IHTMLFormElement['onblur'])\n    onchange = FieldProperty(interfaces.IHTMLFormElement['onchange'])\n\n    # layout support\n    css = FieldProperty(interfaces.IHTMLFormElement['css'])\n\n    def addClass(self, klass: str):\n        \"\"\"Add a class to the HTML element.\n\n        See interfaces.IHTMLFormElement.\n        \"\"\"\n        if not self.klass:\n            self.klass = str(klass)\n        else:\n            # make sure items are not repeated\n            parts = self.klass.split() + klass.split()\n            # Remove duplicates and keep order.\n            # Dictionaries are ordered in Python 3.7+\n            parts = list(dict.fromkeys(parts))\n            self.klass = \" \".join(parts)\n\n    def update(self):\n        \"\"\"See z3c.form.interfaces.IWidget\"\"\"\n        super().update()\n        if self.mode == INPUT_MODE and self.required:\n            self.addClass('required')\n\n    @property\n    def _html_attributes(self) -> list:\n        \"\"\"Return a list of HTML attributes managed by this class.\"\"\"\n        # This is basically a list of all the FieldProperty names except for\n        # the `css` property, which is not an HTML attribute.\n        return [\n            
\"id\",\n \"klass\", # will be changed to `class`\n \"style\",\n \"title\",\n \"lang\",\n \"onclick\",\n \"ondblclick\",\n \"onmousedown\",\n \"onmouseup\",\n \"onmouseover\",\n \"onmousemove\",\n \"onmouseout\",\n \"onkeypress\",\n \"onkeydown\",\n \"onkeyup\",\n \"disabled\",\n \"tabindex\",\n \"onfocus\",\n \"onblur\",\n \"onchange\",\n ]\n\n _attributes = None\n\n @property\n def attributes(self) -> dict:\n # If `attributes` were explicitly set, return them.\n if isinstance(self._attributes, dict):\n return self._attributes\n\n # Otherwise return the default set of non-empty HTML attributes.\n attributes_items = [\n (\"class\" if attr == \"klass\" else attr, getattr(self, attr, None))\n for attr in self._html_attributes\n ]\n self._attributes = {key: val for key, val in attributes_items if val}\n return self._attributes\n\n @attributes.setter\n def attributes(self, value: dict):\n # Store the explicitly set attributes.\n self._attributes = value\n\n\n@zope.interface.implementer(interfaces.IHTMLInputWidget)\nclass HTMLInputWidget(HTMLFormElement):\n\n readonly = FieldProperty(interfaces.IHTMLInputWidget['readonly'])\n alt = FieldProperty(interfaces.IHTMLInputWidget['alt'])\n accesskey = FieldProperty(interfaces.IHTMLInputWidget['accesskey'])\n onselect = FieldProperty(interfaces.IHTMLInputWidget['onselect'])\n\n @property\n def _html_attributes(self) -> list:\n attributes = super()._html_attributes\n attributes.extend(\n [\n \"readonly\",\n \"alt\",\n \"accesskey\",\n \"onselect\",\n ]\n )\n return attributes\n\n\n@zope.interface.implementer(interfaces.IHTMLTextInputWidget)\nclass HTMLTextInputWidget(HTMLInputWidget):\n\n size = FieldProperty(interfaces.IHTMLTextInputWidget['size'])\n maxlength = FieldProperty(interfaces.IHTMLTextInputWidget['maxlength'])\n placeholder = FieldProperty(interfaces.IHTMLTextInputWidget['placeholder'])\n autocapitalize = FieldProperty(\n interfaces.IHTMLTextInputWidget['autocapitalize'])\n\n @property\n def _html_attributes(self) -> list:\n attributes = super()._html_attributes\n attributes.extend(\n [\n \"size\",\n \"maxlength\",\n \"placeholder\",\n \"autocapitalize\",\n ]\n )\n return attributes\n\n\n@zope.interface.implementer(interfaces.IHTMLTextAreaWidget)\nclass HTMLTextAreaWidget(HTMLFormElement):\n\n rows = FieldProperty(interfaces.IHTMLTextAreaWidget['rows'])\n cols = FieldProperty(interfaces.IHTMLTextAreaWidget['cols'])\n readonly = FieldProperty(interfaces.IHTMLTextAreaWidget['readonly'])\n accesskey = FieldProperty(interfaces.IHTMLTextAreaWidget['accesskey'])\n onselect = FieldProperty(interfaces.IHTMLTextAreaWidget['onselect'])\n\n @property\n def _html_attributes(self) -> list:\n attributes = super()._html_attributes\n attributes.extend(\n [\n \"rows\",\n \"cols\",\n \"readonly\",\n \"accesskey\",\n \"onselect\",\n ]\n )\n return attributes\n\n\n@zope.interface.implementer(interfaces.IHTMLSelectWidget)\nclass HTMLSelectWidget(HTMLFormElement):\n\n multiple = FieldProperty(interfaces.IHTMLSelectWidget['multiple'])\n size = FieldProperty(interfaces.IHTMLSelectWidget['size'])\n\n @property\n def _html_attributes(self) -> list:\n attributes = super()._html_attributes\n attributes.extend(\n [\n \"multiple\",\n \"size\",\n ]\n )\n return attributes\n\n\ndef addFieldClass(widget):\n \"\"\"Add a class to the widget that is based on the field type name.\n\n If the widget does not have field, then nothing is done.\n \"\"\"\n if IFieldWidget.providedBy(widget):\n klass = str(widget.field.__class__.__name__.lower() + '-field')\n 
widget.addClass(klass)\n","repo_name":"zopefoundation/z3c.form","sub_path":"src/z3c/form/browser/widget.py","file_name":"widget.py","file_ext":"py","file_size_in_byte":10122,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"60"} +{"seq_id":"21397224494","text":"\"\"\"\nFile to test web_endpoint lambda.\nV0.02, October 17, 2022, GAW\n\"\"\"\n\nfrom datetime import date, timedelta\nimport json\nfrom unittest.mock import Mock, patch\nimport pytest\n\nfrom . import addSrcToPath\n\nimport security\nimport web_endpoint\nimport webPriceInfo\n\n@pytest.mark.unit\nclass Test_web_endpoint:\n \"\"\"def createSecurity(self, name, symbol, buyPrice, sellPrice, currentPrice,\n lastClosePrice, high52Week):\n mySec = security.Security()\n priceInfo = security.PriceInfo()\n priceInfo.currentPrice = currentPrice\n priceInfo.lastClosePrice = lastClosePrice\n priceInfo.high52Week = high52Week\n mySec.pop_with_priceInfo(name, symbol, buyPrice, sellPrice, priceInfo)\n return mySec\n \"\"\"\n\n def test_get_website_data(self):\n \"\"\"\n Testing that get something back, when run locally.\n \"\"\"\n\n with patch('historicalPricesInterface.HistoricalPricesInterface.remove_old_prices',\n return_value=Mock()) as mock_remove:\n mock_remove.return_value = 5\n\n with patch('securities.Securities.get_web_data', return_value=Mock()) as mock_data:\n mock_data.return_value = self.gen_two_webPriceInfos()\n\n result = web_endpoint.get_website_data({\"queryStringParameters\":\n {\"timeframe\": \"30days\"}},\n \"morestuff\")\n\n # print(\"test result:\" + json.dumps(result))\n assert len(result) > 0\n body = result[\"body\"]\n\n reconstituted = json.loads(body)\n # print(f\"test_get_website_data, {reconstituted=}\")\n assert len(reconstituted) == 2\n\n def gen_two_webPriceInfos(self):\n applesec = security.Security()\n applesec.pop(\"Apple\", \"AAPL\", 123.45, 543.21, 200.33)\n applesec.id = 3\n applesec.fullHistoryDownloaded = False\n\n msftsec = security.Security()\n msftsec.pop(\"Microsoft\", \"MSFT\", 223.45, 343.21, 80.33)\n msftsec.id = 3\n msftsec.fullHistoryDownloaded = False\n\n infos = []\n appleInfo = webPriceInfo.WebPriceInfo()\n appleInfo.populate(applesec, self.gen_prices(applesec))\n infos.append(appleInfo)\n\n msftInfo = webPriceInfo.WebPriceInfo()\n msftInfo.populate(msftsec, self.gen_prices(msftsec))\n infos.append(msftInfo)\n\n return infos\n\n def gen_prices(self, mySec):\n highPrice = mySec.currentPrice\n decPrice = 1.32\n numRows = 30\n curDate = date(2021, 12, 19)\n\n prices = []\n for i in range(0, numRows):\n curDate += timedelta(1)\n curPrice = highPrice - i * decPrice\n prices.append({\"priceDate\": curDate, \"price\": curPrice})\n print(f\"prices: {prices}\")\n\n return prices\n","repo_name":"gregw18/stockPriceRetriever","sub_path":"tests/test_web_endpoint.py","file_name":"test_web_endpoint.py","file_ext":"py","file_size_in_byte":2863,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"74894026112","text":"#!/usr/bin/python3\nimport colorama\nimport emoji\nimport environment\nimport csv\nimport numpy as np\n\n\ndef printV(V, env):\n n = 0\n for i in range(env.maxY):\n for j in range(env.maxX):\n if env.is_wall(n):\n color = colorama.Fore.RED\n elif env.is_gas_station(n):\n color = colorama.Fore.GREEN\n elif env.is_terminal(n):\n color = colorama.Fore.CYAN\n elif env.is_start(n):\n color = colorama.Fore.BLUE\n else:\n color = colorama.Fore.RESET\n\n if V[n] == -0:\n value = 0\n else:\n value = 
V[n]\n\n print(color + \"%+.3f\" % value, end=\"\")\n print(colorama.Fore.RESET + \" | \", end=\"\")\n n += 1\n print(colorama.Fore.RESET)\n\n\ndef printPolicy(policy, env, iter=0):\n n = 0\n for i in range(env.maxY):\n for j in range(env.maxX):\n arg = np.argmax(policy[n])\n if env.is_wall(n):\n out = emoji.emojize(':white_large_square:', use_aliases=True)\n elif env.is_terminal(n):\n out = emoji.emojize(':white_check_mark:', use_aliases=True)\n elif arg == env.UP and iter != 0:\n out = emoji.emojize(':arrow_up_small:', use_aliases=True)\n elif arg == env.RIGHT and iter != 0:\n out = emoji.emojize(':fast_forward:', use_aliases=True)\n elif arg == env.DOWN and iter != 0:\n out = emoji.emojize(':arrow_down_small:', use_aliases=True)\n elif arg == env.LEFT and iter != 0:\n out = emoji.emojize(':rewind:', use_aliases=True)\n else:\n out = emoji.emojize(':new:', use_aliases=True)\n\n print(out, end=\" \")\n n += 1\n print()\n\n\ndef write_to_csv(list, name=\"statistics.csv\"):\n with open(name, 'w', newline=\"\") as file:\n writer = csv.writer(file)\n writer.writerows(map(lambda x: [x], list))\n\n\ndef value_iteration(env, epsilon=0.000001, discount_factor=1.0):\n def calculate_v_values(V, action, state):\n [(probability, next_state, cost, done)] = env.P[state][action]\n return probability * (cost + discount_factor * V[next_state])\n\n # Initialize states.\n V_new = np.zeros(env.num_states)\n #V_new[38] = -24 # Terminal state.\n policy = np.zeros([env.num_states, env.NUM_ACTIONS])\n iteration = 0\n delta = [0]*200\n\n\n while True:\n V_old = V_new\n print(\"\\nIteration:\", iteration)\n print(\"\\nGrid policy:\")\n printPolicy(policy, env, iteration)\n print(\"\\nGrid Value Function:\")\n printV(V_old, env)\n print(\"\\nDelta (Biggest Value function difference):\", delta[iteration])\n print(\"\\n====================================================================================\")\n\n iteration += 1\n delta[iteration] = 0\n\n V_new = np.zeros(env.num_states)\n\n # Iterate through all states.\n for state in range(env.num_states):\n action_values = np.zeros(env.NUM_ACTIONS)\n\n # Iterate through all actions of state.\n for action in range(env.NUM_ACTIONS):\n # Apply Bellman equation to calculate v.\n action_values[action] = calculate_v_values(V_old, action, state)\n\n # Pick the best action in this state (minimal costs).\n best_action_value = min(action_values)\n\n # Get biggest difference between best action value and our old value function.\n delta[iteration] = max(delta[iteration], abs(best_action_value - V_old[state]))\n\n # Apply Bellman optimality principle.\n V_new[state] = best_action_value\n\n # Update the policy.\n best_action = np.argmin(action_values)\n policy[state] = np.eye(env.NUM_ACTIONS)[best_action]\n\n # Check for convergence.\n if delta[iteration] < epsilon:\n break\n\n print(\"\\nFinished! 
(\" + str(iteration) + \" Iterations)\")\n print(\"\\nGrid policy:\")\n printPolicy(policy, env, iteration)\n print(\"\\nGrid Value Function:\")\n printV(V_old, env)\n print(\"\\nDelta (Biggest Value function difference):\", delta[iteration])\n\n return policy, V_old, delta\n\n\nif __name__ == \"__main__\":\n env = environment.Environment(\"./map\")\n policy, v, deltas = value_iteration(env)\n #write_to_csv(deltas, 'deltas-19.csv')\n","repo_name":"HendrikPfaff/Value-Iteration","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4444,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"21831576181","text":"\"\"\"\nIn a stock market, there is a product with its infinite stocks. The stock prices are given for N days, where arr[i] denotes the price of the stock on the ith day. There is a rule that a customer can buy at most i stock on the ith day. If the customer has an amount of k amount of money initially, find out the maximum number of stocks a customer can buy.\nFor example, for 3 days the price of a stock is given as 7, 10, 4. You can buy 1 stock worth 7 rs on day 1, 2 stocks worth 10 rs each on day 2 and 3 stock worth 4 rs each on day 3.\n\nExamples:\n\nInput : price[] = { 10, 7, 19 }, \n k = 45.\nOutput : 4\nA customer purchases 1 stock on day 1, \n2 stocks on day 2 and 1 stock on day 3 for \n10, 7 * 2 = 14 and 19 respectively. Hence, \ntotal amount is 10 + 14 + 19 = 43 and number \nof stocks purchased is 4.\n\nInput : price[] = { 7, 10, 4 }, \n k = 100.\nOutput : 6\n\n\n\"\"\"\n\n\n\ndef buyMaximumProducts(stocks,K,n):\n\n items = [[stocks[i],i+1] for i in range(n)]\n\n items.sort()\n\n ans = 0\n for i in range(n):\n ans += min(items[i][2],K/items[1])\n K -= items[i][0] * min(items[i][2],K/items[1])\n return ans\n","repo_name":"deepak01-Hacker/DAily-Practice","sub_path":"Buy Maximum Stocks if i stocks can be bought on i-th day.py","file_name":"Buy Maximum Stocks if i stocks can be bought on i-th day.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"40209497557","text":"import sys\nfrom collections import deque\ninput = sys.stdin.readline\n\nn, m = map(int, input().split())\nr, c, d = map(int, input().split())\ngraph = []\nfor _ in range(n):\n graph.append(list(map(int, input().split())))\nvisited = [[False] * m for _ in range(n)]\nvisited[r][c] = True\n# 북, 동, 남, 서\ndx = [-1, 0, 1, 0]\ndy = [0, 1, 0, -1]\nturnCnt = 0 # 회전 수\ncnt = 1 # 시작한 자리 청��\nwhile True:\n d -= 1\n if d == -1:\n d = 3 # 북 -> 서 -> 남 -> 동\n nx = r + dx[d]\n ny = c + dy[d]\n # 왼쪽 방향에 아직 청소하지 않은 공간이 존재한다면, 그 방향으로 회전한 다음 한 칸을 전진하고 청소\n if not visited[nx][ny] and graph[nx][ny] == 0:\n visited[nx][ny] = True\n r, c = nx, ny\n cnt += 1\n turnCnt = 0 # 이동한 위치에서 다시 탐색하므로 방향 전환 횟수 초기화\n continue\n\n else: # 왼쪽 방향에 청소할 공간이 없으므로 한번 더 왼쪽으로 회전\n turnCnt += 1\n\n if turnCnt == 4: # 네 방향 모두 청소가 이미 되어있거나 벽인 경우\n nx = r - dx[d]\n ny = c - dy[d]\n if graph[nx][ny] == 0:\n r, c = nx, ny # 바라보는 방향을 유지한 채로 후진\n turnCnt = 0 # 이동한 위치에서 다시 탐색하므로 방향 전환 횟수 초기화\n\n else: # 뒤쪽 방향이 벽이라 후진도 할 수 없는 경우 작동을 멈춤\n break\n\nprint(cnt)\n\n\n\n\n\n\n\n\n","repo_name":"seonwook97/Coding_test","sub_path":"Coding_Study/구현/14503.py","file_name":"14503.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"2753547350","text":"from nzmath.combinatorial import 
","repo_name":"deepak01-Hacker/DAily-Practice","sub_path":"Buy Maximum Stocks if i stocks can be bought on i-th day.py","file_name":"Buy Maximum Stocks if i stocks can be bought on i-th day.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"}
{"seq_id":"40209497557","text":"import sys\nfrom collections import deque\ninput = sys.stdin.readline\n\nn, m = map(int, input().split())\nr, c, d = map(int, input().split())\ngraph = []\nfor _ in range(n):\n graph.append(list(map(int, input().split())))\nvisited = [[False] * m for _ in range(n)]\nvisited[r][c] = True\n# north, east, south, west\ndx = [-1, 0, 1, 0]\ndy = [0, 1, 0, -1]\nturnCnt = 0 # number of consecutive rotations\ncnt = 1 # the starting cell is cleaned\nwhile True:\n d -= 1\n if d == -1:\n d = 3 # north -> west -> south -> east\n nx = r + dx[d]\n ny = c + dy[d]\n # if the cell to the left is still uncleaned, rotate to that direction,\n # move forward one cell and clean it\n if not visited[nx][ny] and graph[nx][ny] == 0:\n visited[nx][ny] = True\n r, c = nx, ny\n cnt += 1\n turnCnt = 0 # searching again from the new position, so reset the rotation count\n continue\n\n else: # nothing to clean on the left, so rotate left once more\n turnCnt += 1\n\n if turnCnt == 4: # all four directions are already cleaned or are walls\n nx = r - dx[d]\n ny = c - dy[d]\n if graph[nx][ny] == 0:\n r, c = nx, ny # back up while keeping the current direction\n turnCnt = 0 # searching again from the new position, so reset the rotation count\n\n else: # the cell behind is a wall, so the robot cannot back up and stops\n break\n\nprint(cnt)\n\n\n\n\n\n\n\n\n","repo_name":"seonwook97/Coding_test","sub_path":"Coding_Study/구현/14503.py","file_name":"14503.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"2753547350","text":"from nzmath.combinatorial import stirling2\nfrom scipy.misc import comb, factorial\n\ndef composition(r, n, k):\n \"\"\"\n number of integer solutions of\n $x_1 + x_2 + ... + x_n = r$\n $0 < x_i \\\leq k$\n \"\"\"\n comp = 0\n for m in range(n + 1):\n if r - 1 - m * k < n - 1:\n break\n if m % 2 == 0:\n comp += (comb(n, m, exact=True) * comb(r - 1 - m * k, n - 1, exact=True))\n else:\n comp -= (comb(n, m, exact=True) * comb(r - 1 - m * k, n - 1, exact=True))\n return comp\n\ndef read_split_contig(N, r, k):\n \"\"\"\n number of ways to get\n $Y_n - Y_1 = r$\n where there are $n$ distinct starting positions\n satisfying $0 < Y_{i + 1} - Y_i \\\leq k$\n \"\"\"\n if r == 0:\n if N > 0:\n return 1\n else:\n return 0\n splits = 0\n for n in range(2, min(r + 2, N + 1)):\n splits += (factorial(n, exact=True) * stirling2(N, n) * composition(r, n - 1, k))\n return splits\n\n","repo_name":"tianyang-li/meta-transcriptome","sub_path":"trans-len/help_1.py","file_name":"help_1.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"71236886910","text":"import sys\nsys.stdin = open(\"뿌요뿌요.txt\")\n\ndef Gravity(c):\n stack = []\n for i in range(R - 1, -1, -1):\n if arr[i][c] != '.':\n stack.append(arr[i][c])\n arr[i][c] = '.'\n\n j = R-1\n while stack:\n arr[j][c] = stack.pop(0)\n j -= 1\n\ndef BFS(r, c): # find a connected group of the same color (4+ cells means it pops)\n global sol, chk\n if chk[r][c]:\n return\n color = arr[r][c]\n queue = [(r, c)]\n stack = [(r, c)]\n\n chk[r][c] = 1\n cnt = 0\n while queue:\n r, c = queue.pop(0)\n for k in range(4):\n nr, nc = r + dr[k], c + dc[k]\n if nr < 0 or nr >= R or nc < 0 or nc >= C:\n continue\n if chk[nr][nc]:\n continue\n if arr[nr][nc] == color:\n cnt += 1\n chk[nr][nc] = 1\n stack.append((nr, nc))\n queue.append((nr, nc))\n\n\n # pop the group if it contains four or more of the same color\n # (cnt counts cells beyond the first, so cnt >= 3 means group size >= 4)\n\n if cnt >= 3:\n data.append(stack)\n\n\ndef PuyoPuyo(stack):\n global sol\n if stack:\n chkc = [0] * C\n while stack:\n temp = stack.pop(-1)\n while temp:\n r, c = temp.pop(-1)\n arr[r][c] = '.'\n if not chkc[c]:\n chkc[c] = 1\n\n\n # let the remaining puyos fall into the popped columns\n for i in range(C):\n if chkc[i]:\n Gravity(i)\n\n return 0\n else:\n return 1\n\n\nT = int(input())\ndr = [1, -1, 0, 0]\ndc = [0, 0, 1, -1]\nR, C = 12, 6\nfor tc in range(T):\n sol = 0\n arr = [list(input()) for _ in range(R)]\n data = []\n flag = 0\n while flag == 0:\n chk = [[0] * C for _ in range(R)]\n for i in range(R-1, -1, -1):\n for j in range(C): # scan every cell for groups\n if arr[i][j] != '.':\n BFS(i,j)\n sol += 1\n flag = PuyoPuyo(data)\n print(sol-1)","repo_name":"hoyoung2176/TIL","sub_path":"Algorithm/test/AD/뿌요뿌요.py","file_name":"뿌요뿌요.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"12099417195","text":"\n\nimport json\nimport os\n\n# effects that make you miss your turn\ncripef = ['freeze']\nnottargetef = ['shadowed']\n\n\ndef load_effects():\n effectpath = 'Data/Game/Effects/'\n effectdata = {}\n for file in os.listdir(effectpath):\n name, ext = os.path.splitext(file)\n effectdata[name] = json.load(open(effectpath + file))\n return effectdata\n\n\neffectdata = load_effects()\n\n\ndef load_emoji():\n emoji = {}\n for key, value in effectdata.items():\n emoji[key] = value['emoji']\n\n return emoji\n\n\ndef has_effect_list(eflist, target):\n end = False\n for ef in eflist:\n end = has_effect(ef, target) or end\n\n return end\n\n\ndef has_effect(ef, target):\n # membership test on the effect name only\n return ef in [effect for effect, duration in target.effects]\n
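\n# (Editor's note) Each entry in target.effects is assumed to be a\n# [name, remaining_duration] pair, e.g. target.effects == [['burn', 3]],\n# so has_effect('burn', target) above returns True.\n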
\ndef get_effect(ef, target):\n return [[effect, duration] for effect, duration in target.effects if effect == ef][0]\n\n\ndef remove_ef(ef, target):\n if has_effect(ef, target):\n to_remove = [[effect, duration] for effect, duration in target.effects if effect == ef][0]\n target.effects.remove(to_remove)\n end(ef, target) # pass the effect name, not the [name, duration] pair\n print('removed {}'.format(ef))\n else:\n print('could not remove {}'.format(ef))\n\n\ndef start(ef, target):\n if ef == 'burn':\n remove_ef('freeze', target)\n if ef == 'freeze':\n remove_ef('burn', target)\n if ef == 'shielded':\n target.stats['defense'] += 500\n if ef == 'magic_barrier':\n target.stats['magic_defense'] += 500\n if ef == 'strength':\n target.stats['strength'] += 50\n if ef == 'intelligence':\n target.stats['intelligence'] += 50\n if ef == 'invincible':\n target.stats['magic_defense'] += 100000\n target.stats['defense'] += 100000\n if ef == 'cursed':\n remove_ef('strength', target)\n remove_ef('intelligence', target)\n for stat in target.stats:\n target.stats[stat] -= 20 # rebinding a loop variable would not touch the dict\n\n\ndef tick(ef, target):\n if ef == 'burn':\n target.health -= 5\n return 'Burn 🔥 hits {0.name} for 5 damage'.format(target)\n if ef == 'freeze':\n return '{0.name} ❄️ is frozen'.format(target)\n if ef == 'bleed':\n dmg = (target.maxhealth - target.health) / 100\n target.health -= dmg\n return '{0.name} 💉 is bleeding for {1} damage'.format(target, dmg)\n if ef == 'shadowed':\n return '{0.name} 🌑 moves in the shadows'.format(target)\n if ef == 'shielded':\n return '{0.name} 🛡️ hides behind the shield'.format(target)\n if ef == 'magic_barrier':\n return '{0.name} 🔰️ is protected by a magic barrier'.format(target)\n if ef == 'strength':\n return '{0.name} 💪️ is full of strength'.format(target)\n if ef == 'intelligence':\n return '{0.name} 🧠️ is clear minded'.format(target)\n if ef == 'cleanse':\n for ef in list(target.effects): # iterate over a copy, remove_ef mutates the list\n remove_ef(ef[0], target)\n return '{0.name} 😶️ is immune to effects'.format(target)\n if ef == 'invincible':\n return '{0.name} 🔱️ is busy not taking damage'.format(target)\n if ef == 'cursed':\n return '{0.name} 🎃️ suffers under a curse'.format(target)\n return None\n\n\ndef end(ef, target):\n if ef == 'burn':\n return '{0.name} has stopped burning'.format(target)\n\n if ef == 'freeze':\n return '{0.name} is no longer frozen'.format(target)\n\n if ef == 'bleed':\n return '{0.name} has stopped bleeding'.format(target)\n\n if ef == 'shadowed':\n return '{0.name} has emerged from the shadows'.format(target)\n\n if ef == 'shielded':\n target.stats['defense'] -= 500\n return '{0.name} is no longer protected by the shield'.format(target)\n\n if ef == 'magic_barrier':\n target.stats['magic_defense'] -= 500\n return '{0.name} is no longer protected by the magic barrier'.format(target)\n\n if ef == 'strength':\n target.stats['strength'] -= 50\n return '{0.name} has lost their extra strength'.format(target)\n\n if ef == 'intelligence':\n target.stats['intelligence'] -= 50\n return '{0.name} has lost their extra intelligence'.format(target)\n\n if ef == 'cleanse':\n return '{0.name} is no longer immune against effects'.format(target)\n\n if ef == 'invincible':\n return '{0.name} can take damage again'.format(target)\n\n if ef == 'cursed':\n for stat in target.stats:\n target.stats[stat] += 20 # restore the stats lowered by the curse\n return '{0.name} is no longer cursed and can take damage 
again'.format(target)\n","repo_name":"chluebi/RPGBot","sub_path":"Data/Game/effects.py","file_name":"effects.py","file_ext":"py","file_size_in_byte":4575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"22461934561","text":"import glob\nimport os\nimport os.path as osp\nimport tempfile\nfrom argparse import ArgumentParser\n\nimport cv2\nimport mmcv\n\nfrom easycv.thirdparty.mot.bytetrack.byte_tracker import BYTETracker\nfrom easycv.thirdparty.mot.utils import detection_result_filter, show_result\nfrom .builder import PREDICTORS, build_predictor\n\n\n@PREDICTORS.register_module()\nclass MOTPredictor(object):\n \"\"\"MOT Predictor.\n\n\n Args:\n model_path (str): Path of model path.\n config_file (Optinal[str]): config file path for model and processor to init. Defaults to None.\n score_threshold(float): Specifies the filter score threshold for bbox.\n tracker_config (dict): Specify the parameters of the tracker.\n save_path (str): File path for saving results.\n fps: (int): Specify the fps of the output video.\n \"\"\"\n\n def __init__(\n self,\n model_path=None,\n config_file=None,\n detection_predictor_config={\n 'type': 'DetectionPredictor',\n 'model_path': None,\n 'config_file': None,\n 'score_threshold': 0.5\n },\n tracker_config={\n 'det_high_thresh': 0.2,\n 'det_low_thresh': 0.05,\n 'match_thresh': 1.0,\n 'match_thresh_second': 1.0,\n 'match_thresh_init': 1.0,\n 'track_buffer': 2,\n 'frame_rate': 25\n },\n show_result_config={\n 'score_thr': 0,\n 'show': False\n },\n save_path=None,\n IN_VIDEO=False,\n OUT_VIDEO=False,\n out_dir=None,\n fps=24):\n\n if model_path is not None:\n detection_predictor_config['model_path'] = model_path\n if config_file is not None:\n detection_predictor_config['config_file'] = config_file\n self.model = build_predictor(detection_predictor_config)\n self.tracker = BYTETracker(**tracker_config)\n self.fps = fps\n self.show_result_config = show_result_config\n self.output = save_path\n self.IN_VIDEO = IN_VIDEO\n self.OUT_VIDEO = OUT_VIDEO\n self.out_dir = out_dir\n\n def define_input(self, inputs):\n # support list(dict(str)) as input\n if isinstance(inputs, str):\n inputs = [{'filename': inputs}]\n elif isinstance(inputs, list) and not isinstance(inputs[0], dict):\n tmp = []\n for input in inputs:\n tmp.append({'filename': input})\n inputs = tmp\n\n # define input\n input = inputs[0]['filename']\n if osp.isdir(input):\n imgs = glob.glob(os.path.join(input, '*.jpg'))\n imgs.sort()\n self.IN_VIDEO = False\n else:\n imgs = mmcv.VideoReader(input)\n self.IN_VIDEO = True\n\n return imgs, input\n\n def define_output(self):\n if self.output is not None:\n if self.output.endswith('.mp4'):\n self.OUT_VIDEO = True\n self.out_dir = tempfile.TemporaryDirectory()\n out_path = self.out_dir.name\n _out = self.output.rsplit(os.sep, 1)\n if len(_out) > 1:\n os.makedirs(_out[0], exist_ok=True)\n else:\n self.OUT_VIDEO = False\n out_path = self.output\n os.makedirs(out_path, exist_ok=True)\n else:\n out_path = None\n return out_path\n\n def __call__(self, inputs):\n # define input\n imgs, input = self.define_input(inputs)\n # define output\n out_path = self.define_output()\n\n prog_bar = mmcv.ProgressBar(len(imgs))\n # test and show/save the images\n track_result = None\n track_result_list = []\n for frame_id, img in enumerate(imgs):\n if osp.isdir(input):\n timestamp = frame_id\n else:\n seconds = imgs.vcap.get(cv2.CAP_PROP_POS_MSEC) / 1000\n timestamp = seconds\n\n detection_results = self.model(img)[0]\n\n 
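# (Editor's note) Per frame: the detector returns boxes/scores/classes;\n            # detection_result_filter keeps only class 0 above its threshold, and\n            # BYTETracker then links the surviving boxes across frames into track IDs.\n            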
detection_boxes = detection_results['detection_boxes']\n detection_scores = detection_results['detection_scores']\n detection_classes = detection_results['detection_classes']\n\n detection_boxes, detection_scores, detection_classes = detection_result_filter(\n detection_boxes,\n detection_scores,\n detection_classes,\n target_classes=[0],\n target_thresholds=[0])\n if len(detection_boxes) > 0:\n track_result = self.tracker.update(\n detection_boxes, detection_scores,\n detection_classes) # [id, t, l, b, r, score]\n track_result['timestamp'] = timestamp\n track_result_list.append(track_result)\n\n if self.output is not None:\n if self.IN_VIDEO or self.OUT_VIDEO:\n out_file = osp.join(out_path, f'{frame_id:06d}.jpg')\n else:\n out_file = osp.join(out_path, img.rsplit(os.sep, 1)[-1])\n else:\n out_file = None\n\n if out_file is not None:\n show_result(\n img,\n track_result,\n wait_time=int(1000. / self.fps),\n out_file=out_file,\n **self.show_result_config)\n prog_bar.update()\n\n if self.output and self.OUT_VIDEO:\n print(\n f'making the output video at {self.output} with a FPS of {self.fps}'\n )\n mmcv.frames2video(\n out_path, self.output, fps=self.fps, fourcc='mp4v')\n self.out_dir.cleanup()\n\n return [track_result_list]\n","repo_name":"alibaba/EasyCV","sub_path":"easycv/predictors/mot_predictor.py","file_name":"mot_predictor.py","file_ext":"py","file_size_in_byte":5878,"program_lang":"python","lang":"en","doc_type":"code","stars":1565,"dataset":"github-code","pt":"60"}
{"seq_id":"39213810031","text":"# %%\n# import packages\nimport keras.backend as K\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.utils import np_utils\nfrom keras.layers.core import Dense, Activation\nfrom keras import regularizers\nfrom sklearn.linear_model import LogisticRegressionCV\nfrom keras.models import Sequential\nfrom keras.optimizers import RMSprop\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.utils import class_weight\nimport pandas as pd\nfrom tensorflow.python.keras.engine.base_layer import AddMetric\nfrom tensorflow.python.keras.metrics import Recall\nplt.style.use('ggplot')\n# %%\n# load the data\ninput1 = pd.read_csv(\"data/基本信息.csv\", encoding=\"gbk\")\ninput3 = pd.read_csv(\"data/透析中记录.csv\", encoding=\"gbk\")\ninput2 = pd.read_csv(\"data/血透治疗单.csv\", encoding=\"gbk\")\n# %%\nfor i in input1:\n print(input1[i].value_counts())\nprint(\"----------\")\nfor i in input2:\n print(input2[i].value_counts())\nfor i in input3:\n print(input3[i].value_counts())\n# %%\n# file 1: recode gender and turn birth year into age\ninput1.replace(\"男\", 1, inplace=True)\ninput1.replace(\"女\", 0, inplace=True)\ninput1[\"birthDay\"] = input1[\"birthDay\"].apply(\n lambda x: 2019-int(x.split(\"-\")[0]))\ninput1.to_csv(\"outputcsv/mid1.csv\", index=None,\n float_format='%.3f', encoding=\"gbk\")\n# %%\n# file 2: aggregate per patient and examination date\nmid2 = input2.groupby([\"病人id\", \"examineDate\"]).transform('sum')\nmid2[\"病人id\"] = input2[\"病人id\"]\nmid2.drop_duplicates(subset=\"参数记录\", inplace=True, keep=\"first\")\ninput2 = mid2\ninput2.to_csv(\"outputcsv/mid2.csv\", index=None,\n float_format='%.3f', encoding=\"gbk\")\n# %%\n# file 3: drop the systolic column and rows with missing values\ninput3 = input3.drop(\"收缩压\", axis=1)\ninput3.replace(\"\", np.nan, inplace=True)\ninput3.dropna(inplace=True)\n\ninput3.to_csv(\"outputcsv/mid3.csv\", index=None,\n float_format='%.3f', encoding=\"gbk\")\n# %%\n# read the intermediate data back in\n# %%\n# merge the three files\nmid1 = pd.read_csv(\"outputcsv/mid1.csv\", encoding=\"gbk\")\nmid2 = pd.read_csv(\"outputcsv/mid2.csv\", encoding=\"gbk\")\nmid3 = pd.read_csv(\"outputcsv/mid3.csv\", encoding=\"gbk\")\n#input3 = input3.drop([\"病人id\", \"记录时间\"], axis=1)\n
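# (Editor's note) The two merges below first join the intra-dialysis\n# records with the treatment sheet on '参数记录' (record id), then attach\n# patient demographics on '病人id' (patient id).\n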
\"记录时间\"], axis=1)\n# %%\nmid4 = pd.merge(mid3.drop(\"病人id\", axis=1), mid2, on=\"参数记录\")\nmid4.to_csv(\"outputcsv/mid4.csv\", index=None,\n float_format='%.3f', encoding=\"gbk\")\n# %%\nout = pd.merge(mid4, mid1, on=\"病人id\")\nout = out.rename(columns={'birthDay': '年龄'})\nout.to_csv(\"outputcsv/out.csv\", index=None,\n float_format='%.3f', encoding=\"gbk\")\n\n# %%\nout = pd.read_csv(\"outputcsv/out.csv\", encoding=\"gbk\")\nplt.matshow(out.corr(method=\"pearson\"))\nplt.colorbar()\nplt.show()\n# %%\n# 近零方差属性筛查\nl = []\nla = []\nfor i in out:\n la.append(i[0:1])\n l.append(out[i].value_counts().iloc[0])\nx = range(len(l))\nplt.bar(x, height=l, width=0.4, alpha=0.8, label=\"数量最大值\")\nplt.rcParams['font.sans-serif'] = ['SimHei']\nplt.rcParams['axes.unicode_minus'] = False\nplt.ylabel(\"出现次数\")\nplt.xticks([index for index in x], la)\nplt.xlabel(\"属性\")\nplt.title(\"近零方差变量统计\")\nplt.show()\n# %%\n# 特征选取\nfea = out.drop([\"电导度\", \"透析液温度\"], axis=1)\n\n\n# %%\n# 三种分类标签生成函数定义\n\n\ndef sbp1(df):\n n = len(df)\n series = pd.Series(0, dtype=int).reindex(df.index, method='pad')\n for i in range(n-1):\n sbpt = df['收缩压数值'].iloc[i]\n sbpt1 = df['收缩压数值'].iloc[i+1]\n if sbpt1 < 90.0:\n series.iloc[i] = 1\n series.iloc[n-1] = np.nan\n return series\n\n\ndef sbp2(df):\n n = len(df)\n series = pd.Series(0, dtype=int).reindex(df.index, method='pad')\n for i in range(n - 1):\n sbpt = df['收缩压数值'].iloc[i]\n sbpt1 = df['收缩压数值'].iloc[i+1]\n if sbpt-sbpt1 > 25.0:\n series.iloc[i] = 1\n series.iloc[n-1] = np.nan\n return series\n\n\ndef sbp3(df):\n n = len(df)\n series = pd.Series(0, dtype=int).reindex(df.index, method='pad')\n for i in range(n-1):\n sbpt = df['收缩压数值'].iloc[i]\n sbpt1 = df['收缩压数值'].iloc[i+1]\n if 0.75*sbpt > sbpt1:\n series.iloc[i] = 1\n series.iloc[n-1] = np.nan\n return series\n\n\ndef add123(df):\n df['sbp1'] = sbp1(df)\n df['sbp2'] = sbp2(df)\n df['sbp3'] = sbp3(df)\n return df\n\n\n# %%\nfea = fea.groupby(['病人id', '参数记录']).apply(add123)\nfea = fea.dropna()\nfea.to_csv(\"outputcsv/fea.csv\", index=None,\n float_format='%.3f', encoding=\"gbk\")\n\n# %%\nfor i in fea:\n print(fea[i].value_counts())\n\n\n# %%\n# 再次读入数据\n# 提取原始12列属性中的特征\nfea = pd.read_csv(\"outputcsv/fea.csv\", encoding=\"gbk\")\nfea = fea.drop(\"病人id\", axis=1)\nfea = fea.drop(\"参数记录\", axis=1)\nfea = fea.drop(\"记录时间\", axis=1)\nfea[\"收缩压数值\"] = fea[\"收缩压数值\"]-fea[\"透析前收缩压\"]\nfea = fea.drop(\"透析前收缩压\", axis=1)\nprint(fea.shape)\n\n# %%\n# 自定义的优化recall函数\n\n\ndef getRecall(y_true, y_pred):\n TP = K.sum(K.round(K.clip(y_true * y_pred, 0, 1))) # TP\n P = K.sum(K.round(K.clip(y_true, 0, 1)))\n FN = P-TP # FN=P-TP\n recall = TP / (TP + FN + K.epsilon()) # TP/(TP+FN)\n return recall\n\n# %%\n# 热编码实现\n\n\ndef one_hot_encode_object_array(arr):\n uniques, ids = np.unique(arr, return_inverse=True)\n return np_utils.to_categorical(ids, len(uniques))\n# %%\n# 模型生成函数\n\n\ndef Simplemodelcreate():\n model = Sequential()\n model.add(Dense(28, input_shape=(7,)))\n model.add(Activation('sigmoid'))\n model.add(Dense(2))\n model.add(Activation('softmax'))\n model.summary()\n return model\n# %%\n# 数据平衡权重生成\n\n\ndef bal(y):\n class_weight1 = class_weight.compute_class_weight('balanced',\n np.unique(y),\n y)\n cw = dict(enumerate(class_weight1))\n return cw\n\n\n# %%\n# 单层神经网络模拟实现逻辑回归算法\nX = fea.values[:, 0:7]\ny1 = fea.values[:, 7]\ny2 = fea.values[:, 8]\ny3 = fea.values[:, 9]\n# Make one -hot encoder\ntrain_y_ohe1 = one_hot_encode_object_array(y1)\ntrain_y_ohe2 = one_hot_encode_object_array(y2)\ntrain_y_ohe3 = one_hot_encode_object_array(y3)\n# 
\n\n# %%\n# a single-layer network emulating logistic regression\nX = fea.values[:, 0:7]\ny1 = fea.values[:, 7]\ny2 = fea.values[:, 8]\ny3 = fea.values[:, 9]\n# make the one-hot encodings\ntrain_y_ohe1 = one_hot_encode_object_array(y1)\ntrain_y_ohe2 = one_hot_encode_object_array(y2)\ntrain_y_ohe3 = one_hot_encode_object_array(y3)\n# balance the dataset\ncw1 = bal(y1)\ncw2 = bal(y2)\ncw3 = bal(y3)\nmodel1 = Simplemodelcreate()\nmodel1.compile(loss='binary_crossentropy', metrics=[\n 'accuracy', getRecall], optimizer=RMSprop(lr=0.0001))\nmodel2 = Simplemodelcreate()\nmodel2.compile(loss='binary_crossentropy', metrics=[\n 'accuracy', getRecall], optimizer=RMSprop(lr=0.0001))\nmodel3 = Simplemodelcreate()\nmodel3.compile(loss='binary_crossentropy', metrics=[\n 'accuracy', getRecall], optimizer=RMSprop(lr=0.0001))\nhistory1 = model1.fit(X, train_y_ohe1, verbose=1, epochs=4,\n batch_size=9, validation_split=0.2, shuffle=True, class_weight=cw1)\n\nhistory2 = model2.fit(X, train_y_ohe2, verbose=1, epochs=4,\n batch_size=9, validation_split=0.2, shuffle=True, class_weight=cw2)\n\nhistory3 = model3.fit(X, train_y_ohe3, verbose=1, epochs=4,\n batch_size=9, validation_split=0.2, shuffle=True, class_weight=cw3)\n\nlr1 = model1.predict(X, batch_size=9, verbose=1, steps=None)\nlr2 = model2.predict(X, batch_size=9, verbose=1, steps=None)\nlr3 = model3.predict(X, batch_size=9, verbose=1, steps=None)\nmodel1.save(\"models/LRforsbplabel-1.h5\")\nmodel2.save(\"models/LRforsbplabel-2.h5\")\nmodel3.save(\"models/LRforsbplabel-3.h5\")\nprint(\"model saved!\")\n# %%\n# model factory for the linear (SVM-like) classifier\n\n\ndef SVMmodelcreate():\n model = Sequential()\n model.add(Dense(7, input_shape=(7,), kernel_regularizer=regularizers.l2(0.5)))\n model.add(Activation('linear'))\n model.add(Dense(2, kernel_regularizer=regularizers.l2(0.5)))\n model.add(Activation('softmax'))\n model.summary()\n return model\n# %%\n# custom loss that emulates an SVM classifier\n\n\ndef categorical_squared_hinge(y_true, y_pred):\n y_true = 2. * y_true - 1\n # map [0,1] to [-1,1]; note that SVM class labels are -1 and 1\n # hinge loss, modeled on the hinge loss that ships with Keras\n vvvv = K.maximum(1. - y_true * y_pred, 0.)\n vvv = K.square(vvvv)\n # the paper 'Deep Learning using Linear Support Vector Machines' squares the hinge term\n vv = K.sum(vvv, 1, keepdims=False)\n # axis=len(y_true.get_shape()) - 1\n v = K.mean(vv, axis=-1)\n return v\n
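\n# (Editor's note) With labels mapped to {-1, 1}, the loss above computes\n# L = mean(sum(max(1 - y*y_hat, 0)^2)) -- the squared hinge loss, roughly a\n# linear-SVM objective when paired with the l2-regularized linear layer.\n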
\n\n# %%\n# SVM-style classifier trained with Keras\nX = fea.values[:, 0:7]\ny1 = fea.values[:, 7]\ny2 = fea.values[:, 8]\ny3 = fea.values[:, 9]\n# make the one-hot encodings\ntrain_y_ohe1 = one_hot_encode_object_array(y1)\ntrain_y_ohe2 = one_hot_encode_object_array(y2)\ntrain_y_ohe3 = one_hot_encode_object_array(y3)\ncw1 = bal(y1)\ncw2 = bal(y2)\ncw3 = bal(y3)\nmodel1 = SVMmodelcreate()\nmodel1.compile(optimizer=RMSprop(lr=0.0001), loss=[\n categorical_squared_hinge], metrics=['accuracy', getRecall])\nmodel2 = SVMmodelcreate()\nmodel2.compile(optimizer=RMSprop(lr=0.0001), loss=[\n categorical_squared_hinge], metrics=['accuracy', getRecall])\nmodel3 = SVMmodelcreate()\nmodel3.compile(optimizer=RMSprop(lr=0.0001), loss=[\n categorical_squared_hinge], metrics=['accuracy', getRecall])\nhistory1 = model1.fit(X, train_y_ohe1, verbose=1, epochs=8,\n batch_size=50, validation_split=0.2, shuffle=True)\n\n# history1 = model1.fit(X, train_y_ohe1, verbose=1, epochs=8,\n# batch_size=50, validation_split=0.2, shuffle=True,class_weight=cw1)\nhistory2 = model2.fit(X, train_y_ohe2, verbose=1, epochs=8,\n batch_size=50, validation_split=0.2, shuffle=True)\n# history2 = model2.fit(X, train_y_ohe2, verbose=1, epochs=8,\n# batch_size=50, validation_split=0.2, shuffle=True,class_weight=cw2)\nhistory3 = model3.fit(X, train_y_ohe3, verbose=1, epochs=8,\n batch_size=50, validation_split=0.2, shuffle=True)\n# history3 = model3.fit(X, train_y_ohe3, verbose=1, epochs=8,\n# batch_size=50, validation_split=0.2, shuffle=True,class_weight=cw3)\nsvm1 = model1.predict(X, batch_size=50, verbose=1, steps=None)\nsvm2 = model2.predict(X, batch_size=50, verbose=1, steps=None)\nsvm3 = model3.predict(X, batch_size=50, verbose=1, steps=None)\nmodel1.save(\"models/SVMforsbplabel-1.h5\")\nmodel2.save(\"models/SVMforsbplabel-2.h5\")\nmodel3.save(\"models/SVMforsbplabel-3.h5\")\nprint(\"model saved!\")\n# %%\n# multi-layer neural network\ndef ANNmodelcreate():\n model = Sequential()\n model.add(Dense(144, input_shape=(7,),activation=LeakyReLU()))\n model.add(Dense(72,activation=LeakyReLU()))\n model.add(Dense(36,activation=LeakyReLU()))\n model.add(Dense(18,activation=LeakyReLU()))\n model.add(Dense(2))\n model.add(Activation('softmax'))\n model.summary()\n return model\n\n\n# %%\n# train the hand-built multi-layer network\nX = fea.values[:, 0:7]\ny1 = fea.values[:, 7]\ny2 = fea.values[:, 8]\ny3 = fea.values[:, 9]\n# make the one-hot encodings\nep=6\ntrain_y_ohe1 = one_hot_encode_object_array(y1)\ntrain_y_ohe2 = one_hot_encode_object_array(y2)\ntrain_y_ohe3 = one_hot_encode_object_array(y3)\ncw1 = bal(y1)\ncw2 = bal(y2)\ncw3 = bal(y3)\nmodel1 = ANNmodelcreate()\nmodel1.compile(loss='binary_crossentropy', metrics=[\n 'accuracy', getRecall], optimizer=\"adam\")\nmodel2 = ANNmodelcreate()\nmodel2.compile(loss='binary_crossentropy', metrics=[\n 'accuracy', getRecall], optimizer=\"adam\")\nmodel3 = ANNmodelcreate()\nmodel3.compile(loss='binary_crossentropy', metrics=[\n 'accuracy', getRecall], optimizer=\"adam\")\n\nhistory1 = model1.fit(X, train_y_ohe1, verbose=1, epochs=ep,\n batch_size=20, validation_split=0.2, shuffle=True, class_weight=cw1)\n\nhistory2 = model2.fit(X, train_y_ohe2, verbose=1, epochs=ep,\n batch_size=20, validation_split=0.2, shuffle=True, class_weight=cw2)\n\nhistory3 = model3.fit(X, train_y_ohe3, verbose=1, epochs=ep,\n batch_size=20, validation_split=0.2, shuffle=True, class_weight=cw3)\nann1 = model1.predict(X, batch_size=20, verbose=1, steps=None)\nann2 = model2.predict(X, batch_size=20, verbose=1, steps=None)\nann3 = model3.predict(X, batch_size=20, verbose=1, steps=None)\nmodel1.save(\"models/ANNforsbplabel-1.h5\")\nmodel2.save(\"models/ANNforsbplabel-2.h5\")\nmodel3.save(\"models/ANNforsbplabel-3.h5\")\nprint(\"model saved!\")\n# %%\n# visualize the training history\n\n\ndef show(history):\n print(history.history)\n l = []\n ll = []\n for i in history.history:\n ll.append(i)\n l.append(history.history[i])\n l = np.array(l).T\n # print(l)\n j = range(1, 1+len(l))\n count = 0\n for i in ll:\n plt.plot(j, l[:, count], label=i)\n count = count+1\n plt.rcParams['font.sans-serif'] = ['SimHei']\n plt.rcParams['axes.unicode_minus'] = False\n plt.ylabel(\"训练参数值\")\n plt.xlabel(\"训练轮数\")\n plt.title(\"训练结果可视化\")\n plt.legend()\n plt.show()\n\n\n# %%\n# plot the training curves\nshow(history1)\nshow(history2)\nshow(history3)\n# %%\n# convert the one-hot prediction matrix back to a label column\n\n\ndef trans(res):\n l = []\n for i in res:\n if i[0] > i[1]:\n l.append(0)\n else:\n l.append(1)\n l = np.array(l)\n return l\n\n\n# %%\n# generate the results file\nfea = pd.read_csv(\"outputcsv/fea.csv\", encoding=\"gbk\")\nres = 
pd.DataFrame()\nres[\"真实分类标签1\"] = fea[\"sbp1\"]\nres[\"真实分类标签2\"] = fea[\"sbp2\"]\nres[\"真实分类标签3\"] = fea[\"sbp3\"]\nlabel_list = [\"真实分类标签1\", \"真实分类标签2\", \"真实分类标签3\"]\nx = range(3)\nplt.style.use('ggplot')\nfor i in res:\n print(res[i].value_counts())\n rects1 = plt.bar(x, height=res[i].value_counts(\n ).iloc[0], width=0.2, alpha=0.8, label=\"低风险\")\n rects2 = plt.bar([i + 0.2 for i in x],\n height=res[i].value_counts().iloc[1], width=0.2, label=\"高风险\")\nplt.rcParams['font.sans-serif'] = ['SimHei']\nplt.rcParams['axes.unicode_minus'] = False\nplt.xticks([index + 0.1 for index in x], label_list)\nplt.xlabel(\"标签\")\nplt.ylabel(\"数量\")\nplt.title(\"每种标签的数量直方图\")\nplt.show()\n\n# %%\n","repo_name":"zmzfpc/Dataming","sub_path":"1854116_朱明志_hw3/hw3.py","file_name":"hw3.py","file_ext":"py","file_size_in_byte":14658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"33774460561","text":"#!/usr/bin/python\n#------ This module operates on CASTEP output and input files\nimport sys\nfrom CASTEP_io_module import *\nfrom CASTEP_io_module_cellfile import *\n\n\n\n##############################################\n#------ Main\n##############################################\nname_flakes = Cellfile('CNT_66_CDT_cluster_convergence-newcell.2_flakes.cell')\nname_flakes.readdata()\n\"\"\"\n\t\tself.filename=name\n self.k_points_spacing = None\n self.lattice_vec_lines = None\n self.lattice_vec = None\n self.atomic_position_lines = None\n self.atoms = None\n self.ionic_constraints = None\n self.extra_parameters = None\n\"\"\"\n\n\nname_flakes.get_celldata()\n#print ''.join(name_flakes.atomic_position_lines)\nname_flakes.get_atoms()\n#name_flakes.atoms[0].x += 1\n#name_flakes.update_atomic_lines()\n#print ''.join(name_flakes.atomic_position_lines)\n\nname_flakes.update_lattice_vectorlines()\n#print ''.join(name_flakes.lattice_vec)\n#print ''.join(name_flakes.atomic_position_lines)\n#print ''.join(name_flakes.ionic_constraints)\n#print ''.join(name_flakes.extra_parameters)\nname_flakes.write_cell('test.cell')\n\ntest_structure = Structure()\nname_flakes.populate_structure_class(test_structure)\n#print test_structure.lattice_vectors\n#print [o.x for o in test_structure.atom_list]\n\ntest_Atoms = get_xyz_positions('test_file.xyz')\ntest_vector = get_xyz_cell(test_Atoms)\n\nnew_cell = Cellfile()\nnew_cell.lattice_vec = test_vector\nnew_cell.atoms = test_Atoms\n\nnew_cell.update_atomic_lines()\nnew_cell.update_lattice_vectorlines()\n\nnew_cell.write_cell('supertest.cell')\n\n\n#------ Cstepfilefile test\n\ncastepfile_test = Castepfilefile('CNT_66_CDT_cluster_convergence-newcell.2_flakes.castep')\ncastepfile_test.readdata()\ncastepfile_test.get_lattice_vectors()\ncastepfile_test.get_atomic_position_lines()\ncastepfile_test.get_atomic_positions()\n\n","repo_name":"jgolebiowski/coding-workspace","sub_path":"castep_python/IO_module_testing_v0/CASTEP_script.py","file_name":"CASTEP_script.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"44735070164","text":"import os\n\nfrom celery import Celery\nfrom celery.schedules import crontab\n\n\n# set the default Django settings module for the 'celery' program.\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'HealthPartner.settings')\n\napp = Celery('HealthPartner')\n\n# Using a string here means the worker doesn't have to serialize\n# the configuration object to child processes.\n# - namespace='CELERY' means all 
celery-related configuration keys\n# should have a `CELERY_` prefix.\napp.config_from_object('django.conf:settings', namespace='CELERY')\n\n\n@app.on_after_configure.connect\ndef setup_periodic_tasks(sender, **kwargs):\n # Calls test('hello') every 10 seconds.\n sender.add_periodic_task(10.0, get_post_by_reddit_Api.s(\"hello\"), name='add every 10')\n\n\n# Load task modules from all registered Django app configs.\n# @app.on_after_finalize\n# app.autodiscover_tasks()\n\n\n@app.task(bind=True)\ndef debug_task(self):\n print(f'Request: {self.request!r}')\n\n@app.task()\ndef get_post_by_reddit_Api(arg):\n # reddit = praw.Reddit(client_id='ISnOA13qK99q4A',\n # client_secret='cEKVwb65zJJoejN6YphDRamyHycdHA',\n # user_agent='my user agent')\n #\n # # to find the top most submission in the subreddit \"HEALTH\"\n # subreddit = reddit.subreddit('HEALTH')\n #\n # for submission in subreddit.top(limit=5):\n # # displays the submission title\n # tweets = Tweets(user_name='hameeed', description=submission.title)\n # tweets.save()\n print('periodic celery task ')\n","repo_name":"umairkhadim10/HealthPartner","sub_path":"HealthPartner/HealthPartner/celery.py","file_name":"celery.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"18613168810","text":"import torch\nfrom mmdet.core.bbox.assigners.assign_result import AssignResult\nfrom mmdet.core.bbox.assigners.base_assigner import BaseAssigner\n\nfrom ..builder import ROTATED_BBOX_ASSIGNERS\n\n\n@ROTATED_BBOX_ASSIGNERS.register_module()\nclass ConvexAssigner(BaseAssigner):\n \"\"\"Assign a corresponding gt bbox or background to each bbox. Each\n proposals will be assigned with `0` or a positive integer indicating the\n ground truth index.\n\n - 0: negative sample, no assigned gt\n - positive integer: positive sample, index (1-based) of assigned gt\n\n Args:\n scale (float): IoU threshold for positive bboxes.\n pos_num (float): find the nearest pos_num points to gt center in this\n level.\n \"\"\"\n\n def __init__(self, scale=4, pos_num=3):\n self.scale = scale\n self.pos_num = pos_num\n\n def get_horizontal_bboxes(self, gt_rbboxes):\n \"\"\"get_horizontal_bboxes from polygons.\n\n Args:\n gt_rbboxes (torch.Tensor): Groundtruth polygons, shape (k, 8).\n\n Returns:\n gt_rect_bboxes (torch.Tensor): The horizontal bboxes, shape (k, 4).\n \"\"\"\n gt_xs, gt_ys = gt_rbboxes[:, 0::2], gt_rbboxes[:, 1::2]\n gt_xmin, _ = gt_xs.min(1)\n gt_ymin, _ = gt_ys.min(1)\n gt_xmax, _ = gt_xs.max(1)\n gt_ymax, _ = gt_ys.max(1)\n gt_rect_bboxes = torch.cat([\n gt_xmin[:, None], gt_ymin[:, None], gt_xmax[:, None], gt_ymax[:,\n None]\n ],\n dim=1)\n\n return gt_rect_bboxes\n\n def assign(self,\n points,\n gt_rbboxes,\n gt_rbboxes_ignore=None,\n gt_labels=None,\n overlaps=None):\n \"\"\"Assign gt to bboxes.\n\n The assignment is done in following steps\n\n 1. compute iou between all bbox (bbox of all pyramid levels) and gt\n 2. compute center distance between all bbox and gt\n 3. on each pyramid level, for each gt, select k bbox whose center\n are closest to the gt center, so we total select k*l bbox as\n candidates for each gt\n 4. get corresponding iou for the these candidates, and compute the\n mean and std, set mean + std as the iou threshold\n 5. select these candidates whose iou are greater than or equal to\n the threshold as positive\n 6. 
limit the positive sample's center in gt\n\n Args:\n points (torch.Tensor): Points to be assigned, shape(n, 18).\n gt_rbboxes (torch.Tensor): Groundtruth polygons, shape (k, 8).\n gt_rbboxes_ignore (Tensor, optional): Ground truth polygons that\n are labelled as `ignored`, e.g., crowd boxes in COCO.\n gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).\n\n Returns:\n :obj:`AssignResult`: The assign result.\n \"\"\"\n num_points = points.shape[0]\n num_gts = gt_rbboxes.shape[0]\n\n if num_gts == 0 or num_points == 0:\n # If no truth assign everything to the background\n assigned_gt_inds = points.new_full((num_points, ),\n 0,\n dtype=torch.long)\n if gt_labels is None:\n assigned_labels = None\n else:\n assigned_labels = points.new_full((num_points, ),\n -1,\n dtype=torch.long)\n return AssignResult(\n num_gts, assigned_gt_inds, None, labels=assigned_labels)\n\n points_xy = points[:, :2]\n points_stride = points[:, 2]\n points_lvl = torch.log2(points_stride).int()\n lvl_min, lvl_max = points_lvl.min(), points_lvl.max()\n\n assert gt_rbboxes.size(1) == 8, 'gt_rbboxes should be (N * 8)'\n gt_bboxes = self.get_horizontal_bboxes(gt_rbboxes)\n\n # assign gt rbox\n gt_bboxes_xy = (gt_bboxes[:, :2] + gt_bboxes[:, 2:]) / 2\n\n gt_bboxes_wh = (gt_bboxes[:, 2:] - gt_bboxes[:, :2]).clamp(min=1e-6)\n scale = self.scale\n gt_bboxes_lvl = ((torch.log2(gt_bboxes_wh[:, 0] / scale) +\n torch.log2(gt_bboxes_wh[:, 1] / scale)) / 2).int()\n gt_bboxes_lvl = torch.clamp(gt_bboxes_lvl, min=lvl_min, max=lvl_max)\n\n # stores the assigned gt index of each point\n assigned_gt_inds = points.new_zeros((num_points, ), dtype=torch.long)\n # stores the assigned gt dist (to this point) of each point\n assigned_gt_dist = points.new_full((num_points, ), float('inf'))\n points_range = torch.arange(points.shape[0])\n\n for idx in range(num_gts):\n gt_lvl = gt_bboxes_lvl[idx]\n # get the index of points in this level\n lvl_idx = gt_lvl == points_lvl\n points_index = points_range[lvl_idx]\n # get the points in this level\n lvl_points = points_xy[lvl_idx, :]\n # get the center point of gt\n gt_point = gt_bboxes_xy[[idx], :]\n # get width and height of gt\n gt_wh = gt_bboxes_wh[[idx], :]\n # compute the distance between gt center and\n # all points in this level\n points_gt_dist = ((lvl_points - gt_point) / gt_wh).norm(dim=1)\n # find the nearest k points to gt center in this level\n min_dist, min_dist_index = torch.topk(\n points_gt_dist, self.pos_num, largest=False)\n # the index of nearest k points to gt center in this level\n min_dist_points_index = points_index[min_dist_index]\n\n # The less_than_recorded_index stores the index\n # of min_dist that is less then the assigned_gt_dist. 
Where\n # assigned_gt_dist stores the dist from previous assigned gt\n # (if exist) to each point.\n less_than_recorded_index = min_dist < assigned_gt_dist[\n min_dist_points_index]\n # The min_dist_points_index stores the index of points satisfy:\n # (1) it is k nearest to current gt center in this level.\n # (2) it is closer to current gt center than other gt center.\n min_dist_points_index = min_dist_points_index[\n less_than_recorded_index]\n # assign the result\n assigned_gt_inds[min_dist_points_index] = idx + 1\n assigned_gt_dist[min_dist_points_index] = min_dist[\n less_than_recorded_index]\n\n if gt_labels is not None:\n assigned_labels = assigned_gt_inds.new_full((num_points, ),\n -1,\n dtype=torch.long)\n pos_inds = torch.nonzero(\n assigned_gt_inds > 0, as_tuple=False).squeeze()\n if pos_inds.numel() > 0:\n assigned_labels[pos_inds] = gt_labels[\n assigned_gt_inds[pos_inds] - 1]\n else:\n assigned_labels = None\n\n return AssignResult(\n num_gts, assigned_gt_inds, None, labels=assigned_labels)\n","repo_name":"open-mmlab/mmrotate","sub_path":"mmrotate/core/bbox/assigners/convex_assigner.py","file_name":"convex_assigner.py","file_ext":"py","file_size_in_byte":7235,"program_lang":"python","lang":"en","doc_type":"code","stars":1572,"dataset":"github-code","pt":"60"} +{"seq_id":"31309142884","text":"if __name__ == \"__main__\":\n import sys\n from PySide.QtGui import QApplication\n\n from koi.base_logging import init_logging\n from koi.Configurator import init_i18n,load_configuration, configuration\n init_logging()\n init_i18n()\n load_configuration()\n\n from koi.db_mapping import metadata\n from koi.datalayer.database_session import init_db_session\n init_db_session(configuration.database_url, metadata, False or configuration.echo_query)\n\nfrom PySide.QtCore import Slot,Signal,QModelIndex,Qt\nfrom PySide.QtGui import QDialog, QVBoxLayout, QDialogButtonBox, QFormLayout, QHBoxLayout, QPixmap, QPushButton, QItemSelectionModel, QMessageBox, QLineEdit, QWidget\nfrom sqlalchemy.orm.collections import InstrumentedList\n\nfrom koi.Configurator import mainlog\nfrom koi.datalayer.database_session import session\nfrom koi.gui.PrototypedModelView import PrototypedQuickView,PrototypedModelView\nfrom koi.gui.dialog_utils import TitleWidget, SubFrame, showErrorBox, yesNoBox, showWarningBox\nfrom koi.datalayer.generic_access import recursive_defrost_into, generic_delete, generic_load_all_frozen\nfrom koi.gui.FilteringModel import FilteringModel\n\n\nclass FilterLineEdit(QLineEdit):\n\n key_down = Signal()\n\n def __init__(self):\n super(FilterLineEdit,self).__init__()\n\n def keyPressEvent(self,event):\n if event.key() == Qt.Key_Down:\n event.ignore()\n self.key_down.emit()\n\n return super(FilterLineEdit,self).keyPressEvent(event)\n\n\nclass MetaFormDialog(QDialog):\n\n def preselect_item(self,item):\n # Pay attention ! The selection implictely uses \"__eq__\" to find out\n # an object in the list of objects. So be careful with objects\n # that are outside sessions and which have no __eq__ operation : these\n # will be compared on basis of the python's Id which might defer for\n # 2 objects denoting the same thing. 
See Customer for an example.\n\n # mainlog.debug(\"Preselect item {}\".format(item))\n # mainlog.debug(\"Preselect item in this list\")\n # mainlog.debug(\" -- \".join(sorted(map(lambda c:c.fullname,self.list_model.objects))))\n t = self.list_model.objects.index(item)\n # mainlog.debug(\"Preselect item index {}\".format(t))\n self.list_view.setCurrentIndex(self.list_model.index(t,0))\n\n\n def __init__(self,parent,dialog_title,list_title,form_title,mapped_klass,table_prototype,form_prototype,sort_criterion,index_builder):\n\n \"\"\"\n sort_criterion is a SQLAlchemy colum used when querying the list of edited objects to sort it.\n index_builder : a function that takes an object of the mapped class and returns a string\n suitable for index building.\n \"\"\"\n super(MetaFormDialog,self).__init__(parent)\n\n self.index_builder = index_builder\n self.sort_criterion = sort_criterion\n self.form_prototype = form_prototype\n self.mapped_klass = mapped_klass\n # Locate the primary key\n # this will work only with a one-field PK\n pk_column = list(filter( lambda c:c.primary_key, self.mapped_klass.__table__.columns))[0]\n self.key_field = pk_column.name\n\n\n self.in_save = False\n\n # The current item is the one currently shown in the\n # form. If it's None, then the form contains data\n # for a soon-to-be created item. Else, it's a frozen\n # copy. Since we work on frozen stuff, we can carry\n # the object around safely\n\n self.current_item = None\n\n\n self.list_model = PrototypedModelView(table_prototype, self)\n\n self.list_model_filtered = FilteringModel(self)\n self.list_model_filtered.setSourceModel(self.list_model)\n\n self.line_in = FilterLineEdit()\n self.line_in.key_down.connect(self._focus_on_list)\n self.line_in.textChanged.connect(self._filter_changed)\n\n self.list_view = PrototypedQuickView(table_prototype, self)\n self.list_view.setTabKeyNavigation(False)\n\n\n self.setWindowTitle(dialog_title)\n self.title_widget = TitleWidget(dialog_title,self)\n\n self.list_view.setModel(self.list_model_filtered)\n self.list_view.horizontalHeader().hide()\n self.list_view.verticalHeader().hide()\n self.list_view.horizontalHeader().setStretchLastSection(True)\n\n\n blayout = QVBoxLayout()\n\n b = QPushButton(_(\"New\"))\n b.setObjectName(\"newButton\")\n\n b.clicked.connect(self.create_action)\n blayout.addWidget(b)\n\n b = QPushButton(_(\"Save\"))\n b.setObjectName(\"saveButton\")\n b.clicked.connect(self.save_action)\n blayout.addWidget(b)\n\n b = QPushButton(_(\"Delete\"))\n b.setObjectName(\"deleteButton\")\n b.clicked.connect(self.delete_action)\n blayout.addWidget(b)\n\n blayout.addStretch()\n\n self.buttons = QDialogButtonBox()\n self.buttons.addButton( QDialogButtonBox.Ok)\n\n # BUG According to QLayout, the layout takes ownership of the widget\n # therefore, we have to pay attention when deleting...\n\n form_layout = QFormLayout()\n for p in self.form_prototype:\n w = p.edit_widget(self)\n w.setEnabled(p.is_editable)\n w.setObjectName(\"form_\" + p.field)\n form_layout.addRow( p.title, w)\n\n top_layout = QVBoxLayout()\n top_layout.addWidget(self.title_widget)\n\n hl = QHBoxLayout()\n\n\n vlayout = QVBoxLayout()\n\n vlayout.addWidget(self.line_in)\n vlayout.addWidget(self.list_view)\n # gbox = QGroupBox(list_title,self)\n # gbox.setLayout(vlayout)\n gbox = SubFrame(list_title,vlayout,self)\n hl.addWidget(gbox)\n\n # gbox = QGroupBox(form_title,self)\n # gbox.setLayout(form_layout)\n gbox = SubFrame(form_title,form_layout,self)\n hl.addWidget(gbox)\n hl.addLayout(blayout)\n\n # 
hl.setStretch(0,0.3)\n # hl.setStretch(1,0.7)\n # hl.setStretch(2,0)\n\n top_layout.addLayout(hl)\n top_layout.addWidget(self.buttons)\n\n self.setLayout(top_layout) # QWidget takes ownership of the layout\n self.buttons.accepted.connect(self.reject)\n\n QWidget.setTabOrder(self.line_in, self.list_view)\n\n nb_objs = self._refresh_list()\n self.line_in.setFocus()\n self.list_view.selectionModel().currentChanged.connect(self.selected_item_changed) # FIXME Clear ownership issue\n if nb_objs > 0:\n self.list_view.selectRow(0)\n else:\n # Special case to automaticaly enter creation mode when\n # the list is empty\n self.create_action()\n\n @Slot()\n def _focus_on_list(self):\n \"\"\" When the user hits the down key on the filter, we transfer\n the focus to the filtered list\n \"\"\"\n self.list_view.setFocus()\n self.list_view.selectionModel().setCurrentIndex(self.list_view.model().index(0,0), QItemSelectionModel.ClearAndSelect)\n\n\n def _filter_changed(self,s):\n self.list_model_filtered.setFilterFixedString(s)\n # self.list_view.selectRow(0)\n\n self.list_view.selectionModel().setCurrentIndex(self.list_view.model().index(0,0), QItemSelectionModel.ClearAndSelect)\n\n def _refresh_list(self):\n mainlog.debug(\"_refresh_list\")\n self.current_item = None\n objs = self.objects_list()\n\n self.list_model.buildModelFromObjects(objs)\n self.list_model_filtered.setIndexData([self.index_builder(o) for o in objs])\n\n return len(objs)\n\n def _select_on_object_id(self,o_id,update_view_selection=True):\n objects = self.list_model.objects\n\n ndx = -1\n for i in range(self.list_model.rowCount()):\n obj = self.list_model.object_at(i)\n if getattr(obj,self.key_field) == o_id:\n ndx = i\n break\n\n mainlog.debug(\"_select_on_object_id: ndx={}\".format(ndx))\n\n if update_view_selection:\n self.list_view.clearSelection()\n\n # Look where the selected object is in the *filtered* view\n\n filtered_ndx = self.list_model_filtered.mapFromSource( self.list_model.index(ndx,0))\n\n mainlog.debug(\"Filtered ndx isValid = {}\".format(filtered_ndx.isValid()))\n\n if not filtered_ndx.isValid():\n # The object is not visible in the filtered list.\n # So we clear the filter to show everything\n\n # self.line_in.setText(\"\") # This triggers a refresh of the list\n filtered_ndx = self.list_model_filtered.mapFromSource( self.list_model.index(ndx,0))\n\n mainlog.debug(\"Filtered ndx isValid = {}\".format(filtered_ndx.isValid()))\n\n self.list_view.setCurrentIndex(filtered_ndx)\n self.list_view.selectionModel().setCurrentIndex(filtered_ndx, QItemSelectionModel.NoUpdate)\n self.list_view.setFocus()\n\n\n\n def _populate_form(self,obj):\n mainlog.debug(\"_populate_form with {}\".format(obj))\n self.current_item = obj\n\n if obj:\n for p in self.form_prototype:\n mainlog.debug(\" {} -> {}\".format(p.field, getattr(obj,p.field)))\n p.set_edit_widget_data(getattr(obj,p.field))\n else:\n # Clear the form\n for p in self.form_prototype:\n p.set_edit_widget_data( p.default_value())\n\n\n def _load_forms_data(self):\n d = dict()\n\n for p in self.form_prototype:\n # mainlog.debug(\"_load_forms_data : {} = {}\".format(p.field, p.edit_widget_data()))\n d[p.field] = p.edit_widget_data()\n\n if self.current_item:\n d[self.key_field] = getattr(self.current_item, self.key_field)\n else:\n mainlog.debug(\"_load_forms_data : no current item\")\n return d\n\n\n\n def _data_changed(self, form_data, obj):\n \"\"\" True if the data in the hash form_data are different\n than what is in sqla_obj\n \"\"\"\n\n def cmp_instrumented_list(a,b):\n # 
mainlog.debug(\"cmp_instrumented_list\")\n\n if len(a) != len(b):\n return False\n\n # mainlog.debug(\"cmp_instrumented_list lengths are equal\")\n\n for i in range(len(a)):\n if a[i] != b[i]:\n # mainlog.debug(u\"cmp_instrumented_list {} != {}\".format(a[i],b[i]))\n return False\n\n return True\n\n def pixmap_hash(pixmap):\n \"\"\" Compute a hash of the *content* of a picture.\n I've tried to use QPixmap.cacheKey() but somehow\n it's not dependent on content only (thus two\n Pixmap containing the same picture have different\n cacheKey() (and the documentation is not quite\n clear.\n \"\"\"\n\n import hashlib\n # mainlog.debug(\"Hashing pixmap {} {}\".format(id(pixmap),type(pixmap)))\n m = hashlib.md5()\n m.update(pixmap.toImage().bits())\n return m.digest()\n\n if obj:\n # Form data compared to actual object content\n for p in self.form_prototype:\n if not p.is_editable:\n continue\n\n attr = getattr(obj,p.field)\n new_attr = form_data[p.field]\n\n # Be cool with spaces. Note that we can have surplus\n # spaces from database as well as from the form...\n\n if type(attr) == str:\n attr = attr.strip() or None\n if type(new_attr) == str:\n new_attr = new_attr.strip() or None\n\n # mainlog.debug(u\"MetaFormDialog : field:{} : obj:{} - form:{}\".format(p.field, attr, new_attr))\n\n if ( (type(attr) == QPixmap and pixmap_hash(attr) != pixmap_hash(new_attr)) or\\\n (type(attr) == InstrumentedList and not cmp_instrumented_list(attr,new_attr)) or\\\n (type(attr) != QPixmap and type(attr) != InstrumentedList and attr != new_attr)):\n mainlog.debug(u\"_data_changed2 : data are different on {} : '{}' != '{}'\".format(p.field, attr, new_attr))\n return True\n return False\n else:\n # Form data compared to empty object\n for p in self.form_prototype:\n new_attr = form_data[p.field]\n mainlog.debug(\"MetaFormDialog : empty ! {} : new_attr {}, default = {}\".format(p.field, new_attr, p.default_value()))\n # We compare to a non filled object\n if new_attr and str(new_attr) != u\"\":\n # That seems like a change, but we'll consider it\n # only if we differ from the default value\n # This helps in case we compare to fields which can\n # not be None when empty (for example a combo box with\n # a few values)\n if new_attr != p.default_value():\n return True\n return False\n\n\n def _validate_and_save(self, form_data):\n \"\"\" Returns saved object's id or False is save could not be\n completed (either because there are errors in the validation\n or because there are other technical errors).\n \"\"\"\n\n errors = dict()\n for p in self.form_prototype:\n data = form_data[p.field]\n\n if p.is_editable:\n v = p.validate(data)\n if v != True:\n errors[p.title] = v\n\n if len(errors) > 0:\n info_text = \"\"\n for field,error in errors.items():\n info_text += u\"
  • {}
  • \".format(error)\n\n showErrorBox(_(\"Some of the data you encoded is not right\"),u\"
      {}
    \".format(info_text))\n return False\n\n\n # check = self.check_before_save(self.current_item)\n\n check = True\n\n if check == True:\n try:\n return self.save_object(form_data)\n except Exception as e:\n showErrorBox(_(\"There was an error while saving\"),str(e),e)\n return False\n else:\n showErrorBox(_(\"There was an error while saving\"),check)\n return False\n\n SAVE_DECLINED_BY_USER = -1\n SAVE_FAILED_BECAUSE_OF_ERRORS = -2\n\n def _save_if_necessary(self):\n \"\"\" Returns :\n - primary key if something was saved\n - True if the user choose to not save (for wahtever reason) or a save\n was not needed (no data changed)\n - False if there were errors during the save\n \"\"\"\n\n form_data = self._load_forms_data()\n mainlog.debug(\"_save_if_necessary: form_data = {}\".format(form_data))\n if self._data_changed(form_data, self.current_item):\n ynb = yesNoBox(_(\"Data were changed\"),\n _(\"You have changed some of the data in this. Do you want to save before proceeding ?\"))\n\n if ynb == QMessageBox.Yes:\n saved_obj_id = self._validate_and_save(form_data)\n mainlog.debug(\"_save_if_necessary: saved object id {}\".format(saved_obj_id))\n if saved_obj_id != False:\n mainlog.debug(\"_save_if_necessary: returning {}\".format(saved_obj_id))\n return saved_obj_id\n else:\n # There were errors while trying to save\n return False\n\n return True\n\n\n @Slot(QModelIndex,QModelIndex)\n def selected_item_changed(self, current, previous):\n\n mainlog.debug(\"selected_item_changed old: {} new: {} in_save:{}\".format(previous.row(),current.row(), self.in_save))\n\n # The test below avoids some recursion. It's a bit more clever\n # than it looks. What happens is this. The user modifies\n # some data then ask to change the edited object (in the left list)\n # Doing so it triggers this method. The program then save (if\n # necessary). But that save may trigger a reorganisation of the\n # list. So what the user has selected may be at a different\n # index than the \"current\" one we received as a parameter\n # of this method. To account for that we actually reselect\n # the item in the table. And this triggers the recursion we avoid\n # here. FIXME there is a recursion but the way we avoid\n # it is not 100% satisfactory, we should use a \"semaphore\"\n # for that.\n\n\n if current.isValid() and current.row() >= 0 and \\\n current.row() != previous.row():\n\n ndx = self.list_model_filtered.mapToSource(self.list_model_filtered.index(current.row(),0))\n target = self.list_model.object_at(ndx.row())\n\n mainlog.debug(\"selected_item_changed: trying to save something\")\n if (not self.in_save) and type(self._save_if_necessary()) is int:\n # Something was actually saved\n mainlog.debug(\"selected_item_changed : something was saved starting list refresh\")\n self.in_save = True\n self._refresh_list()\n self._select_on_object_id(getattr(target,self.key_field))\n self.in_save = False\n # mainlog.debug(\"selected_item_changed : done list refresh\")\n self._populate_form(target)\n\n\n\n # FIXME Qt Bug ??? 
Mismatch between the parameters of the signal in the doc\n # and what I really get (no param...)\n @Slot(bool)\n def create_action(self):\n self.list_view.clearSelection()\n self._populate_form(None)\n self.form_prototype[0].edit_widget(None).setFocus(Qt.OtherFocusReason)\n\n @Slot(bool)\n def save_action(self):\n # mainlog.debug(\"Current row is {}\".format(self.list_view.currentIndex().row()))\n\n self.in_save = True\n form_data = self._load_forms_data()\n obj_id = self._validate_and_save(form_data)\n\n # Following a save, the position of the object in the\n # list might have changed. So we need to reload the\n # list to account for that and we also need to reselect\n # the object\n\n if obj_id != False:\n # mainlog.debug(\"Save action : refreshing\")\n self._refresh_list()\n # mainlog.debug(\"Save action : selecting\")\n self._select_on_object_id(obj_id)\n\n self.in_save = False\n\n\n @Slot(bool)\n def delete_action(self):\n if self.current_item:\n o_id = getattr( self.current_item, self.key_field)\n\n if o_id >= 0: # For some reason I have o_id = 0 somewhere...\n\n # mainlog.debug(\"About to delete {}\".format(o_id))\n\n try:\n if self.delete_object(o_id):\n self.current_item = None # Do this only if delete was successful !\n self.in_save = True\n self._refresh_list()\n\n # The current filter might lead to a 0-length list\n # or we might delete the only item of the list\n # In that case, we clear the form.\n\n if self.list_view.model().rowCount() > 0:\n self.list_view.selectRow(0)\n else:\n self._populate_form(None)\n\n self.in_save = False\n\n except Exception as e:\n showErrorBox(_(\"There was an error while deleting\"),str(e),e)\n return\n\n else:\n mainlog.error(\"The current object has no id => I can't delete it\")\n else:\n showWarningBox(_(\"You have selected nothing for delete.\"),None)\n return\n\n\n def done(self,x):\n current_ndx = self.list_view.currentIndex().row()\n if self._save_if_necessary():\n super(MetaFormDialog,self).done(x)\n # here be dragons\n\n def check_before_save(self,obj):\n return True\n\n\n def save_object(self,form_data):\n \"\"\" Save object hook\n \"\"\"\n\n c = recursive_defrost_into(form_data, self.mapped_klass)\n session().commit()\n return getattr(c, self.key_field)\n\n def delete_object(self,o_id):\n generic_delete(self.mapped_klass, o_id)\n return True\n\n def objects_list(self):\n \"\"\" Reload the objects from the database.\n :return: a list of DTO.\n \"\"\"\n return generic_load_all_frozen(self.mapped_klass, self.sort_criterion)\n\n\nif __name__ == \"__main__\":\n\n app = QApplication(sys.argv)\n # widget = EditCustomerDialog(None)\n # widget = EditEmployeeDialog(None,dao)\n # widget = EditUserDialog(None,dao)\n widget = EditOperationDefinitionsDialog(None)\n widget.show()\n\n app.exec_()\n","repo_name":"wiz21b/koi","sub_path":"koi/gui/MetaFormDialog.py","file_name":"MetaFormDialog.py","file_ext":"py","file_size_in_byte":20756,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"60"}
{"seq_id":"45745867260","text":"'''\n* Difficulty: Medium\n* Asked by: Microsoft\n* Problem: Given a number in the form of a list of digits, return all possible permutations.\nFor example, given [1,2,3], return [[1,2,3],[1,3,2],[2,1,3],[2,3,1],[3,1,2],[3,2,1]].\n\n\nTime Taken:\nRunTime:\nSpace Complexity:\n\nDescription:\n'''\n\ndef permutations(lst):\n if len(lst) == 0:\n return []\n if len(lst) == 1:\n # Base case: a single digit has exactly one permutation\n return [lst]\n\n result = []\n\n for i in range(len(lst)):\n char = lst[i]\n # Digits left over once lst[i] is fixed in front\n permus = lst[:i] + lst[i+1:]\n for perm in 
permutations(permus):\n # perm is itself a list, so plain concatenation builds the full permutation\n temp = [char] + perm\n result.append(temp)\n\n return result\n\n\ndef main():\n for perm in permutations([1,2,3]):\n print (perm)\n\n # for perm in permutations([1,2,3,4]):\n # print (perm)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"PatrickGhadban/DailyCodingProblem","sub_path":"daily9.py","file_name":"daily9.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"22908011882","text":"#from src.db_handling import add_user, add_rating, add_stat, add_mensa, add_feedback, add_food\nimport csv, sqlite3\nimport pandas as pd\nimport os\n\ndb_path = \"mensabot.db\"\ncsv_files = [\"feedback.csv\", \"food.csv\", \"foodrating.csv\", \"mensa.csv\", \"stat.csv\", \"user.csv\"]\n\ncolumn_names = {\n \"mensa\": [\"id\", \"name\", \"short_name\"],\n \"food\": [\"id\", \"description\", \"price\", \"date\", \"mensa_id\"],\n \"user\": [\"id\", \"chat_id\", \"subscription_time\", \"subbed_mensas\"],\n \"foodrating\": [\"id\", \"date\", \"rating\", \"user_id\", \"food_id\"],\n \"stat\": [\"id\", \"datetime\", \"total_users\", \"subbed_users\", \"ratings\"],\n \"feedback\": [\"id\", \"datetime\", \"feedback_text\", \"user_id\"]\n}\n\ncon = sqlite3.connect(db_path)\n\nif __name__ == \"__main__\": \n for csv_file in csv_files:\n table_name = os.path.splitext(os.path.basename(csv_file))[0]\n df = pd.read_csv(csv_file, names=column_names[table_name])\n\n try:\n df.to_sql(table_name, con, if_exists='append', index=False)\n except sqlite3.IntegrityError:\n print(\"Table already inserted.\")","repo_name":"zapfdk/tui-mensa-bot","sub_path":"load_csv_to_sqlite.py","file_name":"load_csv_to_sqlite.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"}
{"seq_id":"35474868748","text":"\nimport re\nfrom functools import partial\nfrom pathlib import Path\nfrom typing import Callable, List, Optional, Type, TypeVar, Union\n\nfrom mus.macro.job import MacroJob\nfrom mus.util import ssp\nfrom mus.util.ssp import Atom\n\n\ndef getBasenameNoExtension(filename: Path) -> str:\n rv = filename.name\n if '.' 
in rv:\n return rv.rsplit('.', 1)[0]\n else:\n return rv\n\n\ndef fn_resolver(match: re.Match,\n job: MacroJob,\n resfunc: Callable) -> str:\n \"\"\"\n Helper function to resolve the different filename\n based template functions.\n\n Args:\n match (re.Match): re match object for template element\n job (MacroJob): Job containing expansion data\n resfunc (callable): Function converting filename\n\n Returns:\n str: resolved template elements\n \"\"\"\n\n mg0 = match.groups()[0]\n matchno = '1' if not mg0 else mg0\n filename = job.data[matchno]\n rv = resfunc(filename)\n return rv\n\n\n# expandable template elements\nTEMPLATE_ELEMENTS = [\n ('%([1-9]?)f', lambda x: str(x)),\n ('%([1-9]?)F', lambda x: str(Path(x).resolve())),\n ('%([1-9]?)n', lambda x: str(Path(x).name)),\n ('%([1-9]?)s', lambda x: str(Path(x).stem)),\n ('%([1-9]?)S', lambda x: str(Path(Path(x).stem).stem)),\n ('%([1-9]?)p', lambda x: str(Path(x).resolve().parent)),\n ('%([1-9]?)P', lambda x: str(Path(x).resolve().parent.parent)),\n]\n\n\ndef resolve_template(\n template: Union[str, Atom],\n job: MacroJob) -> Union[str, Atom]:\n \"\"\"\n Expand a % template based on a filename.\n\n Args:\n template (str|Atom): Template to expand\n job (MacroJob): Job containing relevant data\n\n Returns:\n str|Atom: resolved template\n \"\"\"\n\n # parse over all template elements\n new_template = template\n for rex, resfunc in TEMPLATE_ELEMENTS:\n # Prepare function to expand, pickling with job & resolving function\n resfunc_p = partial(fn_resolver, resfunc=resfunc, job=job)\n new_template = re.sub(rex, resfunc_p, new_template)\n\n if isinstance(template, Atom):\n return template.update(new_template)\n else:\n return new_template\n\n\nclass MacroElementBase():\n \"\"\" Base element - just returns the elements as a string\"\"\"\n def __init__(self,\n macro,\n fragment: str,\n name: str) -> None:\n self.fragment = fragment\n self.macro = macro\n self.name = name\n\n def expand(self):\n raise NotImplementedError\n\n\nclass MacroElementText(MacroElementBase):\n \"\"\"Just a piece of text - but expand % macros\"\"\"\n\n def render(self,\n job: MacroJob) -> str:\n \"\"\"\n Render this text element, expand template\n\n Args:\n job (Type[MacroJob]): Job\n\n Returns:\n str: Rendered fragment\n \"\"\"\n rv = resolve_template(self.fragment, job)\n return rv\n\n def __str__(self):\n return f\"Text : '{self.fragment}'\"\n\n\nclass MacroElementSSP(MacroElementText):\n\n def __str__(self):\n return f\"Output : '{self.fragment}'\"\n\n def render(self,\n job: MacroJob) -> str:\n item = job.data[self.name]\n item = resolve_template(item, job)\n job.rendered[self.name] = item\n return item\n\n def expand(self):\n ssp_expand = ssp.SSP(self.fragment)\n yield from [(self.name, x)\n for x in ssp_expand.stack]\n","repo_name":"mfiers/mus","sub_path":"mus/macro/elements.py","file_name":"elements.py","file_ext":"py","file_size_in_byte":3449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"32435566939","text":"from flask import Flask, render_template, redirect, request\r\nfrom pymongo import MongoClient\r\nimport pandas as pd\r\n\r\napp = Flask(__name__)\r\nclient = MongoClient('mongodb://mesiin592022-0022.westeurope.cloudapp.azure.com:30000/')\r\ndb = client['SalesDB']\r\ncollection = db['Unique_table']\r\n\r\nclasses = \"table table-striped table-bordered\"\r\n\r\nlim = 5\r\n\r\n@app.route('/', methods=['GET', 'POST'])\r\ndef home():\r\n global lim\r\n lim = request.args.get(\"lim\")\r\n lim = 5 if lim is 
None else int(lim)\r\n return render_template('base.html')\r\n\r\n@app.route('/requete1', methods=(\"POST\", \"GET\"))\r\ndef requete1():\r\n R1 = [{'$match': {\"FirstName_customer\": \"Joseph\"}}, {'$project': {\"SalesID\": 1, \"FirstName_customer\": 1, \"_id\": 0}}]\r\n result = list(collection.aggregate(R1))\r\n df = pd.DataFrame(result).head(lim)\r\n return render_template('simple.html', requete_nb=\"Requete 1\", result=df.to_html(classes=\"table table-striped table-bordered\", index=False))\r\n\r\n@app.route('/requete2', methods=(\"POST\", \"GET\"))\r\ndef requete2():\r\n R2 = [{'$match': {'LastName_employee': 'Ringer'}}, {'$group': {'_id': '$LastName_employee','Sum of quantity': {'$sum': '$Quantity'}}}]\r\n result = list(collection.aggregate(R2))\r\n df = pd.DataFrame(result).head(lim)\r\n return render_template('simple.html', requete_nb=\"Requete 2\", result=df.to_html(classes=\"table table-striped table-bordered\", index=False))\r\n\r\n@app.route('/requete3', methods=(\"POST\", \"GET\"))\r\ndef requete3():\r\n R3 = [{'$match': {\"Name\":{ '$regex' : 'de', '$options' : 'i' }, \"Price\":{'$lt': 500}}},{'$project': {\"SalesID\":1,\"Price\":1,\"_id\":0}}]\r\n result = list(collection.aggregate(R3))\r\n df = pd.DataFrame(result).head(lim)\r\n return render_template('simple.html', requete_nb=\"Requete 3\", result=df.to_html(classes=\"table table-striped table-bordered\", index=False))\r\n\r\n@app.route('/requete4', methods=(\"POST\", \"GET\"))\r\ndef requete4():\r\n R4 = [{'$match': {'$expr': {'$lt': [{'$strLenCP': '$FirstName_employee'}, {'$strLenCP': '$LastName_customer'}]}}}, {'$project': {'Name': 1,'FirstName_employee': 1,'LastName_customer': 1,'_id': 0}}]\r\n result = list(collection.aggregate(R4))\r\n df = pd.DataFrame(result).head(lim)\r\n return render_template('simple.html', requete_nb=\"Requete 4\", result=df.to_html(classes=\"table table-striped table-bordered\", index=False))\r\n\r\n@app.route('/requete5', methods=(\"POST\", \"GET\"))\r\ndef requete5():\r\n middleName_employee = request.args.get(\"middleName_employee\")\r\n middleName_employee = 'e' if middleName_employee is None else middleName_employee\r\n somme = request.args.get(\"somme\")\r\n somme = 1000 if (somme is None or somme=='') else int(somme)\r\n\r\n R5 = [{'$group': {'_id': '$CustomerID','FirstName_customer': {'$first': '$FirstName_customer'},'LastName_customer': {'$first': '$LastName_customer'},'MiddleInitial_employee': {'$first': '$MiddleInitial_employee'},'Somme du nombre d\\'achats': {'$sum': '$Quantity'}}}, {'$match': {'MiddleInitial_employee': middleName_employee,'Somme du nombre d\\'achats': {'$gte': somme}}}, {'$project': {'FirstName_customer': 1,'LastName_customer': 1,'_id': 0}}, {'$sort': {'LastName_customer': 1}}]\r\n result = list(collection.aggregate(R5))\r\n df = pd.DataFrame(result).head(lim)\r\n return render_template('hard_5.html', requete_nb=\"Requete 5\", result=df.to_html(classes=\"table table-striped table-bordered\", index=False), req=R5)\r\n\r\n@app.route('/requete6', methods=(\"POST\", \"GET\"))\r\ndef requete6():\r\n limite = request.args.get(\"limite\")\r\n limite = 1 if limite is None else int(limite)\r\n\r\n R6 = [{'$project': {'Benefit': {'$multiply': ['$Quantity','$Price']},'EmployeeID': 1,'FirstName_employee': 1,'Quantity': 1,'CustomerID': 1,'Price': 1}}, {'$group': {'_id': {'EmployeeID': '$EmployeeID','CustomerID': '$CustomerID'},'CustomerID': {'$first': '$CustomerID'},'EmployeeID': {'$first': '$EmployeeID'},'FirstName_employee': {'$first': '$FirstName_employee'},'Somme des 
benefices': {'$sum': '$Benefit'}}}, {'$sort': {'Somme des benefices': 1}}, {'$limit': limite}, {'$project': {'FirstName_employee': 1,'_id': 0}}]\r\n result = list(collection.aggregate(R6))\r\n df = pd.DataFrame(result).head(limite)\r\n return render_template('hard_6.html', requete_nb=\"Requete 6\", result=df.to_html(classes=\"table table-striped table-bordered\", index=False), req=R6)\r\n\r\n@app.route('/requete7', methods=(\"POST\", \"GET\"))\r\ndef requete7():\r\n nb = request.args.get(\"nb\")\r\n nb = 3 if nb is None else int(nb)\r\n\r\n R7 = [{'$match': {'$expr': {'$ne': ['$FirstName_customer','$FirstName_employee']}}}, {'$group': {'_id': {'CustomerID': '$CustomerID','LastName_employee': '$LastName_employee'},'LastName_customer': {'$first': '$LastName_customer'},'DistinctCount': {'$sum': 1}}}, {'$match': {'DistinctCount': {'$gte': nb}}}, {'$project': {'LastName_customer': 1,'_id': 0}}, {'$sort': {'LastName_customer': 1}}]\r\n result = list(collection.aggregate(R7))\r\n df = pd.DataFrame(result).head(lim)\r\n return render_template('hard_7.html', requete_nb=\"Requete 7\", result=df.to_html(classes=\"table table-striped table-bordered\", index=False), req=R7)\r\n\r\n@app.route('/requete8', methods=(\"POST\", \"GET\"))\r\ndef requete8():\r\n middleName_employee1 = request.args.get(\"middleName_employee1\")\r\n middleName_employee1 = 'e' if middleName_employee1 is None else middleName_employee1\r\n limite1 = request.args.get(\"limite1\")\r\n limite1 = 1 if limite1 is None else int(limite1)\r\n\r\n R8 = [{'$match': {'MiddleInitial_employee': middleName_employee1}}, {'$group': {'_id': {'EmployeeID': '$EmployeeID','ProductID': '$ProductID'},'Name': {'$first': '$Name'},'Nb': {'$sum': '$Quantity'}}}, {'$sort': {'Nb': -1}}, {'$limit': limite1}, {'$project': {'Name': 1,'Nb': 1,'_id': 0}}]\r\n result = list(collection.aggregate(R8))\r\n df = pd.DataFrame(result).head(limite1)\r\n return render_template('hard_8.html', requete_nb=\"Requete 8\", result=df.to_html(classes=\"table table-striped table-bordered\", index=False), req=R8)\r\n\r\n@app.route('/adminView', methods=(\"POST\", \"GET\"))\r\ndef adminView():\r\n infos = db.command(\"collstats\", \"Unique_table\")\r\n indexes_existants = list(infos[\"indexSizes\"].keys())\r\n nb_shards = len(infos[\"shards\"])\r\n nb_chunks = infos[\"nchunks\"]\r\n stats = []\r\n for k, v in infos[\"shards\"].items():\r\n stats.append({\r\n \"Nom du Shard\": k,\r\n \"Nombre de documents\": infos[\"shards\"][k][\"count\"],\r\n \"Taille des données stockées\": str(round(infos[\"shards\"][k][\"size\"] * 1e-6, 2)) + \" Mo\",\r\n \"Pourcentage des données stockées\": str(round(infos[\"shards\"][k][\"size\"] / infos[\"size\"] * 100, 2)) + \"%\"\r\n })\r\n stats = pd.DataFrame(stats).sort_values(\"Nom du Shard\")\r\n return render_template('AdminView.html',\r\n indexes_existants=indexes_existants,\r\n nb_shards=nb_shards,\r\n nb_chunks=nb_chunks,\r\n result=stats.to_html(classes=\"table table-striped table-bordered\", index=False))\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True)","repo_name":"rolanddenizot/Structures_de_donnes_Cloud_Rapport_4","sub_path":"Rapport 4/App_Cloud/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"69839333950","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n'''\ncontroller.py\nZigbee controller\nHolding & caching system state, throttling interactions, managing interrupts & watchdog\n'''\n\nimport asyncio\nimport logging\nimport time\n\nfrom zigbee import State\n\n# Minimum time between two SetState calls [seconds]\nSETSTATE_MIN_PERIOD = 1.\n# Periodic update of internal state with hardware [seconds]\nUPDATE_PERIOD = 30.\n# Watchdog: period of inactivity (no connection with gateway) after which to revert to a safe state [seconds]\nWATCHDOG_PERIOD = 3*60*60 # 3h\nWATCHDOG_SAFE_STATE = State.OFF\nWATCHDOG_UNSAFE_STATES = [State.ON]\n\n\nclass ZBCtrl():\n '''Main controller for Zigbee, holding & caching system state'''\n\n def __init__(self, zbi, devices):\n '''Constructor: zbi ZBInterface object, devices list of known IDs (IEEE/EUI64)'''\n # Zigbee interface\n self.zbi = zbi\n # Internal state and its lock to R/W it\n self._state = {device: State.NA for device in devices}\n #self._state = {\n # '70:ac:08:ff:fe:7e:0b:xx': State.OFF # IKEA of Sweden TRADFRI control outlet\n #}\n self.lock = asyncio.Lock()\n # Throttling: last change time and its lock to R/W it\n self.lastChange = time.time()\n self.lastChangeLock = asyncio.Lock()\n # Watchdog: last contact time and its lock to R/W it\n self.lastContact = time.time()\n self.lastContactLock = asyncio.Lock()\n # Periodic update & running state\n self.updateTask = None\n\n async def start(self):\n '''Start the controller, its timer and its dependencies (incl. ZBInterface)'''\n await self.zbi.start(self._stateChangeCallback)\n self.running = True\n self.updateTask = asyncio.create_task(self._periodicUpdate())\n #await self.updateTask\n\n async def stop(self):\n ''' Stop controller, its timer and its dependencies'''\n self.running = False\n self.updateTask.cancel()\n await self.zbi.stop()\n\n async def _stateChangeCallback(self, device, state):\n '''Handle state change event'''\n logging.info('Event: updating state: %s=%r' % (device, state))\n async with self.lock:\n devices = self._state.keys()\n if device in devices:\n async with self.lock:\n self._state[device] = state\n\n async def _periodicUpdate(self):\n '''Periodically update the internal state and let the watchdog out'''\n if self.running:\n logging.info('Running periodic update')\n # Internal state update\n logging.debug(' Updating internal state')\n async with self.lock:\n devices = self._state.keys()\n async def updateDeviceState(device):\n try:\n newState = await self.zbi.getDeviceState(device)\n async with self.lock:\n self._state[device] = newState\n except Exception as e:\n logging.error('Unable to get actual state for device %s: %r' % (device, e))\n await asyncio.gather(*[updateDeviceState(device) for device in devices])\n # Watchdog\n logging.debug(' Walking out the watchdog')\n async with self.lastContactLock:\n now = time.time()\n if now - self.lastContact > WATCHDOG_PERIOD:\n logging.warning(' Watchdog: WOOF no news for too long, reverting to safe state')\n async with self.lock:\n unsafeDevices = [device for device in self._state if self._state[device] in WATCHDOG_UNSAFE_STATES]\n async def setSafeDeviceState(device):\n logging.info(' Reverting device %s to safe state: %s' % (device, WATCHDOG_SAFE_STATE))\n try:\n newState = await self.zbi.setDeviceState(device, WATCHDOG_SAFE_STATE)\n async with self.lock:\n self._state[device] = newState\n except Exception as e:\n logging.error(' Unable to revert device %s to safe state: %r' % (device, e))\n await asyncio.gather(*[setSafeDeviceState(device) for device in unsafeDevices])\n self.lastContact = time.time()\n logging.debug(' Periodic update completed. Waiting for %fs' % UPDATE_PERIOD)\n await asyncio.sleep(UPDATE_PERIOD)\n # Relaunching timer (rechecking if still running as it may have changed in between)\n if self.running:\n self.updateTask = asyncio.create_task(self._periodicUpdate())\n\n async def getState(self):\n # Reassuring watchdog\n async with self.lastContactLock:\n self.lastContact = time.time()\n # Returning state\n async with self.lock:\n return self._state.copy()\n\n async def setState(self, newState):\n # Reassuring watchdog\n async with self.lastContactLock:\n self.lastContact = time.time()\n # Throttling\n async with self.lastChangeLock:\n now = time.time()\n if now - self.lastChange < SETSTATE_MIN_PERIOD:\n logging.warning('Only %fs since last call, throttling!' % (now - self.lastChange))\n raise ValueError('Throttling: too many setState requests')\n self.lastChange = now\n # Applying changes\n stateChanges = {}\n async with self.lock:\n for device in self._state:\n if device in newState and self._state[device] != newState[device] and newState[device] in [State.ON, State.OFF]:\n stateChanges[device] = newState[device]\n async def applyStateChange(device, newDeviceState):\n logging.info('Changing state: %s=%s' % (device, newDeviceState))\n actualNewDeviceState = await self.zbi.setDeviceState(device, newDeviceState)\n async with self.lock:\n self._state[device] = actualNewDeviceState\n await asyncio.gather(*[applyStateChange(device, stateChanges[device]) for device in stateChanges])\n\n\nif __name__ == '__main__':\n from zigbee import ZBInterface\n logging.basicConfig(format='%(asctime)s %(levelname)s %(module)s/%(funcName)s %(message)s', encoding='utf-8', level=logging.INFO)\n async def main():\n zbi = ZBInterface()\n devices = [\n '70:ac:08:ff:fe:7e:0b:xx', # IKEA of Sweden TRADFRI control outlet\n ]\n ctrl = ZBCtrl(zbi, devices)\n await ctrl.start()\n outlet = '70:ac:08:ff:fe:7e:0b:xx'\n # getState() is a coroutine: it must be awaited, otherwise a coroutine object is logged\n logging.info(await ctrl.getState())\n logging.info(await ctrl.setState({outlet: State.ON}))\n logging.info(await ctrl.getState())\n await asyncio.sleep(15)\n logging.info(await ctrl.getState())\n logging.info(await ctrl.setState({outlet: State.OFF}))\n await asyncio.sleep(15)\n logging.info(await ctrl.getState())\n logging.info(await ctrl.setState({outlet: State.ON}))\n logging.info(await ctrl.getState())\n await asyncio.sleep(15)\n logging.info(await ctrl.getState())\n await ctrl.stop()\n asyncio.run(main())\n","repo_name":"ekingr/zigbee","sub_path":"controller/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":7227,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"}
{"seq_id":"33205208220","text":"scores = [float(i) for i in input().split()]\nmin = scores[0]\nmax = scores[0]\nsum = 0.\n\nfor i, value in enumerate(scores):\n if min > value:\n min = value\n if max < value:\n max = value\n sum += value\nsum = (sum - min - max)/2.\nprint(round(sum, 2))","repo_name":"vikimark/2190101_Com_Prog","sub_path":"03_If/03_If_★_Gymnastic_Score.py","file_name":"03_If_★_Gymnastic_Score.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"74030752511","text":"\"\"\"Adds config flow for FlexMeasure.\"\"\"\nfrom __future__ import annotations\n\nimport logging\n\nimport voluptuous as vol\nfrom croniter import croniter\nfrom homeassistant import 
config_entries\nfrom homeassistant.const import CONF_NAME\nfrom homeassistant.const import CONF_UNIT_OF_MEASUREMENT\nfrom homeassistant.const import CONF_VALUE_TEMPLATE\nfrom homeassistant.helpers import selector\nfrom homeassistant.helpers.template import Template\nfrom homeassistant.helpers.template import TemplateError\n\nfrom .const import CONF_CONDITION\nfrom .const import CONF_CRON\nfrom .const import CONF_METER_TYPE\nfrom .const import CONF_PERIODS\nfrom .const import CONF_SENSORS\nfrom .const import CONF_SOURCE\nfrom .const import CONF_TW_DAYS\nfrom .const import CONF_TW_FROM\nfrom .const import CONF_TW_TILL\nfrom .const import DOMAIN\nfrom .const import METER_TYPE_SOURCE\nfrom .const import METER_TYPE_TIME\nfrom .const import PREDEFINED_PERIODS\n\n\n_LOGGER: logging.Logger = logging.getLogger(__name__)\nMETER_TYPES_MENU = [\"time\", \"source\"]\nPERIOD_MENU = [\"predefined\", \"custom\"]\nPERIOD_MENU_DONE = [\"predefined\", \"custom\", \"done\"]\n\nPERIOD_OPTIONS = [\n # selector.SelectOptionDict(value=\"none\", label=\"none (no reset)\"),\n selector.SelectOptionDict(value=\"5m\", label=\"5m\"),\n selector.SelectOptionDict(value=\"hour\", label=\"hour\"),\n selector.SelectOptionDict(value=\"day\", label=\"day\"),\n selector.SelectOptionDict(value=\"week\", label=\"week\"),\n selector.SelectOptionDict(value=\"month\", label=\"month\"),\n selector.SelectOptionDict(value=\"year\", label=\"year\"),\n]\n\nDAY_OPTIONS = [\n selector.SelectOptionDict(value=\"0\", label=\"monday\"),\n selector.SelectOptionDict(value=\"1\", label=\"tuesday\"),\n selector.SelectOptionDict(value=\"2\", label=\"wednesday\"),\n selector.SelectOptionDict(value=\"3\", label=\"thursday\"),\n selector.SelectOptionDict(value=\"4\", label=\"friday\"),\n selector.SelectOptionDict(value=\"5\", label=\"saturday\"),\n selector.SelectOptionDict(value=\"6\", label=\"sunday\"),\n]\nDEFAULT_DAYS = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\"]\n\n\nclass FlexMeasureConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):\n def __init__(self) -> None:\n super().__init__()\n self._sensor_config = {}\n self._data = {}\n self._data[CONF_SENSORS] = []\n\n def is_valid_cron(self, value: str) -> bool:\n return croniter.is_valid(value)\n\n def is_valid_template(self, value: str) -> bool:\n if not value:\n return True\n template = Template(value)\n try:\n template.ensure_valid()\n return True\n except TemplateError:\n return False\n\n async def async_step_user(self, user_input=None):\n return self.async_show_menu(step_id=\"user\", menu_options=METER_TYPES_MENU)\n\n async def async_step_time(self, user_input=None):\n errors = {}\n if user_input is not None:\n if not self.is_valid_template(user_input.get(CONF_VALUE_TEMPLATE)):\n errors[CONF_VALUE_TEMPLATE] = \"invalid template\"\n\n if not errors:\n self._data[CONF_METER_TYPE] = METER_TYPE_TIME\n self._data[CONF_NAME] = user_input[CONF_NAME]\n self._sensor_config[CONF_UNIT_OF_MEASUREMENT] = user_input.get(\n CONF_UNIT_OF_MEASUREMENT\n )\n self._sensor_config[CONF_VALUE_TEMPLATE] = user_input.get(\n CONF_VALUE_TEMPLATE\n )\n return await self.async_step_when()\n\n schema = vol.Schema(\n {\n vol.Required(CONF_NAME): selector.TextSelector(),\n vol.Optional(CONF_UNIT_OF_MEASUREMENT): selector.TextSelector(),\n vol.Optional(CONF_VALUE_TEMPLATE): selector.TemplateSelector(),\n }\n )\n return self.async_show_form(step_id=\"time\", data_schema=schema, errors=errors)\n\n async def async_step_source(self, user_input=None):\n errors = {}\n if user_input is not None:\n if not 
self.is_valid_template(user_input.get(CONF_VALUE_TEMPLATE)):\n errors[CONF_VALUE_TEMPLATE] = \"invalid template\"\n\n if not errors:\n self._data[CONF_METER_TYPE] = METER_TYPE_SOURCE\n self._data[CONF_NAME] = user_input[CONF_NAME]\n self._data[CONF_SOURCE] = user_input[CONF_SOURCE]\n self._sensor_config[CONF_UNIT_OF_MEASUREMENT] = user_input.get(\n CONF_UNIT_OF_MEASUREMENT\n )\n self._sensor_config[CONF_VALUE_TEMPLATE] = user_input.get(\n CONF_VALUE_TEMPLATE\n )\n return await self.async_step_when()\n\n schema = vol.Schema(\n {\n vol.Required(CONF_NAME): selector.TextSelector(),\n vol.Required(CONF_SOURCE): selector.EntitySelector(),\n vol.Optional(CONF_UNIT_OF_MEASUREMENT): selector.TextSelector(),\n vol.Optional(CONF_VALUE_TEMPLATE): selector.TemplateSelector(),\n }\n )\n return self.async_show_form(step_id=\"source\", data_schema=schema, errors=errors)\n\n async def async_step_when(self, user_input=None):\n errors = {}\n _LOGGER.debug(\"User input: %s\", user_input)\n\n if user_input is not None:\n if not self.is_valid_template(user_input.get(CONF_CONDITION)):\n errors[CONF_CONDITION] = \"invalid_template\"\n if not len(user_input.get(CONF_TW_DAYS)) > 0:\n errors[CONF_TW_DAYS] = \"at_least_one_day\"\n\n if not errors:\n self._data.update(user_input)\n return await self.async_step_periods()\n\n schema = vol.Schema(\n {\n vol.Optional(CONF_CONDITION): selector.TemplateSelector(),\n vol.Optional(\n CONF_TW_DAYS, default=DEFAULT_DAYS\n ): selector.SelectSelector(\n selector.SelectSelectorConfig(\n options=DAY_OPTIONS,\n multiple=True,\n mode=selector.SelectSelectorMode.LIST,\n ),\n ),\n vol.Required(CONF_TW_FROM): selector.TimeSelector(),\n vol.Required(CONF_TW_TILL): selector.TimeSelector(),\n }\n )\n return self.async_show_form(step_id=\"when\", data_schema=schema, errors=errors)\n\n async def async_step_periods(self, user_input=None):\n if len(self._data.get(CONF_SENSORS, 0)) > 0:\n return self.async_show_menu(\n step_id=\"periods\", menu_options=PERIOD_MENU_DONE\n )\n return self.async_show_menu(step_id=\"periods\", menu_options=PERIOD_MENU)\n\n async def async_step_predefined(self, user_input=None):\n errors = {}\n if user_input is not None:\n\n if not errors:\n _LOGGER.debug(\"Selected periods: %s.\", user_input[CONF_PERIODS])\n for period in user_input[CONF_PERIODS]:\n sensor = dict(self._sensor_config)\n sensor[CONF_NAME] = period\n sensor[CONF_CRON] = PREDEFINED_PERIODS[period]\n self._data[CONF_SENSORS].append(sensor)\n _LOGGER.debug(\"Sensors: %s.\", self._data)\n return await self.async_step_periods()\n\n schema = vol.Schema(\n {\n vol.Optional(CONF_PERIODS): selector.SelectSelector(\n selector.SelectSelectorConfig(\n options=PERIOD_OPTIONS,\n multiple=True,\n mode=selector.SelectSelectorMode.LIST,\n )\n )\n }\n )\n return self.async_show_form(\n step_id=\"predefined\", data_schema=schema, errors=errors\n )\n\n async def async_step_custom(self, user_input=None):\n\n errors = {}\n if user_input is not None:\n\n if not self.is_valid_cron(user_input[CONF_CRON]):\n errors[CONF_CRON] = \"invalid cron pattern\"\n if not self.is_valid_template(user_input.get(CONF_VALUE_TEMPLATE)):\n errors[CONF_VALUE_TEMPLATE] = \"invalid template\"\n\n if not errors:\n self._data[CONF_SENSORS].append(user_input)\n return await self.async_step_periods()\n\n schema = vol.Schema(\n {\n vol.Required(CONF_NAME): selector.TextSelector(),\n vol.Required(CONF_CRON): selector.TextSelector(),\n # vol.Optional(CONF_DURATION): selector.DurationSelector(\n # selector.DurationSelectorConfig(enable_day=True)\n # 
),\n vol.Optional(CONF_UNIT_OF_MEASUREMENT): selector.TextSelector(),\n vol.Optional(CONF_VALUE_TEMPLATE): selector.TemplateSelector(),\n }\n )\n return self.async_show_form(step_id=\"custom\", data_schema=schema, errors=errors)\n\n async def async_step_done(self, user_input=None):\n _LOGGER.debug(\"All stored data: %s\", self._data)\n\n return self.async_create_entry(\n title=self._data[CONF_NAME], data={}, options=self._data\n )\n","repo_name":"danieldotnl/ha-flexmeasure","sub_path":"custom_components/flexmeasure/config_flow.py","file_name":"config_flow.py","file_ext":"py","file_size_in_byte":9203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"31043532374","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.contrib.gis.db.models.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Area',\n fields=[\n ('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),\n ('name', models.CharField(max_length=100)),\n ('poly', django.contrib.gis.db.models.fields.PolygonField(srid=4326)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n","repo_name":"dedsm/maps_test","sub_path":"maps/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"5832677362","text":"import unittest\nimport sqlite3\nimport json\nimport os\nimport ml\n\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.metrics import accuracy_score\n\n# TRAINING OUR MODEL\ndf = pd.read_csv('cleaned_kaggle_news.csv')\n\n# Split the data\nDV = 'fake_news' # The dependent variable, text is the independent variable here\n\nX = df.drop([DV], axis = 1) # Drop from our X array because this is the text data that gets trained\ny = df[DV]\n\n# Training on 75% of the data, test on the rest\nX_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.25)\n\ncount_vect = CountVectorizer(max_features = 10000) # limiting to 5000, but room to play with this here!\nX_train_counts = count_vect.fit_transform(X_train['text']) \n# print(count_vect.vocabulary_) # here is our bag of words! \nX_test = count_vect.transform(X_test['text']) # note: we don't fit it to the model! 
Or else this is all useless\n\n\n# Fit the training dataset on the NB classifier\nNaive = MultinomialNB()\nNaive.fit(X_train_counts, y_train)\n\n\n# Predict the labels on validation dataset\npredictions_NB = Naive.predict(X_test)\n\n# classifier() takes text, a list of strings, as a parameter \n# This function classifies text as 'Fake News' or 'True News'\n# Return 0 for Fake News and 1 for True News\ndef classifier(text):\n Naive = MultinomialNB()\n Naive.fit(X_train_counts, y_train)\n \n word_vec = count_vect.transform(text) \n \n predict = Naive.predict(word_vec)\n return 0 if predict[0] else 1\n\n# Connects this file to the database\n# setUpDataBase() takes db_name, a string, as a parameter \n# creating a database with db_name\ndef setUpDatabase(db_name):\n path = os.path.dirname(os.path.abspath(__file__))\n conn = sqlite3.connect(path+'/'+db_name)\n cur = conn.cursor()\n return cur, conn\n\n# Retrieves the source ID from the 'Sources' table in the database and \n# checks if Source exists in table already \n# setSourceID() takes curr + conn (for connecting to database) \n# and source_name, a string, the name of a table in a database\n# returns the source_id associated with that source\ndef getSourceID(cur, conn, source_name):\n\n cur.execute('CREATE TABLE IF NOT EXISTS Sources (source_id INT, source_name TEXT)')\n \n cur.execute('SELECT source_id, source_name FROM Sources')\n \n id_name_tups = cur.fetchall()\n source_ids = [tup[0] for tup in id_name_tups]\n source_names = [tup[1] for tup in id_name_tups]\n\n # if we already have this source in the sources_table\n if source_name in source_names:\n cur.execute('SELECT source_id FROM Sources WHERE Sources.source_name = \"{}\"'.format(source_name))\n source_id = cur.fetchone()[0]\n \n # if we don't already have this source in the sources table\n else:\n highest_id = getHighestId(cur, conn, 'source_id', 'Sources')\n cur.execute('INSERT INTO Sources (source_id, source_name) VALUES (?,?)', (highest_id, source_name))\n source_id = highest_id\n \n conn.commit()\n return source_id\n\n# Retrieves the highest ID for a respective table to ensure ID is unique\n# getHighestID() takes curr + conn (for connecting to database) as a parameter\n# and takes column_name, a string, (of a table) and table_name, a string \n# returns the highest ID for a column\ndef getHighestId(cur, conn, column_name, table_name):\n cur.execute('SELECT {} FROM {}'.format(column_name, table_name))\n \n section_id_list = [int(tup[0]) for tup in cur.fetchall()]\n\n if section_id_list != []: \n highest_id = max(section_id_list) + 1\n \n else:\n highest_id = 0\n \n conn.commit()\n return highest_id\n\n# Creates 'Calculation_Table'\n# Grabs source_id, article/tweet_id, and content from columns in NYT, Twitter, New API, and WSJ table\n# Inputs data into the 'Calculation_Table' from tables and machine learning calculation based on content\n# takes curr + conn (for connecting to database) as a parameter\n# The columns in the table are as follows: source_id, article_id, ml_classification\ndef compileCalculationTable(cur, conn):\n cur.execute('DROP TABLE IF EXISTS Calculation_Table')\n\n # source_id, article_id, classified as real or fake, expected real or fake\n cur.execute('CREATE TABLE IF NOT EXISTS Calculation_Table (source_id INT, article_id INT, ml_classification INT)')\n\n # Retrieves Source ID, Article/Tweet ID, and Content of Article/Tweet from respective tables\n cur.execute('SELECT source_id, article_id, article_content FROM NYT_ArticleContent')\n nyt_article_tuples = 
cur.fetchall()\n\n cur.execute('SELECT SourceId, ArticleId, Title FROM News_API')\n news_api_article_tuples = cur.fetchall()\n\n cur.execute('SELECT SourceId, TweetId, Tweet FROM Twitter')\n twitter_article_tuples = cur.fetchall()\n\n cur.execute('SELECT source_id, article_id, article_content FROM WSJ_Article_Content')\n wsj_article_tuples = cur.fetchall()\n\n # Creates tuple of all data gathered \n all_source_tuples = nyt_article_tuples + news_api_article_tuples + twitter_article_tuples + wsj_article_tuples\n \n # Loops through tuples \n for tup in all_source_tuples:\n source_id = tup[0]\n article_id = tup[1]\n article_content = tup[2]\n\n # MACHINE LEARNING CALCULATION\n ml_classification = ml.classifier([article_content])\n\n cur.execute('INSERT INTO Calculation_Table (source_id, article_id, ml_classification) VALUES (?,?,?)', (tup[0], tup[1], ml_classification))\n\n conn.commit()\n\n# Connects to finalProject.db and inserts data into 'Calculation_Table' table \ndef main():\n cur, conn = setUpDatabase('finalProject.db')\n compileCalculationTable(cur, conn)\n conn.close()\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"gho19/Detecting-Fake-News-ML","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":5812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"70634368511","text":"import openpyxl\nimport unidecode\nimport os\n\nwb = openpyxl.load_workbook(\"Semilleros_2020.xlsx\")\n\nannouncement_ws = wb[\"CONVOCATORIAS\"]\ndue_teachers_ws = wb[\"Profesores_Mora\"]\n\n# Obtiene la lista de los profesores en mora\n# La lista contendra listas con las palabras individuales de los profesores\n\ndue_teachers_column = 1\ndue_teachers_start_row = 4\ndue_teachers_names = []\n\n# Recorre las filas con las celdas con los nombres de los profesores\nfor row in due_teachers_ws.iter_rows(min_row=due_teachers_start_row, \n max_row=due_teachers_ws.max_row,\n min_col=1,\n max_col=1):\n # Extrae la celda de la fila\n due_teacher_cell = row[0]\n # Verifica que la celda no este vacia\n if not due_teacher_cell.value or not due_teacher_cell.value.strip():\n continue\n # Extrae el nombre tal cual esta en la celda y le quita los espacios laterales\n due_teacher_raw_name = due_teacher_cell.value.strip()\n # Extrae las palabras individuales del nombre\n due_teacher_name = due_teacher_raw_name.split(\" \")\n temp = due_teacher_name\n # Sanitiza los nombres para sacar los espacios sobrantes\n due_teacher_name = [name.strip() for name in temp if name.strip()]\n \n # Sanitiza los nombres para sacar los acentos\n for index,word in enumerate(due_teacher_name):\n\n due_teacher_name[index] = unidecode.unidecode(word)\n \n due_teachers_names.append(due_teacher_name)\n\n# Ahora verifica los nombres de los directores de grupo\n\nleader_teachers_column = 13\nleader_teachers_start_row = 7\n\n# Recorre las filas con las celdas con los nombres de los profesores\nfor row in announcement_ws.iter_rows(min_row=leader_teachers_start_row, \n max_row=announcement_ws.max_row, \n min_col=leader_teachers_column,\n max_col=leader_teachers_column):\n\n # Extrae la celda de la fila\n leader_teacher_cell = row[0]\n # Verifica que la celda no este vacia\n if not leader_teacher_cell.value or not leader_teacher_cell.value.strip():\n continue\n # Extrae el nombre tal cual esta en la celda y le quita los espacios laterales\n leader_teacher_raw_name = leader_teacher_cell.value.strip()\n # Extrae las palabras individuales del nombre\n 
leader_teacher_name = leader_teacher_raw_name.split(\" \")\n temp = leader_teacher_name\n # Sanitiza los nombres para sacar los espacios sobrantes\n leader_teacher_name = [name.strip() for name in temp if name.strip()]\n \n # Sanitiza los nombres para sacar los acentos\n for index,word in enumerate(leader_teacher_name):\n\n leader_teacher_name[index] = unidecode.unidecode(word)\n \n # Compara nombres entre el nombre del lider y la lista de nombres de profesores en mora\n is_due = False\n\n for name in due_teachers_names:\n # Busca palabras en comun entre nombres\n word_ocurrences = len([word for word in leader_teacher_name if word in name])\n if word_ocurrences == len(name):\n # Si hay una equivalencia exacta, probablemente este en mora\n is_due = True\n break\n \n if is_due:\n announcement_ws.cell(leader_teacher_cell.row, 1).value= \"MORA\"\n \nwb.save(\"Output.xlsx\")","repo_name":"Gacrucis/Proxima","sub_path":"PY/ETerm/Convocator.py","file_name":"Convocator.py","file_ext":"py","file_size_in_byte":3335,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"44156827601","text":"# coding=utf8\n\nimport math\nimport numpy as np\n\ndef lst_string_to_float(list):\n\tres = []\n\tfor line in list:\n\t\tres.append([float(item) for item in line])\n\treturn res\n#Красиво печатаем матрицу.\ndef matrix_print(matrix, ext = 1):\t#ext - количество стобцов расширения матрицы\n\tprint()\n\tfor i in range(len(matrix)):\n\t\tfor ii in range(len(matrix[i]) - ext):\n\t\t\tprint(\"{}\\t\".format(matrix[i][ii]), end='')\n\t\tif 0 != ext:\n\t\t\tprint(\"|\\t\", end='')\n\t\t\tfor ii in range(len(matrix[i]) - ext, len(matrix[i])):\n\t\t\t\tprint(\"{}\\t\".format(matrix[i][ii]), end='')\n\t\tprint()\n#Читаем исходные данные, первые строчки - матрица, последняя - правая часть\ninput_file = open('0_task_input.txt', 'r')\n#Каждую линию нужно разбить на части по символу /t и каждый кусок превратить в float\nA_ext = [line.rstrip('\\n\\r').split('\\t') for line in input_file]\nA_ext = lst_string_to_float(A_ext)\nN = int(A_ext.pop()[0])\nM = int(A_ext.pop()[0])\nmatrix_print(A_ext, 0)\nprint(\"N = {}, M = {}\".format(N, M))\ndef m_plus(A, B):\n\tres = []\n\tfor i in range(len(A)):\n\t\tres.append(list(A[i]))\n\t\tfor ii in range(len(A[i])):\n\t\t\tres[i][ii] = A[i][ii] + B[i][ii]\n\treturn res\n\t\t\t\ndef m_mult_scalar(A, k):\n\tres = []\n\tfor i in range(len(A)):\n\t\tres.append(list(A[i]))\n\t\tfor ii in range(len(A[i])):\n\t\t\tres[i][ii] = A[i][ii] * k\n\treturn res\n\t\ndef m_minus(A, B):\n\tres = []\n\tfor i in range(len(A)):\n\t\tres.append(list(A[i]))\n\t\tfor ii in range(len(A[i])):\n\t\t\tres[i][ii] = A[i][ii] - B[i][ii]\n\treturn res\n\ndef m_T(A):\n\tres = []\n\tfor i in range(len(A[0])):\n\t\tres.append([A[ii][i] for ii in range(len(A))])\n\treturn res\n\ndef m_mult(A, B):\n\tres = [[0 for ii in range(len(B))] for i in range(len(A))]\n\tfor i in range(len(A)):\n\t\tfor ii in range(len(B)):\n\t\t\tfor k in range(len(A[i])):\n\t\t\t\tres[i][ii] += A[i][k] * B[k][ii]\n\treturn res\n\t\ndef norm_inf(A):\n\tres = 0\n\tfor i in range(len(A[0])):\n\t\tres += abs(A[0][i])\n\tfor i in range(len(A)):\n\t\tsum = 0\n\t\tfor ii in range(A[i]):\n\t\t\tsum += abs(A[i][ii])\n\t\tif sum > res:\n\t\t\tres = sum\n\treturn res\n\ndef norm_1(A):\n\tres = 0\n\tfor i in range(len(A)):\n\t\tres += abs(A[i][0])\n\tif(len(A[0])) > 1:\n\t\tfor ii in range(1, len(A)):\n\t\t\tsum = 0\n\t\t\tfor i in range(len(A)):\n\t\t\t\tsum += abs(A[i][ii])\n\t\t\tif sum > res:\n\t\t\t\tres = 
sum\n\treturn res\n\t\ndef norm_2(A):\n\tres = 0\n\tfor i in range(len(A)):\n\t\tfor ii in range(len(A[i])):\n\t\t\tres += abs(A[i][ii]) ** 2\n\treturn math.sqrt(res)\n\t\ndef v_norm_1(A):\n\tres = 0\n\tfor i in range(len(A)):\n\t\tres += abs(A[i])\n\treturn res\n\t\ndef v_norm_inf(A):\n\tres = abs(A[0])\n\tfor i in range(len(A)):\n\t\tif abs(A[i]) > res:\n\t\t\tres = abs(A[i])\n\treturn res\n# matrix_print(m_plus(A_ext, A_ext), 0)\n# matrix_print(m_minus(A_ext, A_ext), 0)\n# matrix_print(m_T(A_ext), 0)\n# matrix_print(m_mult_scalar(A_ext, 2), 0)\n# matrix_print(m_mult(A_ext, A_ext), 0)\n#1)\n#Найдем по формулам Крамера вектор х для уравнения Ах = В\nx = [0 for i in range(N)]\nb = [[200], [-600]]\ndetA = A_ext[0][0] * A_ext[1][1] - A_ext[0][1] * A_ext[1][0]\nx[0] = (b[0][0] * A_ext[1][1] - b[1][0] * A_ext[0][1]) / detA\nx[1] = (b[1][0] * A_ext[0][0] - b[0][0] * A_ext[1][0]) / detA\ndiscrepancy = [0 for i in range(N)]\nfor i in range(N):\n\tfor ii in range(M):\n\t\tdiscrepancy[i] += A_ext[i][ii] * x[ii]\n\t\t#print(discrepancy)\n\tdiscrepancy[i] -= b[i][0]\nprint(\"x = {}\".format(x))\nprint(\"Вектор невязки = {}\".format(discrepancy))\n#Найдем по формулам Крамера вектор х для у��авнения Ах = (B + delta B)\nx_delta = [0 for i in range(N)]\nb_new = [[199], [-601]] #b + delta_b\ndelta_b = [[-1], [-1]]\ndetA = A_ext[0][0] * A_ext[1][1] - A_ext[0][1] * A_ext[1][0]\nx_delta[0] = (b_new[0][0] * A_ext[1][1] - b_new[1][0] * A_ext[0][1]) / detA\nx_delta[1] = (b_new[1][0] * A_ext[0][0] - b_new[0][0] * A_ext[1][0]) / detA\ndiscrepancy_new = [0 for i in range(N)]\nfor i in range(N):\n\tfor ii in range(M):\n\t\tdiscrepancy_new[i] += A_ext[i][ii] * x_delta[ii]\n\t\t#print(discrepancy_new)\n\tdiscrepancy_new[i] -= b_new[i][0]\nprint(\"Для возмущенной системы:\\nx = {}\".format(x_delta))\nprint(\"Вектор невязки = {}\".format(discrepancy_new))\n#3) Найдем фактическую относительную погрешшность\nsigma = v_norm_inf([x_delta[i] - x[i] for i in range(N)]) / v_norm_inf(x)\nprint(\"Фактическая относительную погрешность = {}\".format(sigma))\n#4) Вычислим число обусловленности\nmA = np.matrix(A_ext)\nA_inv = np.linalg.inv(mA)\nA_inv = np.squeeze(np.asarray(A_inv))\ncondition = norm_1(A_ext) * norm_1(A_inv)\nprint(\"Число обусловленности = {}\".format(condition))\n#5) Найдем теоретическую относительную погрешность\ndelta = 0\nnorm_b = 0\nfor i in range(len(delta_b)):\n\tdelta += abs(delta_b[i][0])\n\tnorm_b += abs(b[i][0])\nsigma_b = delta/ norm_b\nprint(\"Теоретическая относительная погрешность = {}\".format(sigma_b * condition))","repo_name":"dimmddr/numericalMethods","sub_path":"2 семестр/0_task.py","file_name":"0_task.py","file_ext":"py","file_size_in_byte":5118,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"4745645751","text":"\"\"\"\nDownloads models from Hugging Face to models/username_modelname.\n\"\"\"\n\nimport argparse\nimport base64\nimport datetime\nimport hashlib\nimport json\nfrom os import environ\nfrom pathlib import Path\nfrom re import Pattern, compile\nfrom typing import Dict, List, Literal, Optional\n\nfrom requests import HTTPError, Response, Session\n\ntry:\n from ..utils.logger import ApiLogger\nexcept ImportError:\n from logging import getLogger as ApiLogger\n\ntry:\n from typing_extensions import TypedDict\n\n\nexcept ImportError:\n print(\"Failed to import typing_extensions, using TypedDict from typing\")\n from typing import TypedDict # When dependencies aren't installed yet\ntry:\n from tqdm import tqdm\n from 
tqdm.contrib.concurrent import thread_map\n\nexcept ImportError:\n tqdm = thread_map = None\n\nlogger = ApiLogger(__name__)\n\n\nClassification = Literal[\"text\", \"pytorch\", \"safetensors\", \"ggml\", \"pt\"]\n\n\nclass HuggingfaceModelInfo(TypedDict):\n links: List[str]\n file_names: List[str]\n sha256: List[List[str]]\n is_lora: bool\n classifications: List[Classification]\n\n\nclass HuggingfaceDownloader:\n _base_url: str = \"https://huggingface.co\"\n _branch_pattern: Pattern = compile(r\"^[a-zA-Z0-9._-]+$\")\n _pytorch_pattern: Pattern = compile(r\"(pytorch|adapter)_model.*\\.bin\")\n _safetensors_pattern: Pattern = compile(r\".*\\.safetensors\")\n _pt_pattern: Pattern = compile(r\".*\\.pt\")\n _ggml_pattern: Pattern = compile(r\".*\\.(bin|gguf)\")\n _tokenizer_pattern: Pattern = compile(r\"(tokenizer|ice).*\\.model\")\n _text_pattern: Pattern = compile(r\".*\\.(txt|json|py|md)\")\n\n def __init__(\n self,\n model: str,\n branch: str = \"main\",\n threads: int = 1,\n base_folder: Optional[str] = None,\n clean: bool = False,\n check: bool = False,\n text_only: bool = False,\n start_from_scratch: bool = False,\n ) -> None:\n self.session = Session()\n user: Optional[str] = environ.get(\"HF_USER\")\n password: Optional[str] = environ.get(\"HF_PASS\")\n if user and password:\n self.session.auth = (user, password)\n\n # Cleaning up the model/branch names\n try:\n self._model = model\n self._branch = branch\n self.threads = threads\n self.base_folder = (\n Path(base_folder.lower()) if base_folder else None\n )\n self.clean = clean\n self.check = check\n self.text_only = text_only\n self.start_from_scratch = start_from_scratch\n self.progress_bar = None\n\n # Getting the model info from Huggingface\n self.hf_info: HuggingfaceModelInfo = (\n self._get_model_info_from_huggingface()\n )\n except ValueError as err_branch:\n logger.error(err_branch)\n raise\n except HTTPError as err_http:\n logger.error(err_http)\n raise\n\n @property\n def model(self) -> str:\n if self._model.endswith(\"/\"):\n return self._model.lower()[:-1]\n return self._model.lower()\n\n @property\n def branch(self) -> str:\n if not self._branch_pattern.match(self._branch or \"main\"):\n raise ValueError(\n \"Invalid branch name. Only alphanumeric characters, \"\n \"period, underscore and dash are allowed.\"\n )\n return self._branch.lower()\n\n @property\n def output_folder(self) -> Path:\n if self.base_folder is None:\n return (\n Path(\"models\")\n if not self.hf_info[\"is_lora\"]\n else Path(\"loras\")\n )\n return self.base_folder\n\n @classmethod\n def from_repository(\n cls,\n model: str, # e.g. 
\"facebook/opt-1.3b\"\n branch: str = \"main\",\n threads: int = 1,\n base_folder: Optional[str] = None,\n clean: bool = False,\n check: bool = False,\n text_only: bool = False,\n start_from_scratch: bool = False,\n ) -> \"HuggingfaceDownloader\":\n self = cls(\n model=model,\n branch=branch,\n threads=threads,\n base_folder=base_folder,\n clean=clean,\n check=check,\n text_only=text_only,\n start_from_scratch=start_from_scratch,\n )\n\n # Getting the output folder\n logger.info(\n \"Links:\"\n + \"\".join([f\"\\n- {link}\" for link in self.hf_info[\"links\"]])\n + \"\\n\"\n \"SHA256:\"\n + \"\".join(\n [\n f\"\\n- {fname}: {fhash}\"\n for fname, fhash in self.hf_info[\"sha256\"]\n ]\n )\n + \"\\n\"\n f\"Is LoRA: {self.hf_info['is_lora']}\\n\"\n f\"Output folder: {self.output_folder}\"\n )\n\n if self.check:\n # Check previously downloaded files\n self.check_model_files_by_sha256()\n else:\n # Download files\n self.download_model_files()\n return self\n\n def download_model_files(\n self, links: Optional[List[str]] = None\n ) -> None:\n # Creating the folder and writing the metadata\n output_folder: Path = self.output_folder\n output_folder.mkdir(parents=True, exist_ok=True)\n metadata: str = (\n f\"- url: {self._base_url}/{self.model}\\n\"\n f\"- branch: {self.branch}\\n\"\n f'- download date: {datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")}\\n' # noqa: E501\n )\n logger.info(f\"Downloading with metadata:\\n{metadata}\")\n\n sha256_str: str = \"\\n\".join(\n [f\" {item[1]} {item[0]}\" for item in self.hf_info[\"sha256\"]]\n )\n if sha256_str:\n metadata += f\"sha256sum:\\n{sha256_str}\"\n\n metadata += \"\\n\"\n (output_folder / \"huggingface-metadata.txt\").write_text(metadata)\n\n # Downloading the files\n logger.info(f\"Downloading the model to {output_folder}...\")\n self._start_download_threads(links=links or self.hf_info[\"links\"])\n\n def get_single_file(self, url: str) -> None:\n file_name = Path(url.rsplit(\"/\", 1)[1])\n output_path = self.output_folder / file_name\n headers: Dict[str, str] = {}\n mode: str = \"wb\"\n if output_path.exists() and not self.start_from_scratch:\n # Check if the file has already been downloaded completely\n response: Response = self.session.get(\n url, stream=True, timeout=20\n )\n total_size: int = int(response.headers.get(\"content-length\", 0))\n if output_path.stat().st_size >= total_size:\n logger.info(f\"{file_name} already exists. 
Skipping...\")\n return\n\n # Otherwise, resume the download from where it left off\n logger.info(\n f\"Resuming download of {file_name} \"\n f\"from {output_path.stat().st_size / 1024**2} MB \"\n f\"to {total_size / 1024**2} MB\"\n )\n headers = {\"Range\": f\"bytes={output_path.stat().st_size}-\"}\n mode = \"ab\"\n\n with self.session.get(\n url, stream=True, headers=headers, timeout=20\n ) as response:\n # Do not continue the download if the request was unsuccessful\n response.raise_for_status()\n total_size = int(response.headers.get(\"content-length\", 0))\n block_size: int = 1024 * 1024 # 1MB\n with open(output_path, mode) as f:\n t = (\n tqdm(\n total=total_size,\n unit=\"iB\",\n unit_scale=True,\n bar_format=(\n \"{l_bar}{bar}| \"\n \"{n_fmt:6}/{total_fmt:6} {rate_fmt:6}\"\n ),\n )\n if tqdm is not None\n else None\n )\n count: int = 0\n for data in response.iter_content(block_size):\n if t is not None:\n t.update(len(data))\n f.write(data)\n if total_size != 0 and self.progress_bar is not None:\n count += len(data)\n self.progress_bar(\n float(count) / float(total_size),\n f\"Downloading {file_name}\",\n )\n # tqdm 객체가 있으면 close 메서드를 호출합니다.\n if t is not None:\n t.close()\n\n def check_model_files_by_sha256(self) -> bool:\n # Validate the checksums\n is_validated: bool = True\n output_folder: Path = self.output_folder\n for single_sha256 in self.hf_info[\"sha256\"]:\n fname, fhash = single_sha256\n fpath = output_folder / Path(fname)\n\n if not fpath.exists():\n logger.info(f\"The following file is missing: {fpath}\")\n is_validated = False\n continue\n\n with open(output_folder / Path(fname), \"rb\") as f:\n real_hash = hashlib.sha256(f.read()).hexdigest()\n if real_hash != fhash:\n logger.info(f\"Checksum failed: {fname} {fhash}\")\n is_validated = False\n else:\n logger.info(f\"Checksum validated: {fname} {fhash}\")\n\n if is_validated:\n logger.info(\"[+] Validated checksums of all model files!\")\n else:\n logger.error(\n \"[-] Invalid checksums. 
Rerun downloader with the --clean flag\"\n )\n return is_validated\n\n def is_ggml(self, file_name: str) -> bool:\n return (\n self._ggml_pattern.match(file_name) is not None\n and self._pytorch_pattern.match(file_name) is None\n )\n\n def _start_download_threads(self, links: List[str]) -> None:\n if links is None:\n links = self.hf_info[\"links\"]\n if thread_map is not None:\n thread_map(\n lambda url: self.get_single_file(url),\n links,\n max_workers=min(self.threads, len(links)),\n disable=True,\n )\n else:\n from concurrent.futures import ThreadPoolExecutor, as_completed\n\n with ThreadPoolExecutor(\n max_workers=min(self.threads, len(links))\n ) as executor:\n [\n future.result()\n for future in as_completed(\n [\n executor.submit(self.get_single_file, url)\n for url in links\n ]\n )\n ]\n\n def _get_model_info_from_huggingface(self) -> HuggingfaceModelInfo:\n model, branch = self.model, self.branch\n page: str = f\"/api/models/{model}/tree/{branch}\"\n cursor: bytes = b\"\"\n\n links: List[str] = []\n file_names: List[str] = []\n sha256: List[List[str]] = []\n classifications: List[Classification] = []\n has_pytorch: bool = False\n has_pt: bool = False\n has_ggml: bool = False\n has_safetensors: bool = False\n is_lora: bool = False\n while True:\n url: str = f\"{self._base_url}{page}\" + (\n f\"?cursor={cursor.decode()}\" if cursor else \"\"\n )\n response: Response = self.session.get(url, timeout=20)\n response.raise_for_status()\n content: bytes = response.content\n\n json_decoded: dict = json.loads(content)\n if not json_decoded:\n break\n\n for json_idx in range(len(json_decoded)):\n file_name: str = json_decoded[json_idx][\"path\"]\n file_names.append(file_name)\n if file_name.endswith(\n (\"adapter_config.json\", \"adapter_model.bin\")\n ):\n is_lora = True\n\n (\n is_pytorch,\n is_safetensors,\n is_pt,\n is_possibly_ggml,\n is_tokenizer,\n is_text,\n ) = (\n self._pytorch_pattern.match(file_name),\n self._safetensors_pattern.match(file_name),\n self._pt_pattern.match(file_name),\n self._ggml_pattern.match(file_name),\n self._tokenizer_pattern.match(file_name),\n self._text_pattern.match(file_name),\n )\n\n if is_text is None:\n is_text = is_tokenizer\n if any(\n (\n is_pytorch,\n is_safetensors,\n is_pt,\n is_possibly_ggml,\n is_tokenizer,\n is_text,\n )\n ):\n if \"lfs\" in json_decoded[json_idx]:\n sha256.append(\n [file_name, json_decoded[json_idx][\"lfs\"][\"oid\"]]\n )\n\n if is_text:\n links.append(\n f\"https://huggingface.co/{model}/resolve/{branch}/{file_name}\" # noqa: E501\n )\n classifications.append(\"text\")\n continue\n\n if not self.text_only:\n links.append(\n f\"https://huggingface.co/{model}/resolve/{branch}/{file_name}\" # noqa: E501\n )\n if is_safetensors:\n has_safetensors = True\n classifications.append(\"safetensors\")\n elif is_pytorch:\n has_pytorch = True\n classifications.append(\"pytorch\")\n elif is_pt:\n has_pt = True\n classifications.append(\"pt\")\n elif is_possibly_ggml and not is_pytorch:\n has_ggml = True # noqa: F841\n classifications.append(\"ggml\")\n\n cursor = base64.b64encode(\n (\n base64.b64encode(\n f'{{\"file_name\":\"{json_decoded[-1][\"path\"]}\"}}'.encode() # noqa: E501\n )\n + b\":50\"\n )\n ).replace(b\"=\", b\"%3D\")\n\n # If both pytorch and safetensors are available,\n # download safetensors only\n if (has_pytorch or has_pt) and has_safetensors:\n for json_idx in range(len(classifications) - 1, -1, -1):\n if classifications[json_idx] in (\"pytorch\", \"pt\"):\n links.pop(json_idx)\n return HuggingfaceModelInfo(\n 
links=links,\n file_names=file_names,\n sha256=sha256,\n is_lora=is_lora,\n classifications=classifications,\n )\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"model\",\n type=str,\n default=None,\n nargs=\"?\",\n help=\"The model you'd like to download. e.g. facebook/opt-1.3b\",\n )\n\n parser.add_argument(\n \"--branch\",\n type=str,\n default=\"main\",\n help=\"Name of the Git branch to download from.\",\n )\n parser.add_argument(\n \"--threads\",\n type=int,\n default=1,\n help=\"Number of files to download simultaneously.\",\n )\n parser.add_argument(\n \"--text-only\",\n action=\"store_true\",\n help=\"Only download text files (txt/json).\",\n )\n parser.add_argument(\n \"--output\",\n type=str,\n default=None,\n help=\"The folder where the model should be saved.\",\n )\n parser.add_argument(\n \"--clean\",\n action=\"store_true\",\n help=\"Does not resume the previous download.\",\n )\n parser.add_argument(\n \"--check\",\n action=\"store_true\",\n help=\"Validates the checksums of model files.\",\n )\n args = parser.parse_args()\n\n if args.model is None:\n parser.error(\n \"Error: Please specify the model you'd like to download \"\n \"(e.g. 'python download-model.py facebook/opt-1.3b').\"\n )\n\n HuggingfaceDownloader.from_repository(\n model=args.model,\n branch=args.branch,\n threads=args.threads,\n base_folder=args.output,\n clean=args.clean,\n check=args.check,\n text_only=args.text_only,\n )\n","repo_name":"c0sogi/llama-api","sub_path":"llama_api/utils/huggingface_downloader.py","file_name":"huggingface_downloader.py","file_ext":"py","file_size_in_byte":16870,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"60"} +{"seq_id":"39501841950","text":"\"\"\"empty message\n\nRevision ID: eed07ef400ae\nRevises: 4d40aaa81c6c\nCreate Date: 2023-09-06 20:26:24.722859\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'eed07ef400ae'\ndown_revision = '4d40aaa81c6c'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('user', schema=None) as batch_op:\n batch_op.add_column(sa.Column('profile_image_url', sa.String(length=255), nullable=True))\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n with op.batch_alter_table('user', schema=None) as batch_op:\n batch_op.drop_column('profile_image_url')\n\n # ### end Alembic commands ###\n","repo_name":"metantonio/nombre-proyecto-final-cr2","sub_path":"migrations/versions/eed07ef400ae_.py","file_name":"eed07ef400ae_.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"60"} +{"seq_id":"34961084193","text":"# To add a new cell, type '# %%'\n# To add a new markdown cell, type '# %% [markdown]'\n\nfrom typing import List\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport os\nimport sys\nimport glob\nimport re\nimport copy\n\nimport settings\nfrom settings import QUERY_ARGS\n\n\nclass Analyzer:\n\n CSV_SPECIFIER = \"\"\n\n # ATTENTION: This structure must be the same with generate_queries\n queryArg = copy.deepcopy(settings.QUERY_ARGS)\n\n # Column names that all queries have in common\n mainHeader = settings.MAIN_HEADER\n\n # Specific column names per Query\n headers = settings.HEADER_PER_QUERY\n # how many columns are related for rates.\n ratesPerQuery = settings.RATES_PER_QUERY\n\n def __init__(self, specifier, queryName=\"all\", action=None):\n valid_specifiers = settings.VALID_CSV_SPECIFIERS\n PATH = settings.PATH\n if specifier:\n self.fileName = PATH + queryName + \"_\" + self.CSV_SPECIFIER + \".csv\"\n else:\n self.fileName = PATH + queryName + \".csv\"\n if specifier not in valid_specifiers:\n print(specifier + \" is not a valid specifier @ analyzer.py\")\n exit(-1)\n\n if queryName == \"all\": \n # not using settings.PATH because vscode has different relative paths.\n for file in glob.glob(settings.PATH + \"/*_\"+ specifier + \".csv\"):\n self.fileName = file\n self.queryName = os.path.basename(file).split(\"_\")[0] # remove extension\n print(self.fileName)\n print(self.queryName)\n print(action)\n self.setQuery()\n if action == \"scatter\":\n if \"equal\" in specifier:\n self.analyzeDistributionParallelism()\n elif \"metric\" in specifier:\n self.analyzeDistribution(False)\n elif queryName in self.headers:\n print(\"Reading file: \" + self.fileName)\n self.queryName = queryName\n self.setQuery()\n if action == 'extract':\n pass\n #self.calcEfficientMetrics()\n elif action == \"scatter\":\n if \"equal\" in specifier:\n #self.analyzeParallelization(True)\n # self.analyzeDistributionEqual()\n self.analyzeDistributionParallelism()\n elif \"metric\" in specifier:\n #self.analyzeParallelization(False)\n self.analyzeDistribution(False)\n elif specifier == \"\":\n self.analyzeDistributionEqual()\n elif action == \"test\":\n df = self.aggregate_func(\n self.data[(self.data[\"total_op_instances\"] == 2.0)\n & (self.data[\"operator\"] == \"Mapper\")], 2)\n print(df.tail(15))\n else:\n print(\"Inserted invalid action [\" + action +\n \"] in analyzer.py\")\n\n def setQuery(self):\n \"\"\" Read the corresponding csv file for the set queryName\n Store the results in a pandas.Dataframe and filter out duplicate/NaN rows \"\"\"\n print(self.fileName)\n self.data = pd.read_csv(self.fileName,\n names=self.mainHeader[:2] +\n self.headers[self.queryName] +\n self.mainHeader[2:],delimiter=',', index_col=False)\n print(self.mainHeader[:2] +\n self.headers[self.queryName] +\n self.mainHeader[2:])\n print(\"Starting df len:\" + str(len(self.data)))\n print(self.data.head())\n # remove any row that has a NaN value. 
Removing the rows with no results.\n # Bug on the my_db.py or monitor.py.\n self.data = self.data.dropna()\n print(\"REMOVED NA. Current df len: \" + str(len(self.data)))\n\n \n def aggregate_func(self, curDF, parallelismVal):\n uniqueParallel = curDF[\"total_op_instances\"].unique()\n print(uniqueParallel)\n \"\"\"\n Aggregate all the parallel instances of an operator.\n DF: must be related to only 1 operator with a specific parallelism value.\n \"\"\"\n if len(uniqueParallel) != 1 and uniqueParallel[0] != [parallelismVal]:\n print(\n \"Aggragate function error. Dataframe contains more parallel instances than the given \"\n + str(parallelismVal))\n raise Exception(\"Wrong input in aggregate_func\")\n columns = list(curDF.columns)\n aggregation_functions = {c: \"first\" for c in columns[:-4]}\n for c in columns[-4:]:\n aggregation_functions[c] = \"sum\"\n aggregation_functions[\"op_instance_id\"] = \"sum\"\n aggregation_functions[\"total_op_instances\"] = \"sum\"\n\n sortingList = [\"id\", \"epoch_timestamp\"]\n sortingList.extend(settings.RATES_NAMES_PER_QUERY[self.queryName])\n #sortingList.append(\"op_instance_id\")\n\n sortDF = curDF.sort_values(by=sortingList)\n sortDF = sortDF.reset_index(drop=True)\n # aggregate. We observe that op_instance_id is 6 (1+2+3) and total_op_instances is 9 (3+3+3) as we wanted\n return sortDF.groupby(\n sortDF.index //\n parallelismVal).aggregate(aggregation_functions).reindex(\n columns=curDF.columns)\n \"\"\"\n def calcEfficientMetrics(self, percent=0.05):\n \\\"\"\"\n Looks the {percent} of the maximun output rates and saves the mean input values of paralelization and rates.\n # WRONG WAY of calculate efficient metrics since we care about all the spectrum of the data not the 5% of the best.\n \\\"\"\"\n startIndex = len(self.queryArg[self.queryName]) + 4\n uniqueOperators = self.data[\"operator\"].unique()\n file_path = settings.PATH + \"plots/\" + self.queryName + \"/\" + \"bestResults_\" + self.CSV_SPECIFIER + \".txt\"\n skipSet = set({0, 'NaN', 'nan'})\n with open(file_path, 'w+') as f:\n for operator in sorted(uniqueOperators):\n df = self.data[self.data[\"operator\"] == operator]\n f.write(\"OPERATOR__ID : \" + operator + \"\\n\")\n print(\"OPERATOR__ID : \" + operator)\n for i in range(startIndex, startIndex + 4):\n currentOutputRateName = df.columns[i]\n bestDF = df.sort_values(by=currentOutputRateName,\n ascending=False)\n uniqueValues = int(\n sum(bestDF[currentOutputRateName].unique()))\n if uniqueValues in skipSet:\n continue\n print(\"------------\" + currentOutputRateName +\n \"------------\")\n print(uniqueValues)\n print(len(bestDF))\n bestDF = bestDF.head(round(percent * len(bestDF)))\n print(len(bestDF))\n curOutputRateMeanVal = round(\n bestDF[currentOutputRateName].mean(), 2)\n f.write(\"--------\" + currentOutputRateName + \" == \" +\n str(curOutputRateMeanVal) + \"--------\" + \"\\n\")\n #top20Percent\n print(currentOutputRateName)\n for key in self.headers[self.queryName]:\n mean = round(bestDF[key].mean(), 2)\n result = f'\\t\\t{key} == {mean} \\n'\n f.write(result)\n f.write(\"\\n\")\n print()\n\n def analyzeRates(self):\n data = self.data\n uniqueOperators = data[\"operator\"].unique()\n print(uniqueOperators)\n for operator in uniqueOperators:\n data = self.data[self.data[\"operator\"] == operator]\n for rate in range(self.ratesPerQuery[self.queryName]):\n uniqueRates = data.iloc[:, rate + 1].unique()\n uniqueRates = np.sort(uniqueRates)\n meanTrueProc = []\n meanTrueOut = []\n meanObservedOut = []\n meanObservedProc = []\n 
for i, rateValue in enumerate(uniqueRates):\n meanDF = data[data.iloc[:, rate + 1] == rateValue]\n if self.ratesPerQuery[self.queryName] > 1:\n if (len(meanDF.iloc[:, rate if rate ==\n uniqueRates[-1] else rate +\n 2].unique()) > 1):\n uniqueRates = np.delete(uniqueRates, i)\n continue\n meanDFtrueProc = meanDF[\"true_processing_rate\"].mean()\n meanDFtrueOut = meanDF[\"true_output_rate\"].mean()\n meanDFobservedOut = meanDF[\"observed_output_rate\"].mean()\n meanDFobservedProc = meanDF[\n \"observed_processing_rate\"].mean()\n meanTrueOut.append(meanDFtrueOut)\n meanTrueProc.append(meanDFtrueProc)\n meanObservedOut.append(meanDFobservedOut)\n meanObservedProc.append(meanDFobservedProc)\n self.plot_graph(\n f\"{operator}__{data.columns[rate+1]}_TrueOutput\",\n data.columns[rate + 1],\n \"true_output_rate\",\n uniqueRates,\n meanTrueOut,\n )\n self.plot_graph(\n f\"{operator}__{data.columns[rate+1]}_TrueProc\",\n data.columns[rate + 1],\n \"true_processing_rate\",\n uniqueRates,\n meanTrueProc,\n )\n self.plot_graph(\n f\"{operator}__{data.columns[rate+1]}_ObservedOutput\",\n data.columns[rate + 1],\n \"observed_output_rate\",\n uniqueRates,\n meanObservedOut,\n )\n self.plot_graph(\n f\"{operator}__{data.columns[rate+1]}_ObservedProc\",\n data.columns[rate + 1],\n \"observed_processing_rate\",\n uniqueRates,\n meanObservedProc,\n )\n\n def analyzeParallelization(self, equalParallelizationOperators=False):\n data = self.data\n uniqueOperators = data[\"operator\"].unique()\n numberOfOperators = (len(data.columns) - len(self.mainHeader) -\n self.ratesPerQuery[self.queryName])\n\n for operator in uniqueOperators:\n operatorDF = data[data[\"operator\"] == operator]\n print(\"SELECTED operator:\" + operator)\n parallelOperators = [i for i in range(1, numberOfOperators + 1)]\n # queries generated with the same parallelization values for all operators\n uniqueParallelizationArray = operatorDF.iloc[:, self.ratesPerQuery[\n self.queryName] + parallelOperators[0]].unique()\n print(uniqueParallelizationArray)\n\n for parallelColumn in parallelOperators:\n meanTrueProc = [[]\n for i in range(len(uniqueParallelizationArray))\n ]\n meanTrueOut = [[]\n for i in range(len(uniqueParallelizationArray))]\n meanObservedOut = [\n [] for i in range(len(uniqueParallelizationArray))\n ]\n meanObservedProc = [\n [] for i in range(len(uniqueParallelizationArray))\n ]\n uniqueRatesList = [\n [] for i in range(len(uniqueParallelizationArray))\n ]\n for parallelIndex, parallelizationValue in enumerate(\n uniqueParallelizationArray):\n parallelOperatorName = operatorDF.columns[\n self.ratesPerQuery[self.queryName] + parallelColumn]\n df = None\n if equalParallelizationOperators:\n for parallelColumn in parallelOperators:\n parallelOperatorName = operatorDF.columns[\n self.ratesPerQuery[self.queryName] +\n parallelColumn]\n print(\"OPE_PARAL\" + str(parallelColumn) + \" :\" +\n parallelOperatorName + \"==\" +\n str(parallelizationValue))\n df = operatorDF[\n operatorDF.\n iloc[:, self.ratesPerQuery[self.queryName] +\n parallelColumn] == parallelizationValue]\n else:\n # Incrementing only one operator at a time and keep the rest equal to 1\n print(\"OPE_PARAL:\" + parallelOperatorName + \"==\" +\n str(parallelizationValue))\n df = operatorDF[\n operatorDF.\n iloc[:, self.ratesPerQuery[self.queryName] +\n parallelColumn] == parallelizationValue]\n rateName = \"\"\n if not df:\n continue\n for rate in range(1,\n self.ratesPerQuery[self.queryName] + 1):\n rateName = df.columns[rate]\n print(\"SELECTED RATE:\" + rateName)\n 
uniqueRates = df.iloc[:, rate].unique()\n uniqueRates = np.sort(uniqueRates)\n if len(uniqueRates) == 0:\n print(f\"{operator}:: {rateName} NO VALUES?\")\n uniqueRatesList[parallelIndex].extend(\n uniqueRates.tolist())\n for i, rateValue in enumerate(uniqueRates):\n meanDF = df[df.iloc[:, rate] == rateValue]\n secondRate = meanDF.iloc[:, rate +\n 1 if rate == 1 else rate -\n 1].unique()\n if self.ratesPerQuery[self.queryName] > 1:\n equalToSecondRate = self.queryArg[\n self.queryName][list(\n self.queryArg[self.queryName])\n [rate if rate == 1 else 0]]\n meanDF = meanDF[\n meanDF.iloc[:, rate +\n 1 if rate == 1 else rate -\n 1] == equalToSecondRate]\n cn = meanDF.columns[rate +\n 1 if rate == 1 else rate -\n 1]\n uniqueSRate = meanDF.iloc[:,\n rate + 1 if rate ==\n 1 else rate -\n 1].unique()\n print(\n f\"{operator}:: {parallelOperatorName}=={parallelizationValue} && {rateName} == {rateValue} {equalToSecondRate} == {cn} || {uniqueSRate}\"\n )\n\n # print(meanDF.iloc[:, rate +1 if rate == 1 else rate -1].unique()[:10])\n # print(meanDF.head(20))\n # print(meanDF)\n # print()\n # if self.ratesPerQuery[self.queryName] > 1:\n # if len(meanDF.iloc[:, rate if rate == uniqueRates[-1] else rate + 2].unique()) > 1:\n # uniqueRates = np.delete(uniqueRates, i)\n # continue\n meanDFtrueProc = meanDF[\n \"true_processing_rate\"].mean()\n meanDFtrueOut = meanDF[\"true_output_rate\"].mean()\n meanDFobservedOut = meanDF[\n \"observed_output_rate\"].mean()\n meanDFobservedProc = meanDF[\n \"observed_processing_rate\"].mean()\n meanTrueOut[parallelIndex].append(meanDFtrueOut)\n meanTrueProc[parallelIndex].append(meanDFtrueProc)\n meanObservedOut[parallelIndex].append(\n meanDFobservedOut)\n meanObservedProc[parallelIndex].append(\n meanDFobservedProc)\n if equalParallelizationOperators:\n parallelOperatorName = \"\"\n else:\n parallelOperatorName = parallelOperatorName[:6]\n rateName = rateName[:8]\n operator = operator[:8]\n self.multiple_lines_plot(\n f\"{operator}__{rateName}_{parallelOperatorName}_TrueOutput\",\n rateName,\n \"MeanTrueOutput\",\n uniqueRatesList,\n meanTrueOut,\n )\n self.multiple_lines_plot(\n f\"{operator}__{rateName}_{parallelOperatorName}_TrueProc\",\n rateName,\n \"MeanTrueProcessing\",\n uniqueRatesList,\n meanTrueProc,\n )\n self.multiple_lines_plot(\n f\"{operator}__{rateName}_{parallelOperatorName}_ObservedOutput\",\n rateName,\n \"MeanObservedOutput\",\n uniqueRatesList,\n meanObservedOut,\n )\n self.multiple_lines_plot(\n f\"{operator}__{rateName}_{parallelOperatorName}_ObservedProc\",\n rateName,\n \"MeanObservedProcessing\",\n uniqueRatesList,\n meanObservedProc,\n )\n # All parallelizationOperators are equal so do not loop through again\n if equalParallelizationOperators:\n break\n\n def analyzeDistributionEqual(self):\n print(\"EQUAL DISTRIBUTION\")\n data = self.data\n uniqueOperators = data[\"operator\"].unique()\n numberOfOperators = (len(data.columns) - len(self.mainHeader) -\n self.ratesPerQuery[self.queryName])\n\n for operator in uniqueOperators:\n print(\"SELECTED operator:\" + operator)\n operatorDF = data[data[\"operator\"] == operator]\n\n parallelOperators = [i for i in range(1, numberOfOperators + 1)]\n for parallelColumn in parallelOperators:\n uniqueParallelizationArray = \\\n operatorDF.iloc[:, self.ratesPerQuery[self.queryName] +parallelOperators[0]].unique()\n uniqueTrueProc = [\n [] for i in range(len(uniqueParallelizationArray))\n ]\n uniqueTrueOut = [\n [] for i in range(len(uniqueParallelizationArray))\n ]\n uniqueObservedOut = [\n [] for i in 
range(len(uniqueParallelizationArray))\n ]\n uniqueObservedProc = [\n [] for i in range(len(uniqueParallelizationArray))\n ]\n for parallelIndex, parallelizationValue in enumerate(\n uniqueParallelizationArray):\n parallelOperatorName = operatorDF.columns[\n self.ratesPerQuery[self.queryName] + parallelColumn]\n df = None\n # assign the selected parallelism value to all operators\n for parallelColumn in parallelOperators:\n parallelOperatorName = operatorDF.columns[\n self.ratesPerQuery[self.queryName] +\n parallelColumn]\n print(\"PARALLELISM:: \" + str(parallelColumn) + \" :\" +\n parallelOperatorName + \"==\" +\n str(parallelizationValue))\n df = operatorDF[\n operatorDF.\n iloc[:, self.ratesPerQuery[self.queryName] +\n parallelColumn] == parallelizationValue]\n\n rateName = \"\"\n print(len(df))\n for primaryRate in range(\n 1, self.ratesPerQuery[self.queryName] + 1):\n rateName = df.columns[primaryRate]\n print(\"SELECTED RATE:\" + rateName)\n\n uniqueRates = df.iloc[:, primaryRate].unique()\n uniqueRates = np.sort(uniqueRates)\n\n # NOTE:\n if len(uniqueRates) == 0:\n print(f\"{operator}:: {rateName} NO VALUES?\")\n\n # uniqueRates = [\n # uniqueRates[index]\n # for index in range(len(uniqueRates))\n # if index % 2 != 0\n # ]\n\n for i, rateValue in enumerate(uniqueRates):\n meanDF = df[df.iloc[:, primaryRate] == rateValue]\n # secondRate = meanDF.iloc[:, primaryRate +\n # 1 if primaryRate == 1 else primaryRate -\n # 1].unique()\n\n # Reduce noise - Select the default values for the rest rates\n for secondaryRate in range(\n 1, self.ratesPerQuery[self.queryName] + 1):\n if secondaryRate == primaryRate:\n continue\n keys = list(\n self.queryArg[self.queryName].keys())\n print(\"Secondrate selected :\" +\n keys[secondaryRate - 1] + \"==\" +\n str(self.queryArg[self.queryName][keys[\n secondaryRate - 1]]) + \" len of df\" +\n str(len(meanDF)))\n meanDF = meanDF[meanDF.iloc[:,\n secondaryRate] ==\n self.queryArg[self.queryName][\n keys[secondaryRate - 1]]]\n print(len(meanDF))\n\n parallelOperatorName = \"Parallelism\"\n rateName = rateName[:8]\n operator = operator[:8]\n uniqueDFtrueProc = meanDF[\n \"true_processing_rate\"].unique()\n uniqueDFtrueOut = meanDF[\n \"true_output_rate\"].unique()\n uniqueDFobservedOut = meanDF[\n \"observed_output_rate\"].unique()\n uniqueDFobservedProc = meanDF[\n \"observed_processing_rate\"].unique()\n #print(uniqueDFobservedProc)\n uniqueTrueOut[parallelIndex].extend(\n uniqueDFtrueOut)\n uniqueTrueProc[parallelIndex].extend(\n uniqueDFtrueProc)\n uniqueObservedOut[parallelIndex].extend(\n uniqueDFobservedOut)\n uniqueObservedProc[parallelIndex].extend(\n uniqueDFobservedProc)\n\n legend = [\n \"v=\" + str(i) for i in uniqueParallelizationArray\n ]\n\n self.plot_scatter_ecdf(\n uniqueTrueOut,\n \"TrueOutput\",\n f\"{operator}__{rateName}_{parallelOperatorName}={parallelizationValue}_MeanTrueOutput\",\n legend=legend)\n self.plot_scatter_ecdf(\n uniqueTrueProc,\n \"TrueProcessing\",\n f\"{operator}__{rateName}_{parallelOperatorName}={parallelizationValue}_TrueProcessing\",\n legend=legend)\n self.plot_scatter_ecdf(\n uniqueObservedOut,\n \"ObservedOutput\",\n f\"{operator}__{rateName}_{parallelOperatorName}={parallelizationValue}_ObservedOutput\",\n legend=legend)\n self.plot_scatter_ecdf(\n uniqueObservedProc,\n \"ObservedProcessing\",\n f\"{operator}__{rateName}_{parallelOperatorName}={parallelizationValue}_ObservedProcessing\",\n legend=legend)\n break\n \"\"\"\n\n def analyzeDistribution(self, equalParallelizationOperators=False):\n data = 
self.data\n uniqueOperators = data[\"operator\"].unique()\n numberOfOperators = (len(settings.QUERY_ARGS[self.queryName]) -\n self.ratesPerQuery[self.queryName])\n #print(data.head(5))\n #print(numberOfOperators)\n\n for operator in uniqueOperators:\n print(\"SELECTED operator:\" + operator)\n operatorDF = data[data[\"operator\"] == operator]\n columnsBeforeInput = 2 # operator + id = 2\n parallelOperators = [\n i for i in range(columnsBeforeInput, columnsBeforeInput +\n numberOfOperators)\n ]\n print(parallelOperators)\n # Select all operators\n for parallelColumn in parallelOperators:\n uniqueParallelizationArray = \\\n operatorDF.iloc[:,self.ratesPerQuery[self.queryName] + parallelColumn].unique()\n print(\n f'{operatorDF.columns[parallelColumn+self.ratesPerQuery[self.queryName]]} == {uniqueParallelizationArray}'\n )\n for parallelIndex, parallelizationValue in enumerate(\n uniqueParallelizationArray):\n parallelOperatorName = operatorDF.columns[\n self.ratesPerQuery[self.queryName] + parallelColumn]\n df = None\n\n # Incrementing only one operator at a time and keep the rest equal to 1\n print(\"PARALLELISM:: \" + parallelOperatorName + \"==\" +\n str(parallelizationValue))\n\n df = operatorDF[\n operatorDF.iloc[:, self.ratesPerQuery[self.queryName] +\n parallelColumn] ==\n parallelizationValue]\n # for parallelColumn2 in parallelOperators:\n # if parallelColumn2 == parallelColumn:\n # continue\n # parallelOperatorName = operatorDF.columns[\n # self.ratesPerQuery[self.queryName] +\n # parallelColumn2]\n # print(\"PARALLELISM:: \" + str(parallelColumn2) + \" :\" +\n # parallelOperatorName + \"== 1\")\n # df = df[df.iloc[:, self.ratesPerQuery[self.queryName] +\n # parallelColumn2] == 1]\n\n rateName = \"\"\n\n for primaryRate in range(\n columnsBeforeInput, columnsBeforeInput +\n self.ratesPerQuery[self.queryName]):\n rateName = df.columns[primaryRate]\n print(\"SELECTED RATE:\" + rateName)\n\n uniqueRates = data.iloc[:, primaryRate].unique()\n\n uniqueRates = sorted(uniqueRates)\n\n if len(uniqueRates) == 0:\n print(f\"{operator}:: {rateName} NO VALUES?\")\n # uniqueRates = [\n # self.queryArg[self.queryName][list(self.queryArg[\n # self.queryName].keys())[primaryRate - 1]] * i\n # for i in range(1, 11)\n # ]\n print()\n print(uniqueRates)\n print()\n\n uniqueTrueProc = [[] for i in range(len(uniqueRates))]\n uniqueTrueOut = [[] for i in range(len(uniqueRates))]\n uniqueObservedOut = [[]\n for i in range(len(uniqueRates))]\n uniqueObservedProc = [[]\n for i in range(len(uniqueRates))]\n\n for i, rateValue in enumerate(uniqueRates):\n meanDF = df[df.iloc[:, primaryRate] == rateValue]\n if meanDF[\"total_op_instances\"].unique()[0] > 1.0:\n print(\"Aggregated from: \" + str(len(meanDF)))\n meanDF = self.aggregate_func(\n meanDF,\n meanDF[\"total_op_instances\"].unique()[0])\n print(\n \"Len : \" + str(len(meanDF)) + \" \" +\n str(meanDF[\"total_op_instances\"].unique()))\n # secondRate = meanDF.iloc[:, primaryRate +\n # 1 if primaryRate == 1 else primaryRate -\n # 1].unique()\n\n # Reduce noise - Select the default values for the rest rates\n # for secondaryRate in range(\n # 1, self.ratesPerQuery[self.queryName] + 1):\n # if secondaryRate == primaryRate:\n # continue\n # keys = list(\n # self.queryArg[self.queryName].keys())\n # print(\"Secondrate selected :\" +\n # keys[secondaryRate - 1] + \"==\" +\n # str(self.queryArg[self.queryName][keys[\n # secondaryRate - 1]]) +\n # \" len of df: \" + str(len(meanDF)))\n # meanDF = meanDF[meanDF.iloc[:,\n # secondaryRate] ==\n # 
self.queryArg[self.queryName][\n # keys[secondaryRate - 1]]]\n\n parallelOperatorName = parallelOperatorName[:6]\n rateName = rateName[:10]\n operator = operator[:8] + \"OP\"\n uniqueDFtrueProc = meanDF[\"true_proc_rate\"].tolist(\n )\n uniqueDFtrueOut = meanDF[\n \"true_output_rate\"].tolist()\n uniqueDFobservedOut = meanDF[\n \"observed_output_rate\"].tolist()\n uniqueDFobservedProc = meanDF[\n \"observed_proc_rate\"].tolist()\n\n uniqueTrueOut[i].extend(uniqueDFtrueOut)\n uniqueTrueProc[i].extend(uniqueDFtrueProc)\n uniqueObservedOut[i].extend(uniqueDFobservedOut)\n uniqueObservedProc[i].extend(uniqueDFobservedProc)\n\n legend = [\"v=\" + str(i) for i in uniqueRates]\n self.plot_scatter_ecdf(\n uniqueTrueOut,\n \"TrueOutput [rec/ut]\",\n f\"{operator}__{rateName}_{parallelOperatorName}={parallelizationValue}_TrueOutput\",\n legend=legend)\n self.plot_scatter_ecdf(\n uniqueTrueProc,\n \"TrueProcessing [rec/ut]\",\n f\"{operator}__{rateName}_{parallelOperatorName}={parallelizationValue}_TrueProcessing\",\n legend=legend)\n self.plot_scatter_ecdf(\n uniqueObservedOut,\n \"ObservedOutput [rec/ot]\",\n f\"{operator}__{rateName}_{parallelOperatorName}={parallelizationValue}_ObservedOutput\",\n legend=legend)\n self.plot_scatter_ecdf(\n uniqueObservedProc,\n \"ObservedProcessing [rec/ot]\",\n f\"{operator}__{rateName}_{parallelOperatorName}={parallelizationValue}_ObservedProcessing\",\n legend=legend)\n # All parallelizationOperators are equal so do not loop through again\n if equalParallelizationOperators:\n break\n def analyzeDistributionParallelism(self):\n print(\"EQUAL PARALLELISM DISTRIBUTION\")\n data = self.data\n uniqueOperators = data[\"operator\"].unique()\n numberOfOperators = (len(settings.QUERY_ARGS[self.queryName]) -\n self.ratesPerQuery[self.queryName])\n\n for operator in uniqueOperators:\n print(\"SELECTED operator:\" + operator)\n operatorDF = data[data[\"operator\"] == operator]\n\n columnsBeforeInput = 2 # operator + id = 2\n parallelOperators = [\n i for i in range(columnsBeforeInput, columnsBeforeInput +\n numberOfOperators)\n ]\n uniqueParallelizationArray = \\\n operatorDF.iloc[:, self.ratesPerQuery[self.queryName] +parallelOperators[0]].unique()\n uniqueTrueProc = [[]\n for i in range(len(uniqueParallelizationArray))]\n uniqueTrueOut = [[]\n for i in range(len(uniqueParallelizationArray))]\n uniqueObservedOut = [\n [] for i in range(len(uniqueParallelizationArray))\n ]\n uniqueObservedProc = [\n [] for i in range(len(uniqueParallelizationArray))\n ]\n for parallelIndex, parallelValue in enumerate(\n uniqueParallelizationArray):\n df = operatorDF[\n operatorDF.iloc[:, self.ratesPerQuery[self.queryName] +\n parallelOperators[0]] == parallelValue]\n\n for no in range(1, len(parallelOperators)):\n df = df[df.iloc[:, self.ratesPerQuery[self.queryName] +\n parallelOperators[no]] == parallelValue]\n parName1 = data.columns[self.ratesPerQuery[self.queryName]\n + parallelOperators[no]]\n print(f'{parName1}#{no} == {parallelValue}')\n\n parName = data.columns[self.ratesPerQuery[self.queryName] +\n parallelOperators[0]]\n print(f'{parName} == {parallelValue}')\n dfNew = None\n dfNew = self.aggregate_func(df, parallelValue)\n print(\"total instances \" +\n str(dfNew[\"total_op_instances\"].unique()))\n #operator = operator[:8]\n uniqueDFtrueOut = None\n uniqueDFtrueProc = None\n uniqueDFobservedOut = None\n uniqueDFobservedProc = None\n \n uniqueDFtrueProc = dfNew[\"true_proc_rate\"].tolist()\n uniqueDFtrueOut = dfNew[\"true_output_rate\"].tolist()\n uniqueDFobservedOut = 
dfNew[\"observed_output_rate\"].tolist()\n uniqueDFobservedProc = dfNew[\"observed_proc_rate\"].tolist()\n for i, v in enumerate(uniqueDFtrueOut):\n if v > 1.5 * 10**7:\n uniqueDFtrueOut.pop(i)\n uniqueTrueOut[parallelIndex].extend(uniqueDFtrueOut)\n uniqueTrueProc[parallelIndex].extend(uniqueDFtrueProc)\n uniqueObservedOut[parallelIndex].extend(uniqueDFobservedOut)\n uniqueObservedProc[parallelIndex].extend(uniqueDFobservedProc)\n uniqueDFtrueOut = None\n uniqueDFtrueProc = None\n uniqueDFobservedOut = None\n uniqueDFobservedProc = None\n print()\n\n parallelOperatorName = \"Parallelism\"\n legend = [\"v=\" + str(i) for i in uniqueParallelizationArray]\n\n self.plot_scatter_ecdf(uniqueTrueOut,\n \"TrueOutput [rec/ut]\",\n f\"{operator}_TrueOutput\",\n legend=legend,\n path=\"parallelism\")\n self.plot_scatter_ecdf(uniqueTrueProc,\n \"TrueProcessing [rec/ut]\",\n f\"{operator}_TrueProcessing\",\n legend=legend,\n path=\"parallelism\")\n self.plot_scatter_ecdf(uniqueObservedOut,\n \"ObservedOutput [rec/ot]\",\n f\"{operator}_ObservedOutput\",\n legend=legend,\n path=\"parallelism\")\n self.plot_scatter_ecdf(uniqueObservedProc,\n \"ObservedProcessing [rec/ot]\",\n f\"{operator}_ObservedProcessing\",\n legend=legend,\n path=\"parallelism\")\n\n def plot_graph(self,\n title,\n x_axis,\n y_axis,\n x: List[int],\n y: List[int],\n save=True):\n \"\"\"\n Plot a graph based on x,y arrays\n Store result in PLOT_DIR\n \"\"\"\n if sum(x) == 0 or sum(y) == 0 or len(x) < 3 or len(y) < 3:\n print(title + str(x) + str(y))\n return None\n plt.clf()\n fig, ax = plt.subplots()\n\n plt.plot(x, y)\n\n plt.xlabel(x_axis)\n plt.ylabel(y_axis)\n\n plt.title(title)\n plt.tight_layout()\n if save == True:\n path = settings.PLOT_DIR + self.queryName + \"_rate/\"\n self.check_path(path)\n plt.savefig(path + self.checkStr(title) + \".png\")\n else:\n plt.show()\n plt.close(fig)\n\n def multiple_lines_plot(self,\n title,\n x_axis,\n y_axis,\n x: List[List[int]],\n y: List[List[int]],\n save=True):\n \"\"\"\n Plot a graph based on x,y List of lists\n Each line represented by x-y sublists are on the same plot.\n Store result in PLOT_DIR\n \"\"\"\n fig, ax = plt.subplots()\n\n if len(x) != len(y):\n print(\"x and y must be same length\")\n return\n for n in range(len(x)):\n if sum(x[n]) == 0 or sum(y[n]) == 0:\n return\n plt.plot(x[n], y[n])\n\n plt.xlabel(x_axis)\n plt.ylabel(y_axis)\n plt.legend([\"Value = 1\", \"V = 2\", \"V = 3\"], loc=\"lower right\")\n plt.title(title)\n plt.tight_layout()\n if save == True:\n path = settings.PLOT_DIR + self.queryName + \"/\" + self.CSV_SPECIFIER + \"/\"\n self.check_path(path)\n plt.savefig(path + self.checkStr(title) + \".png\")\n else:\n plt.show()\n plt.close()\n\n def plot_scatter_ecdf(\n self,\n data,\n xlabel,\n title,\n legend=[\"Parallelism = 1\", \"P = 2\", \"P = 3\", \"P = 4\"],\n scatter_size=5,\n multiple=True,\n path=\"\"):\n def ecdf(data):\n \"\"\" Compute ECDF \"\"\"\n #print(type(data))\n x, n = None, 0\n if isinstance(data, list):\n #print(data)\n x = sorted(data)\n n = len(x)\n if sum(x) == 0 or n < 5:\n return None, None\n elif isinstance(data, np.ndarray):\n x = np.sort(data)\n n = x.size\n if np.sum(x) == 0 or n < 5:\n return None, None\n else:\n print(data)\n print(type(data))\n print(\"NOT NUMPY\")\n return None, None\n y = np.arange(1, n + 1) / n\n return (x, y)\n\n if multiple:\n for subplotX in data:\n xsub, ysub = ecdf(subplotX)\n if xsub is not None and ysub is not None:\n plt.scatter(x=xsub, y=ysub, s=scatter_size)\n else:\n print(self.checkStr(title) 
+ \" is EMPTY\")\n return\n else:\n x, y = ecdf(data)\n if x is not None and y is not None:\n print(x, y)\n plt.scatter(x=x, y=y, s=scatter_size)\n else:\n return\n # if path != \"\":\n # path = \"_\" + path\n plt.xlabel(xlabel)\n plt.ylabel(\"Percentage (%)\")\n plt.legend(legend, loc=\"lower right\")\n plt.title(title)\n plt.tight_layout()\n path = settings.PLOT_DIR + self.queryName + \"/\" + self.CSV_SPECIFIER + \"/scatter\" + path + \"/\"\n self.check_path(path)\n plt.savefig(path + self.checkStr(title) + \".png\")\n plt.close()\n\n def checkStr(self, title: str) -> str:\n \"\"\" Removes any non alphanumerical character from input string.\"\"\"\n return re.sub(r\"\\W+\", \"\", title)\n\n def check_path(self, path):\n \"\"\" Checks if given path is valid otherwise create it\"\"\"\n if not os.path.exists(path):\n os.makedirs(path)\n\n\ndef csv_unifier():\n \"\"\"Takes _metrics.csv and _equalmetrics.csv and combines them to one\"\"\"\n valid_query_names = settings.VALID_QUERIES_NAMES\n PATH = settings.PATH\n specifiers = settings.VALID_CSV_SPECIFIERS\n for query in valid_query_names:\n for speficier in specifiers:\n filepath = PATH + query + \"_\" + speficier + \".csv\"\n if os.path.exists(filepath):\n print('reading ' + filepath)\n fin = open(filepath, \"r\")\n dataRead = fin.read()\n fin.close()\n fout = open(PATH + query + \"_all.csv\", 'a+')\n fout.write(dataRead)\n fout.close()\n else:\n print(filepath + \" not found\")\n\n\nif __name__ == \"__main__\":\n args = sys.argv\n if len(args) == 1:\n print(\"No arguments at \" + args[0] + \" exiting...\")\n exit(-1)\n\n valid_query_names = settings.VALID_QUERIES_NAMES\n\n if args[1] == 'combine':\n csv_unifier()\n exit(1)\n\n if args[1] in valid_query_names or args[1] == \"all\":\n Analyzer(queryName=args[1],\n specifier=args[2] if len(args) >= 3 else \"\",\n action=args[3] if len(args) == 4 else None)\n else:\n print(\"Argument [\" + args[1] + \"] is invalid. 
Exiting \" + args[0])\n","repo_name":"alex-rantos/StreamingSystemsEvaluation","sub_path":"src/analyzer.py","file_name":"analyzer.py","file_ext":"py","file_size_in_byte":44780,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"10467321473","text":"\"\"\"excersice 7\n Healthy Programmer\"\"\"\n\n\n\"\"\"\n 9 to 5 pm\n concept\n water -water.mp3 (3.5 litres) - Drank - log- Every 40min\n \n Eyes - eyes.mp3 (30 minutes) - ex-done-log- Every 30 min\n physical activity - physical.mp3 every 45 minutes - ex-don - log \n rules\n pygame module to play audio\"\"\"\n\n\nfrom pygame import mixer\nfrom datetime import datetime\nfrom time import time\n\ndef musiconloop(file, stopper):\n mixer.init()\n mixer.music.load(file)\n mixer.music.play()\n while True:\n a = input()\n if a == stopper:\n mixer.music.stop()\n break\ndef log_now(msg):\n with open(\"mylogs.txt\", \"a\") as f:\n f.write(f\"{msg} {datetime.now()}\\n\")\n\nif __name__ == '__main__':\n init_water = time()\n init_eyes = time()\n init_exercise = time()\n watersecs = 40*60\n exsecs = 45*60\n eyesecs = 30*60\n\n while True:\n if time() - init_water > watersecs:\n print(\"water Drinking time, Enter 'drank' to stop the alarm.\")\n musiconloop('water.mp3', 'drank')\n init_water = time()\n log_now(\"Drank water at\")\n if time() - init_eyes > eyesecs:\n print(\"eye exercise time, Enter 'doneeyes' to stop the alarm.\")\n musiconloop('eye.mp3', 'doneeyes')\n init_eyes = time()\n log_now(\"Eyes Relaxed at\")\n if time() - init_exercise > exsecs:\n print(\"Physical Activity time, Enter 'donephy' to stop the alarm.\")\n musiconloop('physical.mp3', 'donephy')\n init_exercise = time()\n log_now(\"physical activity done at\")\n\"\"\"\n# time - init > exercise\"\"\"\n\n\"\"\" it means time is time now means live time\n intin time means when last he do activity that time \n means last when you water drink that is your init time\n is greater then the time limit then its run these is main operation in these\"\"\"\n\n\n\n","repo_name":"bhatt-nisarg/PycharmProjects","sub_path":"pythontus/mp3sounds/ex7Healthyprogrammer.py","file_name":"ex7Healthyprogrammer.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"31138739457","text":"from flask import Flask, render_template, request, redirect, url_for, jsonify, abort\r\nfrom flask_sqlalchemy import SQLAlchemy\r\nfrom sqlalchemy import func\r\nfrom flask_migrate import Migrate\r\nimport sys\r\n\r\napp = Flask(__name__)\r\n\r\napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://sh_ubuntu:93water94@localhost:5432/todoapp_new'\r\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\r\n\r\n# expire_on_commit is set to true by default, such that all instances will expire after each commit\r\n# if needed, we can set session_options={\"expire_on_commit\":False} but that can have unintended side effects\r\n# e.g. 
db = SQLAlchemy(app, session_options={\"expire_on_commit\":False})\r\ndb = SQLAlchemy(app)\r\n\r\nmigrate = Migrate(app, db) # create an instance of the Migrate class to link to command line scripts\r\n\r\nclass ToDo(db.Model):\r\n __tablename__='newww_todo'\r\n id = db.Column(db.Integer,primary_key=True)\r\n title = db.Column(db.String(),nullable=False)\r\n description = db.Column(db.String(),nullable=True)\r\n completed = db.Column(db.Boolean, nullable=False, default=False)\r\n deadline = db.Column(db.DateTime,nullable=True)\r\n list_id = db.Column(db.Integer, db.ForeignKey('todolists.id'), nullable=False)\r\n\r\n def __repr__(self):\r\n return f'<ToDo {self.id}: {self.title}>'\r\n\r\nclass ToDoList(db.Model):\r\n __tablename__ ='todolists'\r\n id = db.Column(db.Integer,primary_key=True)\r\n name = db.Column(db.String(), nullable=False)\r\n description = db.Column(db.String(), nullable=True)\r\n todos = db.relationship('ToDo', backref='list', lazy=True, cascade = 'all, delete-orphan') # lazy=True by default\r\n\r\n def __repr__(self):\r\n return f'<ToDoList {self.id}: {self.name}>'\r\n\r\n# db.create_all()\r\n\r\n#todo = ToDo(title='Finish Lesson 5')\r\n#todo2 = ToDo(title='Complete first project')\r\n#todo3 = ToDo(title='Sleep')\r\n#db.session.add_all([todo,todo2,todo3])\r\n \r\n#db.session.commit()\r\n\r\n# define what template to show to users when users visit the homepage\r\n# by default, flask looks for your templates in the templates folder\r\n\r\n### method 1 of data input: synchronous request from form input\r\n### in this case the form in index.html will have action and methods defined\r\n### and server will handle it using the below function when request is received\r\n\r\n# @app.route('/create-todo',methods=['POST'])\r\n#def create_sync():\r\n# todo_title=request.form.get('title')\r\n# todo_desc=request.form.get('description')\r\n# new_todo=ToDo(title=todo_title,description=todo_desc)\r\n# db.session.add(new_todo)\r\n# db.session.commit()\r\n# return redirect(url_for('index')) # this refreshes the page\r\n\r\n### method 2 of data input: asynchronous request from form input\r\n### client will fetch response from server after completion and handle the response\r\n### server will not define how the response will be handled and how the view is updated\r\n\r\n@app.route('/list/create',methods=['POST'])\r\ndef createlist_unsync():\r\n listname=request.get_json()['listname']\r\n listdescription=request.get_json()['listdescription'] \r\n \r\n # initialise the variables to be used\r\n error=False\r\n body={}\r\n \r\n try:\r\n new_todolist=ToDoList(name=listname,description=listdescription)\r\n db.session.add(new_todolist)\r\n db.session.commit()\r\n body['id']=new_todolist.id\r\n body['name']=new_todolist.name\r\n body['description']=new_todolist.description\r\n except:\r\n error=True\r\n db.session.rollback()\r\n print(sys.exc_info())\r\n finally:\r\n db.session.close() # return connection to connection pool for more efficient resource mgmt\r\n if error:\r\n abort(400)\r\n else:\r\n return jsonify(body)\r\n\r\n@app.route('/lists/<listID>/delete',methods=['DELETE'])\r\ndef delete_list(listID):\r\n error=False\r\n try:\r\n # avoid shadowing the builtin list()\r\n todo_list=ToDoList.query.get(listID)\r\n for todo in todo_list.todos:\r\n db.session.delete(todo)\r\n \r\n db.session.delete(todo_list)\r\n db.session.commit()\r\n except:\r\n error=True\r\n db.session.rollback()\r\n finally:\r\n db.session.close()\r\n if error:\r\n abort(500)\r\n else:\r\n return jsonify({'success': True})\r\n\r\n@app.route('/lists/<listID>/set-completed',methods=['POST'])\r\ndef list_completed(listID):\r\n error=False\r\n try:\r\n todo_list = ToDoList.query.get(listID)\r\n for todo in todo_list.todos:\r\n todo.completed = True\r\n db.session.commit()\r\n except:\r\n error=True\r\n db.session.rollback()\r\n finally:\r\n db.session.close()\r\n if error:\r\n abort(500)\r\n else:\r\n return jsonify({'success': True})\r\n\r\n@app.route('/lists/<listID>/todo/create',methods=['POST'])\r\ndef create_unsync(listID):\r\n title=request.get_json()['title']\r\n description=request.get_json()['description'] \r\n \r\n # initialise the variables to be used\r\n error=False\r\n body={}\r\n \r\n try:\r\n new_todo=ToDo(title=title,description=description,list_id=listID)\r\n db.session.add(new_todo)\r\n db.session.commit()\r\n body['id']=new_todo.id\r\n body['title']=new_todo.title\r\n body['description']=new_todo.description\r\n except:\r\n error=True\r\n db.session.rollback()\r\n print(sys.exc_info())\r\n finally:\r\n db.session.close() # return connection to connection pool for more efficient resource mgmt\r\n if error:\r\n abort(400)\r\n else:\r\n return jsonify(body)\r\n\r\n@app.route('/todo/<todoID>/set-completed',methods=['POST'])\r\ndef set_completed(todoID):\r\n complete_status = request.get_json()['completed']\r\n error=False\r\n try:\r\n todo = ToDo.query.get(todoID)\r\n todo.completed = complete_status\r\n db.session.commit()\r\n except:\r\n error=True\r\n db.session.rollback()\r\n finally:\r\n db.session.close()\r\n if error:\r\n abort(500)\r\n else:\r\n return redirect(url_for('index'))\r\n\r\n@app.route('/todo/<todoID>/delete',methods=['DELETE'])\r\ndef delete_todo(todoID):\r\n error=False\r\n try:\r\n ToDo.query.filter_by(id=todoID).delete()\r\n db.session.commit()\r\n except:\r\n error=True\r\n db.session.rollback()\r\n finally:\r\n db.session.close()\r\n if error:\r\n abort(500)\r\n else:\r\n return jsonify({'success': True})\r\n\r\n@app.route('/lists/<list_id>')\r\ndef get_todo_list(list_id):\r\n return render_template('index.html',\r\n lists=ToDoList.query.order_by('id'),\r\n activelist=ToDoList.query.get(list_id),\r\n todos=ToDo.query.filter_by(list_id=list_id).order_by('id').all())\r\n\r\n@app.route('/')\r\ndef index():\r\n return redirect(url_for('get_todo_list',list_id=1)) # can consider how to set this to a variable\r\n","repo_name":"venice94/public_repo","sub_path":"todo_app.py","file_name":"todo_app.py","file_ext":"py","file_size_in_byte":6688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
Grid,s_w,time,tf_new,time,t_interest,i,tf,t,s_w_sol,phi_w_sol,phi_w,phi_i_sol,phi_i,H_sol,H,T_sol,T,phi","repo_name":"mashadab/ice-layer-formation","sub_path":"Solver/spin_up_firn.py","file_name":"spin_up_firn.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"11783777979","text":"from common.json import ModelEncoder\nfrom .models import Product, Category\n\nclass CategoryEncoder(ModelEncoder):\n model = Category\n properties = [\n \"id\",\n \"name\",\n \"parent_category\",\n ]\n def get_extra_data(self, o):\n extra_data = super().get_extra_data(o)\n if o.parent_category:\n extra_data[\"parent_category\"] = CategoryEncoder().default(o.parent_category)\n return extra_data\n\nclass ProductEncoder(ModelEncoder):\n model = Product\n properties = [\n \"id\",\n \"title\",\n \"price\",\n \"description\",\n \"image\",\n \"category\",\n ]\n encoders = {\n \"category\": CategoryEncoder(),\n }\n","repo_name":"jenkimh/vendora","sub_path":"inventory/api/inventory_app/encoders.py","file_name":"encoders.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"75410512830","text":"#!/usr/bin/env python\n# Created on: Jul 1, 2023 at 9:50:48 AM\n__author__ = \"Michael Cuoco\"\n\nimport re, logging, time\n\nlogger = logging.getLogger(__name__) # configure logging\nfrom collections import deque\nfrom typing import Generator\nfrom pysam import AlignmentFile, AlignedSegment\nimport numpy as np\nimport pandas as pd\nfrom .schemas import TAGS, Read\nimport pyarrow as pa\nimport pyarrow.parquet as pq\n\nCHROMOSOMES = [f\"chr{c}\" for c in range(1, 23)] + [\"chrX\", \"chrY\"]\n\n\ndef isref_read(read: AlignedSegment) -> bool:\n \"return True if read is ref, False if non-ref based on mate tag\"\n assert read.is_read1, \"Read must be read 1\"\n\n # get cigar at start of read, accounting for strand\n if not read.has_tag(\"MC\"):\n return False\n\n cigar = re.findall(r\"(\\d+)([MIDNSHP=X])\", str(read.get_tag(\"MC\")))\n end = cigar[-1] if not read.is_reverse else cigar[0]\n clipped = int(end[0]) if end[1] == \"H\" or end[1] == \"S\" else 0\n\n return read.is_proper_pair and (clipped < 30)\n\n\ndef read_to_namedtuple(read: AlignedSegment) -> Read:\n \"convert pysam.AlignedSegment to hashable namedtuple Read\"\n return Read(\n read.reference_name,\n read.reference_start,\n read.reference_end,\n read.infer_read_length(),\n read.is_read1,\n read.is_read2,\n not read.is_reverse,\n read.is_reverse,\n read.mapping_quality,\n len(read.get_tag(\"SA\").split(\";\")[:-1]) if read.has_tag(\"SA\") else 0, # type: ignore\n read.cigarstring,\n read.get_tag(\"AS\") if read.has_tag(\"AS\") else None,\n read.get_tag(\"L1\") if read.has_tag(\"L1\") else None,\n read.get_tag(\"LS\") if read.has_tag(\"LS\") else None,\n read.get_tag(\"LE\") if read.has_tag(\"LE\") else None,\n read.get_tag(\"LA\") if read.has_tag(\"LA\") else None,\n read.get_tag(\"MS\") if read.has_tag(\"MS\") else None,\n read.get_tag(\"ML\") if read.has_tag(\"ML\") else None,\n read.get_tag(\"MQ\") if read.has_tag(\"MQ\") else None,\n read.get_tag(\"MC\") if read.has_tag(\"MC\") else None,\n read.next_reference_name,\n read.next_reference_start,\n read.is_proper_pair,\n isref_read(read),\n read.is_duplicate,\n )\n\n\ndef gini(array):\n \"\"\"Calculate the Gini coefficient of a numpy array.\"\"\"\n # based on bottom eq: 
http://www.statsdirect.com/help/content/image/stat0206_wmf.gif\n # from: http://www.statsdirect.com/help/default.htm#nonparametric_methods/gini.htm\n array = array.flatten() # all values are treated equally, arrays must be 1d\n if np.amin(array) < 0:\n array -= np.amin(array) # values cannot be negative\n array += 0.0000001 # values cannot be 0\n array = np.sort(array) # values must be sorted\n index = np.arange(1, array.shape[0] + 1) # index per array element\n n = array.shape[0] # number of array elements\n return (np.sum((2 * index - n - 1) * array)) / (\n n * np.sum(array)\n ) # Gini coefficient\n\n\n# TODO: remove collect_localmax and associated code\n# TODO: move features and stats methods outside of class\nclass SlidingWindow(object):\n def __init__(\n self,\n bam: AlignmentFile,\n contigs: list | None = None,\n min_mapq: int = 0,\n peaks: bool = False,\n collect_features: bool = False,\n collect_localmax: bool = False,\n ) -> None:\n # save inputs to attributes\n self.bam = bam\n\n # validate contigs\n if contigs:\n for c in contigs:\n if c not in CHROMOSOMES:\n raise Exception(f\"{c} is not valid chromosome\")\n self.contigs = contigs\n else:\n self.contigs = []\n for i in range(bam.nreferences):\n if bam.get_reference_name(i) in CHROMOSOMES:\n self.contigs.append(bam.get_reference_name(i))\n\n # save settings\n self.min_mapq = min_mapq\n self.peaks = peaks\n self.mode = \"peaks\" if self.peaks else \"windows\"\n self.collect_features = collect_features\n self.collect_localmax = collect_localmax\n\n self.read_filter = (\n lambda x: x.is_read1\n and x.is_mapped\n and (not x.is_secondary)\n and (not x.is_supplementary)\n )\n\n # count reads in bam satisfying read filter\n total_reads = bam.count(read_callback=self.read_filter)\n self.size_factor = total_reads / 1e6\n logger.info(f\"{total_reads} filtered reads in the bam file\")\n\n # reset read filter to include min_mapq\n self.read_filter = (\n lambda x: x.is_read1\n and x.is_mapped\n and (not x.is_secondary)\n and (not x.is_supplementary)\n and (x.mapping_quality >= min_mapq)\n )\n\n def windows(\n self,\n reads,\n size: int = 200,\n step: int = 1,\n min_rpm: float = 0,\n min_reads: int = 0,\n ) -> Generator:\n \"Slide window across contig and yield each window\"\n try:\n r = next(reads)\n assert type(r) == Read, \"Reads must be of type Read\"\n contig = r.reference_name\n reflen = self.bam.get_reference_length(r.reference_name)\n except StopIteration:\n return\n\n w = deque()\n for start in range(0, reflen + 1, step):\n end = start + size if start + size < reflen else reflen\n\n # while w is not empty and the first read is outside the window\n while w and w[0].reference_start < start:\n w.popleft().isref_read\n\n # while the next read is inside the window\n while r.reference_start < end:\n w.append(r)\n try:\n r = next(reads)\n assert r.reference_name == contig, \"Reads are not sorted by contig\"\n except StopIteration:\n break\n\n # return window if it has min_rpm\n if (len(w) / self.size_factor >= min_rpm) and (len(w) >= min_reads):\n yield {\n \"Chromosome\": contig,\n \"Start\": start,\n \"End\": end,\n \"reads\": set(w),\n }\n\n def merge(self, windows: Generator, bandwidth: int = 0) -> Generator:\n \"\"\"\n Merge overlapping windows.\n :param windows: generator of windows\n :param bandwidth: maximum distance between windows to merge\n \"\"\"\n\n # filter windows for non-empty\n windows = filter(lambda x: len(x[\"reads\"]) > 0, windows)\n try:\n p = next(windows) # grab first window\n except StopIteration: # skip if no 
peaks\n return\n\n for n in windows:\n # if the next window is within the bandwidth, merge\n if n[\"Start\"] <= (p[\"End\"] + bandwidth):\n p[\"End\"] = n[\"End\"]\n p[\"reads\"] = p[\"reads\"].union(n[\"reads\"])\n # otherwise, yield the previous window and start a new one\n else:\n # yield merged windpow\n yield p\n assert (\n p[\"Chromosome\"] == n[\"Chromosome\"]\n ), \"Windows are not sorted by contig\"\n # start new window\n p = n\n\n def stats(self, p: dict):\n \"\"\"\n Calculate basic stats about a region\n :param p: dict of region, must contain \"reads\" key\n \"\"\"\n\n assert \"reads\" in p, \"Region must contain reads\"\n\n # iterate over reads to calculate stats\n p[\"n_ref_reads\"], p[\"max_mapq\"], p[\"n_duplicates\"] = 0, 0, 0\n starts = []\n fwd, rev = 0, 0\n for r in p[\"reads\"]:\n if r.is_duplicate:\n p[\"n_duplicates\"] += 1\n continue\n\n if r.is_reverse:\n rev += 1\n else:\n fwd += 1\n p[\"max_mapq\"] = max(p[\"max_mapq\"], r.mapping_quality)\n p[\"n_ref_reads\"] += r.isref_read\n\n if r.is_forward:\n starts.append(r.reference_start)\n else:\n starts.append(r.reference_end)\n p[\"n_unique_starts\"] = len(set(starts))\n p[\"n_reads\"] = fwd + rev\n\n # add strand info\n if (fwd == 0) and (rev > 0):\n p[\"Strand\"] = \"-\"\n elif (rev == 0) and (fwd > 0):\n p[\"Strand\"] = \"+\"\n\n # remove reads\n del p[\"reads\"]\n\n return p\n\n def features(self, p: dict) -> dict:\n \"\"\"\n Extract features from a window of reads\"\n :param p: dict of region, must contain \"reads\" key\n \"\"\"\n\n assert \"reads\" in p, \"Region must contain reads\"\n\n # initialize lists for features\n l = dict()\n for k in TAGS + [\"3end\", \"5end\", \"mapq\", \"starts\"]:\n l[k] = []\n\n # initialize features dict\n f = {\n \"Chromosome\": p[\"Chromosome\"],\n \"Start\": p[\"Start\"],\n \"End\": p[\"End\"],\n \"n_reads\": 0,\n \"n_fwd\": 0,\n \"n_rev\": 0,\n \"n_duplicates\": 0,\n \"n_proper_pairs\": 0,\n \"n_ref_reads\": 0,\n \"3end_gini\": float(0),\n \"5end_gini\": float(0),\n \"max_mapq\": 0,\n \"rpm\": float(0),\n \"orientation_bias\": float(0),\n \"frac_proper_pairs\": float(0),\n \"n_unique_starts\": 0,\n }\n\n for tag in TAGS:\n for n in [0, 0.25, 0.5, 0.75, 1]:\n f[tag + \"_q\" + str(n)] = float(0)\n f[tag + \"_mean\"] = float(0)\n\n # collect features from the reads in the window\n for i, r in enumerate(p[\"reads\"]):\n if not r.is_read1:\n raise Exception(\"Reads must all be read1\")\n\n if i == 0:\n start = r.reference_start\n\n if r.is_duplicate:\n f[\"n_duplicates\"] += 1\n continue\n\n l[\"starts\"].append(r.reference_start)\n if r.is_forward:\n l[\"3end\"].append(r.reference_end - start)\n l[\"5end\"].append(r.reference_start - start)\n else:\n l[\"3end\"].append(r.reference_start - start)\n l[\"5end\"].append(r.reference_end - start)\n\n l[\"mapq\"].append(r.mapping_quality)\n f[\"n_proper_pairs\"] += r.is_proper_pair\n f[\"n_ref_reads\"] += r.isref_read\n\n if r.is_reverse:\n f[\"n_rev\"] += 1\n else:\n f[\"n_fwd\"] += 1\n\n for tag in TAGS:\n if \"_normed\" in tag:\n if getattr(r, tag.replace(\"_normed\", \"\")):\n if tag in [\n \"L1_alignment_score_normed\",\n \"mate_alignment_score_normed\",\n ]: # adjust alignments scores for read length\n l[tag].append(\n getattr(r, tag.replace(\"_normed\", \"\"))\n / getattr(r, \"mate_read_length\")\n )\n elif tag in [\"alignment_score_normed\"]:\n l[tag].append(\n getattr(r, tag.replace(\"_normed\", \"\"))\n / getattr(r, \"read_length\")\n )\n elif getattr(r, tag):\n l[tag].append(getattr(r, tag))\n\n # compute mean and quantiles for 
these features\n for tag in TAGS:\n if len(l[tag]) > 0:\n quantiles = np.quantile(l[tag], [0, 0.25, 0.5, 0.75, 1])\n for n, q in zip([0, 0.25, 0.5, 0.75, 1], quantiles):\n f[tag + \"_q\" + str(n)] = q\n f[tag + \"_mean\"] = np.mean(l[tag])\n\n f[\"n_reads\"] = f[\"n_fwd\"] + f[\"n_rev\"]\n\n # if reads are empty, return empty features\n if f[\"n_reads\"] == 0:\n return f\n\n f[\"3end_gini\"] = gini(np.array(l[\"3end\"], dtype=np.float64))\n f[\"5end_gini\"] = gini(np.array(l[\"5end\"], dtype=np.float64))\n f[\"max_mapq\"] = max(l[\"mapq\"])\n f[\"n_unique_starts\"] = len(set(l[\"starts\"]))\n\n return f\n\n def make_regions(\n self,\n strand_split: bool = False,\n **kwargs,\n ) -> Generator:\n \"\"\"\n Make windows on the given contigs\n :param strand_split: optionally generate windows separately for each strand\n :param kwargs: arguments to pass to windows()\n \"\"\"\n\n # define read group filters\n if strand_split:\n rg = [\n lambda x: x.is_read1 and x.is_forward,\n lambda x: x.is_read1 and x.is_reverse,\n ]\n else:\n rg = [lambda x: x.is_read1]\n\n for c in self.contigs:\n for f in rg:\n reads = filter(self.read_filter, self.bam.fetch(c))\n reads = filter(f, reads)\n reads = map(read_to_namedtuple, reads)\n\n # define window generator\n if self.peaks:\n windows = self.merge(self.windows(reads, **kwargs))\n else:\n windows = self.windows(reads, **kwargs)\n\n # yield windows\n for w in windows:\n if self.collect_features:\n yield self.features(w)\n else:\n yield self.stats(w)\n\n def write_regions(self, outfile: str, **kwargs) -> None:\n \"\"\"\n Generate regions and write to disk\n :param outfile: path to output file\n :param kwargs: arguments to pass to make_regions()\n \"\"\"\n\n # grab first region\n iter = self.make_regions(**kwargs)\n r = next(iter)\n\n if self.collect_localmax:\n for i in [2, 4, 8, 16, 32]:\n r[f\"localmax_{i}_reads\"] = 0\n r[f\"localmax_{i}_rpm\"] = float(0)\n schema = pa.Schema.from_pandas(pd.Series(r).to_frame().T)\n\n regions = [] # initialize list of regions\n with pq.ParquetWriter(outfile, schema, compression=\"gzip\") as writer:\n # write regions to disk in batches by chromosome\n for c in self.contigs:\n start = time.perf_counter()\n\n # collect regions for this chromosome\n while r[\"Chromosome\"] == c:\n regions.append(r)\n try:\n r = next(iter)\n except StopIteration:\n break\n\n logger.info(\n f\"Generated {len(regions)} {self.mode} on {c} in {time.perf_counter() - start:.2f} seconds\"\n )\n\n # skip empty chromosomes\n if len(regions) == 0:\n continue\n\n # convert regions to dataframe\n regions = pd.DataFrame(regions)\n\n # compute local max\n if self.collect_localmax:\n for localbw in [2, 4, 8, 16, 32]:\n lm = regions[\"n_reads\"].rolling(localbw, center=True).max()\n regions[f\"localmax_{localbw}_reads\"] = lm\n regions[f\"localmax_{localbw}_rpm\"] = lm / self.size_factor\n\n # remove empty regions\n regions = regions.loc[regions.n_reads > 0, :].fillna(0)\n\n # compute metrics on vector scale\n regions[\"rpm\"] = regions[\"n_reads\"] / self.size_factor\n if self.collect_features:\n regions[\"orientation_bias\"] = (\n np.maximum(regions[\"n_fwd\"], regions[\"n_rev\"])\n / regions[\"n_reads\"]\n )\n regions[\"frac_proper_pairs\"] = (\n regions[\"n_proper_pairs\"] / regions[\"n_reads\"]\n )\n regions[\"frac_duplicates\"] = regions[\"n_duplicates\"] / (\n regions[\"n_reads\"] + regions[\"n_duplicates\"]\n )\n\n # write to disk\n logger.info(f\"Writing {len(regions)} {self.mode} on {c} to disk\")\n writer.write_table(pa.Table.from_pandas(regions, 
schema=schema))\n\n # reset regions list\n regions = []\n","repo_name":"mikecuoco/sz_slavseq","sub_path":"workflow/scripts/pyslavseq/sliding_window.py","file_name":"sliding_window.py","file_ext":"py","file_size_in_byte":16305,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"28294475955","text":"\"\"\"Declaration of variables\"\"\"\nboard = [\"-\", \"-\", \"-\", \"-\", \"-\", \"-\", \"-\", \"-\", \"-\"]\nplayer1 = \"X\"\nplayer2 = \"O\"\ngturn = player1\nisGameFinished = None\n\n\"\"\"Function declarations and implementations\"\"\"\n\n\ndef make_move(turn):\n penalty_counter = 0\n print(\"Enter cell number from 0-8: \")\n print(\"0\", \"1\", \"2\")\n print(\"3\", \"4\", \"5\")\n print(\"6\", \"7\", \"8\")\n a = int(input())\n # Check if chosen field is taken\n while board[a] != \"-\":\n print(\"Field is taken, please try another one: \")\n a = int(input())\n penalty_counter += 1\n if penalty_counter == 2:\n # Punish a player who tries to stall the game by repeatedly choosing taken cells\n print(\"Player\", turn, \"disqualified\")\n quit()\n if turn == player1:\n board[a] = \"X\"\n turn = player2\n elif turn == player2:\n board[a] = \"O\"\n turn = player1\n draw_board()\n return turn\n\n\ndef draw_board():\n # Function which displays current situation on board\n print(board[0], board[1], board[2])\n print(board[3], board[4], board[5])\n print(board[6], board[7], board[8])\n\n\ndef check_for_win():\n # Function which checks whether a win condition has been satisfied\n if board[0] == board[1] and board[1] == board[2] and board[2] != \"-\":\n print(\"Player\", board[1], \"wins \")\n return True\n elif board[3] == board[4] and board[4] == board[5] and board[5] != \"-\":\n print(\"Player\", board[3], \"wins \")\n return True\n elif board[6] == board[7] and board[7] == board[8] and board[8] != \"-\":\n print(\"Player\", board[6], \"wins \")\n return True\n elif board[0] == board[3] and board[3] == board[6] and board[6] != \"-\":\n print(\"Player\", board[0], \"wins \")\n return True\n elif board[1] == board[4] and board[4] == board[7] and board[7] != \"-\":\n print(\"Player\", board[1], \"wins \")\n return True\n elif board[2] == board[5] and board[5] == board[8] and board[8] != \"-\":\n print(\"Player\", board[2], \"wins \")\n return True\n elif board[0] == board[4] and board[4] == board[8] and board[8] != \"-\":\n print(\"Player\", board[0], \"wins \")\n return True\n elif board[6] == board[4] and board[4] == board[2] and board[2] != \"-\":\n print(\"Player\", board[6], \"wins \")\n return True\n else:\n return False\n\n\n\"\"\"Main Body\"\"\"\n\n\ndraw_board()\nfor i in range(len(board)):\n gturn = make_move(gturn)\n isGameFinished = check_for_win()\n if isGameFinished:\n print(\"Game Over\")\n break\nif not isGameFinished:\n print(\"Draw\")\n","repo_name":"IanMess/TicTacToe","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"39734387064","text":"import itertools\nimport os\nimport pickle\nfrom collections import OrderedDict\n\nimport Pyro4\nimport matplotlib\nimport numpy as np\nfrom django.http import HttpResponse\nfrom rest_framework.authentication import SessionAuthentication, \\\n BasicAuthentication\nfrom rest_framework.decorators import api_view\nfrom rest_framework.decorators import authentication_classes\nfrom rest_framework.decorators import parser_classes\nfrom rest_framework.decorators 
import permission_classes\nfrom rest_framework.parsers import JSONParser\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom sklearn.metrics.pairwise import cosine_similarity\n\n# in theory prevents matplotlib from wanting an x server to render things which will\n# raise a tkinter exception if executed on a server\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\n\n\ndef timestamps_to_dts(timestamps):\n \"\"\" Converts a list of timestamps to time deltas \"\"\"\n orig = timestamps[0]\n timestamps = [i - orig for i in timestamps]\n timestamps[1:] = [timestamps[idx + 1] - timestamps[idx] for idx, i in\n enumerate(timestamps[1:])]\n return timestamps\n\n\ndef translate(data):\n \"\"\"\n Convert browser input data to the format that the neural network used\n while training:\n (timestamp, e.buttons, type, x, y)\n We'll also convert the timestamp to time differences and\n from milliseconds to seconds\n \"\"\"\n\n result = []\n data = np.array(data, dtype=object)\n dts = timestamps_to_dts(data[:, 0]) # convert to time deltas\n dts = [i / 1000.0 for i in dts] # convert to seconds\n data[:, 0] = dts\n\n buttons_map = {0: 'NoButton', 1: 'Left', 2: 'Right'}\n states_map = {0: 'Move', 1: 'Pressed', 2: 'Released', 3: 'Drag'}\n\n data[:, 1] = [buttons_map.get(i, 'Unknown') for i in data[:, 1]]\n data[:, 2] = [states_map.get(i, 'Unknown') for i in data[:, 2]]\n\n return data.tolist()\n\n\n# Use pickle as an improvised database\ndef persist(id, obj):\n with open('state/user_{}_state.pkl'.format(id), 'w+b') as tmp:\n pickle.dump(obj, tmp)\n\n\ndef load(id):\n with open('state/user_{}_state.pkl'.format(id), 'rb') as tmp:\n return pickle.load(tmp)\n\n\ndef reset_state(id):\n with open('state/user_{}_state.pkl'.format(id), 'w+b') as tmp:\n pickle.dump(OrderedDict(), tmp)\n\n\ndef get_or_create_state(id):\n state = OrderedDict()\n\n try:\n state = load(id)\n except Exception:\n pass\n\n return state\n\n\ndef plot_confusion_matrix(cm, classes, range_min,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n plt.imshow(cm, interpolation='nearest', cmap=cmap, vmin=range_min, vmax=1.0)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\n thresh = 0.8\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, \"{:.5f}\".format(cm[i, j]),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('Users')\n plt.xlabel('Users')\n\n\n@api_view(['GET'])\n@authentication_classes((SessionAuthentication, BasicAuthentication))\n@permission_classes((IsAuthenticated,))\n@parser_classes((JSONParser,))\ndef compare(request):\n \"\"\" Returns a PNG image with a confusion matrix graph that shows how the\n network compares any user against every other user \"\"\"\n\n if request.method == 'GET':\n\n rangemin = float(request.query_params['rangemin'])\n\n state = get_or_create_state(request.user.id)\n\n confusion_matrix = np.zeros((len(state.items()), len(state.items())))\n # this can be done with itertools but it uses lexicographic order?\n combinations = [(x, y) for x in 
enumerate(state.items()) for y in\n enumerate(state.items())]\n for (i, (user1, embedding1)), (j, (user2, embedding2)) in combinations:\n confusion_matrix[i, j] = cosine_similarity(embedding1, embedding2)\n\n fig = plt.figure()\n plot_confusion_matrix(confusion_matrix, state.keys(), rangemin)\n canvas = FigureCanvas(fig)\n\n response = HttpResponse(content_type='image/png')\n canvas.print_png(response)\n return response\n\n\n@api_view(['POST'])\n@authentication_classes((SessionAuthentication, BasicAuthentication))\n@permission_classes((IsAuthenticated,))\n@parser_classes((JSONParser,))\ndef reset(request):\n \"\"\" Resets the saved recognition signatures\"\"\"\n if request.method == 'POST':\n reset_state(request.user.id)\n return Response()\n\n\n@api_view(['POST'])\n@authentication_classes((SessionAuthentication, BasicAuthentication))\n@permission_classes((IsAuthenticated,))\n@parser_classes((JSONParser,))\ndef recognize(request):\n \"\"\"\n Sends the recorded input sequence to the neural network, which computes a\n signature. The signature is saved and associated with the given user.\n \"\"\"\n if request.method == 'POST':\n state = get_or_create_state(request.user.id)\n\n user = request.data['user']\n data = translate(request.data['input'])\n\n detector = Pyro4.Proxy('PYRONAME:neuralauth.detector')\n embedding = np.array(detector.prepare_and_predict(data, 1024, 768))\n\n # We assume the average embedding will be representative of all the\n # recorded sequences\n try:\n state[user] = np.mean([state[user], embedding], axis=0)\n except KeyError:\n state[user] = embedding\n\n persist(request.user.id, state)\n\n return Response(state[user])\n","repo_name":"manumartin/neuralauth","sub_path":"webapp/neuralauth/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"72898596030","text":"#!/usr/bin/python3\ndef safe_print_list(my_list=[], x=0):\n i = 0\n item = \"\"\n try:\n for i in range(0, x):\n print(\"{}\".format(my_list[i]), end='')\n i += 1\n if i == x:\n break\n except IndexError:\n pass\n print()\n return i\n","repo_name":"Nourezzehi/holbertonschool-higher_level_programming","sub_path":"python-exceptions/0-safe_print_list.py","file_name":"0-safe_print_list.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"32443897865","text":"ff = open('input','r')\n\nhighest = 0;\n\nfor line in ff:\n x = 0;\n for c in line:\n if (c == 'B' or c == 'R'):\n x = (x << 1) + 1 \n else:\n x = (x << 1) + 0\n x = x / 2\n print(str(line[:-1]) + ' ' + str(x))\n if (x > highest):\n highest = x\n\nprint(highest)\n","repo_name":"kufena/AdventOfCode2020","sub_path":"5/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"38531324828","text":"from heapq import heappop, heappush\nfrom typing import List\n\n\nclass Solution:\n def kSmallestPairs(self, nums1: List[int], nums2: List[int], k: int) -> List[List[int]]:\n if not nums1 or not nums2: return []\n\n result = []\n\n l1, l2 = len(nums1), len(nums2)\n\n q = [(nums1[i] + nums2[0], nums1[i], nums2[0], 0) for i in range(len(nums1))]\n\n while q and k:\n t0, t1, t2, t3 = heappop(q)\n result.append([t1, t2])\n if t3 < l2 - 1: heappush(q, (t1 + nums2[t3 + 1], t1, nums2[t3 + 1], t3 + 1))\n k -= 1\n\n
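# nums1 is sorted, so seeding the heap with (nums1[i], nums2[0]) yields a valid min-heap; if fewer than k pairs exist, the heap drains first and result simply holds all of them\n return 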
result\n","repo_name":"CrazySquirrel/Outtalent","sub_path":"Leetcode/373. Find K Pairs with Smallest Sums/solution3.py","file_name":"solution3.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"60"} +{"seq_id":"5529363884","text":"\"\"\"\nUtilities to visualize document results in html using pandas dataframes\n\nMain entry functon is `write_dataframe_html`\n\nExample::\n\n search = Search(configfile)\n df = dsutil.search_dataframe_concepts(\n search,\n original_df,\n text_colname='text',\n doc_results_colname='Doc Results'\n ).copy().drop(columns=['text'])\n df.set_index('Key', inplace=True)\n writer = HTMLWriter()\n with open('temp-results-table.html', 'w') as ofp:\n dsutil.write_dataframe_html(df, writer, ofp)\n\n\"\"\"\n\nimport html\nfrom io import StringIO\nfrom typing import Optional\n\nHTML_HEAD_CODE_START = \"\"\"\n\n\n \n\"\"\"\n\nCSS_CODE = \"\"\"\n\"\"\"\n\nJAVASCRIPT_CODE = \"\"\"\n\n\"\"\"\n\nHTML_HEAD_CODE_END = \"\"\"\n\n\n\n\"\"\"\n\nHTML_BODY_END = \"\"\"\n\n\n\"\"\"\n\n\ndef write_table_row(arr, style: str = \"\", index_in_first_column: bool = True) -> str:\n \"\"\"\n Write row of html table.\n\n Args:\n arr: numpy.array: Array of values for the row\n style: str: CSS style for row (Default value = \"\")\n index_in_first_column: bool: if True, then use index in the first column to create a visibility button for text details (Default value = True)\n\n Returns:\n str: HTML for the row (with escaped content)\n \"\"\"\n if style:\n result = f''\n else:\n result = f\"\"\n if index_in_first_column:\n # result += f'+'\n result += f''\n result += f\"\".join(map(html.escape, arr[1:].astype(str)))\n else:\n result += f\"\".join(map(html.escape, arr.astype(str)))\n result += f\"\"\n return result\n\n\ndef write_table_head(\n arr,\n rowtag: str = \"tr\",\n style: str = \"\",\n coltag: str = \"th\",\n insert_first_col: str = \"+\",\n) -> str:\n \"\"\"\n Write head of html table.\n\n Args:\n arr: numpy.array: Array of values for the row\n rowtag: str: (Default value = \"tr\")\n style: str: CSS style for the row (Default value = \"\")\n coltag: str: (Default value = \"th\")\n insert_first_col: str: String to insert for the first column, if not empty (Default value = \"+\")\n\n Returns:\n str: HTML for the head row (with escaped content)\n \"\"\"\n if style:\n style = f' style=\"{style}\"'\n result = f\"<{rowtag}{style}><{coltag}>\"\n if insert_first_col:\n result += f\"{insert_first_col}<{coltag}>\"\n result += f\"<{coltag}>\".join(map(html.escape, arr.astype(str)))\n result += f\"\\n\"\n return result\n\n\ndef write_doc_row(arr) -> str:\n \"\"\"\n Write row of document dataframe table\n\n Args:\n arr: numpy.array: Array of values for the row\n\n Note: In arr, index must be in first column; html for the text details must be in last column,\n\n Returns:\n str: HTML for the document row (with escaped content)\n \"\"\"\n #\n # rowtag = ''\n rowtag = f''\n # rowtag = ''\n celltag = f''\n return write_table_row(arr[:-1]) + f\"\\n{rowtag}{celltag}\\n\" + arr[-1] + \"\"\n\n\ndef write_doc_table(df, stream=None) -> Optional[str]:\n \"\"\"\n Write document dataframe table as html\n\n Dataframe has first column index, last column html for text details, and interleaving values to display in table\n\n Args:\n df: pandas.DataFrame: Dataframe to write\n stream: If None, create String stream and return its value, else write to stream (Default value = None)\n\n Returns:\n str | None: If stream is None, return string 
value, else function just writes to stream\n \"\"\"\n if stream is None:\n stream = StringIO()\n return_string = True\n else:\n return_string = False\n stream.write(HTML_HEAD_CODE_START)\n stream.write(CSS_CODE)\n stream.write(JAVASCRIPT_CODE)\n stream.write(HTML_HEAD_CODE_END)\n stream.write('\\n')\n stream.write(write_table_head(df.columns[1:-1], style=\"text-align: left\"))\n stream.write(\"\")\n stream.write(\"\\n\".join(df.apply(write_doc_row, axis=1, raw=True).values))\n stream.write(\"
    \\n\")\n stream.write(HTML_BODY_END)\n if return_string:\n return stream.getvalue()\n\n\ndef write_dataframe_html(\n dataframe, writer, stream=None, doc_results_colname=\"Doc Results\"\n) -> Optional[str]:\n \"\"\"\n Write dataframe to html as interspersed data row and text results row.\n\n Must have a column with DocResult(s), which will be formatted to html.\n Doc results can be a list, whose html will be concatenated.\n (If no DocResults, then pandas.DataFrame.to_html() should be used.)\n\n Args:\n dataframe: pandas.DataFrame: Dataframe to write\n writer: search.writer.HTMLWriter: Writer to format DocResult to html\n stream: If None, create String stream and return its value, else write to stream (Default value = None)\n doc_results_colname: Name of the column in the dataframe with a DocResult (or list of DocResult) (Default value = \"Doc Results\")\n\n Returns:\n str | None: If stream is None, return string value, else function just writes to stream\n \"\"\"\n if stream is None:\n stream = StringIO()\n return_string = True\n else:\n return_string = False\n dataframe[\"doc_result_html\"] = dataframe[doc_results_colname].apply(\n lambda x: writer.get_doc_result_html(x)\n if not isinstance(x, list)\n else \"\\n\".join(writer.get_doc_result_html(d) for d in x)\n )\n write_doc_table(dataframe.drop(columns=[doc_results_colname]).reset_index(), stream)\n if return_string:\n return stream.getvalue()\n","repo_name":"markgraves/leat","sub_path":"leat/dsutil/html_results_table.py","file_name":"html_results_table.py","file_ext":"py","file_size_in_byte":6868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"7518589138","text":"import json\nfrom dataclasses import dataclass, field\nfrom time import sleep\nfrom typing import Any, Dict, List, Optional, Tuple\n\nimport arrow\nfrom loguru import logger\n\nfrom subdivisions.base import AWSClientMixin, SubDivisions\nfrom subdivisions.builders.events import SubDivisionsEventsBuilder\nfrom subdivisions.builders.sns import SubDivisionsSNSBuilder\nfrom subdivisions.builders.sqs import SubDivisionsSQSBuilder\nfrom subdivisions.config import sub_config\nfrom subdivisions.exceptions import PubSubException\n\n\n@dataclass\nclass SubClient(SubDivisions, AWSClientMixin):\n _event_name: Optional[str] = None\n received_handlers: Optional[List[Tuple[str, str]]] = field(default_factory=list)\n\n @property\n def event_name(self):\n return SubDivisionsEventsBuilder(topic=self.topic).event_name\n\n def _prepare_subscribe(self):\n events_builder = SubDivisionsEventsBuilder(topic=self.topic)\n # 1. If Topic exists, proceed\n if not events_builder.topic_exists():\n # 2. Raise and suggest best match if found\n if events_builder.similar_topic_exists():\n raise PubSubException(\n f\"Topic '{self.event_name}' not found. \"\n f\"Did you mean '{events_builder.best_match}'?\"\n )\n raise PubSubException(f\"Topic '{self.event_name}' not found.\")\n\n # 2. 
If queue does not exist, create it\n sqs_builder = SubDivisionsSQSBuilder(topic=self.topic)\n if not sqs_builder.queue_exists():\n # Create SQS Queue with encryption and dead_letter\n\n dead_letter_queue_arn = sqs_builder.create_queue(\n is_dead_letter=True\n ).queue_arn\n topic_queue_arn = sqs_builder.create_queue(\n dead_letter_arn=dead_letter_queue_arn\n ).queue_arn\n\n # Subscribe Queue with SNS\n sns_builder = SubDivisionsSNSBuilder(topic=self.topic)\n sns_builder.create_sns_topic()\n sns_builder.subscribe_sns_topic(topic_queue_arn)\n\n self.wait_for_queue_ready()\n\n def wait_for_queue_ready(self):\n # Wait until new SQS Queue is available. (approx. 30 seconds)\n tentative = 1\n while tentative < 11:\n try:\n logger.debug(f\"Attempt {tentative} to reach the new SQS queue...\")\n sqs_builder = SubDivisionsSQSBuilder(topic=self.topic)\n queue_url = sqs_builder.get_queue().queue_url\n logger.info(f\"New SQS queue is available. Queue url is: {queue_url}\")\n break\n except PubSubException:\n tentative += 1\n sleep(5)\n\n def _create_topic(self):\n # 2. Create SNS Topic\n sns_builder = SubDivisionsSNSBuilder(topic=self.topic)\n topic_sns_arn = sns_builder.create_sns_topic().sns_arn\n\n # Create/Update Eventbridge Rule\n events_builder = SubDivisionsEventsBuilder(topic=self.topic)\n events_builder.put_rule()\n events_builder.put_target(topic_sns_arn)\n\n def _prepare_send_message(self):\n # 1. If Topic exists, send message\n events_builder = SubDivisionsEventsBuilder(topic=self.topic)\n if events_builder.topic_exists():\n return\n # 2. If not exists, check for similar. If exists raise and suggest best match\n if events_builder.similar_topic_exists():\n raise PubSubException(\n f\"Topic '{self.event_name}' not found. \"\n f\"Did you mean '{events_builder.best_match}'?\"\n )\n\n # 3. If it does not exist, no similar topic is found,\n # and auto-creation is forbidden, raise an error\n if not sub_config.auto_create_new_topic:\n raise PubSubException(\n f\"Topic '{self.event_name}' not found. Auto creation not allowed.\"\n )\n\n # 4. Create new Topic\n self._create_topic()\n\n def send(self, message: Dict[Any, Any]):\n \"\"\"Send new message for Eventbridge.\n\n This command will send the message to the topic selected.\n You must define a topic before sending this command::\n\n .. code-block:: python\n client = SubClient()\n client.topic = UserEvents.USER_LOGGED_IN\n\n Message Format\n ==============\n\n The message will be sent with the following format::\n\n .. code-block:: json\n {\n \"DetailType\": self.topic,\n \"Source\": sub_config.source_name,\n \"Detail\": {\n \"event\": ,\n \"datetime\": ,\n \"payload\": \n }\n }\n\n Sending Message for the First Time\n ==================================\n\n First, we will check if Eventbridge\n has a rule for this topic. For example,\n for topic USER_REGISTERED, we will look for the A55UserRegistered\n rule. If it does not exist, we will create it automatically,\n before sending the message.\n\n Matching Rules\n ==============\n\n This payload will be sent to AWS Eventbridge,\n which will try to match the payload events\n looking for the fields \"DetailType\"\n and \"Detail\".\"event\". 
That rule will redirect the\n message to its corresponding SNS Topic, as per\n the Eventbridge Destinations configuration.\n\n :param message: A Python dictionary containing the message\n :returns: None\n \"\"\"\n\n if not isinstance(message, dict):\n raise ValueError(\"PubSub Message must be a dictionary\")\n\n if not self.topic:\n raise ValueError(\"You must define a topic before sending messages\")\n\n try:\n self._prepare_send_message()\n\n logger.info(f\"Sending message for topic: {self.topic}...\")\n\n payload = {\n \"event\": self.topic,\n \"datetime\": arrow.utcnow().isoformat(),\n \"payload\": message,\n }\n\n logger.debug(\n f\"Source is: {sub_config.source_name}. \"\n f\"Detail Type is: {self.topic}. Message is: {payload}\"\n )\n response = self.get_client(\"events\").put_events(\n Entries=[\n {\n \"DetailType\": self.topic,\n \"Source\": sub_config.source_name,\n \"Detail\": json.dumps(payload),\n }\n ]\n )\n logger.debug(f\"Send message response: {response}\")\n\n if response[\"FailedEntryCount\"] > 0:\n raise PubSubException(\"Cannot send message.\")\n logger.info(f\"Message sent successfully for topic: {self.topic}\")\n except Exception as error:\n logger.error(error)\n raise PubSubException() from error\n\n def get_messages(self, from_dead_letter: bool = False, auto_remove: bool = False):\n \"\"\"Get Messages for selected Topic.\n\n This command will receive all messages available to the topic selected.\n You must define a topic before sending this command::\n\n .. code-block:: python\n client = SubClient()\n client.topic = UserEvents.USER_LOGGED_IN\n\n Message Format\n ==============\n\n AWS SQS will send us all available messages\n with the following format::\n\n .. code-block:: json\n {\n \"Messages\": [\n \"Body\": {\n \"Message\": {\n \"event\": ,\n \"datetime\": ,\n \"payload\": \n }\n }\n ]\n }\n\n Subdivisions will get the \"Body\" from each message\n and return to you only the \"payload\" data.\n\n Receiving Messages for the First Time\n =====================================\n\n First, we will check in SQS if the corresponding SNS\n Topic Signature exists. For example,\n for topic USER_REGISTERED, we will look for `a55__`\n signature. If it does not exist, we will create it automatically,\n before receiving messages.\n\n Note: the first time you run the `get_messages` command, no SQS queues\n exist. 
Any messages already sent to this Topic are lost (because none\n of them were transmitted to the new signature queues).\n All messages sent before the SQS queues were created must be sent again.\n\n :param from_dead_letter: Receive from topic's dead letter queue\n :param auto_remove: Remove messages after receiving them.\n :return: None\n \"\"\"\n try:\n self._prepare_subscribe()\n\n sqs_builder = SubDivisionsSQSBuilder(topic=self.topic)\n queue_url = sqs_builder.get_queue(\n from_dead_letter=from_dead_letter\n ).queue_url\n\n message_list = []\n while True:\n response = self.get_client(\"sqs\").receive_message(\n QueueUrl=queue_url, MaxNumberOfMessages=10\n )\n if not response.get(\"Messages\"):\n break\n\n message_list += [\n json.loads(json.loads(message[\"Body\"])[\"Message\"])\n for message in response[\"Messages\"]\n ]\n self.received_handlers += [\n (queue_url, message[\"ReceiptHandle\"])\n for message in response[\"Messages\"]\n ]\n\n logger.info(\n f\"Received {len(message_list)} message(s) \"\n f\"from queue: {sqs_builder.queue_name}.\"\n )\n\n if (sub_config.auto_remove_from_queue or auto_remove) and len(\n self.received_handlers\n ) > 0:\n self.delete_received_messages()\n else:\n logger.debug(\n f\"Received {len(message_list)} message(s) \"\n f\"are still in queue: {sqs_builder.queue_name}.\"\n )\n\n return message_list\n except Exception as error:\n logger.error(error)\n raise PubSubException() from error\n\n def delete_received_messages(self):\n for queue_url, receipt_handle in self.received_handlers:\n self.get_client(\"sqs\").delete_message(\n QueueUrl=queue_url, ReceiptHandle=receipt_handle\n )\n queue_name = SubDivisionsSQSBuilder(topic=self.topic).queue_name\n logger.info(\n f\"Removed {len(self.received_handlers)} \"\n f\"message(s) from queue: {queue_name}.\"\n )\n self.received_handlers = []\n","repo_name":"chrismaille/subdivisions","sub_path":"subdivisions/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":11112,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"32130905539","text":"\nimport dill\n\nfrom examples.python.distributed import config, core, core_pb2, ppu_device, py_device\n\n\nclass Context:\n def __init__(self, zconf: core_pb2.WorldDesc):\n config.validate_config(zconf)\n self.zconf = zconf\n self.devices: [core.DeviceClient] = []\n self._host_node_id = \"\"\n\n for ddesc in zconf.devices:\n if ddesc.kind == core_pb2.DeviceKind.PYRT:\n node = config.find_node(zconf, ddesc.py_device.node_id)\n dev = py_device.PyDeviceClient(self, ddesc.rank, node.addr)\n elif ddesc.kind == core_pb2.DeviceKind.PPU:\n assert len(ddesc.ppu_device.node_ids) >= 1\n node_addrs = [\n config.find_node(zconf, node_id).addr\n for node_id in ddesc.ppu_device.node_ids\n ]\n dev = ppu_device.PpuDeviceClient(self, ddesc.rank, node_addrs,\n ddesc)\n else:\n raise ValueError(\"unsupported device kind={}\".format(ddesc.kind))\n self.devices.append(dev)\n\n def get_dev(self, zrank: int) -> core.DeviceClient:\n matches = [dev for dev in self.devices if dev.zrank == zrank]\n if len(matches) != 1:\n raise ValueError(\"expected exactly 1 device with rank {}, found {}\".format(zrank, len(matches)))\n return matches[0]\n\n def host_node_id(self):\n return self._host_node_id\n\n\nclass WorkerContext(Context):\n def __init__(self, zconf: core_pb2.WorldDesc, host_node_id):\n super().__init__(zconf)\n self._host_node_id = host_node_id\n\n\nclass DriverContext(Context):\n def __init__(self, zconf: core_pb2.WorldDesc):\n super().__init__(zconf)\n self._uuid = 0\n\n 
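# mints sequential symbol names (\"V1\", \"V2\", ...) used to key transferred objects in device symbol tables\n 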
def new_name(self):\n self._uuid = self._uuid + 1\n return \"V{}\".format(self._uuid)\n\n def transfer(self, obj, dst_rank: int) -> core.DeviceObject:\n if not isinstance(obj, core.DeviceObject):\n nname = self.new_name()\n return self.get_dev(dst_rank).put(obj, nname)\n\n if obj.zrank == dst_rank:\n # already resides on this device.\n return obj\n\n src_dev = self.get_dev(obj.zrank)\n dst_dev = self.get_dev(dst_rank)\n\n # FIXME(jint) don't use internal methods like zsymbols & _submit\n if src_dev.kind() == core_pb2.DeviceKind.PYRT:\n # source device is a PYRT, ask it to put.\n def wrapper(server, dname, drank, obj):\n src_obj = server.zsymbols[obj.zname]\n dev = server.zctx.get_dev(drank)\n # fetch the object within server's context\n dev.put(src_obj, dname)\n\n dname = self.new_name()\n routine = dill.dumps((wrapper, [dname, dst_rank, obj], {}),\n recurse=True)\n src_dev._submit(routine, \"PY-TRANS:{}\".format(dname))\n return core.DeviceObject(dst_rank, dname)\n elif dst_dev.kind() == core_pb2.DeviceKind.PYRT:\n # dest device is PYRT, ask it to get.\n def wrapper(server, dname, obj):\n dev = server.zctx.get_dev(obj.zrank)\n # fetch the object within server's context\n server.zsymbols[dname] = dev.get(obj)\n\n dname = self.new_name()\n routine = dill.dumps((wrapper, [dname, obj], {}), recurse=True)\n dst_dev._submit(routine, \"PY-TRANS:{}\".format(dname))\n return core.DeviceObject(dst_rank, dname)\n else:\n raise RuntimeError(\"transfer from {} to {} not supported\".format(\n src_dev.kind(), dst_dev.kind()))\n\n def get_py_dev_rank_from_node(self, node_id: str):\n for ddesc in self.zconf.devices:\n if (ddesc.kind == core_pb2.DeviceKind.PYRT\n and ddesc.py_device.node_id == node_id):\n return ddesc.rank\n return None\n","repo_name":"peiji1981/ppu","sub_path":"examples/python/distributed/context.py","file_name":"context.py","file_ext":"py","file_size_in_byte":3901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"8554048218","text":"import socket\nimport os\nimport time\nimport argparse\nimport threading\n\nFORMAT = 'utf-8' # Format in which a file is encoded. This format works for .txt, .mp4 file extensions.\nSIZE = 1024 # The size of the packets sent over the TCP connection; could be smaller if the router fragments it to match a smaller MTU.\nPORTS_AVAILABLE = [4455, 4456, 4457, 4458, 4459, 4460] # Total ports available to the server for listening to clients. Each port number paired with the machine's IP gives a separate server.\n\npackets = 20\n\ndef log(logtype, message):\n \"\"\"\n Prints out a log message on screen.\n logtype: type of the message i.e. connection, activation, disconnection etc.\n message: the message to be shown on log.\n \"\"\"\n\n LOG_TYPES = ['[STARTING]', '[ACTIVE]', '[CONNECTED]', '[DISCONNECTED]', '[SENT]', '[ERROR]' ,'[INFO]'] # Statuses\n print(LOG_TYPES[logtype] + '\\t' + message)\n\ndef fragment(string, parts):\n \"\"\"Divides the input string of bytes into \"parts\" parts.\n string: The byte string to be divided\n parts: Number of parts in which the string is to be divided\n \"\"\"\n k, m = divmod(len(string), parts)\n return (string[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(parts))\n\n
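# For illustration: list(fragment(b\"abcdefg\", 3)) -> [b\"abc\", b\"de\", b\"fg\"]\n\ndef makeserver(status, servernumber):\n \"\"\"\n Create the server.\n status: list of per-server liveness flags (True = running), shared with the rest of the module\n servernumber: based on the number of servers created (0, 1, 2, ...) 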
increases linearly\n \"\"\"\n\n # IP = '10.7.81.13' # The IP of the server machine\n # IP = socket.gethostbyname(socket.gethostname()) # For file transfer on a local machine.\n IP = '10.7.49.208' # For file transfer over the internet. Use your device's IP here. Use 'ipconfig' in command prompt for more details on your IP.\n PORT = PORTS_AVAILABLE[servernumber]\n\n ADDR = (IP,PORT)\n\n global server_sockets\n\n log(0, f'Server {servernumber} is starting.')\n server = socket.socket(socket.AF_INET,socket.SOCK_STREAM) # AF_INET for IPv4, SOCK-STREAM for TCP socket, use SOCK_DGRAM for UDP socket.\n server_sockets.append(server) # Append the server socket just made into the array of server sockets.\n \n\n server.bind(ADDR)\n server.listen()\n log(1, f\"Server {servernumber} is active.\")\n\n while True:\n conn, addr = server.accept()\n log(2, f\"{addr} connected.\")\n\n ## Sending the file (mp4) to the client\n \n # Getting information of the file to be sent\n file_name = file_location\n file_size = os.path.getsize(file_name)\n\n # Send the file details to the client\n with open(file_name, \"rb\") as file:\n # conn.send(file_name.encode(FORMAT))\n conn.send(str(file_size).encode(FORMAT))\n\n data = file.read(file_size)\n \n s = (int(conn.recv(1024))) # Receive the segment number to send from a particular server.\n\n segments_gen = fragment(data, totalservers) # Fragment the data across the available servers.\n segments = [] # Array that contains data to be sent from each server, based on index (0 index has the data to be sent by server 0 and so on).\n for k in segments_gen: # Injection of the data into the segments[] array\n segments.append(k)\n\n sub_segments_gen = fragment(segments[s], packets) # Divide the segment further into about 20 packets.\n sub_segments = [] # Array that contains subsegments (parts of segment) for every server (separate array for every server).\n for k in sub_segments_gen:\n sub_segments.append(k)\n seg_num = str(s).encode()\n conn.send(seg_num) # Sending the segment number requested by client.\n\n for i in range(packets): # Sending the subsegments to the client.\n conn.sendall(sub_segments[i])\n\n conn.close()\n log(3, f\"{addr} has disconnected.\")\n \n
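# For example, typing 'k0' at the input prompt at the bottom of this script shuts down server 0.\ndef closeserver(closestring):\n \"\"\"\n Closes the server according to the command in closestring.\n closestring: the string that initiates the server closing process, 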
like 'k0' for server 0\n \"\"\"\n server_status[int(closestring[1:2])] = False # For instance [True, True, True] --> [False, True, True] for 'k0'\n server_sockets[int(closestring[1:2])].close() # For instance [active, active, active] --> [inactive, active, active] for 'k0'\n\ndef main(status, servercount):\n for servernumber in range(servercount):\n serverthread = threading.Thread(target=makeserver, args=(status, servernumber))\n serverthread.start()\n\ndef refresh():\n \"\"\"\n Refreshes the server status display.\n \"\"\"\n while True:\n time.sleep(interval)\n clrscr()\n for i in range(totalservers):\n log(6, f\"Server {i}\\tPort: {PORTS_AVAILABLE[i]} Status: {server_status[i]}, to shutdown server {i}, type 'k{i}' \")\n\ndef clrscr():\n \"\"\"\n Clear Console Screen\n \"\"\"\n os.system('cls' if os.name == 'nt' else 'clear')\n\nparser = argparse.ArgumentParser()\n\n# adding parameters\nparser.add_argument('-i', '--status_interval', help=\"Time interval in seconds between server status reporting.\", type=int, default=2)\nparser.add_argument('-n', '--num_servers', help=\"Total number of virtual servers.\", type=int)\nparser.add_argument('-f', '--file_location', help=\"Address pointing to the file location.\", default=\"to_be_sent.mp4\")\nparser.add_argument('-p', '--list_of_ports', nargs='+',\n help=\"List of port numbers (‘n’ port numbers, one for each server).\", type=int, metavar=\"port_list\", default=PORTS_AVAILABLE)\nargs = parser.parse_args()\n\ninterval = args.status_interval # Time interval between refreshes (seconds).\ntotalservers = args.num_servers # Total number of the servers.\nfile_location = args.file_location # Location of the file to be sent.\nPORTS_AVAILABLE = args.list_of_ports # Changes the list of ports to the list of ports assigned by the user.\nserver_status = [True] * totalservers # Defines an array of boolean values, where true means the server is running.\nserver_sockets = [] # Sockets registered by each makeserver thread (used by closeserver).\nserver_threads = []\n\nfor portnumber in PORTS_AVAILABLE:\n if portnumber < 1024:\n log(5, 'Invalid port number.')\n exit()\n\nif __name__ == '__main__':\n main(server_status, totalservers)\n\noutputThread = threading.Thread(target=refresh)\noutputThread.start()\nwhile True:\n command = input()\n closeserver(command)\n print(command) ","repo_name":"TaimoorIkram/multi-server-file-downloader","sub_path":"server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":6411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"21283068963","text":"import uuid\n\nfrom django.conf import settings\nfrom django.db import models\n\n# Projectroles dependency\nfrom projectroles.models import Project\n\n# Samplesheets dependency\nfrom samplesheets.models import Assay\n\nimport landingzones.constants as lc\n\n# Access Django user model\nAUTH_USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')\n\n\nclass LandingZone(models.Model):\n \"\"\"Class representing a user's iRODS landing zone for an assay\"\"\"\n\n #: Title of the landing zone\n title = models.CharField(\n max_length=255, unique=False, help_text='Title of the landing zone'\n )\n\n #: Project in which the landing zone belongs\n project = models.ForeignKey(\n Project,\n related_name='landing_zones',\n help_text='Project in which the landing zone belongs',\n on_delete=models.CASCADE,\n )\n\n #: User who owns the landing zone\n user = models.ForeignKey(\n AUTH_USER_MODEL,\n related_name='landing_zones',\n help_text='User who owns the landing zone',\n on_delete=models.CASCADE,\n 
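# on_delete=CASCADE: deleting the user also removes their landing zones\n 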
)\n\n #: Assay for which the landing zone belongs\n assay = models.ForeignKey(\n Assay,\n related_name='landing_zones',\n help_text='Assay for which the landing zone belongs',\n on_delete=models.CASCADE,\n )\n\n #: Status of landing zone\n status = models.CharField(\n max_length=64,\n null=False,\n blank=False,\n default=lc.ZONE_STATUS_CREATING,\n help_text='Status of landing zone',\n )\n\n #: Additional status information\n status_info = models.CharField(\n max_length=1024,\n null=True,\n blank=True,\n default=lc.DEFAULT_STATUS_INFO[lc.ZONE_STATUS_CREATING],\n help_text='Additional status information',\n )\n\n #: DateTime of last folder modification\n date_modified = models.DateTimeField(\n auto_now=True, help_text='DateTime of last landing zone modification'\n )\n\n #: Landing zone description (optional)\n description = models.TextField(\n unique=False,\n blank=True,\n help_text='Landing zone description (optional)',\n )\n\n #: Message displayed to project members on zone move (optional)\n user_message = models.CharField(\n max_length=1024,\n unique=False,\n blank=True,\n help_text='Message displayed to project members on successful zone '\n 'moving if member notifications are enabled (optional)',\n )\n\n #: Special configuration\n configuration = models.CharField(\n max_length=64,\n unique=False,\n blank=True,\n null=True,\n help_text='Special configuration (optional, leave blank for a '\n 'standard landing zone)',\n )\n\n #: Configuration data (for storing plugin-specific settings)\n config_data = models.JSONField(\n default=dict,\n help_text='Configuration data (for storing plugin-specific settings)',\n )\n\n #: Landing zone SODAR UUID\n sodar_uuid = models.UUIDField(\n default=uuid.uuid4, unique=True, help_text='Landing zone SODAR UUID'\n )\n\n class Meta:\n ordering = ['project', 'assay__file_name', 'title']\n # Ensure name is unique within project and user\n unique_together = ('title', 'project', 'user')\n\n def __str__(self):\n return '{}: {}/{}'.format(\n self.project.title, self.user.username, self.title\n )\n\n def __repr__(self):\n values = (self.project.title, self.user.username, self.title)\n return 'LandingZone({})'.format(', '.join(repr(v) for v in values))\n\n # Custom row-level functions\n\n def get_project(self):\n \"\"\"Get project in cases where multiple object types may be included\"\"\"\n return self.project\n\n def set_status(self, status, status_info=None):\n \"\"\"Set zone status\"\"\"\n if status not in lc.ZONE_STATUS_TYPES:\n raise TypeError('Unknown status \"{}\"'.format(status))\n self.status = status\n if status_info:\n self.status_info = status_info\n else:\n self.status_info = lc.DEFAULT_STATUS_INFO[status][:1024]\n self.save()\n\n def is_locked(self):\n \"\"\"\n Return True/False depending on whether write access to zone is currently\n locked.\n \"\"\"\n return self.status in lc.STATUS_LOCKING\n\n def can_display_files(self):\n \"\"\"\n Return True/False depending on whether file info should be displayed.\n \"\"\"\n return self.status in lc.STATUS_DISPLAY_FILES\n","repo_name":"bihealth/sodar-server","sub_path":"landingzones/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4503,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"60"} +{"seq_id":"23556947555","text":"def calcLength(src, size):\n res = \"\"\n cnt = 1\n prev = src[0:size]\n for i in range(size, len(src), size):\n cur = src[i:i+size]\n if cur == prev:\n cnt += 1\n else:\n res += (str(cnt) if cnt > 1 else \"\") + prev\n prev = cur\n cnt = 1\n res += (str(cnt) if cnt > 1 else \"\") + prev\n return len(res)\n\n\ndef solution(s):\n min = len(s) # initial value: the length of the uncompressed string\n for size in range(1,len(s)):\n
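# compressed length when grouping the string into chunks of this size\n temp = 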
calcLength(s, size) \n if min > temp:\n min = temp\n return min\n\n\ndef main():\n solution(\"aabbaccc\")\n\n\nif __name__ == '__main__':\n main()","repo_name":"KwakSeungeun/Programmers","sub_path":"2020_kakao_01.py","file_name":"2020_kakao_01.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"27475906429","text":"class Solution:\n def longestString(self, word: str):\n i = 0\n while i < len(word):\n if word.count(word[i]) > 1:\n word = word[:i] + word[i+1:]\n i = 0\n\n else:\n i += 1\n\n return word\n\ns = Solution()\n\nprint(s.longestString(\"pwwkew\"))","repo_name":"egepaksoy/LongestSubstring","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"37092852702","text":"# reset every data value to 0\nwith open(\"countries.txt\",\"r+\") as file:\n lines = file.readlines()\n new_lines = []\n for line in lines:\n tmp_str = \"\"\n for i in range(len(line)-1,-1,-1):\n if line[i] == '\\t':\n if not len(line[:i])==1:\n tmp_str = line[:i]+\"\\t0\\n\"\n break\n new_lines.append(tmp_str)\n file.seek(0)\n file.truncate()\n file.writelines(new_lines)\n","repo_name":"Musa-Sina-Ertugrul/NglView_website","sub_path":"tmp_script.py","file_name":"tmp_script.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"4293330316","text":"\nN = int(input())\n\nls = [int(input()) for i in range(N)]\n\nans_min = sum(ls)\n\nfor i in range(2**N):\n first_niku, second_niku = 0,0\n for j in range(N):\n if (1 << j) & i:\n first_niku += ls[j]\n else:\n second_niku += ls[j]\n minutes = max(first_niku, second_niku)\n if ans_min > minutes:\n ans_min = minutes\n\nprint(ans_min)\n\n#https://atcoder.jp/contests/arc029/tasks/arc029_1","repo_name":"murakami10/atc_python","sub_path":"solved/02/arc029a.py","file_name":"arc029a.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"24594781767","text":"import math\nimport numpy as np\n\n\nclass Cleaner:\n # This class takes as input a '.pdb' file and outputs a list containing only the 'c3' atoms in that file.\n def __init__(self, path_pdb_file):\n self.path_pdb_file = path_pdb_file\n self.atom_list = []\n self.c3_list = []\n\n # 'Return' a List containing all atoms\n def atom_extractor(self):\n pdb_file = open(self.path_pdb_file, 'r')\n for line in pdb_file:\n if line[0:4] == 'ATOM':\n cols = line.strip('\\n')\n # Handles the particular structure of the '.pdb' file\n atom = [cols[0:6], cols[6:11], cols[12:16], cols[16:17], cols[17:20], cols[21:22], cols[22:26],\n cols[26:27], cols[30:38], cols[38:46], cols[46:54], cols[54:60], cols[60:66], cols[76:78],\n cols[78:80]]\n elm = [i.replace(' ', '') for i in atom]\n self.atom_list.append(elm)\n pdb_file.close()\n\n # Select only c3' atoms of the atom_list\n def c3_extractor(self):\n for elm in self.atom_list:\n if elm[2] == \"C3'\":\n self.c3_list.append(elm)\n\n def run(self):\n self.atom_extractor()\n self.c3_extractor()\n return self.c3_list\n\n\ndef distance_calculator(res1, res2):\n \"\"\"\n Calculates the distance between two residues\n :param res1: Residue 1\n :param res2: Residue 2\n :return: A float representing the distance between the two residues taken as input\n \"\"\"\n res1 = list(np.float_(res1))\n res2 = list(np.float_(res2))\n distance = ((res1[0] - res2[0]) ** 2 + (res1[1] - res2[1]) ** 2 + (res1[2] - res2[2]) ** 2) ** (1 / 2)\n
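# Euclidean distance between the two C3' atoms in 3D space\n return 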
distance\n\n\ndef bp_attribution(c3_list, objective, frequency=None, gibbs_free_energy=None, energy=None):\n \"\"\"\n From a list of bases, pairs are created according to pre-established conditions; depending on the distance and\n the base pair, each pair is either counted in a frequency list (training) or used to compute the Gibbs free energy (scoring)\n :param c3_list: List where each element is a line of a C3' atom extracted from a pdb file and preprocessed by the Cleaner class.\n :param objective: 'training' or 'scoring' depending on whether the function is used to train the objective function (training)\n or to estimate the Gibbs free energy of an RNA conformation (scoring).\n :param frequency: Only for 'training'\n The 'frequency' list is of the form [[[AA], [0,0...0]], [[AC], [0,0...0]] ... [[XX], [0,0...0]]] where the last element of the list, 'XX', contains the reference frequency.\n In this part the corresponding distance interval is incremented by 1 for each base pair.\n :param gibbs_free_energy: Only for 'scoring'\n :param energy: Path to a folder 'Energy' containing 10 files (one per base pair) of 20 lines each.\n :return:\n \"\"\"\n for res1 in c3_list:\n compare = False\n for res2 in c3_list:\n if compare: # Allows a base pair to be processed only once\n if res1[5] == res2[5] and (int(res1[6]) + 4) <= int(res2[6]): # Conditions: Only 'intra-chain' distances are considered & Only consider residues separated by at least 3 positions on the sequence\n bp = ''.join(sorted((res1[4] + res2[4])))\n dist = distance_calculator(res1[8:11], res2[8:11])\n if objective == 'training':\n if 0 <= dist <= 20:\n for bp_tot in frequency:\n if bp == bp_tot[0]:\n int_dist = math.floor(dist)\n bp_tot[1][int_dist] += 1\n frequency[10][1][int_dist] += 1\n else: # objective == 'scoring'\n if 0.5 <= dist <= 19.5:\n gibbs_free_energy += linear_interpolation(bp, dist, energy)\n if res1 == res2:\n compare = True\n if objective == 'training':\n return frequency\n else:\n return gibbs_free_energy\n\n\ndef linear_interpolation(bp, dist, energy):\n \"\"\"\n A score value is calculated using linear interpolation.\n Each line is taken as the midpoint of its interval. (e.g. 
line 1, corresponding to the interval [0, 1], is\n considered to be the value 0.5 in the interpolation.)\n Thus, only values between 0.5 and 19.5 are taken into account.\n Note: this function is only used in the 'scoring.py' script but lives in this file to avoid a circular import.\n :param energy: Path to a folder 'Energy' containing 10 files (one per base pair) of 20 lines each.\n :param bp: A base pair\n :param dist: Distance between the two bases\n :return: Energy associated with base pair and distance\n \"\"\"\n with open(energy + '/' + bp, 'r') as energy:\n content = energy.readlines()\n energy_before = float(content[math.floor(dist) - 1])\n energy_after = float(content[math.floor(dist)])\n energy = energy_before + (dist - (math.floor(dist) - 0.5)) * (energy_after - energy_before) # Linear Interpolation Formula\n return energy\n","repo_name":"h-escoffier/RNA-ERGY","sub_path":"shared.py","file_name":"shared.py","file_ext":"py","file_size_in_byte":5121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"45584079899","text":"from msct_parser import Parser\nimport sys\n\n\n# 
DEFAULT PARAMETERS\nclass Param:\n ## The constructor\n def __init__(self):\n self.verbose = 1\n\n\n# PARSER\n# ==========================================================================================\ndef get_parser():\n # parser initialisation\n parser = Parser(__file__)\n\n # initialize parameters\n param = Param()\n param_default = Param()\n\n parser.usage.set_description('Convert image file to another type.')\n parser.add_option(name=\"-i\",\n type_value=\"file\",\n description=\"File input\",\n mandatory=True,\n example='data.nii.gz')\n parser.add_option(name=\"-o\",\n type_value=\"file_output\",\n description=\"File output (indicate new extension)\",\n mandatory=True,\n example=['data.nii'])\n parser.add_option(name=\"-squeeze\",\n type_value='multiple_choice',\n description='Squeeze data dimension (remove unused dimension).',\n mandatory=False,\n example=['0', '1'],\n default_value='1')\n return parser\n\n\n# conversion\n# ==========================================================================================\ndef convert(fname_in, fname_out, squeeze_data=True, type=None, verbose=1):\n \"\"\"\n Convert data\n :return True/False\n \"\"\"\n from msct_image import Image\n from sct_utils import printv\n printv('sct_convert -i '+fname_in+' -o '+fname_out, verbose, 'code')\n # Open file\n im = Image(fname_in)\n # Save file\n im.setFileName(fname_out)\n if type is not None:\n im.changeType(type=type)\n im.save(squeeze_data=squeeze_data)\n return im\n\n\n# MAIN\n# ==========================================================================================\ndef main(args = None):\n\n if not args:\n args = sys.argv[1:]\n\n # Building the command, do sanity checks\n parser = get_parser()\n arguments = parser.parse(args)\n fname_in = arguments[\"-i\"]\n fname_out = arguments[\"-o\"]\n squeeze_data = bool(int(arguments['-squeeze']))\n\n # convert file\n convert(fname_in, fname_out, squeeze_data=squeeze_data)\n\n\n\n# START PROGRAM\n# ==========================================================================================\nif __name__ == \"__main__\":\n # initialize parameters\n param = Param()\n # call main function\n main()\n\n\n\n# import os\n# import sys\n# import commands\n# import getopt\n# import sct_utils as sct\n# import nibabel as nib\n# from scipy.io import netcdf\n#\n#\n# # 
def mnc2niigz(fname_data,fname_out):\n# print \"Converting from minc to nifti\"\n# path_out, file_out, ext_out = sct.extract_fname(fname_out)\n# fname_data_tmp=path_out+file_out+\".nii\"\n# sct.run(\"mnc2nii \"+fname_data+\" \"+fname_data_tmp)\n# sct.run(\"gzip \"+fname_data_tmp)\n#\n# # Convert file from nifti to volumeviewer\n# # ==========================================================================================\n# def nii2volviewer(fname_data,fname_out):\n# print \"Converting from nifti to volume viewer\"\n# path_in, file_in, ext_in = sct.extract_fname(fname_data)\n# path_out, file_out, ext_out = sct.extract_fname(fname_out)\n# fname_data_nii = path_out+\"tmp.\"+file_out+'.mnc'\n# nii2mnc(fname_data,fname_data_nii)\n# mnc2volviewer(fname_data_nii,path_out+file_out)\n#\n# # Convert file from nifti to volumeviewer\n# # ==========================================================================================\n# def niigz2volviewer(fname_data,fname_out):\n# print \"Converting from nifti to volume viewer\"\n# path_in, file_in, ext_in = sct.extract_fname(fname_data)\n# path_out, file_out, ext_out = sct.extract_fname(fname_out)\n# fname_data_mnc = path_out+\"tmp.\"+file_out+'.mnc'\n# niigz2mnc(fname_data,fname_data_mnc)\n# mnc2volviewer(fname_data_mnc,path_out+file_out)\n#\n# # Convert file from minc to volumeviewer\n# # ==========================================================================================\n# def mnc2volviewer(fname_data,fname_out):\n# print \"Converting from minc to volume viewer\"\n# sct.run(\"isct_minc2volume-viewer \"+fname_data+\" -o \"+fname_out)\n#\n#\n# # Print usage\n# # ==========================================================================================\n# def usage():\n# print \"\"\"\n# \"\"\"+os.path.basename(__file__)+\"\"\"\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Part of the Spinal Cord Toolbox \n#\n# DESCRIPTION\n# Convert files from nifti to minc, minc to nifti or nifti to volume viewer.\n#\n# USAGE\n# \"\"\"+os.path.basename(__file__)+\"\"\" -i \n#\n# MANDATORY ARGUMENTS\n# -i input volume\n#\n# OPTIONAL ARGUMENTS\n# -o output volume. Add extension. Default=\"data\".nii\n# -v {0,1} verbose. Default=\"\"\"+str(param_default.verbose)+\"\"\"\n# -h help. Show this message\n# \"\"\"\n# # exit program\n# sys.exit(2)\n\n","repo_name":"ReyhanehA/GDP40","sub_path":"97182_sct_convert.py_C__Users_user_Desktop_data_2_data_google_data_neuropoly_spinalcordtoolbox_s.py","file_name":"97182_sct_convert.py_C__Users_user_Desktop_data_2_data_google_data_neuropoly_spinalcordtoolbox_s.py","file_ext":"py","file_size_in_byte":9308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"42122773506","text":"\"\"\"\nSettings and configuration from Joomla!.\n\nValues will be read from Joomla! 
{"seq_id":"42122773506","text":"\"\"\"\nSettings and configuration from Joomla!.\n\nValues will be read from the Joomla! configuration file specified by the\nJOOMLA_CONFIGURATION environment variable, and then from\ndjoonga.conf.global_settings; see the global settings file for\na list of all possible variables.\n\"\"\"\n\nimport os\nimport subprocess\nfrom io import BytesIO\n\nimport phpserialize\n\nfrom djoonga.utils.exceptions import ConfigurationNotFound\n\nENVIRONMENT_VARIABLE = \"JOOMLA_CONFIGURATION\"\n\ntry:\n    configuration = os.environ[ENVIRONMENT_VARIABLE]\n    if not configuration:  # If it's set but is an empty string.\n        raise KeyError\nexcept KeyError:\n    # NOTE: This is arguably an EnvironmentError, but that causes\n    # problems with Python's interactive help.\n    raise ConfigurationNotFound(\"Configuration cannot be loaded, because environment variable %s is undefined.\" % ENVIRONMENT_VARIABLE)\n\nif not os.path.exists(configuration):\n    raise ConfigurationNotFound(\"Configuration file could not be found at %s\" % configuration)\n\npath = os.path.abspath(os.path.join(os.getcwd(), configuration))\n\n# Have PHP load the Joomla! config class and hand it back serialized.\ncmd = ['php', '-r', '''include('%s'); $config = new JConfig; echo serialize($config);''' % path]\nproc = subprocess.Popen(cmd, stdout=subprocess.PIPE)\noutput, error = proc.communicate()\n\n# phpserialize expects a byte stream under Python 3.\njconfig = phpserialize.load(BytesIO(output), object_hook=phpserialize.phpobject)\n","repo_name":"taras/djoonga","sub_path":"src/djoonga/src/djoonga/conf/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"60"}
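# ----------------------------------------------------------------------
# Editor's sketch (not part of djoonga): if shelling out to PHP is not an
# option, a Joomla! configuration.php can usually be scraped directly,
# since JConfig is a flat list of `public $name = 'value';` lines. This
# assumes a typical configuration.php layout; parse_jconfig is mine.
# ----------------------------------------------------------------------
import re

_PROP_RE = re.compile(r"public\s+\$(\w+)\s*=\s*(?:'((?:[^'\\]|\\.)*)'|(-?\d+(?:\.\d+)?))\s*;")


def parse_jconfig(source):
    """Return a dict of JConfig properties from configuration.php source text."""
    config = {}
    for name, string_val, number_val in _PROP_RE.findall(source):
        if number_val:
            config[name] = float(number_val) if '.' in number_val else int(number_val)
        else:
            config[name] = string_val.replace("\\'", "'")
    return config


# Example: parse_jconfig(open('configuration.php').read()).get('sitename')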
{"seq_id":"6158272276","text":"import numpy as np\nimport scipy.stats as st\n\nfrom instream import InStream\n\nimport os\nimport sys\nimport math\n\n\ndef ci_mean(data, alpha=0.95):\n    # References:\n    # https://www.kite.com/python/answers/how-to-compute-the-confidence-interval-of-a-sample-statistic-in-python\n    # https://www.kite.com/python/examples/702/scipy-compute-a-confidence-interval-from-a-dataset\n\n    # Example data: data = [0.72, 0.74, 0.73, 0.70, 0.75]\n\n    # Confidence interval for the population mean from the t-distribution.\n    # NOTE: SciPy >= 1.9 renames the alpha= keyword to confidence=.\n    ci = st.t.interval(alpha=alpha, df=len(data)-1, loc=np.mean(data), scale=st.sem(data))\n\n    # The CI is a symmetric bound around the mean, e.g. CI=(0.7041..., 0.7518...);\n    # we want the margin of error, i.e. the distance from the mean to one bound.\n    u = np.mean(data)\n    moe = ci[1] - u  # same distance from u to the lower bound\n    return moe\n\n\ndef ci_proportion(p_hat_list, confidenceLevel):\n    # z-scores for common confidence levels\n    confidenceLevels = {90: 1.64, 95: 1.96, 98: 2.33, 99: 2.58}\n    z = confidenceLevels[confidenceLevel]\n\n    p_hat = np.mean(p_hat_list)\n    n = len(p_hat_list)\n\n    SE_hat_p = np.sqrt(p_hat*(1-p_hat)/n)\n    moe = z * SE_hat_p  # was hardcoded to 2*SE, which only approximates the 95% level\n    lb = np.round(p_hat - moe, 2)\n    ub = np.round(p_hat + moe, 2)\n\n    print('With {}% confidence between {} and {}'.format(confidenceLevel, lb, ub))\n\n\ndef parseAccuracy(resultFile):\n    # Scan the result file for a line like \"Accuracy:0.89\" and return the value.\n    stream = InStream(resultFile)\n    accuracy = None\n    while not stream.isEmpty():\n        line = stream.readLine()\n        line = line.strip()  # also drops the stray leading character in the tfidf result.txt\n        if line.startswith('Accuracy:'):\n            tokens = line.split(':')\n            accuracy = float(tokens[1])\n            break\n    stream.close()\n    return accuracy\n\n\ndef parseProcessingTime(resultFile):\n    # Scan for a line like \"classification_processing_time=1434.2269115447998\" and return the value.\n    stream = InStream(resultFile)\n    processingTime = None\n    while not stream.isEmpty():\n        line = stream.readLine()\n        line = line.strip()\n        if line.startswith('classification_processing_time='):\n            tokens = line.split('=')\n            processingTime = float(tokens[1])\n            break\n    stream.close()\n    return processingTime\n\n\ndef main():\n    # Example usage:\n    #   data = [0.72, 0.74, 0.73, 0.70, 0.75]\n    #   ci_proportion(data, 95)\n    #   ci_mean(data)\n\n    if len(sys.argv) != 2:\n        print('Usage: